// github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/cache/cache.go

// +build !plan9

package cache

import (
	"context"
	"fmt"
	"io"
	"math"
	"os"
	"os/signal"
	"path"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/ncw/rclone/backend/crypt"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/cache"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fspath"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/rc"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/atexit"
	"github.com/pkg/errors"
	"golang.org/x/time/rate"
)

const (
	// DefCacheChunkSize is the default value for chunk size
	DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
	// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
	DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024)
	// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
	DefCacheChunkCleanInterval = fs.Duration(time.Minute)
	// DefCacheInfoAge is the default value for object info age
	DefCacheInfoAge = fs.Duration(6 * time.Hour)
	// DefCacheReadRetries is the default value for read retries
	DefCacheReadRetries = 10
	// DefCacheTotalWorkers is how many workers run in parallel to download chunks
	DefCacheTotalWorkers = 4
	// DefCacheChunkNoMemory enables or disables in-memory storage for chunks
	DefCacheChunkNoMemory = false
	// DefCacheRps limits the number of requests per second to the source FS
	DefCacheRps = -1
	// DefCacheWrites will cache file data on writes through the cache
	DefCacheWrites = false
	// DefCacheTmpWaitTime is how long files should be stored in the local cache before being uploaded
	DefCacheTmpWaitTime = fs.Duration(15 * time.Second)
	// DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available
	DefCacheDbWaitTime = fs.Duration(1 * time.Second)
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "cache",
		Description: "Cache a remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "remote",
			Help:     "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
			Required: true,
		}, {
			Name: "plex_url",
			Help: "The URL of the Plex server",
		}, {
			Name: "plex_username",
			Help: "The username of the Plex user",
		}, {
			Name:       "plex_password",
			Help:       "The password of the Plex user",
			IsPassword: true,
		}, {
			Name:     "plex_token",
			Help:     "The Plex token for authentication - auto set normally",
			Hide:     fs.OptionHideBoth,
			Advanced: true,
		}, {
			Name:     "plex_insecure",
			Help:     "Skip all certificate verification when connecting to the Plex server",
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `The size of a chunk (partial file data).

Use lower numbers for slower connections. If the chunk size is
changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
			Default: DefCacheChunkSize,
			Examples: []fs.OptionExample{{
				Value: "1m",
				Help:  "1 MB",
			}, {
				Value: "5M",
				Help:  "5 MB",
			}, {
				Value: "10M",
				Help:  "10 MB",
			}},
		}, {
			Name: "info_age",
			Help: `How long to cache file structure information (directory listings, file size, times etc).
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.`,
			Default: DefCacheInfoAge,
			Examples: []fs.OptionExample{{
				Value: "1h",
				Help:  "1 hour",
			}, {
				Value: "24h",
				Help:  "24 hours",
			}, {
				Value: "48h",
				Help:  "48 hours",
			}},
		}, {
			Name: "chunk_total_size",
			Help: `The total size that the chunks can take up on the local disk.

If the cache exceeds this value then it will start to delete the
oldest chunks until it goes under this value.`,
			Default: DefCacheTotalChunkSize,
			Examples: []fs.OptionExample{{
				Value: "500M",
				Help:  "500 MB",
			}, {
				Value: "1G",
				Help:  "1 GB",
			}, {
				Value: "10G",
				Help:  "10 GB",
			}},
		}, {
			Name:     "db_path",
			Default:  filepath.Join(config.CacheDir, "cache-backend"),
			Help:     "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
			Advanced: true,
		}, {
			Name:    "chunk_path",
			Default: filepath.Join(config.CacheDir, "cache-backend"),
			Help: `Directory to cache chunk files.

Path to where partial file data (chunks) are stored locally. The remote
name is appended to the final path.

This config follows the "--cache-db-path". If you specify a custom
location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
			Advanced: true,
		}, {
			Name:     "db_purge",
			Default:  false,
			Help:     "Clear all the cached data for this remote on start.",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:    "chunk_clean_interval",
			Default: DefCacheChunkCleanInterval,
			Help: `How often should the cache perform cleanups of the chunk storage.
The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.`,
			Advanced: true,
		}, {
			Name:    "read_retries",
			Default: DefCacheReadRetries,
			Help: `How many times to retry a read from a cache storage.

Since reading from a cache stream is independent from downloading file
data, readers can get to a point where there's no more data in the
cache. Most of the time this indicates a connectivity issue, meaning
the cache isn't able to provide file data anymore.

For really slow connections, increase this to a point where the stream is
able to provide data, but expect heavy stuttering.`,
			Advanced: true,
		}, {
			Name:    "workers",
			Default: DefCacheTotalWorkers,
			Help: `How many workers should run in parallel to download chunks.

Higher values will mean more parallel processing (more CPU needed)
and more concurrent requests on the cloud provider.  This impacts
several aspects like the cloud provider API limits and the stress on the
hardware that rclone runs on, but it also means that streams will be
more fluid and data will be available much faster to readers.

**Note**: If the optional Plex integration is enabled then this
setting will adapt to the type of reading performed and the value
specified here will be used as a maximum number of workers to use.`,
			Advanced: true,
		}, {
			Name:    "chunk_no_memory",
			Default: DefCacheChunkNoMemory,
			Help: `Disable the in-memory cache for storing chunks during streaming.

By default, cache will keep file data during streaming in RAM as well
to provide it to readers as fast as possible.

This transient data is evicted as soon as it is read and the number of
chunks stored doesn't exceed the number of workers. However, depending
on other settings like "cache-chunk-size" and "cache-workers" this footprint
can increase if there are parallel streams too (multiple files being read
at the same time).

If the hardware permits it, leave the in-memory cache enabled for better
streaming performance, but it can be disabled with this flag if RAM is
scarce on the local machine.`,
			Advanced: true,
		}, {
			Name:    "rps",
			Default: int(DefCacheRps),
			Help: `Limits the number of requests per second to the source FS (-1 to disable)

This setting places a hard limit on the number of requests per second
that cache will make to the cloud provider remote and tries to
respect that value by inserting waits between reads.

If you find that you're getting banned or limited on the cloud
provider through cache and know that a smaller number of requests per
second will allow you to work with it then you can use this setting
for that.

A good balance of all the other settings should make this setting
unnecessary, but it is available for more special cases.

**NOTE**: This will limit the number of requests during streams but
other API calls to the cloud provider like directory listings will
still pass.`,
			Advanced: true,
		}, {
			Name:    "writes",
			Default: DefCacheWrites,
			Help: `Cache file data on writes through the FS

If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
cache store at the same time during upload.`,
			Advanced: true,
		}, {
			Name:    "tmp_upload_path",
			Default: "",
			Help: `Directory to keep temporary files until they are uploaded.

This is the path that cache will use as temporary storage for new
files that need to be uploaded to the cloud provider.

Specifying a value will enable this feature. Without it, it is
completely disabled and files will be uploaded directly to the cloud
provider.`,
			Advanced: true,
		}, {
			Name:    "tmp_wait_time",
			Default: DefCacheTmpWaitTime,
			Help: `How long should files be stored in local cache before being uploaded

This is the duration that a file must wait in the temporary location
_cache-tmp-upload-path_ before it is selected for upload.

Note that only one file is uploaded at a time and it can take longer
to start the upload if a queue has formed for this purpose.`,
			Advanced: true,
		}, {
			Name:    "db_wait_time",
			Default: DefCacheDbWaitTime,
			Help: `How long to wait for the DB to be available - 0 is unlimited

Only one process can have the DB open at any one time, so rclone waits
for this duration for the DB to become available before it gives an
error.

If you set it to 0 then it will wait forever.`,
			Advanced: true,
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Remote             string        `config:"remote"`
	PlexURL            string        `config:"plex_url"`
	PlexUsername       string        `config:"plex_username"`
	PlexPassword       string        `config:"plex_password"`
	PlexToken          string        `config:"plex_token"`
	PlexInsecure       bool          `config:"plex_insecure"`
	ChunkSize          fs.SizeSuffix `config:"chunk_size"`
	InfoAge            fs.Duration   `config:"info_age"`
	ChunkTotalSize     fs.SizeSuffix `config:"chunk_total_size"`
	DbPath             string        `config:"db_path"`
	ChunkPath          string        `config:"chunk_path"`
	DbPurge            bool          `config:"db_purge"`
	ChunkCleanInterval fs.Duration   `config:"chunk_clean_interval"`
	ReadRetries        int           `config:"read_retries"`
	TotalWorkers       int           `config:"workers"`
	ChunkNoMemory      bool          `config:"chunk_no_memory"`
	Rps                int           `config:"rps"`
	StoreWrites        bool          `config:"writes"`
	TempWritePath      string        `config:"tmp_upload_path"`
	TempWaitTime       fs.Duration   `config:"tmp_wait_time"`
	DbWaitTime         fs.Duration   `config:"db_wait_time"`
}

// Fs represents a wrapped fs.Fs
type Fs struct {
	fs.Fs
	wrapper fs.Fs

	name     string
	root     string
	opt      Options      // parsed options
	features *fs.Features // optional features
	cache    *Persistent
	tempFs   fs.Fs

	lastChunkCleanup time.Time
	cleanupMu        sync.Mutex
	rateLimiter      *rate.Limiter
	plexConnector    *plexConnector
	backgroundRunner *backgroundWriter
	cleanupChan      chan bool
	parentsForgetFn  []func(string, fs.EntryType)
	notifiedRemotes  map[string]bool
	notifiedMu       sync.Mutex
	parentsForgetMu  sync.Mutex
}

// parseRootPath returns a cleaned root path and a nil error, or "" and an error when the path is invalid
func parseRootPath(path string) (string, error) {
	return strings.Trim(path, "/"), nil
}

// NewFs constructs a Fs from the path, container:path
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
		return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
	}

	if strings.HasPrefix(opt.Remote, name+":") {
		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
	}

	rpath, err := parseRootPath(rootPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
	}

	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
	}

	remotePath := fspath.JoinRootPath(wPath, rootPath)
	wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
		return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
	}
	var fsErr error
	fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
	if wrapErr == fs.ErrorIsFile {
		fsErr = fs.ErrorIsFile
		rpath = cleanPath(path.Dir(rpath))
	}
	// configure cache backend
	if opt.DbPurge {
		fs.Debugf(name, "Purging the DB")
	}
	f := &Fs{
		Fs:               wrappedFs,
		name:             name,
		root:             rpath,
		opt:              *opt,
		lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
		cleanupChan:      make(chan bool, 1),
		notifiedRemotes:  make(map[string]bool),
	}
	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
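	// note: the burst size equals the worker count so that, when a positive
	// limit is set, every download worker can issue a request at once; the
	// default rps of -1 means rate limiting is disabled (see the "rps" help)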

	f.plexConnector = &plexConnector{}
	if opt.PlexURL != "" {
		if opt.PlexToken != "" {
			f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
			}
		} else {
			if opt.PlexPassword != "" && opt.PlexUsername != "" {
				decPass, err := obscure.Reveal(opt.PlexPassword)
				if err != nil {
					decPass = opt.PlexPassword
				}
				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, opt.PlexInsecure, func(token string) {
					m.Set("plex_token", token)
				})
				if err != nil {
					return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
				}
			}
		}
	}

	dbPath := f.opt.DbPath
	chunkPath := f.opt.ChunkPath
	// if dbPath is non-default but chunkPath is still the default, make chunkPath follow dbPath
	if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
		chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
		chunkPath = dbPath
	}
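	// if a path with a file extension was supplied, treat it as a file and
	// use its parent directory instead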
	if filepath.Ext(dbPath) != "" {
		dbPath = filepath.Dir(dbPath)
	}
	if filepath.Ext(chunkPath) != "" {
		chunkPath = filepath.Dir(chunkPath)
	}
	err = os.MkdirAll(dbPath, os.ModePerm)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
	}
	err = os.MkdirAll(chunkPath, os.ModePerm)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
	}

	dbPath = filepath.Join(dbPath, name+".db")
	chunkPath = filepath.Join(chunkPath, name)
	fs.Infof(name, "Cache DB path: %v", dbPath)
	fs.Infof(name, "Cache chunk path: %v", chunkPath)
	f.cache, err = GetPersistent(dbPath, chunkPath, &Features{
		PurgeDb:    opt.DbPurge,
		DbWaitTime: time.Duration(opt.DbWaitTime),
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to start cache db")
	}
	// Trap SIGHUP to flush the directory cache; shutdown cleanup is registered via atexit
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	atexit.Register(func() {
		if opt.PlexURL != "" {
			f.plexConnector.closeWebsocket()
		}
		f.StopBackgroundRunners()
	})
	go func() {
		for {
			s := <-c
			if s == syscall.SIGHUP {
				fs.Infof(f, "Clearing cache from signal")
				f.DirCacheFlush()
			}
		}
	}()

	fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory)
	fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize)
	fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize)
	fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
	fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
	fs.Infof(name, "File Age: %v", f.opt.InfoAge)
	if f.opt.StoreWrites {
		fs.Infof(name, "Cache Writes: enabled")
	}

	if f.opt.TempWritePath != "" {
		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
		}
		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
		f.tempFs, err = cache.Get(f.opt.TempWritePath)
		if err != nil {
			return nil, errors.Wrap(err, "failed to create temp fs")
		}
		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
		f.backgroundRunner, _ = initBackgroundUploader(f)
		go f.backgroundRunner.run()
	}

	go func() {
		for {
			time.Sleep(time.Duration(f.opt.ChunkCleanInterval))
			select {
			case <-f.cleanupChan:
				fs.Infof(f, "stopping cleanup")
				return
			default:
				fs.Debugf(f, "starting cleanup")
				f.CleanUpCache(false)
			}
		}
	}()

	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
		pollInterval := make(chan time.Duration, 1)
		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
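		// the chunk clean interval doubles as the poll interval handed to the
		// wrapped fs's change notifier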
		doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
	}

	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
		DuplicateFiles:          false, // storage doesn't permit this
	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
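	// Fill populates the feature flags from f's own methods and Mask clears
	// those the wrapped fs doesn't support, so the cache only advertises what
	// both layers can do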
	// disable those features which rely on the temp fs when it doesn't support them
	//f.features.ChangeNotify = f.ChangeNotify
	if f.opt.TempWritePath != "" {
		if f.tempFs.Features().Copy == nil {
			f.features.Copy = nil
		}
		if f.tempFs.Features().Move == nil {
			f.features.Move = nil
		}
		if f.tempFs.Features().DirMove == nil {
			f.features.DirMove = nil
		}
		if f.tempFs.Features().MergeDirs == nil {
			f.features.MergeDirs = nil
		}
	}
	// even if the wrapped fs doesn't support it, we still want it
	f.features.DirCacheFlush = f.DirCacheFlush

	rc.Add(rc.Call{
		Path:  "cache/expire",
		Fn:    f.httpExpireRemote,
		Title: "Purge a remote from cache",
		Help: `
Purge a remote from the cache backend. Supports either a directory or a file.
Params:
  - remote = path to remote (required)
  - withData = true/false to delete cached data (chunks) as well (optional)

Eg

    rclone rc cache/expire remote=path/to/sub/folder/
    rclone rc cache/expire remote=/ withData=true
`,
	})

	rc.Add(rc.Call{
		Path:  "cache/stats",
		Fn:    f.httpStats,
		Title: "Get cache stats",
		Help: `
Show statistics for the cache remote.
`,
	})

	rc.Add(rc.Call{
		Path:  "cache/fetch",
		Fn:    f.rcFetch,
		Title: "Fetch file chunks",
		Help: `
Ensure the specified file chunks are cached on disk.

The chunks= parameter specifies the file chunks to check.
It takes a comma separated list of array slice indices.
The slice indices are similar to Python slices: start[:end]

start is the 0 based chunk number from the beginning of the file
to fetch inclusive. end is the 0 based chunk number from the beginning
of the file to fetch exclusive.
Both values can be negative, in which case they count from the back
of the file. The value "-5:" represents the last 5 chunks of a file.

Some valid examples are:
":5,-5:" -> the first and last five chunks
"0,-2" -> the first and the second last chunk
"0:10" -> the first ten chunks

Any parameter with a key that starts with "file" can be used to
specify files to fetch, eg

    rclone rc cache/fetch chunks=0 file=hello file2=home/goodbye

File names will automatically be encrypted when a crypt remote
is used on top of the cache.
`,
	})

	return f, fsErr
}
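
// An illustrative rclone.conf entry for this backend; the remote name
// "cached-gdrive" and the wrapped remote "gdrive:media" are examples only,
// while the option names come from the registration in init above:
//
//	[cached-gdrive]
//	type = cache
//	remote = gdrive:media
//	chunk_size = 5M
//	info_age = 24h
//	chunk_total_size = 10G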

func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
	out = make(rc.Params)
	m, err := f.Stats()
	if err != nil {
		return out, errors.Wrap(err, "error while getting cache stats")
	}
	out["status"] = "ok"
	out["stats"] = m
	return out, nil
}

func (f *Fs) unwrapRemote(remote string) string {
	remote = cleanPath(remote)
	if remote != "" {
		// if it's wrapped by crypt we need to check what format we got
		if cryptFs, yes := f.isWrappedByCrypt(); yes {
			_, err := cryptFs.DecryptFileName(remote)
			// if it failed to decrypt then it is in decrypted format and we need to encrypt it
			if err != nil {
				return cryptFs.EncryptFileName(remote)
			}
			// else it's in encrypted format and we can use it as is
		}
	}
	return remote
}

func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
	out = make(rc.Params)
	remoteInt, ok := in["remote"]
	if !ok {
		return out, errors.Errorf("remote is needed")
	}
	remote := remoteInt.(string)
	withData := false
	_, ok = in["withData"]
	if ok {
		withData = true
	}

	remote = f.unwrapRemote(remote)
	if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
		return out, errors.Errorf("%s doesn't exist in cache", remote)
	}

	co := NewObject(f, remote)
	err = f.cache.GetObject(co)
	if err != nil { // it could be a dir
		cd := NewDirectory(f, remote)
		err := f.cache.ExpireDir(cd)
		if err != nil {
			return out, errors.WithMessage(err, "error expiring directory")
		}
		// notify vfs too
		f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
		out["status"] = "ok"
		out["message"] = fmt.Sprintf("cached directory cleared: %v", remote)
		return out, nil
	}
	// expire the entry
	err = f.cache.ExpireObject(co, withData)
	if err != nil {
		return out, errors.WithMessage(err, "error expiring file")
	}
	// notify vfs too
	f.notifyChangeUpstream(co.Remote(), fs.EntryObject)

	out["status"] = "ok"
	out["message"] = fmt.Sprintf("cached file cleared: %v", remote)
	return out, nil
}

func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
	type chunkRange struct {
		start, end int64
	}
	parseChunks := func(ranges string) (crs []chunkRange, err error) {
		for _, part := range strings.Split(ranges, ",") {
			var start, end int64 = 0, math.MaxInt64
			switch ints := strings.Split(part, ":"); len(ints) {
			case 1:
				start, err = strconv.ParseInt(ints[0], 10, 64)
				if err != nil {
					return nil, errors.Errorf("invalid range: %q", part)
				}
				end = start + 1
			case 2:
				if ints[0] != "" {
					start, err = strconv.ParseInt(ints[0], 10, 64)
					if err != nil {
						return nil, errors.Errorf("invalid range: %q", part)
					}
				}
				if ints[1] != "" {
					end, err = strconv.ParseInt(ints[1], 10, 64)
					if err != nil {
						return nil, errors.Errorf("invalid range: %q", part)
					}
				}
			default:
				return nil, errors.Errorf("invalid range: %q", part)
			}
			crs = append(crs, chunkRange{start: start, end: end})
		}
		return
	}
	walkChunkRange := func(cr chunkRange, size int64, cb func(chunk int64)) {
		if size <= 0 {
			return
		}
		chunks := (size-1)/f.ChunkSize() + 1

		start, end := cr.start, cr.end
		if start < 0 {
			start += chunks
		}
		if end <= 0 {
			end += chunks
		}
		if end <= start {
			return
		}
		switch {
		case start < 0:
			start = 0
		case start >= chunks:
			return
		}
		switch {
		case end <= start:
			end = start + 1
		case end >= chunks:
			end = chunks
		}
		for i := start; i < end; i++ {
			cb(i)
		}
	}
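	// worked example: with an 18 MB file and the default 5 MB chunk size
	// there are 4 chunks (0..3); the range "-2:" parses to start=-2 with an
	// open end, which normalises to start=2, end=4 and so visits chunks 2 and 3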
	walkChunkRanges := func(crs []chunkRange, size int64, cb func(chunk int64)) {
		for _, cr := range crs {
			walkChunkRange(cr, size, cb)
		}
	}

	v, ok := in["chunks"]
	if !ok {
		return nil, errors.New("missing chunks parameter")
	}
	s, ok := v.(string)
	if !ok {
		return nil, errors.New("invalid chunks parameter")
	}
	delete(in, "chunks")
	crs, err := parseChunks(s)
	if err != nil {
		return nil, errors.Wrap(err, "invalid chunks parameter")
	}
	var files [][2]string
	for k, v := range in {
		if !strings.HasPrefix(k, "file") {
			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
		}
		switch v := v.(type) {
		case string:
			files = append(files, [2]string{v, f.unwrapRemote(v)})
		default:
			return nil, errors.Errorf("invalid parameter %s=%s", k, v)
		}
	}
	type fileStatus struct {
		Error         string
		FetchedChunks int
	}
	fetchedChunks := make(map[string]fileStatus, len(files))
	for _, pair := range files {
		file, remote := pair[0], pair[1]
		var status fileStatus
		o, err := f.NewObject(ctx, remote)
		if err != nil {
			fetchedChunks[file] = fileStatus{Error: err.Error()}
			continue
		}
		co := o.(*Object)
		err = co.refreshFromSource(ctx, true)
		if err != nil {
			fetchedChunks[file] = fileStatus{Error: err.Error()}
			continue
		}
		handle := NewObjectHandle(ctx, co, f)
		handle.UseMemory = false
		handle.scaleWorkers(1)
		walkChunkRanges(crs, co.Size(), func(chunk int64) {
			_, err := handle.getChunk(chunk * f.ChunkSize())
			if err != nil {
				if status.Error == "" {
					status.Error = err.Error()
				}
			} else {
				status.FetchedChunks++
			}
		})
		fetchedChunks[file] = status
	}

	return rc.Params{"status": fetchedChunks}, nil
}

// receiveChangeNotify handles notifications sent from the wrapped FS about changed files
func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
	if crypt, yes := f.isWrappedByCrypt(); yes {
		decryptedPath, err := crypt.DecryptFileName(forgetPath)
		if err == nil {
			fs.Infof(decryptedPath, "received cache expiry notification")
		} else {
			fs.Infof(forgetPath, "received cache expiry notification")
		}
	} else {
		fs.Infof(forgetPath, "received cache expiry notification")
	}
	// notify upstreams too (vfs)
	f.notifyChangeUpstream(forgetPath, entryType)

	var cd *Directory
	if entryType == fs.EntryObject {
		co := NewObject(f, forgetPath)
		err := f.cache.GetObject(co)
		if err != nil {
			fs.Debugf(f, "got change notification for non-cached entry %v", co)
		}
		err = f.cache.ExpireObject(co, true)
		if err != nil {
			fs.Debugf(forgetPath, "notify: error expiring '%v': %v", co, err)
		}
		cd = NewDirectory(f, cleanPath(path.Dir(co.Remote())))
	} else {
		cd = NewDirectory(f, forgetPath)
	}
	// expire the dir: the object's parent, or the notified dir itself
	err := f.cache.ExpireDir(cd)
	if err != nil {
		fs.Debugf(forgetPath, "notify: error expiring '%v': %v", cd, err)
	} else {
		fs.Debugf(forgetPath, "notify: expired '%v'", cd)
	}

	f.notifiedMu.Lock()
	defer f.notifiedMu.Unlock()
	f.notifiedRemotes[forgetPath] = true
	f.notifiedRemotes[cd.Remote()] = true
}

// notifyChangeUpstreamIfNeeded will notify upstreams if the wrapped remote doesn't
// support ChangeNotify itself or if a temp fs is in use
func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) {
	if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" {
		f.notifyChangeUpstream(remote, entryType)
	}
}

// notifyChangeUpstream will loop through all the upstreams and notify
// of the provided remote (should be only a dir)
func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
	f.parentsForgetMu.Lock()
	defer f.parentsForgetMu.Unlock()
	if len(f.parentsForgetFn) > 0 {
		for _, fn := range f.parentsForgetFn {
			fn(remote, entryType)
		}
	}
}

// ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e. VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
	f.parentsForgetMu.Lock()
	defer f.parentsForgetMu.Unlock()
	fs.Debugf(f, "subscribing to ChangeNotify")
	f.parentsForgetFn = append(f.parentsForgetFn, notifyFunc)
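	// drain the poll interval channel so the sender never blocks; the cache
	// relies on the wrapped fs's own ChangeNotify rather than polling itself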
	go func() {
		for range pollInterval {
		}
	}()
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("Cache remote %s:%s", f.name, f.root)
}

// ChunkSize returns the configured chunk size
func (f *Fs) ChunkSize() int64 {
	return int64(f.opt.ChunkSize)
}

// InfoAge returns the configured file age
func (f *Fs) InfoAge() time.Duration {
	return time.Duration(f.opt.InfoAge)
}

// TempUploadWaitTime returns the configured temp file upload wait time
func (f *Fs) TempUploadWaitTime() time.Duration {
	return time.Duration(f.opt.TempWaitTime)
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	var err error

	fs.Debugf(f, "new object '%s'", remote)
	co := NewObject(f, remote)
	// search for entry in cache and validate it
	err = f.cache.GetObject(co)
	if err != nil {
		fs.Debugf(remote, "find: error: %v", err)
	} else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
		fs.Debugf(co, "find: cold object: %+v", co)
	} else {
		fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge)))
		return co, nil
	}

	// search for entry in source or temp fs
	var obj fs.Object
	if f.opt.TempWritePath != "" {
		obj, err = f.tempFs.NewObject(ctx, remote)
		// not found in temp fs
		if err != nil {
			fs.Debugf(remote, "find: not found in local cache fs")
			obj, err = f.Fs.NewObject(ctx, remote)
		} else {
			fs.Debugf(obj, "find: found in local cache fs")
		}
	} else {
		obj, err = f.Fs.NewObject(ctx, remote)
	}

	// not found in either fs
	if err != nil {
		fs.Debugf(obj, "find failed: not found in either local or remote fs")
		return nil, err
	}

	// cache the new entry
	co = ObjectFromOriginal(ctx, f, obj).persist()
	fs.Debugf(co, "find: cached object")
	return co, nil
}

// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "list '%s'", dir)
	cd := ShallowDirectory(f, dir)

	// search for cached dir entries and validate them
	entries, err = f.cache.GetDirEntries(cd)
	if err != nil {
		fs.Debugf(dir, "list: error: %v", err)
	} else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
		fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs)
	} else if len(entries) == 0 {
		// TODO: read empty dirs from source?
		fs.Debugf(dir, "list: empty listing")
	} else {
		fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge)))
		fs.Debugf(dir, "list: cached entries: %v", entries)
		return entries, nil
	}

	// we first search any temporary files stored locally
	var cachedEntries fs.DirEntries
	if f.opt.TempWritePath != "" {
		queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
		if err != nil {
			fs.Errorf(dir, "list: error getting pending uploads: %v", err)
		} else {
			fs.Debugf(dir, "list: read %v from temp fs", len(queuedEntries))
			fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)

			for _, queuedRemote := range queuedEntries {
				queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
				if err != nil {
					fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
					continue
				}
				co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
				fs.Debugf(co, "list: cached temp object")
				cachedEntries = append(cachedEntries, co)
			}
		}
	}

	// search from the source
	sourceEntries, err := f.Fs.List(ctx, dir)
	if err != nil {
		return nil, err
	}
	fs.Debugf(dir, "list: read %v from source", len(sourceEntries))
	fs.Debugf(dir, "list: source entries: %v", sourceEntries)

	sort.Sort(sourceEntries)
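	// the source entries are sorted so the stale-entry pruning below can use
	// a binary search per cached entry instead of a linear scan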
	for _, entry := range entries {
		entryRemote := entry.Remote()
		i := sort.Search(len(sourceEntries), func(i int) bool { return sourceEntries[i].Remote() >= entryRemote })
		if i < len(sourceEntries) && sourceEntries[i].Remote() == entryRemote {
			continue
		}
		fp := path.Join(f.Root(), entryRemote)
		switch entry.(type) {
		case fs.Object:
			_ = f.cache.RemoveObject(fp)
		case fs.Directory:
			_ = f.cache.RemoveDir(fp)
		}
		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
	}
	entries = nil

	// and then iterate over the ones from source (temp Objects will override source ones)
	var batchDirectories []*Directory
	sort.Sort(cachedEntries)
	tmpCnt := len(cachedEntries)
	for _, entry := range sourceEntries {
		switch o := entry.(type) {
		case fs.Object:
			// skip over temporary objects (might be uploading)
			oRemote := o.Remote()
			i := sort.Search(tmpCnt, func(i int) bool { return cachedEntries[i].Remote() >= oRemote })
			if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
				continue
			}
			co := ObjectFromOriginal(ctx, f, o).persist()
			cachedEntries = append(cachedEntries, co)
			fs.Debugf(dir, "list: cached object: %v", co)
		case fs.Directory:
			cdd := DirectoryFromOriginal(ctx, f, o)
			// check if the dir isn't expired and add it in cache if it isn't
			if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
				batchDirectories = append(batchDirectories, cdd)
			}
			cachedEntries = append(cachedEntries, cdd)
		default:
			fs.Debugf(entry, "list: Unknown object type %T", entry)
		}
	}
	err = f.cache.AddBatchDir(batchDirectories)
	if err != nil {
		fs.Errorf(dir, "list: error caching directories from listing %v", dir)
	} else {
		fs.Debugf(dir, "list: cached directories: %v", len(batchDirectories))
	}

	// cache dir meta
	t := time.Now()
	cd.CacheTs = &t
	err = f.cache.AddDir(cd)
	if err != nil {
		fs.Errorf(cd, "list: save error: '%v'", err)
	} else {
		fs.Debugf(dir, "list: cached dir: '%v', cache ts: %v", cd.abs(), cd.CacheTs)
	}

	return cachedEntries, nil
}

func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
	entries, err := f.List(ctx, dir)
	if err != nil {
		return err
	}

	for i := 0; i < len(entries); i++ {
		innerDir, ok := entries[i].(fs.Directory)
		if ok {
			err := f.recurse(ctx, innerDir.Remote(), list)
			if err != nil {
				return err
			}
		}

		err := list.Add(entries[i])
		if err != nil {
			return err
		}
	}

	return nil
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "list recursively from '%s'", dir)

	// we check if the source FS supports ListR
	// if it does, we'll use that to get all the entries, cache them and return
	do := f.Fs.Features().ListR
	if do != nil {
		return do(ctx, dir, func(entries fs.DirEntries) error {
			// we got called back with a set of entries so let's cache them and call the original callback
			for _, entry := range entries {
				switch o := entry.(type) {
				case fs.Object:
					_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
				case fs.Directory:
					_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
				default:
					return errors.Errorf("Unknown object type %T", entry)
				}
			}

			// call the original callback
			return callback(entries)
		})
	}

	// if we're here, we'll do a standard recursive traversal and cache everything
	list := walk.NewListRHelper(callback)
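	// walk.ListRHelper batches the entries it is given before invoking the
	// callback (see fs/walk for the details)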
	err = f.recurse(ctx, dir, list)
	if err != nil {
		return err
	}

	return list.Flush()
}

// Mkdir makes the directory (container, bucket)
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	fs.Debugf(f, "mkdir '%s'", dir)
	err := f.Fs.Mkdir(ctx, dir)
	if err != nil {
		return err
	}
	fs.Debugf(dir, "mkdir: created dir in source fs")

	cd := NewDirectory(f, cleanPath(dir))
	err = f.cache.AddDir(cd)
	if err != nil {
		fs.Errorf(dir, "mkdir: add error: %v", err)
	} else {
		fs.Debugf(cd, "mkdir: added to cache")
	}
	// expire parent of new dir
	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(parentCd, "mkdir: cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "mkdir: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}

// Rmdir removes the directory (container, bucket) if empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	fs.Debugf(f, "rmdir '%s'", dir)

	if f.opt.TempWritePath != "" {
		// pause background uploads
		f.backgroundRunner.pause()
		defer f.backgroundRunner.play()

		// if the directory exists on the remote, remove it there too;
		// otherwise, we skip this step
		_, err := f.UnWrap().List(ctx, dir)
		if err == nil {
			err := f.Fs.Rmdir(ctx, dir)
			if err != nil {
				return err
			}
			fs.Debugf(dir, "rmdir: removed dir in source fs")
		}

		var queuedEntries []*Object
		err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
			for _, o := range entries {
				if oo, ok := o.(fs.Object); ok {
					co := ObjectFromOriginal(ctx, f, oo)
					queuedEntries = append(queuedEntries, co)
				}
			}
			return nil
		})
		if err != nil {
			fs.Errorf(dir, "rmdir: error getting pending uploads: %v", err)
		} else {
			fs.Debugf(dir, "rmdir: read %v from temp fs", len(queuedEntries))
			fs.Debugf(dir, "rmdir: temp fs entries: %v", queuedEntries)
			if len(queuedEntries) > 0 {
				fs.Errorf(dir, "rmdir: temporary dir not empty: %v", queuedEntries)
				return fs.ErrorDirectoryNotEmpty
			}
		}
	} else {
		err := f.Fs.Rmdir(ctx, dir)
		if err != nil {
			return err
		}
		fs.Debugf(dir, "rmdir: removed dir in source fs")
	}

	// remove dir data
	d := NewDirectory(f, dir)
	err := f.cache.RemoveDir(d.abs())
	if err != nil {
		fs.Errorf(dir, "rmdir: remove error: %v", err)
	} else {
		fs.Debugf(d, "rmdir: removed from cache")
	}
	// expire parent
	parentCd := NewDirectory(f, cleanPath(path.Dir(dir)))
	err = f.cache.ExpireDir(parentCd)
	if err != nil {
		fs.Errorf(dir, "rmdir: cache expire error: %v", err)
	} else {
		fs.Infof(parentCd, "rmdir: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)

	do := f.Fs.Features().DirMove
	if do == nil {
		return fs.ErrorCantDirMove
	}
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Errorf(srcFs, "can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	if srcFs.Fs.Name() != f.Fs.Name() {
		fs.Errorf(srcFs, "can't move directory - not wrapping same remotes")
		return fs.ErrorCantDirMove
	}

	if f.opt.TempWritePath != "" {
		// pause background uploads
		f.backgroundRunner.pause()
		defer f.backgroundRunner.play()

		_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
		_, errInTemp := f.tempFs.List(ctx, srcRemote)
		// not found in either fs
		if errInWrap != nil && errInTemp != nil {
			return fs.ErrorDirNotFound
		}

		// we check if the source exists on the remote and make the same move on it too if it does
		// otherwise, we skip this step
		if errInWrap == nil {
			err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
			if err != nil {
				return err
			}
			fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
		}
		// we need to check if the directory exists in the temp fs
		// and skip the move if it doesn't
		if errInTemp != nil {
			goto cleanup
		}

		var queuedEntries []*Object
		err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
			for _, o := range entries {
				if oo, ok := o.(fs.Object); ok {
					co := ObjectFromOriginal(ctx, f, oo)
					queuedEntries = append(queuedEntries, co)
					if co.tempFileStartedUpload() {
						fs.Errorf(co, "can't move - upload has already started. need to finish that")
						return fs.ErrorCantDirMove
					}
				}
			}
			return nil
		})
		if err != nil {
			return err
		}
		fs.Debugf(srcRemote, "dirmove: read %v from temp fs", len(queuedEntries))
		fs.Debugf(srcRemote, "dirmove: temp fs entries: %v", queuedEntries)

		do := f.tempFs.Features().DirMove
		if do == nil {
			fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
			return fs.ErrorCantDirMove
		}
		err = do(ctx, f.tempFs, srcRemote, dstRemote)
		if err != nil {
			return err
		}
		err = f.cache.ReconcileTempUploads(ctx, f)
		if err != nil {
			return err
		}
	} else {
		err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
		if err != nil {
			return err
		}
		fs.Debugf(srcRemote, "movedir: dir moved in the source fs")
	}
cleanup:

	// delete src dir from cache along with all chunks
	srcDir := NewDirectory(srcFs, srcRemote)
	err := f.cache.RemoveDir(srcDir.abs())
	if err != nil {
		fs.Errorf(srcDir, "dirmove: remove error: %v", err)
	} else {
		fs.Debugf(srcDir, "dirmove: removed cached dir")
	}
	// expire src parent
	srcParent := NewDirectory(f, cleanPath(path.Dir(srcRemote)))
	err = f.cache.ExpireDir(srcParent)
	if err != nil {
		fs.Errorf(srcParent, "dirmove: cache expire error: %v", err)
	} else {
		fs.Debugf(srcParent, "dirmove: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)

	// expire parent dir at the destination path
	dstParent := NewDirectory(f, cleanPath(path.Dir(dstRemote)))
	err = f.cache.ExpireDir(dstParent)
	if err != nil {
		fs.Errorf(dstParent, "dirmove: cache expire error: %v", err)
	} else {
		fs.Debugf(dstParent, "dirmove: cache expired")
	}
	// advertise to ChangeNotify if wrapped doesn't do that
	f.notifyChangeUpstreamIfNeeded(dstParent.Remote(), fs.EntryDirectory)
	// TODO: precache dst dir and save the chunks

	return nil
}

// cacheReader splits the stream from a reader so it can be cached at the same time it is read by the original consumer
func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn io.Reader)) {
	// create the pipe and tee reader
	pr, pw := io.Pipe()
	tr := io.TeeReader(u, pw)
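	// everything originalRead consumes from tr is duplicated into pw by the
	// TeeReader; the second goroutine below reads that copy back from pr and
	// stores it in the chunk cache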

	// create channel to synchronize
	done := make(chan bool)
	defer close(done)

	go func() {
		// notify the cache reader that we're complete after the source FS finishes
		defer func() {
			_ = pw.Close()
		}()
		// process original reading
		originalRead(tr)
		// signal complete
		done <- true
	}()

	go func() {
		var offset int64
		for {
			chunk := make([]byte, f.opt.ChunkSize)
			readSize, err := io.ReadFull(pr, chunk)
			// we ignore 3 failures which are ok:
			// 1. EOF - original reading finished and we got a full buffer too
			// 2. ErrUnexpectedEOF - original reading finished and partial buffer
			// 3. ErrClosedPipe - source remote reader was closed (usually means it reached the end) and we need to stop too
			// if we have a different error: we're going to error out the original reading too and stop this
			if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF && err != io.ErrClosedPipe {
				fs.Errorf(src, "error saving new data in cache. offset: %v, err: %v", offset, err)
				_ = pr.CloseWithError(err)
				break
			}
			// if we have some bytes we cache them
			if readSize > 0 {
				chunk = chunk[:readSize]
				err2 := f.cache.AddChunk(cleanPath(path.Join(f.root, src.Remote())), chunk, offset)
				if err2 != nil {
					fs.Errorf(src, "error saving new data in cache '%v'", err2)
					_ = pr.CloseWithError(err2)
					break
				}
				offset += int64(readSize)
			}
			// stuff should be closed but let's be sure
			if err == io.EOF || err == io.ErrUnexpectedEOF || err == io.ErrClosedPipe {
				_ = pr.Close()
				break
			}
		}

		// signal complete
		done <- true
	}()

	// wait until both are done
	for c := 0; c < 2; c++ {
		<-done
	}
}
  1429  
  1430  type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
  1431  
  1432  // put in to the remote path
  1433  func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
  1434  	var err error
  1435  	var obj fs.Object
  1436  
  1437  	// queue for upload and store in temp fs if configured
  1438  	if f.opt.TempWritePath != "" {
  1439  		// we need to clear the caches before a put through temp fs
  1440  		parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
  1441  		_ = f.cache.ExpireDir(parentCd)
  1442  		f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1443  
  1444  		obj, err = f.tempFs.Put(ctx, in, src, options...)
  1445  		if err != nil {
  1446  			fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
  1447  			return nil, err
  1448  		}
  1449  		fs.Infof(obj, "put: uploaded in temp fs")
  1450  		err = f.cache.addPendingUpload(path.Join(f.Root(), src.Remote()), false)
  1451  		if err != nil {
  1452  			fs.Errorf(obj, "put: failed to queue for upload: %v", err)
  1453  			return nil, err
  1454  		}
  1455  		fs.Infof(obj, "put: queued for upload")
  1456  		// if cache writes are enabled, write it first through the cache
  1457  	} else if f.opt.StoreWrites {
  1458  		f.cacheReader(in, src, func(inn io.Reader) {
  1459  			obj, err = put(ctx, inn, src, options...)
  1460  		})
  1461  		if err == nil {
  1462  			fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
  1463  		}
  1464  		// last option: save it directly in remote fs
  1465  	} else {
  1466  		obj, err = put(ctx, in, src, options...)
  1467  		if err == nil {
  1468  			fs.Debugf(obj, "put: uploaded to remote fs")
  1469  		}
  1470  	}
  1471  	// validate and stop if errors are found
  1472  	if err != nil {
  1473  		fs.Errorf(src, "put: error uploading: %v", err)
  1474  		return nil, err
  1475  	}
  1476  
  1477  	// cache the new file
  1478  	cachedObj := ObjectFromOriginal(ctx, f, obj)
  1479  
  1480  	// delete the cached chunks and info so they are replaced with new ones
  1481  	_ = f.cache.RemoveObject(cachedObj.abs())
  1482  
  1483  	cachedObj.persist()
  1484  	fs.Debugf(cachedObj, "put: added to cache")
  1485  
  1486  	// expire parent
  1487  	parentCd := NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
  1488  	err = f.cache.ExpireDir(parentCd)
  1489  	if err != nil {
  1490  		fs.Errorf(cachedObj, "put: cache expire error: %v", err)
  1491  	} else {
  1492  		fs.Infof(parentCd, "put: cache expired")
  1493  	}
  1494  	// advertise to ChangeNotify
  1495  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1496  
  1497  	return cachedObj, nil
  1498  }
  1499  
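// Editor's note: the StoreWrites branch of put relies on Go closure capture to
// get the upload result back out of cacheReader, which itself returns nothing.
// The pattern in isolation (a sketch, not the exact code above):
//
//	var obj fs.Object
//	var err error
//	f.cacheReader(in, src, func(inn io.Reader) {
//		obj, err = put(ctx, inn, src, options...) // results escape via the captured variables
//	})
//	// obj and err are valid here because cacheReader only returns
//	// after the callback (and the caching goroutine) have finished
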
  1500  // Put in to the remote path with the given modTime and size
  1501  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1502  	fs.Debugf(f, "put data at '%s'", src.Remote())
  1503  	return f.put(ctx, in, src, options, f.Fs.Put)
  1504  }
  1505  
  1506  // PutUnchecked uploads the object
  1507  func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1508  	do := f.Fs.Features().PutUnchecked
  1509  	if do == nil {
  1510  		return nil, errors.New("can't PutUnchecked")
  1511  	}
  1512  	fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
  1513  	return f.put(ctx, in, src, options, do)
  1514  }
  1515  
  1516  // PutStream uploads the object
  1517  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1518  	do := f.Fs.Features().PutStream
  1519  	if do == nil {
  1520  		return nil, errors.New("can't PutStream")
  1521  	}
  1522  	fs.Debugf(f, "put data streaming in '%s'", src.Remote())
  1523  	return f.put(ctx, in, src, options, do)
  1524  }
  1525  
  1526  // Copy src to this remote using server side copy operations.
  1527  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1528  	fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
  1529  
  1530  	do := f.Fs.Features().Copy
  1531  	if do == nil {
  1532  		fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
  1533  		return nil, fs.ErrorCantCopy
  1534  	}
  1535  	// the source must be a cached object or we abort
  1536  	srcObj, ok := src.(*Object)
  1537  	if !ok {
  1538  		fs.Errorf(srcObj, "can't copy - not same remote type")
  1539  		return nil, fs.ErrorCantCopy
  1540  	}
  1541  	// both the source cache fs and this cache fs need to wrap the same remote
  1542  	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
  1543  		fs.Errorf(srcObj, "can't copy - not wrapping same remotes")
  1544  		return nil, fs.ErrorCantCopy
  1545  	}
  1546  	// refresh from source or abort
  1547  	if err := srcObj.refreshFromSource(ctx, false); err != nil {
  1548  		fs.Errorf(f, "can't copy %v - %v", src, err)
  1549  		return nil, fs.ErrorCantCopy
  1550  	}
  1551  
  1552  	if srcObj.isTempFile() {
  1553  		// we check if the feature is still active
  1554  		if f.opt.TempWritePath == "" {
  1555  			fs.Errorf(srcObj, "can't copy - this is a local cached file but the temp write feature is turned off for this run")
  1556  			return nil, fs.ErrorCantCopy
  1557  		}
  1558  
  1559  		do = srcObj.ParentFs.Features().Copy
  1560  		if do == nil {
  1561  			fs.Errorf(src, "parent remote (%v) doesn't support Copy", srcObj.ParentFs)
  1562  			return nil, fs.ErrorCantCopy
  1563  		}
  1564  	}
  1565  
  1566  	obj, err := do(ctx, srcObj.Object, remote)
  1567  	if err != nil {
  1568  		fs.Errorf(srcObj, "error copying in cache: %v", err)
  1569  		return nil, err
  1570  	}
  1571  	fs.Debugf(obj, "copy: file copied")
  1572  
  1573  	// persist new
  1574  	co := ObjectFromOriginal(ctx, f, obj).persist()
  1575  	fs.Debugf(co, "copy: added to cache")
  1576  	// expire the destination path
  1577  	parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
  1578  	err = f.cache.ExpireDir(parentCd)
  1579  	if err != nil {
  1580  		fs.Errorf(parentCd, "copy: cache expire error: %v", err)
  1581  	} else {
  1582  		fs.Infof(parentCd, "copy: cache expired")
  1583  	}
  1584  	// advertise to ChangeNotify if wrapped doesn't do that
  1585  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1586  	// expire src parent
  1587  	srcParent := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
  1588  	err = f.cache.ExpireDir(srcParent)
  1589  	if err != nil {
  1590  		fs.Errorf(srcParent, "copy: cache expire error: %v", err)
  1591  	} else {
  1592  		fs.Infof(srcParent, "copy: cache expired")
  1593  	}
  1594  	// advertise to ChangeNotify if wrapped doesn't do that
  1595  	f.notifyChangeUpstreamIfNeeded(srcParent.Remote(), fs.EntryDirectory)
  1596  
  1597  	return co, nil
  1598  }
  1599  
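// Editor's note: put, Copy and Move all finish with the same invalidation steps:
// expire the affected parent directory in the cache, then advertise the change
// upstream. A hypothetical helper capturing the repeated pattern (not part of
// this file, but built only from identifiers defined in it):
//
//	func (f *Fs) expireAndNotify(remote string) {
//		parentCd := NewDirectory(f, cleanPath(path.Dir(remote)))
//		if err := f.cache.ExpireDir(parentCd); err != nil {
//			fs.Errorf(parentCd, "cache expire error: %v", err)
//		}
//		f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
//	}
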
  1600  // Move src to this remote using server side move operations.
  1601  func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1602  	fs.Debugf(f, "moving obj '%s' -> '%s'", src, remote)
  1603  
  1604  	// if the wrapped fs doesn't support move, abort
  1605  	do := f.Fs.Features().Move
  1606  	if do == nil {
  1607  		fs.Errorf(src, "source remote (%v) doesn't support Move", src.Fs())
  1608  		return nil, fs.ErrorCantMove
  1609  	}
  1610  	// the source must be a cached object or we abort
  1611  	srcObj, ok := src.(*Object)
  1612  	if !ok {
  1613  		fs.Errorf(srcObj, "can't move - not same remote type")
  1614  		return nil, fs.ErrorCantMove
  1615  	}
  1616  	// both the source cache fs and this cache fs need to wrap the same remote
  1617  	if srcObj.CacheFs.Fs.Name() != f.Fs.Name() {
  1618  		fs.Errorf(srcObj, "can't move - not wrapping same remotes")
  1619  		return nil, fs.ErrorCantMove
  1620  	}
  1621  	// refresh from source or abort
  1622  	if err := srcObj.refreshFromSource(ctx, false); err != nil {
  1623  		fs.Errorf(f, "can't move %v - %v", src, err)
  1624  		return nil, fs.ErrorCantMove
  1625  	}
  1626  
  1627  	// if this is a temp object then we perform the changes locally
  1628  	if srcObj.isTempFile() {
  1629  		// we check if the feature is still active
  1630  		if f.opt.TempWritePath == "" {
  1631  			fs.Errorf(srcObj, "can't move - this is a local cached file but the temp write feature is turned off for this run")
  1632  			return nil, fs.ErrorCantMove
  1633  		}
  1634  		// pause background uploads
  1635  		f.backgroundRunner.pause()
  1636  		defer f.backgroundRunner.play()
  1637  
  1638  		// started uploads can't be moved until they complete
  1639  		if srcObj.tempFileStartedUpload() {
  1640  			fs.Errorf(srcObj, "can't move - upload has already started and needs to finish first")
  1641  			return nil, fs.ErrorCantMove
  1642  		}
  1643  		do = f.tempFs.Features().Move
  1644  
  1645  		// we must also update the pending queue
  1646  		err := f.cache.updatePendingUpload(srcObj.abs(), func(item *tempUploadInfo) error {
  1647  			item.DestPath = path.Join(f.Root(), remote)
  1648  			item.AddedOn = time.Now()
  1649  			return nil
  1650  		})
  1651  		if err != nil {
  1652  			fs.Errorf(srcObj, "failed to rename queued file for upload: %v", err)
  1653  			return nil, fs.ErrorCantMove
  1654  		}
  1655  		fs.Debugf(srcObj, "move: queued file moved to %v", remote)
  1656  	}
  1657  
  1658  	obj, err := do(ctx, srcObj.Object, remote)
  1659  	if err != nil {
  1660  		fs.Errorf(srcObj, "error moving: %v", err)
  1661  		return nil, err
  1662  	}
  1663  	fs.Debugf(obj, "move: file moved")
  1664  
  1665  	// remove old
  1666  	err = f.cache.RemoveObject(srcObj.abs())
  1667  	if err != nil {
  1668  		fs.Errorf(srcObj, "move: remove error: %v", err)
  1669  	} else {
  1670  		fs.Debugf(srcObj, "move: removed from cache")
  1671  	}
  1672  	// expire old parent
  1673  	parentCd := NewDirectory(f, cleanPath(path.Dir(srcObj.Remote())))
  1674  	err = f.cache.ExpireDir(parentCd)
  1675  	if err != nil {
  1676  		fs.Errorf(parentCd, "move: parent cache expire error: %v", err)
  1677  	} else {
  1678  		fs.Infof(parentCd, "move: cache expired")
  1679  	}
  1680  	// advertise to ChangeNotify if wrapped doesn't do that
  1681  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1682  	// persist new
  1683  	cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
  1684  	fs.Debugf(cachedObj, "move: added to cache")
  1685  	// expire new parent
  1686  	parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
  1687  	err = f.cache.ExpireDir(parentCd)
  1688  	if err != nil {
  1689  		fs.Errorf(parentCd, "move: expire error: %v", err)
  1690  	} else {
  1691  		fs.Infof(parentCd, "move: cache expired")
  1692  	}
  1693  	// advertise to ChangeNotify if wrapped doesn't do that
  1694  	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
  1695  
  1696  	return cachedObj, nil
  1697  }
  1698  
  1699  // Hashes returns the supported hash sets.
  1700  func (f *Fs) Hashes() hash.Set {
  1701  	return f.Fs.Hashes()
  1702  }
  1703  
  1704  // Purge all files in the root and the root directory
  1705  func (f *Fs) Purge(ctx context.Context) error {
  1706  	fs.Infof(f, "purging cache")
  1707  	f.cache.Purge()
  1708  
  1709  	do := f.Fs.Features().Purge
  1710  	if do == nil {
  1711  		return nil
  1712  	}
  1713  
  1714  	err := do(ctx)
  1715  	if err != nil {
  1716  		return err
  1717  	}
  1718  
  1719  	return nil
  1720  }
  1721  
  1722  // CleanUp the trash in the Fs
  1723  func (f *Fs) CleanUp(ctx context.Context) error {
  1724  	f.CleanUpCache(false)
  1725  
  1726  	do := f.Fs.Features().CleanUp
  1727  	if do == nil {
  1728  		return nil
  1729  	}
  1730  
  1731  	return do(ctx)
  1732  }
  1733  
  1734  // About gets quota information from the Fs
  1735  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
  1736  	do := f.Fs.Features().About
  1737  	if do == nil {
  1738  		return nil, errors.New("About not supported")
  1739  	}
  1740  	return do(ctx)
  1741  }
  1742  
  1743  // Stats returns stats about the cache storage
  1744  func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
  1745  	return f.cache.Stats()
  1746  }
  1747  
  1748  // openRateLimited will execute a closure while respecting the rate limiter
  1749  func (f *Fs) openRateLimited(fn func() (io.ReadCloser, error)) (io.ReadCloser, error) {
  1750  	var err error
  1751  	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
  1752  	defer cancel()
  1753  	start := time.Now()
  1754  
  1755  	if err = f.rateLimiter.Wait(ctx); err != nil {
  1756  		return nil, err
  1757  	}
  1758  
  1759  	elapsed := time.Since(start)
  1760  	if elapsed > time.Second*2 {
  1761  		fs.Debugf(f, "rate limited: %s", elapsed)
  1762  	}
  1763  	return fn()
  1764  }
  1765  
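// Editor's note: openRateLimited is a thin wrapper over golang.org/x/time/rate.
// The core call is Limiter.Wait, which blocks until a token is available or the
// context expires. A self-contained sketch with assumed numbers:
//
//	lim := rate.NewLimiter(rate.Limit(10), 1) // at most 10 requests/second, burst of 1
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	if err := lim.Wait(ctx); err != nil {
//		return nil, err // timed out waiting for a token
//	}
//	return fn()
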
  1766  // CleanUpCache will clean up only the cache data that is expired
  1767  func (f *Fs) CleanUpCache(ignoreLastTs bool) {
  1768  	f.cleanupMu.Lock()
  1769  	defer f.cleanupMu.Unlock()
  1770  
  1771  	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) {
  1772  		f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize))
  1773  		f.lastChunkCleanup = time.Now()
  1774  	}
  1775  }
  1776  
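// Editor's note: because of the interval gate above, CleanUpCache(false) is
// cheap to call repeatedly. A hypothetical periodic driver (the real scheduling
// lives elsewhere in this backend):
//
//	ticker := time.NewTicker(time.Duration(f.opt.ChunkCleanInterval))
//	defer ticker.Stop()
//	for range ticker.C {
//		f.CleanUpCache(false)
//	}
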
  1777  // StopBackgroundRunners will signal all the runners to stop their work.
  1778  // It can be triggered by a terminate signal or by testing between runs.
  1779  func (f *Fs) StopBackgroundRunners() {
  1780  	f.cleanupChan <- false
  1781  	if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
  1782  		f.backgroundRunner.close()
  1783  	}
  1784  	f.cache.Close()
  1785  	fs.Debugf(f, "Services stopped")
  1786  }
  1787  
  1788  // UnWrap returns the Fs that this Fs is wrapping
  1789  func (f *Fs) UnWrap() fs.Fs {
  1790  	return f.Fs
  1791  }
  1792  
  1793  // WrapFs returns the Fs that is wrapping this Fs
  1794  func (f *Fs) WrapFs() fs.Fs {
  1795  	return f.wrapper
  1796  }
  1797  
  1798  // SetWrapper sets the Fs that is wrapping this Fs
  1799  func (f *Fs) SetWrapper(wrapper fs.Fs) {
  1800  	f.wrapper = wrapper
  1801  }
  1802  
  1803  // isWrappedByCrypt checks if this is wrapped by a crypt remote
  1804  func (f *Fs) isWrappedByCrypt() (*crypt.Fs, bool) {
  1805  	if f.wrapper == nil {
  1806  		return nil, false
  1807  	}
  1808  	c, ok := f.wrapper.(*crypt.Fs)
  1809  	return c, ok
  1810  }
  1811  
  1812  // cleanRootFromPath trims the root of the current fs from a path
  1813  func (f *Fs) cleanRootFromPath(p string) string {
  1814  	if f.Root() != "" {
  1815  		p = p[len(f.Root()):] // trim out root
  1816  		if len(p) > 0 {       // remove first separator
  1817  			p = p[1:]
  1818  		}
  1819  	}
  1820  
  1821  	return p
  1822  }
  1823  
  1824  func (f *Fs) isRootInPath(p string) bool {
  1825  	if f.Root() == "" {
  1826  		return true
  1827  	}
  1828  	return strings.HasPrefix(p, f.Root()+"/")
  1829  }
  1830  
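// Editor's note: worked examples for the two path helpers above, assuming
// f.Root() == "one/two":
//
//	f.cleanRootFromPath("one/two/file.bin") // -> "file.bin"
//	f.isRootInPath("one/two/file.bin")      // -> true
//	f.isRootInPath("one/twofile.bin")       // -> false, the prefix must end at a separator
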
  1831  // DirCacheFlush flushes the dir cache
  1832  func (f *Fs) DirCacheFlush() {
  1833  	_ = f.cache.RemoveDir("")
  1834  }
  1835  
  1836  // GetBackgroundUploadChannel returns a channel that can be listened to for remote activities that happen
  1837  // in the background
  1838  func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
  1839  	if f.opt.TempWritePath != "" {
  1840  		return f.backgroundRunner.notifyCh
  1841  	}
  1842  	return nil
  1843  }
  1844  
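// Editor's note: a hedged sketch of consuming the channel above; the fields of
// BackgroundUploadState are defined elsewhere in this backend, so the sketch
// only logs the whole value:
//
//	if ch := f.GetBackgroundUploadChannel(); ch != nil {
//		go func() {
//			for state := range ch {
//				fs.Infof(f, "background upload state: %+v", state)
//			}
//		}()
//	}
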
  1845  func (f *Fs) isNotifiedRemote(remote string) bool {
  1846  	f.notifiedMu.Lock()
  1847  	defer f.notifiedMu.Unlock()
  1848  
  1849  	n, ok := f.notifiedRemotes[remote]
  1850  	if !ok || !n {
  1851  		return false
  1852  	}
  1853  
  1854  	delete(f.notifiedRemotes, remote)
  1855  	return n
  1856  }
  1857  
  1858  func cleanPath(p string) string {
  1859  	p = path.Clean(p)
  1860  	if p == "." || p == "/" {
  1861  		p = ""
  1862  	}
  1863  
  1864  	return p
  1865  }
  1866  
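// Editor's note: cleanPath normalises via path.Clean and maps the "empty"
// results to "". Worked examples (path.Clean("") and path.Clean(".") both
// return "."):
//
//	cleanPath("one/../two/") // -> "two"
//	cleanPath(".")           // -> ""
//	cleanPath("/")           // -> ""
//	cleanPath("")            // -> ""
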
  1867  // Check the interfaces are satisfied
  1868  var (
  1869  	_ fs.Fs             = (*Fs)(nil)
  1870  	_ fs.Purger         = (*Fs)(nil)
  1871  	_ fs.Copier         = (*Fs)(nil)
  1872  	_ fs.Mover          = (*Fs)(nil)
  1873  	_ fs.DirMover       = (*Fs)(nil)
  1874  	_ fs.PutUncheckeder = (*Fs)(nil)
  1875  	_ fs.PutStreamer    = (*Fs)(nil)
  1876  	_ fs.CleanUpper     = (*Fs)(nil)
  1877  	_ fs.UnWrapper      = (*Fs)(nil)
  1878  	_ fs.Wrapper        = (*Fs)(nil)
  1879  	_ fs.ListRer        = (*Fs)(nil)
  1880  	_ fs.ChangeNotifier = (*Fs)(nil)
  1881  	_ fs.Abouter        = (*Fs)(nil)
  1882  )
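// Editor's note: the var block above is the standard compile-time assertion that
// *Fs satisfies each optional interface: assigning a typed nil pointer to a
// blank variable of the interface type fails to compile if a method is missing.
// The same idiom works for any type:
//
//	var _ io.ReadWriter = (*bytes.Buffer)(nil) // breaks the build if bytes.Buffer stops satisfying io.ReadWriter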