github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/vfs/vfscache/item.go

     1  package vfscache
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"errors"
     7  	"fmt"
     8  	"io"
     9  	"os"
    10  	"sync"
    11  	"time"
    12  
    13  	"github.com/rclone/rclone/fs"
    14  	"github.com/rclone/rclone/fs/fserrors"
    15  	"github.com/rclone/rclone/fs/operations"
    16  	"github.com/rclone/rclone/lib/file"
    17  	"github.com/rclone/rclone/lib/ranges"
    18  	"github.com/rclone/rclone/vfs/vfscache/downloaders"
    19  	"github.com/rclone/rclone/vfs/vfscache/writeback"
    20  )
    21  
    22  // NB as Cache and Item are tightly linked it is necessary to have a
    23  // total lock ordering between them. So Cache.mu must always be
    24  // taken before Item.mu to avoid deadlocks.
    25  //
    26  // Cache may call into Item but care is needed if Item calls Cache
    27  //
    28  // A lot of the Cache methods do not require locking, these include
    29  //
    30  // - Cache.toOSPath
    31  // - Cache.toOSPathMeta
    32  // - Cache.createItemDir
    33  // - Cache.objectFingerprint
    34  // - Cache.AddVirtual
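        //
        // For example, code that needs both locks must take Cache.mu first and
        // release in the reverse order (an illustrative sketch, not an actual
        // method):
        //
        //	c.mu.Lock()
        //	item.mu.Lock() // OK: Cache.mu taken before Item.mu
        //	// ... work on cache and item state ...
        //	item.mu.Unlock()
        //	c.mu.Unlock()
        //
        // Taking the locks the other way around risks a deadlock.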
    35  
    36  // NB Item and downloader are tightly linked so it is necessary to
    37  // have a total lock ordering between them. downloader.mu must always
    38  // be taken before Item.mu. downloader may call into Item but Item may
    39  // **not** call downloader methods with Item.mu held
    40  
    41  // NB Item and writeback are tightly linked so it is necessary to
    42  // have a total lock ordering between them. writeback.mu must always
    43  // be taken before Item.mu. writeback may call into Item but Item may
    44  // **not** call writeback methods with Item.mu held
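        //
        // In practice this means dropping Item.mu around the call, as _dirty
        // does (sketch of the pattern):
        //
        //	item.mu.Unlock()
        //	item.c.writeback.Remove(item.writeBackID)
        //	item.mu.Lock()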
    45  
    46  // NB Item reset is invoked by the cache cleaner for synchronous recovery
    47  // from ENOSPC errors. The reset operation removes the cache file and
    48  // closes/reopens the downloaders.  Although most parts of reset and
    49  // other item operations are done with the item mutex held, the mutex
    50  // is released during fd.WriteAt and downloaders calls. We use preAccess
    51  // and postAccess calls to serialize reset and other item operations.
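        //
        // Accessors therefore bracket their work roughly like this (a sketch
        // of the pattern used by Truncate and Sync below):
        //
        //	item.preAccess()
        //	defer item.postAccess()
        //	item.mu.Lock()
        //	defer item.mu.Unlock()
        //	// ... use item.fd and item.info, dropping item.mu around
        //	// fd.WriteAt and downloaders calls ...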
    52  
    53  // Item is stored in the item map
    54  //
    55  // The Info field is written to the backing store to store status
    56  type Item struct {
    57  	// read only
    58  	c               *Cache                   // cache this is part of
    59  	mu              sync.Mutex               // protect the variables
    60  	cond            sync.Cond                // synchronize with cache cleaner
    61  	name            string                   // name in the VFS
    62  	opens           int                      // number of times file is open
    63  	downloaders     *downloaders.Downloaders // a record of the downloaders in action - may be nil
    64  	o               fs.Object                // object we are caching - may be nil
    65  	fd              *os.File                 // handle we are using to read and write to the file
    66  	info            Info                     // info about the file to persist to backing store
    67  	writeBackID     writeback.Handle         // id of any writebacks in progress
    68  	pendingAccesses int                      // number of threads - cache reset not allowed if not zero
    69  	modified        bool                     // set if the file has been modified since the last Open
    70  	beingReset      bool                     // cache cleaner is resetting the cache file, access not allowed
    71  }
    72  
    73  // Info is persisted to backing store
    74  type Info struct {
    75  	ModTime     time.Time     // last time file was modified
    76  	ATime       time.Time     // last time file was accessed
    77  	Size        int64         // size of the file
    78  	Rs          ranges.Ranges // which parts of the file are present
    79  	Fingerprint string        // fingerprint of remote object
    80  	Dirty       bool          // set if the backing file has been modified
    81  }
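
        // The metadata is written by _save as indented JSON, so a persisted
        // entry looks something like this (illustrative values; Fingerprint is
        // whatever fs.Fingerprint produced for the remote object):
        //
        //	{
        //		"ModTime": "2024-05-17T10:03:46Z",
        //		"ATime": "2024-05-17T10:03:46Z",
        //		"Size": 1048576,
        //		"Rs": [
        //			{
        //				"Pos": 0,
        //				"Size": 131072
        //			}
        //		],
        //		"Fingerprint": "...",
        //		"Dirty": false
        //	}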
    82  
    83  // Items are a slice of *Item ordered by ATime
    84  type Items []*Item
    85  
    86  // ResetResult reports the action taken by the Reset function and the reason for it
    87  type ResetResult int
    88  
    89  // Constants used to report the action taken by the Reset function and the reason for it
    90  const (
    91  	SkippedDirty         ResetResult = iota // Dirty item cannot be reset
    92  	SkippedPendingAccess                    // Reset pending access can lead to deadlock
    93  	SkippedEmpty                            // Reset empty item does not save space
    94  	RemovedNotInUse                         // Item not used. Remove instead of reset
    95  	ResetFailed                             // Reset failed with an error
    96  	ResetComplete                           // Reset completed successfully
    97  )
    98  
    99  func (rr ResetResult) String() string {
   100  	return [...]string{"Dirty item skipped", "In-access item skipped", "Empty item skipped",
   101  		"Not-in-use item removed", "Item reset failed", "Item reset completed"}[rr]
   102  }
   103  
   104  func (v Items) Len() int      { return len(v) }
   105  func (v Items) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
   106  func (v Items) Less(i, j int) bool {
   107  	if i == j {
   108  		return false
   109  	}
   110  	iItem := v[i]
   111  	jItem := v[j]
   112  	iItem.mu.Lock()
   113  	defer iItem.mu.Unlock()
   114  	jItem.mu.Lock()
   115  	defer jItem.mu.Unlock()
   116  
   117  	return iItem.info.ATime.Before(jItem.info.ATime)
   118  }
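
        // Items satisfies sort.Interface, so a snapshot of the cache can be
        // ordered with the least recently accessed items first, e.g. (sketch,
        // assuming the caller imports the sort package):
        //
        //	sort.Sort(items)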
   119  
   120  // clean the item after its cache file has been deleted
   121  func (info *Info) clean() {
   122  	*info = Info{}
   123  	info.ModTime = time.Now()
   124  	info.ATime = info.ModTime
   125  }
   126  
   127  // StoreFn is called back with an object after it has been uploaded
   128  type StoreFn func(fs.Object)
   129  
   130  // newItem returns an item for the cache
   131  func newItem(c *Cache, name string) (item *Item) {
   132  	now := time.Now()
   133  	item = &Item{
   134  		c:    c,
   135  		name: name,
   136  		info: Info{
   137  			ModTime: now,
   138  			ATime:   now,
   139  		},
   140  	}
   141  	item.cond = sync.Cond{L: &item.mu}
   142  	// check the cache file exists
   143  	osPath := c.toOSPath(name)
   144  	fi, statErr := os.Stat(osPath)
   145  	if statErr != nil {
   146  		if os.IsNotExist(statErr) {
   147  			item._removeMeta("cache file doesn't exist")
   148  		} else {
   149  			item.remove(fmt.Sprintf("failed to stat cache file: %v", statErr))
   150  		}
   151  	}
   152  
   153  	// Try to load the metadata
   154  	exists, err := item.load()
   155  	if !exists {
   156  		item._removeFile("metadata doesn't exist")
   157  	} else if err != nil {
   158  		item.remove(fmt.Sprintf("failed to load metadata: %v", err))
   159  	}
   160  
   161  	// Get size estimate (which is the best we can do until Open() is called)
   162  	if statErr == nil {
   163  		item.info.Size = fi.Size()
   164  	}
   165  	return item
   166  }
   167  
   168  // inUse returns true if the item is open or dirty
   169  func (item *Item) inUse() bool {
   170  	item.mu.Lock()
   171  	defer item.mu.Unlock()
   172  	return item.opens != 0 || item.info.Dirty
   173  }
   174  
   175  // getDiskSize returns the size on disk (approximately) of the item
   176  //
   177  // We return the sizes of the chunks we have fetched; however, there is
   178  // likely to be some overhead which we are not taking into account.
   179  func (item *Item) getDiskSize() int64 {
   180  	item.mu.Lock()
   181  	defer item.mu.Unlock()
   182  	return item.info.Rs.Size()
   183  }
   184  
   185  // load reads the item's metadata from disk, returning exists=false if not found
   186  func (item *Item) load() (exists bool, err error) {
   187  	item.mu.Lock()
   188  	defer item.mu.Unlock()
   189  	osPathMeta := item.c.toOSPathMeta(item.name) // No locking in Cache
   190  	in, err := os.Open(osPathMeta)
   191  	if err != nil {
   192  		if os.IsNotExist(err) {
   193  			return false, err
   194  		}
   195  		return true, fmt.Errorf("vfs cache item: failed to read metadata: %w", err)
   196  	}
   197  	defer fs.CheckClose(in, &err)
   198  	decoder := json.NewDecoder(in)
   199  	err = decoder.Decode(&item.info)
   200  	if err != nil {
   201  		return true, fmt.Errorf("vfs cache item: corrupt metadata: %w", err)
   202  	}
   203  	return true, nil
   204  }
   205  
   206  // save writes an item to the disk
   207  //
   208  // call with the lock held
   209  func (item *Item) _save() (err error) {
   210  	osPathMeta := item.c.toOSPathMeta(item.name) // No locking in Cache
   211  	out, err := os.Create(osPathMeta)
   212  	if err != nil {
   213  		return fmt.Errorf("vfs cache item: failed to write metadata: %w", err)
   214  	}
   215  	defer fs.CheckClose(out, &err)
   216  	encoder := json.NewEncoder(out)
   217  	encoder.SetIndent("", "\t")
   218  	err = encoder.Encode(item.info)
   219  	if err != nil {
   220  		return fmt.Errorf("vfs cache item: failed to encode metadata: %w", err)
   221  	}
   222  	return nil
   223  }
   224  
   225  // truncate the item to the given size, creating it if necessary
   226  //
   227  // this does not mark the object as dirty
   228  //
   229  // call with the lock held
   230  func (item *Item) _truncate(size int64) (err error) {
   231  	if size < 0 {
   232  		// FIXME ignore unknown length files
   233  		return nil
   234  	}
   235  
   236  	// Use open handle if available
   237  	fd := item.fd
   238  	if fd == nil {
   239  		// If the metadata says we have some blocks cached then the
   240  		// file should exist, so open without O_CREATE
   241  		oFlags := os.O_WRONLY
   242  		if item.info.Rs.Size() == 0 {
   243  			oFlags |= os.O_CREATE
   244  		}
   245  		osPath := item.c.toOSPath(item.name) // No locking in Cache
   246  		fd, err = file.OpenFile(osPath, oFlags, 0600)
   247  		if err != nil && os.IsNotExist(err) {
   248  			// If the metadata has info but the file doesn't
   249  		// exist then it has been externally removed
   250  			fs.Errorf(item.name, "vfs cache: detected external removal of cache file")
   251  			item.info.Rs = nil      // show we have no blocks cached
   252  			item.info.Dirty = false // file can't be dirty if it doesn't exist
   253  			item._removeMeta("cache file externally deleted")
   254  			fd, err = file.OpenFile(osPath, os.O_CREATE|os.O_WRONLY, 0600)
   255  		}
   256  		if err != nil {
   257  			return fmt.Errorf("vfs cache: truncate: failed to open cache file: %w", err)
   258  		}
   259  
   260  		defer fs.CheckClose(fd, &err)
   261  
   262  		err = file.SetSparse(fd)
   263  		if err != nil {
   264  			fs.Errorf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
   265  		}
   266  	}
   267  
   268  	// Check to see what the current size is, and don't truncate
   269  	// if it is already the correct size.
   270  	//
   271  	// Apparently Windows Defender likes to check executables each
   272  	// time they are modified, and truncating a file to its
   273  	// existing size is enough to trigger the Windows Defender
   274  	// scan. This was causing a big slowdown for operations which
   275  	// opened and closed the file a lot, such as looking at
   276  	// properties on an executable.
   277  	fi, err := fd.Stat()
   278  	if err == nil && fi.Size() == size {
   279  		fs.Debugf(item.name, "vfs cache: truncate to size=%d (not needed as size correct)", size)
   280  	} else {
   281  		fs.Debugf(item.name, "vfs cache: truncate to size=%d", size)
   282  
   283  		err = fd.Truncate(size)
   284  		if err != nil {
   285  			return fmt.Errorf("vfs cache: truncate: %w", err)
   286  		}
   287  	}
   288  
   289  	item.info.Size = size
   290  
   291  	return nil
   292  }
   293  
   294  // Truncate the item to the current size, creating if necessary
   295  //
   296  // This does not mark the object as dirty.
   297  //
   298  // call with the lock held
   299  func (item *Item) _truncateToCurrentSize() (err error) {
   300  	size, err := item._getSize()
   301  	if err != nil && !errors.Is(err, os.ErrNotExist) {
   302  		return fmt.Errorf("truncate to current size: %w", err)
   303  	}
   304  	if size < 0 {
   305  		// FIXME ignore unknown length files
   306  		return nil
   307  	}
   308  	err = item._truncate(size)
   309  	if err != nil {
   310  		return err
   311  	}
   312  	return nil
   313  }
   314  
   315  // Truncate the item to the given size, creating it if necessary
   316  //
   317  // If the new size is shorter than the existing size then the object
   318  // will be shortened and marked as dirty.
   319  //
   320  // If the new size is longer than the old size then the object will be
   321  // extended and the extended data will be filled with zeros. The
   322  // object will be marked as dirty in this case also.
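        //
        // For example (illustrative sizes): with 100 bytes cached, Truncate(50)
        // clips the cached ranges to the first 50 bytes, while Truncate(200)
        // records the extended region as written (it reads back as zeros).
        // Both calls mark the item dirty.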
   323  func (item *Item) Truncate(size int64) (err error) {
   324  	item.preAccess()
   325  	defer item.postAccess()
   326  	item.mu.Lock()
   327  	defer item.mu.Unlock()
   328  
   329  	if item.fd == nil {
   330  		return errors.New("vfs cache item truncate: internal error: didn't Open file")
   331  	}
   332  
   333  	// Read old size
   334  	oldSize, err := item._getSize()
   335  	if err != nil {
   336  		if !errors.Is(err, os.ErrNotExist) {
   337  			return fmt.Errorf("truncate failed to read size: %w", err)
   338  		}
   339  		oldSize = 0
   340  	}
   341  
   342  	err = item._truncate(size)
   343  	if err != nil {
   344  		return err
   345  	}
   346  
   347  	changed := true
   348  	if size > oldSize {
   349  		// Truncate extends the file in which case all new bytes are
   350  		// read as zeros. In this case we must show we have written to
   351  		// the new parts of the file.
   352  		item._written(oldSize, size)
   353  	} else if size < oldSize {
   354  		// Truncate shrinks the file so clip the downloaded ranges
   355  		item.info.Rs = item.info.Rs.Intersection(ranges.Range{Pos: 0, Size: size})
   356  	} else {
   357  		changed = item.o == nil
   358  	}
   359  	if changed {
   360  		item._dirty()
   361  	}
   362  
   363  	return nil
   364  }
   365  
   366  // _stat gets the current stat of the backing file
   367  //
   368  // Call with mutex held
   369  func (item *Item) _stat() (fi os.FileInfo, err error) {
   370  	if item.fd != nil {
   371  		return item.fd.Stat()
   372  	}
   373  	osPath := item.c.toOSPath(item.name) // No locking in Cache
   374  	return os.Stat(osPath)
   375  }
   376  
   377  // _getSize gets the current size of the item and updates item.info.Size
   378  //
   379  // Call with mutex held
   380  func (item *Item) _getSize() (size int64, err error) {
   381  	fi, err := item._stat()
   382  	if err != nil {
   383  		if os.IsNotExist(err) && item.o != nil {
   384  			size = item.o.Size()
   385  			err = nil
   386  		}
   387  	} else {
   388  		size = fi.Size()
   389  	}
   390  	if err == nil {
   391  		item.info.Size = size
   392  	}
   393  	return size, err
   394  }
   395  
   396  // GetName gets the vfs name of the item
   397  func (item *Item) GetName() (name string) {
   398  	item.mu.Lock()
   399  	defer item.mu.Unlock()
   400  	return item.name
   401  }
   402  
   403  // GetSize gets the current size of the item
   404  func (item *Item) GetSize() (size int64, err error) {
   405  	item.mu.Lock()
   406  	defer item.mu.Unlock()
   407  	return item._getSize()
   408  }
   409  
   410  // _exists returns whether the backing file for the item exists or not
   411  //
   412  // call with mutex held
   413  func (item *Item) _exists() bool {
   414  	osPath := item.c.toOSPath(item.name) // No locking in Cache
   415  	_, err := os.Stat(osPath)
   416  	return err == nil
   417  }
   418  
   419  // Exists returns whether the backing file for the item exists or not
   420  func (item *Item) Exists() bool {
   421  	item.mu.Lock()
   422  	defer item.mu.Unlock()
   423  	return item._exists()
   424  }
   425  
   426  // _dirty marks the item as changed and needing writeback
   427  //
   428  // call with lock held
   429  func (item *Item) _dirty() {
   430  	item.info.ModTime = time.Now()
   431  	item.info.ATime = item.info.ModTime
   432  	if !item.modified {
   433  		item.modified = true
   434  		item.mu.Unlock()
   435  		item.c.writeback.Remove(item.writeBackID)
   436  		item.mu.Lock()
   437  	}
   438  	if !item.info.Dirty {
   439  		item.info.Dirty = true
   440  		err := item._save()
   441  		if err != nil {
   442  			fs.Errorf(item.name, "vfs cache: failed to save item info: %v", err)
   443  		}
   444  	}
   445  }
   446  
   447  // Dirty marks the item as changed and needing writeback
   448  func (item *Item) Dirty() {
   449  	item.preAccess()
   450  	defer item.postAccess()
   451  	item.mu.Lock()
   452  	item._dirty()
   453  	item.mu.Unlock()
   454  }
   455  
   456  // IsDirty returns true if the item data is dirty
   457  func (item *Item) IsDirty() bool {
   458  	item.mu.Lock()
   459  	defer item.mu.Unlock()
   460  	return item.info.Dirty
   461  }
   462  
   463  // Create the cache file and store the metadata on disk
   464  // Called with item.mu locked
   465  func (item *Item) _createFile(osPath string) (err error) {
   466  	if item.fd != nil {
   467  		return errors.New("vfs cache item: internal error: didn't Close file")
   468  	}
   469  	item.modified = false
   470  	// t0 := time.Now()
   471  	fd, err := file.OpenFile(osPath, os.O_RDWR, 0600)
   472  	// fs.Debugf(item.name, "OpenFile took %v", time.Since(t0))
   473  	if err != nil {
   474  		return fmt.Errorf("vfs cache item: open failed: %w", err)
   475  	}
   476  	err = file.SetSparse(fd)
   477  	if err != nil {
   478  		fs.Errorf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
   479  	}
   480  	item.fd = fd
   481  
   482  	err = item._save()
   483  	if err != nil {
   484  		closeErr := item.fd.Close()
   485  		if closeErr != nil {
   486  			fs.Errorf(item.name, "vfs cache: item.fd.Close: closeErr: %v", closeErr)
   487  		}
   488  		item.fd = nil
   489  		return fmt.Errorf("vfs cache item: _save failed: %w", err)
   490  	}
   491  	return err
   492  }
   493  
   494  // Open the local file from the object passed in.  Wraps open()
   495  // to provide recovery from out-of-space errors.
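        //
        // A typical lifecycle as driven by the VFS layer looks roughly like
        // this (sketch only; o may be nil when creating a new file, and buf
        // and storeFn stand in for the caller's buffer and callback):
        //
        //	if err := item.Open(o); err != nil {
        //		return err
        //	}
        //	n, err := item.ReadAt(buf, 0)
        //	// ... use n bytes of buf, more reads and writes ...
        //	err = item.Close(storeFn)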
   496  func (item *Item) Open(o fs.Object) (err error) {
   497  	for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
   498  		item.preAccess()
   499  		err = item.open(o)
   500  		item.postAccess()
   501  		if err == nil {
   502  			break
   503  		}
   504  		fs.Errorf(item.name, "vfs cache: failed to open item: %v", err)
   505  		if !fserrors.IsErrNoSpace(err) && err.Error() != "no space left on device" {
   506  			fs.Errorf(item.name, "Non-out-of-space error encountered during open")
   507  			break
   508  		}
   509  		item.c.KickCleaner()
   510  	}
   511  	return err
   512  }
   513  
   514  // Open the local file from the object passed in (which may be nil)
   515  // which implies we are about to create the file
   516  func (item *Item) open(o fs.Object) (err error) {
   517  	// defer log.Trace(o, "item=%p", item)("err=%v", &err)
   518  	item.mu.Lock()
   519  	defer item.mu.Unlock()
   520  
   521  	item.info.ATime = time.Now()
   522  
   523  	osPath, err := item.c.createItemDir(item.name) // No locking in Cache
   524  	if err != nil {
   525  		return fmt.Errorf("vfs cache item: createItemDir failed: %w", err)
   526  	}
   527  
   528  	err = item._checkObject(o)
   529  	if err != nil {
   530  		return fmt.Errorf("vfs cache item: check object failed: %w", err)
   531  	}
   532  
   533  	item.opens++
   534  	if item.opens != 1 {
   535  		return nil
   536  	}
   537  
   538  	err = item._createFile(osPath)
   539  	if err != nil {
   540  		item._remove("item.open failed on _createFile, remove cache data/metadata files")
   541  		item.fd = nil
   542  		item.opens--
   543  		return fmt.Errorf("vfs cache item: create cache file failed: %w", err)
   544  	}
   545  	// Unlock the Item.mu so we can call some methods which take Cache.mu
   546  	item.mu.Unlock()
   547  
   548  	// Ensure this item is in the cache. It is possible a cache
   549  	// expiry has run and removed the item if it had no opens so
   550  	// we put it back here. If there was an item with opens
   551  	// already then return an error. This shouldn't happen because
   552  	// there should only be one vfs.File with a pointer to this
   553  	// item in at a time.
   554  	oldItem := item.c.put(item.name, item) // LOCKING in Cache method
   555  	if oldItem != nil {
   556  		oldItem.mu.Lock()
   557  		if oldItem.opens != 0 {
   558  			// Put the item back and return an error
   559  			item.c.put(item.name, oldItem) // LOCKING in Cache method
   560  			err = fmt.Errorf("internal error: item %q already open in the cache", item.name)
   561  		}
   562  		oldItem.mu.Unlock()
   563  	}
   564  
   565  	// Relock the Item.mu for the return
   566  	item.mu.Lock()
   567  
   568  	// Create the downloaders
   569  	if item.o != nil {
   570  		item.downloaders = downloaders.New(item, item.c.opt, item.name, item.o)
   571  	}
   572  
   573  	return err
   574  }
   575  
   576  // _store uploads the local cache file to the remote object, updating
   577  // item.o with the new remote object on success.
   578  //
   579  // Call with lock held
   580  func (item *Item) _store(ctx context.Context, storeFn StoreFn) (err error) {
   581  	// defer log.Trace(item.name, "item=%p", item)("err=%v", &err)
   582  
   583  	// Transfer the temp file to the remote
   584  	cacheObj, err := item.c.fcache.NewObject(ctx, item.name)
   585  	if err != nil && err != fs.ErrorObjectNotFound {
   586  		return fmt.Errorf("vfs cache: failed to find cache file: %w", err)
   587  	}
   588  
   589  	// Object has disappeared if cacheObj == nil
   590  	if cacheObj != nil {
   591  		o, name := item.o, item.name
   592  		item.mu.Unlock()
   593  		o, err := operations.Copy(ctx, item.c.fremote, o, name, cacheObj)
   594  		item.mu.Lock()
   595  		if err != nil {
   596  			if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
   597  				fs.Errorf(name, "Writeback failed: %v", err)
   598  				return nil
   599  			}
   600  			return fmt.Errorf("vfs cache: failed to transfer file from cache to remote: %w", err)
   601  		}
   602  		item.o = o
   603  		item._updateFingerprint()
   604  	}
   605  
   606  	// Write the object back to the VFS layer before we mark it as
   607  	// clean, otherwise it will become eligible for removal which
   608  	// can cause a deadlock
   609  	if storeFn != nil && item.o != nil {
   610  		fs.Debugf(item.name, "vfs cache: writeback object to VFS layer")
   611  		// Write the object back to the VFS layer last with mutex unlocked
   612  		o := item.o
   613  		item.mu.Unlock()
   614  		storeFn(o)
   615  		item.mu.Lock()
   616  	}
   617  
   618  	// Show item is clean and is eligible for cache removal
   619  	item.info.Dirty = false
   620  	err = item._save()
   621  	if err != nil {
   622  		fs.Errorf(item.name, "vfs cache: failed to write metadata file: %v", err)
   623  	}
   624  
   625  	return nil
   626  }
   627  
   628  // store uploads the local cache file to the remote object, taking
   629  // the item lock (see _store).
   630  func (item *Item) store(ctx context.Context, storeFn StoreFn) (err error) {
   631  	item.mu.Lock()
   632  	defer item.mu.Unlock()
   633  	return item._store(ctx, storeFn)
   634  }
   635  
   636  // Close the cache file
   637  func (item *Item) Close(storeFn StoreFn) (err error) {
   638  	// defer log.Trace(item.o, "Item.Close")("err=%v", &err)
   639  	item.preAccess()
   640  	defer item.postAccess()
   641  	var (
   642  		downloaders   *downloaders.Downloaders
   643  		syncWriteBack = item.c.opt.WriteBack <= 0
   644  	)
   645  	item.mu.Lock()
   646  	defer item.mu.Unlock()
   647  
   648  	item.info.ATime = time.Now()
   649  	item.opens--
   650  
   651  	if item.opens < 0 {
   652  		return os.ErrClosed
   653  	} else if item.opens > 0 {
   654  		return nil
   655  	}
   656  
   657  	// Update the size on close
   658  	_, _ = item._getSize()
   659  
   660  	// If the file is dirty ensure any segments not transferred
   661  	// are brought in first.
   662  	//
   663  	// FIXME It would be nice to do this asynchronously however it
   664  	// would require keeping the downloaders alive after the item
   665  	// has been closed
   666  	if item.info.Dirty && item.o != nil {
   667  		err = item._ensure(0, item.info.Size)
   668  		if err != nil {
   669  			return fmt.Errorf("vfs cache: failed to download missing parts of cache file: %w", err)
   670  		}
   671  	}
   672  
   673  	// Accumulate and log errors
   674  	checkErr := func(e error) {
   675  		if e != nil {
   676  			fs.Errorf(item.o, "vfs cache: item close failed: %v", e)
   677  			if err == nil {
   678  				err = e
   679  			}
   680  		}
   681  	}
   682  
   683  	// Close the downloaders
   684  	if downloaders = item.downloaders; downloaders != nil {
   685  		item.downloaders = nil
   686  		// FIXME need to unlock to kill downloader - should we
   687  		// re-arrange locking so this isn't necessary?  maybe
   688  		// downloader should use the item mutex for locking? or put a
   689  		// finer lock on Rs?
   690  		//
   691  		// downloader.Write calls ensure which needs the lock
   692  		// close downloader with mutex unlocked
   693  		item.mu.Unlock()
   694  		checkErr(downloaders.Close(nil))
   695  		item.mu.Lock()
   696  	}
   697  
   698  	// close the file handle
   699  	if item.fd == nil {
   700  		checkErr(errors.New("vfs cache item: internal error: didn't Open file"))
   701  	} else {
   702  		checkErr(item.fd.Close())
   703  		item.fd = nil
   704  	}
   705  
   706  	// save the metadata once more since it may be dirty
   707  	// after the downloader
   708  	checkErr(item._save())
   709  
   710  	// if the item hasn't been changed but has been completed then
   711  	// set the modtime from the object otherwise set it from the info
   712  	if item._exists() {
   713  		if !item.info.Dirty && item.o != nil {
   714  			item._setModTime(item.o.ModTime(context.Background()))
   715  		} else {
   716  			item._setModTime(item.info.ModTime)
   717  		}
   718  	}
   719  
   720  	// upload the file to backing store if changed
   721  	if item.info.Dirty {
   722  		fs.Infof(item.name, "vfs cache: queuing for upload in %v", item.c.opt.WriteBack)
   723  		if syncWriteBack {
   724  			// do synchronous writeback
   725  			checkErr(item._store(context.Background(), storeFn))
   726  		} else {
   727  			// asynchronous writeback
   728  			item.c.writeback.SetID(&item.writeBackID)
   729  			id := item.writeBackID
   730  			item.mu.Unlock()
   731  			item.c.writeback.Add(id, item.name, item.modified, func(ctx context.Context) error {
   732  				return item.store(ctx, storeFn)
   733  			})
   734  			item.mu.Lock()
   735  		}
   736  	}
   737  
   738  	// mark as not modified now we have uploaded or queued for upload
   739  	item.modified = false
   740  
   741  	return err
   742  }
   743  
   744  // reload is called with valid items recovered from a cache reload.
   745  //
   746  // If they are dirty then it makes sure they get uploaded.
   747  //
   748  // It is called before the cache has started so opens will be 0 and
   749  // the item will not yet be marked as modified.
   750  func (item *Item) reload(ctx context.Context) error {
   751  	item.mu.Lock()
   752  	dirty := item.info.Dirty
   753  	item.mu.Unlock()
   754  	if !dirty {
   755  		return nil
   756  	}
   757  	// see if the object still exists
   758  	obj, _ := item.c.fremote.NewObject(ctx, item.name)
   759  	// open the file with the object (or nil)
   760  	err := item.Open(obj)
   761  	if err != nil {
   762  		return err
   763  	}
   764  	// close the file to execute the writeback if needed
   765  	err = item.Close(nil)
   766  	if err != nil {
   767  		return err
   768  	}
   769  	// put the file into the directory listings
   770  	size, err := item._getSize()
   771  	if err != nil {
   772  		return fmt.Errorf("reload: failed to read size: %w", err)
   773  	}
   774  	err = item.c.AddVirtual(item.name, size, false)
   775  	if err != nil {
   776  		return fmt.Errorf("reload: failed to add virtual dir entry: %w", err)
   777  	}
   778  	return nil
   779  }
   780  
   781  // check the fingerprint of an object and update the item or delete
   782  // the cached file accordingly
   783  //
   784  // If we have local modifications then they take precedence
   785  // over a change in the remote
   786  //
   787  // It ensures the file is the correct size for the object.
   788  //
   789  // call with lock held
   790  func (item *Item) _checkObject(o fs.Object) error {
   791  	if o == nil {
   792  		if item.info.Fingerprint != "" {
   793  			// no remote object && local object
   794  			// remove local object unless dirty
   795  			if !item.info.Dirty {
   796  				item._remove("stale (remote deleted)")
   797  			} else {
   798  				fs.Debugf(item.name, "vfs cache: remote object has gone but local object modified - keeping it")
   799  			}
   800  			//} else {
   801  			// no remote object && no local object
   802  			// OK
   803  		}
   804  	} else {
   805  		remoteFingerprint := fs.Fingerprint(context.TODO(), o, item.c.opt.FastFingerprint)
   806  		fs.Debugf(item.name, "vfs cache: checking remote fingerprint %q against cached fingerprint %q", remoteFingerprint, item.info.Fingerprint)
   807  		if item.info.Fingerprint != "" {
   808  			// remote object && local object
   809  			if remoteFingerprint != item.info.Fingerprint {
   810  				if !item.info.Dirty {
   811  					fs.Debugf(item.name, "vfs cache: removing cached entry as stale (remote fingerprint %q != cached fingerprint %q)", remoteFingerprint, item.info.Fingerprint)
   812  					item._remove("stale (remote is different)")
   813  					item.info.Fingerprint = remoteFingerprint
   814  				} else {
   815  					fs.Debugf(item.name, "vfs cache: remote object has changed but local object modified - keeping it (remote fingerprint %q != cached fingerprint %q)", remoteFingerprint, item.info.Fingerprint)
   816  				}
   817  			}
   818  		} else {
   819  			// remote object && no local object
   820  			// Set fingerprint
   821  			item.info.Fingerprint = remoteFingerprint
   822  		}
   823  		item.info.Size = o.Size()
   824  	}
   825  	item.o = o
   826  
   827  	err := item._truncateToCurrentSize()
   828  	if err != nil {
   829  		return fmt.Errorf("vfs cache item: open truncate failed: %w", err)
   830  	}
   831  
   832  	return nil
   833  }
   834  
   835  // WrittenBack checks to see if the item has been written back or not
   836  func (item *Item) WrittenBack() bool {
   837  	item.mu.Lock()
   838  	defer item.mu.Unlock()
   839  	return item.info.Fingerprint != ""
   840  }
   841  
   842  // remove the cached file
   843  //
   844  // call with lock held
   845  func (item *Item) _removeFile(reason string) {
   846  	osPath := item.c.toOSPath(item.name) // No locking in Cache
   847  	err := os.Remove(osPath)
   848  	if err != nil {
   849  		if !os.IsNotExist(err) {
   850  			fs.Errorf(item.name, "vfs cache: failed to remove cache file as %s: %v", reason, err)
   851  		}
   852  	} else {
   853  		fs.Infof(item.name, "vfs cache: removed cache file as %s", reason)
   854  	}
   855  }
   856  
   857  // remove the metadata
   858  //
   859  // call with lock held
   860  func (item *Item) _removeMeta(reason string) {
   861  	osPathMeta := item.c.toOSPathMeta(item.name) // No locking in Cache
   862  	err := os.Remove(osPathMeta)
   863  	if err != nil {
   864  		if !os.IsNotExist(err) {
   865  			fs.Errorf(item.name, "vfs cache: failed to remove metadata from cache as %s: %v", reason, err)
   866  		}
   867  	} else {
   868  		fs.Debugf(item.name, "vfs cache: removed metadata from cache as %s", reason)
   869  	}
   870  }
   871  
   872  // remove the cached file and empty the metadata
   873  //
   874  // This returns true if the file was in the transfer queue so may not
   875  // have completely uploaded yet.
   876  //
   877  // call with lock held
   878  func (item *Item) _remove(reason string) (wasWriting bool) {
   879  	// Cancel writeback, if any
   880  	item.mu.Unlock()
   881  	wasWriting = item.c.writeback.Remove(item.writeBackID)
   882  	item.mu.Lock()
   883  	item.info.clean()
   884  	item._removeFile(reason)
   885  	item._removeMeta(reason)
   886  	return wasWriting
   887  }
   888  
   889  // remove the cached file and empty the metadata
   890  //
   891  // This returns true if the file was in the transfer queue so may not
   892  // have completely uploaded yet.
   893  func (item *Item) remove(reason string) (wasWriting bool) {
   894  	item.mu.Lock()
   895  	defer item.mu.Unlock()
   896  	return item._remove(reason)
   897  }
   898  
   899  // RemoveNotInUse is called to remove a cache file that has not been accessed recently.
   900  // It may also be called to remove empty cache files when the quota has already been reached.
   901  func (item *Item) RemoveNotInUse(maxAge time.Duration, emptyOnly bool) (removed bool, spaceFreed int64) {
   902  	item.mu.Lock()
   903  	defer item.mu.Unlock()
   904  
   905  	spaceFreed = 0
   906  	removed = false
   907  
   908  	if item.opens != 0 || item.info.Dirty {
   909  		return
   910  	}
   911  
   912  	removeIt := false
   913  	if maxAge == 0 {
   914  		removeIt = true // quota-driven removal
   915  	}
   916  	if maxAge != 0 {
   917  		cutoff := time.Now().Add(-maxAge)
   918  		// If not locked and access time too long ago - delete the file
   919  		accessTime := item.info.ATime
   920  		if accessTime.Sub(cutoff) <= 0 {
   921  			removeIt = true
   922  		}
   923  	}
   924  	if removeIt {
   925  		spaceUsed := item.info.Rs.Size()
   926  		if !emptyOnly || spaceUsed == 0 {
   927  			spaceFreed = spaceUsed
   928  			removed = true
   929  			if item._remove("Removing old cache file not in use") {
   930  				fs.Errorf(item.name, "item removed when it was writing/uploaded")
   931  			}
   932  		}
   933  	}
   934  	return
   935  }
   936  
   937  // Reset is called by the cache purge functions only to reset (empty the contents) cache files that
   938  // are not dirty.  It is used when cache space runs out and we see some ENOSPC error.
   939  func (item *Item) Reset() (rr ResetResult, spaceFreed int64, err error) {
   940  	item.mu.Lock()
   941  	defer item.mu.Unlock()
   942  
   943  	// The item is not being used now.  Just remove it instead of resetting it.
   944  	if item.opens == 0 && !item.info.Dirty {
   945  		spaceFreed = item.info.Rs.Size()
   946  		if item._remove("Removing old cache file not in use") {
   947  			fs.Errorf(item.name, "item removed when it was writing/uploaded")
   948  		}
   949  		return RemovedNotInUse, spaceFreed, nil
   950  	}
   951  
   952  	// do not reset dirty file
   953  	if item.info.Dirty {
   954  		return SkippedDirty, 0, nil
   955  	}
   956  
   957  	/* A wait on pendingAccesses to become 0 can lead to deadlock when an item.Open bumps
   958  	   up the pendingAccesses count, calls item.open, which calls cache.put. The cache.put
   959  	   operation needs the cache mutex, which is held here.  We skip this file now. The
   960  	   caller (the cache cleaner thread) may retry resetting this item if the cache size does
   961  	   not reduce below quota. */
   962  	if item.pendingAccesses > 0 {
   963  		return SkippedPendingAccess, 0, nil
   964  	}
   965  
   966  	/* Do not need to reset an empty cache file unless it was being reset and the reset failed.
   967  	   Some thread(s) may be waiting on the reset's successful completion in that case. */
   968  	if item.info.Rs.Size() == 0 && !item.beingReset {
   969  		return SkippedEmpty, 0, nil
   970  	}
   971  
   972  	item.beingReset = true
   973  
   974  	/* Error handling from this point on (setting item.fd and item.beingReset):
   975  	   Since Reset is called by the cache cleaner thread, there is no direct way to return
   976  	   the error to the io threads.  Set item.fd to nil upon internal errors, so that the
   977  	   io threads will return internal errors seeing a nil fd. In the case when the error
   978  	   is ENOSPC, keep the item in the beingReset state and that will keep the item.ReadAt
   979  	   waiting at its beginning. The cache purge loop will try to redo the reset after cache
   980  	   space is made available again. This recovery design should allow most io threads to
   981  	   eventually go through, unless large files are written/overwritten concurrently and
   982  	   the total size of these files exceed the cache storage limit. */
   983  
   984  	// Close the downloaders
   985  	// Accumulate and log errors
   986  	checkErr := func(e error) {
   987  		if e != nil {
   988  			fs.Errorf(item.o, "vfs cache: item reset failed: %v", e)
   989  			if err == nil {
   990  				err = e
   991  			}
   992  		}
   993  	}
   994  
   995  	if downloaders := item.downloaders; downloaders != nil {
   996  		item.downloaders = nil
   997  		// FIXME need to unlock to kill downloader - should we
   998  		// re-arrange locking so this isn't necessary?  maybe
   999  		// downloader should use the item mutex for locking? or put a
  1000  		// finer lock on Rs?
  1001  		//
  1002  		// downloader.Write calls ensure which needs the lock
  1003  		// close downloader with mutex unlocked
  1004  		item.mu.Unlock()
  1005  		checkErr(downloaders.Close(nil))
  1006  		item.mu.Lock()
  1007  	}
  1008  
  1009  	// close the file handle
  1010  	// fd can be nil if we tried Reset and failed before because of ENOSPC during reset
  1011  	if item.fd != nil {
  1012  		checkErr(item.fd.Close())
  1013  		if err != nil {
  1014  			// Could not close the cache file
  1015  			item.beingReset = false
  1016  			item.cond.Broadcast()
  1017  			return ResetFailed, 0, err
  1018  		}
  1019  		item.fd = nil
  1020  	}
  1021  
  1022  	spaceFreed = item.info.Rs.Size()
  1023  
  1024  	// This should not be possible.  We get here only if cache data is not dirty.
  1025  	if item._remove("cache out of space, item is clean") {
  1026  		fs.Errorf(item.o, "vfs cache item removed when it was writing/uploaded")
  1027  	}
  1028  
  1029  	// can we have an item with no dirty data (so that we can get here) and nil item.o at the same time?
  1030  	fso := item.o
  1031  	checkErr(item._checkObject(fso))
  1032  	if err != nil {
  1033  		item.beingReset = false
  1034  		item.cond.Broadcast()
  1035  		return ResetFailed, spaceFreed, err
  1036  	}
  1037  
  1038  	osPath := item.c.toOSPath(item.name)
  1039  	checkErr(item._createFile(osPath))
  1040  	if err != nil {
  1041  		item._remove("cache reset failed on _createFile, removed cache data file")
  1042  		item.fd = nil // This allows a new Reset redo to have a clean state to deal with
  1043  		if !fserrors.IsErrNoSpace(err) {
  1044  			item.beingReset = false
  1045  			item.cond.Broadcast()
  1046  		}
  1047  		return ResetFailed, spaceFreed, err
  1048  	}
  1049  
  1050  	// Create the downloaders
  1051  	if item.o != nil {
  1052  		item.downloaders = downloaders.New(item, item.c.opt, item.name, item.o)
  1053  	}
  1054  
  1055  	/* The item will stay in the beingReset state if we get an error that prevents us from
  1056  	reaching this point.  The cache purge loop will redo the failed Reset. */
  1057  	item.beingReset = false
  1058  	item.cond.Broadcast()
  1059  
  1060  	return ResetComplete, spaceFreed, err
  1061  }
  1062  
  1063  // preAccess either waits for an ongoing cache reset to finish or increases pendingAccesses
  1064  // to protect against a cache reset on this item while the thread potentially uses the cache file.
  1065  // The cache cleaner waits until pendingAccesses is zero before resetting the cache.
  1066  func (item *Item) preAccess() {
  1067  	item.mu.Lock()
  1068  	defer item.mu.Unlock()
  1069  
  1070  	if item.beingReset {
  1071  		for {
  1072  			item.cond.Wait()
  1073  			if !item.beingReset {
  1074  				break
  1075  			}
  1076  		}
  1077  	}
  1078  	item.pendingAccesses++
  1079  }
  1080  
  1081  // postAccess reduces the pendingAccesses count, enabling a cache reset upon ENOSPC
  1082  func (item *Item) postAccess() {
  1083  	item.mu.Lock()
  1084  	defer item.mu.Unlock()
  1085  
  1086  	item.pendingAccesses--
  1087  	item.cond.Broadcast()
  1088  }
  1089  
  1090  // _present returns true if the whole file has been downloaded
  1091  //
  1092  // call with the lock held
  1093  func (item *Item) _present() bool {
  1094  	return item.info.Rs.Present(ranges.Range{Pos: 0, Size: item.info.Size})
  1095  }
  1096  
  1097  // present returns true if the whole file has been downloaded
  1098  func (item *Item) present() bool {
  1099  	item.mu.Lock()
  1100  	defer item.mu.Unlock()
  1101  	return item._present()
  1102  }
  1103  
  1104  // HasRange returns true if the current ranges entirely include range
  1105  func (item *Item) HasRange(r ranges.Range) bool {
  1106  	item.mu.Lock()
  1107  	defer item.mu.Unlock()
  1108  	return item.info.Rs.Present(r)
  1109  }
  1110  
  1111  // FindMissing adjusts r returning a new ranges.Range which only
  1112  // contains the range which needs to be downloaded. This could be
  1113  // empty - check with IsEmpty. It also adjusts the result to make sure it is
  1114  // not larger than the file.
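        //
        // For example (sketch):
        //
        //	missing := item.FindMissing(ranges.Range{Pos: 4096, Size: 8192})
        //	if !missing.IsEmpty() {
        //		// only this sub-range still needs to be downloaded
        //	}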
  1115  func (item *Item) FindMissing(r ranges.Range) (outr ranges.Range) {
  1116  	item.mu.Lock()
  1117  	defer item.mu.Unlock()
  1118  	outr = item.info.Rs.FindMissing(r)
  1119  	// Clip returned block to size of file
  1120  	outr.Clip(item.info.Size)
  1121  	return outr
  1122  }
  1123  
  1124  // ensure the range from offset, size is present in the backing file
  1125  //
  1126  // call with the item lock held
  1127  func (item *Item) _ensure(offset, size int64) (err error) {
  1128  	// defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("err=%v", &err)
  1129  	if offset+size > item.info.Size {
  1130  		size = item.info.Size - offset
  1131  	}
  1132  	r := ranges.Range{Pos: offset, Size: size}
  1133  	present := item.info.Rs.Present(r)
  1134  	/* This statement simulates a cache space error for test purposes */
  1135  	/* if present != true && item.info.Rs.Size() > 32*1024*1024 {
  1136  		return errors.New("no space left on device")
  1137  	} */
  1138  	fs.Debugf(nil, "vfs cache: looking for range=%+v in %+v - present %v", r, item.info.Rs, present)
  1139  	item.mu.Unlock()
  1140  	defer item.mu.Lock()
  1141  	if present {
  1142  		// This is a file we are writing so no downloaders needed
  1143  		if item.downloaders == nil {
  1144  			return nil
  1145  		}
  1146  		// Otherwise start the downloader for the future if required
  1147  		return item.downloaders.EnsureDownloader(r)
  1148  	}
  1149  	if item.downloaders == nil {
  1150  		// Downloaders can be nil here if the file has been
  1151  		// renamed, so need to make some more downloaders
  1152  		// OK to call downloaders constructor with item.mu held
  1153  
  1154  		// item.o can also be nil under some circumstances
  1155  		// See: https://github.com/rclone/rclone/issues/6190
  1156  		// See: https://github.com/rclone/rclone/issues/6235
  1157  		if item.o == nil {
  1158  			o, err := item.c.fremote.NewObject(context.Background(), item.name)
  1159  			if err != nil {
  1160  				return err
  1161  			}
  1162  			item.o = o
  1163  		}
  1164  		item.downloaders = downloaders.New(item, item.c.opt, item.name, item.o)
  1165  	}
  1166  	return item.downloaders.Download(r)
  1167  }
  1168  
  1169  // _written marks the (offset, size) as present in the backing file
  1170  //
  1171  // This is called by the downloader downloading file segments and the
  1172  // vfs layer writing to the file.
  1173  //
  1174  // This doesn't mark the item as Dirty - that is the responsibility
  1175  // of the caller as we don't know here whether we are adding reads or
  1176  // writes to the cache file.
  1177  //
  1178  // call with lock held
  1179  func (item *Item) _written(offset, size int64) {
  1180  	// defer log.Trace(item.name, "offset=%d, size=%d", offset, size)("")
  1181  	item.info.Rs.Insert(ranges.Range{Pos: offset, Size: size})
  1182  }
  1183  
  1184  // update the fingerprint of the object if any
  1185  //
  1186  // call with lock held
  1187  func (item *Item) _updateFingerprint() {
  1188  	if item.o == nil {
  1189  		return
  1190  	}
  1191  	oldFingerprint := item.info.Fingerprint
  1192  	item.info.Fingerprint = fs.Fingerprint(context.TODO(), item.o, item.c.opt.FastFingerprint)
  1193  	if oldFingerprint != item.info.Fingerprint {
  1194  		fs.Debugf(item.o, "vfs cache: fingerprint now %q", item.info.Fingerprint)
  1195  	}
  1196  }
  1197  
  1198  // setModTime of the cache file
  1199  //
  1200  // call with lock held
  1201  func (item *Item) _setModTime(modTime time.Time) {
  1202  	fs.Debugf(item.name, "vfs cache: setting modification time to %v", modTime)
  1203  	osPath := item.c.toOSPath(item.name) // No locking in Cache
  1204  	err := os.Chtimes(osPath, modTime, modTime)
  1205  	if err != nil {
  1206  		fs.Errorf(item.name, "vfs cache: failed to set modification time of cached file: %v", err)
  1207  	}
  1208  }
  1209  
  1210  // setModTime of the cache file and in the Item
  1211  func (item *Item) setModTime(modTime time.Time) {
  1212  	// defer log.Trace(item.name, "modTime=%v", modTime)("")
  1213  	item.mu.Lock()
  1214  	item._updateFingerprint()
  1215  	item._setModTime(modTime)
  1216  	item.info.ModTime = modTime
  1217  	err := item._save()
  1218  	if err != nil {
  1219  		fs.Errorf(item.name, "vfs cache: setModTime: failed to save item info: %v", err)
  1220  	}
  1221  	item.mu.Unlock()
  1222  }
  1223  
  1224  // GetModTime of the cache file
  1225  func (item *Item) GetModTime() (modTime time.Time, err error) {
  1226  	// defer log.Trace(item.name, "modTime=%v", modTime)("")
  1227  	item.mu.Lock()
  1228  	defer item.mu.Unlock()
  1229  	fi, err := item._stat()
  1230  	if err == nil {
  1231  		modTime = fi.ModTime()
  1232  	}
  1233  	return modTime, nil
  1234  }
  1235  
  1236  // ReadAt bytes from the file at off
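        //
        // Out-of-space errors are retried up to LowLevelRetries times: each
        // retry kicks the cache cleaner and then backs off for 2, 4, 8, ...
        // milliseconds before trying again.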
  1237  func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
  1238  	n = 0
  1239  	var expBackOff int
  1240  	for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ {
  1241  		item.preAccess()
  1242  		n, err = item.readAt(b, off)
  1243  		item.postAccess()
  1244  		if err == nil || err == io.EOF {
  1245  			break
  1246  		}
  1247  		fs.Errorf(item.name, "vfs cache: failed to _ensure cache %v", err)
  1248  		if !fserrors.IsErrNoSpace(err) && err.Error() != "no space left on device" {
  1249  			fs.Debugf(item.name, "vfs cache: failed to _ensure cache %v is not out of space", err)
  1250  			break
  1251  		}
  1252  		item.c.KickCleaner()
  1253  		expBackOff = 2 << uint(retries)
  1254  		time.Sleep(time.Duration(expBackOff) * time.Millisecond) // Exponential back-off the retries
  1255  	}
  1256  
  1257  	if fserrors.IsErrNoSpace(err) {
  1258  		fs.Errorf(item.name, "vfs cache: failed to _ensure cache after retries %v", err)
  1259  	}
  1260  
  1261  	return n, err
  1262  }
  1263  
  1264  // readAt reads bytes from the file at off, called by ReadAt which adds retries
  1265  func (item *Item) readAt(b []byte, off int64) (n int, err error) {
  1266  	item.mu.Lock()
  1267  	if item.fd == nil {
  1268  		item.mu.Unlock()
  1269  		return 0, errors.New("vfs cache item ReadAt: internal error: didn't Open file")
  1270  	}
  1271  	if off < 0 {
  1272  		item.mu.Unlock()
  1273  		return 0, io.EOF
  1274  	}
  1275  	defer item.mu.Unlock()
  1276  
  1277  	err = item._ensure(off, int64(len(b)))
  1278  	if err != nil {
  1279  		return 0, err
  1280  	}
  1281  
  1282  	// Check to see if object has shrunk - if so don't read too much.
  1283  	if item.o != nil && !item.info.Dirty && item.o.Size() != item.info.Size {
  1284  		fs.Debugf(item.o, "Size has changed from %d to %d", item.info.Size, item.o.Size())
  1285  		err = item._truncate(item.o.Size())
  1286  		if err != nil {
  1287  			return 0, err
  1288  		}
  1289  	}
  1290  
  1291  	item.info.ATime = time.Now()
  1292  	// Do the reading with Item.mu unlocked and cache protected by preAccess
  1293  	n, err = item.fd.ReadAt(b, off)
  1294  	return n, err
  1295  }
  1296  
  1297  // WriteAt bytes to the file at off
  1298  func (item *Item) WriteAt(b []byte, off int64) (n int, err error) {
  1299  	item.preAccess()
  1300  	defer item.postAccess()
  1301  	item.mu.Lock()
  1302  	if item.fd == nil {
  1303  		item.mu.Unlock()
  1304  		return 0, errors.New("vfs cache item WriteAt: internal error: didn't Open file")
  1305  	}
  1306  	item.mu.Unlock()
  1307  	// Do the writing with Item.mu unlocked
  1308  	n, err = item.fd.WriteAt(b, off)
  1309  	if err == nil && n != len(b) {
  1310  		err = fmt.Errorf("short write: tried to write %d but only %d written", len(b), n)
  1311  	}
  1312  	item.mu.Lock()
  1313  	item._written(off, int64(n))
  1314  	if n > 0 {
  1315  		item._dirty()
  1316  	}
  1317  	end := off + int64(n)
  1318  	// Writing off the end of the file so need to make some
  1319  	// zeroes.  We do this by showing that we have written to the
  1320  	// new parts of the file.
  1321  	if off > item.info.Size {
  1322  		item._written(item.info.Size, off-item.info.Size)
  1323  		item._dirty()
  1324  	}
  1325  	// Update size
  1326  	if end > item.info.Size {
  1327  		item.info.Size = end
  1328  	}
  1329  	item.mu.Unlock()
  1330  	return n, err
  1331  }
  1332  
  1333  // WriteAtNoOverwrite writes b to the file, but will not overwrite
  1334  // already present ranges.
  1335  //
  1336  // This is used by the downloader to write bytes to the file.
  1337  //
  1338  // It returns n, the total number of bytes processed, and skipped, the
  1339  // number of bytes which were processed but not actually written to the file.
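        //
        // For example (sketch): if b covers a 128 KiB range of which the first
        // 64 KiB is already present, the first half is skipped and only the
        // second half is written, returning n == len(b) and skipped == 64 KiB
        // (assuming no error).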
  1340  func (item *Item) WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, err error) {
  1341  	item.mu.Lock()
  1342  
  1343  	var (
  1344  		// Range we wish to write
  1345  		r = ranges.Range{Pos: off, Size: int64(len(b))}
  1346  		// Ranges that we need to write
  1347  		foundRanges = item.info.Rs.FindAll(r)
  1348  		// Length of each write
  1349  		nn int
  1350  	)
  1351  
  1352  	// Write the range out ignoring already written chunks
  1353  	// fs.Debugf(item.name, "Ranges = %v", item.info.Rs)
  1354  	for i := range foundRanges {
  1355  		foundRange := &foundRanges[i]
  1356  		// fs.Debugf(item.name, "foundRange[%d] = %v", i, foundRange)
  1357  		if foundRange.R.Pos != off {
  1358  			err = errors.New("internal error: offset of range is wrong")
  1359  			break
  1360  		}
  1361  		size := int(foundRange.R.Size)
  1362  		if foundRange.Present {
  1363  			// if present want to skip this range
  1364  			// fs.Debugf(item.name, "skip chunk offset=%d size=%d", off, size)
  1365  			nn = size
  1366  			skipped += size
  1367  		} else {
  1368  			// if range not present then we want to write it
  1369  			// fs.Debugf(item.name, "write chunk offset=%d size=%d", off, size)
  1370  			nn, err = item.fd.WriteAt(b[:size], off)
  1371  			if err == nil && nn != size {
  1372  				err = fmt.Errorf("downloader: short write: tried to write %d but only %d written", size, nn)
  1373  			}
  1374  			item._written(off, int64(nn))
  1375  		}
  1376  		off += int64(nn)
  1377  		b = b[nn:]
  1378  		n += nn
  1379  		if err != nil {
  1380  			break
  1381  		}
  1382  	}
  1383  	item.mu.Unlock()
  1384  	return n, skipped, err
  1385  }
  1386  
  1387  // Sync commits the current contents of the file to stable storage. Typically,
  1388  // this means flushing the file system's in-memory copy of recently written
  1389  // data to disk.
  1390  func (item *Item) Sync() (err error) {
  1391  	item.preAccess()
  1392  	defer item.postAccess()
  1393  	item.mu.Lock()
  1394  	defer item.mu.Unlock()
  1395  	if item.fd == nil {
  1396  		return errors.New("vfs cache item sync: internal error: didn't Open file")
  1397  	}
  1398  	// sync the file and the metadata to disk
  1399  	err = item.fd.Sync()
  1400  	if err != nil {
  1401  		return fmt.Errorf("vfs cache item sync: failed to sync file: %w", err)
  1402  	}
  1403  	err = item._save()
  1404  	if err != nil {
  1405  		return fmt.Errorf("vfs cache item sync: failed to sync metadata: %w", err)
  1406  	}
  1407  	return nil
  1408  }
  1409  
  1410  // rename the item
  1411  func (item *Item) rename(name string, newName string, newObj fs.Object) (err error) {
  1412  	item.preAccess()
  1413  	defer item.postAccess()
  1414  	item.mu.Lock()
  1415  
  1416  	// stop downloader
  1417  	downloaders := item.downloaders
  1418  	item.downloaders = nil
  1419  
  1420  	// id for writeback cancel
  1421  	id := item.writeBackID
  1422  
  1423  	// Set internal state
  1424  	item.name = newName
  1425  	item.o = newObj
  1426  
  1427  	// Rename cache file if it exists
  1428  	err = rename(item.c.toOSPath(name), item.c.toOSPath(newName)) // No locking in Cache
  1429  
  1430  	// Rename meta file if it exists
  1431  	err2 := rename(item.c.toOSPathMeta(name), item.c.toOSPathMeta(newName)) // No locking in Cache
  1432  	if err2 != nil {
  1433  		err = err2
  1434  	}
  1435  
  1436  	item.mu.Unlock()
  1437  
  1438  	// close downloader and cancel writebacks with mutex unlocked
  1439  	if downloaders != nil {
  1440  		_ = downloaders.Close(nil)
  1441  	}
  1442  	item.c.writeback.Rename(id, newName)
  1443  	return err
  1444  }