github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/fstest/fstests/fstests.go (about)

     1  // Package fstests provides generic integration tests for the Fs and
     2  // Object interfaces.
     3  //
     4  // These tests are concerned with the basic functionality of a
     5  // backend.  The tests in fs/sync and fs/operations tests more
     6  // cornercases that these tests don't.
     7  package fstests
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"errors"
    13  	"fmt"
    14  	"io"
    15  	"math/bits"
    16  	"os"
    17  	"path"
    18  	"path/filepath"
    19  	"reflect"
    20  	"sort"
    21  	"strconv"
    22  	"strings"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/rclone/rclone/fs"
    27  	"github.com/rclone/rclone/fs/cache"
    28  	"github.com/rclone/rclone/fs/config"
    29  	"github.com/rclone/rclone/fs/fserrors"
    30  	"github.com/rclone/rclone/fs/fspath"
    31  	"github.com/rclone/rclone/fs/hash"
    32  	"github.com/rclone/rclone/fs/object"
    33  	"github.com/rclone/rclone/fs/operations"
    34  	"github.com/rclone/rclone/fs/walk"
    35  	"github.com/rclone/rclone/fstest"
    36  	"github.com/rclone/rclone/fstest/testserver"
    37  	"github.com/rclone/rclone/lib/encoder"
    38  	"github.com/rclone/rclone/lib/random"
    39  	"github.com/rclone/rclone/lib/readers"
    40  	"github.com/stretchr/testify/assert"
    41  	"github.com/stretchr/testify/require"
    42  )
    43  
// InternalTester is an optional interface for Fs which allows to execute internal tests
//
// This interface should be implemented in 'backend'_internal_test.go and not in 'backend'.go
type InternalTester interface {
	// InternalTest runs the backend's internal tests as subtests of t.
	InternalTest(*testing.T)
}
    50  
// ChunkedUploadConfig contains the values used by TestFsPutChunked
// to determine the limits of chunked uploading
type ChunkedUploadConfig struct {
	// Minimum allowed chunk size
	MinChunkSize fs.SizeSuffix
	// Maximum allowed chunk size, 0 is no limit
	MaxChunkSize fs.SizeSuffix
	// Rounds the given chunk size up to the next valid value
	// nil will disable rounding
	// e.g. the next power of 2 (see NextPowerOfTwo) or the next
	// multiple of a fixed quantum (see NextMultipleOf)
	CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
	// More than one chunk is required on upload
	NeedMultipleChunks bool
	// Skip this particular remote
	Skip bool
}
    67  
// SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
type SetUploadChunkSizer interface {
	// SetUploadChunkSize changes the configured UploadChunkSize,
	// returning the previous value and any error.
	// Will only be called while no transfer is in progress.
	SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
}
    74  
// SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime
type SetUploadCutoffer interface {
	// SetUploadCutoff changes the configured UploadCutoff,
	// returning the previous value and any error.
	// Will only be called while no transfer is in progress.
	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}
    81  
// SetCopyCutoffer is a test only interface to change the copy cutoff size at runtime
type SetCopyCutoffer interface {
	// SetCopyCutoff changes the configured CopyCutoff,
	// returning the previous value and any error.
	// Will only be called while no transfer is in progress.
	SetCopyCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}
    88  
    89  // NextPowerOfTwo returns the current or next bigger power of two.
    90  // All values less or equal 0 will return 0
    91  func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
    92  	return 1 << uint(64-bits.LeadingZeros64(uint64(i)-1))
    93  }
    94  
    95  // NextMultipleOf returns a function that can be used as a CeilChunkSize function.
    96  // This function will return the next multiple of m that is equal or bigger than i.
    97  // All values less or equal 0 will return 0.
    98  func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
    99  	if m <= 0 {
   100  		panic(fmt.Sprintf("invalid multiplier %s", m))
   101  	}
   102  	return func(i fs.SizeSuffix) fs.SizeSuffix {
   103  		if i <= 0 {
   104  			return 0
   105  		}
   106  
   107  		return (((i - 1) / m) + 1) * m
   108  	}
   109  }
   110  
   111  // dirsToNames returns a sorted list of names
   112  func dirsToNames(dirs []fs.Directory) []string {
   113  	names := []string{}
   114  	for _, dir := range dirs {
   115  		names = append(names, fstest.Normalize(dir.Remote()))
   116  	}
   117  	sort.Strings(names)
   118  	return names
   119  }
   120  
   121  // objsToNames returns a sorted list of object names
   122  func objsToNames(objs []fs.Object) []string {
   123  	names := []string{}
   124  	for _, obj := range objs {
   125  		names = append(names, fstest.Normalize(obj.Remote()))
   126  	}
   127  	sort.Strings(names)
   128  	return names
   129  }
   130  
   131  // retry f() until no retriable error
   132  func retry(t *testing.T, what string, f func() error) {
   133  	const maxTries = 10
   134  	var err error
   135  	for tries := 1; tries <= maxTries; tries++ {
   136  		err = f()
   137  		// exit if no error, or error is not retriable
   138  		if err == nil || !fserrors.IsRetryError(err) {
   139  			break
   140  		}
   141  		t.Logf("%s error: %v - low level retry %d/%d", what, err, tries, maxTries)
   142  		time.Sleep(2 * time.Second)
   143  	}
   144  	require.NoError(t, err, what)
   145  }
   146  
   147  // check interface
   148  
// PutTestContentsMetadata puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove
//
// It uploads the object with the mimeType and metadata passed in if set.
//
// It returns the object which will have been checked if check is set
func PutTestContentsMetadata(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool, mimeType string, metadata fs.Metadata) fs.Object {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	retry(t, "Put", func() error {
		buf := bytes.NewBufferString(contents)
		// Hash the data as it is uploaded so the expected checksums
		// can be recorded in file.Hashes afterwards.
		uploadHash = hash.NewMultiHasher()
		in := io.TeeReader(buf, uploadHash)

		file.Size = int64(buf.Len())
		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
		if mimeType != "" || metadata != nil {
			// force the --metadata flag on temporarily
			if metadata != nil {
				ci := fs.GetConfig(ctx)
				previousMetadata := ci.Metadata
				ci.Metadata = true
				// Restore the global config flag when this upload
				// attempt returns (defer is scoped to the closure,
				// so it also fires on each retry).
				defer func() {
					ci.Metadata = previousMetadata
				}()
			}
			obji.WithMetadata(metadata).WithMimeType(mimeType)
		}
		obj, err = f.Put(ctx, in, obji)
		return err
	})
	file.Hashes = uploadHash.Sums()
	if check {
		// Overwrite time with that in metadata if it is already specified
		mtime, ok := metadata["mtime"]
		if ok {
			modTime, err := time.Parse(time.RFC3339Nano, mtime)
			require.NoError(t, err)
			file.ModTime = modTime
		}
		file.Check(t, obj, f.Precision())
		// Re-read the object and check again
		obj = fstest.NewObject(ctx, t, f, file.Path)
		file.Check(t, obj, f.Precision())
	}
	return obj
}
   198  
// PutTestContents puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove
//
// The object is uploaded with no MIME type and no metadata.
func PutTestContents(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool) fs.Object {
	return PutTestContentsMetadata(ctx, t, f, file, contents, check, "", nil)
}
   203  
   204  // testPut puts file with random contents to the remote
   205  func testPut(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
   206  	contents := random.String(100)
   207  	return contents, PutTestContents(ctx, t, f, file, contents, true)
   208  }
   209  
   210  // testPutMimeType puts file with random contents to the remote and the mime type given
   211  func testPutMimeType(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, mimeType string, metadata fs.Metadata) (string, fs.Object) {
   212  	contents := random.String(100)
   213  	return contents, PutTestContentsMetadata(ctx, t, f, file, contents, true, mimeType, metadata)
   214  }
   215  
// testPutLarge puts file to the remote, checks it and removes it on success.
//
// If stream is set, then it uploads the file with size -1
func testPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, stream bool) {
	var (
		err        error
		obj        fs.Object
		uploadHash *hash.MultiHasher
	)
	retry(t, "PutLarge", func() error {
		// Generate deterministic pattern data of the requested size and
		// hash it on the way up so the expected checksums can be
		// recorded in file.Hashes afterwards.
		r := readers.NewPatternReader(file.Size)
		uploadHash = hash.NewMultiHasher()
		in := io.TeeReader(r, uploadHash)

		size := file.Size
		if stream {
			// Advertise an unknown size to exercise streaming uploads
			size = -1
		}
		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, size, true, nil, nil)
		obj, err = f.Put(ctx, in, obji)
		if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles {
			t.Skip("Can't upload zero length files")
		}
		return err
	})
	file.Hashes = uploadHash.Sums()
	file.Check(t, obj, f.Precision())

	// Re-read the object and check again
	obj = fstest.NewObject(ctx, t, f, file.Path)
	file.Check(t, obj, f.Precision())

	// Download the object and check it is OK
	downloadHash := hash.NewMultiHasher()
	download, err := obj.Open(ctx)
	require.NoError(t, err)
	n, err := io.Copy(downloadHash, download)
	require.NoError(t, err)
	assert.Equal(t, file.Size, n)
	require.NoError(t, download.Close())
	assert.Equal(t, file.Hashes, downloadHash.Sums())

	// Remove the object
	require.NoError(t, obj.Remove(ctx))
}
   261  
// TestPutLarge puts file to the remote, checks it and removes it on success.
//
// The upload is done with a known size (file.Size).
func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
	testPutLarge(ctx, t, f, file, false)
}
   266  
// TestPutLargeStreamed puts file of unknown size to the remote, checks it and removes it on success.
//
// The upload is done with size -1 to exercise streaming uploads.
func TestPutLargeStreamed(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
	testPutLarge(ctx, t, f, file, true)
}
   271  
   272  // ReadObject reads the contents of an object as a string
   273  func ReadObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string {
   274  	what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options)
   275  	in, err := obj.Open(ctx, options...)
   276  	require.NoError(t, err, what)
   277  	var r io.Reader = in
   278  	if limit >= 0 {
   279  		r = &io.LimitedReader{R: r, N: limit}
   280  	}
   281  	contents, err := io.ReadAll(r)
   282  	require.NoError(t, err, what)
   283  	err = in.Close()
   284  	require.NoError(t, err, what)
   285  	return string(contents)
   286  }
   287  
// ExtraConfigItem describes a config item for the tests.
//
// Name is the remote name, Key the config key within that remote, and
// Value the value it is set to (applied via config.FileSet in Run).
type ExtraConfigItem struct{ Name, Key, Value string }
   290  
// Opt is options for Run
type Opt struct {
	// RemoteName is the remote to test; overridden by *fstest.RemoteName if set
	RemoteName string
	// NilObject is a nil fs.Object from the backend under test
	// (presumably used for interface checks — see callers of Run)
	NilObject fs.Object
	// ExtraConfig is applied to the config file before the tests start
	ExtraConfig []ExtraConfigItem
	// ChunkedUpload configures the TestFsPutChunked limits
	ChunkedUpload                   ChunkedUploadConfig
	SkipBadWindowsCharacters        bool     // skips unusable characters for windows if set
	SkipFsMatch                     bool     // if set skip exact matching of Fs value
	TiersToTest                     []string // List of tiers which can be tested in setTier test
	UnimplementableFsMethods        []string // List of Fs methods which can't be implemented in this wrapping Fs
	UnimplementableObjectMethods    []string // List of Object methods which can't be implemented in this wrapping Fs
	UnimplementableDirectoryMethods []string // List of Directory methods which can't be implemented in this wrapping Fs
	SkipFsCheckWrap                 bool     // if set skip FsCheckWrap
	SkipObjectCheckWrap             bool     // if set skip ObjectCheckWrap
	SkipDirectoryCheckWrap          bool     // if set skip DirectoryCheckWrap
	SkipInvalidUTF8                 bool     // if set skip invalid UTF-8 checks
	SkipLeadingDot                  bool     // if set skip leading dot checks
	QuickTestOK                     bool     // if set, run this test with make quicktest
}
   310  
   311  // returns true if x is found in ss
   312  func stringsContains(x string, ss []string) bool {
   313  	for _, s := range ss {
   314  		if x == s {
   315  			return true
   316  		}
   317  	}
   318  	return false
   319  }
   320  
   321  // toUpperASCII returns a copy of the string s with all Unicode
   322  // letters mapped to their upper case.
   323  func toUpperASCII(s string) string {
   324  	return strings.Map(func(r rune) rune {
   325  		if 'a' <= r && r <= 'z' {
   326  			r -= 'a' - 'A'
   327  		}
   328  		return r
   329  	}, s)
   330  }
   331  
   332  // removeConfigID removes any {xyz} parts of the name put in for
   333  // config disambiguation
   334  func removeConfigID(s string) string {
   335  	bra := strings.IndexRune(s, '{')
   336  	ket := strings.IndexRune(s, '}')
   337  	if bra >= 0 && ket > bra {
   338  		s = s[:bra] + s[ket+1:]
   339  	}
   340  	return s
   341  }
   342  
// InternalTestFiles is the state of the remote at the moment the
// internal tests (see InternalTester) are called.
var InternalTestFiles []fstest.Item
   345  
   346  // Run runs the basic integration tests for a remote using the options passed in.
   347  //
   348  // They are structured in a hierarchical way so that dependencies for the tests can be created.
   349  //
   350  // For example some tests require the directory to be created - these
   351  // are inside the "FsMkdir" test.  Some tests require some tests files
   352  // - these are inside the "FsPutFiles" test.
   353  func Run(t *testing.T, opt *Opt) {
   354  	var (
   355  		f             fs.Fs
   356  		remoteName    = opt.RemoteName
   357  		subRemoteName string
   358  		subRemoteLeaf string
   359  		file1         = fstest.Item{
   360  			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
   361  			Path:    "file name.txt",
   362  		}
   363  		file1Contents string
   364  		file1MimeType = "text/csv"
   365  		file1Metadata = fs.Metadata{"rclone-test": "potato"}
   366  		file2         = fstest.Item{
   367  			ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
   368  			Path:    `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
   369  		}
   370  		isLocalRemote        bool
   371  		purged               bool // whether the dir has been purged or not
   372  		ctx                  = context.Background()
   373  		ci                   = fs.GetConfig(ctx)
   374  		unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever
   375  	)
   376  
   377  	if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" && !opt.QuickTestOK {
   378  		t.Skip("quicktest only")
   379  	}
   380  
   381  	// Skip the test if the remote isn't configured
   382  	skipIfNotOk := func(t *testing.T) {
   383  		if f == nil {
   384  			t.Skipf("WARN: %q not configured", remoteName)
   385  		}
   386  	}
   387  
   388  	// Skip if remote is not ListR capable, otherwise set the useListR
   389  	// flag, returning a function to restore its value
   390  	skipIfNotListR := func(t *testing.T) func() {
   391  		skipIfNotOk(t)
   392  		if f.Features().ListR == nil {
   393  			t.Skip("FS has no ListR interface")
   394  		}
   395  		previous := ci.UseListR
   396  		ci.UseListR = true
   397  		return func() {
   398  			ci.UseListR = previous
   399  		}
   400  	}
   401  
   402  	// Skip if remote is not SetTier and GetTier capable
   403  	skipIfNotSetTier := func(t *testing.T) {
   404  		skipIfNotOk(t)
   405  		if !f.Features().SetTier || !f.Features().GetTier {
   406  			t.Skip("FS has no SetTier & GetTier interfaces")
   407  		}
   408  	}
   409  
   410  	// Return true if f (or any of the things it wraps) is bucket
   411  	// based but not at the root.
   412  	isBucketBasedButNotRoot := func(f fs.Fs) bool {
   413  		f = fs.UnWrapFs(f)
   414  		return f.Features().BucketBased && strings.Contains(strings.Trim(f.Root(), "/"), "/")
   415  	}
   416  
   417  	// Initialise the remote
   418  	fstest.Initialise()
   419  
   420  	// Set extra config if supplied
   421  	for _, item := range opt.ExtraConfig {
   422  		config.FileSet(item.Name, item.Key, item.Value)
   423  	}
   424  	if *fstest.RemoteName != "" {
   425  		remoteName = *fstest.RemoteName
   426  	}
   427  	oldFstestRemoteName := fstest.RemoteName
   428  	fstest.RemoteName = &remoteName
   429  	defer func() {
   430  		fstest.RemoteName = oldFstestRemoteName
   431  	}()
   432  	t.Logf("Using remote %q", remoteName)
   433  	var err error
   434  	if remoteName == "" {
   435  		remoteName, err = fstest.LocalRemote()
   436  		require.NoError(t, err)
   437  		isLocalRemote = true
   438  	}
   439  
   440  	// Start any test servers if required
   441  	finish, err := testserver.Start(remoteName)
   442  	require.NoError(t, err)
   443  	defer finish()
   444  
   445  	// Make the Fs we are testing with, initialising the local variables
   446  	// subRemoteName - name of the remote after the TestRemote:
   447  	// subRemoteLeaf - a subdirectory to use under that
   448  	// remote - the result of  fs.NewFs(TestRemote:subRemoteName)
   449  	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
   450  	require.NoError(t, err)
   451  	f, err = fs.NewFs(context.Background(), subRemoteName)
   452  	if err == fs.ErrorNotFoundInConfigFile {
   453  		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
   454  		return
   455  	}
   456  	require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
   457  
   458  	// Get fsInfo which contains type, etc. of the fs
   459  	fsInfo, _, _, _, err := fs.ConfigFs(subRemoteName)
   460  	require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
   461  
   462  	// Skip the rest if it failed
   463  	skipIfNotOk(t)
   464  
   465  	// Check to see if Fs that wrap other Fs implement all the optional methods
   466  	t.Run("FsCheckWrap", func(t *testing.T) {
   467  		skipIfNotOk(t)
   468  		if opt.SkipFsCheckWrap {
   469  			t.Skip("Skipping FsCheckWrap on this Fs")
   470  		}
   471  		ft := new(fs.Features).Fill(ctx, f)
   472  		if ft.UnWrap == nil && !f.Features().Overlay {
   473  			t.Skip("Not a wrapping Fs")
   474  		}
   475  		v := reflect.ValueOf(ft).Elem()
   476  		vType := v.Type()
   477  		for i := 0; i < v.NumField(); i++ {
   478  			vName := vType.Field(i).Name
   479  			if stringsContains(vName, opt.UnimplementableFsMethods) {
   480  				continue
   481  			}
   482  			if stringsContains(vName, unwrappableFsMethods) {
   483  				continue
   484  			}
   485  			field := v.Field(i)
   486  			// skip the bools
   487  			if field.Type().Kind() == reflect.Bool {
   488  				continue
   489  			}
   490  			if field.IsNil() {
   491  				t.Errorf("Missing Fs wrapper for %s", vName)
   492  			}
   493  		}
   494  	})
   495  
   496  	// Check to see if Fs advertises commands and they work and have docs
   497  	t.Run("FsCommand", func(t *testing.T) {
   498  		skipIfNotOk(t)
   499  		doCommand := f.Features().Command
   500  		if doCommand == nil {
   501  			t.Skip("No commands in this remote")
   502  		}
   503  		// Check the correct error is generated
   504  		_, err := doCommand(context.Background(), "NOTFOUND", nil, nil)
   505  		assert.Equal(t, fs.ErrorCommandNotFound, err, "Incorrect error generated on command not found")
   506  		// Check there are some commands in the fsInfo
   507  		fsInfo, _, _, _, err := fs.ConfigFs(remoteName)
   508  		require.NoError(t, err)
   509  		assert.True(t, len(fsInfo.CommandHelp) > 0, "Command is declared, must return some help in CommandHelp")
   510  	})
   511  
   512  	// TestFsRmdirNotFound tests deleting a nonexistent directory
   513  	t.Run("FsRmdirNotFound", func(t *testing.T) {
   514  		skipIfNotOk(t)
   515  		if isBucketBasedButNotRoot(f) {
   516  			t.Skip("Skipping test as non root bucket-based remote")
   517  		}
   518  		err := f.Rmdir(ctx, "")
   519  		assert.Error(t, err, "Expecting error on Rmdir nonexistent")
   520  	})
   521  
   522  	// Make the directory
   523  	err = f.Mkdir(ctx, "")
   524  	require.NoError(t, err)
   525  	fstest.CheckListing(t, f, []fstest.Item{})
   526  
   527  	// TestFsString tests the String method
   528  	t.Run("FsString", func(t *testing.T) {
   529  		skipIfNotOk(t)
   530  		str := f.String()
   531  		require.NotEqual(t, "", str)
   532  	})
   533  
   534  	// TestFsName tests the Name method
   535  	t.Run("FsName", func(t *testing.T) {
   536  		skipIfNotOk(t)
   537  		got := removeConfigID(f.Name())
   538  		var want string
   539  		if isLocalRemote {
   540  			want = "local"
   541  		} else {
   542  			want = remoteName[:strings.LastIndex(remoteName, ":")]
   543  			comma := strings.IndexRune(remoteName, ',')
   544  			if comma >= 0 {
   545  				want = want[:comma]
   546  			}
   547  		}
   548  		require.Equal(t, want, got)
   549  	})
   550  
   551  	// TestFsRoot tests the Root method
   552  	t.Run("FsRoot", func(t *testing.T) {
   553  		skipIfNotOk(t)
   554  		got := f.Root()
   555  		want := subRemoteName
   556  		colon := strings.LastIndex(want, ":")
   557  		if colon >= 0 {
   558  			want = want[colon+1:]
   559  		}
   560  		if isLocalRemote {
   561  			// only check last path element on local
   562  			require.Equal(t, filepath.Base(subRemoteName), filepath.Base(got))
   563  		} else {
   564  			require.Equal(t, want, got)
   565  		}
   566  	})
   567  
   568  	// TestFsRmdirEmpty tests deleting an empty directory
   569  	t.Run("FsRmdirEmpty", func(t *testing.T) {
   570  		skipIfNotOk(t)
   571  		err := f.Rmdir(ctx, "")
   572  		require.NoError(t, err)
   573  	})
   574  
   575  	// TestFsMkdir tests making a directory
   576  	//
   577  	// Tests that require the directory to be made are within this
   578  	t.Run("FsMkdir", func(t *testing.T) {
   579  		skipIfNotOk(t)
   580  
   581  		err := f.Mkdir(ctx, "")
   582  		require.NoError(t, err)
   583  		fstest.CheckListing(t, f, []fstest.Item{})
   584  
   585  		err = f.Mkdir(ctx, "")
   586  		require.NoError(t, err)
   587  
   588  		// TestFsMkdirRmdirSubdir tests making and removing a sub directory
   589  		t.Run("FsMkdirRmdirSubdir", func(t *testing.T) {
   590  			skipIfNotOk(t)
   591  			dir := "dir/subdir"
   592  			err := operations.Mkdir(ctx, f, dir)
   593  			require.NoError(t, err)
   594  			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(ctx, f))
   595  
   596  			err = operations.Rmdir(ctx, f, dir)
   597  			require.NoError(t, err)
   598  			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(ctx, f))
   599  
   600  			err = operations.Rmdir(ctx, f, "dir")
   601  			require.NoError(t, err)
   602  			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f))
   603  		})
   604  
   605  		// TestFsListEmpty tests listing an empty directory
   606  		t.Run("FsListEmpty", func(t *testing.T) {
   607  			skipIfNotOk(t)
   608  			fstest.CheckListing(t, f, []fstest.Item{})
   609  		})
   610  
   611  		// TestFsListDirEmpty tests listing the directories from an empty directory
   612  		TestFsListDirEmpty := func(t *testing.T) {
   613  			skipIfNotOk(t)
   614  			objs, dirs, err := walk.GetAll(ctx, f, "", true, 1)
   615  			if !f.Features().CanHaveEmptyDirectories {
   616  				if err != fs.ErrorDirNotFound {
   617  					require.NoError(t, err)
   618  				}
   619  			} else {
   620  				require.NoError(t, err)
   621  			}
   622  			assert.Equal(t, []string{}, objsToNames(objs))
   623  			assert.Equal(t, []string{}, dirsToNames(dirs))
   624  		}
   625  		t.Run("FsListDirEmpty", TestFsListDirEmpty)
   626  
   627  		// TestFsListRDirEmpty tests listing the directories from an empty directory using ListR
   628  		t.Run("FsListRDirEmpty", func(t *testing.T) {
   629  			defer skipIfNotListR(t)()
   630  			TestFsListDirEmpty(t)
   631  		})
   632  
   633  		// TestFsListDirNotFound tests listing the directories from an empty directory
   634  		TestFsListDirNotFound := func(t *testing.T) {
   635  			skipIfNotOk(t)
   636  			objs, dirs, err := walk.GetAll(ctx, f, "does not exist", true, 1)
   637  			if !f.Features().CanHaveEmptyDirectories {
   638  				if err != fs.ErrorDirNotFound {
   639  					assert.NoError(t, err)
   640  					assert.Equal(t, 0, len(objs)+len(dirs))
   641  				}
   642  			} else {
   643  				assert.Equal(t, fs.ErrorDirNotFound, err)
   644  			}
   645  		}
   646  		t.Run("FsListDirNotFound", TestFsListDirNotFound)
   647  
   648  		// TestFsListRDirNotFound tests listing the directories from an empty directory using ListR
   649  		t.Run("FsListRDirNotFound", func(t *testing.T) {
   650  			defer skipIfNotListR(t)()
   651  			TestFsListDirNotFound(t)
   652  		})
   653  
   654  		// FsEncoding tests that file name encodings are
   655  		// working by uploading a series of unusual files
   656  		// Must be run in an empty directory
   657  		t.Run("FsEncoding", func(t *testing.T) {
   658  			skipIfNotOk(t)
   659  			if testing.Short() {
   660  				t.Skip("not running with -short")
   661  			}
   662  
   663  			// check no files or dirs as pre-requisite
   664  			fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f))
   665  
   666  			for _, test := range []struct {
   667  				name string
   668  				path string
   669  			}{
   670  				// See lib/encoder/encoder.go for list of things that go here
   671  				{"control chars", "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7F"},
   672  				{"dot", "."},
   673  				{"dot dot", ".."},
   674  				{"punctuation", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"},
   675  				{"leading space", " leading space"},
   676  				{"leading tilde", "~leading tilde"},
   677  				{"leading CR", "\rleading CR"},
   678  				{"leading LF", "\nleading LF"},
   679  				{"leading HT", "\tleading HT"},
   680  				{"leading VT", "\vleading VT"},
   681  				{"leading dot", ".leading dot"},
   682  				{"trailing space", "trailing space "},
   683  				{"trailing CR", "trailing CR\r"},
   684  				{"trailing LF", "trailing LF\n"},
   685  				{"trailing HT", "trailing HT\t"},
   686  				{"trailing VT", "trailing VT\v"},
   687  				{"trailing dot", "trailing dot."},
   688  				{"invalid UTF-8", "invalid utf-8\xfe"},
   689  				{"URL encoding", "test%46.txt"},
   690  			} {
   691  				t.Run(test.name, func(t *testing.T) {
   692  					if opt.SkipInvalidUTF8 && test.name == "invalid UTF-8" {
   693  						t.Skip("Skipping " + test.name)
   694  					}
   695  					if opt.SkipLeadingDot && test.name == "leading dot" {
   696  						t.Skip("Skipping " + test.name)
   697  					}
   698  					// turn raw strings into Standard encoding
   699  					fileName := encoder.Standard.Encode(test.path)
   700  					dirName := fileName
   701  					t.Logf("testing %q", fileName)
   702  					assert.NoError(t, f.Mkdir(ctx, dirName))
   703  					file := fstest.Item{
   704  						ModTime: time.Now(),
   705  						Path:    dirName + "/" + fileName, // test creating a file and dir with that name
   706  					}
   707  					_, o := testPut(context.Background(), t, f, &file)
   708  					fstest.CheckListingWithPrecision(t, f, []fstest.Item{file}, []string{dirName}, fs.GetModifyWindow(ctx, f))
   709  					assert.NoError(t, o.Remove(ctx))
   710  					assert.NoError(t, f.Rmdir(ctx, dirName))
   711  					fstest.CheckListingWithPrecision(t, f, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, f))
   712  				})
   713  			}
   714  		})
   715  
   716  		// TestFsNewObjectNotFound tests not finding an object
   717  		t.Run("FsNewObjectNotFound", func(t *testing.T) {
   718  			skipIfNotOk(t)
   719  			// Object in an existing directory
   720  			o, err := f.NewObject(ctx, "potato")
   721  			assert.Nil(t, o)
   722  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   723  			// Now try an object in a nonexistent directory
   724  			o, err = f.NewObject(ctx, "directory/not/found/potato")
   725  			assert.Nil(t, o)
   726  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   727  		})
   728  
   729  		// TestFsPutError tests uploading a file where there is an error
   730  		//
   731  		// It makes sure that aborting a file half way through does not create
   732  		// a file on the remote.
   733  		//
   734  		// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$'
   735  		t.Run("FsPutError", func(t *testing.T) {
   736  			skipIfNotOk(t)
   737  
   738  			var N int64 = 5 * 1024
   739  			if *fstest.SizeLimit > 0 && N > *fstest.SizeLimit {
   740  				N = *fstest.SizeLimit
   741  				t.Logf("Reduce file size due to limit %d", N)
   742  			}
   743  
   744  			// Read N bytes then produce an error
   745  			contents := random.String(int(N))
   746  			buf := bytes.NewBufferString(contents)
   747  			er := &readers.ErrorReader{Err: errors.New("potato")}
   748  			in := io.MultiReader(buf, er)
   749  
   750  			obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil)
   751  			_, err := f.Put(ctx, in, obji)
   752  			// assert.Nil(t, obj) - FIXME some remotes return the object even on nil
   753  			assert.NotNil(t, err)
   754  
   755  			obj, err := f.NewObject(ctx, file2.Path)
   756  			assert.Nil(t, obj)
   757  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   758  		})
   759  
   760  		t.Run("FsPutZeroLength", func(t *testing.T) {
   761  			skipIfNotOk(t)
   762  
   763  			TestPutLarge(ctx, t, f, &fstest.Item{
   764  				ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
   765  				Path:    "zero-length-file",
   766  				Size:    int64(0),
   767  			})
   768  		})
   769  
   770  		t.Run("FsOpenWriterAt", func(t *testing.T) {
   771  			skipIfNotOk(t)
   772  			openWriterAt := f.Features().OpenWriterAt
   773  			if openWriterAt == nil {
   774  				t.Skip("FS has no OpenWriterAt interface")
   775  			}
   776  			path := "writer-at-subdir/writer-at-file"
   777  			out, err := openWriterAt(ctx, path, -1)
   778  			require.NoError(t, err)
   779  
   780  			var n int
   781  			n, err = out.WriteAt([]byte("def"), 3)
   782  			assert.NoError(t, err)
   783  			assert.Equal(t, 3, n)
   784  			n, err = out.WriteAt([]byte("ghi"), 6)
   785  			assert.NoError(t, err)
   786  			assert.Equal(t, 3, n)
   787  			n, err = out.WriteAt([]byte("abc"), 0)
   788  			assert.NoError(t, err)
   789  			assert.Equal(t, 3, n)
   790  
   791  			assert.NoError(t, out.Close())
   792  
   793  			obj := fstest.NewObject(ctx, t, f, path)
   794  			assert.Equal(t, "abcdefghi", ReadObject(ctx, t, obj, -1), "contents of file differ")
   795  
   796  			assert.NoError(t, obj.Remove(ctx))
   797  			assert.NoError(t, f.Rmdir(ctx, "writer-at-subdir"))
   798  		})
   799  
		// TestFsOpenChunkWriter tests writing in chunks to fs
		// then reads back the contents and check if they match
		// go test -v -run 'TestIntegration/FsMkdir/FsOpenChunkWriter'
		t.Run("FsOpenChunkWriter", func(t *testing.T) {
			skipIfNotOk(t)
			openChunkWriter := f.Features().OpenChunkWriter
			if openChunkWriter == nil {
				t.Skip("FS has no OpenChunkWriter interface")
			}
			// Two full sized chunks plus one smaller final chunk
			size5MBs := 5 * 1024 * 1024
			contents1 := random.String(size5MBs)
			contents2 := random.String(size5MBs)

			size1MB := 1 * 1024 * 1024
			contents3 := random.String(size1MB)

			// The ObjectInfo remote is deliberately wrong - the backend
			// must use the remote passed to OpenChunkWriter, not the one
			// carried in the source ObjectInfo
			path := "writer-at-subdir/writer-at-file"
			objSrc := object.NewStaticObjectInfo(path+"-WRONG-REMOTE", file1.ModTime, -1, true, nil, nil)
			_, out, err := openChunkWriter(ctx, path, objSrc, &fs.ChunkOption{
				ChunkSize: int64(size5MBs),
			})
			require.NoError(t, err)

			// Write the chunks out of order (1, 2, 0) to check the
			// writer assembles them by chunk number; each call reports
			// how many bytes it consumed
			var n int64
			n, err = out.WriteChunk(ctx, 1, strings.NewReader(contents2))
			assert.NoError(t, err)
			assert.Equal(t, int64(size5MBs), n)
			n, err = out.WriteChunk(ctx, 2, strings.NewReader(contents3))
			assert.NoError(t, err)
			assert.Equal(t, int64(size1MB), n)
			n, err = out.WriteChunk(ctx, 0, strings.NewReader(contents1))
			assert.NoError(t, err)
			assert.Equal(t, int64(size5MBs), n)

			assert.NoError(t, out.Close(ctx))

			// Read the object back and check the reassembled contents
			// are in chunk number order
			obj := fstest.NewObject(ctx, t, f, path)
			originalContents := contents1 + contents2 + contents3
			fileContents := ReadObject(ctx, t, obj, -1)
			isEqual := originalContents == fileContents
			assert.True(t, isEqual, "contents of file differ")

			// Tidy up
			assert.NoError(t, obj.Remove(ctx))
			assert.NoError(t, f.Rmdir(ctx, "writer-at-subdir"))
		})
   845  
   846  		// TestFsChangeNotify tests that changes are properly
   847  		// propagated
   848  		//
   849  		// go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose
   850  		t.Run("FsChangeNotify", func(t *testing.T) {
   851  			skipIfNotOk(t)
   852  
   853  			// Check have ChangeNotify
   854  			doChangeNotify := f.Features().ChangeNotify
   855  			if doChangeNotify == nil {
   856  				t.Skip("FS has no ChangeNotify interface")
   857  			}
   858  
   859  			err := operations.Mkdir(ctx, f, "dir")
   860  			require.NoError(t, err)
   861  
   862  			pollInterval := make(chan time.Duration)
   863  			dirChanges := map[string]struct{}{}
   864  			objChanges := map[string]struct{}{}
   865  			doChangeNotify(ctx, func(x string, e fs.EntryType) {
   866  				fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e)
   867  				if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) {
   868  					fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e)
   869  					return
   870  				}
   871  				if e == fs.EntryDirectory {
   872  					dirChanges[x] = struct{}{}
   873  				} else if e == fs.EntryObject {
   874  					objChanges[x] = struct{}{}
   875  				}
   876  			}, pollInterval)
   877  			defer func() { close(pollInterval) }()
   878  			pollInterval <- time.Second
   879  
   880  			var dirs []string
   881  			for _, idx := range []int{1, 3, 2} {
   882  				dir := fmt.Sprintf("dir/subdir%d", idx)
   883  				err = operations.Mkdir(ctx, f, dir)
   884  				require.NoError(t, err)
   885  				dirs = append(dirs, dir)
   886  			}
   887  
   888  			var objs []fs.Object
   889  			for _, idx := range []int{2, 4, 3} {
   890  				file := fstest.Item{
   891  					ModTime: time.Now(),
   892  					Path:    fmt.Sprintf("dir/file%d", idx),
   893  				}
   894  				_, o := testPut(ctx, t, f, &file)
   895  				objs = append(objs, o)
   896  			}
   897  
   898  			// Looks for each item in wants in changes -
   899  			// if they are all found it returns true
   900  			contains := func(changes map[string]struct{}, wants []string) bool {
   901  				for _, want := range wants {
   902  					_, ok := changes[want]
   903  					if !ok {
   904  						return false
   905  					}
   906  				}
   907  				return true
   908  			}
   909  
   910  			// Wait a little while for the changes to come in
   911  			wantDirChanges := []string{"dir/subdir1", "dir/subdir3", "dir/subdir2"}
   912  			wantObjChanges := []string{"dir/file2", "dir/file4", "dir/file3"}
   913  			ok := false
   914  			for tries := 1; tries < 10; tries++ {
   915  				ok = contains(dirChanges, wantDirChanges) && contains(objChanges, wantObjChanges)
   916  				if ok {
   917  					break
   918  				}
   919  				t.Logf("Try %d/10 waiting for dirChanges and objChanges", tries)
   920  				time.Sleep(3 * time.Second)
   921  			}
   922  			if !ok {
   923  				t.Errorf("%+v does not contain %+v or \n%+v does not contain %+v", dirChanges, wantDirChanges, objChanges, wantObjChanges)
   924  			}
   925  
   926  			// tidy up afterwards
   927  			for _, o := range objs {
   928  				assert.NoError(t, o.Remove(ctx))
   929  			}
   930  			dirs = append(dirs, "dir")
   931  			for _, dir := range dirs {
   932  				assert.NoError(t, f.Rmdir(ctx, dir))
   933  			}
   934  		})
   935  
   936  		// TestFsPut files writes file1, file2 and tests an update
   937  		//
   938  		// Tests that require file1, file2 are within this
   939  		t.Run("FsPutFiles", func(t *testing.T) {
   940  			skipIfNotOk(t)
   941  			file1Contents, _ = testPut(ctx, t, f, &file1)
   942  			/* file2Contents = */ testPut(ctx, t, f, &file2)
   943  			file1Contents, _ = testPutMimeType(ctx, t, f, &file1, file1MimeType, file1Metadata)
   944  			// Note that the next test will check there are no duplicated file names
   945  
			// TestFsListDirFile2 tests the files are correctly uploaded by doing
			// Depth 1 directory listings
			TestFsListDirFile2 := func(t *testing.T) {
				skipIfNotOk(t)
				// list checks a depth 1 listing of dir contains exactly the
				// wanted names, retrying up to *fstest.ListRetries times to
				// allow for eventual consistency
				list := func(dir string, expectedDirNames, expectedObjNames []string) {
					var objNames, dirNames []string
					for i := 1; i <= *fstest.ListRetries; i++ {
						objs, dirs, err := walk.GetAll(ctx, f, dir, true, 1)
						// retry once immediately if the dir hasn't appeared yet
						if errors.Is(err, fs.ErrorDirNotFound) {
							objs, dirs, err = walk.GetAll(ctx, f, dir, true, 1)
						}
						require.NoError(t, err)
						objNames = objsToNames(objs)
						dirNames = dirsToNames(dirs)
						// Stop retrying once at least the expected number of
						// entries are visible - the asserts below do the
						// exact comparison
						if len(objNames) >= len(expectedObjNames) && len(dirNames) >= len(expectedDirNames) {
							break
						}
						t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, *fstest.ListRetries)
						time.Sleep(1 * time.Second)
					}
					assert.Equal(t, expectedDirNames, dirNames)
					assert.Equal(t, expectedObjNames, objNames)
				}
				// Walk upwards from file2's directory to the root, checking
				// each level lists its child dir (or file2 at the deepest
				// level, and file1 at the root)
				dir := file2.Path
				deepest := true
				for dir != "" {
					expectedObjNames := []string{}
					expectedDirNames := []string{}
					child := dir
					dir = path.Dir(dir)
					if dir == "." {
						dir = ""
						expectedObjNames = append(expectedObjNames, file1.Path)
					}
					if deepest {
						expectedObjNames = append(expectedObjNames, file2.Path)
						deepest = false
					} else {
						expectedDirNames = append(expectedDirNames, child)
					}
					list(dir, expectedDirNames, expectedObjNames)
				}
			}
			t.Run("FsListDirFile2", TestFsListDirFile2)

			// TestFsListRDirFile2 tests the files are correctly uploaded by doing
			// Depth 1 directory listings using ListR
			t.Run("FsListRDirFile2", func(t *testing.T) {
				defer skipIfNotListR(t)()
				TestFsListDirFile2(t)
			})
   997  
   998  			// Test the files are all there with walk.ListR recursive listings
   999  			t.Run("FsListR", func(t *testing.T) {
  1000  				skipIfNotOk(t)
  1001  				objs, dirs, err := walk.GetAll(ctx, f, "", true, -1)
  1002  				require.NoError(t, err)
  1003  				assert.Equal(t, []string{
  1004  					"hello? sausage",
  1005  					"hello? sausage/êé",
  1006  					"hello? sausage/êé/Hello, 世界",
  1007  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
  1008  				}, dirsToNames(dirs))
  1009  				assert.Equal(t, []string{
  1010  					"file name.txt",
  1011  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
  1012  				}, objsToNames(objs))
  1013  			})
  1014  
  1015  			// Test the files are all there with
  1016  			// walk.ListR recursive listings on a sub dir
  1017  			t.Run("FsListRSubdir", func(t *testing.T) {
  1018  				skipIfNotOk(t)
  1019  				objs, dirs, err := walk.GetAll(ctx, f, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1)
  1020  				require.NoError(t, err)
  1021  				assert.Equal(t, []string{
  1022  					"hello? sausage/êé",
  1023  					"hello? sausage/êé/Hello, 世界",
  1024  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
  1025  				}, dirsToNames(dirs))
  1026  				assert.Equal(t, []string{
  1027  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
  1028  				}, objsToNames(objs))
  1029  			})
  1030  
  1031  			// TestFsListDirRoot tests that DirList works in the root
  1032  			TestFsListDirRoot := func(t *testing.T) {
  1033  				skipIfNotOk(t)
  1034  				rootRemote, err := fs.NewFs(context.Background(), remoteName)
  1035  				require.NoError(t, err)
  1036  				_, dirs, err := walk.GetAll(ctx, rootRemote, "", true, 1)
  1037  				require.NoError(t, err)
  1038  				assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
  1039  			}
  1040  			t.Run("FsListDirRoot", TestFsListDirRoot)
  1041  
  1042  			// TestFsListRDirRoot tests that DirList works in the root using ListR
  1043  			t.Run("FsListRDirRoot", func(t *testing.T) {
  1044  				defer skipIfNotListR(t)()
  1045  				TestFsListDirRoot(t)
  1046  			})
  1047  
			// TestFsListSubdir tests List works for a subdirectory
			TestFsListSubdir := func(t *testing.T) {
				skipIfNotOk(t)
				fileName := file2.Path
				var err error
				var objs []fs.Object
				var dirs []fs.Directory
				// NOTE(review): both iterations list the same directory -
				// this looks like a retry for eventual consistency, but the
				// first result is simply discarded with no sleep in between;
				// confirm whether this loop is still needed.
				for i := 0; i < 2; i++ {
					// dir is file2's deepest directory with the trailing
					// slash from path.Split stripped off
					dir, _ := path.Split(fileName)
					dir = dir[:len(dir)-1]
					objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1)
				}
				require.NoError(t, err)
				// The deepest directory contains exactly file2 and no subdirs
				require.Len(t, objs, 1)
				assert.Equal(t, fileName, objs[0].Remote())
				require.Len(t, dirs, 0)
			}
			t.Run("FsListSubdir", TestFsListSubdir)

			// TestFsListRSubdir tests List works for a subdirectory using ListR
			t.Run("FsListRSubdir", func(t *testing.T) {
				defer skipIfNotListR(t)()
				TestFsListSubdir(t)
			})
  1072  
  1073  			// TestFsListLevel2 tests List works for 2 levels
  1074  			TestFsListLevel2 := func(t *testing.T) {
  1075  				skipIfNotOk(t)
  1076  				objs, dirs, err := walk.GetAll(ctx, f, "", true, 2)
  1077  				if err == fs.ErrorLevelNotSupported {
  1078  					return
  1079  				}
  1080  				require.NoError(t, err)
  1081  				assert.Equal(t, []string{file1.Path}, objsToNames(objs))
  1082  				assert.Equal(t, []string{"hello? sausage", "hello? sausage/êé"}, dirsToNames(dirs))
  1083  			}
  1084  			t.Run("FsListLevel2", TestFsListLevel2)
  1085  
  1086  			// TestFsListRLevel2 tests List works for 2 levels using ListR
  1087  			t.Run("FsListRLevel2", func(t *testing.T) {
  1088  				defer skipIfNotListR(t)()
  1089  				TestFsListLevel2(t)
  1090  			})
  1091  
			// TestFsListFile1 tests file present
			t.Run("FsListFile1", func(t *testing.T) {
				skipIfNotOk(t)
				fstest.CheckListing(t, f, []fstest.Item{file1, file2})
			})

			// TestFsNewObject tests NewObject
			t.Run("FsNewObject", func(t *testing.T) {
				skipIfNotOk(t)
				// Fetch file1 via NewObject and check all its attributes
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				file1.Check(t, obj, f.Precision())
			})

			// FsNewObjectCaseInsensitive tests NewObject on a case insensitive file system
			t.Run("FsNewObjectCaseInsensitive", func(t *testing.T) {
				skipIfNotOk(t)
				if !f.Features().CaseInsensitive {
					t.Skip("Not Case Insensitive")
				}
				// Looking the file up with an upper-cased name should still
				// find it on a case insensitive backend
				obj := fstest.NewObject(ctx, t, f, toUpperASCII(file1.Path))
				file1.Check(t, obj, f.Precision())
				// Same check for a file inside a directory
				t.Run("Dir", func(t *testing.T) {
					obj := fstest.NewObject(ctx, t, f, toUpperASCII(file2.Path))
					file2.Check(t, obj, f.Precision())
				})
			})

			// TestFsListFile1and2 tests two files present
			t.Run("FsListFile1and2", func(t *testing.T) {
				skipIfNotOk(t)
				fstest.CheckListing(t, f, []fstest.Item{file1, file2})
			})

			// TestFsNewObjectDir tests NewObject on a directory which should produce fs.ErrorIsDir if possible or fs.ErrorObjectNotFound if not
			t.Run("FsNewObjectDir", func(t *testing.T) {
				skipIfNotOk(t)
				dir := path.Dir(file2.Path)
				obj, err := f.NewObject(ctx, dir)
				assert.Nil(t, obj)
				assert.True(t, err == fs.ErrorIsDir || err == fs.ErrorObjectNotFound, fmt.Sprintf("Wrong error: expecting fs.ErrorIsDir or fs.ErrorObjectNotFound but got: %#v", err))
			})
  1133  
			// TestFsPurge tests Purge
			t.Run("FsPurge", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have Purge
				doPurge := f.Features().Purge
				if doPurge == nil {
					t.Skip("FS has no Purge interface")
				}

				// put up a file to purge
				fileToPurge := fstest.Item{
					ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
					Path:    "dirToPurge/fileToPurge.txt",
				}
				_, _ = testPut(ctx, t, f, &fileToPurge)

				// Check the file and its directory arrived
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2, fileToPurge}, []string{
					"dirToPurge",
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, fs.GetModifyWindow(ctx, f))

				// Now purge it (note: err is assigned from an enclosing scope)
				err = operations.Purge(ctx, f, "dirToPurge")
				require.NoError(t, err)

				// Directory and its contents should be gone; the rest untouched
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2}, []string{
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, fs.GetModifyWindow(ctx, f))
			})
  1170  
			// TestFsPurgeRoot tests Purge on the Root of an Fs rooted at a subdirectory
			t.Run("FsPurgeRoot", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have Purge
				doPurge := f.Features().Purge
				if doPurge == nil {
					t.Skip("FS has no Purge interface")
				}

				// put up a file to purge
				fileToPurge := fstest.Item{
					ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
					Path:    "dirToPurgeFromRoot/fileToPurgeFromRoot.txt",
				}
				_, _ = testPut(ctx, t, f, &fileToPurge)

				// Check the file and its directory arrived
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2, fileToPurge}, []string{
					"dirToPurgeFromRoot",
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, fs.GetModifyWindow(ctx, f))

				// Create a new Fs pointing at the directory
				// (this remoteName deliberately shadows the outer one)
				remoteName := subRemoteName + "/" + "dirToPurgeFromRoot"
				fPurge, err := fs.NewFs(context.Background(), remoteName)
				require.NoError(t, err)

				// Now purge it from the root
				err = operations.Purge(ctx, fPurge, "")
				require.NoError(t, err)

				// Directory and its contents should be gone; the rest untouched
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2}, []string{
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, fs.GetModifyWindow(ctx, f))
			})
  1212  
			// TestFsListRootedSubdir tests putting and listing with an Fs that is rooted at a subdirectory 2 levels down
			TestFsListRootedSubdir := func(t *testing.T) {
				skipIfNotOk(t)
				// Fs rooted at "hello? sausage/êé" - cache.Get reuses an
				// existing Fs if one is already open on this path
				newF, err := cache.Get(ctx, subRemoteName+"/hello? sausage/êé")
				assert.NoError(t, err)
				// Put a file several levels below the new root
				nestedFile := fstest.Item{
					ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
					Path:    "a/b/c/d/e.txt",
				}
				_, _ = testPut(ctx, t, newF, &nestedFile)

				// Both z.txt (pre-existing, now relative to the new root)
				// and the nested file should be listed
				objs, dirs, err := walk.GetAll(ctx, newF, "", true, 10)
				require.NoError(t, err)
				assert.Equal(t, []string{`Hello, 世界/ " ' @ < > & ? + ≠/z.txt`, nestedFile.Path}, objsToNames(objs))
				assert.Equal(t, []string{`Hello, 世界`, `Hello, 世界/ " ' @ < > & ? + ≠`, "a", "a/b", "a/b/c", "a/b/c/d"}, dirsToNames(dirs))

				// cleanup
				err = operations.Purge(ctx, newF, "a")
				require.NoError(t, err)
			}
			t.Run("FsListRootedSubdir", TestFsListRootedSubdir)
  1234  
			// TestFsCopy tests server-side Copy
			t.Run("FsCopy", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have Copy
				doCopy := f.Features().Copy
				if doCopy == nil {
					t.Skip("FS has no Copier interface")
				}

				// Test with file2 so have + and ' ' in file name
				var file2Copy = file2
				file2Copy.Path += "-copy"

				// do the copy
				src := fstest.NewObject(ctx, t, f, file2.Path)
				dst, err := doCopy(ctx, src, file2Copy.Path)
				if err == fs.ErrorCantCopy {
					t.Skip("FS can't copy")
				}
				require.NoError(t, err, fmt.Sprintf("Error: %#v", err))

				// check file exists in new listing
				fstest.CheckListing(t, f, []fstest.Item{file1, file2, file2Copy})

				// Check dst lightly - list above has checked ModTime/Hashes
				assert.Equal(t, file2Copy.Path, dst.Remote())

				// check that mutating dst does not mutate src
				err = dst.SetModTime(ctx, fstest.Time("2004-03-03T04:05:06.499999999Z"))
				if err != fs.ErrorCantSetModTimeWithoutDelete && err != fs.ErrorCantSetModTime {
					assert.NoError(t, err)
					assert.False(t, src.ModTime(ctx).Equal(dst.ModTime(ctx)), "mutating dst should not mutate src -- is it Copying by pointer?")
				}

				// Delete copy
				err = dst.Remove(ctx)
				require.NoError(t, err)

				// Test that server side copying files does the correct thing with metadata
				t.Run("Metadata", func(t *testing.T) {
					if !f.Features().WriteMetadata {
						t.Skip("Skipping test as can't write metadata")
					}
					// Enable metadata support in a copy of the config
					ctx, ci := fs.AddConfig(ctx)
					ci.Metadata = true

					// Create file with metadata
					const srcName = "test metadata copy.txt"
					const dstName = "test metadata copied.txt"
					t1 := fstest.Time("2003-02-03T04:05:06.499999999Z")
					t2 := fstest.Time("2004-03-03T04:05:06.499999999Z")
					fileSrc := fstest.NewItem(srcName, srcName, t1)
					contents := random.String(100)
					var testMetadata = fs.Metadata{
						// System metadata supported by all backends
						"mtime": t1.Format(time.RFC3339Nano),
						// User metadata
						"potato": "jersey",
					}
					oSrc := PutTestContentsMetadata(ctx, t, f, &fileSrc, contents, true, "text/plain", testMetadata)
					fstest.CheckEntryMetadata(ctx, t, f, oSrc, testMetadata)

					// Copy it with --metadata-set
					ci.MetadataSet = fs.Metadata{
						// System metadata supported by all backends
						"mtime": t2.Format(time.RFC3339Nano),
						// User metadata
						"potato": "royal",
					}
					oDst, err := doCopy(ctx, oSrc, dstName)
					require.NoError(t, err)
					fileDst := fileSrc
					fileDst.Path = dstName
					fileDst.ModTime = t2
					fstest.CheckListing(t, f, []fstest.Item{file1, file2, fileSrc, fileDst})

					// Check metadata is correct, both on the returned object
					// and on a freshly fetched one
					fstest.CheckEntryMetadata(ctx, t, f, oDst, ci.MetadataSet)
					oDst = fstest.NewObject(ctx, t, f, dstName)
					fstest.CheckEntryMetadata(ctx, t, f, oDst, ci.MetadataSet)

					// Remove test files
					require.NoError(t, oSrc.Remove(ctx))
					require.NoError(t, oDst.Remove(ctx))
				})
			})
  1322  
			// TestFsMove tests server-side Move (rename)
			t.Run("FsMove", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have Move
				doMove := f.Features().Move
				if doMove == nil {
					t.Skip("FS has no Mover interface")
				}

				// state of files now:
				// 1: file name.txt
				// 2: hello sausage?/../z.txt

				var file1Move = file1
				var file2Move = file2

				// check happy path, i.e. no naming conflicts when rename and move are two
				// separate operations
				file2Move.Path = "other.txt"
				src := fstest.NewObject(ctx, t, f, file2.Path)
				dst, err := doMove(ctx, src, file2Move.Path)
				if err == fs.ErrorCantMove {
					t.Skip("FS can't move")
				}
				require.NoError(t, err)
				// check file exists in new listing
				fstest.CheckListing(t, f, []fstest.Item{file1, file2Move})
				// Check dst lightly - list above has checked ModTime/Hashes
				assert.Equal(t, file2Move.Path, dst.Remote())
				// 1: file name.txt
				// 2: other.txt

				// Check conflict on "rename, then move"
				file1Move.Path = "moveTest/other.txt"
				src = fstest.NewObject(ctx, t, f, file1.Path)
				_, err = doMove(ctx, src, file1Move.Path)
				require.NoError(t, err)
				fstest.CheckListing(t, f, []fstest.Item{file1Move, file2Move})
				// 1: moveTest/other.txt
				// 2: other.txt

				// Check conflict on "move, then rename"
				src = fstest.NewObject(ctx, t, f, file1Move.Path)
				_, err = doMove(ctx, src, file1.Path)
				require.NoError(t, err)
				fstest.CheckListing(t, f, []fstest.Item{file1, file2Move})
				// 1: file name.txt
				// 2: other.txt

				// Move file2 back to its original place
				src = fstest.NewObject(ctx, t, f, file2Move.Path)
				_, err = doMove(ctx, src, file2.Path)
				require.NoError(t, err)
				fstest.CheckListing(t, f, []fstest.Item{file1, file2})
				// 1: file name.txt
				// 2: hello sausage?/../z.txt

				// Tidy up moveTest directory
				require.NoError(t, f.Rmdir(ctx, "moveTest"))

				// Test that server side moving files does the correct thing with metadata
				t.Run("Metadata", func(t *testing.T) {
					if !f.Features().WriteMetadata {
						t.Skip("Skipping test as can't write metadata")
					}
					// Enable metadata support in a copy of the config
					ctx, ci := fs.AddConfig(ctx)
					ci.Metadata = true

					// Create file with metadata
					const name = "test metadata move.txt"
					const newName = "test metadata moved.txt"
					t1 := fstest.Time("2003-02-03T04:05:06.499999999Z")
					t2 := fstest.Time("2004-03-03T04:05:06.499999999Z")
					file := fstest.NewItem(name, name, t1)
					contents := random.String(100)
					var testMetadata = fs.Metadata{
						// System metadata supported by all backends
						"mtime": t1.Format(time.RFC3339Nano),
						// User metadata
						"potato": "jersey",
					}
					o := PutTestContentsMetadata(ctx, t, f, &file, contents, true, "text/plain", testMetadata)
					fstest.CheckEntryMetadata(ctx, t, f, o, testMetadata)

					// Move it with --metadata-set
					ci.MetadataSet = fs.Metadata{
						// System metadata supported by all backends
						"mtime": t2.Format(time.RFC3339Nano),
						// User metadata
						"potato": "royal",
					}
					newO, err := doMove(ctx, o, newName)
					require.NoError(t, err)
					file.Path = newName
					file.ModTime = t2
					fstest.CheckListing(t, f, []fstest.Item{file1, file2, file})

					// Check metadata is correct, both on the returned object
					// and on a freshly fetched one
					fstest.CheckEntryMetadata(ctx, t, f, newO, ci.MetadataSet)
					newO = fstest.NewObject(ctx, t, f, newName)
					fstest.CheckEntryMetadata(ctx, t, f, newO, ci.MetadataSet)

					// Remove test file
					require.NoError(t, newO.Remove(ctx))
				})
			})
  1429  
			// Contract of the DirMove feature being tested below:
			//
			// Move src to this remote using server-side move operations.
			// Will only be called if src.Fs().Name() == f.Name().
			// If it isn't possible then return fs.ErrorCantDirMove.
			// If destination exists then return fs.ErrorDirExists.

			// TestFsDirMove tests DirMove
			//
			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$
			t.Run("FsDirMove", func(t *testing.T) {
				skipIfNotOk(t)

				// Check have DirMove
				doDirMove := f.Features().DirMove
				if doDirMove == nil {
					t.Skip("FS has no DirMover interface")
				}

				// Check it can't move onto itself
				err := doDirMove(ctx, f, "", "")
				require.Equal(t, fs.ErrorDirExists, err)

				// new remote
				newRemote, _, removeNewRemote, err := fstest.RandomRemote()
				require.NoError(t, err)
				defer removeNewRemote()

				const newName = "new_name/sub_new_name"
				// try the move - the whole of f's root goes to newName on newRemote
				err = newRemote.Features().DirMove(ctx, f, "", newName)
				require.NoError(t, err)

				// check remotes
				// remote should not exist here
				_, err = f.List(ctx, "")
				assert.True(t, errors.Is(err, fs.ErrorDirNotFound))
				//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
				// Everything should now live under newName on newRemote
				file1Copy := file1
				file1Copy.Path = path.Join(newName, file1.Path)
				file2Copy := file2
				file2Copy.Path = path.Join(newName, file2.Path)
				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{file2Copy, file1Copy}, []string{
					"new_name",
					"new_name/sub_new_name",
					"new_name/sub_new_name/hello? sausage",
					"new_name/sub_new_name/hello? sausage/êé",
					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界",
					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, newRemote.Precision())

				// move it back
				err = doDirMove(ctx, newRemote, newName, "")
				require.NoError(t, err)

				// check remotes - f is restored, newRemote keeps the empty "new_name"
				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file2, file1}, []string{
					"hello? sausage",
					"hello? sausage/êé",
					"hello? sausage/êé/Hello, 世界",
					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
				}, f.Precision())
				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{}, []string{
					"new_name",
				}, newRemote.Precision())
			})
  1497  
			// TestFsRmdirFull tests removing a non empty directory
			t.Run("FsRmdirFull", func(t *testing.T) {
				skipIfNotOk(t)
				if isBucketBasedButNotRoot(f) {
					t.Skip("Skipping test as non root bucket-based remote")
				}
				// Rmdir must refuse to delete a non-empty directory
				// (Purge is the operation for that)
				err := f.Rmdir(ctx, "")
				require.Error(t, err, "Expecting error on RMdir on non empty remote")
			})

			// TestFsPrecision tests the Precision of the Fs
			t.Run("FsPrecision", func(t *testing.T) {
				skipIfNotOk(t)
				precision := f.Precision()
				if precision == fs.ModTimeNotSupported {
					return
				}
				// If mod times are supported the precision must be in (0, 1s]
				if precision > time.Second || precision < 0 {
					t.Fatalf("Precision out of range %v", precision)
				}
				// FIXME check expected precision
			})
  1520  
			// TestObjectString tests the Object String method
			t.Run("ObjectString", func(t *testing.T) {
				skipIfNotOk(t)
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				assert.Equal(t, file1.Path, obj.String())
				// A nil object must print as "<nil>" rather than crashing
				if opt.NilObject != nil {
					assert.Equal(t, "<nil>", opt.NilObject.String())
				}
			})

			// TestObjectFs tests the object can be found
			t.Run("ObjectFs", func(t *testing.T) {
				skipIfNotOk(t)
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				// If this is set we don't do the direct comparison of
				// the Fs from the object as it may be different
				if opt.SkipFsMatch {
					return
				}
				testRemote := f
				if obj.Fs() != testRemote {
					// Check to see if this wraps something else
					if doUnWrap := testRemote.Features().UnWrap; doUnWrap != nil {
						testRemote = doUnWrap()
					}
				}
				assert.Equal(t, obj.Fs(), testRemote)
			})

			// TestObjectRemote tests the Remote is correct
			t.Run("ObjectRemote", func(t *testing.T) {
				skipIfNotOk(t)
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				assert.Equal(t, file1.Path, obj.Remote())
			})

			// TestObjectHashes checks all the hashes the object supports
			t.Run("ObjectHashes", func(t *testing.T) {
				skipIfNotOk(t)
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				file1.CheckHashes(t, obj)
			})

			// TestObjectModTime tests the ModTime of the object is correct
			TestObjectModTime := func(t *testing.T) {
				skipIfNotOk(t)
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				file1.CheckModTime(t, obj, obj.ModTime(ctx), f.Precision())
			}
			t.Run("ObjectModTime", TestObjectModTime)
  1571  
  1572  			// TestObjectMimeType tests the MimeType of the object is correct
  1573  			t.Run("ObjectMimeType", func(t *testing.T) {
  1574  				skipIfNotOk(t)
  1575  				features := f.Features()
  1576  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1577  				do, ok := obj.(fs.MimeTyper)
  1578  				if !ok {
  1579  					require.False(t, features.ReadMimeType, "Features.ReadMimeType is set but Object.MimeType method not found")
  1580  					t.Skip("MimeType method not supported")
  1581  				}
  1582  				mimeType := do.MimeType(ctx)
  1583  				if !features.ReadMimeType {
  1584  					require.Equal(t, "", mimeType, "Features.ReadMimeType is not set but Object.MimeType returned a non-empty MimeType")
  1585  				} else if features.WriteMimeType {
  1586  					assert.Equal(t, file1MimeType, mimeType, "can read and write mime types but failed")
  1587  				} else {
  1588  					if strings.ContainsRune(mimeType, ';') {
  1589  						assert.Equal(t, "text/plain; charset=utf-8", mimeType)
  1590  					} else {
  1591  						assert.Equal(t, "text/plain", mimeType)
  1592  					}
  1593  				}
  1594  			})
  1595  
  1596  			// TestObjectMetadata tests the Metadata of the object is correct
  1597  			t.Run("ObjectMetadata", func(t *testing.T) {
  1598  				skipIfNotOk(t)
  1599  				ctx, ci := fs.AddConfig(ctx)
  1600  				ci.Metadata = true
  1601  				features := f.Features()
  1602  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1603  				do, objectHasMetadata := obj.(fs.Metadataer)
  1604  				if objectHasMetadata || features.ReadMetadata || features.WriteMetadata || features.UserMetadata {
  1605  					fsInfo := fs.FindFromFs(f)
  1606  					require.NotNil(t, fsInfo)
  1607  					require.NotNil(t, fsInfo.MetadataInfo, "Object declares metadata support but no MetadataInfo in RegInfo")
  1608  				}
  1609  				if !objectHasMetadata {
  1610  					require.False(t, features.ReadMetadata, "Features.ReadMetadata is set but Object.Metadata method not found")
  1611  					t.Skip("Metadata method not supported")
  1612  				}
  1613  				metadata, err := do.Metadata(ctx)
  1614  				require.NoError(t, err)
  1615  				// check standard metadata
  1616  				for k, v := range metadata {
  1617  					switch k {
  1618  					case "atime", "btime", "mtime":
  1619  						mtime, err := time.Parse(time.RFC3339Nano, v)
  1620  						require.NoError(t, err)
  1621  						if k == "mtime" {
  1622  							fstest.AssertTimeEqualWithPrecision(t, file1.Path, file1.ModTime, mtime, f.Precision())
  1623  						}
  1624  					}
  1625  				}
  1626  				if !features.ReadMetadata {
  1627  					if metadata != nil && !features.Overlay {
  1628  						require.Equal(t, "", metadata, "Features.ReadMetadata is not set but Object.Metadata returned a non nil Metadata: %#v", metadata)
  1629  					}
  1630  				} else if features.WriteMetadata {
  1631  					require.NotNil(t, metadata)
  1632  					if features.UserMetadata {
  1633  						// check all the metadata bits we uploaded are present - there may be more we didn't write
  1634  						for k, v := range file1Metadata {
  1635  							assert.Equal(t, v, metadata[k], "can read and write metadata but failed on key %q (want=%+v, got=%+v)", k, file1Metadata, metadata)
  1636  						}
  1637  					}
  1638  					// Now test we can set the mtime and content-type via the metadata and these take precedence
  1639  					t.Run("mtime", func(t *testing.T) {
  1640  						path := "metadatatest"
  1641  						mtimeModTime := fstest.Time("2002-02-03T04:05:06.499999999Z")
  1642  						modTime := fstest.Time("2003-02-03T04:05:06.499999999Z")
  1643  						item := fstest.NewItem(path, path, modTime)
  1644  						metaMimeType := "application/zip"
  1645  						mimeType := "application/gzip"
  1646  						metadata := fs.Metadata{
  1647  							"mtime":        mtimeModTime.Format(time.RFC3339Nano),
  1648  							"content-type": metaMimeType,
  1649  						}
  1650  						// This checks the mtime is correct also and returns the re-read object
  1651  						_, obj := testPutMimeType(ctx, t, f, &item, mimeType, metadata)
  1652  						defer func() {
  1653  							assert.NoError(t, obj.Remove(ctx))
  1654  						}()
  1655  						// Check content-type got updated too
  1656  						if features.ReadMimeType && features.WriteMimeType {
  1657  							// read the object from scratch
  1658  							o, err := f.NewObject(ctx, path)
  1659  							require.NoError(t, err)
  1660  
  1661  							// Check the mimetype is correct
  1662  							do, ok := o.(fs.MimeTyper)
  1663  							require.True(t, ok)
  1664  							gotMimeType := do.MimeType(ctx)
  1665  							assert.Equal(t, metaMimeType, gotMimeType)
  1666  						}
  1667  					})
  1668  				} // else: Have some metadata here we didn't write - can't really check it!
  1669  			})
  1670  
			// TestObjectSetMetadata tests the SetMetadata of the object
			t.Run("ObjectSetMetadata", func(t *testing.T) {
				skipIfNotOk(t)
				// Enable metadata handling in the config for this test only
				ctx, ci := fs.AddConfig(ctx)
				ci.Metadata = true
				features := f.Features()

				// Test to see if SetMetadata is supported on an existing object before creating a new one
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				_, objectHasSetMetadata := obj.(fs.SetMetadataer)
				if !objectHasSetMetadata {
					t.Skip("SetMetadata method not supported")
				}
				// Overlay backends may expose SetMetadata from the wrapped Fs
				// even when they can't write metadata themselves
				if !features.Overlay {
					require.True(t, features.WriteMetadata, "Features.WriteMetadata is false but Object.SetMetadata found")
				}
				if !features.ReadMetadata {
					t.Skip("SetMetadata can't be tested without ReadMetadata")
				}

				// Create file with metadata
				const fileName = "test set metadata.txt"
				t1 := fstest.Time("2003-02-03T04:05:06.499999999Z")
				t2 := fstest.Time("2004-03-03T04:05:06.499999999Z")
				contents := random.String(100)
				file := fstest.NewItem(fileName, contents, t1)
				var testMetadata = fs.Metadata{
					// System metadata supported by all backends
					"mtime": t1.Format(time.RFC3339Nano),
					// User metadata
					"potato": "jersey",
				}
				obj = PutTestContentsMetadata(ctx, t, f, &file, contents, true, "text/plain", testMetadata)
				fstest.CheckEntryMetadata(ctx, t, f, obj, testMetadata)
				// Re-assert the interface on the freshly uploaded object
				do, objectHasSetMetadata := obj.(fs.SetMetadataer)
				require.True(t, objectHasSetMetadata)

				// Set new metadata
				err := do.SetMetadata(ctx, fs.Metadata{
					// System metadata supported by all backends
					"mtime": t2.Format(time.RFC3339Nano),
					// User metadata
					"potato": "royal",
				})
				if err == fs.ErrorNotImplemented {
					t.Log("SetMetadata returned fs.ErrorNotImplemented")
				} else {
					require.NoError(t, err)
					// The "mtime" key above should have updated the modtime to t2
					file.ModTime = t2
					fstest.CheckListing(t, f, []fstest.Item{file1, file2, file})

					// Check metadata is correct
					fstest.CheckEntryMetadata(ctx, t, f, obj, ci.MetadataSet)
					// Re-read the object fresh from the remote and check again
					obj = fstest.NewObject(ctx, t, f, fileName)
					fstest.CheckEntryMetadata(ctx, t, f, obj, ci.MetadataSet)
				}

				// Remove test file
				require.NoError(t, obj.Remove(ctx))
			})
  1731  
  1732  			// TestObjectSetModTime tests that SetModTime works
  1733  			t.Run("ObjectSetModTime", func(t *testing.T) {
  1734  				skipIfNotOk(t)
  1735  				newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
  1736  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1737  				err := obj.SetModTime(ctx, newModTime)
  1738  				if err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete {
  1739  					t.Log(err)
  1740  					return
  1741  				}
  1742  				require.NoError(t, err)
  1743  				file1.ModTime = newModTime
  1744  				file1.CheckModTime(t, obj, obj.ModTime(ctx), f.Precision())
  1745  				// And make a new object and read it from there too
  1746  				TestObjectModTime(t)
  1747  			})
  1748  
  1749  			// TestObjectSize tests that Size works
  1750  			t.Run("ObjectSize", func(t *testing.T) {
  1751  				skipIfNotOk(t)
  1752  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1753  				assert.Equal(t, file1.Size, obj.Size())
  1754  			})
  1755  
  1756  			// TestObjectOpen tests that Open works
  1757  			t.Run("ObjectOpen", func(t *testing.T) {
  1758  				skipIfNotOk(t)
  1759  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1760  				assert.Equal(t, file1Contents, ReadObject(ctx, t, obj, -1), "contents of file1 differ")
  1761  			})
  1762  
  1763  			// TestObjectOpenSeek tests that Open works with SeekOption
  1764  			t.Run("ObjectOpenSeek", func(t *testing.T) {
  1765  				skipIfNotOk(t)
  1766  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1767  				assert.Equal(t, file1Contents[50:], ReadObject(ctx, t, obj, -1, &fs.SeekOption{Offset: 50}), "contents of file1 differ after seek")
  1768  			})
  1769  
  1770  			// TestObjectOpenRange tests that Open works with RangeOption
  1771  			//
  1772  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|ObjectOpenRange)$'
  1773  			t.Run("ObjectOpenRange", func(t *testing.T) {
  1774  				skipIfNotOk(t)
  1775  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1776  				for _, test := range []struct {
  1777  					ro                 fs.RangeOption
  1778  					wantStart, wantEnd int
  1779  				}{
  1780  					{fs.RangeOption{Start: 5, End: 15}, 5, 16},
  1781  					{fs.RangeOption{Start: 80, End: -1}, 80, 100},
  1782  					{fs.RangeOption{Start: 81, End: 100000}, 81, 100},
  1783  					{fs.RangeOption{Start: -1, End: 20}, 80, 100}, // if start is omitted this means get the final bytes
  1784  					// {fs.RangeOption{Start: -1, End: -1}, 0, 100}, - this seems to work but the RFC doesn't define it
  1785  				} {
  1786  					got := ReadObject(ctx, t, obj, -1, &test.ro)
  1787  					foundAt := strings.Index(file1Contents, got)
  1788  					help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
  1789  					assert.Equal(t, file1Contents[test.wantStart:test.wantEnd], got, help)
  1790  				}
  1791  			})
  1792  
  1793  			// TestObjectPartialRead tests that reading only part of the object does the correct thing
  1794  			t.Run("ObjectPartialRead", func(t *testing.T) {
  1795  				skipIfNotOk(t)
  1796  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1797  				assert.Equal(t, file1Contents[:50], ReadObject(ctx, t, obj, 50), "contents of file1 differ after limited read")
  1798  			})
  1799  
			// TestObjectUpdate tests that Update works
			t.Run("ObjectUpdate", func(t *testing.T) {
				skipIfNotOk(t)
				contents := random.String(200)
				var h *hash.MultiHasher

				file1.Size = int64(len(contents))
				obj := fstest.NewObject(ctx, t, f, file1.Path)
				remoteBefore := obj.Remote()
				// Deliberately pass a different remote name in the ObjectInfo -
				// Update must ignore it and keep writing to the object's own remote
				obji := object.NewStaticObjectInfo(file1.Path+"-should-be-ignored.bin", file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
				retry(t, "Update object", func() error {
					// Recreate the reader and hasher on every attempt so a retry
					// re-uploads the full contents from the start
					buf := bytes.NewBufferString(contents)
					h = hash.NewMultiHasher()
					in := io.TeeReader(buf, h)
					return obj.Update(ctx, in, obji)
				})
				remoteAfter := obj.Remote()
				assert.Equal(t, remoteBefore, remoteAfter, "Remote should not change")
				file1.Hashes = h.Sums()

				// check the object has been updated
				file1.Check(t, obj, f.Precision())

				// Re-read the object and check again
				obj = fstest.NewObject(ctx, t, f, file1.Path)
				file1.Check(t, obj, f.Precision())

				// check contents correct
				assert.Equal(t, contents, ReadObject(ctx, t, obj, -1), "contents of updated file1 differ")
				// Later tests read file1 - record the new contents
				file1Contents = contents
			})
  1831  
  1832  			// TestObjectStorable tests that Storable works
  1833  			t.Run("ObjectStorable", func(t *testing.T) {
  1834  				skipIfNotOk(t)
  1835  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  1836  				require.NotNil(t, !obj.Storable(), "Expecting object to be storable")
  1837  			})
  1838  
			// TestFsIsFile tests that an error is returned along with a valid fs
			// which points to the parent directory.
			t.Run("FsIsFile", func(t *testing.T) {
				skipIfNotOk(t)
				remoteName := subRemoteName + "/" + file2.Path
				file2Copy := file2
				// The new Fs is rooted at file2's parent directory, so file2
				// should appear under its base name ("z.txt" - presumably
				// file2's leaf name; confirm against where file2 is defined)
				file2Copy.Path = "z.txt"
				fileRemote, err := fs.NewFs(context.Background(), remoteName)
				require.NotNil(t, fileRemote)
				// Pointing an Fs at a file must return fs.ErrorIsFile
				assert.Equal(t, fs.ErrorIsFile, err)

				// Check Fs.Root returns the right thing
				t.Run("FsRoot", func(t *testing.T) {
					skipIfNotOk(t)
					got := fileRemote.Root()
					remoteDir := path.Dir(remoteName)
					want := remoteDir
					// Strip any "remote:" prefix, leaving just the path part
					colon := strings.LastIndex(want, ":")
					if colon >= 0 {
						want = want[colon+1:]
					}
					if isLocalRemote {
						// only check last path element on local
						require.Equal(t, filepath.Base(remoteDir), filepath.Base(got))
					} else {
						require.Equal(t, want, got)
					}
				})

				if strings.HasPrefix(remoteName, "TestChunker") && strings.Contains(remoteName, "Nometa") {
					// TODO fix chunker and remove this bypass
					t.Logf("Skip listing check -- chunker can't yet handle this tricky case")
					return
				}
				fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
			})
  1875  
  1876  			// TestFsIsFileNotFound tests that an error is not returned if no object is found
  1877  			t.Run("FsIsFileNotFound", func(t *testing.T) {
  1878  				skipIfNotOk(t)
  1879  				remoteName := subRemoteName + "/not found.txt"
  1880  				fileRemote, err := fs.NewFs(context.Background(), remoteName)
  1881  				require.NoError(t, err)
  1882  				fstest.CheckListing(t, fileRemote, []fstest.Item{})
  1883  			})
  1884  
			// Test that things work from the root
			t.Run("FromRoot", func(t *testing.T) {
				if features := f.Features(); features.BucketBased && !features.BucketBasedRootOK {
					t.Skip("Can't list from root on this remote")
				}

				// Split subRemoteName into the config part ("remote:") and the
				// path within the remote
				parsed, err := fspath.Parse(subRemoteName)
				require.NoError(t, err)
				configName, configLeaf := parsed.ConfigString, parsed.Path
				if configName == "" {
					// No "remote:" part - treat the parent directory as the root
					configName, configLeaf = path.Split(subRemoteName)
				} else {
					configName += ":"
				}
				t.Logf("Opening root remote %q path %q from %q", configName, configLeaf, subRemoteName)
				rootRemote, err := fs.NewFs(context.Background(), configName)
				require.NoError(t, err)

				// file1 and file2 as they appear when listed from the root
				file1Root := file1
				file1Root.Path = path.Join(configLeaf, file1Root.Path)
				file2Root := file2
				file2Root.Path = path.Join(configLeaf, file2Root.Path)
				// Collect every parent directory of file2, deepest first,
				// rebased onto configLeaf
				var dirs []string
				dir := file2.Path
				for {
					dir = path.Dir(dir)
					if dir == "" || dir == "." || dir == "/" {
						break
					}
					dirs = append(dirs, path.Join(configLeaf, dir))
				}

				// Check that we can see file1 and file2 from the root
				t.Run("List", func(t *testing.T) {
					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, dirs, rootRemote.Precision())
				})

				// Check that listing the entries is OK
				t.Run("ListEntries", func(t *testing.T) {
					entries, err := rootRemote.List(context.Background(), configLeaf)
					require.NoError(t, err)
					// Only file1 and file2's topmost parent directory (the last
					// element appended to dirs) live directly in configLeaf
					fstest.CompareItems(t, entries, []fstest.Item{file1Root}, dirs[len(dirs)-1:], rootRemote.Precision(), "ListEntries")
				})

				// List the root with ListR
				t.Run("ListR", func(t *testing.T) {
					doListR := rootRemote.Features().ListR
					if doListR == nil {
						t.Skip("FS has no ListR interface")
					}
					file1Found, file2Found := false, false
					// The root may contain many unrelated entries - give up
					// after 10 seconds of listing
					stopTime := time.Now().Add(10 * time.Second)
					// Sentinel errors used to abort the listing early
					errTooMany := errors.New("too many files")
					errFound := errors.New("found")
					err := doListR(context.Background(), "", func(entries fs.DirEntries) error {
						for _, entry := range entries {
							remote := entry.Remote()
							if remote == file1Root.Path {
								file1Found = true
							}
							if remote == file2Root.Path {
								file2Found = true
							}
							if file1Found && file2Found {
								return errFound
							}
						}
						if time.Now().After(stopTime) {
							return errTooMany
						}
						return nil
					})
					// Both sentinels are expected terminations, not failures
					if !errors.Is(err, errFound) && !errors.Is(err, errTooMany) {
						assert.NoError(t, err)
					}
					if !errors.Is(err, errTooMany) {
						assert.True(t, file1Found, "file1Root %q not found", file1Root.Path)
						assert.True(t, file2Found, "file2Root %q not found", file2Root.Path)
					} else {
						t.Logf("Too many files to list - giving up")
					}
				})

				// Create a new file
				t.Run("Put", func(t *testing.T) {
					file3Root := fstest.Item{
						ModTime: time.Now(),
						Path:    path.Join(configLeaf, "created from root.txt"),
					}
					_, file3Obj := testPut(ctx, t, rootRemote, &file3Root)
					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root, file3Root}, nil, rootRemote.Precision())

					// And then remove it
					t.Run("Remove", func(t *testing.T) {
						require.NoError(t, file3Obj.Remove(context.Background()))
						fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, nil, rootRemote.Precision())
					})
				})
			})
  1984  
  1985  			// TestPublicLink tests creation of sharable, public links
  1986  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|PublicLink)$'
  1987  			t.Run("PublicLink", func(t *testing.T) {
  1988  				skipIfNotOk(t)
  1989  
  1990  				publicLinkFunc := f.Features().PublicLink
  1991  				if publicLinkFunc == nil {
  1992  					t.Skip("FS has no PublicLinker interface")
  1993  				}
  1994  
  1995  				type PublicLinkFunc func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error)
  1996  				wrapPublicLinkFunc := func(f PublicLinkFunc) PublicLinkFunc {
  1997  					return func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
  1998  						link, err = publicLinkFunc(ctx, remote, expire, unlink)
  1999  						if err == nil {
  2000  							return
  2001  						}
  2002  						// For OneDrive Personal, link expiry is a premium feature
  2003  						// Don't let it fail the test (https://github.com/rclone/rclone/issues/5420)
  2004  						if fsInfo.Name == "onedrive" && strings.Contains(err.Error(), "accountUpgradeRequired") {
  2005  							t.Log("treating accountUpgradeRequired as success for PublicLink")
  2006  							link, err = "bogus link to "+remote, nil
  2007  						}
  2008  						return
  2009  					}
  2010  				}
  2011  
  2012  				expiry := fs.Duration(120 * time.Second)
  2013  				doPublicLink := wrapPublicLinkFunc(publicLinkFunc)
  2014  
  2015  				// if object not found
  2016  				link, err := doPublicLink(ctx, file1.Path+"_does_not_exist", expiry, false)
  2017  				require.Error(t, err, "Expected to get error when file doesn't exist")
  2018  				require.Equal(t, "", link, "Expected link to be empty on error")
  2019  
  2020  				// sharing file for the first time
  2021  				link1, err := doPublicLink(ctx, file1.Path, expiry, false)
  2022  				require.NoError(t, err)
  2023  				require.NotEqual(t, "", link1, "Link should not be empty")
  2024  
  2025  				link2, err := doPublicLink(ctx, file2.Path, expiry, false)
  2026  				require.NoError(t, err)
  2027  				require.NotEqual(t, "", link2, "Link should not be empty")
  2028  
  2029  				require.NotEqual(t, link1, link2, "Links to different files should differ")
  2030  
  2031  				// sharing file for the 2nd time
  2032  				link1, err = doPublicLink(ctx, file1.Path, expiry, false)
  2033  				require.NoError(t, err)
  2034  				require.NotEqual(t, "", link1, "Link should not be empty")
  2035  
  2036  				// sharing directory for the first time
  2037  				path := path.Dir(file2.Path)
  2038  				link3, err := doPublicLink(ctx, path, expiry, false)
  2039  				if err != nil && (errors.Is(err, fs.ErrorCantShareDirectories) || errors.Is(err, fs.ErrorObjectNotFound)) {
  2040  					t.Log("skipping directory tests as not supported on this backend")
  2041  				} else {
  2042  					require.NoError(t, err)
  2043  					require.NotEqual(t, "", link3, "Link should not be empty")
  2044  
  2045  					// sharing directory for the second time
  2046  					link3, err = doPublicLink(ctx, path, expiry, false)
  2047  					require.NoError(t, err)
  2048  					require.NotEqual(t, "", link3, "Link should not be empty")
  2049  
  2050  					// sharing the "root" directory in a subremote
  2051  					subRemote, _, removeSubRemote, err := fstest.RandomRemote()
  2052  					require.NoError(t, err)
  2053  					defer removeSubRemote()
  2054  					// ensure sub remote isn't empty
  2055  					buf := bytes.NewBufferString("somecontent")
  2056  					obji := object.NewStaticObjectInfo("somefile", time.Now(), int64(buf.Len()), true, nil, nil)
  2057  					retry(t, "Put", func() error {
  2058  						_, err := subRemote.Put(ctx, buf, obji)
  2059  						return err
  2060  					})
  2061  
  2062  					link4, err := wrapPublicLinkFunc(subRemote.Features().PublicLink)(ctx, "", expiry, false)
  2063  					require.NoError(t, err, "Sharing root in a sub-remote should work")
  2064  					require.NotEqual(t, "", link4, "Link should not be empty")
  2065  				}
  2066  			})
  2067  
  2068  			// TestSetTier tests SetTier and GetTier functionality
  2069  			t.Run("SetTier", func(t *testing.T) {
  2070  				skipIfNotSetTier(t)
  2071  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  2072  				setter, ok := obj.(fs.SetTierer)
  2073  				assert.NotNil(t, ok)
  2074  				getter, ok := obj.(fs.GetTierer)
  2075  				assert.NotNil(t, ok)
  2076  				// If interfaces are supported TiersToTest should contain
  2077  				// at least one entry
  2078  				supportedTiers := opt.TiersToTest
  2079  				assert.NotEmpty(t, supportedTiers)
  2080  				// test set tier changes on supported storage classes or tiers
  2081  				for _, tier := range supportedTiers {
  2082  					err := setter.SetTier(tier)
  2083  					assert.Nil(t, err)
  2084  					got := getter.GetTier()
  2085  					assert.Equal(t, tier, got)
  2086  				}
  2087  			})
  2088  
  2089  			// Check to see if Fs that wrap other Objects implement all the optional methods
  2090  			t.Run("ObjectCheckWrap", func(t *testing.T) {
  2091  				skipIfNotOk(t)
  2092  				if opt.SkipObjectCheckWrap {
  2093  					t.Skip("Skipping FsCheckWrap on this Fs")
  2094  				}
  2095  				ft := new(fs.Features).Fill(ctx, f)
  2096  				if ft.UnWrap == nil {
  2097  					t.Skip("Not a wrapping Fs")
  2098  				}
  2099  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  2100  				_, unsupported := fs.ObjectOptionalInterfaces(obj)
  2101  				for _, name := range unsupported {
  2102  					if !stringsContains(name, opt.UnimplementableObjectMethods) {
  2103  						t.Errorf("Missing Object wrapper for %s", name)
  2104  					}
  2105  				}
  2106  			})
  2107  
			// State of remote at the moment the internal tests are called:
			// both file1 and file2 are still present (ObjectRemove below
			// later trims this to just file2)
			InternalTestFiles = []fstest.Item{file1, file2}
  2110  
  2111  			// TestObjectRemove tests Remove
  2112  			t.Run("ObjectRemove", func(t *testing.T) {
  2113  				skipIfNotOk(t)
  2114  				// remove file1
  2115  				obj := fstest.NewObject(ctx, t, f, file1.Path)
  2116  				err := obj.Remove(ctx)
  2117  				require.NoError(t, err)
  2118  				// check listing without modtime as TestPublicLink may change the modtime
  2119  				fstest.CheckListingWithPrecision(t, f, []fstest.Item{file2}, nil, fs.ModTimeNotSupported)
  2120  				// Show the internal tests file2 is gone
  2121  				InternalTestFiles = []fstest.Item{file2}
  2122  			})
  2123  
  2124  			// TestAbout tests the About optional interface
  2125  			t.Run("ObjectAbout", func(t *testing.T) {
  2126  				skipIfNotOk(t)
  2127  
  2128  				// Check have About
  2129  				doAbout := f.Features().About
  2130  				if doAbout == nil {
  2131  					t.Skip("FS does not support About")
  2132  				}
  2133  
  2134  				// Can't really check the output much!
  2135  				usage, err := doAbout(context.Background())
  2136  				require.NoError(t, err)
  2137  				require.NotNil(t, usage)
  2138  				assert.NotEqual(t, int64(0), usage.Total)
  2139  			})
  2140  
			// Just file2 remains for Purge to clean up

			// TestFsPutStream tests uploading files when size isn't known in advance.
			// This may trigger large buffer allocation in some backends, keep it
			// close to the end of suite. (See fs/operations/xtra_operations_test.go)
			t.Run("FsPutStream", func(t *testing.T) {
				skipIfNotOk(t)
				if f.Features().PutStream == nil {
					t.Skip("FS has no PutStream interface")
				}

				// Test both an empty stream and a 100 byte stream
				for _, contentSize := range []int{0, 100} {
					t.Run(strconv.Itoa(contentSize), func(t *testing.T) {
						file := fstest.Item{
							ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
							Path:    "piped data.txt",
							Size:    -1, // use unknown size during upload
						}

						var (
							err        error
							obj        fs.Object
							uploadHash *hash.MultiHasher
						)
						retry(t, "PutStream", func() error {
							// Recreate the contents, reader and hasher on every
							// attempt so a retry uploads from scratch
							contents := random.String(contentSize)
							buf := bytes.NewBufferString(contents)
							uploadHash = hash.NewMultiHasher()
							in := io.TeeReader(buf, uploadHash)

							file.Size = -1
							obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
							obj, err = f.Features().PutStream(ctx, in, obji)
							return err
						})
						file.Hashes = uploadHash.Sums()
						file.Size = int64(contentSize) // use correct size when checking
						file.Check(t, obj, f.Precision())
						// Re-read the object and check again
						obj = fstest.NewObject(ctx, t, f, file.Path)
						file.Check(t, obj, f.Precision())
						require.NoError(t, obj.Remove(ctx))
					})
				}
			})
  2186  
  2187  			// TestInternal calls InternalTest() on the Fs
  2188  			t.Run("Internal", func(t *testing.T) {
  2189  				skipIfNotOk(t)
  2190  				if it, ok := f.(InternalTester); ok {
  2191  					it.InternalTest(t)
  2192  				} else {
  2193  					t.Skipf("%T does not implement InternalTester", f)
  2194  				}
  2195  			})
  2196  
  2197  		})
  2198  
		// TestFsPutChunked may trigger large buffer allocation with
		// some backends (see fs/operations/xtra_operations_test.go),
		// keep it closer to the end of suite.
		t.Run("FsPutChunked", func(t *testing.T) {
			skipIfNotOk(t)
			if testing.Short() {
				t.Skip("not running with -short")
			}

			if opt.ChunkedUpload.Skip {
				t.Skip("skipping as ChunkedUpload.Skip is set")
			}

			setUploadChunkSizer, _ := f.(SetUploadChunkSizer)
			if setUploadChunkSizer == nil {
				t.Skipf("%T does not implement SetUploadChunkSizer", f)
			}

			// Optional - nil if the backend can't set the upload cutoff
			setUploadCutoffer, _ := f.(SetUploadCutoffer)

			// Clamp the backend's minimum chunk size to at least 100 bytes,
			// then round up if the backend requires specific granularity
			minChunkSize := opt.ChunkedUpload.MinChunkSize
			if minChunkSize < 100 {
				minChunkSize = 100
			}
			if opt.ChunkedUpload.CeilChunkSize != nil {
				minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
			}

			// Max chunk size: 2 MiB by default, but at least 2*minChunkSize
			// and no more than the backend's declared maximum
			maxChunkSize := 2 * fs.Mebi
			if maxChunkSize < 2*minChunkSize {
				maxChunkSize = 2 * minChunkSize
			}
			if opt.ChunkedUpload.MaxChunkSize > 0 && maxChunkSize > opt.ChunkedUpload.MaxChunkSize {
				maxChunkSize = opt.ChunkedUpload.MaxChunkSize
			}
			if opt.ChunkedUpload.CeilChunkSize != nil {
				maxChunkSize = opt.ChunkedUpload.CeilChunkSize(maxChunkSize)
			}

			// next applies f to minChunkSize, falling back to minChunkSize
			// if the result would exceed maxChunkSize
			next := func(f func(fs.SizeSuffix) fs.SizeSuffix) fs.SizeSuffix {
				s := f(minChunkSize)
				if s > maxChunkSize {
					s = minChunkSize
				}
				return s
			}

			// A spread of chunk sizes between min and max to test with
			chunkSizes := fs.SizeSuffixList{
				minChunkSize,
				minChunkSize + (maxChunkSize-minChunkSize)/3,
				next(NextPowerOfTwo),
				next(NextMultipleOf(100000)),
				next(NextMultipleOf(100001)),
				maxChunkSize,
			}
			chunkSizes.Sort()

			// Set the minimum chunk size, upload cutoff and reset it at the end
			oldChunkSize, err := setUploadChunkSizer.SetUploadChunkSize(minChunkSize)
			require.NoError(t, err)
			var oldUploadCutoff fs.SizeSuffix
			if setUploadCutoffer != nil {
				oldUploadCutoff, err = setUploadCutoffer.SetUploadCutoff(minChunkSize)
				require.NoError(t, err)
			}
			defer func() {
				_, err := setUploadChunkSizer.SetUploadChunkSize(oldChunkSize)
				assert.NoError(t, err)
				if setUploadCutoffer != nil {
					_, err := setUploadCutoffer.SetUploadCutoff(oldUploadCutoff)
					assert.NoError(t, err)
				}
			}()

			var lastCs fs.SizeSuffix
			for _, cs := range chunkSizes {
				// The list is sorted - skip duplicate sizes
				if cs <= lastCs {
					continue
				}
				if opt.ChunkedUpload.CeilChunkSize != nil {
					cs = opt.ChunkedUpload.CeilChunkSize(cs)
				}
				lastCs = cs

				t.Run(cs.String(), func(t *testing.T) {
					_, err := setUploadChunkSizer.SetUploadChunkSize(cs)
					require.NoError(t, err)
					if setUploadCutoffer != nil {
						_, err = setUploadCutoffer.SetUploadCutoff(cs)
						require.NoError(t, err)
					}

					// Pick upload sizes around the chunk boundary
					var testChunks []fs.SizeSuffix
					if opt.ChunkedUpload.NeedMultipleChunks {
						// If NeedMultipleChunks is set then test with > cs
						testChunks = []fs.SizeSuffix{cs + 1, 2 * cs, 2*cs + 1}
					} else {
						testChunks = []fs.SizeSuffix{cs - 1, cs, 2*cs + 1}
					}

					for _, fileSize := range testChunks {
						t.Run(fmt.Sprintf("%d", fileSize), func(t *testing.T) {
							TestPutLarge(ctx, t, f, &fstest.Item{
								ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
								Path:    fmt.Sprintf("chunked-%s-%s.bin", cs.String(), fileSize.String()),
								Size:    int64(fileSize),
							})
							// Repeat via PutStream (unknown size) if supported
							t.Run("Streamed", func(t *testing.T) {
								if f.Features().PutStream == nil {
									t.Skip("FS has no PutStream interface")
								}
								TestPutLargeStreamed(ctx, t, f, &fstest.Item{
									ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
									Path:    fmt.Sprintf("chunked-%s-%s-streamed.bin", cs.String(), fileSize.String()),
									Size:    int64(fileSize),
								})
							})
						})
					}
				})
			}
		})
  2321  
  2322  		// Copy files with chunked copy if available
  2323  		t.Run("FsCopyChunked", func(t *testing.T) {
  2324  			skipIfNotOk(t)
  2325  			if testing.Short() {
  2326  				t.Skip("not running with -short")
  2327  			}
  2328  
  2329  			// Check have Copy
  2330  			doCopy := f.Features().Copy
  2331  			if doCopy == nil {
  2332  				t.Skip("FS has no Copier interface")
  2333  			}
  2334  
  2335  			if opt.ChunkedUpload.Skip {
  2336  				t.Skip("skipping as ChunkedUpload.Skip is set")
  2337  			}
  2338  
  2339  			if strings.HasPrefix(f.Name(), "serves3") || strings.HasPrefix(f.Name(), "TestS3Rclone") {
  2340  				t.Skip("FIXME skip test - see #7454")
  2341  			}
  2342  
  2343  			do, _ := f.(SetCopyCutoffer)
  2344  			if do == nil {
  2345  				t.Skipf("%T does not implement SetCopyCutoff", f)
  2346  			}
  2347  
  2348  			minChunkSize := opt.ChunkedUpload.MinChunkSize
  2349  			if minChunkSize < 100 {
  2350  				minChunkSize = 100
  2351  			}
  2352  			if opt.ChunkedUpload.CeilChunkSize != nil {
  2353  				minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
  2354  			}
  2355  
  2356  			chunkSizes := fs.SizeSuffixList{
  2357  				minChunkSize,
  2358  				minChunkSize + 1,
  2359  				2*minChunkSize - 1,
  2360  				2 * minChunkSize,
  2361  				2*minChunkSize + 1,
  2362  			}
  2363  			for _, chunkSize := range chunkSizes {
  2364  				t.Run(fmt.Sprintf("%d", chunkSize), func(t *testing.T) {
  2365  					contents := random.String(int(chunkSize))
  2366  					item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
  2367  					src := PutTestContents(ctx, t, f, &item, contents, true)
  2368  					defer func() {
  2369  						assert.NoError(t, src.Remove(ctx))
  2370  					}()
  2371  
  2372  					var itemCopy = item
  2373  					itemCopy.Path += ".copy"
  2374  
  2375  					// Set copy cutoff to mininum value so we make chunks
  2376  					origCutoff, err := do.SetCopyCutoff(minChunkSize)
  2377  					require.NoError(t, err)
  2378  					defer func() {
  2379  						_, err = do.SetCopyCutoff(origCutoff)
  2380  						require.NoError(t, err)
  2381  					}()
  2382  
  2383  					// Do the copy
  2384  					dst, err := doCopy(ctx, src, itemCopy.Path)
  2385  					require.NoError(t, err)
  2386  					defer func() {
  2387  						assert.NoError(t, dst.Remove(ctx))
  2388  					}()
  2389  
  2390  					// Check size
  2391  					assert.Equal(t, src.Size(), dst.Size())
  2392  
  2393  					// Check modtime
  2394  					srcModTime := src.ModTime(ctx)
  2395  					dstModTime := dst.ModTime(ctx)
  2396  					assert.True(t, srcModTime.Equal(dstModTime))
  2397  
  2398  					// Make sure contents are correct
  2399  					gotContents := ReadObject(ctx, t, dst, -1)
  2400  					assert.Equal(t, contents, gotContents)
  2401  				})
  2402  			}
  2403  		})
  2404  
  2405  		// TestFsUploadUnknownSize ensures Fs.Put() and Object.Update() don't panic when
  2406  		// src.Size() == -1
  2407  		//
  2408  		// This may trigger large buffer allocation in some backends, keep it
  2409  		// closer to the suite end. (See fs/operations/xtra_operations_test.go)
  2410  		t.Run("FsUploadUnknownSize", func(t *testing.T) {
  2411  			skipIfNotOk(t)
  2412  
  2413  			t.Run("FsPutUnknownSize", func(t *testing.T) {
  2414  				defer func() {
  2415  					assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1")
  2416  				}()
  2417  
  2418  				contents := random.String(100)
  2419  				in := bytes.NewBufferString(contents)
  2420  
  2421  				obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
  2422  				obj, err := f.Put(ctx, in, obji)
  2423  				if err == nil {
  2424  					require.NoError(t, obj.Remove(ctx), "successfully uploaded unknown-sized file but failed to remove")
  2425  				}
  2426  				// if err != nil: it's okay as long as no panic
  2427  			})
  2428  
  2429  			t.Run("FsUpdateUnknownSize", func(t *testing.T) {
  2430  				unknownSizeUpdateFile := fstest.Item{
  2431  					ModTime: fstest.Time("2002-02-03T04:05:06.499999999Z"),
  2432  					Path:    "unknown-size-update.txt",
  2433  				}
  2434  
  2435  				testPut(ctx, t, f, &unknownSizeUpdateFile)
  2436  
  2437  				defer func() {
  2438  					assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1")
  2439  				}()
  2440  
  2441  				newContents := random.String(200)
  2442  				in := bytes.NewBufferString(newContents)
  2443  
  2444  				obj := fstest.NewObject(ctx, t, f, unknownSizeUpdateFile.Path)
  2445  				obji := object.NewStaticObjectInfo(unknownSizeUpdateFile.Path, unknownSizeUpdateFile.ModTime, -1, true, nil, obj.Fs())
  2446  				err := obj.Update(ctx, in, obji)
  2447  				if err == nil {
  2448  					require.NoError(t, obj.Remove(ctx), "successfully updated object with unknown-sized source but failed to remove")
  2449  				}
  2450  				// if err != nil: it's okay as long as no panic
  2451  			})
  2452  
  2453  		})
  2454  
  2455  		// TestFsRootCollapse tests if the root of an fs "collapses" to the
  2456  		// absolute root. It creates a new fs of the same backend type with its
  2457  		// root set to a *nonexistent* folder, and attempts to read the info of
  2458  		// an object in that folder, whose name is taken from a directory that
  2459  		// exists in the absolute root.
  2460  		// This test is added after
  2461  		// https://github.com/rclone/rclone/issues/3164.
  2462  		t.Run("FsRootCollapse", func(t *testing.T) {
  2463  			deepRemoteName := subRemoteName + "/deeper/nonexisting/directory"
  2464  			deepRemote, err := fs.NewFs(context.Background(), deepRemoteName)
  2465  			require.NoError(t, err)
  2466  
  2467  			colonIndex := strings.IndexRune(deepRemoteName, ':')
  2468  			firstSlashIndex := strings.IndexRune(deepRemoteName, '/')
  2469  			firstDir := deepRemoteName[colonIndex+1 : firstSlashIndex]
  2470  			_, err = deepRemote.NewObject(ctx, firstDir)
  2471  			require.Equal(t, fs.ErrorObjectNotFound, err)
  2472  			// If err is not fs.ErrorObjectNotFound, it means the backend is
  2473  			// somehow confused about root and absolute root.
  2474  		})
  2475  
  2476  		// FsDirSetModTime tests setting the mod time on a directory if possible
  2477  		t.Run("FsDirSetModTime", func(t *testing.T) {
  2478  			const name = "dir-mod-time"
  2479  			do := f.Features().DirSetModTime
  2480  			if do == nil {
  2481  				t.Skip("FS has no DirSetModTime interface")
  2482  			}
  2483  
  2484  			// Set ModTime on non existing directory should return error
  2485  			t1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
  2486  			err := do(ctx, name, t1)
  2487  			require.Error(t, err)
  2488  
  2489  			// Make the directory and try again
  2490  			err = f.Mkdir(ctx, name)
  2491  			require.NoError(t, err)
  2492  			err = do(ctx, name, t1)
  2493  			require.NoError(t, err)
  2494  
  2495  			// Check the modtime got set properly
  2496  			dir := fstest.NewDirectory(ctx, t, f, name)
  2497  			fstest.CheckDirModTime(ctx, t, f, dir, t1)
  2498  
  2499  			// Tidy up
  2500  			err = f.Rmdir(ctx, name)
  2501  			require.NoError(t, err)
  2502  		})
  2503  
		// Metadata fixtures shared by the directory metadata tests below.
		var testMetadata = fs.Metadata{
			// System metadata supported by all backends
			"mtime": "2001-02-03T04:05:06.499999999Z",
			// User metadata
			"potato": "jersey",
		}
		// Alternative values used to verify that existing metadata is updated.
		var testMetadata2 = fs.Metadata{
			// System metadata supported by all backends
			"mtime": "2002-02-03T04:05:06.499999999Z",
			// User metadata
			"potato": "king edwards",
		}
  2516  
  2517  		// FsMkdirMetadata tests creating a directory with metadata if possible
  2518  		t.Run("FsMkdirMetadata", func(t *testing.T) {
  2519  			ctx, ci := fs.AddConfig(ctx)
  2520  			ci.Metadata = true
  2521  			const name = "dir-metadata"
  2522  			do := f.Features().MkdirMetadata
  2523  			if do == nil {
  2524  				t.Skip("FS has no MkdirMetadata interface")
  2525  			}
  2526  			assert.True(t, f.Features().WriteDirMetadata, "Backends must support Directory.SetMetadata and Fs.MkdirMetadata")
  2527  
  2528  			// Create the directory from fresh
  2529  			dir, err := do(ctx, name, testMetadata)
  2530  			require.NoError(t, err)
  2531  			require.NotNil(t, dir)
  2532  
  2533  			// Check the returned directory and one read from the listing
  2534  			fstest.CheckEntryMetadata(ctx, t, f, dir, testMetadata)
  2535  			fstest.CheckEntryMetadata(ctx, t, f, fstest.NewDirectory(ctx, t, f, name), testMetadata)
  2536  
  2537  			// Now update the metadata on the existing directory
  2538  			t.Run("Update", func(t *testing.T) {
  2539  				dir, err := do(ctx, name, testMetadata2)
  2540  				require.NoError(t, err)
  2541  				require.NotNil(t, dir)
  2542  
  2543  				// Check the returned directory and one read from the listing
  2544  				fstest.CheckEntryMetadata(ctx, t, f, dir, testMetadata2)
  2545  				// The TestUnionPolicy2 has randomness in it so it sets metadata on
  2546  				// one directory but can read a different one from the listing.
  2547  				if f.Name() != "TestUnionPolicy2" {
  2548  					fstest.CheckEntryMetadata(ctx, t, f, fstest.NewDirectory(ctx, t, f, name), testMetadata2)
  2549  				}
  2550  			})
  2551  
  2552  			// Now test the Directory methods
  2553  			t.Run("CheckDirectory", func(t *testing.T) {
  2554  				_, ok := dir.(fs.Object)
  2555  				assert.False(t, ok, "Directory must not type assert to Object")
  2556  				_, ok = dir.(fs.ObjectInfo)
  2557  				assert.False(t, ok, "Directory must not type assert to ObjectInfo")
  2558  			})
  2559  
  2560  			// Tidy up
  2561  			err = f.Rmdir(ctx, name)
  2562  			require.NoError(t, err)
  2563  		})
  2564  
  2565  		// FsDirectory checks methods on the directory object
  2566  		t.Run("FsDirectory", func(t *testing.T) {
  2567  			ctx, ci := fs.AddConfig(ctx)
  2568  			ci.Metadata = true
  2569  			const name = "dir-methods"
  2570  			features := f.Features()
  2571  
  2572  			if !features.CanHaveEmptyDirectories {
  2573  				t.Skip("Can't test if can't have empty directories")
  2574  			}
  2575  			if !features.ReadDirMetadata &&
  2576  				!features.WriteDirMetadata &&
  2577  				!features.WriteDirSetModTime &&
  2578  				!features.UserDirMetadata &&
  2579  				!features.Overlay &&
  2580  				features.UnWrap == nil {
  2581  				t.Skip("FS has no Directory methods and doesn't Wrap")
  2582  			}
  2583  
  2584  			// Create a directory to start with
  2585  			err := f.Mkdir(ctx, name)
  2586  			require.NoError(t, err)
  2587  
  2588  			// Get the directory object
  2589  			dir := fstest.NewDirectory(ctx, t, f, name)
  2590  			_, ok := dir.(fs.Object)
  2591  			assert.False(t, ok, "Directory must not type assert to Object")
  2592  			_, ok = dir.(fs.ObjectInfo)
  2593  			assert.False(t, ok, "Directory must not type assert to ObjectInfo")
  2594  
  2595  			// Now test the directory methods
  2596  			t.Run("ReadDirMetadata", func(t *testing.T) {
  2597  				if !features.ReadDirMetadata {
  2598  					t.Skip("Directories don't support ReadDirMetadata")
  2599  				}
  2600  				if f.Name() == "TestUnionPolicy3" {
  2601  					t.Skipf("Test unreliable on %q", f.Name())
  2602  				}
  2603  				fstest.CheckEntryMetadata(ctx, t, f, dir, fs.Metadata{
  2604  					"mtime": dir.ModTime(ctx).Format(time.RFC3339Nano),
  2605  				})
  2606  			})
  2607  
  2608  			t.Run("WriteDirMetadata", func(t *testing.T) {
  2609  				if !features.WriteDirMetadata {
  2610  					t.Skip("Directories don't support WriteDirMetadata")
  2611  				}
  2612  				assert.NotNil(t, features.MkdirMetadata, "Backends must support Directory.SetMetadata and Fs.MkdirMetadata")
  2613  				do, ok := dir.(fs.SetMetadataer)
  2614  				require.True(t, ok, "Expected to find SetMetadata method on Directory")
  2615  				err := do.SetMetadata(ctx, testMetadata)
  2616  				require.NoError(t, err)
  2617  
  2618  				fstest.CheckEntryMetadata(ctx, t, f, dir, testMetadata)
  2619  				fstest.CheckEntryMetadata(ctx, t, f, fstest.NewDirectory(ctx, t, f, name), testMetadata)
  2620  			})
  2621  
  2622  			t.Run("WriteDirSetModTime", func(t *testing.T) {
  2623  				if !features.WriteDirSetModTime {
  2624  					t.Skip("Directories don't support WriteDirSetModTime")
  2625  				}
  2626  				assert.NotNil(t, features.DirSetModTime, "Backends must support Directory.SetModTime and Fs.DirSetModTime")
  2627  
  2628  				t1 := fstest.Time("2001-02-03T04:05:10.123123123Z")
  2629  
  2630  				do, ok := dir.(fs.SetModTimer)
  2631  				require.True(t, ok, "Expected to find SetMetadata method on Directory")
  2632  				err := do.SetModTime(ctx, t1)
  2633  				require.NoError(t, err)
  2634  
  2635  				fstest.CheckDirModTime(ctx, t, f, dir, t1)
  2636  				fstest.CheckDirModTime(ctx, t, f, fstest.NewDirectory(ctx, t, f, name), t1)
  2637  			})
  2638  
  2639  			// Check to see if Fs that wrap other Directories implement all the optional methods
  2640  			t.Run("DirectoryCheckWrap", func(t *testing.T) {
  2641  				if opt.SkipDirectoryCheckWrap {
  2642  					t.Skip("Skipping DirectoryCheckWrap on this Fs")
  2643  				}
  2644  				if !features.Overlay && features.UnWrap == nil {
  2645  					t.Skip("Not a wrapping Fs")
  2646  				}
  2647  				_, unsupported := fs.DirectoryOptionalInterfaces(dir)
  2648  				for _, name := range unsupported {
  2649  					if !stringsContains(name, opt.UnimplementableDirectoryMethods) {
  2650  						t.Errorf("Missing Directory wrapper for %s", name)
  2651  					}
  2652  				}
  2653  			})
  2654  
  2655  			// Tidy up
  2656  			err = f.Rmdir(ctx, name)
  2657  			require.NoError(t, err)
  2658  		})
  2659  
		// Purge the folder
		// fs.ErrorDirNotFound is tolerated here in case an earlier test
		// already removed the directory.
		err = operations.Purge(ctx, f, "")
		if !errors.Is(err, fs.ErrorDirNotFound) {
			require.NoError(t, err)
		}
		purged = true
		// The remote must now list as completely empty
		fstest.CheckListing(t, f, []fstest.Item{})

		// Check purging again if not bucket-based
		// A second purge of a now-missing directory should fail, ideally
		// with fs.ErrorDirNotFound (logged as a warning otherwise).
		if !isBucketBasedButNotRoot(f) {
			err = operations.Purge(ctx, f, "")
			assert.Error(t, err, "Expecting error after on second purge")
			if !errors.Is(err, fs.ErrorDirNotFound) {
				t.Log("Warning: this should produce fs.ErrorDirNotFound")
			}
		}
  2676  
  2677  	})
  2678  
	// Check directory is purged
	// Best-effort cleanup if the purge above never ran; the error is
	// deliberately ignored as the directory may legitimately be gone.
	if !purged {
		_ = operations.Purge(ctx, f, "")
	}
  2683  
  2684  	t.Run("FsShutdown", func(t *testing.T) {
  2685  		do := f.Features().Shutdown
  2686  		if do == nil {
  2687  			t.Skip("Shutdown method not supported")
  2688  		}
  2689  		require.NoError(t, do(ctx))
  2690  		require.NoError(t, do(ctx), "must be able to call Shutdown twice")
  2691  	})
  2692  
	// Remove the local directory so we don't clutter up /tmp
	if strings.HasPrefix(remoteName, "/") {
		t.Log("remoteName", remoteName)
		// Remove temp directory
		// NOTE: os.Remove fails on a non-empty directory, so leftover
		// files would surface here as a test failure.
		err := os.Remove(remoteName)
		require.NoError(t, err)
	}
  2700  }