github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/fstest/fstests/fstests.go

     1  // Package fstests provides generic integration tests for the Fs and
     2  // Object interfaces.
     3  //
     4  // These tests are concerned with the basic functionality of a
     5  // backend.  The tests in fs/sync and fs/operations cover corner
     6  // cases that these tests don't.
     7  package fstests
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"io"
    14  	"io/ioutil"
    15  	"math/bits"
    16  	"os"
    17  	"path"
    18  	"path/filepath"
    19  	"reflect"
    20  	"sort"
    21  	"strconv"
    22  	"strings"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/pkg/errors"
    27  	"github.com/rclone/rclone/fs"
    28  	"github.com/rclone/rclone/fs/config"
    29  	"github.com/rclone/rclone/fs/fserrors"
    30  	"github.com/rclone/rclone/fs/fspath"
    31  	"github.com/rclone/rclone/fs/hash"
    32  	"github.com/rclone/rclone/fs/object"
    33  	"github.com/rclone/rclone/fs/operations"
    34  	"github.com/rclone/rclone/fs/walk"
    35  	"github.com/rclone/rclone/fstest"
    36  	"github.com/rclone/rclone/fstest/testserver"
    37  	"github.com/rclone/rclone/lib/encoder"
    38  	"github.com/rclone/rclone/lib/random"
    39  	"github.com/rclone/rclone/lib/readers"
    40  	"github.com/stretchr/testify/assert"
    41  	"github.com/stretchr/testify/require"
    42  )
    43  
    44  // InternalTester is an optional interface for Fs which allows backends to execute internal tests
    45  //
    46  // This interface should be implemented in 'backend'_internal_test.go and not in 'backend'.go
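        //
        // A minimal sketch (for a hypothetical backend "mybackend") of what this could
        // look like in mybackend_internal_test.go:
        //
        //	func (f *Fs) InternalTest(t *testing.T) {
        //		t.Run("BackendSpecific", func(t *testing.T) {
        //			// exercise behaviour that the generic tests can't reach
        //		})
        //	}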
    47  type InternalTester interface {
    48  	InternalTest(*testing.T)
    49  }
    50  
    51  // ChunkedUploadConfig contains the values used by TestFsPutChunked
    52  // to determine the limits of chunked uploading
    53  type ChunkedUploadConfig struct {
    54  	// Minimum allowed chunk size
    55  	MinChunkSize fs.SizeSuffix
    56  	// Maximum allowed chunk size, 0 is no limit
    57  	MaxChunkSize fs.SizeSuffix
    58  	// Rounds the given chunk size up to the next valid value
    59  	// nil will disable rounding
    60  	// e.g. the next power of 2
    61  	CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
    62  	// More than one chunk is required on upload
    63  	NeedMultipleChunks bool
    64  }
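
        // As an illustration (the values are hypothetical, not from any real backend),
        // a backend whose chunk sizes must be powers of two of at least 1 MiB could
        // describe its limits to TestFsPutChunked like this in its Opt:
        //
        //	ChunkedUpload: fstests.ChunkedUploadConfig{
        //		MinChunkSize:  1024 * 1024, // 1 MiB
        //		CeilChunkSize: fstests.NextPowerOfTwo,
        //	},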
    65  
    66  // SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
    67  type SetUploadChunkSizer interface {
    68  	// Change the configured UploadChunkSize.
    69  	// Will only be called while no transfer is in progress.
    70  	SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
    71  }
    72  
    73  // SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime
    74  type SetUploadCutoffer interface {
    75  	// Change the configured UploadCutoff.
    76  	// Will only be called while no transfer is in progress.
    77  	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
    78  }
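
        // A sketch of how a backend might satisfy SetUploadChunkSizer - the option
        // field and the validation helper below are hypothetical, each backend has its own:
        //
        //	func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
        //		old := f.opt.ChunkSize
        //		if err := checkUploadChunkSize(cs); err != nil {
        //			return old, err
        //		}
        //		f.opt.ChunkSize = cs
        //		return old, nil
        //	}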
    79  
    80  // NextPowerOfTwo returns i if it is already a power of two, otherwise the next bigger power of two.
    81  // All values less than or equal to 0 return 0.
    82  func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
    83  	return 1 << uint(64-bits.LeadingZeros64(uint64(i)-1))
    84  }
    85  
    86  // NextMultipleOf returns a function that can be used as a CeilChunkSize function.
    87  // The returned function returns the smallest multiple of m that is equal to or bigger than i.
    88  // All values less than or equal to 0 return 0.
    89  func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
    90  	if m <= 0 {
    91  		panic(fmt.Sprintf("invalid multiplier %s", m))
    92  	}
    93  	return func(i fs.SizeSuffix) fs.SizeSuffix {
    94  		if i <= 0 {
    95  			return 0
    96  		}
    97  
    98  		return (((i - 1) / m) + 1) * m
    99  	}
   100  }
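
        // For example (illustrative values only), a backend whose chunks must be
        // multiples of 320 KiB could pass NextMultipleOf(320*1024) as CeilChunkSize:
        //
        //	ceil := fstests.NextMultipleOf(320 * 1024)
        //	ceil(1)          // 320 KiB
        //	ceil(320 * 1024) // 320 KiB
        //	ceil(500 * 1024) // 640 KiB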
   101  
   102  // dirsToNames returns a sorted list of directory names
   103  func dirsToNames(dirs []fs.Directory) []string {
   104  	names := []string{}
   105  	for _, dir := range dirs {
   106  		names = append(names, fstest.Normalize(dir.Remote()))
   107  	}
   108  	sort.Strings(names)
   109  	return names
   110  }
   111  
   112  // objsToNames returns a sorted list of object names
   113  func objsToNames(objs []fs.Object) []string {
   114  	names := []string{}
   115  	for _, obj := range objs {
   116  		names = append(names, fstest.Normalize(obj.Remote()))
   117  	}
   118  	sort.Strings(names)
   119  	return names
   120  }
   121  
   122  // findObject finds the object on the remote
   123  func findObject(ctx context.Context, t *testing.T, f fs.Fs, Name string) fs.Object {
   124  	var obj fs.Object
   125  	var err error
   126  	sleepTime := 1 * time.Second
   127  	for i := 1; i <= *fstest.ListRetries; i++ {
   128  		obj, err = f.NewObject(ctx, Name)
   129  		if err == nil {
   130  			break
   131  		}
   132  		t.Logf("Sleeping for %v for findObject eventual consistency: %d/%d (%v)", sleepTime, i, *fstest.ListRetries, err)
   133  		time.Sleep(sleepTime)
   134  		sleepTime = (sleepTime * 3) / 2
   135  	}
   136  	require.NoError(t, err)
   137  	return obj
   138  }
   139  
   140  // retry calls f() until it succeeds or returns a non-retriable error
   141  func retry(t *testing.T, what string, f func() error) {
   142  	const maxTries = 10
   143  	var err error
   144  	for tries := 1; tries <= maxTries; tries++ {
   145  		err = f()
   146  		// exit if no error, or error is not retriable
   147  		if err == nil || !fserrors.IsRetryError(err) {
   148  			break
   149  		}
   150  		t.Logf("%s error: %v - low level retry %d/%d", what, err, tries, maxTries)
   151  		time.Sleep(2 * time.Second)
   152  	}
   153  	require.NoError(t, err, what)
   154  }
   155  
   156  // testPut puts a file with random contents to the remote
   157  func testPut(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
   158  	return PutTestContents(ctx, t, f, file, random.String(100), true)
   159  }
   160  
   161  // PutTestContents puts a file with the given contents to the remote and checks it, but unlike TestPutLarge doesn't remove it afterwards
   162  func PutTestContents(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool) (string, fs.Object) {
   163  	var (
   164  		err        error
   165  		obj        fs.Object
   166  		uploadHash *hash.MultiHasher
   167  	)
   168  	retry(t, "Put", func() error {
   169  		buf := bytes.NewBufferString(contents)
   170  		uploadHash = hash.NewMultiHasher()
   171  		in := io.TeeReader(buf, uploadHash)
   172  
   173  		file.Size = int64(buf.Len())
   174  		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
   175  		obj, err = f.Put(ctx, in, obji)
   176  		return err
   177  	})
   178  	file.Hashes = uploadHash.Sums()
   179  	if check {
   180  		file.Check(t, obj, f.Precision())
   181  		// Re-read the object and check again
   182  		obj = findObject(ctx, t, f, file.Path)
   183  		file.Check(t, obj, f.Precision())
   184  	}
   185  	return contents, obj
   186  }
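
        // Backend internal tests can use PutTestContents to create files with known
        // contents. A minimal sketch (the item, contents and cleanup are illustrative):
        //
        //	item := fstest.Item{ModTime: time.Now(), Path: "dir/file.txt"}
        //	_, obj := fstests.PutTestContents(ctx, t, f, &item, "hello", true)
        //	defer func() { _ = obj.Remove(ctx) }()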
   187  
   188  // TestPutLarge puts a file to the remote, checks it and removes it on success.
   189  func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
   190  	var (
   191  		err        error
   192  		obj        fs.Object
   193  		uploadHash *hash.MultiHasher
   194  	)
   195  	retry(t, "PutLarge", func() error {
   196  		r := readers.NewPatternReader(file.Size)
   197  		uploadHash = hash.NewMultiHasher()
   198  		in := io.TeeReader(r, uploadHash)
   199  
   200  		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
   201  		obj, err = f.Put(ctx, in, obji)
   202  		if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles {
   203  			t.Skip("Can't upload zero length files")
   204  		}
   205  		return err
   206  	})
   207  	file.Hashes = uploadHash.Sums()
   208  	file.Check(t, obj, f.Precision())
   209  
   210  	// Re-read the object and check again
   211  	obj = findObject(ctx, t, f, file.Path)
   212  	file.Check(t, obj, f.Precision())
   213  
   214  	// Download the object and check it is OK
   215  	downloadHash := hash.NewMultiHasher()
   216  	download, err := obj.Open(ctx)
   217  	require.NoError(t, err)
   218  	n, err := io.Copy(downloadHash, download)
   219  	require.NoError(t, err)
   220  	assert.Equal(t, file.Size, n)
   221  	require.NoError(t, download.Close())
   222  	assert.Equal(t, file.Hashes, downloadHash.Sums())
   223  
   224  	// Remove the object
   225  	require.NoError(t, obj.Remove(ctx))
   226  }
   227  
   228  // errorReader just returns an error on Read
   229  type errorReader struct {
   230  	err error
   231  }
   232  
   233  // Read returns an error immediately
   234  func (er errorReader) Read(p []byte) (n int, err error) {
   235  	return 0, er.err
   236  }
   237  
   238  // read the contents of an object as a string
   239  func readObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string {
   240  	what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options)
   241  	in, err := obj.Open(ctx, options...)
   242  	require.NoError(t, err, what)
   243  	var r io.Reader = in
   244  	if limit >= 0 {
   245  		r = &io.LimitedReader{R: r, N: limit}
   246  	}
   247  	contents, err := ioutil.ReadAll(r)
   248  	require.NoError(t, err, what)
   249  	err = in.Close()
   250  	require.NoError(t, err, what)
   251  	return string(contents)
   252  }
   253  
   254  // ExtraConfigItem describes a config item for the tests
   255  type ExtraConfigItem struct{ Name, Key, Value string }
   256  
   257  // Opt is options for Run
   258  type Opt struct {
   259  	RemoteName                   string
   260  	NilObject                    fs.Object
   261  	ExtraConfig                  []ExtraConfigItem
   262  	SkipBadWindowsCharacters     bool     // skips unusable characters for Windows if set
   263  	SkipFsMatch                  bool     // if set skip exact matching of Fs value
   264  	TiersToTest                  []string // List of tiers which can be tested in setTier test
   265  	ChunkedUpload                ChunkedUploadConfig
   266  	UnimplementableFsMethods     []string // List of Fs methods which can't be implemented in this wrapping Fs
   267  	UnimplementableObjectMethods []string // List of Object methods which can't be implemented in this wrapping Fs
   268  	SkipFsCheckWrap              bool     // if set skip FsCheckWrap
   269  	SkipObjectCheckWrap          bool     // if set skip ObjectCheckWrap
   270  	SkipInvalidUTF8              bool     // if set skip invalid UTF-8 checks
   271  }
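
        // A backend's integration test normally just fills in Opt and calls Run - the
        // remote name and object type below are placeholders for a hypothetical backend:
        //
        //	func TestIntegration(t *testing.T) {
        //		fstests.Run(t, &fstests.Opt{
        //			RemoteName: "TestMyBackend:",
        //			NilObject:  (*mybackend.Object)(nil),
        //		})
        //	}
        //
        // which can then be run against a configured remote with:
        //
        //	go test -v -remote TestMyBackend: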
   272  
   273  // returns true if x is found in ss
   274  func stringsContains(x string, ss []string) bool {
   275  	for _, s := range ss {
   276  		if x == s {
   277  			return true
   278  		}
   279  	}
   280  	return false
   281  }
   282  
   283  // Run runs the basic integration tests for a remote using the options passed in.
   284  //
   285  // They are structured in a hierarchical way so that dependencies for the tests can be created.
   286  //
   287  // For example some tests require the directory to be created - these
   288  // are inside the "FsMkdir" test.  Some tests require some test files
   289  // - these are inside the "FsPutFiles" test.
   290  func Run(t *testing.T, opt *Opt) {
   291  	var (
   292  		remote        fs.Fs
   293  		remoteName    = opt.RemoteName
   294  		subRemoteName string
   295  		subRemoteLeaf string
   296  		file1         = fstest.Item{
   297  			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
   298  			Path:    "file name.txt",
   299  		}
   300  		file1Contents string
   301  		file2         = fstest.Item{
   302  			ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
   303  			Path:    `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
   304  		}
   305  		isLocalRemote bool
   306  		purged        bool // whether the dir has been purged or not
   307  		ctx           = context.Background()
   308  	)
   309  
   310  	if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" {
   311  		t.Skip("quicktest only")
   312  	}
   313  
   314  	// Skip the test if the remote isn't configured
   315  	skipIfNotOk := func(t *testing.T) {
   316  		if remote == nil {
   317  			t.Skipf("WARN: %q not configured", remoteName)
   318  		}
   319  	}
   320  
   321  	// Skip if remote is not ListR capable, otherwise set the useListR
   322  	// flag, returning a function to restore its value
   323  	skipIfNotListR := func(t *testing.T) func() {
   324  		skipIfNotOk(t)
   325  		if remote.Features().ListR == nil {
   326  			t.Skip("FS has no ListR interface")
   327  		}
   328  		previous := fs.Config.UseListR
   329  		fs.Config.UseListR = true
   330  		return func() {
   331  			fs.Config.UseListR = previous
   332  		}
   333  	}
   334  
   335  	// Skip if remote is not SetTier and GetTier capable
   336  	skipIfNotSetTier := func(t *testing.T) {
   337  		skipIfNotOk(t)
   338  		if !remote.Features().SetTier ||
   339  			!remote.Features().GetTier {
   340  			t.Skip("FS has no SetTier & GetTier interfaces")
   341  		}
   342  	}
   343  
   344  	// Return true if f (or any of the things it wraps) is bucket
   345  	// based but not at the root.
   346  	isBucketBasedButNotRoot := func(f fs.Fs) bool {
   347  		return fs.UnWrapFs(f).Features().BucketBased && strings.Contains(strings.Trim(f.Root(), "/"), "/")
   348  	}
   349  
   350  	// Initialise the remote
   351  	fstest.Initialise()
   352  
   353  	// Set extra config if supplied
   354  	for _, item := range opt.ExtraConfig {
   355  		config.FileSet(item.Name, item.Key, item.Value)
   356  	}
   357  	if *fstest.RemoteName != "" {
   358  		remoteName = *fstest.RemoteName
   359  	}
   360  	oldFstestRemoteName := fstest.RemoteName
   361  	fstest.RemoteName = &remoteName
   362  	defer func() {
   363  		fstest.RemoteName = oldFstestRemoteName
   364  	}()
   365  	t.Logf("Using remote %q", remoteName)
   366  	var err error
   367  	if remoteName == "" {
   368  		remoteName, err = fstest.LocalRemote()
   369  		require.NoError(t, err)
   370  		isLocalRemote = true
   371  	}
   372  
   373  	// Start any test servers if required
   374  	finish, err := testserver.Start(remoteName)
   375  	require.NoError(t, err)
   376  	defer finish()
   377  
   378  	// Make the Fs we are testing with, initialising the local variables
   379  	// subRemoteName - name of the remote after the TestRemote:
   380  	// subRemoteLeaf - a subdirectory to use under that
   381  	// remote - the result of fs.NewFs(TestRemote:subRemoteName)
   382  	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
   383  	require.NoError(t, err)
   384  	remote, err = fs.NewFs(subRemoteName)
   385  	if err == fs.ErrorNotFoundInConfigFile {
   386  		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
   387  		return
   388  	}
   389  	require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
   390  
   391  	// Skip the rest if it failed
   392  	skipIfNotOk(t)
   393  
   394  	// Check to see if Fs that wrap other Fs implement all the optional methods
   395  	t.Run("FsCheckWrap", func(t *testing.T) {
   396  		skipIfNotOk(t)
   397  		if opt.SkipFsCheckWrap {
   398  			t.Skip("Skipping FsCheckWrap on this Fs")
   399  		}
   400  		ft := new(fs.Features).Fill(remote)
   401  		if ft.UnWrap == nil {
   402  			t.Skip("Not a wrapping Fs")
   403  		}
   404  		v := reflect.ValueOf(ft).Elem()
   405  		vType := v.Type()
   406  		for i := 0; i < v.NumField(); i++ {
   407  			vName := vType.Field(i).Name
   408  			if stringsContains(vName, opt.UnimplementableFsMethods) {
   409  				continue
   410  			}
   411  			field := v.Field(i)
   412  			// skip the bools
   413  			if field.Type().Kind() == reflect.Bool {
   414  				continue
   415  			}
   416  			if field.IsNil() {
   417  				t.Errorf("Missing Fs wrapper for %s", vName)
   418  			}
   419  		}
   420  	})
   421  
   422  	// TestFsRmdirNotFound tests deleting a non-existent directory
   423  	t.Run("FsRmdirNotFound", func(t *testing.T) {
   424  		skipIfNotOk(t)
   425  		if isBucketBasedButNotRoot(remote) {
   426  			t.Skip("Skipping test as non root bucket based remote")
   427  		}
   428  		err := remote.Rmdir(ctx, "")
   429  		assert.Error(t, err, "Expecting error on Rmdir non existent")
   430  	})
   431  
   432  	// Make the directory
   433  	err = remote.Mkdir(ctx, "")
   434  	require.NoError(t, err)
   435  	fstest.CheckListing(t, remote, []fstest.Item{})
   436  
   437  	// TestFsString tests the String method
   438  	t.Run("FsString", func(t *testing.T) {
   439  		skipIfNotOk(t)
   440  		str := remote.String()
   441  		require.NotEqual(t, "", str)
   442  	})
   443  
   444  	// TestFsName tests the Name method
   445  	t.Run("FsName", func(t *testing.T) {
   446  		skipIfNotOk(t)
   447  		got := remote.Name()
   448  		want := remoteName[:strings.LastIndex(remoteName, ":")+1]
   449  		if isLocalRemote {
   450  			want = "local:"
   451  		}
   452  		require.Equal(t, want, got+":")
   453  	})
   454  
   455  	// TestFsRoot tests the Root method
   456  	t.Run("FsRoot", func(t *testing.T) {
   457  		skipIfNotOk(t)
   458  		name := remote.Name() + ":"
   459  		root := remote.Root()
   460  		if isLocalRemote {
   461  			// only check last path element on local
   462  			require.Equal(t, filepath.Base(subRemoteName), filepath.Base(root))
   463  		} else {
   464  			require.Equal(t, subRemoteName, name+root)
   465  		}
   466  	})
   467  
   468  	// TestFsRmdirEmpty tests deleting an empty directory
   469  	t.Run("FsRmdirEmpty", func(t *testing.T) {
   470  		skipIfNotOk(t)
   471  		err := remote.Rmdir(ctx, "")
   472  		require.NoError(t, err)
   473  	})
   474  
   475  	// TestFsMkdir tests making a directory
   476  	//
   477  	// Tests that require the directory to be made are within this
   478  	t.Run("FsMkdir", func(t *testing.T) {
   479  		skipIfNotOk(t)
   480  
   481  		err := remote.Mkdir(ctx, "")
   482  		require.NoError(t, err)
   483  		fstest.CheckListing(t, remote, []fstest.Item{})
   484  
   485  		err = remote.Mkdir(ctx, "")
   486  		require.NoError(t, err)
   487  
   488  		// TestFsMkdirRmdirSubdir tests making and removing a sub directory
   489  		t.Run("FsMkdirRmdirSubdir", func(t *testing.T) {
   490  			skipIfNotOk(t)
   491  			dir := "dir/subdir"
   492  			err := operations.Mkdir(ctx, remote, dir)
   493  			require.NoError(t, err)
   494  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(remote))
   495  
   496  			err = operations.Rmdir(ctx, remote, dir)
   497  			require.NoError(t, err)
   498  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(remote))
   499  
   500  			err = operations.Rmdir(ctx, remote, "dir")
   501  			require.NoError(t, err)
   502  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote))
   503  		})
   504  
   505  		// TestFsListEmpty tests listing an empty directory
   506  		t.Run("FsListEmpty", func(t *testing.T) {
   507  			skipIfNotOk(t)
   508  			fstest.CheckListing(t, remote, []fstest.Item{})
   509  		})
   510  
   511  		// TestFsListDirEmpty tests listing the directories from an empty directory
   512  		TestFsListDirEmpty := func(t *testing.T) {
   513  			skipIfNotOk(t)
   514  			objs, dirs, err := walk.GetAll(ctx, remote, "", true, 1)
   515  			if !remote.Features().CanHaveEmptyDirectories {
   516  				if err != fs.ErrorDirNotFound {
   517  					require.NoError(t, err)
   518  				}
   519  			} else {
   520  				require.NoError(t, err)
   521  			}
   522  			assert.Equal(t, []string{}, objsToNames(objs))
   523  			assert.Equal(t, []string{}, dirsToNames(dirs))
   524  		}
   525  		t.Run("FsListDirEmpty", TestFsListDirEmpty)
   526  
   527  		// TestFsListRDirEmpty tests listing the directories from an empty directory using ListR
   528  		t.Run("FsListRDirEmpty", func(t *testing.T) {
   529  			defer skipIfNotListR(t)()
   530  			TestFsListDirEmpty(t)
   531  		})
   532  
   533  		// TestFsListDirNotFound tests listing the directories from a directory which does not exist
   534  		TestFsListDirNotFound := func(t *testing.T) {
   535  			skipIfNotOk(t)
   536  			objs, dirs, err := walk.GetAll(ctx, remote, "does not exist", true, 1)
   537  			if !remote.Features().CanHaveEmptyDirectories {
   538  				if err != fs.ErrorDirNotFound {
   539  					assert.NoError(t, err)
   540  					assert.Equal(t, 0, len(objs)+len(dirs))
   541  				}
   542  			} else {
   543  				assert.Equal(t, fs.ErrorDirNotFound, err)
   544  			}
   545  		}
   546  		t.Run("FsListDirNotFound", TestFsListDirNotFound)
   547  
   548  		// TestFsListRDirNotFound tests listing the directories from a directory which does not exist using ListR
   549  		t.Run("FsListRDirNotFound", func(t *testing.T) {
   550  			defer skipIfNotListR(t)()
   551  			TestFsListDirNotFound(t)
   552  		})
   553  
   554  		// FsEncoding tests that file name encodings are
   555  		// working by uploading a series of unusual files.
   556  		// Must be run in an empty directory.
   557  		t.Run("FsEncoding", func(t *testing.T) {
   558  			skipIfNotOk(t)
   559  
   560  			// check no files or dirs as pre-requisite
   561  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote))
   562  
   563  			for _, test := range []struct {
   564  				name string
   565  				path string
   566  			}{
   567  				// See lib/encoder/encoder.go for list of things that go here
   568  				{"control chars", "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7F"},
   569  				{"dot", "."},
   570  				{"dot dot", ".."},
   571  				{"punctuation", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"},
   572  				{"leading space", " leading space"},
   573  				{"leading tilde", "~leading tilde"},
   574  				{"leading CR", "\rleading CR"},
   575  				{"leading LF", "\nleading LF"},
   576  				{"leading HT", "\tleading HT"},
   577  				{"leading VT", "\vleading VT"},
   578  				{"leading dot", ".leading dot"},
   579  				{"trailing space", "trailing space "},
   580  				{"trailing CR", "trailing CR\r"},
   581  				{"trailing LF", "trailing LF\n"},
   582  				{"trailing HT", "trailing HT\t"},
   583  				{"trailing VT", "trailing VT\v"},
   584  				{"trailing dot", "trailing dot."},
   585  				{"invalid UTF-8", "invalid utf-8\xfe"},
   586  			} {
   587  				t.Run(test.name, func(t *testing.T) {
   588  					if opt.SkipInvalidUTF8 && test.name == "invalid UTF-8" {
   589  						t.Skip("Skipping " + test.name)
   590  					}
   591  					// turn raw strings into Standard encoding
   592  					fileName := encoder.Standard.Encode(test.path)
   593  					dirName := fileName
   594  					t.Logf("testing %q", fileName)
   595  					assert.NoError(t, remote.Mkdir(ctx, dirName))
   596  					file := fstest.Item{
   597  						ModTime: time.Now(),
   598  						Path:    dirName + "/" + fileName, // test creating a file and dir with that name
   599  					}
   600  					_, o := testPut(context.Background(), t, remote, &file)
   601  					fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file}, []string{dirName}, fs.GetModifyWindow(remote))
   602  					assert.NoError(t, o.Remove(ctx))
   603  					assert.NoError(t, remote.Rmdir(ctx, dirName))
   604  					fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote))
   605  				})
   606  			}
   607  		})
   608  
   609  		// TestFsNewObjectNotFound tests not finding an object
   610  		t.Run("FsNewObjectNotFound", func(t *testing.T) {
   611  			skipIfNotOk(t)
   612  			// Object in an existing directory
   613  			o, err := remote.NewObject(ctx, "potato")
   614  			assert.Nil(t, o)
   615  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   616  			// Now try an object in a non existing directory
   617  			o, err = remote.NewObject(ctx, "directory/not/found/potato")
   618  			assert.Nil(t, o)
   619  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   620  		})
   621  
   622  		// TestFsPutError tests uploading a file where there is an error
   623  		//
   624  		// It makes sure that aborting a file halfway through does not create
   625  		// a file on the remote.
   626  		//
   627  		// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$'
   628  		t.Run("FsPutError", func(t *testing.T) {
   629  			skipIfNotOk(t)
   630  
   631  			var N int64 = 5 * 1024
   632  			if *fstest.SizeLimit > 0 && N > *fstest.SizeLimit {
   633  				N = *fstest.SizeLimit
   634  				t.Logf("Reduce file size due to limit %d", N)
   635  			}
   636  
   637  			// Read N bytes then produce an error
   638  			contents := random.String(int(N))
   639  			buf := bytes.NewBufferString(contents)
   640  			er := &errorReader{errors.New("potato")}
   641  			in := io.MultiReader(buf, er)
   642  
   643  			obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil)
   644  			_, err := remote.Put(ctx, in, obji)
   645  			// assert.Nil(t, obj) - FIXME some remotes return the object even on error
   646  			assert.NotNil(t, err)
   647  
   648  			obj, err := remote.NewObject(ctx, file2.Path)
   649  			assert.Nil(t, obj)
   650  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   651  		})
   652  
   653  		t.Run("FsPutZeroLength", func(t *testing.T) {
   654  			skipIfNotOk(t)
   655  
   656  			TestPutLarge(ctx, t, remote, &fstest.Item{
   657  				ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
   658  				Path:    "zero-length-file",
   659  				Size:    int64(0),
   660  			})
   661  		})
   662  
   663  		t.Run("FsOpenWriterAt", func(t *testing.T) {
   664  			skipIfNotOk(t)
   665  			openWriterAt := remote.Features().OpenWriterAt
   666  			if openWriterAt == nil {
   667  				t.Skip("FS has no OpenWriterAt interface")
   668  			}
   669  			path := "writer-at-subdir/writer-at-file"
   670  			out, err := openWriterAt(ctx, path, -1)
   671  			require.NoError(t, err)
   672  
   673  			var n int
   674  			n, err = out.WriteAt([]byte("def"), 3)
   675  			assert.NoError(t, err)
   676  			assert.Equal(t, 3, n)
   677  			n, err = out.WriteAt([]byte("ghi"), 6)
   678  			assert.NoError(t, err)
   679  			assert.Equal(t, 3, n)
   680  			n, err = out.WriteAt([]byte("abc"), 0)
   681  			assert.NoError(t, err)
   682  			assert.Equal(t, 3, n)
   683  
   684  			assert.NoError(t, out.Close())
   685  
   686  			obj := findObject(ctx, t, remote, path)
   687  			assert.Equal(t, "abcdefghi", readObject(ctx, t, obj, -1), "contents of file differ")
   688  
   689  			assert.NoError(t, obj.Remove(ctx))
   690  			assert.NoError(t, remote.Rmdir(ctx, "writer-at-subdir"))
   691  		})
   692  
   693  		// TestFsChangeNotify tests that changes are properly
   694  		// propagated
   695  		//
   696  		// go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose
   697  		t.Run("FsChangeNotify", func(t *testing.T) {
   698  			skipIfNotOk(t)
   699  
   700  			// Check have ChangeNotify
   701  			doChangeNotify := remote.Features().ChangeNotify
   702  			if doChangeNotify == nil {
   703  				t.Skip("FS has no ChangeNotify interface")
   704  			}
   705  
   706  			err := operations.Mkdir(ctx, remote, "dir")
   707  			require.NoError(t, err)
   708  
   709  			pollInterval := make(chan time.Duration)
   710  			dirChanges := map[string]struct{}{}
   711  			objChanges := map[string]struct{}{}
   712  			doChangeNotify(ctx, func(x string, e fs.EntryType) {
   713  				fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e)
   714  				if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) {
   715  					fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e)
   716  					return
   717  				}
   718  				if e == fs.EntryDirectory {
   719  					dirChanges[x] = struct{}{}
   720  				} else if e == fs.EntryObject {
   721  					objChanges[x] = struct{}{}
   722  				}
   723  			}, pollInterval)
   724  			defer func() { close(pollInterval) }()
   725  			pollInterval <- time.Second
   726  
   727  			var dirs []string
   728  			for _, idx := range []int{1, 3, 2} {
   729  				dir := fmt.Sprintf("dir/subdir%d", idx)
   730  				err = operations.Mkdir(ctx, remote, dir)
   731  				require.NoError(t, err)
   732  				dirs = append(dirs, dir)
   733  			}
   734  
   735  			var objs []fs.Object
   736  			for _, idx := range []int{2, 4, 3} {
   737  				file := fstest.Item{
   738  					ModTime: time.Now(),
   739  					Path:    fmt.Sprintf("dir/file%d", idx),
   740  				}
   741  				_, o := testPut(ctx, t, remote, &file)
   742  				objs = append(objs, o)
   743  			}
   744  
   745  			// Looks for each item in wants in changes -
   746  			// if they are all found it returns true
   747  			contains := func(changes map[string]struct{}, wants []string) bool {
   748  				for _, want := range wants {
   749  					_, ok := changes[want]
   750  					if !ok {
   751  						return false
   752  					}
   753  				}
   754  				return true
   755  			}
   756  
   757  			// Wait a little while for the changes to come in
   758  			wantDirChanges := []string{"dir/subdir1", "dir/subdir3", "dir/subdir2"}
   759  			wantObjChanges := []string{"dir/file2", "dir/file4", "dir/file3"}
   760  			ok := false
   761  			for tries := 1; tries < 10; tries++ {
   762  				ok = contains(dirChanges, wantDirChanges) && contains(objChanges, wantObjChanges)
   763  				if ok {
   764  					break
   765  				}
   766  				t.Logf("Try %d/10 waiting for dirChanges and objChanges", tries)
   767  				time.Sleep(3 * time.Second)
   768  			}
   769  			if !ok {
   770  				t.Errorf("%+v does not contain %+v or \n%+v does not contain %+v", dirChanges, wantDirChanges, objChanges, wantObjChanges)
   771  			}
   772  
   773  			// tidy up afterwards
   774  			for _, o := range objs {
   775  				assert.NoError(t, o.Remove(ctx))
   776  			}
   777  			dirs = append(dirs, "dir")
   778  			for _, dir := range dirs {
   779  				assert.NoError(t, remote.Rmdir(ctx, dir))
   780  			}
   781  		})
   782  
   783  		// TestFsPutFiles writes file1 and file2 and tests an update
   784  		//
   785  		// Tests that require file1, file2 are within this
   786  		t.Run("FsPutFiles", func(t *testing.T) {
   787  			skipIfNotOk(t)
   788  			file1Contents, _ = testPut(ctx, t, remote, &file1)
   789  			/* file2Contents = */ testPut(ctx, t, remote, &file2)
   790  			file1Contents, _ = testPut(ctx, t, remote, &file1)
   791  			// Note that the next test will check there are no duplicated file names
   792  
   793  			// TestFsListDirFile2 tests the files are correctly uploaded by doing
   794  			// Depth 1 directory listings
   795  			TestFsListDirFile2 := func(t *testing.T) {
   796  				skipIfNotOk(t)
   797  				list := func(dir string, expectedDirNames, expectedObjNames []string) {
   798  					var objNames, dirNames []string
   799  					for i := 1; i <= *fstest.ListRetries; i++ {
   800  						objs, dirs, err := walk.GetAll(ctx, remote, dir, true, 1)
   801  						if errors.Cause(err) == fs.ErrorDirNotFound {
   802  							objs, dirs, err = walk.GetAll(ctx, remote, dir, true, 1)
   803  						}
   804  						require.NoError(t, err)
   805  						objNames = objsToNames(objs)
   806  						dirNames = dirsToNames(dirs)
   807  						if len(objNames) >= len(expectedObjNames) && len(dirNames) >= len(expectedDirNames) {
   808  							break
   809  						}
   810  						t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, *fstest.ListRetries)
   811  						time.Sleep(1 * time.Second)
   812  					}
   813  					assert.Equal(t, expectedDirNames, dirNames)
   814  					assert.Equal(t, expectedObjNames, objNames)
   815  				}
   816  				dir := file2.Path
   817  				deepest := true
   818  				for dir != "" {
   819  					expectedObjNames := []string{}
   820  					expectedDirNames := []string{}
   821  					child := dir
   822  					dir = path.Dir(dir)
   823  					if dir == "." {
   824  						dir = ""
   825  						expectedObjNames = append(expectedObjNames, file1.Path)
   826  					}
   827  					if deepest {
   828  						expectedObjNames = append(expectedObjNames, file2.Path)
   829  						deepest = false
   830  					} else {
   831  						expectedDirNames = append(expectedDirNames, child)
   832  					}
   833  					list(dir, expectedDirNames, expectedObjNames)
   834  				}
   835  			}
   836  			t.Run("FsListDirFile2", TestFsListDirFile2)
   837  
   838  			// TestFsListRDirFile2 tests the files are correctly uploaded by doing
   839  			// Depth 1 directory listings using ListR
   840  			t.Run("FsListRDirFile2", func(t *testing.T) {
   841  				defer skipIfNotListR(t)()
   842  				TestFsListDirFile2(t)
   843  			})
   844  
   845  			// Test the files are all there with walk.ListR recursive listings
   846  			t.Run("FsListR", func(t *testing.T) {
   847  				skipIfNotOk(t)
   848  				objs, dirs, err := walk.GetAll(ctx, remote, "", true, -1)
   849  				require.NoError(t, err)
   850  				assert.Equal(t, []string{
   851  					"hello? sausage",
   852  					"hello? sausage/êé",
   853  					"hello? sausage/êé/Hello, 世界",
   854  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
   855  				}, dirsToNames(dirs))
   856  				assert.Equal(t, []string{
   857  					"file name.txt",
   858  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
   859  				}, objsToNames(objs))
   860  			})
   861  
   862  			// Test the files are all there with
   863  			// walk.ListR recursive listings on a sub dir
   864  			t.Run("FsListRSubdir", func(t *testing.T) {
   865  				skipIfNotOk(t)
   866  				objs, dirs, err := walk.GetAll(ctx, remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1)
   867  				require.NoError(t, err)
   868  				assert.Equal(t, []string{
   869  					"hello? sausage/êé",
   870  					"hello? sausage/êé/Hello, 世界",
   871  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
   872  				}, dirsToNames(dirs))
   873  				assert.Equal(t, []string{
   874  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
   875  				}, objsToNames(objs))
   876  			})
   877  
   878  			// TestFsListDirRoot tests that DirList works in the root
   879  			TestFsListDirRoot := func(t *testing.T) {
   880  				skipIfNotOk(t)
   881  				rootRemote, err := fs.NewFs(remoteName)
   882  				require.NoError(t, err)
   883  				_, dirs, err := walk.GetAll(ctx, rootRemote, "", true, 1)
   884  				require.NoError(t, err)
   885  				assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
   886  			}
   887  			t.Run("FsListDirRoot", TestFsListDirRoot)
   888  
   889  			// TestFsListRDirRoot tests that DirList works in the root using ListR
   890  			t.Run("FsListRDirRoot", func(t *testing.T) {
   891  				defer skipIfNotListR(t)()
   892  				TestFsListDirRoot(t)
   893  			})
   894  
   895  			// TestFsListSubdir tests List works for a subdirectory
   896  			TestFsListSubdir := func(t *testing.T) {
   897  				skipIfNotOk(t)
   898  				fileName := file2.Path
   899  				var err error
   900  				var objs []fs.Object
   901  				var dirs []fs.Directory
   902  				for i := 0; i < 2; i++ {
   903  					dir, _ := path.Split(fileName)
   904  					dir = dir[:len(dir)-1]
   905  					objs, dirs, err = walk.GetAll(ctx, remote, dir, true, -1)
   906  				}
   907  				require.NoError(t, err)
   908  				require.Len(t, objs, 1)
   909  				assert.Equal(t, fileName, objs[0].Remote())
   910  				require.Len(t, dirs, 0)
   911  			}
   912  			t.Run("FsListSubdir", TestFsListSubdir)
   913  
   914  			// TestFsListRSubdir tests List works for a subdirectory using ListR
   915  			t.Run("FsListRSubdir", func(t *testing.T) {
   916  				defer skipIfNotListR(t)()
   917  				TestFsListSubdir(t)
   918  			})
   919  
   920  			// TestFsListLevel2 tests List works for 2 levels
   921  			TestFsListLevel2 := func(t *testing.T) {
   922  				skipIfNotOk(t)
   923  				objs, dirs, err := walk.GetAll(ctx, remote, "", true, 2)
   924  				if err == fs.ErrorLevelNotSupported {
   925  					return
   926  				}
   927  				require.NoError(t, err)
   928  				assert.Equal(t, []string{file1.Path}, objsToNames(objs))
   929  				assert.Equal(t, []string{"hello? sausage", "hello? sausage/êé"}, dirsToNames(dirs))
   930  			}
   931  			t.Run("FsListLevel2", TestFsListLevel2)
   932  
   933  			// TestFsListRLevel2 tests List works for 2 levels using ListR
   934  			t.Run("FsListRLevel2", func(t *testing.T) {
   935  				defer skipIfNotListR(t)()
   936  				TestFsListLevel2(t)
   937  			})
   938  
   939  			// TestFsListFile1 tests file present
   940  			t.Run("FsListFile1", func(t *testing.T) {
   941  				skipIfNotOk(t)
   942  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
   943  			})
   944  
   945  			// TestFsNewObject tests NewObject
   946  			t.Run("FsNewObject", func(t *testing.T) {
   947  				skipIfNotOk(t)
   948  				obj := findObject(ctx, t, remote, file1.Path)
   949  				file1.Check(t, obj, remote.Precision())
   950  			})
   951  
   952  			// TestFsListFile1and2 tests two files present
   953  			t.Run("FsListFile1and2", func(t *testing.T) {
   954  				skipIfNotOk(t)
   955  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
   956  			})
   957  
   958  			// TestFsNewObjectDir tests NewObject on a directory which should produce an error
   959  			t.Run("FsNewObjectDir", func(t *testing.T) {
   960  				skipIfNotOk(t)
   961  				dir := path.Dir(file2.Path)
   962  				obj, err := remote.NewObject(ctx, dir)
   963  				assert.Nil(t, obj)
   964  				assert.NotNil(t, err)
   965  			})
   966  
   967  			// TestFsCopy tests Copy
   968  			t.Run("FsCopy", func(t *testing.T) {
   969  				skipIfNotOk(t)
   970  
   971  				// Check have Copy
   972  				doCopy := remote.Features().Copy
   973  				if doCopy == nil {
   974  					t.Skip("FS has no Copier interface")
   975  				}
   976  
   977  				// Test with file2 so we have + and ' ' in the file name
   978  				var file2Copy = file2
   979  				file2Copy.Path += "-copy"
   980  
   981  				// do the copy
   982  				src := findObject(ctx, t, remote, file2.Path)
   983  				dst, err := doCopy(ctx, src, file2Copy.Path)
   984  				if err == fs.ErrorCantCopy {
   985  					t.Skip("FS can't copy")
   986  				}
   987  				require.NoError(t, err, fmt.Sprintf("Error: %#v", err))
   988  
   989  				// check file exists in new listing
   990  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file2Copy})
   991  
   992  				// Check dst lightly - list above has checked ModTime/Hashes
   993  				assert.Equal(t, file2Copy.Path, dst.Remote())
   994  
   995  				// Delete copy
   996  				err = dst.Remove(ctx)
   997  				require.NoError(t, err)
   998  
   999  			})
  1000  
  1001  			// TestFsMove tests Move
  1002  			t.Run("FsMove", func(t *testing.T) {
  1003  				skipIfNotOk(t)
  1004  
  1005  				// Check have Move
  1006  				doMove := remote.Features().Move
  1007  				if doMove == nil {
  1008  					t.Skip("FS has no Mover interface")
  1009  				}
  1010  
  1011  				// state of files now:
  1012  				// 1: file name.txt
  1013  				// 2: hello? sausage/../z.txt
  1014  
  1015  				var file1Move = file1
  1016  				var file2Move = file2
  1017  
  1018  				// check happy path, i.e. no naming conflicts when rename and move are two
  1019  				// separate operations
  1020  				file2Move.Path = "other.txt"
  1021  				src := findObject(ctx, t, remote, file2.Path)
  1022  				dst, err := doMove(ctx, src, file2Move.Path)
  1023  				if err == fs.ErrorCantMove {
  1024  					t.Skip("FS can't move")
  1025  				}
  1026  				require.NoError(t, err)
  1027  				// check file exists in new listing
  1028  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move})
  1029  				// Check dst lightly - list above has checked ModTime/Hashes
  1030  				assert.Equal(t, file2Move.Path, dst.Remote())
  1031  				// 1: file name.txt
  1032  				// 2: other.txt
  1033  
  1034  				// Check conflict on "rename, then move"
  1035  				file1Move.Path = "moveTest/other.txt"
  1036  				src = findObject(ctx, t, remote, file1.Path)
  1037  				_, err = doMove(ctx, src, file1Move.Path)
  1038  				require.NoError(t, err)
  1039  				fstest.CheckListing(t, remote, []fstest.Item{file1Move, file2Move})
  1040  				// 1: moveTest/other.txt
  1041  				// 2: other.txt
  1042  
  1043  				// Check conflict on "move, then rename"
  1044  				src = findObject(ctx, t, remote, file1Move.Path)
  1045  				_, err = doMove(ctx, src, file1.Path)
  1046  				require.NoError(t, err)
  1047  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move})
  1048  				// 1: file name.txt
  1049  				// 2: other.txt
  1050  
  1051  				src = findObject(ctx, t, remote, file2Move.Path)
  1052  				_, err = doMove(ctx, src, file2.Path)
  1053  				require.NoError(t, err)
  1054  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
  1055  				// 1: file name.txt
  1056  				// 2: hello? sausage/../z.txt
  1057  
  1058  				// Tidy up moveTest directory
  1059  				require.NoError(t, remote.Rmdir(ctx, "moveTest"))
  1060  			})
  1061  
  1062  			// Move src to this remote using server side move operations.
  1063  			//
  1064  			// Will only be called if src.Fs().Name() == f.Name()
  1065  			//
  1066  			// If it isn't possible then return fs.ErrorCantDirMove
  1067  			//
  1068  			// If destination exists then return fs.ErrorDirExists
  1069  
  1070  			// TestFsDirMove tests DirMove
  1071  			//
  1072  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$'
  1073  			t.Run("FsDirMove", func(t *testing.T) {
  1074  				skipIfNotOk(t)
  1075  
  1076  				// Check have DirMove
  1077  				doDirMove := remote.Features().DirMove
  1078  				if doDirMove == nil {
  1079  					t.Skip("FS has no DirMover interface")
  1080  				}
  1081  
  1082  				// Check it can't move onto itself
  1083  				err := doDirMove(ctx, remote, "", "")
  1084  				require.Equal(t, fs.ErrorDirExists, err)
  1085  
  1086  				// new remote
  1087  				newRemote, _, removeNewRemote, err := fstest.RandomRemote()
  1088  				require.NoError(t, err)
  1089  				defer removeNewRemote()
  1090  
  1091  				const newName = "new_name/sub_new_name"
  1092  				// try the move
  1093  				err = newRemote.Features().DirMove(ctx, remote, "", newName)
  1094  				require.NoError(t, err)
  1095  
  1096  				// check remotes
  1097  				// remote should not exist here
  1098  				_, err = remote.List(ctx, "")
  1099  				assert.Equal(t, fs.ErrorDirNotFound, errors.Cause(err))
  1100  				//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
  1101  				file1Copy := file1
  1102  				file1Copy.Path = path.Join(newName, file1.Path)
  1103  				file2Copy := file2
  1104  				file2Copy.Path = path.Join(newName, file2.Path)
  1105  				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{file2Copy, file1Copy}, []string{
  1106  					"new_name",
  1107  					"new_name/sub_new_name",
  1108  					"new_name/sub_new_name/hello? sausage",
  1109  					"new_name/sub_new_name/hello? sausage/êé",
  1110  					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界",
  1111  					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
  1112  				}, newRemote.Precision())
  1113  
  1114  				// move it back
  1115  				err = doDirMove(ctx, newRemote, newName, "")
  1116  				require.NoError(t, err)
  1117  
  1118  				// check remotes
  1119  				fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2, file1}, []string{
  1120  					"hello? sausage",
  1121  					"hello? sausage/êé",
  1122  					"hello? sausage/êé/Hello, 世界",
  1123  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
  1124  				}, remote.Precision())
  1125  				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{}, []string{
  1126  					"new_name",
  1127  				}, newRemote.Precision())
  1128  			})
  1129  
  1130  			// TestFsRmdirFull tests removing a non empty directory
  1131  			t.Run("FsRmdirFull", func(t *testing.T) {
  1132  				skipIfNotOk(t)
  1133  				if isBucketBasedButNotRoot(remote) {
  1134  					t.Skip("Skipping test as non root bucket based remote")
  1135  				}
  1136  				err := remote.Rmdir(ctx, "")
  1137  				require.Error(t, err, "Expecting error on RMdir on non empty remote")
  1138  			})
  1139  
  1140  			// TestFsPrecision tests the Precision of the Fs
  1141  			t.Run("FsPrecision", func(t *testing.T) {
  1142  				skipIfNotOk(t)
  1143  				precision := remote.Precision()
  1144  				if precision == fs.ModTimeNotSupported {
  1145  					return
  1146  				}
  1147  				if precision > time.Second || precision < 0 {
  1148  					t.Fatalf("Precision out of range %v", precision)
  1149  				}
  1150  				// FIXME check expected precision
  1151  			})
  1152  
  1153  			// TestObjectString tests the Object String method
  1154  			t.Run("ObjectString", func(t *testing.T) {
  1155  				skipIfNotOk(t)
  1156  				obj := findObject(ctx, t, remote, file1.Path)
  1157  				assert.Equal(t, file1.Path, obj.String())
  1158  				if opt.NilObject != nil {
  1159  					assert.Equal(t, "<nil>", opt.NilObject.String())
  1160  				}
  1161  			})
  1162  
  1163  			// TestObjectFs tests the object can be found
  1164  			t.Run("ObjectFs", func(t *testing.T) {
  1165  				skipIfNotOk(t)
  1166  				obj := findObject(ctx, t, remote, file1.Path)
  1167  				// If this is set we don't do the direct comparison of
  1168  				// the Fs from the object as it may be different
  1169  				if opt.SkipFsMatch {
  1170  					return
  1171  				}
  1172  				testRemote := remote
  1173  				if obj.Fs() != testRemote {
  1174  					// Check to see if this wraps something else
  1175  					if doUnWrap := testRemote.Features().UnWrap; doUnWrap != nil {
  1176  						testRemote = doUnWrap()
  1177  					}
  1178  				}
  1179  				assert.Equal(t, obj.Fs(), testRemote)
  1180  			})
  1181  
  1182  			// TestObjectRemote tests the Remote is correct
  1183  			t.Run("ObjectRemote", func(t *testing.T) {
  1184  				skipIfNotOk(t)
  1185  				obj := findObject(ctx, t, remote, file1.Path)
  1186  				assert.Equal(t, file1.Path, obj.Remote())
  1187  			})
  1188  
  1189  			// TestObjectHashes checks all the hashes the object supports
  1190  			t.Run("ObjectHashes", func(t *testing.T) {
  1191  				skipIfNotOk(t)
  1192  				obj := findObject(ctx, t, remote, file1.Path)
  1193  				file1.CheckHashes(t, obj)
  1194  			})
  1195  
  1196  			// TestObjectModTime tests the ModTime of the object is correct
  1197  			TestObjectModTime := func(t *testing.T) {
  1198  				skipIfNotOk(t)
  1199  				obj := findObject(ctx, t, remote, file1.Path)
  1200  				file1.CheckModTime(t, obj, obj.ModTime(ctx), remote.Precision())
  1201  			}
  1202  			t.Run("ObjectModTime", TestObjectModTime)
  1203  
  1204  			// TestObjectMimeType tests the MimeType of the object is correct
  1205  			t.Run("ObjectMimeType", func(t *testing.T) {
  1206  				skipIfNotOk(t)
  1207  				obj := findObject(ctx, t, remote, file1.Path)
  1208  				do, ok := obj.(fs.MimeTyper)
  1209  				if !ok {
  1210  					t.Skip("MimeType method not supported")
  1211  				}
  1212  				mimeType := do.MimeType(ctx)
  1213  				if strings.ContainsRune(mimeType, ';') {
  1214  					assert.Equal(t, "text/plain; charset=utf-8", mimeType)
  1215  				} else {
  1216  					assert.Equal(t, "text/plain", mimeType)
  1217  				}
  1218  			})
  1219  
  1220  			// TestObjectSetModTime tests that SetModTime works
  1221  			t.Run("ObjectSetModTime", func(t *testing.T) {
  1222  				skipIfNotOk(t)
  1223  				newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
  1224  				obj := findObject(ctx, t, remote, file1.Path)
  1225  				err := obj.SetModTime(ctx, newModTime)
  1226  				if err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete {
  1227  					t.Log(err)
  1228  					return
  1229  				}
  1230  				require.NoError(t, err)
  1231  				file1.ModTime = newModTime
  1232  				file1.CheckModTime(t, obj, obj.ModTime(ctx), remote.Precision())
  1233  				// And make a new object and read it from there too
  1234  				TestObjectModTime(t)
  1235  			})
  1236  
  1237  			// TestObjectSize tests that Size works
  1238  			t.Run("ObjectSize", func(t *testing.T) {
  1239  				skipIfNotOk(t)
  1240  				obj := findObject(ctx, t, remote, file1.Path)
  1241  				assert.Equal(t, file1.Size, obj.Size())
  1242  			})
  1243  
  1244  			// TestObjectOpen tests that Open works
  1245  			t.Run("ObjectOpen", func(t *testing.T) {
  1246  				skipIfNotOk(t)
  1247  				obj := findObject(ctx, t, remote, file1.Path)
  1248  				assert.Equal(t, file1Contents, readObject(ctx, t, obj, -1), "contents of file1 differ")
  1249  			})
  1250  
  1251  			// TestObjectOpenSeek tests that Open works with SeekOption
  1252  			t.Run("ObjectOpenSeek", func(t *testing.T) {
  1253  				skipIfNotOk(t)
  1254  				obj := findObject(ctx, t, remote, file1.Path)
  1255  				assert.Equal(t, file1Contents[50:], readObject(ctx, t, obj, -1, &fs.SeekOption{Offset: 50}), "contents of file1 differ after seek")
  1256  			})
  1257  
  1258  			// TestObjectOpenRange tests that Open works with RangeOption
  1259  			//
  1260  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|ObjectOpenRange)$'
  1261  			t.Run("ObjectOpenRange", func(t *testing.T) {
  1262  				skipIfNotOk(t)
  1263  				obj := findObject(ctx, t, remote, file1.Path)
  1264  				for _, test := range []struct {
  1265  					ro                 fs.RangeOption
  1266  					wantStart, wantEnd int
  1267  				}{
  1268  					{fs.RangeOption{Start: 5, End: 15}, 5, 16},
  1269  					{fs.RangeOption{Start: 80, End: -1}, 80, 100},
  1270  					{fs.RangeOption{Start: 81, End: 100000}, 81, 100},
  1271  					{fs.RangeOption{Start: -1, End: 20}, 80, 100}, // if start is omitted this means get the final bytes
  1272  					// {fs.RangeOption{Start: -1, End: -1}, 0, 100}, - this seems to work but the RFC doesn't define it
  1273  				} {
  1274  					got := readObject(ctx, t, obj, -1, &test.ro)
  1275  					foundAt := strings.Index(file1Contents, got)
  1276  					help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
  1277  					assert.Equal(t, file1Contents[test.wantStart:test.wantEnd], got, help)
  1278  				}
  1279  			})
  1280  
  1281  			// TestObjectPartialRead tests that reading only part of the object does the correct thing
  1282  			t.Run("ObjectPartialRead", func(t *testing.T) {
  1283  				skipIfNotOk(t)
  1284  				obj := findObject(ctx, t, remote, file1.Path)
  1285  				assert.Equal(t, file1Contents[:50], readObject(ctx, t, obj, 50), "contents of file1 differ after limited read")
  1286  			})
  1287  
  1288  			// TestObjectUpdate tests that Update works
  1289  			t.Run("ObjectUpdate", func(t *testing.T) {
  1290  				skipIfNotOk(t)
  1291  				contents := random.String(200)
  1292  				buf := bytes.NewBufferString(contents)
  1293  				hash := hash.NewMultiHasher()
  1294  				in := io.TeeReader(buf, hash)
  1295  
  1296  				file1.Size = int64(buf.Len())
  1297  				obj := findObject(ctx, t, remote, file1.Path)
  1298  				obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
  1299  				err := obj.Update(ctx, in, obji)
  1300  				require.NoError(t, err)
  1301  				file1.Hashes = hash.Sums()
  1302  
  1303  				// check the object has been updated
  1304  				file1.Check(t, obj, remote.Precision())
  1305  
  1306  				// Re-read the object and check again
  1307  				obj = findObject(ctx, t, remote, file1.Path)
  1308  				file1.Check(t, obj, remote.Precision())
  1309  
  1310  				// check contents correct
  1311  				assert.Equal(t, contents, readObject(ctx, t, obj, -1), "contents of updated file1 differ")
  1312  				file1Contents = contents
  1313  			})
  1314  
  1315  			// TestObjectStorable tests that Storable works
  1316  			t.Run("ObjectStorable", func(t *testing.T) {
  1317  				skipIfNotOk(t)
  1318  				obj := findObject(ctx, t, remote, file1.Path)
  1319  				require.True(t, obj.Storable(), "Expecting object to be storable")
  1320  			})
  1321  
  1322  			// TestFsIsFile tests that an error is returned along with a valid fs
  1323  			// which points to the parent directory.
  1324  			t.Run("FsIsFile", func(t *testing.T) {
  1325  				skipIfNotOk(t)
  1326  				remoteName := subRemoteName + "/" + file2.Path
  1327  				file2Copy := file2
  1328  				file2Copy.Path = "z.txt"
  1329  				fileRemote, err := fs.NewFs(remoteName)
  1330  				require.NotNil(t, fileRemote)
  1331  				assert.Equal(t, fs.ErrorIsFile, err)
  1332  
  1333  				if strings.HasPrefix(remoteName, "TestChunker") && strings.Contains(remoteName, "Nometa") {
  1334  					// TODO fix chunker and remove this bypass
  1335  					t.Logf("Skip listing check -- chunker can't yet handle this tricky case")
  1336  					return
  1337  				}
  1338  				fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
  1339  			})
  1340  
  1341  			// TestFsIsFileNotFound tests that an error is not returned if no object is found
  1342  			t.Run("FsIsFileNotFound", func(t *testing.T) {
  1343  				skipIfNotOk(t)
  1344  				remoteName := subRemoteName + "/not found.txt"
  1345  				fileRemote, err := fs.NewFs(remoteName)
  1346  				require.NoError(t, err)
  1347  				fstest.CheckListing(t, fileRemote, []fstest.Item{})
  1348  			})
  1349  
  1350  			// Test that things work from the root
  1351  			t.Run("FromRoot", func(t *testing.T) {
  1352  				if features := remote.Features(); features.BucketBased && !features.BucketBasedRootOK {
  1353  					t.Skip("Can't list from root on this remote")
  1354  				}
  1355  
  1356  				configName, configLeaf, err := fspath.Parse(subRemoteName)
  1357  				require.NoError(t, err)
  1358  				if configName == "" {
  1359  					configName, configLeaf = path.Split(subRemoteName)
  1360  				} else {
  1361  					configName += ":"
  1362  				}
  1363  				t.Logf("Opening root remote %q path %q from %q", configName, configLeaf, subRemoteName)
  1364  				rootRemote, err := fs.NewFs(configName)
  1365  				require.NoError(t, err)
  1366  
  1367  				file1Root := file1
  1368  				file1Root.Path = path.Join(configLeaf, file1Root.Path)
  1369  				file2Root := file2
  1370  				file2Root.Path = path.Join(configLeaf, file2Root.Path)
  1371  				var dirs []string
  1372  				dir := file2.Path
  1373  				for {
  1374  					dir = path.Dir(dir)
  1375  					if dir == "" || dir == "." || dir == "/" {
  1376  						break
  1377  					}
  1378  					dirs = append(dirs, path.Join(configLeaf, dir))
  1379  				}
  1380  
  1381  				// Check that we can see file1 and file2 from the root
  1382  				t.Run("List", func(t *testing.T) {
  1383  					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, dirs, rootRemote.Precision())
  1384  				})
  1385  
  1386  				// Check that listing the entries is OK
  1387  				t.Run("ListEntries", func(t *testing.T) {
  1388  					entries, err := rootRemote.List(context.Background(), configLeaf)
  1389  					require.NoError(t, err)
  1390  					fstest.CompareItems(t, entries, []fstest.Item{file1Root}, dirs[len(dirs)-1:], rootRemote.Precision(), "ListEntries")
  1391  				})
  1392  
  1393  				// List the root with ListR
  1394  				t.Run("ListR", func(t *testing.T) {
  1395  					doListR := rootRemote.Features().ListR
  1396  					if doListR == nil {
  1397  						t.Skip("FS has no ListR interface")
  1398  					}
  1399  					file1Found, file2Found := false, false
  1400  					stopTime := time.Now().Add(10 * time.Second)
  1401  					errTooMany := errors.New("too many files")
  1402  					errFound := errors.New("found")
  1403  					err := doListR(context.Background(), "", func(entries fs.DirEntries) error {
  1404  						for _, entry := range entries {
  1405  							remote := entry.Remote()
  1406  							if remote == file1Root.Path {
  1407  								file1Found = true
  1408  							}
  1409  							if remote == file2Root.Path {
  1410  								file2Found = true
  1411  							}
  1412  							if file1Found && file2Found {
  1413  								return errFound
  1414  							}
  1415  						}
  1416  						if time.Now().After(stopTime) {
  1417  							return errTooMany
  1418  						}
  1419  						return nil
  1420  					})
  1421  					if err != errFound && err != errTooMany {
  1422  						assert.NoError(t, err)
  1423  					}
  1424  					if err != errTooMany {
  1425  						assert.True(t, file1Found, "file1Root not found")
  1426  						assert.True(t, file2Found, "file2Root not found")
  1427  					} else {
  1428  						t.Logf("Too many files to list - giving up")
  1429  					}
  1430  				})
  1431  
  1432  				// Create a new file
  1433  				t.Run("Put", func(t *testing.T) {
  1434  					file3Root := fstest.Item{
  1435  						ModTime: time.Now(),
  1436  						Path:    path.Join(configLeaf, "created from root.txt"),
  1437  					}
  1438  					_, file3Obj := testPut(ctx, t, rootRemote, &file3Root)
  1439  					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root, file3Root}, nil, rootRemote.Precision())
  1440  
  1441  					// And then remove it
  1442  					t.Run("Remove", func(t *testing.T) {
  1443  						require.NoError(t, file3Obj.Remove(context.Background()))
  1444  						fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, nil, rootRemote.Precision())
  1445  					})
  1446  				})
  1447  			})
  1448  
  1449  			// TestPublicLink tests creation of sharable, public links
  1450  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|PublicLink)$'
  1451  			t.Run("PublicLink", func(t *testing.T) {
  1452  				skipIfNotOk(t)
  1453  
  1454  				doPublicLink := remote.Features().PublicLink
  1455  				if doPublicLink == nil {
  1456  					t.Skip("FS has no PublicLinker interface")
  1457  				}
  1458  
  1459  				// check that an error is returned if the object is not found
  1460  				link, err := doPublicLink(ctx, file1.Path+"_does_not_exist")
  1461  				require.Error(t, err, "Expected to get error when file doesn't exist")
  1462  				require.Equal(t, "", link, "Expected link to be empty on error")
  1463  
  1464  				// sharing file for the first time
  1465  				link1, err := doPublicLink(ctx, file1.Path)
  1466  				require.NoError(t, err)
  1467  				require.NotEqual(t, "", link1, "Link should not be empty")
  1468  
  1469  				link2, err := doPublicLink(ctx, file2.Path)
  1470  				require.NoError(t, err)
  1471  				require.NotEqual(t, "", link2, "Link should not be empty")
  1472  
  1473  				require.NotEqual(t, link1, link2, "Links to different files should differ")
  1474  
  1475  				// sharing file for the second time
  1476  				link1, err = doPublicLink(ctx, file1.Path)
  1477  				require.NoError(t, err)
  1478  				require.NotEqual(t, "", link1, "Link should not be empty")
  1479  
  1480  				// sharing directory for the first time
  1481  				dirToShare := path.Dir(file2.Path)
  1482  				link3, err := doPublicLink(ctx, dirToShare)
  1483  				if err != nil && errors.Cause(err) == fs.ErrorCantShareDirectories {
  1484  					t.Log("skipping directory tests as not supported on this backend")
  1485  				} else {
  1486  					require.NoError(t, err)
  1487  					require.NotEqual(t, "", link3, "Link should not be empty")
  1488  
  1489  					// sharing directory for the second time
  1490  					link3, err = doPublicLink(ctx, dirToShare)
  1491  					require.NoError(t, err)
  1492  					require.NotEqual(t, "", link3, "Link should not be empty")
  1493  
  1494  					// sharing the "root" directory in a subremote
  1495  					subRemote, _, removeSubRemote, err := fstest.RandomRemote()
  1496  					require.NoError(t, err)
  1497  					defer removeSubRemote()
  1498  					// ensure sub remote isn't empty
  1499  					buf := bytes.NewBufferString("somecontent")
  1500  					obji := object.NewStaticObjectInfo("somefile", time.Now(), int64(buf.Len()), true, nil, nil)
  1501  					_, err = subRemote.Put(ctx, buf, obji)
  1502  					require.NoError(t, err)
  1503  
  1504  					link4, err := subRemote.Features().PublicLink(ctx, "")
  1505  					require.NoError(t, err, "Sharing root in a sub-remote should work")
  1506  					require.NotEqual(t, "", link4, "Link should not be empty")
  1507  				}
  1508  			})
  1509  
  1510  			// TestSetTier tests SetTier and GetTier functionality
  1511  			t.Run("SetTier", func(t *testing.T) {
  1512  				skipIfNotSetTier(t)
  1513  				obj := findObject(ctx, t, remote, file1.Path)
  1514  				setter, ok := obj.(fs.SetTierer)
  1515  				require.True(t, ok, "%T does not implement fs.SetTierer", obj)
  1516  				getter, ok := obj.(fs.GetTierer)
  1517  				require.True(t, ok, "%T does not implement fs.GetTierer", obj)
  1518  				// If interfaces are supported TiersToTest should contain
  1519  				// at least one entry
  1520  				supportedTiers := opt.TiersToTest
  1521  				assert.NotEmpty(t, supportedTiers)
  1522  				// test set tier changes on supported storage classes or tiers
  1523  				for _, tier := range supportedTiers {
  1524  					err := setter.SetTier(tier)
  1525  					assert.Nil(t, err)
  1526  					got := getter.GetTier()
  1527  					assert.Equal(t, tier, got)
  1528  				}
  1529  			})
  1530  
  1531  			// Check that the Objects of an Fs which wraps another Fs implement all the optional methods
  1532  			t.Run("ObjectCheckWrap", func(t *testing.T) {
  1533  				skipIfNotOk(t)
  1534  				if opt.SkipObjectCheckWrap {
  1535  					t.Skip("Skipping ObjectCheckWrap on this Fs")
  1536  				}
  1537  				ft := new(fs.Features).Fill(remote)
  1538  				if ft.UnWrap == nil {
  1539  					t.Skip("Not a wrapping Fs")
  1540  				}
  1541  				obj := findObject(ctx, t, remote, file1.Path)
  1542  				_, unsupported := fs.ObjectOptionalInterfaces(obj)
  1543  				for _, name := range unsupported {
  1544  					if !stringsContains(name, opt.UnimplementableObjectMethods) {
  1545  						t.Errorf("Missing Object wrapper for %s", name)
  1546  					}
  1547  				}
  1548  			})
  1549  
  1550  			// TestObjectRemove tests Remove
  1551  			t.Run("ObjectRemove", func(t *testing.T) {
  1552  				skipIfNotOk(t)
  1553  				// remove file1
  1554  				obj := findObject(ctx, t, remote, file1.Path)
  1555  				err := obj.Remove(ctx)
  1556  				require.NoError(t, err)
  1557  				// check listing without modtime as TestPublicLink may change the modtime
  1558  				fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2}, nil, fs.ModTimeNotSupported)
  1559  			})
  1560  
  1561  			// TestObjectAbout tests the About optional interface
  1562  			t.Run("ObjectAbout", func(t *testing.T) {
  1563  				skipIfNotOk(t)
  1564  
  1565  				// Check have About
  1566  				doAbout := remote.Features().About
  1567  				if doAbout == nil {
  1568  					t.Skip("FS does not support About")
  1569  				}
  1570  
  1571  				// Can't really check the output much!
  1572  				usage, err := doAbout(context.Background())
  1573  				require.NoError(t, err)
  1574  				require.NotNil(t, usage)
  1575  				// usage.Total may be nil if the backend doesn't report a total,
        				// so guard before dereferencing it
        				if usage.Total != nil {
        					assert.NotEqual(t, int64(0), *usage.Total)
        				}
  1576  			})
  1577  
  1578  			// Just file2 remains for Purge to clean up
  1579  
  1580  			// TestFsPutStream tests uploading files when size isn't known in advance.
  1581  			// This may trigger large buffer allocation in some backends, keep it
  1582  			// close to the end of the suite. (See fs/operations/xtra_operations_test.go)
  1583  			t.Run("FsPutStream", func(t *testing.T) {
  1584  				skipIfNotOk(t)
  1585  				if remote.Features().PutStream == nil {
  1586  					t.Skip("FS has no PutStream interface")
  1587  				}
  1588  
  1589  				for _, contentSize := range []int{0, 100} {
  1590  					t.Run(strconv.Itoa(contentSize), func(t *testing.T) {
  1591  						file := fstest.Item{
  1592  							ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
  1593  							Path:    "piped data.txt",
  1594  							Size:    -1, // use unknown size during upload
  1595  						}
  1596  
  1597  						var (
  1598  							err        error
  1599  							obj        fs.Object
  1600  							uploadHash *hash.MultiHasher
  1601  						)
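        						// Feed the data through a MultiHasher so that the uploaded
        						// object's hashes can be verified afterwards.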
  1602  						retry(t, "PutStream", func() error {
  1603  							contents := random.String(contentSize)
  1604  							buf := bytes.NewBufferString(contents)
  1605  							uploadHash = hash.NewMultiHasher()
  1606  							in := io.TeeReader(buf, uploadHash)
  1607  
  1608  							file.Size = -1
  1609  							obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
  1610  							obj, err = remote.Features().PutStream(ctx, in, obji)
  1611  							return err
  1612  						})
  1613  						file.Hashes = uploadHash.Sums()
  1614  						file.Size = int64(contentSize) // use correct size when checking
  1615  						file.Check(t, obj, remote.Precision())
  1616  						// Re-read the object and check again
  1617  						obj = findObject(ctx, t, remote, file.Path)
  1618  						file.Check(t, obj, remote.Precision())
  1619  						require.NoError(t, obj.Remove(ctx))
  1620  					})
  1621  				}
  1622  			})
  1623  
  1624  			// TestInternal calls InternalTest() on the Fs
  1625  			t.Run("Internal", func(t *testing.T) {
  1626  				skipIfNotOk(t)
  1627  				if it, ok := remote.(InternalTester); ok {
  1628  					it.InternalTest(t)
  1629  				} else {
  1630  					t.Skipf("%T does not implement InternalTester", remote)
  1631  				}
  1632  			})
  1633  
  1634  		})
  1635  
  1636  		// TestFsPutChunked may trigger large buffer allocation with
  1637  		// some backends (see fs/operations/xtra_operations_test.go),
  1638  		// keep it closer to the end of the suite.
  1639  		t.Run("FsPutChunked", func(t *testing.T) {
  1640  			skipIfNotOk(t)
  1641  			if testing.Short() {
  1642  				t.Skip("not running with -short")
  1643  			}
  1644  
  1645  			setUploadChunkSizer, _ := remote.(SetUploadChunkSizer)
  1646  			if setUploadChunkSizer == nil {
  1647  				t.Skipf("%T does not implement SetUploadChunkSizer", remote)
  1648  			}
  1649  
  1650  			setUploadCutoffer, _ := remote.(SetUploadCutoffer)
  1651  
  1652  			minChunkSize := opt.ChunkedUpload.MinChunkSize
  1653  			if minChunkSize < 100 {
  1654  				minChunkSize = 100
  1655  			}
  1656  			if opt.ChunkedUpload.CeilChunkSize != nil {
  1657  				minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
  1658  			}
  1659  
  1660  			maxChunkSize := 2 * fs.MebiByte
  1661  			if maxChunkSize < 2*minChunkSize {
  1662  				maxChunkSize = 2 * minChunkSize
  1663  			}
  1664  			if opt.ChunkedUpload.MaxChunkSize > 0 && maxChunkSize > opt.ChunkedUpload.MaxChunkSize {
  1665  				maxChunkSize = opt.ChunkedUpload.MaxChunkSize
  1666  			}
  1667  			if opt.ChunkedUpload.CeilChunkSize != nil {
  1668  				maxChunkSize = opt.ChunkedUpload.CeilChunkSize(maxChunkSize)
  1669  			}
  1670  
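        			// next applies f to minChunkSize, falling back to minChunkSize if
        			// the result would exceed maxChunkSize.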
  1671  			next := func(f func(fs.SizeSuffix) fs.SizeSuffix) fs.SizeSuffix {
  1672  				s := f(minChunkSize)
  1673  				if s > maxChunkSize {
  1674  					s = minChunkSize
  1675  				}
  1676  				return s
  1677  			}
  1678  
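        			// Test a spread of chunk sizes between the minimum and maximum,
        			// including values rounded to different boundaries.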
  1679  			chunkSizes := fs.SizeSuffixList{
  1680  				minChunkSize,
  1681  				minChunkSize + (maxChunkSize-minChunkSize)/3,
  1682  				next(NextPowerOfTwo),
  1683  				next(NextMultipleOf(100000)),
  1684  				next(NextMultipleOf(100001)),
  1685  				maxChunkSize,
  1686  			}
  1687  			chunkSizes.Sort()
  1688  
  1689  			// Set the minimum chunk size, upload cutoff and reset it at the end
  1690  			oldChunkSize, err := setUploadChunkSizer.SetUploadChunkSize(minChunkSize)
  1691  			require.NoError(t, err)
  1692  			var oldUploadCutoff fs.SizeSuffix
  1693  			if setUploadCutoffer != nil {
  1694  				oldUploadCutoff, err = setUploadCutoffer.SetUploadCutoff(minChunkSize)
  1695  				require.NoError(t, err)
  1696  			}
  1697  			defer func() {
  1698  				_, err := setUploadChunkSizer.SetUploadChunkSize(oldChunkSize)
  1699  				assert.NoError(t, err)
  1700  				if setUploadCutoffer != nil {
  1701  					_, err := setUploadCutoffer.SetUploadCutoff(oldUploadCutoff)
  1702  					assert.NoError(t, err)
  1703  				}
  1704  			}()
  1705  
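        			// Run the upload tests for each chunk size, skipping duplicates
        			// which can appear after rounding with CeilChunkSize.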
  1706  			var lastCs fs.SizeSuffix
  1707  			for _, cs := range chunkSizes {
  1708  				if cs <= lastCs {
  1709  					continue
  1710  				}
  1711  				if opt.ChunkedUpload.CeilChunkSize != nil {
  1712  					cs = opt.ChunkedUpload.CeilChunkSize(cs)
  1713  				}
  1714  				lastCs = cs
  1715  
  1716  				t.Run(cs.String(), func(t *testing.T) {
  1717  					_, err := setUploadChunkSizer.SetUploadChunkSize(cs)
  1718  					require.NoError(t, err)
  1719  					if setUploadCutoffer != nil {
  1720  						_, err = setUploadCutoffer.SetUploadCutoff(cs)
  1721  						require.NoError(t, err)
  1722  					}
  1723  
  1724  					var testChunks []fs.SizeSuffix
  1725  					if opt.ChunkedUpload.NeedMultipleChunks {
  1726  						// If NeedMultipleChunks is set then test with > cs
  1727  						testChunks = []fs.SizeSuffix{cs + 1, 2 * cs, 2*cs + 1}
  1728  					} else {
  1729  						testChunks = []fs.SizeSuffix{cs - 1, cs, 2*cs + 1}
  1730  					}
  1731  
  1732  					for _, fileSize := range testChunks {
  1733  						t.Run(fmt.Sprintf("%d", fileSize), func(t *testing.T) {
  1734  							TestPutLarge(ctx, t, remote, &fstest.Item{
  1735  								ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
  1736  								Path:    fmt.Sprintf("chunked-%s-%s.bin", cs.String(), fileSize.String()),
  1737  								Size:    int64(fileSize),
  1738  							})
  1739  						})
  1740  					}
  1741  				})
  1742  			}
  1743  		})
  1744  
  1745  		// TestFsUploadUnknownSize ensures Fs.Put() and Object.Update() don't panic when
  1746  		// src.Size() == -1
  1747  		//
  1748  		// This may trigger large buffer allocation in some backends, keep it
  1749  		// closer to the suite end. (See fs/operations/xtra_operations_test.go)
  1750  		t.Run("FsUploadUnknownSize", func(t *testing.T) {
  1751  			skipIfNotOk(t)
  1752  
  1753  			t.Run("FsPutUnknownSize", func(t *testing.T) {
  1754  				defer func() {
  1755  					assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1")
  1756  				}()
  1757  
  1758  				contents := random.String(100)
  1759  				in := bytes.NewBufferString(contents)
  1760  
  1761  				obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
  1762  				obj, err := remote.Put(ctx, in, obji)
  1763  				if err == nil {
  1764  					require.NoError(t, obj.Remove(ctx), "successfully uploaded unknown-sized file but failed to remove")
  1765  				}
  1766  				// if err != nil: it's okay as long as no panic
  1767  			})
  1768  
  1769  			t.Run("FsUpdateUnknownSize", func(t *testing.T) {
  1770  				unknownSizeUpdateFile := fstest.Item{
  1771  					ModTime: fstest.Time("2002-02-03T04:05:06.499999999Z"),
  1772  					Path:    "unknown-size-update.txt",
  1773  				}
  1774  
  1775  				testPut(ctx, t, remote, &unknownSizeUpdateFile)
  1776  
  1777  				defer func() {
  1778  					assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1")
  1779  				}()
  1780  
  1781  				newContents := random.String(200)
  1782  				in := bytes.NewBufferString(newContents)
  1783  
  1784  				obj := findObject(ctx, t, remote, unknownSizeUpdateFile.Path)
  1785  				obji := object.NewStaticObjectInfo(unknownSizeUpdateFile.Path, unknownSizeUpdateFile.ModTime, -1, true, nil, obj.Fs())
  1786  				err := obj.Update(ctx, in, obji)
  1787  				if err == nil {
  1788  					require.NoError(t, obj.Remove(ctx), "successfully updated object with unknown-sized source but failed to remove")
  1789  				}
  1790  				// if err != nil: it's okay as long as no panic
  1791  			})
  1792  
  1793  		})
  1794  
  1795  		// TestFsRootCollapse tests if the root of an fs "collapses" to the
  1796  		// absolute root. It creates a new fs of the same backend type with its
  1797  		// root set to a *non-existent* folder, and attempts to read the info of
  1798  		// an object in that folder, whose name is taken from a directory that
  1799  		// exists in the absolute root.
  1800  		// This test is added after
  1801  		// https://github.com/rclone/rclone/issues/3164.
  1802  		t.Run("FsRootCollapse", func(t *testing.T) {
  1803  			deepRemoteName := subRemoteName + "/deeper/nonexisting/directory"
  1804  			deepRemote, err := fs.NewFs(deepRemoteName)
  1805  			require.NoError(t, err)
  1806  
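        			// firstDir is the first directory under the absolute root (the text
        			// between the config name and the first slash of the path).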
  1807  			colonIndex := strings.IndexRune(deepRemoteName, ':')
  1808  			firstSlashIndex := strings.IndexRune(deepRemoteName, '/')
  1809  			firstDir := deepRemoteName[colonIndex+1 : firstSlashIndex]
  1810  			_, err = deepRemote.NewObject(ctx, firstDir)
  1811  			require.Equal(t, fs.ErrorObjectNotFound, err)
  1812  			// If err is not fs.ErrorObjectNotFound, it means the backend is
  1813  			// somehow confused about root and absolute root.
  1814  		})
  1815  
  1816  		// Purge the folder
  1817  		err = operations.Purge(ctx, remote, "")
  1818  		if errors.Cause(err) != fs.ErrorDirNotFound {
  1819  			require.NoError(t, err)
  1820  		}
  1821  		purged = true
  1822  		fstest.CheckListing(t, remote, []fstest.Item{})
  1823  
  1824  		// Check purging again if not bucket based
  1825  		if !isBucketBasedButNotRoot(remote) {
  1826  			err = operations.Purge(ctx, remote, "")
  1827  			assert.Error(t, err, "Expecting error on second purge")
  1828  		}
  1829  
  1830  	})
  1831  
  1832  	// Check directory is purged
  1833  	if !purged {
  1834  		_ = operations.Purge(ctx, remote, "")
  1835  	}
  1836  
  1837  	// Remove the local directory so we don't clutter up /tmp
  1838  	if strings.HasPrefix(remoteName, "/") {
  1839  		t.Log("remoteName", remoteName)
  1840  		// Remove temp directory
  1841  		err := os.Remove(remoteName)
  1842  		require.NoError(t, err)
  1843  	}
  1844  }