github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/fstest/fstests/fstests.go (about)

     1  // Package fstests provides generic integration tests for the Fs and
     2  // Object interfaces.
     3  //
     4  // These tests are concerned with the basic functionality of a
     5  // backend.  The tests in fs/sync and fs/operations tests more
     6  // cornercases that these tests don't.
     7  package fstests
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"io"
    14  	"io/ioutil"
    15  	"math/bits"
    16  	"os"
    17  	"path"
    18  	"path/filepath"
    19  	"reflect"
    20  	"sort"
    21  	"strconv"
    22  	"strings"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/pkg/errors"
    27  	"github.com/rclone/rclone/fs"
    28  	"github.com/rclone/rclone/fs/config"
    29  	"github.com/rclone/rclone/fs/fserrors"
    30  	"github.com/rclone/rclone/fs/fspath"
    31  	"github.com/rclone/rclone/fs/hash"
    32  	"github.com/rclone/rclone/fs/object"
    33  	"github.com/rclone/rclone/fs/operations"
    34  	"github.com/rclone/rclone/fs/walk"
    35  	"github.com/rclone/rclone/fstest"
    36  	"github.com/rclone/rclone/fstest/testserver"
    37  	"github.com/rclone/rclone/lib/encoder"
    38  	"github.com/rclone/rclone/lib/random"
    39  	"github.com/rclone/rclone/lib/readers"
    40  	"github.com/stretchr/testify/assert"
    41  	"github.com/stretchr/testify/require"
    42  )
    43  
    44  // InternalTester is an optional interface for Fs which allows to execute internal tests
    45  //
    46  // This interface should be implemented in 'backend'_internal_test.go and not in 'backend'.go
    47  type InternalTester interface {
    48  	InternalTest(*testing.T)
    49  }
    50  
    51  // ChunkedUploadConfig contains the values used by TestFsPutChunked
    52  // to determine the limits of chunked uploading
    53  type ChunkedUploadConfig struct {
    54  	// Minimum allowed chunk size
    55  	MinChunkSize fs.SizeSuffix
    56  	// Maximum allowed chunk size, 0 is no limit
    57  	MaxChunkSize fs.SizeSuffix
    58  	// Rounds the given chunk size up to the next valid value
    59  	// nil will disable rounding
    60  	// e.g. the next power of 2
    61  	CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
    62  	// More than one chunk is required on upload
    63  	NeedMultipleChunks bool
    64  }
    65  
    66  // SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
    67  type SetUploadChunkSizer interface {
    68  	// Change the configured UploadChunkSize.
    69  	// Will only be called while no transfer is in progress.
    70  	SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
    71  }
    72  
    73  // SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime
    74  type SetUploadCutoffer interface {
    75  	// Change the configured UploadCutoff.
    76  	// Will only be called while no transfer is in progress.
    77  	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
    78  }
    79  
    80  // NextPowerOfTwo returns the current or next bigger power of two.
    81  // All values less or equal 0 will return 0
    82  func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
    83  	return 1 << uint(64-bits.LeadingZeros64(uint64(i)-1))
    84  }
    85  
    86  // NextMultipleOf returns a function that can be used as a CeilChunkSize function.
    87  // This function will return the next multiple of m that is equal or bigger than i.
    88  // All values less or equal 0 will return 0.
    89  func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
    90  	if m <= 0 {
    91  		panic(fmt.Sprintf("invalid multiplier %s", m))
    92  	}
    93  	return func(i fs.SizeSuffix) fs.SizeSuffix {
    94  		if i <= 0 {
    95  			return 0
    96  		}
    97  
    98  		return (((i - 1) / m) + 1) * m
    99  	}
   100  }
   101  
   102  // dirsToNames returns a sorted list of names
   103  func dirsToNames(dirs []fs.Directory) []string {
   104  	names := []string{}
   105  	for _, dir := range dirs {
   106  		names = append(names, fstest.Normalize(dir.Remote()))
   107  	}
   108  	sort.Strings(names)
   109  	return names
   110  }
   111  
   112  // objsToNames returns a sorted list of object names
   113  func objsToNames(objs []fs.Object) []string {
   114  	names := []string{}
   115  	for _, obj := range objs {
   116  		names = append(names, fstest.Normalize(obj.Remote()))
   117  	}
   118  	sort.Strings(names)
   119  	return names
   120  }
   121  
   122  // findObject finds the object on the remote
   123  func findObject(ctx context.Context, t *testing.T, f fs.Fs, Name string) fs.Object {
   124  	var obj fs.Object
   125  	var err error
   126  	sleepTime := 1 * time.Second
   127  	for i := 1; i <= *fstest.ListRetries; i++ {
   128  		obj, err = f.NewObject(ctx, Name)
   129  		if err == nil {
   130  			break
   131  		}
   132  		t.Logf("Sleeping for %v for findObject eventual consistency: %d/%d (%v)", sleepTime, i, *fstest.ListRetries, err)
   133  		time.Sleep(sleepTime)
   134  		sleepTime = (sleepTime * 3) / 2
   135  	}
   136  	require.NoError(t, err)
   137  	return obj
   138  }
   139  
   140  // retry f() until no retriable error
   141  func retry(t *testing.T, what string, f func() error) {
   142  	const maxTries = 10
   143  	var err error
   144  	for tries := 1; tries <= maxTries; tries++ {
   145  		err = f()
   146  		// exit if no error, or error is not retriable
   147  		if err == nil || !fserrors.IsRetryError(err) {
   148  			break
   149  		}
   150  		t.Logf("%s error: %v - low level retry %d/%d", what, err, tries, maxTries)
   151  		time.Sleep(2 * time.Second)
   152  	}
   153  	require.NoError(t, err, what)
   154  }
   155  
   156  // testPut puts file with random contents to the remote
   157  func testPut(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
   158  	return PutTestContents(ctx, t, f, file, random.String(100), true)
   159  }
   160  
   161  // PutTestContents puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove
   162  func PutTestContents(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool) (string, fs.Object) {
   163  	var (
   164  		err        error
   165  		obj        fs.Object
   166  		uploadHash *hash.MultiHasher
   167  	)
   168  	retry(t, "Put", func() error {
   169  		buf := bytes.NewBufferString(contents)
   170  		uploadHash = hash.NewMultiHasher()
   171  		in := io.TeeReader(buf, uploadHash)
   172  
   173  		file.Size = int64(buf.Len())
   174  		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
   175  		obj, err = f.Put(ctx, in, obji)
   176  		return err
   177  	})
   178  	file.Hashes = uploadHash.Sums()
   179  	if check {
   180  		file.Check(t, obj, f.Precision())
   181  		// Re-read the object and check again
   182  		obj = findObject(ctx, t, f, file.Path)
   183  		file.Check(t, obj, f.Precision())
   184  	}
   185  	return contents, obj
   186  }
   187  
   188  // TestPutLarge puts file to the remote, checks it and removes it on success.
   189  func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) {
   190  	var (
   191  		err        error
   192  		obj        fs.Object
   193  		uploadHash *hash.MultiHasher
   194  	)
   195  	retry(t, "PutLarge", func() error {
   196  		r := readers.NewPatternReader(file.Size)
   197  		uploadHash = hash.NewMultiHasher()
   198  		in := io.TeeReader(r, uploadHash)
   199  
   200  		obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
   201  		obj, err = f.Put(ctx, in, obji)
   202  		if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles {
   203  			t.Skip("Can't upload zero length files")
   204  		}
   205  		return err
   206  	})
   207  	file.Hashes = uploadHash.Sums()
   208  	file.Check(t, obj, f.Precision())
   209  
   210  	// Re-read the object and check again
   211  	obj = findObject(ctx, t, f, file.Path)
   212  	file.Check(t, obj, f.Precision())
   213  
   214  	// Download the object and check it is OK
   215  	downloadHash := hash.NewMultiHasher()
   216  	download, err := obj.Open(ctx)
   217  	require.NoError(t, err)
   218  	n, err := io.Copy(downloadHash, download)
   219  	require.NoError(t, err)
   220  	assert.Equal(t, file.Size, n)
   221  	require.NoError(t, download.Close())
   222  	assert.Equal(t, file.Hashes, downloadHash.Sums())
   223  
   224  	// Remove the object
   225  	require.NoError(t, obj.Remove(ctx))
   226  }
   227  
   228  // read the contents of an object as a string
   229  func readObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string {
   230  	what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options)
   231  	in, err := obj.Open(ctx, options...)
   232  	require.NoError(t, err, what)
   233  	var r io.Reader = in
   234  	if limit >= 0 {
   235  		r = &io.LimitedReader{R: r, N: limit}
   236  	}
   237  	contents, err := ioutil.ReadAll(r)
   238  	require.NoError(t, err, what)
   239  	err = in.Close()
   240  	require.NoError(t, err, what)
   241  	return string(contents)
   242  }
   243  
   244  // ExtraConfigItem describes a config item for the tests
   245  type ExtraConfigItem struct{ Name, Key, Value string }
   246  
   247  // Opt is options for Run
   248  type Opt struct {
   249  	RemoteName                   string
   250  	NilObject                    fs.Object
   251  	ExtraConfig                  []ExtraConfigItem
   252  	SkipBadWindowsCharacters     bool     // skips unusable characters for windows if set
   253  	SkipFsMatch                  bool     // if set skip exact matching of Fs value
   254  	TiersToTest                  []string // List of tiers which can be tested in setTier test
   255  	ChunkedUpload                ChunkedUploadConfig
   256  	UnimplementableFsMethods     []string // List of methods which can't be implemented in this wrapping Fs
   257  	UnimplementableObjectMethods []string // List of methods which can't be implemented in this wrapping Fs
   258  	SkipFsCheckWrap              bool     // if set skip FsCheckWrap
   259  	SkipObjectCheckWrap          bool     // if set skip ObjectCheckWrap
   260  	SkipInvalidUTF8              bool     // if set skip invalid UTF-8 checks
   261  }
   262  
   263  // returns true if x is found in ss
   264  func stringsContains(x string, ss []string) bool {
   265  	for _, s := range ss {
   266  		if x == s {
   267  			return true
   268  		}
   269  	}
   270  	return false
   271  }
   272  
   273  // Run runs the basic integration tests for a remote using the options passed in.
   274  //
   275  // They are structured in a hierarchical way so that dependencies for the tests can be created.
   276  //
   277  // For example some tests require the directory to be created - these
   278  // are inside the "FsMkdir" test.  Some tests require some tests files
   279  // - these are inside the "FsPutFiles" test.
   280  func Run(t *testing.T, opt *Opt) {
   281  	var (
   282  		remote        fs.Fs
   283  		remoteName    = opt.RemoteName
   284  		subRemoteName string
   285  		subRemoteLeaf string
   286  		file1         = fstest.Item{
   287  			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
   288  			Path:    "file name.txt",
   289  		}
   290  		file1Contents string
   291  		file2         = fstest.Item{
   292  			ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
   293  			Path:    `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
   294  		}
   295  		isLocalRemote        bool
   296  		purged               bool // whether the dir has been purged or not
   297  		ctx                  = context.Background()
   298  		unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever
   299  	)
   300  
   301  	if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" {
   302  		t.Skip("quicktest only")
   303  	}
   304  
   305  	// Skip the test if the remote isn't configured
   306  	skipIfNotOk := func(t *testing.T) {
   307  		if remote == nil {
   308  			t.Skipf("WARN: %q not configured", remoteName)
   309  		}
   310  	}
   311  
   312  	// Skip if remote is not ListR capable, otherwise set the useListR
   313  	// flag, returning a function to restore its value
   314  	skipIfNotListR := func(t *testing.T) func() {
   315  		skipIfNotOk(t)
   316  		if remote.Features().ListR == nil {
   317  			t.Skip("FS has no ListR interface")
   318  		}
   319  		previous := fs.Config.UseListR
   320  		fs.Config.UseListR = true
   321  		return func() {
   322  			fs.Config.UseListR = previous
   323  		}
   324  	}
   325  
   326  	// Skip if remote is not SetTier and GetTier capable
   327  	skipIfNotSetTier := func(t *testing.T) {
   328  		skipIfNotOk(t)
   329  		if remote.Features().SetTier == false ||
   330  			remote.Features().GetTier == false {
   331  			t.Skip("FS has no SetTier & GetTier interfaces")
   332  		}
   333  	}
   334  
   335  	// Return true if f (or any of the things it wraps) is bucket
   336  	// based but not at the root.
   337  	isBucketBasedButNotRoot := func(f fs.Fs) bool {
   338  		return fs.UnWrapFs(f).Features().BucketBased && strings.Contains(strings.Trim(f.Root(), "/"), "/")
   339  	}
   340  
   341  	// Initialise the remote
   342  	fstest.Initialise()
   343  
   344  	// Set extra config if supplied
   345  	for _, item := range opt.ExtraConfig {
   346  		config.FileSet(item.Name, item.Key, item.Value)
   347  	}
   348  	if *fstest.RemoteName != "" {
   349  		remoteName = *fstest.RemoteName
   350  	}
   351  	oldFstestRemoteName := fstest.RemoteName
   352  	fstest.RemoteName = &remoteName
   353  	defer func() {
   354  		fstest.RemoteName = oldFstestRemoteName
   355  	}()
   356  	t.Logf("Using remote %q", remoteName)
   357  	var err error
   358  	if remoteName == "" {
   359  		remoteName, err = fstest.LocalRemote()
   360  		require.NoError(t, err)
   361  		isLocalRemote = true
   362  	}
   363  
   364  	// Start any test servers if required
   365  	finish, err := testserver.Start(remoteName)
   366  	require.NoError(t, err)
   367  	defer finish()
   368  
   369  	// Make the Fs we are testing with, initialising the local variables
   370  	// subRemoteName - name of the remote after the TestRemote:
   371  	// subRemoteLeaf - a subdirectory to use under that
   372  	// remote - the result of  fs.NewFs(TestRemote:subRemoteName)
   373  	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
   374  	require.NoError(t, err)
   375  	remote, err = fs.NewFs(subRemoteName)
   376  	if err == fs.ErrorNotFoundInConfigFile {
   377  		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
   378  		return
   379  	}
   380  	require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
   381  
   382  	// Skip the rest if it failed
   383  	skipIfNotOk(t)
   384  
   385  	// Check to see if Fs that wrap other Fs implement all the optional methods
   386  	t.Run("FsCheckWrap", func(t *testing.T) {
   387  		skipIfNotOk(t)
   388  		if opt.SkipFsCheckWrap {
   389  			t.Skip("Skipping FsCheckWrap on this Fs")
   390  		}
   391  		ft := new(fs.Features).Fill(remote)
   392  		if ft.UnWrap == nil {
   393  			t.Skip("Not a wrapping Fs")
   394  		}
   395  		v := reflect.ValueOf(ft).Elem()
   396  		vType := v.Type()
   397  		for i := 0; i < v.NumField(); i++ {
   398  			vName := vType.Field(i).Name
   399  			if stringsContains(vName, opt.UnimplementableFsMethods) {
   400  				continue
   401  			}
   402  			if stringsContains(vName, unwrappableFsMethods) {
   403  				continue
   404  			}
   405  			field := v.Field(i)
   406  			// skip the bools
   407  			if field.Type().Kind() == reflect.Bool {
   408  				continue
   409  			}
   410  			if field.IsNil() {
   411  				t.Errorf("Missing Fs wrapper for %s", vName)
   412  			}
   413  		}
   414  	})
   415  
   416  	// Check to see if Fs advertises commands and they work and have docs
   417  	t.Run("FsCommand", func(t *testing.T) {
   418  		skipIfNotOk(t)
   419  		doCommand := remote.Features().Command
   420  		if doCommand == nil {
   421  			t.Skip("No commands in this remote")
   422  		}
   423  		// Check the correct error is generated
   424  		_, err := doCommand(context.Background(), "NOTFOUND", nil, nil)
   425  		assert.Equal(t, fs.ErrorCommandNotFound, err, "Incorrect error generated on command not found")
   426  		// Check there are some commands in the fsInfo
   427  		fsInfo, _, _, _, err := fs.ConfigFs(remoteName)
   428  		require.NoError(t, err)
   429  		assert.True(t, len(fsInfo.CommandHelp) > 0, "Command is declared, must return some help in CommandHelp")
   430  	})
   431  
   432  	// TestFsRmdirNotFound tests deleting a non existent directory
   433  	t.Run("FsRmdirNotFound", func(t *testing.T) {
   434  		skipIfNotOk(t)
   435  		if isBucketBasedButNotRoot(remote) {
   436  			t.Skip("Skipping test as non root bucket based remote")
   437  		}
   438  		err := remote.Rmdir(ctx, "")
   439  		assert.Error(t, err, "Expecting error on Rmdir non existent")
   440  	})
   441  
   442  	// Make the directory
   443  	err = remote.Mkdir(ctx, "")
   444  	require.NoError(t, err)
   445  	fstest.CheckListing(t, remote, []fstest.Item{})
   446  
   447  	// TestFsString tests the String method
   448  	t.Run("FsString", func(t *testing.T) {
   449  		skipIfNotOk(t)
   450  		str := remote.String()
   451  		require.NotEqual(t, "", str)
   452  	})
   453  
   454  	// TestFsName tests the Name method
   455  	t.Run("FsName", func(t *testing.T) {
   456  		skipIfNotOk(t)
   457  		got := remote.Name()
   458  		want := remoteName[:strings.LastIndex(remoteName, ":")+1]
   459  		if isLocalRemote {
   460  			want = "local:"
   461  		}
   462  		require.Equal(t, want, got+":")
   463  	})
   464  
   465  	// TestFsRoot tests the Root method
   466  	t.Run("FsRoot", func(t *testing.T) {
   467  		skipIfNotOk(t)
   468  		name := remote.Name() + ":"
   469  		root := remote.Root()
   470  		if isLocalRemote {
   471  			// only check last path element on local
   472  			require.Equal(t, filepath.Base(subRemoteName), filepath.Base(root))
   473  		} else {
   474  			require.Equal(t, subRemoteName, name+root)
   475  		}
   476  	})
   477  
   478  	// TestFsRmdirEmpty tests deleting an empty directory
   479  	t.Run("FsRmdirEmpty", func(t *testing.T) {
   480  		skipIfNotOk(t)
   481  		err := remote.Rmdir(ctx, "")
   482  		require.NoError(t, err)
   483  	})
   484  
   485  	// TestFsMkdir tests making a directory
   486  	//
   487  	// Tests that require the directory to be made are within this
   488  	t.Run("FsMkdir", func(t *testing.T) {
   489  		skipIfNotOk(t)
   490  
   491  		err := remote.Mkdir(ctx, "")
   492  		require.NoError(t, err)
   493  		fstest.CheckListing(t, remote, []fstest.Item{})
   494  
   495  		err = remote.Mkdir(ctx, "")
   496  		require.NoError(t, err)
   497  
   498  		// TestFsMkdirRmdirSubdir tests making and removing a sub directory
   499  		t.Run("FsMkdirRmdirSubdir", func(t *testing.T) {
   500  			skipIfNotOk(t)
   501  			dir := "dir/subdir"
   502  			err := operations.Mkdir(ctx, remote, dir)
   503  			require.NoError(t, err)
   504  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(remote))
   505  
   506  			err = operations.Rmdir(ctx, remote, dir)
   507  			require.NoError(t, err)
   508  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(remote))
   509  
   510  			err = operations.Rmdir(ctx, remote, "dir")
   511  			require.NoError(t, err)
   512  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote))
   513  		})
   514  
   515  		// TestFsListEmpty tests listing an empty directory
   516  		t.Run("FsListEmpty", func(t *testing.T) {
   517  			skipIfNotOk(t)
   518  			fstest.CheckListing(t, remote, []fstest.Item{})
   519  		})
   520  
   521  		// TestFsListDirEmpty tests listing the directories from an empty directory
   522  		TestFsListDirEmpty := func(t *testing.T) {
   523  			skipIfNotOk(t)
   524  			objs, dirs, err := walk.GetAll(ctx, remote, "", true, 1)
   525  			if !remote.Features().CanHaveEmptyDirectories {
   526  				if err != fs.ErrorDirNotFound {
   527  					require.NoError(t, err)
   528  				}
   529  			} else {
   530  				require.NoError(t, err)
   531  			}
   532  			assert.Equal(t, []string{}, objsToNames(objs))
   533  			assert.Equal(t, []string{}, dirsToNames(dirs))
   534  		}
   535  		t.Run("FsListDirEmpty", TestFsListDirEmpty)
   536  
   537  		// TestFsListRDirEmpty tests listing the directories from an empty directory using ListR
   538  		t.Run("FsListRDirEmpty", func(t *testing.T) {
   539  			defer skipIfNotListR(t)()
   540  			TestFsListDirEmpty(t)
   541  		})
   542  
   543  		// TestFsListDirNotFound tests listing the directories from an empty directory
   544  		TestFsListDirNotFound := func(t *testing.T) {
   545  			skipIfNotOk(t)
   546  			objs, dirs, err := walk.GetAll(ctx, remote, "does not exist", true, 1)
   547  			if !remote.Features().CanHaveEmptyDirectories {
   548  				if err != fs.ErrorDirNotFound {
   549  					assert.NoError(t, err)
   550  					assert.Equal(t, 0, len(objs)+len(dirs))
   551  				}
   552  			} else {
   553  				assert.Equal(t, fs.ErrorDirNotFound, err)
   554  			}
   555  		}
   556  		t.Run("FsListDirNotFound", TestFsListDirNotFound)
   557  
   558  		// TestFsListRDirNotFound tests listing the directories from an empty directory using ListR
   559  		t.Run("FsListRDirNotFound", func(t *testing.T) {
   560  			defer skipIfNotListR(t)()
   561  			TestFsListDirNotFound(t)
   562  		})
   563  
   564  		// FsEncoding tests that file name encodings are
   565  		// working by uploading a series of unusual files
   566  		// Must be run in an empty directory
   567  		t.Run("FsEncoding", func(t *testing.T) {
   568  			skipIfNotOk(t)
   569  
   570  			// check no files or dirs as pre-requisite
   571  			fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote))
   572  
   573  			for _, test := range []struct {
   574  				name string
   575  				path string
   576  			}{
   577  				// See lib/encoder/encoder.go for list of things that go here
   578  				{"control chars", "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7F"},
   579  				{"dot", "."},
   580  				{"dot dot", ".."},
   581  				{"punctuation", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"},
   582  				{"leading space", " leading space"},
   583  				{"leading tilde", "~leading tilde"},
   584  				{"leading CR", "\rleading CR"},
   585  				{"leading LF", "\nleading LF"},
   586  				{"leading HT", "\tleading HT"},
   587  				{"leading VT", "\vleading VT"},
   588  				{"leading dot", ".leading dot"},
   589  				{"trailing space", "trailing space "},
   590  				{"trailing CR", "trailing CR\r"},
   591  				{"trailing LF", "trailing LF\n"},
   592  				{"trailing HT", "trailing HT\t"},
   593  				{"trailing VT", "trailing VT\v"},
   594  				{"trailing dot", "trailing dot."},
   595  				{"invalid UTF-8", "invalid utf-8\xfe"},
   596  			} {
   597  				t.Run(test.name, func(t *testing.T) {
   598  					if opt.SkipInvalidUTF8 && test.name == "invalid UTF-8" {
   599  						t.Skip("Skipping " + test.name)
   600  					}
   601  					// turn raw strings into Standard encoding
   602  					fileName := encoder.Standard.Encode(test.path)
   603  					dirName := fileName
   604  					t.Logf("testing %q", fileName)
   605  					assert.NoError(t, remote.Mkdir(ctx, dirName))
   606  					file := fstest.Item{
   607  						ModTime: time.Now(),
   608  						Path:    dirName + "/" + fileName, // test creating a file and dir with that name
   609  					}
   610  					_, o := testPut(context.Background(), t, remote, &file)
   611  					fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file}, []string{dirName}, fs.GetModifyWindow(remote))
   612  					assert.NoError(t, o.Remove(ctx))
   613  					assert.NoError(t, remote.Rmdir(ctx, dirName))
   614  					fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote))
   615  				})
   616  			}
   617  		})
   618  
   619  		// TestFsNewObjectNotFound tests not finding an object
   620  		t.Run("FsNewObjectNotFound", func(t *testing.T) {
   621  			skipIfNotOk(t)
   622  			// Object in an existing directory
   623  			o, err := remote.NewObject(ctx, "potato")
   624  			assert.Nil(t, o)
   625  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   626  			// Now try an object in a non existing directory
   627  			o, err = remote.NewObject(ctx, "directory/not/found/potato")
   628  			assert.Nil(t, o)
   629  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   630  		})
   631  
   632  		// TestFsPutError tests uploading a file where there is an error
   633  		//
   634  		// It makes sure that aborting a file half way through does not create
   635  		// a file on the remote.
   636  		//
   637  		// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$'
   638  		t.Run("FsPutError", func(t *testing.T) {
   639  			skipIfNotOk(t)
   640  
   641  			var N int64 = 5 * 1024
   642  			if *fstest.SizeLimit > 0 && N > *fstest.SizeLimit {
   643  				N = *fstest.SizeLimit
   644  				t.Logf("Reduce file size due to limit %d", N)
   645  			}
   646  
   647  			// Read N bytes then produce an error
   648  			contents := random.String(int(N))
   649  			buf := bytes.NewBufferString(contents)
   650  			er := &readers.ErrorReader{Err: errors.New("potato")}
   651  			in := io.MultiReader(buf, er)
   652  
   653  			obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil)
   654  			_, err := remote.Put(ctx, in, obji)
   655  			// assert.Nil(t, obj) - FIXME some remotes return the object even on nil
   656  			assert.NotNil(t, err)
   657  
   658  			obj, err := remote.NewObject(ctx, file2.Path)
   659  			assert.Nil(t, obj)
   660  			assert.Equal(t, fs.ErrorObjectNotFound, err)
   661  		})
   662  
   663  		t.Run("FsPutZeroLength", func(t *testing.T) {
   664  			skipIfNotOk(t)
   665  
   666  			TestPutLarge(ctx, t, remote, &fstest.Item{
   667  				ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
   668  				Path:    fmt.Sprintf("zero-length-file"),
   669  				Size:    int64(0),
   670  			})
   671  		})
   672  
   673  		t.Run("FsOpenWriterAt", func(t *testing.T) {
   674  			skipIfNotOk(t)
   675  			openWriterAt := remote.Features().OpenWriterAt
   676  			if openWriterAt == nil {
   677  				t.Skip("FS has no OpenWriterAt interface")
   678  			}
   679  			path := "writer-at-subdir/writer-at-file"
   680  			out, err := openWriterAt(ctx, path, -1)
   681  			require.NoError(t, err)
   682  
   683  			var n int
   684  			n, err = out.WriteAt([]byte("def"), 3)
   685  			assert.NoError(t, err)
   686  			assert.Equal(t, 3, n)
   687  			n, err = out.WriteAt([]byte("ghi"), 6)
   688  			assert.NoError(t, err)
   689  			assert.Equal(t, 3, n)
   690  			n, err = out.WriteAt([]byte("abc"), 0)
   691  			assert.NoError(t, err)
   692  			assert.Equal(t, 3, n)
   693  
   694  			assert.NoError(t, out.Close())
   695  
   696  			obj := findObject(ctx, t, remote, path)
   697  			assert.Equal(t, "abcdefghi", readObject(ctx, t, obj, -1), "contents of file differ")
   698  
   699  			assert.NoError(t, obj.Remove(ctx))
   700  			assert.NoError(t, remote.Rmdir(ctx, "writer-at-subdir"))
   701  		})
   702  
   703  		// TestFsChangeNotify tests that changes are properly
   704  		// propagated
   705  		//
   706  		// go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose
   707  		t.Run("FsChangeNotify", func(t *testing.T) {
   708  			skipIfNotOk(t)
   709  
   710  			// Check have ChangeNotify
   711  			doChangeNotify := remote.Features().ChangeNotify
   712  			if doChangeNotify == nil {
   713  				t.Skip("FS has no ChangeNotify interface")
   714  			}
   715  
   716  			err := operations.Mkdir(ctx, remote, "dir")
   717  			require.NoError(t, err)
   718  
   719  			pollInterval := make(chan time.Duration)
   720  			dirChanges := map[string]struct{}{}
   721  			objChanges := map[string]struct{}{}
   722  			doChangeNotify(ctx, func(x string, e fs.EntryType) {
   723  				fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e)
   724  				if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) {
   725  					fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e)
   726  					return
   727  				}
   728  				if e == fs.EntryDirectory {
   729  					dirChanges[x] = struct{}{}
   730  				} else if e == fs.EntryObject {
   731  					objChanges[x] = struct{}{}
   732  				}
   733  			}, pollInterval)
   734  			defer func() { close(pollInterval) }()
   735  			pollInterval <- time.Second
   736  
   737  			var dirs []string
   738  			for _, idx := range []int{1, 3, 2} {
   739  				dir := fmt.Sprintf("dir/subdir%d", idx)
   740  				err = operations.Mkdir(ctx, remote, dir)
   741  				require.NoError(t, err)
   742  				dirs = append(dirs, dir)
   743  			}
   744  
   745  			var objs []fs.Object
   746  			for _, idx := range []int{2, 4, 3} {
   747  				file := fstest.Item{
   748  					ModTime: time.Now(),
   749  					Path:    fmt.Sprintf("dir/file%d", idx),
   750  				}
   751  				_, o := testPut(ctx, t, remote, &file)
   752  				objs = append(objs, o)
   753  			}
   754  
   755  			// Looks for each item in wants in changes -
   756  			// if they are all found it returns true
   757  			contains := func(changes map[string]struct{}, wants []string) bool {
   758  				for _, want := range wants {
   759  					_, ok := changes[want]
   760  					if !ok {
   761  						return false
   762  					}
   763  				}
   764  				return true
   765  			}
   766  
   767  			// Wait a little while for the changes to come in
   768  			wantDirChanges := []string{"dir/subdir1", "dir/subdir3", "dir/subdir2"}
   769  			wantObjChanges := []string{"dir/file2", "dir/file4", "dir/file3"}
   770  			ok := false
   771  			for tries := 1; tries < 10; tries++ {
   772  				ok = contains(dirChanges, wantDirChanges) && contains(objChanges, wantObjChanges)
   773  				if ok {
   774  					break
   775  				}
   776  				t.Logf("Try %d/10 waiting for dirChanges and objChanges", tries)
   777  				time.Sleep(3 * time.Second)
   778  			}
   779  			if !ok {
   780  				t.Errorf("%+v does not contain %+v or \n%+v does not contain %+v", dirChanges, wantDirChanges, objChanges, wantObjChanges)
   781  			}
   782  
   783  			// tidy up afterwards
   784  			for _, o := range objs {
   785  				assert.NoError(t, o.Remove(ctx))
   786  			}
   787  			dirs = append(dirs, "dir")
   788  			for _, dir := range dirs {
   789  				assert.NoError(t, remote.Rmdir(ctx, dir))
   790  			}
   791  		})
   792  
   793  		// TestFsPut files writes file1, file2 and tests an update
   794  		//
   795  		// Tests that require file1, file2 are within this
   796  		t.Run("FsPutFiles", func(t *testing.T) {
   797  			skipIfNotOk(t)
   798  			file1Contents, _ = testPut(ctx, t, remote, &file1)
   799  			/* file2Contents = */ testPut(ctx, t, remote, &file2)
   800  			file1Contents, _ = testPut(ctx, t, remote, &file1)
   801  			// Note that the next test will check there are no duplicated file names
   802  
   803  			// TestFsListDirFile2 tests the files are correctly uploaded by doing
   804  			// Depth 1 directory listings
   805  			TestFsListDirFile2 := func(t *testing.T) {
   806  				skipIfNotOk(t)
   807  				list := func(dir string, expectedDirNames, expectedObjNames []string) {
   808  					var objNames, dirNames []string
   809  					for i := 1; i <= *fstest.ListRetries; i++ {
   810  						objs, dirs, err := walk.GetAll(ctx, remote, dir, true, 1)
   811  						if errors.Cause(err) == fs.ErrorDirNotFound {
   812  							objs, dirs, err = walk.GetAll(ctx, remote, dir, true, 1)
   813  						}
   814  						require.NoError(t, err)
   815  						objNames = objsToNames(objs)
   816  						dirNames = dirsToNames(dirs)
   817  						if len(objNames) >= len(expectedObjNames) && len(dirNames) >= len(expectedDirNames) {
   818  							break
   819  						}
   820  						t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, *fstest.ListRetries)
   821  						time.Sleep(1 * time.Second)
   822  					}
   823  					assert.Equal(t, expectedDirNames, dirNames)
   824  					assert.Equal(t, expectedObjNames, objNames)
   825  				}
   826  				dir := file2.Path
   827  				deepest := true
   828  				for dir != "" {
   829  					expectedObjNames := []string{}
   830  					expectedDirNames := []string{}
   831  					child := dir
   832  					dir = path.Dir(dir)
   833  					if dir == "." {
   834  						dir = ""
   835  						expectedObjNames = append(expectedObjNames, file1.Path)
   836  					}
   837  					if deepest {
   838  						expectedObjNames = append(expectedObjNames, file2.Path)
   839  						deepest = false
   840  					} else {
   841  						expectedDirNames = append(expectedDirNames, child)
   842  					}
   843  					list(dir, expectedDirNames, expectedObjNames)
   844  				}
   845  			}
   846  			t.Run("FsListDirFile2", TestFsListDirFile2)
   847  
   848  			// TestFsListRDirFile2 tests the files are correctly uploaded by doing
   849  			// Depth 1 directory listings using ListR
   850  			t.Run("FsListRDirFile2", func(t *testing.T) {
   851  				defer skipIfNotListR(t)()
   852  				TestFsListDirFile2(t)
   853  			})
   854  
   855  			// Test the files are all there with walk.ListR recursive listings
   856  			t.Run("FsListR", func(t *testing.T) {
   857  				skipIfNotOk(t)
   858  				objs, dirs, err := walk.GetAll(ctx, remote, "", true, -1)
   859  				require.NoError(t, err)
   860  				assert.Equal(t, []string{
   861  					"hello? sausage",
   862  					"hello? sausage/êé",
   863  					"hello? sausage/êé/Hello, 世界",
   864  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
   865  				}, dirsToNames(dirs))
   866  				assert.Equal(t, []string{
   867  					"file name.txt",
   868  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
   869  				}, objsToNames(objs))
   870  			})
   871  
   872  			// Test the files are all there with
   873  			// walk.ListR recursive listings on a sub dir
   874  			t.Run("FsListRSubdir", func(t *testing.T) {
   875  				skipIfNotOk(t)
   876  				objs, dirs, err := walk.GetAll(ctx, remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1)
   877  				require.NoError(t, err)
   878  				assert.Equal(t, []string{
   879  					"hello? sausage/êé",
   880  					"hello? sausage/êé/Hello, 世界",
   881  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
   882  				}, dirsToNames(dirs))
   883  				assert.Equal(t, []string{
   884  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt",
   885  				}, objsToNames(objs))
   886  			})
   887  
   888  			// TestFsListDirRoot tests that DirList works in the root
   889  			TestFsListDirRoot := func(t *testing.T) {
   890  				skipIfNotOk(t)
   891  				rootRemote, err := fs.NewFs(remoteName)
   892  				require.NoError(t, err)
   893  				_, dirs, err := walk.GetAll(ctx, rootRemote, "", true, 1)
   894  				require.NoError(t, err)
   895  				assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
   896  			}
   897  			t.Run("FsListDirRoot", TestFsListDirRoot)
   898  
   899  			// TestFsListRDirRoot tests that DirList works in the root using ListR
   900  			t.Run("FsListRDirRoot", func(t *testing.T) {
   901  				defer skipIfNotListR(t)()
   902  				TestFsListDirRoot(t)
   903  			})
   904  
   905  			// TestFsListSubdir tests List works for a subdirectory
   906  			TestFsListSubdir := func(t *testing.T) {
   907  				skipIfNotOk(t)
   908  				fileName := file2.Path
   909  				var err error
   910  				var objs []fs.Object
   911  				var dirs []fs.Directory
   912  				for i := 0; i < 2; i++ {
   913  					dir, _ := path.Split(fileName)
   914  					dir = dir[:len(dir)-1]
   915  					objs, dirs, err = walk.GetAll(ctx, remote, dir, true, -1)
   916  				}
   917  				require.NoError(t, err)
   918  				require.Len(t, objs, 1)
   919  				assert.Equal(t, fileName, objs[0].Remote())
   920  				require.Len(t, dirs, 0)
   921  			}
   922  			t.Run("FsListSubdir", TestFsListSubdir)
   923  
   924  			// TestFsListRSubdir tests List works for a subdirectory using ListR
   925  			t.Run("FsListRSubdir", func(t *testing.T) {
   926  				defer skipIfNotListR(t)()
   927  				TestFsListSubdir(t)
   928  			})
   929  
   930  			// TestFsListLevel2 tests List works for 2 levels
   931  			TestFsListLevel2 := func(t *testing.T) {
   932  				skipIfNotOk(t)
   933  				objs, dirs, err := walk.GetAll(ctx, remote, "", true, 2)
   934  				if err == fs.ErrorLevelNotSupported {
   935  					return
   936  				}
   937  				require.NoError(t, err)
   938  				assert.Equal(t, []string{file1.Path}, objsToNames(objs))
   939  				assert.Equal(t, []string{"hello? sausage", "hello? sausage/êé"}, dirsToNames(dirs))
   940  			}
   941  			t.Run("FsListLevel2", TestFsListLevel2)
   942  
   943  			// TestFsListRLevel2 tests List works for 2 levels using ListR
   944  			t.Run("FsListRLevel2", func(t *testing.T) {
   945  				defer skipIfNotListR(t)()
   946  				TestFsListLevel2(t)
   947  			})
   948  
   949  			// TestFsListFile1 tests file present
   950  			t.Run("FsListFile1", func(t *testing.T) {
   951  				skipIfNotOk(t)
   952  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
   953  			})
   954  
   955  			// TestFsNewObject tests NewObject
   956  			t.Run("FsNewObject", func(t *testing.T) {
   957  				skipIfNotOk(t)
   958  				obj := findObject(ctx, t, remote, file1.Path)
   959  				file1.Check(t, obj, remote.Precision())
   960  			})
   961  
   962  			// TestFsListFile1and2 tests two files present
   963  			t.Run("FsListFile1and2", func(t *testing.T) {
   964  				skipIfNotOk(t)
   965  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
   966  			})
   967  
   968  			// TestFsNewObjectDir tests NewObject on a directory which should produce an error
   969  			t.Run("FsNewObjectDir", func(t *testing.T) {
   970  				skipIfNotOk(t)
   971  				dir := path.Dir(file2.Path)
   972  				obj, err := remote.NewObject(ctx, dir)
   973  				assert.Nil(t, obj)
   974  				assert.NotNil(t, err)
   975  			})
   976  
   977  			// TestFsCopy tests Copy
   978  			t.Run("FsCopy", func(t *testing.T) {
   979  				skipIfNotOk(t)
   980  
   981  				// Check have Copy
   982  				doCopy := remote.Features().Copy
   983  				if doCopy == nil {
   984  					t.Skip("FS has no Copier interface")
   985  				}
   986  
   987  				// Test with file2 so have + and ' ' in file name
   988  				var file2Copy = file2
   989  				file2Copy.Path += "-copy"
   990  
   991  				// do the copy
   992  				src := findObject(ctx, t, remote, file2.Path)
   993  				dst, err := doCopy(ctx, src, file2Copy.Path)
   994  				if err == fs.ErrorCantCopy {
   995  					t.Skip("FS can't copy")
   996  				}
   997  				require.NoError(t, err, fmt.Sprintf("Error: %#v", err))
   998  
   999  				// check file exists in new listing
  1000  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file2Copy})
  1001  
  1002  				// Check dst lightly - list above has checked ModTime/Hashes
  1003  				assert.Equal(t, file2Copy.Path, dst.Remote())
  1004  
  1005  				// Delete copy
  1006  				err = dst.Remove(ctx)
  1007  				require.NoError(t, err)
  1008  
  1009  			})
  1010  
  1011  			// TestFsMove tests Move
  1012  			t.Run("FsMove", func(t *testing.T) {
  1013  				skipIfNotOk(t)
  1014  
  1015  				// Check have Move
  1016  				doMove := remote.Features().Move
  1017  				if doMove == nil {
  1018  					t.Skip("FS has no Mover interface")
  1019  				}
  1020  
  1021  				// state of files now:
  1022  				// 1: file name.txt
  1023  				// 2: hello sausage?/../z.txt
  1024  
  1025  				var file1Move = file1
  1026  				var file2Move = file2
  1027  
  1028  				// check happy path, i.e. no naming conflicts when rename and move are two
  1029  				// separate operations
  1030  				file2Move.Path = "other.txt"
  1031  				src := findObject(ctx, t, remote, file2.Path)
  1032  				dst, err := doMove(ctx, src, file2Move.Path)
  1033  				if err == fs.ErrorCantMove {
  1034  					t.Skip("FS can't move")
  1035  				}
  1036  				require.NoError(t, err)
  1037  				// check file exists in new listing
  1038  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move})
  1039  				// Check dst lightly - list above has checked ModTime/Hashes
  1040  				assert.Equal(t, file2Move.Path, dst.Remote())
  1041  				// 1: file name.txt
  1042  				// 2: other.txt
  1043  
  1044  				// Check conflict on "rename, then move"
  1045  				file1Move.Path = "moveTest/other.txt"
  1046  				src = findObject(ctx, t, remote, file1.Path)
  1047  				_, err = doMove(ctx, src, file1Move.Path)
  1048  				require.NoError(t, err)
  1049  				fstest.CheckListing(t, remote, []fstest.Item{file1Move, file2Move})
  1050  				// 1: moveTest/other.txt
  1051  				// 2: other.txt
  1052  
  1053  				// Check conflict on "move, then rename"
  1054  				src = findObject(ctx, t, remote, file1Move.Path)
  1055  				_, err = doMove(ctx, src, file1.Path)
  1056  				require.NoError(t, err)
  1057  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move})
  1058  				// 1: file name.txt
  1059  				// 2: other.txt
  1060  
  1061  				src = findObject(ctx, t, remote, file2Move.Path)
  1062  				_, err = doMove(ctx, src, file2.Path)
  1063  				require.NoError(t, err)
  1064  				fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
  1065  				// 1: file name.txt
  1066  				// 2: hello sausage?/../z.txt
  1067  
  1068  				// Tidy up moveTest directory
  1069  				require.NoError(t, remote.Rmdir(ctx, "moveTest"))
  1070  			})
  1071  
  1072  			// Move src to this remote using server side move operations.
  1073  			//
  1074  			// Will only be called if src.Fs().Name() == f.Name()
  1075  			//
  1076  			// If it isn't possible then return fs.ErrorCantDirMove
  1077  			//
  1078  			// If destination exists then return fs.ErrorDirExists
  1079  
  1080  			// TestFsDirMove tests DirMove
  1081  			//
  1082  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$
  1083  			t.Run("FsDirMove", func(t *testing.T) {
  1084  				skipIfNotOk(t)
  1085  
  1086  				// Check have DirMove
  1087  				doDirMove := remote.Features().DirMove
  1088  				if doDirMove == nil {
  1089  					t.Skip("FS has no DirMover interface")
  1090  				}
  1091  
  1092  				// Check it can't move onto itself
  1093  				err := doDirMove(ctx, remote, "", "")
  1094  				require.Equal(t, fs.ErrorDirExists, err)
  1095  
  1096  				// new remote
  1097  				newRemote, _, removeNewRemote, err := fstest.RandomRemote()
  1098  				require.NoError(t, err)
  1099  				defer removeNewRemote()
  1100  
  1101  				const newName = "new_name/sub_new_name"
  1102  				// try the move
  1103  				err = newRemote.Features().DirMove(ctx, remote, "", newName)
  1104  				require.NoError(t, err)
  1105  
  1106  				// check remotes
  1107  				// remote should not exist here
  1108  				_, err = remote.List(ctx, "")
  1109  				assert.Equal(t, fs.ErrorDirNotFound, errors.Cause(err))
  1110  				//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
  1111  				file1Copy := file1
  1112  				file1Copy.Path = path.Join(newName, file1.Path)
  1113  				file2Copy := file2
  1114  				file2Copy.Path = path.Join(newName, file2.Path)
  1115  				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{file2Copy, file1Copy}, []string{
  1116  					"new_name",
  1117  					"new_name/sub_new_name",
  1118  					"new_name/sub_new_name/hello? sausage",
  1119  					"new_name/sub_new_name/hello? sausage/êé",
  1120  					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界",
  1121  					"new_name/sub_new_name/hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
  1122  				}, newRemote.Precision())
  1123  
  1124  				// move it back
  1125  				err = doDirMove(ctx, newRemote, newName, "")
  1126  				require.NoError(t, err)
  1127  
  1128  				// check remotes
  1129  				fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2, file1}, []string{
  1130  					"hello? sausage",
  1131  					"hello? sausage/êé",
  1132  					"hello? sausage/êé/Hello, 世界",
  1133  					"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
  1134  				}, remote.Precision())
  1135  				fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{}, []string{
  1136  					"new_name",
  1137  				}, newRemote.Precision())
  1138  			})
  1139  
  1140  			// TestFsRmdirFull tests removing a non empty directory
  1141  			t.Run("FsRmdirFull", func(t *testing.T) {
  1142  				skipIfNotOk(t)
  1143  				if isBucketBasedButNotRoot(remote) {
  1144  					t.Skip("Skipping test as non root bucket based remote")
  1145  				}
  1146  				err := remote.Rmdir(ctx, "")
  1147  				require.Error(t, err, "Expecting error on RMdir on non empty remote")
  1148  			})
  1149  
  1150  			// TestFsPrecision tests the Precision of the Fs
  1151  			t.Run("FsPrecision", func(t *testing.T) {
  1152  				skipIfNotOk(t)
  1153  				precision := remote.Precision()
  1154  				if precision == fs.ModTimeNotSupported {
  1155  					return
  1156  				}
  1157  				if precision > time.Second || precision < 0 {
  1158  					t.Fatalf("Precision out of range %v", precision)
  1159  				}
  1160  				// FIXME check expected precision
  1161  			})
  1162  
  1163  			// TestObjectString tests the Object String method
  1164  			t.Run("ObjectString", func(t *testing.T) {
  1165  				skipIfNotOk(t)
  1166  				obj := findObject(ctx, t, remote, file1.Path)
  1167  				assert.Equal(t, file1.Path, obj.String())
  1168  				if opt.NilObject != nil {
  1169  					assert.Equal(t, "<nil>", opt.NilObject.String())
  1170  				}
  1171  			})
  1172  
  1173  			// TestObjectFs tests the object can be found
  1174  			t.Run("ObjectFs", func(t *testing.T) {
  1175  				skipIfNotOk(t)
  1176  				obj := findObject(ctx, t, remote, file1.Path)
  1177  				// If this is set we don't do the direct comparison of
  1178  				// the Fs from the object as it may be different
  1179  				if opt.SkipFsMatch {
  1180  					return
  1181  				}
  1182  				testRemote := remote
  1183  				if obj.Fs() != testRemote {
  1184  					// Check to see if this wraps something else
  1185  					if doUnWrap := testRemote.Features().UnWrap; doUnWrap != nil {
  1186  						testRemote = doUnWrap()
  1187  					}
  1188  				}
  1189  				assert.Equal(t, obj.Fs(), testRemote)
  1190  			})
  1191  
  1192  			// TestObjectRemote tests the Remote is correct
  1193  			t.Run("ObjectRemote", func(t *testing.T) {
  1194  				skipIfNotOk(t)
  1195  				obj := findObject(ctx, t, remote, file1.Path)
  1196  				assert.Equal(t, file1.Path, obj.Remote())
  1197  			})
  1198  
  1199  			// TestObjectHashes checks all the hashes the object supports
  1200  			t.Run("ObjectHashes", func(t *testing.T) {
  1201  				skipIfNotOk(t)
  1202  				obj := findObject(ctx, t, remote, file1.Path)
  1203  				file1.CheckHashes(t, obj)
  1204  			})
  1205  
  1206  			// TestObjectModTime tests the ModTime of the object is correct
  1207  			TestObjectModTime := func(t *testing.T) {
  1208  				skipIfNotOk(t)
  1209  				obj := findObject(ctx, t, remote, file1.Path)
  1210  				file1.CheckModTime(t, obj, obj.ModTime(ctx), remote.Precision())
  1211  			}
  1212  			t.Run("ObjectModTime", TestObjectModTime)
  1213  
  1214  			// TestObjectMimeType tests the MimeType of the object is correct
  1215  			t.Run("ObjectMimeType", func(t *testing.T) {
  1216  				skipIfNotOk(t)
  1217  				obj := findObject(ctx, t, remote, file1.Path)
  1218  				do, ok := obj.(fs.MimeTyper)
  1219  				if !ok {
  1220  					t.Skip("MimeType method not supported")
  1221  				}
  1222  				mimeType := do.MimeType(ctx)
  1223  				if strings.ContainsRune(mimeType, ';') {
  1224  					assert.Equal(t, "text/plain; charset=utf-8", mimeType)
  1225  				} else {
  1226  					assert.Equal(t, "text/plain", mimeType)
  1227  				}
  1228  			})
  1229  
  1230  			// TestObjectSetModTime tests that SetModTime works
  1231  			t.Run("ObjectSetModTime", func(t *testing.T) {
  1232  				skipIfNotOk(t)
  1233  				newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
  1234  				obj := findObject(ctx, t, remote, file1.Path)
  1235  				err := obj.SetModTime(ctx, newModTime)
  1236  				if err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete {
  1237  					t.Log(err)
  1238  					return
  1239  				}
  1240  				require.NoError(t, err)
  1241  				file1.ModTime = newModTime
  1242  				file1.CheckModTime(t, obj, obj.ModTime(ctx), remote.Precision())
  1243  				// And make a new object and read it from there too
  1244  				TestObjectModTime(t)
  1245  			})
  1246  
  1247  			// TestObjectSize tests that Size works
  1248  			t.Run("ObjectSize", func(t *testing.T) {
  1249  				skipIfNotOk(t)
  1250  				obj := findObject(ctx, t, remote, file1.Path)
  1251  				assert.Equal(t, file1.Size, obj.Size())
  1252  			})
  1253  
  1254  			// TestObjectOpen tests that Open works
  1255  			t.Run("ObjectOpen", func(t *testing.T) {
  1256  				skipIfNotOk(t)
  1257  				obj := findObject(ctx, t, remote, file1.Path)
  1258  				assert.Equal(t, file1Contents, readObject(ctx, t, obj, -1), "contents of file1 differ")
  1259  			})
  1260  
  1261  			// TestObjectOpenSeek tests that Open works with SeekOption
  1262  			t.Run("ObjectOpenSeek", func(t *testing.T) {
  1263  				skipIfNotOk(t)
  1264  				obj := findObject(ctx, t, remote, file1.Path)
  1265  				assert.Equal(t, file1Contents[50:], readObject(ctx, t, obj, -1, &fs.SeekOption{Offset: 50}), "contents of file1 differ after seek")
  1266  			})
  1267  
  1268  			// TestObjectOpenRange tests that Open works with RangeOption
  1269  			//
  1270  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|ObjectOpenRange)$'
  1271  			t.Run("ObjectOpenRange", func(t *testing.T) {
  1272  				skipIfNotOk(t)
  1273  				obj := findObject(ctx, t, remote, file1.Path)
  1274  				for _, test := range []struct {
  1275  					ro                 fs.RangeOption
  1276  					wantStart, wantEnd int
  1277  				}{
  1278  					{fs.RangeOption{Start: 5, End: 15}, 5, 16},
  1279  					{fs.RangeOption{Start: 80, End: -1}, 80, 100},
  1280  					{fs.RangeOption{Start: 81, End: 100000}, 81, 100},
  1281  					{fs.RangeOption{Start: -1, End: 20}, 80, 100}, // if start is omitted this means get the final bytes
  1282  					// {fs.RangeOption{Start: -1, End: -1}, 0, 100}, - this seems to work but the RFC doesn't define it
  1283  				} {
  1284  					got := readObject(ctx, t, obj, -1, &test.ro)
  1285  					foundAt := strings.Index(file1Contents, got)
  1286  					help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
  1287  					assert.Equal(t, file1Contents[test.wantStart:test.wantEnd], got, help)
  1288  				}
  1289  			})
  1290  
  1291  			// TestObjectPartialRead tests that reading only part of the object does the correct thing
  1292  			t.Run("ObjectPartialRead", func(t *testing.T) {
  1293  				skipIfNotOk(t)
  1294  				obj := findObject(ctx, t, remote, file1.Path)
  1295  				assert.Equal(t, file1Contents[:50], readObject(ctx, t, obj, 50), "contents of file1 differ after limited read")
  1296  			})
  1297  
  1298  			// TestObjectUpdate tests that Update works
  1299  			t.Run("ObjectUpdate", func(t *testing.T) {
  1300  				skipIfNotOk(t)
  1301  				contents := random.String(200)
  1302  				buf := bytes.NewBufferString(contents)
  1303  				hash := hash.NewMultiHasher()
  1304  				in := io.TeeReader(buf, hash)
  1305  
  1306  				file1.Size = int64(buf.Len())
  1307  				obj := findObject(ctx, t, remote, file1.Path)
  1308  				obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
  1309  				err := obj.Update(ctx, in, obji)
  1310  				require.NoError(t, err)
  1311  				file1.Hashes = hash.Sums()
  1312  
  1313  				// check the object has been updated
  1314  				file1.Check(t, obj, remote.Precision())
  1315  
  1316  				// Re-read the object and check again
  1317  				obj = findObject(ctx, t, remote, file1.Path)
  1318  				file1.Check(t, obj, remote.Precision())
  1319  
  1320  				// check contents correct
  1321  				assert.Equal(t, contents, readObject(ctx, t, obj, -1), "contents of updated file1 differ")
  1322  				file1Contents = contents
  1323  			})
  1324  
  1325  			// TestObjectStorable tests that Storable works
  1326  			t.Run("ObjectStorable", func(t *testing.T) {
  1327  				skipIfNotOk(t)
  1328  				obj := findObject(ctx, t, remote, file1.Path)
  1329  				require.NotNil(t, !obj.Storable(), "Expecting object to be storable")
  1330  			})
  1331  
  1332  			// TestFsIsFile tests that an error is returned along with a valid fs
  1333  			// which points to the parent directory.
  1334  			t.Run("FsIsFile", func(t *testing.T) {
  1335  				skipIfNotOk(t)
  1336  				remoteName := subRemoteName + "/" + file2.Path
  1337  				file2Copy := file2
  1338  				file2Copy.Path = "z.txt"
  1339  				fileRemote, err := fs.NewFs(remoteName)
  1340  				require.NotNil(t, fileRemote)
  1341  				assert.Equal(t, fs.ErrorIsFile, err)
  1342  
  1343  				if strings.HasPrefix(remoteName, "TestChunker") && strings.Contains(remoteName, "Nometa") {
  1344  					// TODO fix chunker and remove this bypass
  1345  					t.Logf("Skip listing check -- chunker can't yet handle this tricky case")
  1346  					return
  1347  				}
  1348  				fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
  1349  			})
  1350  
  1351  			// TestFsIsFileNotFound tests that an error is not returned if no object is found
  1352  			t.Run("FsIsFileNotFound", func(t *testing.T) {
  1353  				skipIfNotOk(t)
  1354  				remoteName := subRemoteName + "/not found.txt"
  1355  				fileRemote, err := fs.NewFs(remoteName)
  1356  				require.NoError(t, err)
  1357  				fstest.CheckListing(t, fileRemote, []fstest.Item{})
  1358  			})
  1359  
  1360  			// Test that things work from the root
  1361  			t.Run("FromRoot", func(t *testing.T) {
  1362  				if features := remote.Features(); features.BucketBased && !features.BucketBasedRootOK {
  1363  					t.Skip("Can't list from root on this remote")
  1364  				}
  1365  
  1366  				configName, configLeaf, err := fspath.Parse(subRemoteName)
  1367  				require.NoError(t, err)
  1368  				if configName == "" {
  1369  					configName, configLeaf = path.Split(subRemoteName)
  1370  				} else {
  1371  					configName += ":"
  1372  				}
  1373  				t.Logf("Opening root remote %q path %q from %q", configName, configLeaf, subRemoteName)
  1374  				rootRemote, err := fs.NewFs(configName)
  1375  				require.NoError(t, err)
  1376  
  1377  				file1Root := file1
  1378  				file1Root.Path = path.Join(configLeaf, file1Root.Path)
  1379  				file2Root := file2
  1380  				file2Root.Path = path.Join(configLeaf, file2Root.Path)
  1381  				var dirs []string
  1382  				dir := file2.Path
  1383  				for {
  1384  					dir = path.Dir(dir)
  1385  					if dir == "" || dir == "." || dir == "/" {
  1386  						break
  1387  					}
  1388  					dirs = append(dirs, path.Join(configLeaf, dir))
  1389  				}
  1390  
  1391  				// Check that we can see file1 and file2 from the root
  1392  				t.Run("List", func(t *testing.T) {
  1393  					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, dirs, rootRemote.Precision())
  1394  				})
  1395  
  1396  				// Check that that listing the entries is OK
  1397  				t.Run("ListEntries", func(t *testing.T) {
  1398  					entries, err := rootRemote.List(context.Background(), configLeaf)
  1399  					require.NoError(t, err)
  1400  					fstest.CompareItems(t, entries, []fstest.Item{file1Root}, dirs[len(dirs)-1:], rootRemote.Precision(), "ListEntries")
  1401  				})
  1402  
  1403  				// List the root with ListR
  1404  				t.Run("ListR", func(t *testing.T) {
  1405  					doListR := rootRemote.Features().ListR
  1406  					if doListR == nil {
  1407  						t.Skip("FS has no ListR interface")
  1408  					}
  1409  					file1Found, file2Found := false, false
  1410  					stopTime := time.Now().Add(10 * time.Second)
  1411  					errTooMany := errors.New("too many files")
  1412  					errFound := errors.New("found")
  1413  					err := doListR(context.Background(), "", func(entries fs.DirEntries) error {
  1414  						for _, entry := range entries {
  1415  							remote := entry.Remote()
  1416  							if remote == file1Root.Path {
  1417  								file1Found = true
  1418  							}
  1419  							if remote == file2Root.Path {
  1420  								file2Found = true
  1421  							}
  1422  							if file1Found && file2Found {
  1423  								return errFound
  1424  							}
  1425  						}
  1426  						if time.Now().After(stopTime) {
  1427  							return errTooMany
  1428  						}
  1429  						return nil
  1430  					})
  1431  					if err != errFound && err != errTooMany {
  1432  						assert.NoError(t, err)
  1433  					}
  1434  					if err != errTooMany {
  1435  						assert.True(t, file1Found, "file1Root not found")
  1436  						assert.True(t, file2Found, "file2Root not found")
  1437  					} else {
  1438  						t.Logf("Too many files to list - giving up")
  1439  					}
  1440  				})
  1441  
  1442  				// Create a new file
  1443  				t.Run("Put", func(t *testing.T) {
  1444  					file3Root := fstest.Item{
  1445  						ModTime: time.Now(),
  1446  						Path:    path.Join(configLeaf, "created from root.txt"),
  1447  					}
  1448  					_, file3Obj := testPut(ctx, t, rootRemote, &file3Root)
  1449  					fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root, file3Root}, nil, rootRemote.Precision())
  1450  
  1451  					// And then remove it
  1452  					t.Run("Remove", func(t *testing.T) {
  1453  						require.NoError(t, file3Obj.Remove(context.Background()))
  1454  						fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, nil, rootRemote.Precision())
  1455  					})
  1456  				})
  1457  			})
  1458  
  1459  			// TestPublicLink tests creation of sharable, public links
  1460  			// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|PublicLink)$'
  1461  			t.Run("PublicLink", func(t *testing.T) {
  1462  				skipIfNotOk(t)
  1463  
  1464  				doPublicLink := remote.Features().PublicLink
  1465  				if doPublicLink == nil {
  1466  					t.Skip("FS has no PublicLinker interface")
  1467  				}
  1468  
  1469  				// if object not found
  1470  				link, err := doPublicLink(ctx, file1.Path+"_does_not_exist")
  1471  				require.Error(t, err, "Expected to get error when file doesn't exist")
  1472  				require.Equal(t, "", link, "Expected link to be empty on error")
  1473  
  1474  				// sharing file for the first time
  1475  				link1, err := doPublicLink(ctx, file1.Path)
  1476  				require.NoError(t, err)
  1477  				require.NotEqual(t, "", link1, "Link should not be empty")
  1478  
  1479  				link2, err := doPublicLink(ctx, file2.Path)
  1480  				require.NoError(t, err)
  1481  				require.NotEqual(t, "", link2, "Link should not be empty")
  1482  
  1483  				require.NotEqual(t, link1, link2, "Links to different files should differ")
  1484  
  1485  				// sharing file for the 2nd time
  1486  				link1, err = doPublicLink(ctx, file1.Path)
  1487  				require.NoError(t, err)
  1488  				require.NotEqual(t, "", link1, "Link should not be empty")
  1489  
  1490  				// sharing directory for the first time
  1491  				path := path.Dir(file2.Path)
  1492  				link3, err := doPublicLink(ctx, path)
  1493  				if err != nil && errors.Cause(err) == fs.ErrorCantShareDirectories {
  1494  					t.Log("skipping directory tests as not supported on this backend")
  1495  				} else {
  1496  					require.NoError(t, err)
  1497  					require.NotEqual(t, "", link3, "Link should not be empty")
  1498  
  1499  					// sharing directory for the second time
  1500  					link3, err = doPublicLink(ctx, path)
  1501  					require.NoError(t, err)
  1502  					require.NotEqual(t, "", link3, "Link should not be empty")
  1503  
  1504  					// sharing the "root" directory in a subremote
  1505  					subRemote, _, removeSubRemote, err := fstest.RandomRemote()
  1506  					require.NoError(t, err)
  1507  					defer removeSubRemote()
  1508  					// ensure sub remote isn't empty
  1509  					buf := bytes.NewBufferString("somecontent")
  1510  					obji := object.NewStaticObjectInfo("somefile", time.Now(), int64(buf.Len()), true, nil, nil)
  1511  					_, err = subRemote.Put(ctx, buf, obji)
  1512  					require.NoError(t, err)
  1513  
  1514  					link4, err := subRemote.Features().PublicLink(ctx, "")
  1515  					require.NoError(t, err, "Sharing root in a sub-remote should work")
  1516  					require.NotEqual(t, "", link4, "Link should not be empty")
  1517  				}
  1518  			})
  1519  
  1520  			// TestSetTier tests SetTier and GetTier functionality
  1521  			t.Run("SetTier", func(t *testing.T) {
  1522  				skipIfNotSetTier(t)
  1523  				obj := findObject(ctx, t, remote, file1.Path)
  1524  				setter, ok := obj.(fs.SetTierer)
  1525  				assert.NotNil(t, ok)
  1526  				getter, ok := obj.(fs.GetTierer)
  1527  				assert.NotNil(t, ok)
  1528  				// If interfaces are supported TiersToTest should contain
  1529  				// at least one entry
  1530  				supportedTiers := opt.TiersToTest
  1531  				assert.NotEmpty(t, supportedTiers)
  1532  				// test set tier changes on supported storage classes or tiers
  1533  				for _, tier := range supportedTiers {
  1534  					err := setter.SetTier(tier)
  1535  					assert.Nil(t, err)
  1536  					got := getter.GetTier()
  1537  					assert.Equal(t, tier, got)
  1538  				}
  1539  			})
  1540  
  1541  			// Check to see if Fs that wrap other Objects implement all the optional methods
  1542  			t.Run("ObjectCheckWrap", func(t *testing.T) {
  1543  				skipIfNotOk(t)
  1544  				if opt.SkipObjectCheckWrap {
  1545  					t.Skip("Skipping FsCheckWrap on this Fs")
  1546  				}
  1547  				ft := new(fs.Features).Fill(remote)
  1548  				if ft.UnWrap == nil {
  1549  					t.Skip("Not a wrapping Fs")
  1550  				}
  1551  				obj := findObject(ctx, t, remote, file1.Path)
  1552  				_, unsupported := fs.ObjectOptionalInterfaces(obj)
  1553  				for _, name := range unsupported {
  1554  					if !stringsContains(name, opt.UnimplementableObjectMethods) {
  1555  						t.Errorf("Missing Object wrapper for %s", name)
  1556  					}
  1557  				}
  1558  			})
  1559  
  1560  			// TestObjectRemove tests Remove
  1561  			t.Run("ObjectRemove", func(t *testing.T) {
  1562  				skipIfNotOk(t)
  1563  				// remove file1
  1564  				obj := findObject(ctx, t, remote, file1.Path)
  1565  				err := obj.Remove(ctx)
  1566  				require.NoError(t, err)
  1567  				// check listing without modtime as TestPublicLink may change the modtime
  1568  				fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2}, nil, fs.ModTimeNotSupported)
  1569  			})
  1570  
  1571  			// TestAbout tests the About optional interface
  1572  			t.Run("ObjectAbout", func(t *testing.T) {
  1573  				skipIfNotOk(t)
  1574  
  1575  				// Check have About
  1576  				doAbout := remote.Features().About
  1577  				if doAbout == nil {
  1578  					t.Skip("FS does not support About")
  1579  				}
  1580  
  1581  				// Can't really check the output much!
  1582  				usage, err := doAbout(context.Background())
  1583  				require.NoError(t, err)
  1584  				require.NotNil(t, usage)
  1585  				assert.NotEqual(t, int64(0), usage.Total)
  1586  			})
  1587  
  1588  			// Just file2 remains for Purge to clean up
  1589  
  1590  			// TestFsPutStream tests uploading files when size isn't known in advance.
  1591  			// This may trigger large buffer allocation in some backends, keep it
  1592  			// close to the end of suite. (See fs/operations/xtra_operations_test.go)
  1593  			t.Run("FsPutStream", func(t *testing.T) {
  1594  				skipIfNotOk(t)
  1595  				if remote.Features().PutStream == nil {
  1596  					t.Skip("FS has no PutStream interface")
  1597  				}
  1598  
  1599  				for _, contentSize := range []int{0, 100} {
  1600  					t.Run(strconv.Itoa(contentSize), func(t *testing.T) {
  1601  						file := fstest.Item{
  1602  							ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
  1603  							Path:    "piped data.txt",
  1604  							Size:    -1, // use unknown size during upload
  1605  						}
  1606  
  1607  						var (
  1608  							err        error
  1609  							obj        fs.Object
  1610  							uploadHash *hash.MultiHasher
  1611  						)
  1612  						retry(t, "PutStream", func() error {
  1613  							contents := random.String(contentSize)
  1614  							buf := bytes.NewBufferString(contents)
  1615  							uploadHash = hash.NewMultiHasher()
  1616  							in := io.TeeReader(buf, uploadHash)
  1617  
  1618  							file.Size = -1
  1619  							obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
  1620  							obj, err = remote.Features().PutStream(ctx, in, obji)
  1621  							return err
  1622  						})
  1623  						file.Hashes = uploadHash.Sums()
  1624  						file.Size = int64(contentSize) // use correct size when checking
  1625  						file.Check(t, obj, remote.Precision())
  1626  						// Re-read the object and check again
  1627  						obj = findObject(ctx, t, remote, file.Path)
  1628  						file.Check(t, obj, remote.Precision())
  1629  						require.NoError(t, obj.Remove(ctx))
  1630  					})
  1631  				}
  1632  			})
  1633  
  1634  			// TestInternal calls InternalTest() on the Fs
  1635  			t.Run("Internal", func(t *testing.T) {
  1636  				skipIfNotOk(t)
  1637  				if it, ok := remote.(InternalTester); ok {
  1638  					it.InternalTest(t)
  1639  				} else {
  1640  					t.Skipf("%T does not implement InternalTester", remote)
  1641  				}
  1642  			})
  1643  
  1644  		})
  1645  
  1646  		// TestFsPutChunked may trigger large buffer allocation with
  1647  		// some backends (see fs/operations/xtra_operations_test.go),
  1648  		// keep it closer to the end of suite.
  1649  		t.Run("FsPutChunked", func(t *testing.T) {
  1650  			skipIfNotOk(t)
  1651  			if testing.Short() {
  1652  				t.Skip("not running with -short")
  1653  			}
  1654  
  1655  			setUploadChunkSizer, _ := remote.(SetUploadChunkSizer)
  1656  			if setUploadChunkSizer == nil {
  1657  				t.Skipf("%T does not implement SetUploadChunkSizer", remote)
  1658  			}
  1659  
  1660  			setUploadCutoffer, _ := remote.(SetUploadCutoffer)
  1661  
  1662  			minChunkSize := opt.ChunkedUpload.MinChunkSize
  1663  			if minChunkSize < 100 {
  1664  				minChunkSize = 100
  1665  			}
  1666  			if opt.ChunkedUpload.CeilChunkSize != nil {
  1667  				minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
  1668  			}
  1669  
  1670  			maxChunkSize := 2 * fs.MebiByte
  1671  			if maxChunkSize < 2*minChunkSize {
  1672  				maxChunkSize = 2 * minChunkSize
  1673  			}
  1674  			if opt.ChunkedUpload.MaxChunkSize > 0 && maxChunkSize > opt.ChunkedUpload.MaxChunkSize {
  1675  				maxChunkSize = opt.ChunkedUpload.MaxChunkSize
  1676  			}
  1677  			if opt.ChunkedUpload.CeilChunkSize != nil {
  1678  				maxChunkSize = opt.ChunkedUpload.CeilChunkSize(maxChunkSize)
  1679  			}
  1680  
  1681  			next := func(f func(fs.SizeSuffix) fs.SizeSuffix) fs.SizeSuffix {
  1682  				s := f(minChunkSize)
  1683  				if s > maxChunkSize {
  1684  					s = minChunkSize
  1685  				}
  1686  				return s
  1687  			}
  1688  
  1689  			chunkSizes := fs.SizeSuffixList{
  1690  				minChunkSize,
  1691  				minChunkSize + (maxChunkSize-minChunkSize)/3,
  1692  				next(NextPowerOfTwo),
  1693  				next(NextMultipleOf(100000)),
  1694  				next(NextMultipleOf(100001)),
  1695  				maxChunkSize,
  1696  			}
  1697  			chunkSizes.Sort()
  1698  
  1699  			// Set the minimum chunk size, upload cutoff and reset it at the end
  1700  			oldChunkSize, err := setUploadChunkSizer.SetUploadChunkSize(minChunkSize)
  1701  			require.NoError(t, err)
  1702  			var oldUploadCutoff fs.SizeSuffix
  1703  			if setUploadCutoffer != nil {
  1704  				oldUploadCutoff, err = setUploadCutoffer.SetUploadCutoff(minChunkSize)
  1705  				require.NoError(t, err)
  1706  			}
  1707  			defer func() {
  1708  				_, err := setUploadChunkSizer.SetUploadChunkSize(oldChunkSize)
  1709  				assert.NoError(t, err)
  1710  				if setUploadCutoffer != nil {
  1711  					_, err := setUploadCutoffer.SetUploadCutoff(oldUploadCutoff)
  1712  					assert.NoError(t, err)
  1713  				}
  1714  			}()
  1715  
  1716  			var lastCs fs.SizeSuffix
  1717  			for _, cs := range chunkSizes {
  1718  				if cs <= lastCs {
  1719  					continue
  1720  				}
  1721  				if opt.ChunkedUpload.CeilChunkSize != nil {
  1722  					cs = opt.ChunkedUpload.CeilChunkSize(cs)
  1723  				}
  1724  				lastCs = cs
  1725  
  1726  				t.Run(cs.String(), func(t *testing.T) {
  1727  					_, err := setUploadChunkSizer.SetUploadChunkSize(cs)
  1728  					require.NoError(t, err)
  1729  					if setUploadCutoffer != nil {
  1730  						_, err = setUploadCutoffer.SetUploadCutoff(cs)
  1731  						require.NoError(t, err)
  1732  					}
  1733  
  1734  					var testChunks []fs.SizeSuffix
  1735  					if opt.ChunkedUpload.NeedMultipleChunks {
  1736  						// If NeedMultipleChunks is set then test with > cs
  1737  						testChunks = []fs.SizeSuffix{cs + 1, 2 * cs, 2*cs + 1}
  1738  					} else {
  1739  						testChunks = []fs.SizeSuffix{cs - 1, cs, 2*cs + 1}
  1740  					}
  1741  
  1742  					for _, fileSize := range testChunks {
  1743  						t.Run(fmt.Sprintf("%d", fileSize), func(t *testing.T) {
  1744  							TestPutLarge(ctx, t, remote, &fstest.Item{
  1745  								ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
  1746  								Path:    fmt.Sprintf("chunked-%s-%s.bin", cs.String(), fileSize.String()),
  1747  								Size:    int64(fileSize),
  1748  							})
  1749  						})
  1750  					}
  1751  				})
  1752  			}
  1753  		})
  1754  
  1755  		// TestFsUploadUnknownSize ensures Fs.Put() and Object.Update() don't panic when
  1756  		// src.Size() == -1
  1757  		//
  1758  		// This may trigger large buffer allocation in some backends, keep it
  1759  		// closer to the suite end. (See fs/operations/xtra_operations_test.go)
  1760  		t.Run("FsUploadUnknownSize", func(t *testing.T) {
  1761  			skipIfNotOk(t)
  1762  
  1763  			t.Run("FsPutUnknownSize", func(t *testing.T) {
  1764  				defer func() {
  1765  					assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1")
  1766  				}()
  1767  
  1768  				contents := random.String(100)
  1769  				in := bytes.NewBufferString(contents)
  1770  
  1771  				obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
  1772  				obj, err := remote.Put(ctx, in, obji)
  1773  				if err == nil {
  1774  					require.NoError(t, obj.Remove(ctx), "successfully uploaded unknown-sized file but failed to remove")
  1775  				}
  1776  				// if err != nil: it's okay as long as no panic
  1777  			})
  1778  
  1779  			t.Run("FsUpdateUnknownSize", func(t *testing.T) {
  1780  				unknownSizeUpdateFile := fstest.Item{
  1781  					ModTime: fstest.Time("2002-02-03T04:05:06.499999999Z"),
  1782  					Path:    "unknown-size-update.txt",
  1783  				}
  1784  
  1785  				testPut(ctx, t, remote, &unknownSizeUpdateFile)
  1786  
  1787  				defer func() {
  1788  					assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1")
  1789  				}()
  1790  
  1791  				newContents := random.String(200)
  1792  				in := bytes.NewBufferString(newContents)
  1793  
  1794  				obj := findObject(ctx, t, remote, unknownSizeUpdateFile.Path)
  1795  				obji := object.NewStaticObjectInfo(unknownSizeUpdateFile.Path, unknownSizeUpdateFile.ModTime, -1, true, nil, obj.Fs())
  1796  				err := obj.Update(ctx, in, obji)
  1797  				if err == nil {
  1798  					require.NoError(t, obj.Remove(ctx), "successfully updated object with unknown-sized source but failed to remove")
  1799  				}
  1800  				// if err != nil: it's okay as long as no panic
  1801  			})
  1802  
  1803  		})
  1804  
  1805  		// TestFsRootCollapse tests if the root of an fs "collapses" to the
  1806  		// absolute root. It creates a new fs of the same backend type with its
  1807  		// root set to a *non-existent* folder, and attempts to read the info of
  1808  		// an object in that folder, whose name is taken from a directory that
  1809  		// exists in the absolute root.
  1810  		// This test is added after
  1811  		// https://github.com/rclone/rclone/issues/3164.
  1812  		t.Run("FsRootCollapse", func(t *testing.T) {
  1813  			deepRemoteName := subRemoteName + "/deeper/nonexisting/directory"
  1814  			deepRemote, err := fs.NewFs(deepRemoteName)
  1815  			require.NoError(t, err)
  1816  
  1817  			colonIndex := strings.IndexRune(deepRemoteName, ':')
  1818  			firstSlashIndex := strings.IndexRune(deepRemoteName, '/')
  1819  			firstDir := deepRemoteName[colonIndex+1 : firstSlashIndex]
  1820  			_, err = deepRemote.NewObject(ctx, firstDir)
  1821  			require.Equal(t, fs.ErrorObjectNotFound, err)
  1822  			// If err is not fs.ErrorObjectNotFound, it means the backend is
  1823  			// somehow confused about root and absolute root.
  1824  		})
  1825  
  1826  		// Purge the folder
  1827  		err = operations.Purge(ctx, remote, "")
  1828  		if errors.Cause(err) != fs.ErrorDirNotFound {
  1829  			require.NoError(t, err)
  1830  		}
  1831  		purged = true
  1832  		fstest.CheckListing(t, remote, []fstest.Item{})
  1833  
  1834  		// Check purging again if not bucket based
  1835  		if !isBucketBasedButNotRoot(remote) {
  1836  			err = operations.Purge(ctx, remote, "")
  1837  			assert.Error(t, err, "Expecting error after on second purge")
  1838  		}
  1839  
  1840  	})
  1841  
  1842  	// Check directory is purged
  1843  	if !purged {
  1844  		_ = operations.Purge(ctx, remote, "")
  1845  	}
  1846  
  1847  	// Remove the local directory so we don't clutter up /tmp
  1848  	if strings.HasPrefix(remoteName, "/") {
  1849  		t.Log("remoteName", remoteName)
  1850  		// Remove temp directory
  1851  		err := os.Remove(remoteName)
  1852  		require.NoError(t, err)
  1853  	}
  1854  }