github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/fstest/fstest.go (about)

     1  // Package fstest provides utilities for testing the Fs
     2  package fstest
     3  
     4  // FIXME put name of test FS in Fs structure
     5  
     6  import (
     7  	"bytes"
     8  	"context"
     9  	"flag"
    10  	"fmt"
    11  	"io"
    12  	"io/ioutil"
    13  	"log"
    14  	"math/rand"
    15  	"os"
    16  	"path"
    17  	"path/filepath"
    18  	"regexp"
    19  	"runtime"
    20  	"sort"
    21  	"strings"
    22  	"testing"
    23  	"time"
    24  
    25  	"github.com/rclone/rclone/fs"
    26  	"github.com/rclone/rclone/fs/accounting"
    27  	"github.com/rclone/rclone/fs/config"
    28  	"github.com/rclone/rclone/fs/hash"
    29  	"github.com/rclone/rclone/fs/walk"
    30  	"github.com/rclone/rclone/lib/random"
    31  	"github.com/stretchr/testify/assert"
    32  	"github.com/stretchr/testify/require"
    33  	"golang.org/x/text/unicode/norm"
    34  )
    35  
    36  // Globals
    37  var (
    38  	RemoteName      = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
    39  	Verbose         = flag.Bool("verbose", false, "Set to enable logging")
    40  	DumpHeaders     = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
    41  	DumpBodies      = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
    42  	Individual      = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
    43  	LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
    44  	UseListR        = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.")
    45  	// SizeLimit signals tests to skip maximum test file size and skip inappropriate runs
    46  	SizeLimit = flag.Int64("size-limit", 0, "Limit maximum test file size")
    47  	// ListRetries is the number of times to retry a listing to overcome eventual consistency
    48  	ListRetries = flag.Int("list-retries", 6, "Number or times to retry listing")
    49  	// MatchTestRemote matches the remote names used for testing
    50  	MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
    51  )
    52  
// Seed the random number generator
//
// Seeding from the wall clock gives each test run different random
// data (this package is test-only, so reproducibility is not needed).
func init() {
	rand.Seed(time.Now().UnixNano())

}
    58  
// Initialise rclone for testing
//
// It configures the global fs.Config from the test flags and the
// environment.  Note the ordering: config.ConfigPath must be set
// before config.LoadConfig() reads the file.
func Initialise() {
	// Never ask for passwords, fail instead.
	// If your local config is encrypted set environment variable
	// "RCLONE_CONFIG_PASS=hunter2" (or your password)
	fs.Config.AskPassword = false
	// Override the config file from the environment - we don't
	// parse the flags any more so this doesn't happen
	// automatically
	if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
		config.ConfigPath = envConfig
	}
	config.LoadConfig()
	if *Verbose {
		fs.Config.LogLevel = fs.LogLevelDebug
	}
	// Dump flags are bitmask values OR-ed into fs.Config.Dump
	if *DumpHeaders {
		fs.Config.Dump |= fs.DumpHeaders
	}
	if *DumpBodies {
		fs.Config.Dump |= fs.DumpBodies
	}
	fs.Config.LowLevelRetries = *LowLevelRetries
	fs.Config.UseListR = *UseListR
}
    84  
// Item represents an item for checking
type Item struct {
	Path    string               // remote path of the object
	Hashes  map[hash.Type]string // expected hashes of the content, keyed by hash type
	ModTime time.Time            // expected modification time
	Size    int64                // expected size in bytes
}
    92  
    93  // NewItem creates an item from a string content
    94  func NewItem(Path, Content string, modTime time.Time) Item {
    95  	i := Item{
    96  		Path:    Path,
    97  		ModTime: modTime,
    98  		Size:    int64(len(Content)),
    99  	}
   100  	hash := hash.NewMultiHasher()
   101  	buf := bytes.NewBufferString(Content)
   102  	_, err := io.Copy(hash, buf)
   103  	if err != nil {
   104  		log.Fatalf("Failed to create item: %v", err)
   105  	}
   106  	i.Hashes = hash.Sums()
   107  	return i
   108  }
   109  
   110  // CheckTimeEqualWithPrecision checks the times are equal within the
   111  // precision, returns the delta and a flag
   112  func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
   113  	dt := t0.Sub(t1)
   114  	if dt >= precision || dt <= -precision {
   115  		return dt, false
   116  	}
   117  	return dt, true
   118  }
   119  
// CheckModTime checks the mod time to the given precision
//
// It asserts that modTime and the expected i.ModTime differ by less
// than precision.
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
	dt, ok := CheckTimeEqualWithPrecision(modTime, i.ModTime, precision)
	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", obj.Remote(), dt, precision, modTime, i.ModTime, precision))
}
   125  
   126  // CheckHashes checks all the hashes the object supports are correct
   127  func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
   128  	require.NotNil(t, obj)
   129  	types := obj.Fs().Hashes().Array()
   130  	for _, Hash := range types {
   131  		// Check attributes
   132  		sum, err := obj.Hash(context.Background(), Hash)
   133  		require.NoError(t, err)
   134  		assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
   135  	}
   136  }
   137  
// Check checks all the attributes of the object are correct
//
// It checks the hashes, the size and the modification time (to the
// given precision).
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
	i.CheckHashes(t, obj)
	assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size()))
	i.CheckModTime(t, obj, obj.ModTime(context.Background()), precision)
}
   144  
   145  // Normalize runs a utf8 normalization on the string if running on OS
   146  // X.  This is because OS X denormalizes file names it writes to the
   147  // local file system.
   148  func Normalize(name string) string {
   149  	if runtime.GOOS == "darwin" {
   150  		name = norm.NFC.String(name)
   151  	}
   152  	return name
   153  }
   154  
// Items represents all items for checking
type Items struct {
	byName    map[string]*Item // normalized Path -> item, checked off by Find
	byNameAlt map[string]*Item // alternative names; not populated in this file — presumably filled elsewhere in the package, verify against callers
	items     []Item           // the items as passed in
}
   161  
   162  // NewItems makes an Items
   163  func NewItems(items []Item) *Items {
   164  	is := &Items{
   165  		byName:    make(map[string]*Item),
   166  		byNameAlt: make(map[string]*Item),
   167  		items:     items,
   168  	}
   169  	// Fill up byName
   170  	for i := range items {
   171  		is.byName[Normalize(items[i].Path)] = &items[i]
   172  	}
   173  	return is
   174  }
   175  
   176  // Find checks off an item
   177  func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
   178  	remote := Normalize(obj.Remote())
   179  	i, ok := is.byName[remote]
   180  	if !ok {
   181  		i, ok = is.byNameAlt[remote]
   182  		assert.True(t, ok, fmt.Sprintf("Unexpected file %q", remote))
   183  	}
   184  	if i != nil {
   185  		delete(is.byName, i.Path)
   186  		i.Check(t, obj, precision)
   187  	}
   188  }
   189  
   190  // Done checks all finished
   191  func (is *Items) Done(t *testing.T) {
   192  	if len(is.byName) != 0 {
   193  		for name := range is.byName {
   194  			t.Logf("Not found %q", name)
   195  		}
   196  	}
   197  	assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName)))
   198  }
   199  
   200  // makeListingFromItems returns a string representation of the items
   201  //
   202  // it returns two possible strings, one normal and one for windows
   203  func makeListingFromItems(items []Item) string {
   204  	nameLengths := make([]string, len(items))
   205  	for i, item := range items {
   206  		remote := Normalize(item.Path)
   207  		nameLengths[i] = fmt.Sprintf("%s (%d)", remote, item.Size)
   208  	}
   209  	sort.Strings(nameLengths)
   210  	return strings.Join(nameLengths, ", ")
   211  }
   212  
   213  // makeListingFromObjects returns a string representation of the objects
   214  func makeListingFromObjects(objs []fs.Object) string {
   215  	nameLengths := make([]string, len(objs))
   216  	for i, obj := range objs {
   217  		nameLengths[i] = fmt.Sprintf("%s (%d)", Normalize(obj.Remote()), obj.Size())
   218  	}
   219  	sort.Strings(nameLengths)
   220  	return strings.Join(nameLengths, ", ")
   221  }
   222  
   223  // filterEmptyDirs removes any empty (or containing only directories)
   224  // directories from expectedDirs
   225  func filterEmptyDirs(t *testing.T, items []Item, expectedDirs []string) (newExpectedDirs []string) {
   226  	dirs := map[string]struct{}{"": struct{}{}}
   227  	for _, item := range items {
   228  		base := item.Path
   229  		for {
   230  			base = path.Dir(base)
   231  			if base == "." || base == "/" {
   232  				break
   233  			}
   234  			dirs[base] = struct{}{}
   235  		}
   236  	}
   237  	for _, expectedDir := range expectedDirs {
   238  		if _, found := dirs[expectedDir]; found {
   239  			newExpectedDirs = append(newExpectedDirs, expectedDir)
   240  		} else {
   241  			t.Logf("Filtering empty directory %q", expectedDir)
   242  		}
   243  	}
   244  	return newExpectedDirs
   245  }
   246  
// CheckListingWithRoot checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non nil then we check those too.  Note that no
// directories returned is also OK as some remotes don't return
// directories.
//
// dir is the directory used for the listing.
func CheckListingWithRoot(t *testing.T, f fs.Fs, dir string, items []Item, expectedDirs []string, precision time.Duration) {
	// Remotes which can't hold empty directories won't list them
	if expectedDirs != nil && !f.Features().CanHaveEmptyDirectories {
		expectedDirs = filterEmptyDirs(t, items, expectedDirs)
	}
	is := NewItems(items)
	ctx := context.Background()
	oldErrors := accounting.Stats(ctx).GetErrors()
	var objs []fs.Object
	var dirs []fs.Directory
	var err error
	var retries = *ListRetries
	sleep := time.Second / 2
	wantListing := makeListingFromItems(items)
	gotListing := "<unset>"
	listingOK := false
	// Retry the listing with doubling sleeps to paper over eventually
	// consistent remotes
	for i := 1; i <= retries; i++ {
		objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1)
		if err != nil && err != fs.ErrorDirNotFound {
			t.Fatalf("Error listing: %v", err)
		}

		gotListing = makeListingFromObjects(objs)
		listingOK = wantListing == gotListing
		if listingOK && (expectedDirs == nil || len(dirs) == len(expectedDirs)) {
			// Put an extra sleep in if we did any retries just to make sure it really
			// is consistent (here is looking at you Amazon Drive!)
			if i != 1 {
				extraSleep := 5*time.Second + sleep
				t.Logf("Sleeping for %v just to make sure", extraSleep)
				time.Sleep(extraSleep)
			}
			break
		}
		sleep *= 2
		t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
		time.Sleep(sleep)
		if doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil {
			t.Logf("Flushing the directory cache")
			doDirCacheFlush()
		}
	}
	assert.True(t, listingOK, fmt.Sprintf("listing wrong, want\n  %s got\n  %s", wantListing, gotListing))
	// Check every listed object off against the expected items
	for _, obj := range objs {
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Don't notice an error when listing an empty directory
	if len(items) == 0 && oldErrors == 0 && accounting.Stats(ctx).GetErrors() == 1 {
		accounting.Stats(ctx).ResetErrors()
	}
	// Check the directories
	if expectedDirs != nil {
		expectedDirsCopy := make([]string, len(expectedDirs))
		for i, dir := range expectedDirs {
			expectedDirsCopy[i] = Normalize(dir)
		}
		actualDirs := []string{}
		for _, dir := range dirs {
			actualDirs = append(actualDirs, Normalize(dir.Remote()))
		}
		// Compare normalized, sorted directory listings
		sort.Strings(actualDirs)
		sort.Strings(expectedDirsCopy)
		assert.Equal(t, expectedDirsCopy, actualDirs, "directories")
	}
}
   321  
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non nil then we check those too.  Note that no
// directories returned is also OK as some remotes don't return
// directories.
//
// It is CheckListingWithRoot with the root ("") directory.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) {
	CheckListingWithRoot(t, f, "", items, expectedDirs, precision)
}
   331  
   332  // CheckListing checks the fs to see if it has the expected contents
   333  func CheckListing(t *testing.T, f fs.Fs, items []Item) {
   334  	precision := f.Precision()
   335  	CheckListingWithPrecision(t, f, items, nil, precision)
   336  }
   337  
// CheckItems checks the fs to see if it has only the items passed in
// using a precision of fs.Config.ModifyWindow
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
	CheckListingWithPrecision(t, f, items, nil, fs.GetModifyWindow(f))
}
   343  
// CompareItems compares a set of DirEntries to a slice of items and a list of dirs
// The modtimes are compared with the precision supplied
//
// what is used to label the assertion messages.
func CompareItems(t *testing.T, entries fs.DirEntries, items []Item, expectedDirs []string, precision time.Duration, what string) {
	is := NewItems(items)
	var objs []fs.Object
	var dirs []fs.Directory
	wantListing := makeListingFromItems(items)
	// Partition the entries into objects and directories
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Directory:
			dirs = append(dirs, x)
		case fs.Object:
			objs = append(objs, x)
			// do nothing
		default:
			t.Fatalf("unknown object type %T", entry)
		}
	}

	// Compare the flattened listings first for a readable failure message
	gotListing := makeListingFromObjects(objs)
	listingOK := wantListing == gotListing
	assert.True(t, listingOK, fmt.Sprintf("%s not equal, want\n  %s got\n  %s", what, wantListing, gotListing))
	// Then check each object's attributes against the expected items
	for _, obj := range objs {
		require.NotNil(t, obj)
		is.Find(t, obj, precision)
	}
	is.Done(t)
	// Check the directories
	if expectedDirs != nil {
		expectedDirsCopy := make([]string, len(expectedDirs))
		for i, dir := range expectedDirs {
			expectedDirsCopy[i] = Normalize(dir)
		}
		actualDirs := []string{}
		for _, dir := range dirs {
			actualDirs = append(actualDirs, Normalize(dir.Remote()))
		}
		// Compare normalized, sorted directory listings
		sort.Strings(actualDirs)
		sort.Strings(expectedDirsCopy)
		assert.Equal(t, expectedDirsCopy, actualDirs, "directories not equal")
	}
}
   386  
   387  // Time parses a time string or logs a fatal error
   388  func Time(timeString string) time.Time {
   389  	t, err := time.Parse(time.RFC3339Nano, timeString)
   390  	if err != nil {
   391  		log.Fatalf("Failed to parse time %q: %v", timeString, err)
   392  	}
   393  	return t
   394  }
   395  
   396  // LocalRemote creates a temporary directory name for local remotes
   397  func LocalRemote() (path string, err error) {
   398  	path, err = ioutil.TempDir("", "rclone")
   399  	if err == nil {
   400  		// Now remove the directory
   401  		err = os.Remove(path)
   402  	}
   403  	path = filepath.ToSlash(path)
   404  	return
   405  }
   406  
   407  // RandomRemoteName makes a random bucket or subdirectory name
   408  //
   409  // Returns a random remote name plus the leaf name
   410  func RandomRemoteName(remoteName string) (string, string, error) {
   411  	var err error
   412  	var leafName string
   413  
   414  	// Make a directory if remote name is null
   415  	if remoteName == "" {
   416  		remoteName, err = LocalRemote()
   417  		if err != nil {
   418  			return "", "", err
   419  		}
   420  	} else {
   421  		if !strings.HasSuffix(remoteName, ":") {
   422  			remoteName += "/"
   423  		}
   424  		leafName = "rclone-test-" + random.String(24)
   425  		if !MatchTestRemote.MatchString(leafName) {
   426  			log.Fatalf("%q didn't match the test remote name regexp", leafName)
   427  		}
   428  		remoteName += leafName
   429  	}
   430  	return remoteName, leafName, nil
   431  }
   432  
   433  // RandomRemote makes a random bucket or subdirectory on the remote
   434  // from the -remote parameter
   435  //
   436  // Call the finalise function returned to Purge the fs at the end (and
   437  // the parent if necessary)
   438  //
   439  // Returns the remote, its url, a finaliser and an error
   440  func RandomRemote() (fs.Fs, string, func(), error) {
   441  	var err error
   442  	var parentRemote fs.Fs
   443  	remoteName := *RemoteName
   444  
   445  	remoteName, _, err = RandomRemoteName(remoteName)
   446  	if err != nil {
   447  		return nil, "", nil, err
   448  	}
   449  
   450  	remote, err := fs.NewFs(remoteName)
   451  	if err != nil {
   452  		return nil, "", nil, err
   453  	}
   454  
   455  	finalise := func() {
   456  		Purge(remote)
   457  		if parentRemote != nil {
   458  			Purge(parentRemote)
   459  			if err != nil {
   460  				log.Printf("Failed to purge %v: %v", parentRemote, err)
   461  			}
   462  		}
   463  	}
   464  
   465  	return remote, remoteName, finalise, nil
   466  }
   467  
// Purge is a simplified re-implementation of operations.Purge for the
// test routine cleanup to avoid circular dependencies.
//
// It logs errors rather than returning them
func Purge(f fs.Fs) {
	ctx := context.Background()
	var err error
	doFallbackPurge := true
	// Prefer the remote's native Purge if it implements one
	if doPurge := f.Features().Purge; doPurge != nil {
		doFallbackPurge = false
		fs.Debugf(f, "Purge remote")
		err = doPurge(ctx)
		if err == fs.ErrorCantPurge {
			// Remote can't purge this - fall back to manual removal
			doFallbackPurge = true
		}
	}
	if doFallbackPurge {
		// Delete all the objects, collecting the directories seen on the way
		dirs := []string{""}
		err = walk.ListR(ctx, f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
			// This err deliberately shadows the outer one: per-object
			// removal failures are logged here, not propagated
			var err error
			entries.ForObject(func(obj fs.Object) {
				fs.Debugf(f, "Purge object %q", obj.Remote())
				err = obj.Remove(ctx)
				if err != nil {
					log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
				}
			})
			entries.ForDir(func(dir fs.Directory) {
				dirs = append(dirs, dir.Remote())
			})
			return nil
		})
		// Remove directories in reverse sorted order so children are
		// removed before their parents
		sort.Strings(dirs)
		for i := len(dirs) - 1; i >= 0; i-- {
			dir := dirs[i]
			fs.Debugf(f, "Purge dir %q", dir)
			err := f.Rmdir(ctx, dir)
			if err != nil {
				log.Printf("purge failed to rmdir %q: %v", dir, err)
			}
		}
	}
	// err here is from doPurge or walk.ListR (callback errors were
	// logged in place, not returned)
	if err != nil {
		log.Printf("purge failed: %v", err)
	}
}