github.com/StarfishStorage/goofys@v0.23.2-0.20200415030923-535558486b34/internal/goofys_test.go

     1  // Copyright 2015 - 2017 Ka-Hing Cheung
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package internal
    16  
    17  import (
    18  	. "github.com/kahing/goofys/api/common"
    19  
    20  	"bufio"
    21  	"bytes"
    22  	"fmt"
    23  	"io"
    24  	"io/ioutil"
    25  	"math/rand"
    26  	"net"
    27  	"os"
    28  	"os/exec"
    29  	"os/signal"
    30  	"os/user"
    31  	"reflect"
    32  	"runtime"
    33  	"sort"
    34  	"strconv"
    35  	"strings"
    36  	"sync"
    37  	"syscall"
    38  	"testing"
    39  	"time"
    40  
    41  	"context"
    42  
    43  	"github.com/aws/aws-sdk-go/aws"
    44  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    45  	"github.com/aws/aws-sdk-go/aws/credentials"
    46  
    47  	"github.com/Azure/azure-storage-blob-go/azblob"
    48  	"github.com/Azure/go-autorest/autorest"
    49  	"github.com/Azure/go-autorest/autorest/azure"
    50  	azureauth "github.com/Azure/go-autorest/autorest/azure/auth"
    51  
    52  	"golang.org/x/sys/unix"
    53  
    54  	"github.com/jacobsa/fuse"
    55  	"github.com/jacobsa/fuse/fuseops"
    56  	"github.com/jacobsa/fuse/fuseutil"
    57  
    58  	"github.com/sirupsen/logrus"
    59  
    60  	. "gopkg.in/check.v1"
    61  )
    62  
    63  // so I don't get complaints about unused imports
    64  var ignored = logrus.DebugLevel
    65  
    66  func currentUid() uint32 {
    67  	user, err := user.Current()
    68  	if err != nil {
    69  		panic(err)
    70  	}
    71  
    72  	uid, err := strconv.ParseUint(user.Uid, 10, 32)
    73  	if err != nil {
    74  		panic(err)
    75  	}
    76  
    77  	return uint32(uid)
    78  }
    79  
    80  func currentGid() uint32 {
    81  	user, err := user.Current()
    82  	if err != nil {
    83  		panic(err)
    84  	}
    85  
    86  	gid, err := strconv.ParseUint(user.Gid, 10, 32)
    87  	if err != nil {
    88  		panic(err)
    89  	}
    90  
    91  	return uint32(gid)
    92  }
    93  
    94  type GoofysTest struct {
    95  	fs        *Goofys
    96  	ctx       context.Context
    97  	awsConfig *aws.Config
    98  	cloud     StorageBackend
    99  	emulator  bool
   100  	azurite   bool
   101  
   102  	removeBucket []StorageBackend
   103  
   104  	env map[string]*string
   105  }
   106  
   107  func Test(t *testing.T) {
   108  	TestingT(t)
   109  }
   110  
   111  var _ = Suite(&GoofysTest{})
   112  
   113  func logOutput(t *C, tag string, r io.ReadCloser) {
   114  	in := bufio.NewScanner(r)
   115  
   116  	for in.Scan() {
   117  		t.Log(tag, in.Text())
   118  	}
   119  }
   120  
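        // waitFor dials addr over TCP, retrying every 100ms for up to 10 attempts,
        // and returns nil on the first successful connection (or the last dial
        // error if the listener never comes up).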
   121  func waitFor(t *C, addr string) (err error) {
   122  	// wait for it to listen on port
   123  	for i := 0; i < 10; i++ {
   124  		var conn net.Conn
   125  		conn, err = net.Dial("tcp", addr)
   126  		if err == nil {
   127  			// we are done!
   128  			conn.Close()
   129  			return
   130  		} else {
   131  			t.Logf("Could not connect: %v", err)
   132  			time.Sleep(100 * time.Millisecond)
   133  		}
   134  	}
   135  
   136  	return
   137  }
   138  
   139  func (t *GoofysTest) deleteBlobsParallelly(cloud StorageBackend, blobs []string) error {
   140  	sem := make(semaphore, 100)
   141  	sem.P(100)
   142  	var err error
        	var errMu sync.Mutex // guards err, which the worker goroutines below write
   143  	for _, blobOuter := range blobs {
   144  		sem.V(1)
   145  		go func(blob string) {
   146  			defer sem.P(1)
   147  			_, localerr := cloud.DeleteBlob(&DeleteBlobInput{Key: blob})
   148  			if localerr != nil && localerr != syscall.ENOENT {
        				errMu.Lock()
   149  				err = localerr
        				errMu.Unlock()
   150  			}
   151  		}(blobOuter)
        		errMu.Lock()
        		failed := err != nil
        		errMu.Unlock()
   152  		if failed {
   153  			break
   154  		}
   155  	}
   156  	sem.V(100)
   157  	return err
   158  }
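
        // The semaphore above is this package's channel-based semaphore; as used in
        // this file, P(n) deposits n tokens and V(n) consumes n (blocking until they
        // are available). A minimal sketch of the same throttling pattern, with a
        // hypothetical helper that is not used by the tests:
        func exampleThrottle(items []string, apply func(string)) {
        	sem := make(semaphore, 2)
        	sem.P(2) // seed two tokens, i.e. allow at most two workers in flight
        	for _, it := range items {
        		sem.V(1) // take a token; blocks while two workers are running
        		go func(it string) {
        			defer sem.P(1) // put the token back when done
        			apply(it)
        		}(it)
        	}
        	sem.V(2) // drain every token: returns only after all workers finish
        }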
   159  
   160  // groupByDecreasingDepths takes a slice of path strings and returns the paths
   161  // grouped so that each group has the same `depth` - depth(a/b/c)=2, depth(a/b/)=1.
   162  // The groups are returned in decreasing order of depth.
   163  // - Inp: [] Out: []
   164  // - Inp: ["a/b1/", "a/b/c1", "a/b2", "a/b/c2"]
   165  //   Out: [["a/b/c1", "a/b/c2"], ["a/b1/", "a/b2"]]
   166  // - Inp: ["a/b1/", "z/a/b/c1", "a/b2", "z/a/b/c2"]
   167  //   Out: [["z/a/b/c1", "z/a/b/c2"], ["a/b1/", "a/b2"]]
   168  func groupByDecreasingDepths(items []string) [][]string {
   169  	depthToGroup := map[int][]string{}
   170  	for _, item := range items {
   171  		depth := len(strings.Split(strings.TrimRight(item, "/"), "/"))
   172  		if _, ok := depthToGroup[depth]; !ok {
   173  			depthToGroup[depth] = []string{}
   174  		}
   175  		depthToGroup[depth] = append(depthToGroup[depth], item)
   176  	}
   177  	decreasingDepths := []int{}
   178  	for depth := range depthToGroup {
   179  		decreasingDepths = append(decreasingDepths, depth)
   180  	}
   181  	sort.Sort(sort.Reverse(sort.IntSlice(decreasingDepths)))
   182  	ret := [][]string{}
   183  	for _, depth := range decreasingDepths {
   184  		group := depthToGroup[depth]
   185  		ret = append(ret, group)
   186  	}
   187  	return ret
   188  }
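
        // For instance, deleting ["a/", "a/b"] in that order fails on ADL because
        // "a/" is not yet empty; grouping by decreasing depth puts "a/b" first.
        // A hypothetical helper (not used by the tests) showing the grouping:
        func exampleAdlDeleteOrder() [][]string {
        	// returns [["a/b"], ["a/"]]: depth("a/b") = 2, depth("a/") = 1
        	return groupByDecreasingDepths([]string{"a/", "a/b"})
        }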
   189  
   190  func (t *GoofysTest) DeleteADLBlobs(cloud StorageBackend, items []string) error {
   191  	// If we delete a directory that's not empty, ADL{v1|v2} returns failure. That can
   192  	// happen if we want to delete both "dir1" and "dir1/file" but delete them
   193  	// in the wrong order.
   194  	// So we group the items to delete into multiple groups. All items in a group
   195  	// will have the same depth - depth(/a/b/c) = 2, depth(/a/b/) = 1.
   196  	// We then iterate over the groups in descending order of depth and delete them in parallel.
   197  	for _, group := range groupByDecreasingDepths(items) {
   198  		err := t.deleteBlobsParallelly(cloud, group)
   199  		if err != nil {
   200  			return err
   201  		}
   202  	}
   203  	return nil
   204  }
   205  
   206  func (s *GoofysTest) selectTestConfig(t *C, flags *FlagStorage) (conf S3Config) {
   207  	(&conf).Init()
   208  
   209  	if hasEnv("AWS") {
   210  		if isTravis() {
   211  			conf.Region = "us-east-1"
   212  		} else {
   213  			conf.Region = "us-west-2"
   214  		}
   215  		profile := os.Getenv("AWS")
   216  		if profile != "" {
   217  			if profile != "-" {
   218  				conf.Profile = profile
   219  			} else {
   220  				conf.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
   221  				conf.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
   222  			}
   223  		}
   224  	} else if hasEnv("GCS") {
   225  		conf.Region = "us-west1"
   226  		conf.Profile = os.Getenv("GCS")
   227  		flags.Endpoint = "http://storage.googleapis.com"
   228  	} else if hasEnv("MINIO") {
   229  		conf.Region = "us-east-1"
   230  		conf.AccessKey = "Q3AM3UQ867SPQQA43P2F"
   231  		conf.SecretKey = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
   232  		flags.Endpoint = "https://play.minio.io:9000"
   233  	} else {
   234  		s.emulator = true
   235  
   236  		conf.Region = "us-west-2"
   237  		conf.AccessKey = "foo"
   238  		conf.SecretKey = "bar"
   239  		flags.Endpoint = "http://127.0.0.1:8080"
   240  	}
   241  
   242  	return
   243  }
   244  
   245  func (s *GoofysTest) waitForEmulator(t *C) {
   246  	if s.emulator {
   247  		addr := "127.0.0.1:8080"
   248  
   249  		err := waitFor(t, addr)
   250  		t.Assert(err, IsNil)
   251  	}
   252  }
   253  
   254  func (s *GoofysTest) SetUpSuite(t *C) {
   255  }
   256  
   257  func (s *GoofysTest) deleteBucket(cloud StorageBackend) error {
   258  	param := &ListBlobsInput{}
   259  
   260  	// Azure datalake v1,v2 need special handling.
   261  	adlKeysToRemove := make([]string, 0)
   262  	for {
   263  		resp, err := cloud.ListBlobs(param)
   264  		if err != nil {
   265  			return err
   266  		}
   267  
   268  		keysToRemove := []string{}
   269  		for _, o := range resp.Items {
   270  			keysToRemove = append(keysToRemove, *o.Key)
   271  		}
   272  		if len(keysToRemove) != 0 {
   273  			switch cloud.(type) {
   274  			case *ADLv1, *ADLv2:
   275  				// ADL{v1|v2} supports directories, so a directory can be removed
   276  				// only once it is empty. We therefore collect these blobs and
   277  				// remove them in decreasing depth order via DeleteADLBlobs below.
   278  				adlKeysToRemove = append(adlKeysToRemove, keysToRemove...)
   279  			default:
   280  				_, err = cloud.DeleteBlobs(&DeleteBlobsInput{Items: keysToRemove})
   281  				if err != nil {
   282  					return err
   283  				}
   284  			}
   285  		}
   286  		if resp.IsTruncated {
   287  			param.ContinuationToken = resp.NextContinuationToken
   288  		} else {
   289  			break
   290  		}
   291  	}
   292  
   293  	if len(adlKeysToRemove) != 0 {
   294  		err := s.DeleteADLBlobs(cloud, adlKeysToRemove)
   295  		if err != nil {
   296  			return err
   297  		}
   298  	}
   299  
   300  	_, err := cloud.RemoveBucket(&RemoveBucketInput{})
   301  	return err
   302  }
   303  
   304  func (s *GoofysTest) TearDownTest(t *C) {
   305  	for _, cloud := range s.removeBucket {
   306  		err := s.deleteBucket(cloud)
   307  		t.Assert(err, IsNil)
   308  	}
   309  	s.removeBucket = nil
   310  }
   311  
   312  func (s *GoofysTest) removeBlob(cloud StorageBackend, t *C, blobPath string) {
   313  	params := &DeleteBlobInput{
   314  		Key: blobPath,
   315  	}
   316  	_, err := cloud.DeleteBlob(params)
   317  	t.Assert(err, IsNil)
   318  }
   319  
   320  func (s *GoofysTest) setupBlobs(cloud StorageBackend, t *C, env map[string]*string) {
   321  
   322  	// concurrency = 100
   323  	throttler := make(semaphore, 100)
   324  	throttler.P(100)
   325  
   326  	var globalErr error
        	var errMu sync.Mutex // guards globalErr, which several goroutines may set
   327  	for path, c := range env {
   328  		throttler.V(1)
   329  		go func(path string, content *string) {
   330  			dir := false
   331  			if content == nil {
   332  				if strings.HasSuffix(path, "/") {
   333  					if cloud.Capabilities().DirBlob {
   334  						path = strings.TrimRight(path, "/")
   335  					}
   336  					dir = true
   337  					content = PString("")
   338  				} else {
   339  					content = &path
   340  				}
   341  			}
   342  			defer throttler.P(1)
   343  			params := &PutBlobInput{
   344  				Key:  path,
   345  				Body: bytes.NewReader([]byte(*content)),
   346  				Size: PUInt64(uint64(len(*content))),
   347  				Metadata: map[string]*string{
   348  					"name": aws.String(path + "+/#%00"),
   349  				},
   350  				DirBlob: dir,
   351  			}
   352  
   353  			_, err := cloud.PutBlob(params)
   354  			if err != nil {
        				errMu.Lock()
   355  				globalErr = err
        				errMu.Unlock()
   356  			}
   357  			t.Assert(err, IsNil)
   358  		}(path, c)
   359  	}
   360  	throttler.V(100)
   361  	throttler = make(semaphore, 100)
   362  	throttler.P(100)
   363  	t.Assert(globalErr, IsNil)
   364  
   365  	// double check, except on AWS S3, because there we sometimes
   366  	// hit 404 NoSuchBucket and there's no way to distinguish that
   367  	// from 404 KeyNotFound
   368  	if !hasEnv("AWS") {
   369  		for path, c := range env {
   370  			throttler.V(1)
   371  			go func(path string, content *string) {
   372  				defer throttler.P(1)
   373  				params := &HeadBlobInput{Key: path}
   374  				res, err := cloud.HeadBlob(params)
   375  				t.Assert(err, IsNil)
   376  				if content != nil {
   377  					t.Assert(res.Size, Equals, uint64(len(*content)))
   378  				} else if strings.HasSuffix(path, "/") || path == "zero" {
   379  					t.Assert(res.Size, Equals, uint64(0))
   380  				} else {
   381  					t.Assert(res.Size, Equals, uint64(len(path)))
   382  				}
   383  			}(path, c)
   384  		}
   385  		throttler.V(100)
   386  		t.Assert(globalErr, IsNil)
   387  	}
   388  }
   389  
   390  func (s *GoofysTest) setupEnv(t *C, env map[string]*string, public bool) {
   391  	if public {
   392  		if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
   393  			s3.config.ACL = "public-read"
   394  		} else {
   395  			t.Error("Not S3 backend")
   396  		}
   397  	}
   398  
   399  	_, err := s.cloud.MakeBucket(&MakeBucketInput{})
   400  	t.Assert(err, IsNil)
   401  
   402  	if !s.emulator {
   403  		//time.Sleep(time.Second)
   404  	}
   405  
   406  	s.setupBlobs(s.cloud, t, env)
   407  
   408  	t.Log("setupEnv done")
   409  }
   410  
   411  func (s *GoofysTest) setupDefaultEnv(t *C, public bool) {
   412  	s.env = map[string]*string{
   413  		"file1":           nil,
   414  		"file2":           nil,
   415  		"dir1/file3":      nil,
   416  		"dir2/dir3/":      nil,
   417  		"dir2/dir3/file4": nil,
   418  		"dir4/":           nil,
   419  		"dir4/file5":      nil,
   420  		"empty_dir/":      nil,
   421  		"empty_dir2/":     nil,
   422  		"zero":            PString(""),
   423  	}
   424  
   425  	s.setupEnv(t, s.env, public)
   426  }
   427  
   428  func (s *GoofysTest) SetUpTest(t *C) {
   429  	log.Infof("Starting at %v", time.Now())
   430  
   431  	var bucket string
   432  	mount := os.Getenv("MOUNT")
   433  
   434  	if mount != "false" {
   435  		bucket = mount
   436  	} else {
   437  		bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
   438  	}
   439  	uid, gid := MyUserAndGroup()
   440  	flags := &FlagStorage{
   441  		DirMode:     0700,
   442  		FileMode:    0700,
   443  		Uid:         uint32(uid),
   444  		Gid:         uint32(gid),
   445  		HTTPTimeout: 30 * time.Second,
   446  	}
   447  
   448  	cloud := os.Getenv("CLOUD")
   449  
   450  	if cloud == "s3" {
   451  		s.emulator = !hasEnv("AWS")
   452  		s.waitForEmulator(t)
   453  
   454  		conf := s.selectTestConfig(t, flags)
   455  		flags.Backend = &conf
   456  
   457  		s3, err := NewS3(bucket, flags, &conf)
   458  		t.Assert(err, IsNil)
   459  
   460  		s.cloud = s3
   461  		s3.aws = hasEnv("AWS")
   462  		if s3.aws {
   463  			s.cloud = &S3BucketEventualConsistency{s3}
   464  		}
   465  
   466  		if !hasEnv("MINIO") {
   467  			s3.Handlers.Sign.Clear()
   468  			s3.Handlers.Sign.PushBack(SignV2)
   469  			s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
   470  		}
   471  		_, err = s3.ListBuckets(nil)
   472  		t.Assert(err, IsNil)
   473  
   474  	} else if cloud == "gcs" {
   475  		conf := s.selectTestConfig(t, flags)
   476  		flags.Backend = &conf
   477  
   478  		var err error
   479  		s.cloud, err = NewGCS3(bucket, flags, &conf)
   480  		t.Assert(s.cloud, NotNil)
   481  		t.Assert(err, IsNil)
   482  	} else if cloud == "azblob" {
   483  		config, err := AzureBlobConfig(os.Getenv("ENDPOINT"), "", "blob")
   484  		t.Assert(err, IsNil)
   485  
   486  		if config.Endpoint == AzuriteEndpoint {
   487  			s.azurite = true
   488  			s.emulator = true
   489  			s.waitForEmulator(t)
   490  		}
   491  
   492  		// Azurite's SAS is buggy, ex: https://github.com/Azure/Azurite/issues/216
   493  		if os.Getenv("SAS_EXPIRE") != "" {
   494  			expire, err := time.ParseDuration(os.Getenv("SAS_EXPIRE"))
   495  			t.Assert(err, IsNil)
   496  
   497  			config.TokenRenewBuffer = expire / 2
   498  			credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
   499  			t.Assert(err, IsNil)
   500  
   501  			// test sas token config
   502  			config.SasToken = func() (string, error) {
   503  				sasQueryParams, err := azblob.AccountSASSignatureValues{
   504  					Protocol:   azblob.SASProtocolHTTPSandHTTP,
   505  					StartTime:  time.Now().UTC().Add(-1 * time.Hour),
   506  					ExpiryTime: time.Now().UTC().Add(expire),
   507  					Services:   azblob.AccountSASServices{Blob: true}.String(),
   508  					ResourceTypes: azblob.AccountSASResourceTypes{
   509  						Service:   true,
   510  						Container: true,
   511  						Object:    true,
   512  					}.String(),
   513  					Permissions: azblob.AccountSASPermissions{
   514  						Read:   true,
   515  						Write:  true,
   516  						Delete: true,
   517  						List:   true,
   518  						Create: true,
   519  					}.String(),
   520  				}.NewSASQueryParameters(credential)
   521  				if err != nil {
   522  					return "", err
   523  				}
   524  				return sasQueryParams.Encode(), nil
   525  			}
   526  		}
   527  
   528  		flags.Backend = &config
   529  
   530  		s.cloud, err = NewAZBlob(bucket, &config)
   531  		t.Assert(err, IsNil)
   532  		t.Assert(s.cloud, NotNil)
   533  	} else if cloud == "adlv1" {
   534  		cred := azureauth.NewClientCredentialsConfig(
   535  			os.Getenv("ADLV1_CLIENT_ID"),
   536  			os.Getenv("ADLV1_CLIENT_CREDENTIAL"),
   537  			os.Getenv("ADLV1_TENANT_ID"))
   538  		auth, err := cred.Authorizer()
   539  		t.Assert(err, IsNil)
   540  
   541  		config := ADLv1Config{
   542  			Endpoint:   os.Getenv("ENDPOINT"),
   543  			Authorizer: auth,
   544  		}
   545  		config.Init()
   546  
   547  		flags.Backend = &config
   548  
   549  		s.cloud, err = NewADLv1(bucket, flags, &config)
   550  		t.Assert(err, IsNil)
   551  		t.Assert(s.cloud, NotNil)
   552  	} else if cloud == "adlv2" {
   553  		var err error
   554  		var auth autorest.Authorizer
   555  
   556  		if os.Getenv("AZURE_STORAGE_ACCOUNT") != "" && os.Getenv("AZURE_STORAGE_KEY") != "" {
   557  			auth = &AZBlobConfig{
   558  				AccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"),
   559  				AccountKey:  os.Getenv("AZURE_STORAGE_KEY"),
   560  			}
   561  		} else {
   562  			cred := azureauth.NewClientCredentialsConfig(
   563  				os.Getenv("ADLV2_CLIENT_ID"),
   564  				os.Getenv("ADLV2_CLIENT_CREDENTIAL"),
   565  				os.Getenv("ADLV2_TENANT_ID"))
   566  			cred.Resource = azure.PublicCloud.ResourceIdentifiers.Storage
   567  			auth, err = cred.Authorizer()
   568  			t.Assert(err, IsNil)
   569  		}
   570  
   571  		config := ADLv2Config{
   572  			Endpoint:   os.Getenv("ENDPOINT"),
   573  			Authorizer: auth,
   574  		}
   575  
   576  		flags.Backend = &config
   577  
   578  		s.cloud, err = NewADLv2(bucket, flags, &config)
   579  		t.Assert(err, IsNil)
   580  		t.Assert(s.cloud, NotNil)
   581  	} else {
   582  		t.Fatal("Unsupported backend")
   583  	}
   584  
   585  	if mount == "false" {
   586  		s.removeBucket = append(s.removeBucket, s.cloud)
   587  		s.setupDefaultEnv(t, false)
   588  	} else {
   589  		_, err := s.cloud.MakeBucket(&MakeBucketInput{})
   590  		if err == fuse.EEXIST {
   591  			err = nil
   592  		}
   593  		t.Assert(err, IsNil)
   594  	}
   595  
   596  	if hasEnv("AWS") {
   597  		s.fs = newGoofys(context.Background(), bucket, flags,
   598  			func(bucket string, flags *FlagStorage) (StorageBackend, error) {
   599  				cloud, err := NewBackend(bucket, flags)
   600  				if err != nil {
   601  					return nil, err
   602  				}
   603  
   604  				return &S3BucketEventualConsistency{cloud.(*S3Backend)}, nil
   605  			})
   606  	} else {
   607  		s.fs = NewGoofys(context.Background(), bucket, flags)
   608  	}
   609  	t.Assert(s.fs, NotNil)
   610  
   611  	s.ctx = context.Background()
   612  
   613  	if hasEnv("GCS") {
   614  		flags.Endpoint = "http://storage.googleapis.com"
   615  	}
   616  }
   617  
   618  func (s *GoofysTest) getRoot(t *C) (inode *Inode) {
   619  	inode = s.fs.inodes[fuseops.RootInodeID]
   620  	t.Assert(inode, NotNil)
   621  	return
   622  }
   623  
   624  func (s *GoofysTest) TestGetRootInode(t *C) {
   625  	root := s.getRoot(t)
   626  	t.Assert(root.Id, Equals, fuseops.InodeID(fuseops.RootInodeID))
   627  }
   628  
   629  func (s *GoofysTest) TestGetRootAttributes(t *C) {
   630  	_, err := s.getRoot(t).GetAttributes()
   631  	t.Assert(err, IsNil)
   632  }
   633  
   634  func (s *GoofysTest) ForgetInode(t *C, inode fuseops.InodeID) {
   635  	err := s.fs.ForgetInode(s.ctx, &fuseops.ForgetInodeOp{Inode: inode})
   636  	t.Assert(err, IsNil)
   637  }
   638  
   639  func (s *GoofysTest) LookUpInode(t *C, name string) (in *Inode, err error) {
   640  	parent := s.getRoot(t)
   641  
   642  	for {
   643  		idx := strings.Index(name, "/")
   644  		if idx == -1 {
   645  			break
   646  		}
   647  
   648  		dirName := name[0:idx]
   649  		name = name[idx+1:]
   650  
   651  		lookup := fuseops.LookUpInodeOp{
   652  			Parent: parent.Id,
   653  			Name:   dirName,
   654  		}
   655  
   656  		err = s.fs.LookUpInode(nil, &lookup)
   657  		if err != nil {
   658  			return
   659  		}
   660  		parent = s.fs.inodes[lookup.Entry.Child]
   661  	}
   662  
   663  	lookup := fuseops.LookUpInodeOp{
   664  		Parent: parent.Id,
   665  		Name:   name,
   666  	}
   667  
   668  	err = s.fs.LookUpInode(nil, &lookup)
   669  	if err != nil {
   670  		return
   671  	}
   672  	in = s.fs.inodes[lookup.Entry.Child]
   673  	return
   674  }
   675  
   676  func (s *GoofysTest) TestSetup(t *C) {
   677  }
   678  
   679  func (s *GoofysTest) TestLookUpInode(t *C) {
   680  	_, err := s.LookUpInode(t, "file1")
   681  	t.Assert(err, IsNil)
   682  
   683  	_, err = s.LookUpInode(t, "fileNotFound")
   684  	t.Assert(err, Equals, fuse.ENOENT)
   685  
   686  	_, err = s.LookUpInode(t, "dir1/file3")
   687  	t.Assert(err, IsNil)
   688  
   689  	_, err = s.LookUpInode(t, "dir2/dir3")
   690  	t.Assert(err, IsNil)
   691  
   692  	_, err = s.LookUpInode(t, "dir2/dir3/file4")
   693  	t.Assert(err, IsNil)
   694  
   695  	_, err = s.LookUpInode(t, "empty_dir")
   696  	t.Assert(err, IsNil)
   697  }
   698  
   699  func (s *GoofysTest) TestPanicWrapper(t *C) {
   700  	fs := FusePanicLogger{s.fs}
   701  	err := fs.GetInodeAttributes(nil, &fuseops.GetInodeAttributesOp{
   702  		Inode: 1234,
   703  	})
   704  	t.Assert(err, Equals, fuse.EIO)
   705  }
   706  
   707  func (s *GoofysTest) TestGetInodeAttributes(t *C) {
   708  	inode, err := s.getRoot(t).LookUp("file1")
   709  	t.Assert(err, IsNil)
   710  
   711  	attr, err := inode.GetAttributes()
   712  	t.Assert(err, IsNil)
   713  	t.Assert(attr.Size, Equals, uint64(len("file1")))
   714  }
   715  
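        // readDirFully drains dh: offsets 0 and 1 must yield the synthetic "." and
        // ".." entries, and the remaining entries are read one offset at a time
        // until ReadDir returns nil.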
   716  func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []DirHandleEntry) {
   717  	dh.mu.Lock()
   718  	defer dh.mu.Unlock()
   719  
   720  	en, err := dh.ReadDir(fuseops.DirOffset(0))
   721  	t.Assert(err, IsNil)
   722  	t.Assert(en, NotNil)
   723  	t.Assert(en.Name, Equals, ".")
   724  
   725  	en, err = dh.ReadDir(fuseops.DirOffset(1))
   726  	t.Assert(err, IsNil)
   727  	t.Assert(en, NotNil)
   728  	t.Assert(en.Name, Equals, "..")
   729  
   730  	for i := fuseops.DirOffset(2); ; i++ {
   731  		en, err = dh.ReadDir(i)
   732  		t.Assert(err, IsNil)
   733  
   734  		if en == nil {
   735  			return
   736  		}
   737  
   738  		entries = append(entries, *en)
   739  	}
   740  }
   741  
   742  func namesOf(entries []DirHandleEntry) (names []string) {
   743  	for _, en := range entries {
   744  		names = append(names, en.Name)
   745  	}
   746  	return
   747  }
   748  
   749  func (s *GoofysTest) assertEntries(t *C, in *Inode, names []string) {
   750  	dh := in.OpenDir()
   751  	defer dh.CloseDir()
   752  
   753  	t.Assert(namesOf(s.readDirFully(t, dh)), DeepEquals, names)
   754  }
   755  
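        // readDirIntoCache opens the directory and issues a single ReadDir so that
        // its listing lands in the stat/type caches.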
   756  func (s *GoofysTest) readDirIntoCache(t *C, inode fuseops.InodeID) {
   757  	openDirOp := fuseops.OpenDirOp{Inode: inode}
   758  	err := s.fs.OpenDir(nil, &openDirOp)
   759  	t.Assert(err, IsNil)
   760  
   761  	readDirOp := fuseops.ReadDirOp{
   762  		Inode:  inode,
   763  		Handle: openDirOp.Handle,
   764  		Dst:    make([]byte, 8*1024),
   765  	}
   766  
   767  	err = s.fs.ReadDir(nil, &readDirOp)
   768  	t.Assert(err, IsNil)
   769  }
   770  
   771  func (s *GoofysTest) TestReadDirCacheLookup(t *C) {
   772  	s.fs.flags.StatCacheTTL = 1 * time.Minute
   773  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
   774  
   775  	s.readDirIntoCache(t, fuseops.RootInodeID)
   776  	s.disableS3()
   777  
   778  	// should be cached so lookup should not need to talk to s3
   779  	entries := []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"}
   780  	for _, en := range entries {
   781  		err := s.fs.LookUpInode(nil, &fuseops.LookUpInodeOp{
   782  			Parent: fuseops.RootInodeID,
   783  			Name:   en,
   784  		})
   785  		t.Assert(err, IsNil)
   786  	}
   787  }
   788  
   789  func (s *GoofysTest) TestReadDirWithExternalChanges(t *C) {
   790  	s.fs.flags.TypeCacheTTL = time.Second
   791  
   792  	dir1, err := s.LookUpInode(t, "dir1")
   793  	t.Assert(err, IsNil)
   794  
   795  	defaultEntries := []string{
   796  		"dir1", "dir2", "dir4", "empty_dir",
   797  		"empty_dir2", "file1", "file2", "zero"}
   798  	s.assertEntries(t, s.getRoot(t), defaultEntries)
   799  	// dir1 has file3 and nothing else.
   800  	s.assertEntries(t, dir1, []string{"file3"})
   801  
   802  	// Do the following 'external' changes in s3 without involving goofys.
   803  	// - Remove file1, add file3.
   804  	// - Remove dir1/file3. Given that dir1 has just this one file,
   805  	//   we are effectively removing dir1 as well.
   806  	s.removeBlob(s.cloud, t, "file1")
   807  	s.setupBlobs(s.cloud, t, map[string]*string{"file3": nil})
   808  	s.removeBlob(s.cloud, t, "dir1/file3")
   809  
   810  	time.Sleep(s.fs.flags.TypeCacheTTL)
   811  	// newEntries = `defaultEntries` - dir1 - file1 + file3.
   812  	newEntries := []string{
   813  		"dir2", "dir4", "empty_dir", "empty_dir2",
   814  		"file2", "file3", "zero"}
   815  	if s.cloud.Capabilities().DirBlob {
   816  		// dir1 is not automatically deleted
   817  		newEntries = append([]string{"dir1"}, newEntries...)
   818  	}
   819  	s.assertEntries(t, s.getRoot(t), newEntries)
   820  }
   821  
   822  func (s *GoofysTest) TestReadDir(t *C) {
   823  	// test listing /
   824  	dh := s.getRoot(t).OpenDir()
   825  	defer dh.CloseDir()
   826  
   827  	s.assertEntries(t, s.getRoot(t), []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"})
   828  
   829  	// test listing dir1/
   830  	in, err := s.LookUpInode(t, "dir1")
   831  	t.Assert(err, IsNil)
   832  	s.assertEntries(t, in, []string{"file3"})
   833  
   834  	// test listing dir2/
   835  	in, err = s.LookUpInode(t, "dir2")
   836  	t.Assert(err, IsNil)
   837  	s.assertEntries(t, in, []string{"dir3"})
   838  
   839  	// test listing dir2/dir3/
   840  	in, err = s.LookUpInode(t, "dir2/dir3")
   841  	t.Assert(err, IsNil)
   842  	s.assertEntries(t, in, []string{"file4"})
   843  }
   844  
   845  func (s *GoofysTest) TestReadFiles(t *C) {
   846  	parent := s.getRoot(t)
   847  	dh := parent.OpenDir()
   848  	defer dh.CloseDir()
   849  
   850  	var entries []*DirHandleEntry
   851  
   852  	dh.mu.Lock()
   853  	for i := fuseops.DirOffset(0); ; i++ {
   854  		en, err := dh.ReadDir(i)
   855  		t.Assert(err, IsNil)
   856  
   857  		if en == nil {
   858  			break
   859  		}
   860  
   861  		entries = append(entries, en)
   862  	}
   863  	dh.mu.Unlock()
   864  
   865  	for _, en := range entries {
   866  		if en.Type == fuseutil.DT_File {
   867  			in, err := parent.LookUp(en.Name)
   868  			t.Assert(err, IsNil)
   869  
   870  			fh, err := in.OpenFile(fuseops.OpMetadata{Pid: uint32(os.Getpid())})
   871  			t.Assert(err, IsNil)
   872  
   873  			buf := make([]byte, 4096)
   874  
   875  			nread, err := fh.ReadFile(0, buf)
   876  			if en.Name == "zero" {
   877  				t.Assert(nread, Equals, 0)
   878  			} else {
   879  				t.Assert(nread, Equals, len(en.Name))
   880  				buf = buf[0:nread]
   881  				t.Assert(string(buf), Equals, en.Name)
   882  			}
   883  		} else {
   884  			// skip directories; TestReadDir covers listing them
   885  		}
   886  	}
   887  }
   888  
   889  func (s *GoofysTest) TestReadOffset(t *C) {
   890  	root := s.getRoot(t)
   891  	f := "file1"
   892  
   893  	in, err := root.LookUp(f)
   894  	t.Assert(err, IsNil)
   895  
   896  	fh, err := in.OpenFile(fuseops.OpMetadata{Pid: uint32(os.Getpid())})
   897  	t.Assert(err, IsNil)
   898  
   899  	buf := make([]byte, 4096)
   900  
   901  	nread, err := fh.ReadFile(1, buf)
   902  	t.Assert(err, IsNil)
   903  	t.Assert(nread, Equals, len(f)-1)
   904  	t.Assert(string(buf[0:nread]), DeepEquals, f[1:])
   905  
   906  	r := rand.New(rand.NewSource(time.Now().UnixNano()))
   907  
   908  	for i := 0; i < 3; i++ {
   909  		off := r.Int31n(int32(len(f)))
   910  		nread, err = fh.ReadFile(int64(off), buf)
   911  		t.Assert(err, IsNil)
   912  		t.Assert(nread, Equals, len(f)-int(off))
   913  		t.Assert(string(buf[0:nread]), DeepEquals, f[off:])
   914  	}
   915  }
   916  
   917  func (s *GoofysTest) TestCreateFiles(t *C) {
   918  	fileName := "testCreateFile"
   919  
   920  	_, fh := s.getRoot(t).Create(fileName, fuseops.OpMetadata{Pid: uint32(os.Getpid())})
   921  
   922  	err := fh.FlushFile()
   923  	t.Assert(err, IsNil)
   924  
   925  	resp, err := s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   926  	t.Assert(err, IsNil)
   927  	t.Assert(resp.HeadBlobOutput.Size, DeepEquals, uint64(0))
   928  	defer resp.Body.Close()
   929  
   930  	_, err = s.getRoot(t).LookUp(fileName)
   931  	t.Assert(err, IsNil)
   932  
   933  	fileName = "testCreateFile2"
   934  	s.testWriteFile(t, fileName, 1, 128*1024)
   935  
   936  	inode, err := s.getRoot(t).LookUp(fileName)
   937  	t.Assert(err, IsNil)
   938  
   939  	fh, err = inode.OpenFile(fuseops.OpMetadata{Pid: uint32(os.Getpid())})
   940  	t.Assert(err, IsNil)
   941  
   942  	err = fh.FlushFile()
   943  	t.Assert(err, IsNil)
   944  
   945  	resp, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   946  	t.Assert(err, IsNil)
   947  	// ADLv1 doesn't return size when we do a GET
   948  	if _, adlv1 := s.cloud.(*ADLv1); !adlv1 {
   949  		t.Assert(resp.HeadBlobOutput.Size, Equals, uint64(1))
   950  	}
   951  	defer resp.Body.Close()
   952  }
   953  
   954  func (s *GoofysTest) TestUnlink(t *C) {
   955  	fileName := "file1"
   956  
   957  	err := s.getRoot(t).Unlink(fileName)
   958  	t.Assert(err, IsNil)
   959  
   960  	// make sure that it's gone from s3
   961  	_, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   962  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
   963  }
   964  
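        // FileHandleReader adapts a *FileHandle to io.ReadSeeker (whence 0 and 1
        // only) so tests can drive it with io.LimitReader and CompareReader, e.g.
        // CompareReader(&FileHandleReader{fs, fh, 0}, io.LimitReader(&SeqReader{}, size), 0).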
   965  type FileHandleReader struct {
   966  	fs     *Goofys
   967  	fh     *FileHandle
   968  	offset int64
   969  }
   970  
   971  func (r *FileHandleReader) Read(p []byte) (nread int, err error) {
   972  	nread, err = r.fh.ReadFile(r.offset, p)
   973  	r.offset += int64(nread)
   974  	return
   975  }
   976  
   977  func (r *FileHandleReader) Seek(offset int64, whence int) (int64, error) {
   978  	switch whence {
   979  	case 0:
   980  		r.offset = offset
   981  	case 1:
   982  		r.offset += offset
   983  	default:
   984  		panic(fmt.Sprintf("unsupported whence: %v", whence))
   985  	}
   986  
   987  	return r.offset, nil
   988  }
   989  
   990  func (s *GoofysTest) testWriteFile(t *C, fileName string, size int64, writeSize int) {
   991  	s.testWriteFileAt(t, fileName, int64(0), size, writeSize)
   992  }
   993  
   994  func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, writeSize int) {
   995  	var fh *FileHandle
   996  	root := s.getRoot(t)
   997  
   998  	lookup := fuseops.LookUpInodeOp{
   999  		Parent: root.Id,
  1000  		Name:   fileName,
  1001  	}
  1002  	err := s.fs.LookUpInode(nil, &lookup)
  1003  	if err != nil {
  1004  		if err == fuse.ENOENT {
  1005  			create := fuseops.CreateFileOp{
  1006  				Parent: root.Id,
  1007  				Name:   fileName,
  1008  			}
  1009  			err = s.fs.CreateFile(nil, &create)
  1010  			t.Assert(err, IsNil)
  1011  
  1012  			fh = s.fs.fileHandles[create.Handle]
  1013  		} else {
  1014  			t.Assert(err, IsNil)
  1015  		}
  1016  	} else {
  1017  		in := s.fs.inodes[lookup.Entry.Child]
  1018  		fh, err = in.OpenFile(fuseops.OpMetadata{Pid: uint32(os.Getpid())})
  1019  		t.Assert(err, IsNil)
  1020  	}
  1021  
  1022  	buf := make([]byte, writeSize)
  1023  	nwritten := offset
  1024  
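        	// SeqReader (defined elsewhere in this package) produces a deterministic
        	// byte sequence keyed by its starting offset, so the identical stream can
        	// be regenerated below with &SeqReader{offset} to verify what was written.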
  1025  	src := io.LimitReader(&SeqReader{}, size)
  1026  
  1027  	for {
  1028  		nread, err := src.Read(buf)
  1029  		if err == io.EOF {
  1030  			t.Assert(nwritten, Equals, size)
  1031  			break
  1032  		}
  1033  		t.Assert(err, IsNil)
  1034  
  1035  		err = fh.WriteFile(nwritten, buf[:nread])
  1036  		t.Assert(err, IsNil)
  1037  		nwritten += int64(nread)
  1038  	}
  1039  
  1040  	err = fh.FlushFile()
  1041  	t.Assert(err, IsNil)
  1042  
  1043  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: fileName})
  1044  	t.Assert(err, IsNil)
  1045  	t.Assert(resp.Size, Equals, uint64(size+offset))
  1046  
  1047  	fr := &FileHandleReader{s.fs, fh, offset}
  1048  	diff, err := CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 0)
  1049  	t.Assert(err, IsNil)
  1050  	t.Assert(diff, Equals, -1)
  1051  	t.Assert(fr.offset, Equals, size)
  1052  
  1053  	err = fh.FlushFile()
  1054  	t.Assert(err, IsNil)
  1055  
  1056  	// read again with an exact 4KB buffer to catch the aligned-read case
  1057  	fr = &FileHandleReader{s.fs, fh, offset}
  1058  	diff, err = CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 4096)
  1059  	t.Assert(err, IsNil)
  1060  	t.Assert(diff, Equals, -1)
  1061  	t.Assert(fr.offset, Equals, size)
  1062  
  1063  	fh.Release()
  1064  }
  1065  
  1066  func (s *GoofysTest) TestWriteLargeFile(t *C) {
  1067  	s.testWriteFile(t, "testLargeFile", int64(READAHEAD_CHUNK)+1024*1024, 128*1024)
  1068  	s.testWriteFile(t, "testLargeFile2", int64(READAHEAD_CHUNK), 128*1024)
  1069  	s.testWriteFile(t, "testLargeFile3", int64(READAHEAD_CHUNK)+1, 128*1024)
  1070  }
  1071  
  1072  func (s *GoofysTest) TestWriteReallyLargeFile(t *C) {
  1073  	s.testWriteFile(t, "testLargeFile", 512*1024*1024+1, 128*1024)
  1074  }
  1075  
  1076  func (s *GoofysTest) TestWriteReplicatorThrottle(t *C) {
  1077  	s.fs.replicators = Ticket{Total: 1}.Init()
  1078  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
  1079  }
  1080  
  1081  func (s *GoofysTest) TestReadWriteMinimumMemory(t *C) {
  1082  	if _, ok := s.cloud.(*ADLv1); ok {
  1083  		s.fs.bufferPool.maxBuffers = 4
  1084  	} else {
  1085  		s.fs.bufferPool.maxBuffers = 2
  1086  	}
  1087  	s.fs.bufferPool.computedMaxbuffers = s.fs.bufferPool.maxBuffers
  1088  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
  1089  }
  1090  
  1091  func (s *GoofysTest) TestWriteManyFilesFile(t *C) {
  1092  	var files sync.WaitGroup
  1093  
  1094  	for i := 0; i < 21; i++ {
  1095  		files.Add(1)
  1096  		fileName := "testSmallFile" + strconv.Itoa(i)
  1097  		go func() {
  1098  			defer files.Done()
  1099  			s.testWriteFile(t, fileName, 1, 128*1024)
  1100  		}()
  1101  	}
  1102  
  1103  	files.Wait()
  1104  }
  1105  
  1106  func (s *GoofysTest) testWriteFileNonAlign(t *C) {
  1107  	s.testWriteFile(t, "testWriteFileNonAlign", 6*1024*1024, 128*1024+1)
  1108  }
  1109  
  1110  func (s *GoofysTest) TestReadRandom(t *C) {
  1111  	size := int64(21 * 1024 * 1024)
  1112  
  1113  	s.testWriteFile(t, "testLargeFile", size, 128*1024)
  1114  	in, err := s.LookUpInode(t, "testLargeFile")
  1115  	t.Assert(err, IsNil)
  1116  
  1117  	fh, err := in.OpenFile(fuseops.OpMetadata{Pid: uint32(os.Getpid())})
  1118  	t.Assert(err, IsNil)
  1119  	fr := &FileHandleReader{s.fs, fh, 0}
  1120  
  1121  	src := rand.NewSource(time.Now().UnixNano())
  1122  	truth := &SeqReader{}
  1123  
  1124  	for i := 0; i < 10; i++ {
  1125  		offset := src.Int63() % (size / 2)
  1126  
  1127  		fr.Seek(offset, 0)
  1128  		truth.Seek(offset, 0)
  1129  
  1130  		// read 5MB+1 from that offset
  1131  		nread := int64(5*1024*1024 + 1)
  1132  		diff, err := CompareReader(io.LimitReader(fr, nread), io.LimitReader(truth, nread), 0)
        		t.Assert(err, IsNil)
        		t.Assert(diff, Equals, -1)
  1133  	}
  1134  }
  1135  
  1136  func (s *GoofysTest) TestMkDir(t *C) {
  1137  	_, err := s.LookUpInode(t, "new_dir/file")
  1138  	t.Assert(err, Equals, fuse.ENOENT)
  1139  
  1140  	dirName := "new_dir"
  1141  	inode, err := s.getRoot(t).MkDir(dirName)
  1142  	t.Assert(err, IsNil)
  1143  	t.Assert(*inode.FullName(), Equals, dirName)
  1144  
  1145  	_, err = s.LookUpInode(t, dirName)
  1146  	t.Assert(err, IsNil)
  1147  
  1148  	fileName := "file"
  1149  	_, fh := inode.Create(fileName, fuseops.OpMetadata{Pid: uint32(os.Getpid())})
  1150  
  1151  	err = fh.FlushFile()
  1152  	t.Assert(err, IsNil)
  1153  
  1154  	_, err = s.LookUpInode(t, dirName+"/"+fileName)
  1155  	t.Assert(err, IsNil)
  1156  }
  1157  
  1158  func (s *GoofysTest) TestRmDir(t *C) {
  1159  	root := s.getRoot(t)
  1160  
  1161  	err := root.RmDir("dir1")
  1162  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1163  
  1164  	err = root.RmDir("dir2")
  1165  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1166  
  1167  	err = root.RmDir("empty_dir")
  1168  	t.Assert(err, IsNil)
  1169  
  1170  }
  1171  
  1172  func (s *GoofysTest) TestRenamePreserveMetadata(t *C) {
  1173  	if _, ok := s.cloud.(*ADLv1); ok {
  1174  		t.Skip("ADLv1 doesn't support metadata")
  1175  	}
  1176  	root := s.getRoot(t)
  1177  
  1178  	from, to := "file1", "new_file"
  1179  
  1180  	metadata := make(map[string]*string)
  1181  	metadata["foo"] = aws.String("bar")
  1182  
  1183  	_, err := s.cloud.CopyBlob(&CopyBlobInput{
  1184  		Source:      from,
  1185  		Destination: from,
  1186  		Metadata:    metadata,
  1187  	})
  1188  	t.Assert(err, IsNil)
  1189  
  1190  	err = root.Rename(from, root, to)
  1191  	t.Assert(err, IsNil)
  1192  
  1193  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1194  	t.Assert(err, IsNil)
  1195  	t.Assert(resp.Metadata["foo"], NotNil)
  1196  	t.Assert(*resp.Metadata["foo"], Equals, "bar")
  1197  }
  1198  
  1199  func (s *GoofysTest) TestRenameLarge(t *C) {
  1200  	fileSize := int64(2 * 1024 * 1024 * 1024)
  1201  	// AWS S3 can time out when renaming large files
  1202  	if _, ok := s.cloud.(*S3Backend); ok && s.emulator {
  1203  		// S3proxy runs out of memory on truly large files. We
  1204  		// want to use a large file to test timeout issues
  1205  		// which wouldn't happen on s3proxy anyway
  1206  		fileSize = 21 * 1024 * 1024
  1207  	}
  1208  
  1209  	s.testWriteFile(t, "large_file", fileSize, 128*1024)
  1210  
  1211  	root := s.getRoot(t)
  1212  
  1213  	from, to := "large_file", "large_file2"
  1214  	err := root.Rename(from, root, to)
  1215  	t.Assert(err, IsNil)
  1216  }
  1217  
  1218  func (s *GoofysTest) TestRenameToExisting(t *C) {
  1219  	root := s.getRoot(t)
  1220  
  1221  	// cache these 2 files first
  1222  	_, err := s.LookUpInode(t, "file1")
  1223  	t.Assert(err, IsNil)
  1224  
  1225  	_, err = s.LookUpInode(t, "file2")
  1226  	t.Assert(err, IsNil)
  1227  
  1228  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1229  		OldParent: root.Id,
  1230  		NewParent: root.Id,
  1231  		OldName:   "file1",
  1232  		NewName:   "file2",
  1233  	})
  1234  	t.Assert(err, IsNil)
  1235  
  1236  	file1 := root.findChild("file1")
  1237  	t.Assert(file1, IsNil)
  1238  
  1239  	file2 := root.findChild("file2")
  1240  	t.Assert(file2, NotNil)
  1241  	t.Assert(*file2.Name, Equals, "file2")
  1242  }
  1243  
  1244  func (s *GoofysTest) TestBackendListPagination(t *C) {
  1245  	if _, ok := s.cloud.(*ADLv1); ok {
  1246  		t.Skip("ADLv1 doesn't have pagination")
  1247  	}
  1248  	if s.azurite {
  1249  		// https://github.com/Azure/Azurite/issues/262
  1250  		t.Skip("Azurite doesn't support pagination")
  1251  	}
  1252  
  1253  	var itemsPerPage int
  1254  	switch s.cloud.Delegate().(type) {
  1255  	case *S3Backend, *GCS3:
  1256  		itemsPerPage = 1000
  1257  	case *AZBlob, *ADLv2:
  1258  		itemsPerPage = 5000
  1259  	default:
  1260  		t.Fatalf("unknown backend: %T", s.cloud)
  1261  	}
  1262  
  1263  	root := s.getRoot(t)
  1264  	root.dir.mountPrefix = "this_test/"
  1265  
  1266  	blobs := make(map[string]*string)
  1267  	expect := make([]string, 0)
  1268  	for i := 0; i < itemsPerPage+1; i++ {
  1269  		b := fmt.Sprintf("%08v", i)
  1270  		blobs["this_test/"+b] = nil
  1271  		expect = append(expect, b)
  1272  	}
  1273  
  1274  	switch s.cloud.(type) {
  1275  	case *ADLv1, *ADLv2:
  1276  		// these backends don't support parallel delete, so
  1277  		// we clean up the blobs here ourselves
  1278  		defer func() {
  1279  			var wg sync.WaitGroup
  1280  
  1281  			for b := range blobs {
  1282  				SmallActionsGate.Take(1, true)
  1283  				wg.Add(1)
  1284  
  1285  				go func(key string) {
  1286  					// ignore the error here;
  1287  					// anything we didn't clean up
  1288  					// will be handled by teardown
  1289  					_, _ = s.cloud.DeleteBlob(&DeleteBlobInput{Key: key})
  1290  					SmallActionsGate.Return(1)
  1291  					wg.Done()
  1292  				}(b)
  1293  			}
  1294  
  1295  			wg.Wait()
  1296  		}()
  1297  	}
  1298  
  1299  	s.setupBlobs(s.cloud, t, blobs)
  1300  
  1301  	dh := root.OpenDir()
  1302  	defer dh.CloseDir()
  1303  
  1304  	children := namesOf(s.readDirFully(t, dh))
  1305  	t.Assert(children, DeepEquals, expect)
  1306  }
  1307  
  1308  func (s *GoofysTest) TestBackendListPrefix(t *C) {
  1309  	res, err := s.cloud.ListBlobs(&ListBlobsInput{
  1310  		Prefix:    PString("random"),
  1311  		Delimiter: PString("/"),
  1312  	})
  1313  	t.Assert(err, IsNil)
  1314  	t.Assert(len(res.Prefixes), Equals, 0)
  1315  	t.Assert(len(res.Items), Equals, 0)
  1316  
  1317  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1318  		Prefix:    PString("empty_dir"),
  1319  		Delimiter: PString("/"),
  1320  	})
  1321  	t.Assert(err, IsNil)
  1322  	t.Assert(len(res.Prefixes), Not(Equals), 0)
  1323  	t.Assert(*res.Prefixes[0].Prefix, Equals, "empty_dir/")
  1324  	t.Assert(len(res.Items), Equals, 0)
  1325  
  1326  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1327  		Prefix:    PString("empty_dir/"),
  1328  		Delimiter: PString("/"),
  1329  	})
  1330  	t.Assert(err, IsNil)
  1331  	t.Assert(len(res.Prefixes), Equals, 0)
  1332  	t.Assert(len(res.Items), Equals, 1)
  1333  	t.Assert(*res.Items[0].Key, Equals, "empty_dir/")
  1334  
  1335  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1336  		Prefix:    PString("file1"),
  1337  		Delimiter: PString("/"),
  1338  	})
  1339  	t.Assert(err, IsNil)
  1340  	t.Assert(len(res.Prefixes), Equals, 0)
  1341  	t.Assert(len(res.Items), Equals, 1)
  1342  	t.Assert(*res.Items[0].Key, Equals, "file1")
  1343  
  1344  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1345  		Prefix:    PString("file1/"),
  1346  		Delimiter: PString("/"),
  1347  	})
  1348  	t.Assert(err, IsNil)
  1349  	t.Assert(len(res.Prefixes), Equals, 0)
  1350  	t.Assert(len(res.Items), Equals, 0)
  1351  
  1352  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1353  		Prefix:    PString("dir2/"),
  1354  		Delimiter: PString("/"),
  1355  	})
  1356  	t.Assert(err, IsNil)
  1357  	t.Assert(len(res.Prefixes), Equals, 1)
  1358  	t.Assert(*res.Prefixes[0].Prefix, Equals, "dir2/dir3/")
  1359  	if s.cloud.Capabilities().DirBlob {
  1360  		t.Assert(len(res.Items), Equals, 1)
  1361  		t.Assert(*res.Items[0].Key, Equals, "dir2/")
  1362  	} else {
  1363  		t.Assert(len(res.Items), Equals, 0)
  1364  	}
  1365  
  1366  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1367  		Prefix:    PString("dir2/dir3/"),
  1368  		Delimiter: PString("/"),
  1369  	})
  1370  	t.Assert(err, IsNil)
  1371  	t.Assert(len(res.Prefixes), Equals, 0)
  1372  	t.Assert(len(res.Items), Equals, 2)
  1373  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/")
  1374  	t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4")
  1375  
  1376  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1377  		Prefix: PString("dir2/"),
  1378  	})
  1379  	t.Assert(err, IsNil)
  1380  	t.Assert(len(res.Prefixes), Equals, 0)
  1381  	t.Assert(len(res.Items), Equals, 2)
  1382  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/")
  1383  	t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4")
  1384  
  1385  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1386  		Prefix: PString("dir2/dir3/file4"),
  1387  	})
  1388  	t.Assert(err, IsNil)
  1389  	t.Assert(len(res.Prefixes), Equals, 0)
  1390  	t.Assert(len(res.Items), Equals, 1)
  1391  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/file4")
  1392  }
  1393  
  1394  func (s *GoofysTest) TestRenameDir(t *C) {
  1395  	s.fs.flags.StatCacheTTL = 0
  1396  
  1397  	root := s.getRoot(t)
  1398  
  1399  	err := root.Rename("empty_dir", root, "dir1")
  1400  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1401  
  1402  	err = root.Rename("empty_dir", root, "new_dir")
  1403  	t.Assert(err, IsNil)
  1404  
  1405  	dir2, err := s.LookUpInode(t, "dir2")
  1406  	t.Assert(err, IsNil)
  1407  	t.Assert(dir2, NotNil)
  1408  
  1409  	_, err = s.LookUpInode(t, "new_dir2")
  1410  	t.Assert(err, Equals, fuse.ENOENT)
  1411  
  1412  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1413  		OldParent: root.Id,
  1414  		NewParent: root.Id,
  1415  		OldName:   "dir2",
  1416  		NewName:   "new_dir2",
  1417  	})
  1418  	t.Assert(err, IsNil)
  1419  
  1420  	_, err = s.LookUpInode(t, "dir2/dir3")
  1421  	t.Assert(err, Equals, fuse.ENOENT)
  1422  
  1423  	_, err = s.LookUpInode(t, "dir2/dir3/file4")
  1424  	t.Assert(err, Equals, fuse.ENOENT)
  1425  
  1426  	new_dir2, err := s.LookUpInode(t, "new_dir2")
  1427  	t.Assert(err, IsNil)
  1428  	t.Assert(new_dir2, NotNil)
  1429  	t.Assert(dir2.Id, Equals, new_dir2.Id)
  1430  
  1431  	old, err := s.LookUpInode(t, "new_dir2/dir3/file4")
  1432  	t.Assert(err, IsNil)
  1433  	t.Assert(old, NotNil)
  1434  
  1435  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1436  		OldParent: root.Id,
  1437  		NewParent: root.Id,
  1438  		OldName:   "new_dir2",
  1439  		NewName:   "new_dir3",
  1440  	})
  1441  	t.Assert(err, IsNil)
  1442  
  1443  	renamed, err := s.LookUpInode(t, "new_dir3/dir3/file4")
  1444  	t.Assert(err, IsNil)
  1445  	t.Assert(renamed, NotNil)
  1446  	t.Assert(old.Id, Equals, renamed.Id)
  1447  
  1448  	_, err = s.LookUpInode(t, "new_dir2/dir3")
  1449  	t.Assert(err, Equals, fuse.ENOENT)
  1450  
  1451  	_, err = s.LookUpInode(t, "new_dir2/dir3/file4")
  1452  	t.Assert(err, Equals, fuse.ENOENT)
  1453  }
  1454  
  1455  func (s *GoofysTest) TestRename(t *C) {
  1456  	root := s.getRoot(t)
  1457  
  1458  	from, to := "empty_dir", "file1"
  1459  	err := root.Rename(from, root, to)
  1460  	t.Assert(err, Equals, fuse.ENOTDIR)
  1461  
  1462  	from, to = "file1", "empty_dir"
  1463  	err = root.Rename(from, root, to)
  1464  	t.Assert(err, Equals, syscall.EISDIR)
  1465  
  1466  	from, to = "file1", "new_file"
  1467  	err = root.Rename(from, root, to)
  1468  	t.Assert(err, IsNil)
  1469  
  1470  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1471  	t.Assert(err, IsNil)
  1472  
  1473  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from})
  1474  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
  1475  
  1476  	from, to = "file3", "new_file2"
  1477  	dir, _ := s.LookUpInode(t, "dir1")
  1478  	err = dir.Rename(from, root, to)
  1479  	t.Assert(err, IsNil)
  1480  
  1481  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1482  	t.Assert(err, IsNil)
  1483  
  1484  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from})
  1485  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
  1486  
  1487  	from, to = "no_such_file", "new_file"
  1488  	err = root.Rename(from, root, to)
  1489  	t.Assert(err, Equals, fuse.ENOENT)
  1490  
  1491  	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
  1492  		if !hasEnv("GCS") {
  1493  			// not really a rename, but this copy path can be used by rename
  1494  			from, to = s.fs.bucket+"/file2", "new_file"
  1495  			_, err = s3.copyObjectMultipart(int64(len("file2")), from, to, "", nil, nil, nil)
  1496  			t.Assert(err, IsNil)
  1497  		}
  1498  	}
  1499  }
  1500  
  1501  func (s *GoofysTest) TestConcurrentRefDeref(t *C) {
  1502  	root := s.getRoot(t)
  1503  
  1504  	lookupOp := fuseops.LookUpInodeOp{
  1505  		Parent: root.Id,
  1506  		Name:   "file1",
  1507  	}
  1508  
  1509  	for i := 0; i < 20; i++ {
  1510  		err := s.fs.LookUpInode(nil, &lookupOp)
  1511  		t.Assert(err, IsNil)
  1512  
  1513  		var wg sync.WaitGroup
  1514  
  1515  		wg.Add(2)
  1516  		go func() {
  1517  			// we want to yield to the forget goroutine so that it's run first
  1518  			// to trigger this bug
  1519  			if i%2 == 0 {
  1520  				runtime.Gosched()
  1521  			}
  1522  			s.fs.LookUpInode(nil, &lookupOp)
  1523  			wg.Done()
  1524  		}()
  1525  		go func() {
  1526  			s.fs.ForgetInode(nil, &fuseops.ForgetInodeOp{
  1527  				Inode: lookupOp.Entry.Child,
  1528  				N:     1,
  1529  			})
  1530  			wg.Done()
  1531  		}()
  1532  
  1533  		wg.Wait()
  1534  	}
  1535  }
  1536  
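        // hasEnv reports whether the named variable is set to something truthy:
        // "", "0" and "false" all count as unset.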
  1537  func hasEnv(env string) bool {
  1538  	v := os.Getenv(env)
  1539  
  1540  	return !(v == "" || v == "0" || v == "false")
  1541  }
  1542  
  1543  func isTravis() bool {
  1544  	return hasEnv("TRAVIS")
  1545  }
  1546  
  1547  func isCatfs() bool {
  1548  	return hasEnv("CATFS")
  1549  }
  1550  
  1551  func (s *GoofysTest) mount(t *C, mountPoint string) {
  1552  	err := os.MkdirAll(mountPoint, 0700)
  1553  	t.Assert(err, IsNil)
  1554  
  1555  	server := fuseutil.NewFileSystemServer(s.fs)
  1556  
  1557  	if isCatfs() {
  1558  		s.fs.flags.MountOptions = make(map[string]string)
  1559  		s.fs.flags.MountOptions["allow_other"] = ""
  1560  	}
  1561  
  1562  	// Mount the file system.
  1563  	mountCfg := &fuse.MountConfig{
  1564  		FSName:                  s.fs.bucket,
  1565  		Options:                 s.fs.flags.MountOptions,
  1566  		ErrorLogger:             GetStdLogger(NewLogger("fuse"), logrus.ErrorLevel),
  1567  		DisableWritebackCaching: true,
  1568  	}
  1569  	if fuseLog.Level == logrus.DebugLevel {
  1570  		mountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel)
  1571  	}
  1572  
  1573  	_, err = fuse.Mount(mountPoint, server, mountCfg)
  1574  	t.Assert(err, IsNil)
  1575  
  1576  	if isCatfs() {
  1577  		cacheDir := mountPoint + "-cache"
  1578  		err := os.MkdirAll(cacheDir, 0700)
  1579  		t.Assert(err, IsNil)
  1580  
  1581  		catfs := exec.Command("catfs", "--test", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
  1582  		_, err = catfs.Output()
  1583  		if err != nil {
  1584  			if ee, ok := err.(*exec.ExitError); ok {
  1585  				panic(string(ee.Stderr))
  1586  			}
  1587  		}
  1588  
  1589  		catfs = exec.Command("catfs", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
  1590  
  1591  		if isTravis() {
  1592  			logger := NewLogger("catfs")
  1593  			lvl := logrus.InfoLevel
  1594  			logger.Formatter.(*LogHandle).Lvl = &lvl
  1595  			w := logger.Writer()
  1596  
  1597  			catfs.Stdout = w
  1598  			catfs.Stderr = w
  1599  
  1600  			catfs.Env = append(catfs.Env, "RUST_LOG=debug")
  1601  		}
  1602  
  1603  		err = catfs.Start()
  1604  		t.Assert(err, IsNil)
  1605  
  1606  		time.Sleep(time.Second)
  1607  	}
  1608  }
  1609  
  1610  func (s *GoofysTest) umount(t *C, mountPoint string) {
  1611  	var err error
  1612  	for i := 0; i < 10; i++ {
  1613  		err = fuse.Unmount(mountPoint)
  1614  		if err != nil {
  1615  			time.Sleep(100 * time.Millisecond)
  1616  		} else {
  1617  			break
  1618  		}
  1619  	}
  1620  	t.Assert(err, IsNil)
  1621  
  1622  	os.Remove(mountPoint)
  1623  	if isCatfs() {
  1624  		cacheDir := mountPoint + "-cache"
  1625  		os.Remove(cacheDir)
  1626  	}
  1627  }
  1628  
  1629  func (s *GoofysTest) runFuseTest(t *C, mountPoint string, umount bool, cmdArgs ...string) {
  1630  	s.mount(t, mountPoint)
  1631  
  1632  	if umount {
  1633  		defer s.umount(t, mountPoint)
  1634  	}
  1635  
  1636  	// if command starts with ./ or ../ then we are executing a
  1637  	// relative path and cannot do chdir
  1638  	chdir := cmdArgs[0][0] != '.'
  1639  
  1640  	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
  1641  	cmd.Env = append(cmd.Env, os.Environ()...)
  1642  	cmd.Env = append(cmd.Env, "FAST=true")
  1643  	cmd.Env = append(cmd.Env, "CLEANUP=false")
  1644  
  1645  	if isTravis() {
  1646  		logger := NewLogger("test")
  1647  		lvl := logrus.InfoLevel
  1648  		logger.Formatter.(*LogHandle).Lvl = &lvl
  1649  		w := logger.Writer()
  1650  
  1651  		cmd.Stdout = w
  1652  		cmd.Stderr = w
  1653  	}
  1654  
  1655  	if chdir {
  1656  		oldCwd, err := os.Getwd()
  1657  		t.Assert(err, IsNil)
  1658  
  1659  		err = os.Chdir(mountPoint)
  1660  		t.Assert(err, IsNil)
  1661  
  1662  		defer os.Chdir(oldCwd)
  1663  	}
  1664  
  1665  	err := cmd.Run()
  1666  	t.Assert(err, IsNil)
  1667  }
  1668  
  1669  func (s *GoofysTest) TestFuse(t *C) {
  1670  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1671  
  1672  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1673  }
  1674  
  1675  func (s *GoofysTest) TestFuseWithTTL(t *C) {
  1676  	s.fs.flags.StatCacheTTL = 60 * time.Second
  1677  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1678  
  1679  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1680  }
  1681  
  1682  func (s *GoofysTest) TestCheap(t *C) {
  1683  	s.fs.flags.Cheap = true
  1684  	s.TestLookUpInode(t)
  1685  	s.TestWriteLargeFile(t)
  1686  }
  1687  
  1688  func (s *GoofysTest) TestExplicitDir(t *C) {
  1689  	s.fs.flags.ExplicitDir = true
  1690  	s.testExplicitDir(t)
  1691  }
  1692  
  1693  func (s *GoofysTest) TestExplicitDirAndCheap(t *C) {
  1694  	s.fs.flags.ExplicitDir = true
  1695  	s.fs.flags.Cheap = true
  1696  	s.testExplicitDir(t)
  1697  }
  1698  
  1699  func (s *GoofysTest) testExplicitDir(t *C) {
  1700  	if s.cloud.Capabilities().DirBlob {
  1701  		t.Skip("only for backends without dir blob")
  1702  	}
  1703  
  1704  	_, err := s.LookUpInode(t, "file1")
  1705  	t.Assert(err, IsNil)
  1706  
  1707  	_, err = s.LookUpInode(t, "fileNotFound")
  1708  	t.Assert(err, Equals, fuse.ENOENT)
  1709  
  1710  	// dir1/ doesn't exist so we shouldn't be able to see it
  1711  	_, err = s.LookUpInode(t, "dir1/file3")
  1712  	t.Assert(err, Equals, fuse.ENOENT)
  1713  
  1714  	_, err = s.LookUpInode(t, "dir4/file5")
  1715  	t.Assert(err, IsNil)
  1716  
  1717  	_, err = s.LookUpInode(t, "empty_dir")
  1718  	t.Assert(err, IsNil)
  1719  }
  1720  
  1721  func (s *GoofysTest) TestBenchLs(t *C) {
  1722  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1723  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1724  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1725  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "ls")
  1726  }
  1727  
  1728  func (s *GoofysTest) TestBenchCreate(t *C) {
  1729  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1730  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1731  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1732  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create")
  1733  }
  1734  
  1735  func (s *GoofysTest) TestBenchCreateParallel(t *C) {
  1736  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1737  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1738  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1739  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create_parallel")
  1740  }
  1741  
  1742  func (s *GoofysTest) TestBenchIO(t *C) {
  1743  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1744  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1745  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1746  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "io")
  1747  }
  1748  
  1749  func (s *GoofysTest) TestBenchFindTree(t *C) {
  1750  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1751  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1752  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1753  
  1754  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "find")
  1755  }
  1756  
  1757  func (s *GoofysTest) TestIssue231(t *C) {
  1758  	if isTravis() {
  1759  		t.Skip("disabled in Travis: not sure if it has enough memory")
  1760  	}
  1761  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1762  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue231")
  1763  }
  1764  
  1765  func (s *GoofysTest) TestChmod(t *C) {
  1766  	root := s.getRoot(t)
  1767  
  1768  	lookupOp := fuseops.LookUpInodeOp{
  1769  		Parent: root.Id,
  1770  		Name:   "file1",
  1771  	}
  1772  
  1773  	err := s.fs.LookUpInode(nil, &lookupOp)
  1774  	t.Assert(err, IsNil)
  1775  
  1776  	targetMode := os.FileMode(0777)
  1777  	setOp := fuseops.SetInodeAttributesOp{Inode: lookupOp.Entry.Child, Mode: &targetMode}
  1778  
  1779  	err = s.fs.SetInodeAttributes(s.ctx, &setOp)
  1780  	t.Assert(err, IsNil)
  1781  	t.Assert(setOp.Attributes, NotNil)
  1782  }
  1783  
  1784  func (s *GoofysTest) TestIssue64(t *C) {
  1785  	/*
  1786  		mountPoint := "/tmp/mnt" + s.fs.bucket
  1787  		log.Level = logrus.DebugLevel
  1788  
  1789  		err := os.MkdirAll(mountPoint, 0700)
  1790  		t.Assert(err, IsNil)
  1791  
  1792  		defer os.Remove(mountPoint)
  1793  
  1794  		s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue64")
  1795  	*/
  1796  }
  1797  
  1798  func (s *GoofysTest) TestIssue69Fuse(t *C) {
  1799  	s.fs.flags.StatCacheTTL = 0
  1800  
  1801  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1802  
  1803  	s.mount(t, mountPoint)
  1804  
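        	// chdir back out before unmounting; the umount would fail
        	// with EBUSY while our cwd is still inside the mountpoint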
  1805  	defer func() {
  1806  		err := os.Chdir("/")
  1807  		t.Assert(err, IsNil)
  1808  
  1809  		s.umount(t, mountPoint)
  1810  	}()
  1811  
  1812  	err := os.Chdir(mountPoint)
  1813  	t.Assert(err, IsNil)
  1814  
  1815  	_, err = os.Stat("dir1")
  1816  	t.Assert(err, IsNil)
  1817  
  1818  	err = os.Remove("dir1/file3")
  1819  	t.Assert(err, IsNil)
  1820  
  1821  	// don't really care about error code, but it should be a PathError
  1822  	os.Stat("dir1")
  1823  	os.Stat("dir1")
  1824  }
  1825  
  1826  func (s *GoofysTest) TestGetMimeType(t *C) {
  1827  	// option to use mime type not turned on
  1828  	mime := s.fs.flags.GetMimeType("foo.css")
  1829  	t.Assert(mime, IsNil)
  1830  
  1831  	s.fs.flags.UseContentType = true
  1832  
  1833  	mime = s.fs.flags.GetMimeType("foo.css")
  1834  	t.Assert(mime, NotNil)
  1835  	t.Assert(*mime, Equals, "text/css")
  1836  
  1837  	mime = s.fs.flags.GetMimeType("foo")
  1838  	t.Assert(mime, IsNil)
  1839  
  1840  	mime = s.fs.flags.GetMimeType("foo.")
  1841  	t.Assert(mime, IsNil)
  1842  
  1843  	mime = s.fs.flags.GetMimeType("foo.unknownExtension")
  1844  	t.Assert(mime, IsNil)
  1845  }
  1846  
  1847  func (s *GoofysTest) TestPutMimeType(t *C) {
  1848  	if _, ok := s.cloud.(*ADLv1); ok {
  1849  		// ADLv1 doesn't support content-type
  1850  		t.Skip("ADLv1 doesn't support content-type")
  1851  	}
  1852  
  1853  	s.fs.flags.UseContentType = true
  1854  
  1855  	root := s.getRoot(t)
  1856  	jpg := "test.jpg"
  1857  	jpg2 := "test2.jpg"
  1858  	file := "test"
  1859  
  1860  	s.testWriteFile(t, jpg, 10, 128)
  1861  
  1862  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: jpg})
  1863  	t.Assert(err, IsNil)
  1864  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1865  
  1866  	err = root.Rename(jpg, root, file)
  1867  	t.Assert(err, IsNil)
  1868  
  1869  	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: file})
  1870  	t.Assert(err, IsNil)
  1871  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1872  
  1873  	err = root.Rename(file, root, jpg2)
  1874  	t.Assert(err, IsNil)
  1875  
  1876  	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: jpg2})
  1877  	t.Assert(err, IsNil)
  1878  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1879  }
  1880  
  1881  func (s *GoofysTest) TestBucketPrefixSlash(t *C) {
  1882  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2", s.fs.flags)
  1883  	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
  1884  
  1885  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2///", s.fs.flags)
  1886  	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
  1887  }
  1888  
  1889  func (s *GoofysTest) TestFuseWithPrefix(t *C) {
  1890  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1891  
  1892  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":testprefix", s.fs.flags)
  1893  
  1894  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1895  }
  1896  
  1897  func (s *GoofysTest) TestRenameCache(t *C) {
  1898  	root := s.getRoot(t)
  1899  	s.fs.flags.StatCacheTTL = 60 * time.Second
  1900  
  1901  	lookupOp1 := fuseops.LookUpInodeOp{
  1902  		Parent: root.Id,
  1903  		Name:   "file1",
  1904  	}
  1905  
  1906  	lookupOp2 := lookupOp1
  1907  	lookupOp2.Name = "newfile"
  1908  
  1909  	err := s.fs.LookUpInode(nil, &lookupOp1)
  1910  	t.Assert(err, IsNil)
  1911  
  1912  	err = s.fs.LookUpInode(nil, &lookupOp2)
  1913  	t.Assert(err, Equals, fuse.ENOENT)
  1914  
  1915  	renameOp := fuseops.RenameOp{
  1916  		OldParent: root.Id,
  1917  		NewParent: root.Id,
  1918  		OldName:   "file1",
  1919  		NewName:   "newfile",
  1920  	}
  1921  
  1922  	err = s.fs.Rename(nil, &renameOp)
  1923  	t.Assert(err, IsNil)
  1924  
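        	// clear the output fields before reusing the ops for the
        	// post-rename lookups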
  1925  	lookupOp1.Entry = fuseops.ChildInodeEntry{}
  1926  	lookupOp2.Entry = fuseops.ChildInodeEntry{}
  1927  
  1928  	err = s.fs.LookUpInode(nil, &lookupOp1)
  1929  	t.Assert(err, Equals, fuse.ENOENT)
  1930  
  1931  	err = s.fs.LookUpInode(nil, &lookupOp2)
  1932  	t.Assert(err, IsNil)
  1933  }
  1934  
  1935  func (s *GoofysTest) anonymous(t *C) {
  1936  	// On Azure this fails because we re-create the bucket with
  1937  	// the same name right away. Anonymous access is also not
  1938  	// implemented yet in our Azure backend.
  1939  	var s3 *S3Backend
  1940  	var ok bool
  1941  	if s3, ok = s.cloud.Delegate().(*S3Backend); !ok {
  1942  		t.Skip("only for S3")
  1943  	}
  1944  
  1945  	err := s.deleteBucket(s.cloud)
  1946  	t.Assert(err, IsNil)
  1947  
  1948  	// use a different bucket name to prevent a 409 Conflict from
  1949  	// the bucket deletion above
  1950  	s.fs.bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
  1951  	s3.bucket = s.fs.bucket
  1952  	s.setupDefaultEnv(t, true)
  1953  
  1954  	s.fs = NewGoofys(context.Background(), s.fs.bucket, s.fs.flags)
  1955  	t.Assert(s.fs, NotNil)
  1956  
  1957  	// should have auto-detected by S3 backend
  1958  	cloud := s.getRoot(t).dir.cloud
  1959  	t.Assert(cloud, NotNil)
  1960  	s3, ok = cloud.Delegate().(*S3Backend)
  1961  	t.Assert(ok, Equals, true)
  1962  
  1963  	s3.awsConfig.Credentials = credentials.AnonymousCredentials
  1964  	s3.newS3()
  1965  }
  1966  
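        // disableS3 swaps the root's backend for one that always fails,
        // so any test step that would still hit the cloud after this
        // point errors out instead of silently succeeding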
  1967  func (s *GoofysTest) disableS3() {
  1968  	time.Sleep(1 * time.Second) // wait for any background goroutines to finish
  1969  	dir := s.fs.inodes[fuseops.RootInodeID].dir
  1970  	dir.cloud = StorageBackendInitError{
  1971  		fmt.Errorf("cloud disabled"),
  1972  		*dir.cloud.Capabilities(),
  1973  	}
  1974  }
  1975  
  1976  func (s *GoofysTest) TestWriteAnonymous(t *C) {
  1977  	s.anonymous(t)
  1978  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1979  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1980  
  1981  	fileName := "test"
  1982  
  1983  	createOp := fuseops.CreateFileOp{
  1984  		Parent: s.getRoot(t).Id,
  1985  		Name:   fileName,
  1986  	}
  1987  
  1988  	err := s.fs.CreateFile(s.ctx, &createOp)
  1989  	t.Assert(err, IsNil)
  1990  
  1991  	err = s.fs.FlushFile(s.ctx, &fuseops.FlushFileOp{
  1992  		Handle: createOp.Handle,
  1993  		Inode:  createOp.Entry.Child,
  1994  	})
  1995  	t.Assert(err, Equals, syscall.EACCES)
  1996  
  1997  	err = s.fs.ReleaseFileHandle(s.ctx, &fuseops.ReleaseFileHandleOp{Handle: createOp.Handle})
  1998  	t.Assert(err, IsNil)
  1999  
  2000  	err = s.fs.LookUpInode(s.ctx, &fuseops.LookUpInodeOp{
  2001  		Parent: s.getRoot(t).Id,
  2002  		Name:   fileName,
  2003  	})
  2004  	t.Assert(err, Equals, fuse.ENOENT)
  2005  	// BUG! the file shouldn't exist; see the comment in the test
  2006  	// below. This behaves as expected only because we are bypassing
  2007  	// the Linux VFS in this test.
  2008  }
  2009  
  2010  func (s *GoofysTest) TestWriteAnonymousFuse(t *C) {
  2011  	s.anonymous(t)
  2012  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2013  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2014  
  2015  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2016  
  2017  	s.mount(t, mountPoint)
  2018  	defer s.umount(t, mountPoint)
  2019  
  2020  	err := ioutil.WriteFile(mountPoint+"/test", []byte(""), 0600)
  2021  	t.Assert(err, NotNil)
  2022  	pathErr, ok := err.(*os.PathError)
  2023  	t.Assert(ok, Equals, true)
  2024  	t.Assert(pathErr.Err, Equals, syscall.EACCES)
  2025  
  2026  	_, err = os.Stat(mountPoint + "/test")
  2027  	t.Assert(err, IsNil)
  2028  	// BUG! the file shouldn't exist; the commented-out condition
  2029  	// below should hold instead, see the comment in Goofys.FlushFile
  2030  	// pathErr, ok = err.(*os.PathError)
  2031  	// t.Assert(ok, Equals, true)
  2032  	// t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2033  
  2034  	_, err = ioutil.ReadFile(mountPoint + "/test")
  2035  	t.Assert(err, NotNil)
  2036  	pathErr, ok = err.(*os.PathError)
  2037  	t.Assert(ok, Equals, true)
  2038  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2039  
  2040  	// reading the file and getting ENOENT causes the kernel to
  2041  	// invalidate the entry. Failing at open is not sufficient; we
  2042  	// have to fail at read (which means that if the application
  2043  	// uses splice(2) it won't get to us, so this wouldn't work)
  2044  	_, err = os.Stat(mountPoint + "/test")
  2045  	t.Assert(err, NotNil)
  2046  	pathErr, ok = err.(*os.PathError)
  2047  	t.Assert(ok, Equals, true)
  2048  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2049  }
  2050  
  2051  func (s *GoofysTest) TestWriteSyncWriteFuse(t *C) {
  2052  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2053  
  2054  	s.mount(t, mountPoint)
  2055  	defer s.umount(t, mountPoint)
  2056  
  2057  	var f *os.File
  2058  	var n int
  2059  	var err error
  2060  
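        	// close the file if the test failed so we can unmount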
  2061  	defer func() {
  2062  		if err != nil {
  2063  			f.Close()
  2064  		}
  2065  	}()
  2066  
  2067  	f, err = os.Create(mountPoint + "/TestWriteSyncWrite")
  2068  	t.Assert(err, IsNil)
  2069  
  2070  	n, err = f.Write([]byte("hello\n"))
  2071  	t.Assert(err, IsNil)
  2072  	t.Assert(n, Equals, 6)
  2073  
  2074  	err = f.Sync()
  2075  	t.Assert(err, IsNil)
  2076  
  2077  	n, err = f.Write([]byte("world\n"))
  2078  	t.Assert(err, IsNil)
  2079  	t.Assert(n, Equals, 6)
  2080  
  2081  	err = f.Close()
  2082  	t.Assert(err, IsNil)
  2083  }
  2084  
  2085  func (s *GoofysTest) TestIssue156(t *C) {
  2086  	_, err := s.LookUpInode(t, "\xae\x8a-")
  2087  	// S3Proxy and AWS S3 return different errors
  2088  	// https://github.com/andrewgaul/s3proxy/issues/201
  2089  	t.Assert(err, NotNil)
  2090  }
  2091  
  2092  func (s *GoofysTest) TestIssue162(t *C) {
  2093  	if s.azurite {
  2094  		t.Skip("https://github.com/Azure/Azurite/issues/221")
  2095  	}
  2096  
  2097  	params := &PutBlobInput{
  2098  		Key:  "dir1/lör 006.jpg",
  2099  		Body: bytes.NewReader([]byte("foo")),
  2100  		Size: PUInt64(3),
  2101  	}
  2102  	_, err := s.cloud.PutBlob(params)
  2103  	t.Assert(err, IsNil)
  2104  
  2105  	dir, err := s.LookUpInode(t, "dir1")
  2106  	t.Assert(err, IsNil)
  2107  
  2108  	err = dir.Rename("lör 006.jpg", dir, "myfile.jpg")
  2109  	t.Assert(err, IsNil)
  2110  
  2111  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "dir1/myfile.jpg"})
        	t.Assert(err, IsNil)
  2112  	t.Assert(resp.Size, Equals, uint64(3))
  2113  }
  2114  
  2115  func (s *GoofysTest) TestXAttrGet(t *C) {
  2116  	if _, ok := s.cloud.(*ADLv1); ok {
  2117  		t.Skip("ADLv1 doesn't support metadata")
  2118  	}
  2119  
  2120  	_, checkETag := s.cloud.Delegate().(*S3Backend)
  2121  	xattrPrefix := s.cloud.Capabilities().Name + "."
  2122  
  2123  	file1, err := s.LookUpInode(t, "file1")
  2124  	t.Assert(err, IsNil)
  2125  
  2126  	names, err := file1.ListXattr()
  2127  	t.Assert(err, IsNil)
  2128  	expectedXattrs := []string{
  2129  		xattrPrefix + "etag",
  2130  		xattrPrefix + "storage-class",
  2131  		"user.name",
  2132  	}
  2133  	sort.Strings(expectedXattrs)
  2134  	t.Assert(names, DeepEquals, expectedXattrs)
  2135  
  2136  	_, err = file1.GetXattr("user.foobar")
  2137  	t.Assert(err, Equals, unix.ENODATA)
  2138  
  2139  	if checkETag {
  2140  		value, err := file1.GetXattr("s3.etag")
  2141  		t.Assert(err, IsNil)
  2142  		// md5sum of "file1"
  2143  		t.Assert(string(value), Equals, "\"826e8142e6baabe8af779f5f490cf5f5\"")
  2144  	}
  2145  
  2146  	value, err := file1.GetXattr("user.name")
  2147  	t.Assert(err, IsNil)
  2148  	t.Assert(string(value), Equals, "file1+/#\x00")
  2149  
  2150  	dir1, err := s.LookUpInode(t, "dir1")
  2151  	t.Assert(err, IsNil)
  2152  
  2153  	if !s.cloud.Capabilities().DirBlob {
  2154  		// implicit dir blobs don't have s3.etag at all
  2155  		names, err = dir1.ListXattr()
  2156  		t.Assert(err, IsNil)
  2157  		t.Assert(len(names), Equals, 0, Commentf("names: %v", names))
  2158  
  2159  		value, err = dir1.GetXattr(xattrPrefix + "etag")
  2160  		t.Assert(err, Equals, syscall.ENODATA)
  2161  	}
  2162  
  2163  	// list dir1 to populate file3 in cache, then get file3's xattr
  2164  	lookup := fuseops.LookUpInodeOp{
  2165  		Parent: fuseops.RootInodeID,
  2166  		Name:   "dir1",
  2167  	}
  2168  	err = s.fs.LookUpInode(nil, &lookup)
  2169  	t.Assert(err, IsNil)
  2170  
  2171  	s.readDirIntoCache(t, lookup.Entry.Child)
  2172  
  2173  	dir1 = s.fs.inodes[lookup.Entry.Child]
  2174  	file3 := dir1.findChild("file3")
  2175  	t.Assert(file3, NotNil)
  2176  	t.Assert(file3.userMetadata, IsNil)
  2177  
  2178  	if checkETag {
  2179  		value, err = file3.GetXattr("s3.etag")
  2180  		t.Assert(err, IsNil)
  2181  		// md5sum of "dir1/file3"
  2182  		t.Assert(string(value), Equals, "\"5cd67e0e59fb85be91a515afe0f4bb24\"")
  2183  	}
  2184  
  2185  	// ensure that we get the dir blob instead of list
  2186  	s.fs.flags.Cheap = true
  2187  
  2188  	emptyDir2, err := s.LookUpInode(t, "empty_dir2")
  2189  	t.Assert(err, IsNil)
  2190  
  2191  	names, err = emptyDir2.ListXattr()
  2192  	t.Assert(err, IsNil)
  2193  	sort.Strings(names)
  2194  	t.Assert(names, DeepEquals, expectedXattrs)
  2195  
  2196  	emptyDir, err := s.LookUpInode(t, "empty_dir")
  2197  	t.Assert(err, IsNil)
  2198  
  2199  	if checkETag {
  2200  		value, err = emptyDir.GetXattr("s3.etag")
  2201  		t.Assert(err, IsNil)
  2202  		// dir blobs are empty
  2203  		t.Assert(string(value), Equals, "\"d41d8cd98f00b204e9800998ecf8427e\"")
  2204  	}
  2205  
  2206  	// s3proxy doesn't support storage class yet
  2207  	if hasEnv("AWS") {
  2208  		cloud := s.getRoot(t).dir.cloud
  2209  		s3, ok := cloud.Delegate().(*S3Backend)
  2210  		t.Assert(ok, Equals, true)
  2211  		s3.config.StorageClass = "STANDARD_IA"
  2212  
  2213  		s.testWriteFile(t, "ia", 1, 128*1024)
  2214  
  2215  		ia, err := s.LookUpInode(t, "ia")
  2216  		t.Assert(err, IsNil)
  2217  
  2218  		names, err = ia.ListXattr()
        		t.Assert(err, IsNil)
  2219  		t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})
  2220  
  2221  		value, err = ia.GetXattr("s3.storage-class")
  2222  		t.Assert(err, IsNil)
  2223  		// smaller than 128KB falls back to standard
  2224  		t.Assert(string(value), Equals, "STANDARD")
  2225  
  2226  		s.testWriteFile(t, "ia", 128*1024, 128*1024)
  2227  		time.Sleep(100 * time.Millisecond)
  2228  
  2229  		names, err = ia.ListXattr()
        		t.Assert(err, IsNil)
  2230  		t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})
  2231  
  2232  		value, err = ia.GetXattr("s3.storage-class")
  2233  		t.Assert(err, IsNil)
  2234  		t.Assert(string(value), Equals, "STANDARD_IA")
  2235  	}
  2236  }
  2237  
  2238  func (s *GoofysTest) TestClientForkExec(t *C) {
  2239  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2240  	s.mount(t, mountPoint)
  2241  	defer s.umount(t, mountPoint)
  2242  	file := mountPoint + "/TestClientForkExec"
  2243  
  2244  	// Create new file.
  2245  	fh, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0600)
  2246  	t.Assert(err, IsNil)
  2247  	defer func() { // Defer close file if it's not already closed.
  2248  		if fh != nil {
  2249  			fh.Close()
  2250  		}
  2251  	}()
  2252  	// Write to file.
  2253  	_, err = fh.WriteString("1.1;")
  2254  	t.Assert(err, IsNil)
  2255  	// The `Command` is run via fork+exec.
  2256  	// So all the file descriptors are copied over to the child process.
  2257  	// The child process 'closes' the files before exiting. This should
  2258  	// not result in goofys failing file operations invoked from the test.
  2259  	someCmd := exec.Command("echo", "hello")
  2260  	err = someCmd.Run()
  2261  	t.Assert(err, IsNil)
  2262  	// One more write.
  2263  	_, err = fh.WriteString("1.2;")
  2264  	t.Assert(err, IsNil)
  2265  	// Close file.
  2266  	err = fh.Close()
  2267  	t.Assert(err, IsNil)
  2268  	fh = nil
  2269  	// Check file content.
  2270  	content, err := ioutil.ReadFile(file)
  2271  	t.Assert(err, IsNil)
  2272  	t.Assert(string(content), Equals, "1.1;1.2;")
  2273  
  2274  	// Repeat the same exercise, but now with an existing file.
  2275  	fh, err = os.OpenFile(file, os.O_RDWR, 0600)
        	t.Assert(err, IsNil)
  2276  	// Write to file.
  2277  	_, err = fh.WriteString("2.1;")
        	t.Assert(err, IsNil)
  2278  	// fork+exec.
  2279  	someCmd = exec.Command("echo", "hello")
  2280  	err = someCmd.Run()
  2281  	t.Assert(err, IsNil)
  2282  	// One more write.
  2283  	_, err = fh.WriteString("2.2;")
  2284  	t.Assert(err, IsNil)
  2285  	// Close file.
  2286  	err = fh.Close()
  2287  	t.Assert(err, IsNil)
  2288  	fh = nil
  2289  	// Verify that the file is updated as per the new write.
  2290  	content, err = ioutil.ReadFile(file)
  2291  	t.Assert(err, IsNil)
  2292  	t.Assert(string(content), Equals, "2.1;2.2;")
  2293  }
  2294  
  2295  func (s *GoofysTest) TestXAttrGetCached(t *C) {
  2296  	if _, ok := s.cloud.(*ADLv1); ok {
  2297  		t.Skip("ADLv1 doesn't support metadata")
  2298  	}
  2299  
  2300  	xattrPrefix := s.cloud.Capabilities().Name + "."
  2301  
  2302  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2303  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2304  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2305  	s.disableS3()
  2306  
  2307  	in, err := s.LookUpInode(t, "file1")
  2308  	t.Assert(err, IsNil)
  2309  	t.Assert(in.userMetadata, IsNil)
  2310  
  2311  	_, err = in.GetXattr(xattrPrefix + "etag")
  2312  	t.Assert(err, IsNil)
  2313  }
  2314  
  2315  func (s *GoofysTest) TestXAttrCopied(t *C) {
  2316  	if _, ok := s.cloud.(*ADLv1); ok {
  2317  		t.Skip("ADLv1 doesn't support metadata")
  2318  	}
  2319  
  2320  	root := s.getRoot(t)
  2321  
  2322  	err := root.Rename("file1", root, "file0")
  2323  	t.Assert(err, IsNil)
  2324  
  2325  	in, err := s.LookUpInode(t, "file0")
  2326  	t.Assert(err, IsNil)
  2327  
  2328  	_, err = in.GetXattr("user.name")
  2329  	t.Assert(err, IsNil)
  2330  }
  2331  
  2332  func (s *GoofysTest) TestXAttrRemove(t *C) {
  2333  	if _, ok := s.cloud.(*ADLv1); ok {
  2334  		t.Skip("ADLv1 doesn't support metadata")
  2335  	}
  2336  
  2337  	in, err := s.LookUpInode(t, "file1")
  2338  	t.Assert(err, IsNil)
  2339  
  2340  	_, err = in.GetXattr("user.name")
  2341  	t.Assert(err, IsNil)
  2342  
  2343  	err = in.RemoveXattr("user.name")
  2344  	t.Assert(err, IsNil)
  2345  
  2346  	_, err = in.GetXattr("user.name")
  2347  	t.Assert(err, Equals, syscall.ENODATA)
  2348  }
  2349  
  2350  func (s *GoofysTest) TestXAttrFuse(t *C) {
  2351  	if _, ok := s.cloud.(*ADLv1); ok {
  2352  		t.Skip("ADLv1 doesn't support metadata")
  2353  	}
  2354  
  2355  	_, checkETag := s.cloud.Delegate().(*S3Backend)
  2356  	xattrPrefix := s.cloud.Capabilities().Name + "."
  2357  
  2358  	//fuseLog.Level = logrus.DebugLevel
  2359  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2360  	s.mount(t, mountPoint)
  2361  	defer s.umount(t, mountPoint)
  2362  
  2363  	expectedXattrs := []string{
  2364  		xattrPrefix + "etag",
  2365  		xattrPrefix + "storage-class",
  2366  		"user.name",
  2367  	}
  2368  	sort.Strings(expectedXattrs)
  2369  
  2370  	var expectedXattrsStr string
  2371  	for _, x := range expectedXattrs {
  2372  		expectedXattrsStr += x + "\x00"
  2373  	}
  2374  	var buf [1024]byte
  2375  
  2376  	// error if size is too small (but not zero)
  2377  	_, err := unix.Listxattr(mountPoint+"/file1", buf[:1])
  2378  	t.Assert(err, Equals, unix.ERANGE)
  2379  
  2380  	// a 0-length buffer means interrogate the required buffer size
  2381  	nbytes, err := unix.Listxattr(mountPoint+"/file1", nil)
  2382  	t.Assert(err, Equals, nil)
  2383  	t.Assert(nbytes, Equals, len(expectedXattrsStr))
  2384  
  2385  	nbytes, err = unix.Listxattr(mountPoint+"/file1", buf[:nbytes])
  2386  	t.Assert(err, IsNil)
  2387  	t.Assert(nbytes, Equals, len(expectedXattrsStr))
  2388  	t.Assert(string(buf[:nbytes]), Equals, expectedXattrsStr)
  2389  
  2390  	_, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:1])
  2391  	t.Assert(err, Equals, unix.ERANGE)
  2392  
  2393  	nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", nil)
  2394  	t.Assert(err, IsNil)
  2395  	t.Assert(nbytes, Equals, 9)
  2396  
  2397  	nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:nbytes])
  2398  	t.Assert(err, IsNil)
  2399  	t.Assert(nbytes, Equals, 9)
  2400  	t.Assert(string(buf[:nbytes]), Equals, "file1+/#\x00")
  2401  
  2402  	if !s.cloud.Capabilities().DirBlob {
  2403  		// dir1 has no xattrs
  2404  		nbytes, err = unix.Listxattr(mountPoint+"/dir1", nil)
  2405  		t.Assert(err, IsNil)
  2406  		t.Assert(nbytes, Equals, 0)
  2407  
  2408  		nbytes, err = unix.Listxattr(mountPoint+"/dir1", buf[:1])
  2409  		t.Assert(err, IsNil)
  2410  		t.Assert(nbytes, Equals, 0)
  2411  	}
  2412  
  2413  	if checkETag {
  2414  		_, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:1])
  2415  		t.Assert(err, Equals, unix.ERANGE)
  2416  
  2417  		nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", nil)
  2418  		t.Assert(err, IsNil)
  2419  		// 32 bytes md5 plus quotes
  2420  		t.Assert(nbytes, Equals, 34)
  2421  
  2422  		nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:nbytes])
  2423  		t.Assert(err, IsNil)
  2424  		t.Assert(nbytes, Equals, 34)
  2425  		t.Assert(string(buf[:nbytes]), Equals,
  2426  			"\"826e8142e6baabe8af779f5f490cf5f5\"")
  2427  	}
  2428  }
  2429  
  2430  func (s *GoofysTest) TestXAttrSet(t *C) {
  2431  	if _, ok := s.cloud.(*ADLv1); ok {
  2432  		t.Skip("ADLv1 doesn't support metadata")
  2433  	}
  2434  
  2435  	in, err := s.LookUpInode(t, "file1")
  2436  	t.Assert(err, IsNil)
  2437  
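        	// XATTR_REPLACE requires the attribute to already exist and
        	// XATTR_CREATE requires it to not exist; exercise both
        	// failure modes before the normal paths below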
  2438  	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_REPLACE)
  2439  	t.Assert(err, Equals, syscall.ENODATA)
  2440  
  2441  	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE)
  2442  	t.Assert(err, IsNil)
  2443  
  2444  	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE)
  2445  	t.Assert(err, Equals, syscall.EEXIST)
  2446  
  2447  	in, err = s.LookUpInode(t, "file1")
  2448  	t.Assert(err, IsNil)
  2449  
  2450  	value, err := in.GetXattr("user.bar")
  2451  	t.Assert(err, IsNil)
  2452  	t.Assert(string(value), Equals, "hello")
  2453  
  2454  	value = []byte("file1+%/#\x00")
  2455  
  2456  	err = in.SetXattr("user.bar", value, unix.XATTR_REPLACE)
  2457  	t.Assert(err, IsNil)
  2458  
  2459  	in, err = s.LookUpInode(t, "file1")
  2460  	t.Assert(err, IsNil)
  2461  
  2462  	value2, err := in.GetXattr("user.bar")
  2463  	t.Assert(err, IsNil)
  2464  	t.Assert(value2, DeepEquals, value)
  2465  
  2466  	// setting with flag = 0 always works
  2467  	err = in.SetXattr("user.bar", []byte("world"), 0)
  2468  	t.Assert(err, IsNil)
  2469  
  2470  	err = in.SetXattr("user.baz", []byte("world"), 0)
  2471  	t.Assert(err, IsNil)
  2472  
  2473  	value, err = in.GetXattr("user.bar")
  2474  	t.Assert(err, IsNil)
  2475  
  2476  	value2, err = in.GetXattr("user.baz")
  2477  	t.Assert(err, IsNil)
  2478  
  2479  	t.Assert(value2, DeepEquals, value)
  2480  	t.Assert(string(value2), DeepEquals, "world")
  2481  
  2482  	err = in.SetXattr("s3.bar", []byte("hello"), unix.XATTR_CREATE)
  2483  	t.Assert(err, Equals, syscall.EPERM)
  2484  }
  2485  
  2486  func (s *GoofysTest) TestPythonCopyTree(t *C) {
  2487  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2488  
  2489  	s.runFuseTest(t, mountPoint, true, "python", "-c",
  2490  		"import shutil; shutil.copytree('dir2', 'dir5')",
  2491  		mountPoint)
  2492  }
  2493  
  2494  func (s *GoofysTest) TestCreateRenameBeforeCloseFuse(t *C) {
  2495  	if s.azurite {
  2496  		// Azurite returns 400 when copy source doesn't exist
  2497  		// https://github.com/Azure/Azurite/issues/219
  2498  		// so our code to ignore ENOENT fails
  2499  		t.Skip("https://github.com/Azure/Azurite/issues/219")
  2500  	}
  2501  
  2502  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2503  
  2504  	s.mount(t, mountPoint)
  2505  	defer s.umount(t, mountPoint)
  2506  
  2507  	from := mountPoint + "/newfile"
  2508  	to := mountPoint + "/newfile2"
  2509  
  2510  	fh, err := os.Create(from)
  2511  	t.Assert(err, IsNil)
  2512  	defer func() {
  2513  		// close the file if the test failed so we can unmount
  2514  		if fh != nil {
  2515  			fh.Close()
  2516  		}
  2517  	}()
  2518  
  2519  	_, err = fh.WriteString("hello world")
  2520  	t.Assert(err, IsNil)
  2521  
  2522  	err = os.Rename(from, to)
  2523  	t.Assert(err, IsNil)
  2524  
  2525  	err = fh.Close()
  2526  	t.Assert(err, IsNil)
  2527  	fh = nil
  2528  
  2529  	_, err = os.Stat(from)
  2530  	t.Assert(err, NotNil)
  2531  	pathErr, ok := err.(*os.PathError)
  2532  	t.Assert(ok, Equals, true)
  2533  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2534  
  2535  	content, err := ioutil.ReadFile(to)
  2536  	t.Assert(err, IsNil)
  2537  	t.Assert(string(content), Equals, "hello world")
  2538  }
  2539  
  2540  func (s *GoofysTest) TestRenameBeforeCloseFuse(t *C) {
  2541  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2542  
  2543  	s.mount(t, mountPoint)
  2544  	defer s.umount(t, mountPoint)
  2545  
  2546  	from := mountPoint + "/newfile"
  2547  	to := mountPoint + "/newfile2"
  2548  
  2549  	err := ioutil.WriteFile(from, []byte(""), 0600)
  2550  	t.Assert(err, IsNil)
  2551  
  2552  	fh, err := os.OpenFile(from, os.O_WRONLY, 0600)
  2553  	t.Assert(err, IsNil)
  2554  	defer func() {
  2555  		// close the file if the test failed so we can unmount
  2556  		if fh != nil {
  2557  			fh.Close()
  2558  		}
  2559  	}()
  2560  
  2561  	_, err = fh.WriteString("hello world")
  2562  	t.Assert(err, IsNil)
  2563  
  2564  	err = os.Rename(from, to)
  2565  	t.Assert(err, IsNil)
  2566  
  2567  	err = fh.Close()
  2568  	t.Assert(err, IsNil)
  2569  	fh = nil
  2570  
  2571  	_, err = os.Stat(from)
  2572  	t.Assert(err, NotNil)
  2573  	pathErr, ok := err.(*os.PathError)
  2574  	t.Assert(ok, Equals, true)
  2575  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2576  
  2577  	content, err := ioutil.ReadFile(to)
  2578  	t.Assert(err, IsNil)
  2579  	t.Assert(string(content), Equals, "hello world")
  2580  }
  2581  
  2582  func (s *GoofysTest) TestInodeInsert(t *C) {
  2583  	root := s.getRoot(t)
  2584  
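        	// note: child indices below start at 2, presumably because
        	// slots 0 and 1 hold the implicit "." and ".." entries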
  2585  	in := NewInode(s.fs, root, aws.String("2"))
  2586  	in.Attributes = InodeAttributes{}
  2587  	root.insertChild(in)
  2588  	t.Assert(*root.dir.Children[2].Name, Equals, "2")
  2589  
  2590  	in = NewInode(s.fs, root, aws.String("1"))
  2591  	in.Attributes = InodeAttributes{}
  2592  	root.insertChild(in)
  2593  	t.Assert(*root.dir.Children[2].Name, Equals, "1")
  2594  	t.Assert(*root.dir.Children[3].Name, Equals, "2")
  2595  
  2596  	in = NewInode(s.fs, root, aws.String("4"))
  2597  	in.Attributes = InodeAttributes{}
  2598  	root.insertChild(in)
  2599  	t.Assert(*root.dir.Children[2].Name, Equals, "1")
  2600  	t.Assert(*root.dir.Children[3].Name, Equals, "2")
  2601  	t.Assert(*root.dir.Children[4].Name, Equals, "4")
  2602  
  2603  	inode := root.findChild("1")
  2604  	t.Assert(inode, NotNil)
  2605  	t.Assert(*inode.Name, Equals, "1")
  2606  
  2607  	inode = root.findChild("2")
  2608  	t.Assert(inode, NotNil)
  2609  	t.Assert(*inode.Name, Equals, "2")
  2610  
  2611  	inode = root.findChild("4")
  2612  	t.Assert(inode, NotNil)
  2613  	t.Assert(*inode.Name, Equals, "4")
  2614  
  2615  	inode = root.findChild("0")
  2616  	t.Assert(inode, IsNil)
  2617  
  2618  	inode = root.findChild("3")
  2619  	t.Assert(inode, IsNil)
  2620  
  2621  	root.removeChild(root.dir.Children[3])
  2622  	root.removeChild(root.dir.Children[2])
  2623  	root.removeChild(root.dir.Children[2])
  2624  	t.Assert(len(root.dir.Children), Equals, 2)
  2625  }
  2626  
  2627  func (s *GoofysTest) TestReadDirSlurpHeuristic(t *C) {
  2628  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  2629  		t.Skip("only for S3")
  2630  	}
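        	// seqOpenDirScore tracks sequential opens of sibling
        	// directories; each open in a row should bump it so the S3
        	// backend can decide to slurp the whole subtree in one
        	// listing (behavior inferred from the assertions below)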
  2631  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2632  
  2633  	s.setupBlobs(s.cloud, t, map[string]*string{"dir2isafile": nil})
  2634  
  2635  	root := s.getRoot(t).dir
  2636  	t.Assert(root.seqOpenDirScore, Equals, uint8(0))
  2637  	s.assertEntries(t, s.getRoot(t), []string{
  2638  		"dir1", "dir2", "dir2isafile", "dir4", "empty_dir",
  2639  		"empty_dir2", "file1", "file2", "zero"})
  2640  
  2641  	dir1, err := s.LookUpInode(t, "dir1")
  2642  	t.Assert(err, IsNil)
  2643  	dh1 := dir1.OpenDir()
  2644  	defer dh1.CloseDir()
  2645  	score := root.seqOpenDirScore
  2646  
  2647  	dir2, err := s.LookUpInode(t, "dir2")
  2648  	t.Assert(err, IsNil)
  2649  	dh2 := dir2.OpenDir()
  2650  	defer dh2.CloseDir()
  2651  	t.Assert(root.seqOpenDirScore, Equals, score+1)
  2652  
  2653  	dir3, err := s.LookUpInode(t, "dir4")
  2654  	t.Assert(err, IsNil)
  2655  	dh3 := dir3.OpenDir()
  2656  	defer dh3.CloseDir()
  2657  	t.Assert(root.seqOpenDirScore, Equals, score+2)
  2658  }
  2659  
  2660  func (s *GoofysTest) TestReadDirSlurpSubtree(t *C) {
  2661  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  2662  		t.Skip("only for S3")
  2663  	}
  2664  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2665  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2666  
  2667  	s.getRoot(t).dir.seqOpenDirScore = 2
  2668  	in, err := s.LookUpInode(t, "dir2")
  2669  	t.Assert(err, IsNil)
  2670  	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(2))
  2671  
  2672  	s.readDirIntoCache(t, in.Id)
  2673  	// should have incremented the score
  2674  	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(3))
  2675  
  2676  	// reading dir2 should cause dir2/dir3 to have cached readdir
  2677  	s.disableS3()
  2678  
  2679  	in, err = s.LookUpInode(t, "dir2/dir3")
  2680  	t.Assert(err, IsNil)
  2681  
  2682  	s.assertEntries(t, in, []string{"file4"})
  2683  }
  2684  
  2685  func (s *GoofysTest) TestReadDirCached(t *C) {
  2686  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2687  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2688  
  2689  	s.getRoot(t).dir.seqOpenDirScore = 2
  2690  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2691  	s.disableS3()
  2692  
  2693  	dh := s.getRoot(t).OpenDir()
  2694  
  2695  	entries := s.readDirFully(t, dh)
  2696  	dirs := make([]string, 0)
  2697  	files := make([]string, 0)
  2698  	noMoreDir := false
  2699  
  2700  	for _, en := range entries {
  2701  		if en.Type == fuseutil.DT_Directory {
  2702  			t.Assert(noMoreDir, Equals, false)
  2703  			dirs = append(dirs, en.Name)
  2704  		} else {
  2705  			files = append(files, en.Name)
  2706  			noMoreDir = true
  2707  		}
  2708  	}
  2709  
  2710  	t.Assert(dirs, DeepEquals, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2"})
  2711  	t.Assert(files, DeepEquals, []string{"file1", "file2", "zero"})
  2712  }
  2713  
  2714  func (s *GoofysTest) TestReadDirLookUp(t *C) {
  2715  	s.getRoot(t).dir.seqOpenDirScore = 2
  2716  
  2717  	var wg sync.WaitGroup
  2718  	for i := 0; i < 10; i++ {
  2719  		wg.Add(2)
  2720  		go func() {
  2721  			defer wg.Done()
  2722  			s.readDirIntoCache(t, fuseops.RootInodeID)
  2723  		}()
  2724  		go func() {
  2725  			defer wg.Done()
  2726  
  2727  			lookup := fuseops.LookUpInodeOp{
  2728  				Parent: fuseops.RootInodeID,
  2729  				Name:   "file1",
  2730  			}
  2731  			err := s.fs.LookUpInode(nil, &lookup)
  2732  			t.Assert(err, IsNil)
  2733  		}()
  2734  	}
  2735  	wg.Wait()
  2736  }
  2737  
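        // writeSeekWriteFuse writes first, seeks forward by len(second)
        // leaving a hole, writes third, then seeks back and fills in
        // second; the file must read back as first+second+third with an
        // unchanged mode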
  2738  func (s *GoofysTest) writeSeekWriteFuse(t *C, file string, fh *os.File, first string, second string, third string) {
  2739  	fi, err := os.Stat(file)
  2740  	t.Assert(err, IsNil)
  2741  
  2742  	defer func() {
  2743  		// close the file if the test failed so we can unmount
  2744  		if fh != nil {
  2745  			fh.Close()
  2746  		}
  2747  	}()
  2748  
  2749  	_, err = fh.WriteString(first)
  2750  	t.Assert(err, IsNil)
  2751  
  2752  	off, err := fh.Seek(int64(len(second)), 1)
  2753  	t.Assert(err, IsNil)
  2754  	t.Assert(off, Equals, int64(len(first)+len(second)))
  2755  
  2756  	_, err = fh.WriteString(third)
  2757  	t.Assert(err, IsNil)
  2758  
  2759  	off, err = fh.Seek(int64(len(first)), 0)
  2760  	t.Assert(err, IsNil)
  2761  	t.Assert(off, Equals, int64(len(first)))
  2762  
  2763  	_, err = fh.WriteString(second)
  2764  	t.Assert(err, IsNil)
  2765  
  2766  	err = fh.Close()
  2767  	t.Assert(err, IsNil)
  2768  	fh = nil
  2769  
  2770  	content, err := ioutil.ReadFile(file)
  2771  	t.Assert(err, IsNil)
  2772  	t.Assert(string(content), Equals, first+second+third)
  2773  
  2774  	fi2, err := os.Stat(file)
  2775  	t.Assert(err, IsNil)
  2776  	t.Assert(fi.Mode(), Equals, fi2.Mode())
  2777  }
  2778  
  2779  func (s *GoofysTest) TestWriteSeekWriteFuse(t *C) {
  2780  	if !isCatfs() {
  2781  		t.Skip("only works with CATFS=true")
  2782  	}
  2783  
  2784  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2785  	s.mount(t, mountPoint)
  2786  	defer s.umount(t, mountPoint)
  2787  
  2788  	file := mountPoint + "/newfile"
  2789  
  2790  	fh, err := os.Create(file)
  2791  	t.Assert(err, IsNil)
  2792  
  2793  	s.writeSeekWriteFuse(t, file, fh, "hello", " ", "world")
  2794  
  2795  	fh, err = os.OpenFile(file, os.O_WRONLY, 0600)
  2796  	t.Assert(err, IsNil)
  2797  
  2798  	s.writeSeekWriteFuse(t, file, fh, "", "never", "minding")
  2799  }
  2800  
  2801  func (s *GoofysTest) TestDirMtimeCreate(t *C) {
  2802  	root := s.getRoot(t)
  2803  
  2804  	attr, _ := root.GetAttributes()
  2805  	m1 := attr.Mtime
  2806  	time.Sleep(time.Second)
  2807  
  2808  	_, _ = root.Create("foo", fuseops.OpMetadata{uint32(os.Getpid())})
  2809  	attr2, _ := root.GetAttributes()
  2810  	m2 := attr2.Mtime
  2811  
  2812  	t.Assert(m1.Before(m2), Equals, true)
  2813  }
  2814  
  2815  func (s *GoofysTest) TestDirMtimeLs(t *C) {
  2816  	root := s.getRoot(t)
  2817  
  2818  	attr, _ := root.GetAttributes()
  2819  	m1 := attr.Mtime
  2820  	time.Sleep(3 * time.Second)
  2821  
  2822  	params := &PutBlobInput{
  2823  		Key:  "newfile",
  2824  		Body: bytes.NewReader([]byte("foo")),
  2825  		Size: PUInt64(3),
  2826  	}
  2827  	_, err := s.cloud.PutBlob(params)
  2828  	t.Assert(err, IsNil)
  2829  
  2830  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2831  
  2832  	attr2, _ := root.GetAttributes()
  2833  	m2 := attr2.Mtime
  2834  
  2835  	t.Assert(m1.Before(m2), Equals, true)
  2836  }
  2837  
  2838  func (s *GoofysTest) TestRenameOverwrite(t *C) {
  2839  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2840  	s.mount(t, mountPoint)
  2841  	defer s.umount(t, mountPoint)
  2842  
  2843  	file := mountPoint + "/newfile"
  2844  	rename := mountPoint + "/file1"
  2845  
  2846  	fh, err := os.Create(file)
  2847  	t.Assert(err, IsNil)
  2848  
  2849  	err = fh.Close()
  2850  	t.Assert(err, IsNil)
  2851  
  2852  	err = os.Rename(file, rename)
  2853  	t.Assert(err, IsNil)
  2854  }
  2855  
  2856  func (s *GoofysTest) TestRead403(t *C) {
  2857  	// anonymous only works in S3 for now
  2858  	cloud := s.getRoot(t).dir.cloud
  2859  	s3, ok := cloud.Delegate().(*S3Backend)
  2860  	if !ok {
  2861  		t.Skip("only for S3")
  2862  	}
  2863  
  2864  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2865  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2866  
  2867  	// cache the inode first so we don't get 403 when we lookup
  2868  	in, err := s.LookUpInode(t, "file1")
  2869  	t.Assert(err, IsNil)
  2870  
  2871  	fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
  2872  	t.Assert(err, IsNil)
  2873  
  2874  	s3.awsConfig.Credentials = credentials.AnonymousCredentials
  2875  	s3.newS3()
  2876  
  2877  	// fake enable read-ahead
  2878  	fh.seqReadAmount = uint64(READAHEAD_CHUNK)
  2879  
  2880  	buf := make([]byte, 5)
  2881  
  2882  	_, err = fh.ReadFile(0, buf)
  2883  	t.Assert(err, Equals, syscall.EACCES)
  2884  
  2885  	// now that the S3 GET has failed, try again, see
  2886  	// https://github.com/kahing/goofys/pull/243
  2887  	_, err = fh.ReadFile(0, buf)
  2888  	t.Assert(err, Equals, syscall.EACCES)
  2889  }
  2890  
  2891  func (s *GoofysTest) TestRmdirWithDiropen(t *C) {
  2892  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2893  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2894  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2895  
  2896  	s.mount(t, mountPoint)
  2897  	defer s.umount(t, mountPoint)
  2898  
  2899  	err := os.MkdirAll(mountPoint+"/dir2/dir4", 0700)
  2900  	t.Assert(err, IsNil)
  2901  	err = os.MkdirAll(mountPoint+"/dir2/dir5", 0700)
  2902  	t.Assert(err, IsNil)
  2903  
  2904  	// 1. open dir5
  2905  	dir := mountPoint + "/dir2/dir5"
  2906  	fh, err := os.Open(dir)
  2907  	t.Assert(err, IsNil)
  2908  	defer fh.Close()
  2909  
  2910  	cmd1 := exec.Command("ls", mountPoint+"/dir2")
  2912  	out1, err1 := cmd1.Output()
  2913  	if err1 != nil {
  2914  		if ee, ok := err1.(*exec.ExitError); ok {
  2915  			panic(ee.Stderr)
  2916  		}
  2917  	}
  2918  	t.Assert(string(out1), DeepEquals, ""+"dir3\n"+"dir4\n"+"dir5\n")
  2919  
  2920  	// 2. rm -rf dir5
  2921  	cmd := exec.Command("rm", "-rf", dir)
  2922  	_, err = cmd.Output()
  2923  	if err != nil {
  2924  		if ee, ok := err.(*exec.ExitError); ok {
  2925  			panic(ee.Stderr)
  2926  		}
  2927  	}
  2928  
  2929  	// 3. readdir dir2
  2930  	fh1, err := os.Open(mountPoint + "/dir2")
  2931  	t.Assert(err, IsNil)
  2932  	defer func() {
  2933  		// close the file if the test failed so we can unmount
  2934  		if fh1 != nil {
  2935  			fh1.Close()
  2936  		}
  2937  	}()
  2938  
  2939  	names, err := fh1.Readdirnames(0)
  2940  	t.Assert(err, IsNil)
  2941  	t.Assert(names, DeepEquals, []string{"dir3", "dir4"})
  2942  
  2943  	cmd = exec.Command("ls", mountPoint+"/dir2")
  2944  	out, err := cmd.Output()
  2945  	if err != nil {
  2946  		if ee, ok := err.(*exec.ExitError); ok {
  2947  			panic(ee.Stderr)
  2948  		}
  2949  	}
  2950  
  2951  	t.Assert(string(out), DeepEquals, ""+"dir3\n"+"dir4\n")
  2952  
  2953  	err = fh1.Close()
  2954  	t.Assert(err, IsNil)
  2955  
  2956  	// 4. reset env
  2957  	err = fh.Close()
  2958  	t.Assert(err, IsNil)
  2959  
  2960  	err = os.RemoveAll(mountPoint + "/dir2/dir4")
  2961  	t.Assert(err, IsNil)
  2963  }
  2964  
  2965  func (s *GoofysTest) TestDirMTime(t *C) {
  2966  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2967  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2968  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  2969  	s.fs.flags.Cheap = true
  2970  
  2971  	root := s.getRoot(t)
  2972  	t.Assert(time.Time{}.Before(root.Attributes.Mtime), Equals, true)
  2973  
  2974  	file1, err := s.LookUpInode(t, "dir1")
  2975  	t.Assert(err, IsNil)
  2976  
  2977  	// take mtime from a blob as init time because when we test against
  2978  	// real cloud, server time can be way off from local time
  2979  	initTime := file1.Attributes.Mtime
  2980  
  2981  	dir1, err := s.LookUpInode(t, "dir1")
  2982  	t.Assert(err, IsNil)
  2983  
  2984  	attr1, _ := dir1.GetAttributes()
  2985  	m1 := attr1.Mtime
  2986  	if !s.cloud.Capabilities().DirBlob {
  2987  		// dir1 doesn't have a dir blob, so should take root's mtime
  2988  		t.Assert(m1, Equals, root.Attributes.Mtime)
  2989  	}
  2990  
  2991  	time.Sleep(2 * time.Second)
  2992  
  2993  	dir2, err := dir1.MkDir("dir2")
  2994  	t.Assert(err, IsNil)
  2995  
  2996  	attr2, _ := dir2.GetAttributes()
  2997  	m2 := attr2.Mtime
  2998  	t.Assert(m1.Add(2*time.Second).Before(m2), Equals, true)
  2999  
  3000  	// dir1 didn't have an explicit mtime, so it should update now
  3001  	// that we did a mkdir inside it
  3002  	attr1, _ = dir1.GetAttributes()
  3003  	m1 = attr1.Mtime
  3004  	t.Assert(m1, Equals, m2)
  3005  
  3006  	// we never added the inode so this will do the lookup again
  3007  	dir2, err = dir1.LookUp("dir2")
  3008  	t.Assert(err, IsNil)
  3009  
  3010  	// the new time comes from S3 which only has seconds
  3011  	// granularity
  3012  	attr2, _ = dir2.GetAttributes()
  3013  	t.Assert(m2, Not(Equals), attr2.Mtime)
  3014  	t.Assert(initTime.Add(time.Second).Before(attr2.Mtime), Equals, true)
  3015  
  3016  	// different dir2
  3017  	dir2, err = s.LookUpInode(t, "dir2")
  3018  	t.Assert(err, IsNil)
  3019  
  3020  	attr2, _ = dir2.GetAttributes()
  3021  	m2 = attr2.Mtime
  3022  
  3023  	// this fails because we are listing dir/, which means we
  3024  	// don't actually see the dir blob dir2/dir3/ (it's returned
  3025  	// as common prefix), so we can't get dir3's mtime
  3026  	if false {
  3027  		// dir2/dir3/ exists and has mtime
  3028  		s.readDirIntoCache(t, dir2.Id)
  3029  		dir3, err := s.LookUpInode(t, "dir2/dir3")
  3030  		t.Assert(err, IsNil)
  3031  
  3032  		attr3, _ := dir3.GetAttributes()
  3033  		// setupDefaultEnv is before mounting
  3034  		t.Assert(attr3.Mtime.Before(m2), Equals, true)
  3035  	}
  3036  
  3037  	time.Sleep(time.Second)
  3038  
  3039  	params := &PutBlobInput{
  3040  		Key:  "dir2/newfile",
  3041  		Body: bytes.NewReader([]byte("foo")),
  3042  		Size: PUInt64(3),
  3043  	}
  3044  	_, err = s.cloud.PutBlob(params)
  3045  	t.Assert(err, IsNil)
  3046  
  3047  	s.readDirIntoCache(t, dir2.Id)
  3048  
  3049  	newfile, err := dir2.LookUp("newfile")
  3050  	t.Assert(err, IsNil)
  3051  
  3052  	attr2New, _ := dir2.GetAttributes()
  3053  	// mtime should reflect that of the latest object
  3054  	// GCS can return nano second resolution so truncate to second for compare
  3055  	t.Assert(attr2New.Mtime.Unix(), Equals, newfile.Attributes.Mtime.Unix())
  3056  	t.Assert(m2.Before(attr2New.Mtime), Equals, true)
  3057  }
  3058  
  3059  func (s *GoofysTest) TestDirMTimeNoTTL(t *C) {
  3060  	if s.cloud.Capabilities().DirBlob {
  3061  		t.Skip("Tests for behavior without dir blob")
  3062  	}
  3063  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  3064  	s.fs.flags.Cheap = true
  3065  
  3066  	dir2, err := s.LookUpInode(t, "dir2")
  3067  	t.Assert(err, IsNil)
  3068  
  3069  	attr2, _ := dir2.GetAttributes()
  3070  	m2 := attr2.Mtime
  3071  
  3072  	// dir2/dir3/ exists and has mtime
  3073  	s.readDirIntoCache(t, dir2.Id)
  3074  	dir3, err := s.LookUpInode(t, "dir2/dir3")
  3075  	t.Assert(err, IsNil)
  3076  
  3077  	attr3, _ := dir3.GetAttributes()
  3078  	// setupDefaultEnv is before mounting but we can't really
  3079  	// compare the time here since dir3 is s3 server time and dir2
  3080  	// is local time
  3081  	t.Assert(attr3.Mtime, Not(Equals), m2)
  3082  }
  3083  
  3084  func (s *GoofysTest) TestIssue326(t *C) {
  3085  	root := s.getRoot(t)
  3086  	_, err := root.MkDir("folder@name.something")
  3087  	t.Assert(err, IsNil)
  3088  	_, err = root.MkDir("folder#1#")
  3089  	t.Assert(err, IsNil)
  3090  
  3091  	s.readDirIntoCache(t, root.Id)
  3092  	s.assertEntries(t, root, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2",
  3093  		"file1", "file2", "folder#1#", "folder@name.something", "zero"})
  3094  }
  3095  
  3096  func (s *GoofysTest) TestSlurpFileAndDir(t *C) {
  3097  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  3098  		t.Skip("only for S3")
  3099  	}
  3100  	prefix := "TestSlurpFileAndDir/"
  3101  	// fileAndDir is both a file and a directory, and we are
  3102  	// slurping them together as part of our listing optimization
  3103  	blobs := []string{
  3104  		prefix + "fileAndDir",
  3105  		prefix + "fileAndDir/a",
  3106  	}
  3107  
  3108  	for _, b := range blobs {
  3109  		params := &PutBlobInput{
  3110  			Key:  b,
  3111  			Body: bytes.NewReader([]byte("foo")),
  3112  			Size: PUInt64(3),
  3113  		}
  3114  		_, err := s.cloud.PutBlob(params)
  3115  		t.Assert(err, IsNil)
  3116  	}
  3117  
  3118  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3119  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  3120  
  3121  	in, err := s.LookUpInode(t, prefix[0:len(prefix)-1])
  3122  	t.Assert(err, IsNil)
  3123  	t.Assert(in.dir, NotNil)
  3124  
  3125  	s.getRoot(t).dir.seqOpenDirScore = 2
  3126  	s.readDirIntoCache(t, in.Id)
  3127  
  3128  	// should have slurped these
  3129  	in = in.findChild("fileAndDir")
  3130  	t.Assert(in, NotNil)
  3131  	t.Assert(in.dir, NotNil)
  3132  
  3133  	in = in.findChild("a")
  3134  	t.Assert(in, NotNil)
  3135  
  3136  	// because of slurping we've decided that this is a directory,
  3137  	// lookup must _not_ talk to S3 again because otherwise we may
  3138  	// decide it's a file again because of S3 race
  3139  	s.disableS3()
  3140  	in, err = s.LookUpInode(t, prefix+"fileAndDir")
  3141  	t.Assert(err, IsNil)
  3142  
  3143  	s.assertEntries(t, in, []string{"a"})
  3144  }
  3145  
  3146  func (s *GoofysTest) TestAzureDirBlob(t *C) {
  3147  	if _, ok := s.cloud.(*AZBlob); !ok {
  3148  		t.Skip("only for Azure blob")
  3149  	}
  3150  
  3151  	fakedir := []string{"dir2", "dir3"}
  3152  
  3153  	for _, d := range fakedir {
  3154  		params := &PutBlobInput{
  3155  			Key:  "azuredir/" + d,
  3156  			Body: bytes.NewReader([]byte("")),
  3157  			Metadata: map[string]*string{
  3158  				AzureDirBlobMetadataKey: PString("true"),
  3159  			},
  3160  			Size: PUInt64(0),
  3161  		}
  3162  		_, err := s.cloud.PutBlob(params)
  3163  		t.Assert(err, IsNil)
  3164  	}
  3165  
  3166  	defer func() {
  3167  		// because our listing changes dir3 to dir3/, the test
  3168  		// cleanup cannot delete the blob, so we need to clean
  3169  		// it up here
  3170  		for _, d := range fakedir {
  3171  			_, err := s.cloud.DeleteBlob(&DeleteBlobInput{Key: "azuredir/" + d})
  3172  			t.Assert(err, IsNil)
  3173  		}
  3174  	}()
  3175  
  3176  	s.setupBlobs(s.cloud, t, map[string]*string{
  3177  		// "azuredir/dir" would have gone here
  3178  		"azuredir/dir3,/":           nil,
  3179  		"azuredir/dir3/file1":       nil,
  3180  		"azuredir/dir345_is_a_file": nil,
  3181  	})
  3182  
  3183  	head, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir3"})
  3184  	t.Assert(err, IsNil)
  3185  	t.Assert(head.IsDirBlob, Equals, true)
  3186  
  3187  	head, err = s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir345_is_a_file"})
  3188  	t.Assert(err, IsNil)
  3189  	t.Assert(head.IsDirBlob, Equals, false)
  3190  
  3191  	list, err := s.cloud.ListBlobs(&ListBlobsInput{Prefix: PString("azuredir/")})
  3192  	t.Assert(err, IsNil)
  3193  
  3194  	// for flat listing, we rename `dir3` to `dir3/` and add it to Items;
  3195  	// `dir3` normally sorts before `dir3,/`, but after the rename `dir3/`
  3196  	// should sort after `dir3,/`
  3197  	t.Assert(len(list.Items), Equals, 5)
  3198  	t.Assert(*list.Items[0].Key, Equals, "azuredir/dir2/")
  3199  	t.Assert(*list.Items[1].Key, Equals, "azuredir/dir3,/")
  3200  	t.Assert(*list.Items[2].Key, Equals, "azuredir/dir3/")
  3201  	t.Assert(*list.Items[3].Key, Equals, "azuredir/dir3/file1")
  3202  	t.Assert(*list.Items[4].Key, Equals, "azuredir/dir345_is_a_file")
  3203  	t.Assert(sort.IsSorted(sortBlobItemOutput(list.Items)), Equals, true)
  3204  
  3205  	list, err = s.cloud.ListBlobs(&ListBlobsInput{
  3206  		Prefix:    PString("azuredir/"),
  3207  		Delimiter: PString("/"),
  3208  	})
  3209  	t.Assert(err, IsNil)
  3210  
  3211  	// for delimited listing, we remove `dir3` from items and add `dir3/` to prefixes,
  3212  	// which should already be there
  3213  	t.Assert(len(list.Items), Equals, 1)
  3214  	t.Assert(*list.Items[0].Key, Equals, "azuredir/dir345_is_a_file")
  3215  
  3216  	t.Assert(len(list.Prefixes), Equals, 3)
  3217  	t.Assert(*list.Prefixes[0].Prefix, Equals, "azuredir/dir2/")
  3218  	t.Assert(*list.Prefixes[1].Prefix, Equals, "azuredir/dir3,/")
  3219  	t.Assert(*list.Prefixes[2].Prefix, Equals, "azuredir/dir3/")
  3220  
  3221  	// finally check that we are reading them in correctly
  3222  	in, err := s.LookUpInode(t, "azuredir")
  3223  	t.Assert(err, IsNil)
  3224  
  3225  	s.assertEntries(t, in, []string{"dir2", "dir3", "dir3,", "dir345_is_a_file"})
  3226  }
  3227  
  3228  func (s *GoofysTest) TestReadDirLarge(t *C) {
  3229  	root := s.getRoot(t)
  3230  	root.dir.mountPrefix = "empty_dir"
  3231  
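        	// create enough blobs to exceed a single listing page
        	// (assuming the backend's default page size of 1000 keys),
        	// with the 0998f/0999f/1000f files sitting near the boundary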
  3232  	blobs := make(map[string]*string)
  3233  	expect := make([]string, 0)
  3234  	for i := 0; i < 998; i++ {
  3235  		blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil
  3236  		expect = append(expect, fmt.Sprintf("%04vd", i))
  3237  	}
  3238  	blobs["empty_dir/0998f"] = nil
  3239  	blobs["empty_dir/0999f"] = nil
  3240  	blobs["empty_dir/1000f"] = nil
  3241  	expect = append(expect, "0998f")
  3242  	expect = append(expect, "0999f")
  3243  	expect = append(expect, "1000f")
  3244  
  3245  	for i := 1001; i < 1003; i++ {
  3246  		blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil
  3247  		expect = append(expect, fmt.Sprintf("%04vd", i))
  3248  	}
  3249  
  3250  	s.setupBlobs(s.cloud, t, blobs)
  3251  
  3252  	dh := root.OpenDir()
  3253  	defer dh.CloseDir()
  3254  
  3255  	children := namesOf(s.readDirFully(t, dh))
  3256  	sort.Strings(children)
  3257  
  3258  	t.Assert(children, DeepEquals, expect)
  3259  }
  3260  
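        // newBackend creates another backend of the same type as s.cloud
        // for the given bucket, optionally creating the bucket and
        // scheduling it for removal at teardown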
  3261  func (s *GoofysTest) newBackend(t *C, bucket string, createBucket bool) (cloud StorageBackend) {
  3262  	var err error
  3263  	switch s.cloud.Delegate().(type) {
  3264  	case *S3Backend:
  3265  		config, _ := s.fs.flags.Backend.(*S3Config)
  3266  		s3, err := NewS3(bucket, s.fs.flags, config)
  3267  		t.Assert(err, IsNil)
  3268  
  3269  		s3.aws = hasEnv("AWS")
  3270  
  3271  		if !hasEnv("MINIO") {
  3272  			s3.Handlers.Sign.Clear()
  3273  			s3.Handlers.Sign.PushBack(SignV2)
  3274  			s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
  3275  		}
  3276  
  3277  		if s3.aws {
  3278  			cloud = &S3BucketEventualConsistency{s3}
  3279  		} else {
  3280  			cloud = s3
  3281  		}
  3282  	case *GCS3:
  3283  		config, _ := s.fs.flags.Backend.(*S3Config)
  3284  		cloud, err = NewGCS3(bucket, s.fs.flags, config)
  3285  		t.Assert(err, IsNil)
  3286  	case *AZBlob:
  3287  		config, _ := s.fs.flags.Backend.(*AZBlobConfig)
  3288  		cloud, err = NewAZBlob(bucket, config)
  3289  		t.Assert(err, IsNil)
  3290  	case *ADLv1:
  3291  		config, _ := s.fs.flags.Backend.(*ADLv1Config)
  3292  		cloud, err = NewADLv1(bucket, s.fs.flags, config)
  3293  		t.Assert(err, IsNil)
  3294  	case *ADLv2:
  3295  		config, _ := s.fs.flags.Backend.(*ADLv2Config)
  3296  		cloud, err = NewADLv2(bucket, s.fs.flags, config)
  3297  		t.Assert(err, IsNil)
  3298  	default:
  3299  		t.Fatal("unknown backend")
  3300  	}
  3301  
  3302  	if createBucket {
  3303  		_, err = cloud.MakeBucket(&MakeBucketInput{})
  3304  		t.Assert(err, IsNil)
  3305  
  3306  		s.removeBucket = append(s.removeBucket, cloud)
  3307  	}
  3308  
  3309  	return
  3310  }
  3311  
  3312  func (s *GoofysTest) TestVFS(t *C) {
  3313  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3314  	cloud2 := s.newBackend(t, bucket, true)
  3315  
  3316  	// "mount" this 2nd cloud
  3317  	in, err := s.LookUpInode(t, "dir4")
  3318  	t.Assert(in, NotNil)
  3319  	t.Assert(err, IsNil)
  3320  
  3321  	in.dir.cloud = cloud2
  3322  	in.dir.mountPrefix = "cloud2Prefix/"
  3323  
  3324  	rootCloud, rootPath := in.cloud()
  3325  	t.Assert(rootCloud, NotNil)
  3326  	t.Assert(rootCloud == cloud2, Equals, true)
  3327  	t.Assert(rootPath, Equals, "cloud2Prefix")
  3328  
  3329  	// the mount would shadow dir4/file5
  3330  	_, err = in.LookUp("file5")
  3331  	t.Assert(err, Equals, fuse.ENOENT)
  3332  
  3333  	_, fh := in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
  3334  	err = fh.FlushFile()
  3335  	t.Assert(err, IsNil)
  3336  
  3337  	resp, err := cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile"})
  3338  	t.Assert(err, IsNil)
  3339  	defer resp.Body.Close()
  3340  
  3341  	err = s.getRoot(t).Rename("file1", in, "file2")
  3342  	t.Assert(err, Equals, syscall.EINVAL)
  3343  
  3344  	_, err = in.MkDir("subdir")
  3345  	t.Assert(err, IsNil)
  3346  
  3347  	subdirKey := "cloud2Prefix/subdir"
  3348  	if !cloud2.Capabilities().DirBlob {
  3349  		subdirKey += "/"
  3350  	}
  3351  
  3352  	_, err = cloud2.HeadBlob(&HeadBlobInput{Key: subdirKey})
  3353  	t.Assert(err, IsNil)
  3354  
  3355  	subdir, err := s.LookUpInode(t, "dir4/subdir")
  3356  	t.Assert(err, IsNil)
  3357  	t.Assert(subdir, NotNil)
  3358  	t.Assert(subdir.dir, NotNil)
  3359  	t.Assert(subdir.dir.cloud, IsNil)
  3360  
  3361  	subdirCloud, subdirPath := subdir.cloud()
  3362  	t.Assert(subdirCloud, NotNil)
  3363  	t.Assert(subdirCloud == cloud2, Equals, true)
  3364  	t.Assert(subdirPath, Equals, "cloud2Prefix/subdir")
  3365  
  3366  	// create another file inside subdir to make sure that our
  3367  	// mount check is correct for dir inside the root
  3368  	_, fh = subdir.Create("testfile2", fuseops.OpMetadata{uint32(os.Getpid())})
  3369  	err = fh.FlushFile()
  3370  	t.Assert(err, IsNil)
  3371  
  3372  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3373  	t.Assert(err, IsNil)
  3374  	defer resp.Body.Close()
  3375  
  3376  	err = subdir.Rename("testfile2", in, "testfile2")
  3377  	t.Assert(err, IsNil)
  3378  
  3379  	_, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3380  	t.Assert(err, Equals, fuse.ENOENT)
  3381  
  3382  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"})
  3383  	t.Assert(err, IsNil)
  3384  	defer resp.Body.Close()
  3385  
  3386  	err = in.Rename("testfile2", subdir, "testfile2")
  3387  	t.Assert(err, IsNil)
  3388  
  3389  	_, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"})
  3390  	t.Assert(err, Equals, fuse.ENOENT)
  3391  
  3392  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3393  	t.Assert(err, IsNil)
  3394  	defer resp.Body.Close()
  3395  }
  3396  
  3397  func (s *GoofysTest) TestMountsList(t *C) {
  3398  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3399  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  3400  
  3401  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3402  	cloud := s.newBackend(t, bucket, true)
  3403  
  3404  	root := s.getRoot(t)
  3405  	rootCloud := root.dir.cloud
  3406  
  3407  	s.fs.MountAll([]*Mount{
  3408  		&Mount{"dir4/cloud1", cloud, "", false},
  3409  	})
  3410  
  3411  	in, err := s.LookUpInode(t, "dir4")
  3412  	t.Assert(in, NotNil)
  3413  	t.Assert(err, IsNil)
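        	// inode 1 is the root (fuseops.RootInodeID), so dir4 should
        	// be the first child inode ever allocated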
  3414  	t.Assert(int(in.Id), Equals, 2)
  3415  
  3416  	s.readDirIntoCache(t, in.Id)
  3417  	// ensure that listing is listing mounts and root bucket in one go
  3418  	root.dir.cloud = nil
  3419  
  3420  	s.assertEntries(t, in, []string{"cloud1", "file5"})
  3421  
  3422  	c1, err := s.LookUpInode(t, "dir4/cloud1")
  3423  	t.Assert(err, IsNil)
  3424  	t.Assert(*c1.Name, Equals, "cloud1")
  3425  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3426  	t.Assert(int(c1.Id), Equals, 3)
  3427  
  3428  	// pretend we've passed the normal cache ttl
  3429  	s.fs.flags.TypeCacheTTL = 0
  3430  	s.fs.flags.StatCacheTTL = 0
  3431  
  3432  	// listing root again should not overwrite the mounts
  3433  	root.dir.cloud = rootCloud
  3434  
  3435  	s.readDirIntoCache(t, in.Parent.Id)
  3436  	s.assertEntries(t, in, []string{"cloud1", "file5"})
  3437  
  3438  	c1, err = s.LookUpInode(t, "dir4/cloud1")
  3439  	t.Assert(err, IsNil)
  3440  	t.Assert(*c1.Name, Equals, "cloud1")
  3441  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3442  	t.Assert(int(c1.Id), Equals, 3)
  3443  }
  3444  
  3445  func (s *GoofysTest) TestMountsNewDir(t *C) {
  3446  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3447  	cloud := s.newBackend(t, bucket, true)
  3448  
  3449  	_, err := s.LookUpInode(t, "dir5")
  3450  	t.Assert(err, NotNil)
  3451  	t.Assert(err, Equals, fuse.ENOENT)
  3452  
  3453  	s.fs.MountAll([]*Mount{
  3454  		&Mount{"dir5/cloud1", cloud, "", false},
  3455  	})
  3456  
  3457  	in, err := s.LookUpInode(t, "dir5")
  3458  	t.Assert(err, IsNil)
  3459  	t.Assert(in.isDir(), Equals, true)
  3460  
  3461  	c1, err := s.LookUpInode(t, "dir5/cloud1")
  3462  	t.Assert(err, IsNil)
  3463  	t.Assert(c1.isDir(), Equals, true)
  3464  	t.Assert(c1.dir.cloud, Equals, cloud)
  3465  }
  3466  
  3467  func (s *GoofysTest) TestMountsNewMounts(t *C) {
  3468  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3469  	cloud := s.newBackend(t, bucket, true)
  3470  
  3471  	// "mount" this 2nd cloud
  3472  	in, err := s.LookUpInode(t, "dir4")
  3473  	t.Assert(in, NotNil)
  3474  	t.Assert(err, IsNil)
  3475  
  3476  	s.fs.MountAll([]*Mount{
  3477  		&Mount{"dir4/cloud1", cloud, "", false},
  3478  	})
  3479  
  3480  	s.readDirIntoCache(t, in.Id)
  3481  
  3482  	c1, err := s.LookUpInode(t, "dir4/cloud1")
  3483  	t.Assert(err, IsNil)
  3484  	t.Assert(*c1.Name, Equals, "cloud1")
  3485  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3486  
  3487  	_, err = s.LookUpInode(t, "dir4/cloud2")
  3488  	t.Assert(err, Equals, fuse.ENOENT)
  3489  
  3490  	s.fs.MountAll([]*Mount{
  3491  		&Mount{"dir4/cloud1", cloud, "", false},
  3492  		&Mount{"dir4/cloud2", cloud, "cloudprefix", false},
  3493  	})
  3494  
  3495  	c2, err := s.LookUpInode(t, "dir4/cloud2")
  3496  	t.Assert(err, IsNil)
  3497  	t.Assert(*c2.Name, Equals, "cloud2")
  3498  	t.Assert(c2.dir.cloud == cloud, Equals, true)
  3499  	t.Assert(c2.dir.mountPrefix, Equals, "cloudprefix")
  3500  }
  3501  
  3502  func (s *GoofysTest) TestMountsError(t *C) {
  3503  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3504  	var cloud StorageBackend
  3505  	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
  3506  		// S3Backend can't detect that the bucket doesn't exist,
  3507  		// because HEADing an object always returns 404 NotFound
  3508  		// (instead of NoSuchBucket)
  3509  		flags := *s3.flags
  3510  		config := *s3.config
  3511  		flags.Endpoint = "0.0.0.0:0"
  3512  		var err error
  3513  		cloud, err = NewS3(bucket, &flags, &config)
  3514  		t.Assert(err, IsNil)
  3515  	} else if _, ok := s.cloud.(*ADLv1); ok {
  3516  		config, _ := s.fs.flags.Backend.(*ADLv1Config)
  3517  		config.Authorizer = nil
  3518  
  3519  		var err error
  3520  		cloud, err = NewADLv1(bucket, s.fs.flags, config)
  3521  		t.Assert(err, IsNil)
  3522  	} else if _, ok := s.cloud.(*ADLv2); ok {
  3523  		// ADLv2 currently doesn't detect that the bucket doesn't exist
  3524  		cloud = s.newBackend(t, bucket, false)
  3525  		adlCloud, _ := cloud.(*ADLv2)
  3526  		auth := adlCloud.client.BaseClient.Authorizer
  3527  		adlCloud.client.BaseClient.Authorizer = nil
  3528  		defer func() {
  3529  			adlCloud.client.BaseClient.Authorizer = auth
  3530  		}()
  3531  	} else {
  3532  		cloud = s.newBackend(t, bucket, false)
  3533  	}
  3534  
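        	// mount two flavors of broken backend (going by the type names, not
        	// verified here): StorageBackendInitError is a backend that is
        	// already failed, while StorageBackendInitWrapper wraps the real
        	// backend above and should fail lazily when its Init() runs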
  3535  	s.fs.MountAll([]*Mount{
  3536  		&Mount{"dir4/newerror", StorageBackendInitError{
  3537  			fmt.Errorf("foo"),
  3538  			Capabilities{},
  3539  		}, "errprefix1", false},
  3540  		&Mount{"dir4/initerror", &StorageBackendInitWrapper{
  3541  			StorageBackend: cloud,
  3542  			initKey:        "foobar",
  3543  		}, "errprefix2", false},
  3544  	})
  3545  
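        	// a mount that failed to initialize should expose exactly one
        	// synthetic file, INIT_ERR_BLOB, and nothing else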
  3546  	errfile, err := s.LookUpInode(t, "dir4/newerror/"+INIT_ERR_BLOB)
  3547  	t.Assert(err, IsNil)
  3548  	t.Assert(errfile.isDir(), Equals, false)
  3549  
  3550  	_, err = s.LookUpInode(t, "dir4/newerror/not_there")
  3551  	t.Assert(err, Equals, fuse.ENOENT)
  3552  
  3553  	errfile, err = s.LookUpInode(t, "dir4/initerror/"+INIT_ERR_BLOB)
  3554  	t.Assert(err, IsNil)
  3555  	t.Assert(errfile.isDir(), Equals, false)
  3556  
  3557  	_, err = s.LookUpInode(t, "dir4/initerror/not_there")
  3558  	t.Assert(err, Equals, fuse.ENOENT)
  3559  
  3560  	in, err := s.LookUpInode(t, "dir4/initerror")
  3561  	t.Assert(err, IsNil)
  3562  	t.Assert(in, NotNil)
  3563  
  3564  	t.Assert(in.dir.cloud.Capabilities().Name, Equals, cloud.Capabilities().Name)
  3565  }
  3566  
  3567  func (s *GoofysTest) TestMountsMultiLevel(t *C) {
  3568  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3569  
  3570  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3571  	cloud := s.newBackend(t, bucket, true)
  3572  
  3573  	s.fs.MountAll([]*Mount{
  3574  		&Mount{"dir4/sub/dir", cloud, "", false},
  3575  	})
  3576  
  3577  	sub, err := s.LookUpInode(t, "dir4/sub")
  3578  	t.Assert(err, IsNil)
  3579  	t.Assert(sub.isDir(), Equals, true)
  3580  
  3581  	s.assertEntries(t, sub, []string{"dir"})
  3582  }
  3583  
  3584  func (s *GoofysTest) TestMountsNested(t *C) {
  3585  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3586  	cloud := s.newBackend(t, bucket, true)
  3587  	s.testMountsNested(t, cloud, []*Mount{
  3588  		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
  3589  		&Mount{"dir5/in/", cloud, "b/", false},
  3590  	})
  3591  }
  3592  
  3593  // test that mount order doesn't matter for nested mounts
  3594  func (s *GoofysTest) TestMountsNestedReversed(t *C) {
  3595  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3596  	cloud := s.newBackend(t, bucket, true)
  3597  	s.testMountsNested(t, cloud, []*Mount{
  3598  		&Mount{"dir5/in/", cloud, "b/", false},
  3599  		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
  3600  	})
  3601  }
  3602  
  3603  func (s *GoofysTest) testMountsNested(t *C, cloud StorageBackend,
  3604  	mounts []*Mount) {
  3605  
  3606  	_, err := s.LookUpInode(t, "dir5")
  3607  	t.Assert(err, NotNil)
  3608  	t.Assert(err, Equals, fuse.ENOENT)
  3609  
  3610  	s.fs.MountAll(mounts)
  3611  
  3612  	in, err := s.LookUpInode(t, "dir5")
  3613  	t.Assert(err, IsNil)
  3614  
  3615  	s.readDirIntoCache(t, in.Id)
  3616  
  3617  	// sleep past the cache TTL to make sure the intermediate dirs never expire
  3618  	time.Sleep(time.Second)
  3619  	dir_in, err := s.LookUpInode(t, "dir5/in")
  3620  	t.Assert(err, IsNil)
  3621  	t.Assert(*dir_in.Name, Equals, "in")
  3622  
  3623  	s.readDirIntoCache(t, dir_in.Id)
  3624  
  3625  	dir_a, err := s.LookUpInode(t, "dir5/in/a")
  3626  	t.Assert(err, IsNil)
  3627  	t.Assert(*dir_a.Name, Equals, "a")
  3628  
  3629  	s.assertEntries(t, dir_a, []string{"dir"})
  3630  
  3631  	dir_dir, err := s.LookUpInode(t, "dir5/in/a/dir")
  3632  	t.Assert(err, IsNil)
  3633  	t.Assert(*dir_dir.Name, Equals, "dir")
  3634  	t.Assert(dir_dir.dir.cloud == cloud, Equals, true)
  3635  
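        	// creating files through each mount should place the blobs under
        	// that mount's prefix: "b/" for dir5/in and "a/dir/" for
        	// dir5/in/a/dir, as the GetBlob calls below verify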
  3636  	_, fh := dir_in.Create("testfile", fuseops.OpMetadata{Pid: uint32(os.Getpid())})
  3637  	err = fh.FlushFile()
  3638  	t.Assert(err, IsNil)
  3639  
  3640  	resp, err := cloud.GetBlob(&GetBlobInput{Key: "b/testfile"})
  3641  	t.Assert(err, IsNil)
  3642  	defer resp.Body.Close()
  3643  
  3644  	_, fh = dir_dir.Create("testfile", fuseops.OpMetadata{Pid: uint32(os.Getpid())})
  3645  	err = fh.FlushFile()
  3646  	t.Assert(err, IsNil)
  3647  
  3648  	resp, err = cloud.GetBlob(&GetBlobInput{Key: "a/dir/testfile"})
  3649  	t.Assert(err, IsNil)
  3650  	defer resp.Body.Close()
  3651  
  3652  	s.assertEntries(t, in, []string{"in"})
  3653  }
  3654  
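        // verifyFileData asserts that path (relative to mountPoint) holds the
        // given content; a nil content asserts that the file does not exist.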
  3655  func verifyFileData(t *C, mountPoint string, path string, content *string) {
  3656  	if !strings.HasSuffix(mountPoint, "/") {
  3657  		mountPoint = mountPoint + "/"
  3658  	}
  3659  	path = mountPoint + path
  3660  	data, err := ioutil.ReadFile(path)
  3661  	comment := Commentf("failed while verifying %v", path)
  3662  	if content != nil {
  3663  		t.Assert(err, IsNil, comment)
  3664  		t.Assert(strings.TrimSpace(string(data)), Equals, *content, comment)
  3665  	} else {
  3666  		t.Assert(err, Not(IsNil), comment)
  3667  		t.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true, comment)
  3668  	}
  3669  }
  3670  
  3671  func (s *GoofysTest) TestNestedMountUnmountSimple(t *C) {
  3672  	childBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3673  	childCloud := s.newBackend(t, childBucket, true)
  3674  
  3675  	parFileContent := "parent"
  3676  	childFileContent := "child"
  3677  	parEnv := map[string]*string{
  3678  		"childmnt/x/in_child_and_par": &parFileContent,
  3679  		"childmnt/x/in_par_only":      &parFileContent,
  3680  		"nonchildmnt/something":       &parFileContent,
  3681  	}
  3682  	childEnv := map[string]*string{
  3683  		"x/in_child_only":    &childFileContent,
  3684  		"x/in_child_and_par": &childFileContent,
  3685  	}
  3686  	s.setupBlobs(s.cloud, t, parEnv)
  3687  	s.setupBlobs(childCloud, t, childEnv)
  3688  
  3689  	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
  3690  	s.mount(t, rootMountPath)
  3691  	defer s.umount(t, rootMountPath)
  3692  	// Files under /tmp/fusetesting/ should all be from goofys root.
  3693  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
  3694  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
  3695  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3696  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
  3697  
  3698  	childMount := &Mount{"childmnt", childCloud, "", false}
  3699  	s.fs.Mount(childMount)
  3700  	// Now files under /tmp/fusetesting/childmnt should be from childBucket
  3701  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", nil)
  3702  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &childFileContent)
  3703  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", &childFileContent)
  3704  	// /tmp/fusetesting/nonchildmnt should be from parent bucket.
  3705  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3706  
  3707  	s.fs.Unmount(childMount.name)
  3708  	// Child is unmounted. So files under /tmp/fusetesting/ should all be from goofys root.
  3709  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
  3710  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
  3711  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3712  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
  3713  }
  3714  
  3715  func (s *GoofysTest) TestUnmountBucketWithChild(t *C) {
  3716  	// This bucket will be mounted at ${goofysroot}/c
  3717  	cBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3718  	cCloud := s.newBackend(t, cBucket, true)
  3719  
  3720  	// This bucket will be mounted at ${goofysroot}/c/c
  3721  	ccBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3722  	ccCloud := s.newBackend(t, ccBucket, true)
  3723  
  3724  	pFileContent := "parent"
  3725  	cFileContent := "child"
  3726  	ccFileContent := "childchild"
  3727  	pEnv := map[string]*string{
  3728  		"c/c/x/foo": &pFileContent,
  3729  	}
  3730  	cEnv := map[string]*string{
  3731  		"c/x/foo": &cFileContent,
  3732  	}
  3733  	ccEnv := map[string]*string{
  3734  		"x/foo": &ccFileContent,
  3735  	}
  3736  
  3737  	s.setupBlobs(s.cloud, t, pEnv)
  3738  	s.setupBlobs(cCloud, t, cEnv)
  3739  	s.setupBlobs(ccCloud, t, ccEnv)
  3740  
  3741  	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
  3742  	s.mount(t, rootMountPath)
  3743  	defer s.umount(t, rootMountPath)
  3744  	// c/c/x/foo should come from the root mount.
  3745  	verifyFileData(t, rootMountPath, "c/c/x/foo", &pFileContent)
  3746  
  3747  	cMount := &Mount{"c", cCloud, "", false}
  3748  	s.fs.Mount(cMount)
  3749  	// c/c/x/foo should now come from the "c" mount.
  3750  	verifyFileData(t, rootMountPath, "c/c/x/foo", &cFileContent)
  3751  
  3752  	ccMount := &Mount{"c/c", ccCloud, "", false}
  3753  	s.fs.Mount(ccMount)
  3754  	// c/c/x/foo should now come from the "c/c" mount.
  3755  	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
  3756  
  3757  	s.fs.Unmount(cMount.name)
  3758  	// c/c/x/foo should still come from the "c/c" mount.
  3759  	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
  3760  }
  3761  
  3762  func (s *GoofysTest) TestRmImplicitDir(t *C) {
  3763  	mountPoint := "/tmp/mnt" + s.fs.bucket
  3764  
  3765  	s.mount(t, mountPoint)
  3766  	defer s.umount(t, mountPoint)
  3767  
  3768  	defer os.Chdir("/")
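        	// registered after the deferred umount, so it runs first (LIFO):
        	// move the cwd out of the mount point before unmounting, since a
        	// cwd inside the mount could otherwise keep it busy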
  3769  
  3770  	dir, err := os.Open(mountPoint + "/dir2")
  3771  	t.Assert(err, IsNil)
  3772  	defer dir.Close()
  3773  
  3774  	err = dir.Chdir()
  3775  	t.Assert(err, IsNil)
  3776  
  3777  	err = os.RemoveAll(mountPoint + "/dir2")
  3778  	t.Assert(err, IsNil)
  3779  
  3780  	root, err := os.Open(mountPoint)
  3781  	t.Assert(err, IsNil)
  3782  	defer root.Close()
  3783  
  3784  	files, err := root.Readdirnames(0)
  3785  	t.Assert(err, IsNil)
  3786  	t.Assert(files, DeepEquals, []string{
  3787  		"dir1", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero",
  3788  	})
  3789  }
  3790  
  3791  func (s *GoofysTest) TestMount(t *C) {
  3792  	if os.Getenv("MOUNT") == "false" {
  3793  		t.Skip("Not mounting")
  3794  	}
  3795  
  3796  	mountPoint := "/tmp/mnt" + s.fs.bucket
  3797  
  3798  	s.mount(t, mountPoint)
  3799  	defer s.umount(t, mountPoint)
  3800  
  3801  	log.Printf("Mounted at %v", mountPoint)
  3802  
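        	// this test is interactive: stay mounted until SIGINT/SIGTERM so
        	// the filesystem can be poked by hand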
  3803  	c := make(chan os.Signal, 2)
  3804  	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
  3805  	<-c
  3806  }
  3807  
  3808  // checkSortedListsAreEqual returns nil if the two sorted lists are equal, and otherwise an error naming the elements unique to each side.
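        //
        // For example (a hand-checked sketch, not a call made in this file),
        //
        //	checkSortedListsAreEqual([]string{"a", "b"}, []string{"b", "c"})
        //
        // returns the error "only l1: 0:a, only l2: 1:c": each element missing
        // from the other list is reported as index:value within its own list.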
  3809  func checkSortedListsAreEqual(l1, l2 []string) error {
  3810  	i1, i2 := 0, 0
  3811  	onlyl1, onlyl2 := []string{}, []string{}
  3812  	for i1 < len(l1) && i2 < len(l2) {
  3813  		if l1[i1] == l2[i2] {
  3814  			i1++
  3815  			i2++
  3816  		} else if l1[i1] < l2[i2] {
  3817  			onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
  3818  			i1++
  3819  		} else {
  3820  			onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
  3821  			i2++
  3822  		}
  3824  	}
  3825  	for ; i1 < len(l1); i1++ {
  3826  		onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
  3827  	}
  3828  	for ; i2 < len(l2); i2++ {
  3829  		onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
  3830  	}
  3831  
  3832  	if len(onlyl1)+len(onlyl2) == 0 {
  3833  		return nil
  3834  	}
  3835  	toString := func(l []string) string {
  3836  		ret := []string{}
  3837  		// The list can contain a lot of elements. Show only ten and say
  3838  		// "and x more".
  3839  		for i := 0; i < len(l) && i < 10; i++ {
  3840  			ret = append(ret, l[i])
  3841  		}
  3842  		if len(ret) < len(l) {
  3843  			ret = append(ret, fmt.Sprintf("and %d more", len(l)-len(ret)))
  3844  		}
  3845  		return strings.Join(ret, ", ")
  3846  	}
  3847  	return fmt.Errorf("only l1: %+v, only l2: %+v",
  3848  		toString(onlyl1), toString(onlyl2))
  3849  }
  3850  
  3851  func (s *GoofysTest) TestReadDirDash(t *C) {
  3852  	if s.azurite {
  3853  		t.Skip("azurite doesn't support pagination")
  3854  	}
  3855  	root := s.getRoot(t)
  3856  	root.dir.mountPrefix = "prefix"
  3857  
  3858  	// SETUP
  3859  	// Add the following blobs
  3860  	// - prefix/2019/1
  3861  	// - prefix/2019-0000 to prefix/2019-4999
  3862  	// - prefix/20190000 to prefix/20194999
  3863  	// Fetching this result will need 3 pages in Azure (page size 5k)
  3864  	// and 11 pages in Amazon (page size 1k).
  3865  	// This setup will verify that we paginate and return results correctly before and after
  3866  	// seeing all contents that have a '-' ('-' < '/'). For more context read the comments in
  3867  	// dir.go::listBlobsSafe.
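        	// In ASCII, '-' (0x2d) < '/' (0x2f) < '0' (0x30), so the flat key
        	// order is 2019-0000..2019-4999, then 2019/1, then
        	// 20190000..20194999: the "2019" directory only turns up after all
        	// of the "2019-" names have been paged through.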
  3868  	blobs := make(map[string]*string)
  3869  	expect := []string{"2019"}
  3870  	blobs["prefix/2019/1"] = nil
  3871  	for i := 0; i < 5000; i++ {
  3872  		name := fmt.Sprintf("2019-%04d", i)
  3873  		expect = append(expect, name)
  3874  		blobs["prefix/"+name] = nil
  3875  	}
  3876  	for i := 0; i < 5000; i++ {
  3877  		name := fmt.Sprintf("2019%04d", i)
  3878  		expect = append(expect, name)
  3879  		blobs["prefix/"+name] = nil
  3880  	}
  3881  	s.setupBlobs(s.cloud, t, blobs)
  3882  
  3883  	// Read the directory and verify its contents.
  3884  	dh := root.OpenDir()
  3885  	defer dh.CloseDir()
  3886  
  3887  	children := namesOf(s.readDirFully(t, dh))
  3888  	t.Assert(checkSortedListsAreEqual(children, expect), IsNil)
  3889  }
  3890  
  3891  func (s *GoofysTest) TestWriteListFlush(t *C) {
  3892  	root := s.getRoot(t)
  3893  	root.dir.mountPrefix = "this_test/"
  3894  
  3895  	dir, err := root.MkDir("dir")
  3896  	t.Assert(err, IsNil)
  3897  	s.fs.insertInode(root, dir)
  3898  
  3899  	in, fh := dir.Create("file1", fuseops.OpMetadata{})
  3900  	t.Assert(in, NotNil)
  3901  	t.Assert(fh, NotNil)
  3902  	s.fs.insertInode(dir, in)
  3903  
  3904  	s.assertEntries(t, dir, []string{"file1"})
  3905  
  3906  	// in should still be valid
  3907  	t.Assert(in.Parent, NotNil)
  3908  	t.Assert(in.Parent, Equals, dir)
  3909  	fh.FlushFile()
  3910  
  3911  	s.assertEntries(t, dir, []string{"file1"})
  3912  }
  3913  
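        // includes is a gocheck Checker that succeeds when the expected value
        // DeepEquals at least one element of the obtained array, slice, or
        // string.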
  3914  type includes struct{}
  3915  
  3916  func (c includes) Info() *CheckerInfo {
  3917  	return &CheckerInfo{Name: "includes", Params: []string{"obtained", "expected"}}
  3918  }
  3919  
  3920  func (c includes) Check(params []interface{}, names []string) (res bool, error string) {
  3921  	arr := reflect.ValueOf(params[0])
  3922  	switch arr.Kind() {
  3923  	case reflect.Array, reflect.Slice, reflect.String:
  3924  	default:
  3925  		panic(fmt.Sprintf("%v is not an array, slice, or string", names[0]))
  3926  	}
  3927  
  3928  	for i := 0; i < arr.Len(); i++ {
  3929  		v := arr.Index(i).Interface()
  3930  		res, error = DeepEquals.Check([]interface{}{v, params[1]}, names)
  3931  		if res {
  3932  			return
  3933  		}
  3934  		// a mismatch is not a failure; clear the explanation and keep
  3935  		// scanning the rest of the container
  3936  		error = ""
  3937  	}
  3939  	return
  3940  }
  3941  
  3942  func (s *GoofysTest) TestWriteUnlinkFlush(t *C) {
  3943  	root := s.getRoot(t)
  3944  
  3945  	dir, err := root.MkDir("dir")
  3946  	t.Assert(err, IsNil)
  3947  	s.fs.insertInode(root, dir)
  3948  
  3949  	in, fh := dir.Create("deleted", fuseops.OpMetadata{})
  3950  	t.Assert(in, NotNil)
  3951  	t.Assert(fh, NotNil)
  3952  	s.fs.insertInode(dir, in)
  3953  
  3954  	err = dir.Unlink("deleted")
  3955  	t.Assert(err, IsNil)
  3956  
  3957  	s.disableS3()
  3958  	err = fh.FlushFile()
  3959  	t.Assert(err, IsNil)
  3960  
  3961  	dh := dir.OpenDir()
  3962  	defer dh.CloseDir()
  3963  	t.Assert(namesOf(s.readDirFully(t, dh)), Not(includes{}), "deleted")
  3964  }
  3965  
  3966  func (s *GoofysTest) TestIssue474(t *C) {
  3967  	s.fs.flags.TypeCacheTTL = 1 * time.Second
  3968  	s.fs.flags.Cheap = true
  3969  
  3970  	p := "this_test/"
  3971  	root := s.getRoot(t)
  3972  	root.dir.mountPrefix = "this_test/"
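        	// assumption: a seqOpenDirScore of 2 is enough to trigger the
        	// sequential-listing heuristic, so listing 1/ below also slurps in
        	// the sibling 2/c/d in the same pass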
  3973  	root.dir.seqOpenDirScore = 2
  3974  
  3975  	blobs := make(map[string]*string)
  3976  
  3977  	in := []string{
  3978  		"1/a/b",
  3979  		"2/c/d",
  3980  	}
  3981  
  3982  	for _, b := range in {
  3983  		blobs[p+b] = nil
  3984  	}
  3985  
  3986  	s.setupBlobs(s.cloud, t, blobs)
  3987  
  3988  	dir1, err := s.LookUpInode(t, "1")
  3989  	t.Assert(err, IsNil)
  3990  	// this would list 1/ and slurp in 2/c/d at the same time
  3991  	s.assertEntries(t, dir1, []string{"a"})
  3992  
  3993  	// 2/ will expire and require re-listing. ensure that we don't
  3994  	// remove any children as stale while we update
  3995  	time.Sleep(time.Second)
  3996  
  3997  	dir2, err := s.LookUpInode(t, "2")
  3998  	t.Assert(err, IsNil)
  3999  	s.assertEntries(t, dir2, []string{"c"})
  4000  }
  4001  
  4002  func (s *GoofysTest) TestReadExternalChangesFuse(t *C) {
  4003  	s.fs.flags.StatCacheTTL = 1 * time.Second
  4004  
  4005  	mountPoint := "/tmp/mnt" + s.fs.bucket
  4006  
  4007  	s.mount(t, mountPoint)
  4008  	defer s.umount(t, mountPoint)
  4009  
  4010  	file := "file1"
  4011  	filePath := mountPoint + "/file1"
  4012  
  4013  	buf, err := ioutil.ReadFile(filePath)
  4014  	t.Assert(err, IsNil)
  4015  	t.Assert(string(buf), Equals, file)
  4016  
  4017  	update := "file2"
  4018  	_, err = s.cloud.PutBlob(&PutBlobInput{
  4019  		Key:  file,
  4020  		Body: bytes.NewReader([]byte(update)),
  4021  		Size: PUInt64(uint64(len(update))),
  4022  	})
  4023  	t.Assert(err, IsNil)
  4024  
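        	// let the 1s StatCacheTTL lapse so the external update becomes
        	// visible to the next read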
  4025  	time.Sleep(1 * time.Second)
  4026  
  4027  	buf, err = ioutil.ReadFile(filePath)
  4028  	t.Assert(err, IsNil)
  4029  	t.Assert(string(buf), Equals, update)
  4030  
  4031  	// the next read shouldn't talk to the cloud
  4032  	root := s.getRoot(t)
  4033  	root.dir.cloud = &StorageBackendInitError{
  4034  		syscall.ENONET, *root.dir.cloud.Capabilities(),
  4035  	}
  4036  
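        	// every backend call now fails with ENONET, so a successful read
        	// below can only have been served from the cache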
  4037  	buf, err = ioutil.ReadFile(filePath)
  4038  	t.Assert(err, IsNil)
  4039  	t.Assert(string(buf), Equals, update)
  4040  }
  4041  
  4042  func (s *GoofysTest) TestReadMyOwnWriteWithExternalChangesFuse(t *C) {
  4043  	s.fs.flags.StatCacheTTL = 1 * time.Second
  4044  
  4045  	mountPoint := "/tmp/mnt" + s.fs.bucket
  4046  
  4047  	s.mount(t, mountPoint)
  4048  	defer s.umount(t, mountPoint)
  4049  
  4050  	file := "file1"
  4051  	filePath := mountPoint + "/file1"
  4052  
  4053  	buf, err := ioutil.ReadFile(filePath)
  4054  	t.Assert(err, IsNil)
  4055  	t.Assert(string(buf), Equals, file)
  4056  
  4057  	update := "file2"
  4058  	_, err = s.cloud.PutBlob(&PutBlobInput{
  4059  		Key:  file,
  4060  		Body: bytes.NewReader([]byte(update)),
  4061  		Size: PUInt64(uint64(len(update))),
  4062  	})
  4063  	t.Assert(err, IsNil)
  4064  
  4065  	time.Sleep(1 * time.Second)
  4066  
  4067  	fh, err := os.Create(filePath)
  4068  	t.Assert(err, IsNil)
  4069  
  4070  	_, err = fh.WriteString("file3")
  4071  	t.Assert(err, IsNil)
  4072  	// we can't flush yet because if we did, we would be reading
  4073  	// the new copy from the cloud and that's not the point of this
  4074  	// test
  4075  	defer fh.Close()
  4076  
  4077  	buf, err = ioutil.ReadFile(filePath)
  4078  	t.Assert(err, IsNil)
  4079  	// disabled: we can't actually read back our own update
  4080  	_ = buf
  4081  	//t.Assert(string(buf), Equals, "file3")
  4082  }
  4083  
  4084  func (s *GoofysTest) TestReadNewFileWithExternalChangesFuse(t *C) {
  4085  	s.fs.flags.StatCacheTTL = 1 * time.Second
  4086  
  4087  	mountPoint := "/tmp/mnt" + s.fs.bucket
  4088  
  4089  	s.mount(t, mountPoint)
  4090  	defer s.umount(t, mountPoint)
  4091  
  4092  	filePath := mountPoint + "/filex"
  4093  
  4094  	fh, err := os.Create(filePath)
  4095  	t.Assert(err, IsNil)
  4096  
  4097  	// update := "file2"
  4098  	// _, err = s.cloud.PutBlob(&PutBlobInput{
  4099  	// 	Key:  file,
  4100  	// 	Body: bytes.NewReader([]byte(update)),
  4101  	// 	Size: PUInt64(uint64(len(update))),
  4102  	// })
  4103  	// t.Assert(err, IsNil)
  4104  
  4105  	_, err = fh.WriteString("filex")
  4106  	t.Assert(err, IsNil)
  4107  	// we can't flush yet because if we did, we would be reading
  4108  	// the new copy from the cloud and that's not the point of this
  4109  	// test
  4110  	defer fh.Close()
  4111  
  4112  	// disabled: we can't actually read back our own update
  4113  	//buf, err := ioutil.ReadFile(filePath)
  4114  	//t.Assert(err, IsNil)
  4115  	//t.Assert(string(buf), Equals, "filex")
  4116  }