github.com/nefixestrada/goofys@v0.23.1/internal/goofys_test.go

     1  // Copyright 2015 - 2017 Ka-Hing Cheung
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package internal
    16  
    17  import (
    18  	. "github.com/kahing/goofys/api/common"
    19  
    20  	"bufio"
    21  	"bytes"
    22  	"fmt"
    23  	"io"
    24  	"io/ioutil"
    25  	"math/rand"
    26  	"net"
    27  	"os"
    28  	"os/exec"
    29  	"os/signal"
    30  	"os/user"
    31  	"runtime"
    32  	"sort"
    33  	"strconv"
    34  	"strings"
    35  	"sync"
    36  	"syscall"
    37  	"testing"
    38  	"time"
    39  
    40  	"context"
    41  
    42  	"github.com/aws/aws-sdk-go/aws"
    43  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    44  	"github.com/aws/aws-sdk-go/aws/credentials"
    45  
    46  	"github.com/Azure/azure-storage-blob-go/azblob"
    47  	"github.com/Azure/go-autorest/autorest"
    48  	"github.com/Azure/go-autorest/autorest/azure"
    49  	azureauth "github.com/Azure/go-autorest/autorest/azure/auth"
    50  
    51  	"github.com/kahing/go-xattr"
    52  
    53  	"github.com/jacobsa/fuse"
    54  	"github.com/jacobsa/fuse/fuseops"
    55  	"github.com/jacobsa/fuse/fuseutil"
    56  
    57  	"github.com/sirupsen/logrus"
    58  
    59  	. "gopkg.in/check.v1"
    60  )
    61  
     62  // so I don't get complaints about unused imports
    63  var ignored = logrus.DebugLevel
    64  
    65  func currentUid() uint32 {
    66  	user, err := user.Current()
    67  	if err != nil {
    68  		panic(err)
    69  	}
    70  
    71  	uid, err := strconv.ParseUint(user.Uid, 10, 32)
    72  	if err != nil {
    73  		panic(err)
    74  	}
    75  
    76  	return uint32(uid)
    77  }
    78  
    79  func currentGid() uint32 {
    80  	user, err := user.Current()
    81  	if err != nil {
    82  		panic(err)
    83  	}
    84  
    85  	gid, err := strconv.ParseUint(user.Gid, 10, 32)
    86  	if err != nil {
    87  		panic(err)
    88  	}
    89  
    90  	return uint32(gid)
    91  }
    92  
    93  type GoofysTest struct {
    94  	fs        *Goofys
    95  	ctx       context.Context
    96  	awsConfig *aws.Config
    97  	cloud     StorageBackend
    98  	emulator  bool
    99  	azurite   bool
   100  
   101  	removeBucket []StorageBackend
   102  
   103  	env map[string]*string
   104  }
   105  
   106  func Test(t *testing.T) {
   107  	TestingT(t)
   108  }
   109  
   110  var _ = Suite(&GoofysTest{})
   111  
   112  func logOutput(t *C, tag string, r io.ReadCloser) {
   113  	in := bufio.NewScanner(r)
   114  
   115  	for in.Scan() {
   116  		t.Log(tag, in.Text())
   117  	}
   118  }
   119  
   120  func waitFor(t *C, addr string) (err error) {
   121  	// wait for it to listen on port
   122  	for i := 0; i < 10; i++ {
   123  		var conn net.Conn
   124  		conn, err = net.Dial("tcp", addr)
   125  		if err == nil {
   126  			// we are done!
   127  			conn.Close()
   128  			return
   129  		} else {
    130  			t.Logf("Could not connect: %v", err)
   131  			time.Sleep(100 * time.Millisecond)
   132  		}
   133  	}
   134  
   135  	return
   136  }
   137  
    138  func (t *GoofysTest) deleteBlobsParallelly(cloud StorageBackend, blobs []string) error {
    139  	sem := make(semaphore, 100)
    140  	sem.P(100)
         	// mu guards err, which the worker goroutines write (previously a data race)
         	var mu sync.Mutex
    141  	var err error
    142  	for _, blobOuter := range blobs {
    143  		sem.V(1)
    144  		go func(blob string) {
    145  			defer sem.P(1)
    146  			_, localerr := cloud.DeleteBlob(&DeleteBlobInput{blob})
    147  			if localerr != nil && localerr != syscall.ENOENT {
         				mu.Lock()
    148  				err = localerr
         				mu.Unlock()
    149  			}
    150  		}(blobOuter)
         		mu.Lock()
         		failed := err != nil
         		mu.Unlock()
         		if failed {
    152  			break
    153  		}
    154  	}
    155  	sem.V(100)
    156  	return err
    157  }
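
         // What follows is a rough, standalone sketch (not used by the tests) of
         // the same bounded fan-out that deleteBlobsParallelly builds from the
         // package's channel-based semaphore, where P(n) appears to deposit n
         // tokens and V(n) to take n, so the trailing V(100) doubles as a join
         // barrier. boundedParallel is hypothetical and uses only the standard
         // library:
         func boundedParallel(items []string, limit int, work func(string)) {
         	slots := make(chan struct{}, limit) // counting semaphore
         	var wg sync.WaitGroup
         	for _, it := range items {
         		slots <- struct{}{} // acquire: blocks once `limit` workers are in flight
         		wg.Add(1)
         		go func(it string) {
         			defer wg.Done()
         			defer func() { <-slots }() // release the slot
         			work(it)
         		}(it)
         	}
         	wg.Wait() // join: every worker has finished
         }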
   158  
    159  // groupByDecreasingDepths takes a slice of path strings and returns the paths
    160  // grouped by `depth`, where depth(a/b/c)=2 and depth(a/b/)=1.
    161  // The groups are returned in decreasing order of depth.
    162  // - Inp: [] Out: []
    163  // - Inp: ["a/b1/", "a/b/c1", "a/b2", "a/b/c2"]
    164  //   Out: [["a/b/c1", "a/b/c2"], ["a/b1/", "a/b2"]]
    165  // - Inp: ["a/b1/", "z/a/b/c1", "a/b2", "z/a/b/c2"]
    166  //   Out: [["z/a/b/c1", "z/a/b/c2"], ["a/b1/", "a/b2"]]
    167  func groupByDecreasingDepths(items []string) [][]string {
   168  	depthToGroup := map[int][]string{}
   169  	for _, item := range items {
   170  		depth := len(strings.Split(strings.TrimRight(item, "/"), "/"))
   171  		if _, ok := depthToGroup[depth]; !ok {
   172  			depthToGroup[depth] = []string{}
   173  		}
   174  		depthToGroup[depth] = append(depthToGroup[depth], item)
   175  	}
   176  	decreasingDepths := []int{}
   177  	for depth := range depthToGroup {
   178  		decreasingDepths = append(decreasingDepths, depth)
   179  	}
   180  	sort.Sort(sort.Reverse(sort.IntSlice(decreasingDepths)))
   181  	ret := [][]string{}
   182  	for _, depth := range decreasingDepths {
    183  		group := depthToGroup[depth]
   184  		ret = append(ret, group)
   185  	}
   186  	return ret
   187  }
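
         // For example, deleting blobs shaped like the default test env (see
         // setupDefaultEnv below):
         //
         //	groupByDecreasingDepths([]string{"dir2/dir3/file4", "dir1/file3", "file1"})
         //	// => [["dir2/dir3/file4"], ["dir1/file3"], ["file1"]]
         //
         // so leaf blobs always sort ahead of their parent directories.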
   188  
   189  func (t *GoofysTest) DeleteADLBlobs(cloud StorageBackend, items []string) error {
    190  	// If we delete a directory that's not empty, ADL{v1|v2} returns an error. That
    191  	// can happen if we want to delete both "dir1" and "dir1/file" but delete them
    192  	// in the wrong order.
    193  	// So we partition the items to delete into groups where all items in a group
    194  	// have the same depth - depth(/a/b/c) = 2, depth(/a/b/) = 1.
    195  	// We then walk the groups in descending depth order, deleting each group in parallel.
    196  	for _, group := range groupByDecreasingDepths(items) {
   197  		err := t.deleteBlobsParallelly(cloud, group)
   198  		if err != nil {
   199  			return err
   200  		}
   201  	}
   202  	return nil
   203  }
   204  
   205  func (s *GoofysTest) selectTestConfig(t *C, flags *FlagStorage) (conf S3Config) {
   206  	(&conf).Init()
   207  
   208  	if hasEnv("AWS") {
   209  		if isTravis() {
   210  			conf.Region = "us-east-1"
   211  		} else {
   212  			conf.Region = "us-west-2"
   213  		}
   214  		profile := os.Getenv("AWS")
   215  		if profile != "" {
   216  			if profile != "-" {
   217  				conf.Profile = profile
   218  			} else {
   219  				conf.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
   220  				conf.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
   221  			}
   222  		}
   223  	} else if hasEnv("GCS") {
   224  		conf.Region = "us-west1"
   225  		conf.Profile = os.Getenv("GCS")
   226  		flags.Endpoint = "http://storage.googleapis.com"
   227  	} else if hasEnv("MINIO") {
   228  		conf.Region = "us-east-1"
   229  		conf.AccessKey = "Q3AM3UQ867SPQQA43P2F"
   230  		conf.SecretKey = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
   231  		flags.Endpoint = "https://play.minio.io:9000"
   232  	} else {
   233  		s.emulator = true
   234  
   235  		conf.Region = "us-west-2"
   236  		conf.AccessKey = "foo"
   237  		conf.SecretKey = "bar"
   238  		flags.Endpoint = "http://127.0.0.1:8080"
   239  	}
   240  
   241  	return
   242  }
   243  
   244  func (s *GoofysTest) waitForEmulator(t *C) {
   245  	if s.emulator {
   246  		addr := "127.0.0.1:8080"
   247  
   248  		err := waitFor(t, addr)
   249  		t.Assert(err, IsNil)
   250  	}
   251  }
   252  
   253  func (s *GoofysTest) SetUpSuite(t *C) {
   254  }
   255  
   256  func (s *GoofysTest) deleteBucket(cloud StorageBackend) error {
   257  	param := &ListBlobsInput{}
   258  
   259  	// Azure datalake v1,v2 need special handling.
   260  	adlKeysToRemove := make([]string, 0)
   261  	for {
   262  		resp, err := cloud.ListBlobs(param)
   263  		if err != nil {
   264  			return err
   265  		}
   266  
   267  		keysToRemove := []string{}
   268  		for _, o := range resp.Items {
   269  			keysToRemove = append(keysToRemove, *o.Key)
   270  		}
   271  		if len(keysToRemove) != 0 {
   272  			switch cloud.(type) {
   273  			case *ADLv1, *ADLv2:
    274  				// ADLv{1|2} support directories, so a directory can be removed only once
    275  				// it is empty. We therefore remove the blobs in reverse depth order via
    276  				// DeleteADLBlobs after this loop.
   277  				adlKeysToRemove = append(adlKeysToRemove, keysToRemove...)
   278  			default:
   279  				_, err = cloud.DeleteBlobs(&DeleteBlobsInput{Items: keysToRemove})
   280  				if err != nil {
   281  					return err
   282  				}
   283  			}
   284  		}
   285  		if resp.IsTruncated {
   286  			param.ContinuationToken = resp.NextContinuationToken
   287  		} else {
   288  			break
   289  		}
   290  	}
   291  
   292  	if len(adlKeysToRemove) != 0 {
   293  		err := s.DeleteADLBlobs(cloud, adlKeysToRemove)
   294  		if err != nil {
   295  			return err
   296  		}
   297  	}
   298  
   299  	_, err := cloud.RemoveBucket(&RemoveBucketInput{})
   300  	return err
   301  }
   302  
   303  func (s *GoofysTest) TearDownTest(t *C) {
   304  	for _, cloud := range s.removeBucket {
   305  		err := s.deleteBucket(cloud)
   306  		t.Assert(err, IsNil)
   307  	}
   308  	s.removeBucket = nil
   309  }
   310  
   311  func (s *GoofysTest) removeBlob(cloud StorageBackend, t *C, blobPath string) {
   312  	params := &DeleteBlobInput{
   313  		Key: blobPath,
   314  	}
   315  	_, err := cloud.DeleteBlob(params)
   316  	t.Assert(err, IsNil)
   317  }
   318  
   319  func (s *GoofysTest) setupBlobs(cloud StorageBackend, t *C, env map[string]*string) {
   320  
   321  	// concurrency = 100
   322  	throttler := make(semaphore, 100)
   323  	throttler.P(100)
   324  
         	// mu guards globalErr, which the worker goroutines write
         	var mu sync.Mutex
    325  	var globalErr error
   326  	for path, c := range env {
   327  		throttler.V(1)
   328  		go func(path string, content *string) {
   329  			dir := false
   330  			if content == nil {
   331  				if strings.HasSuffix(path, "/") {
   332  					if cloud.Capabilities().DirBlob {
   333  						path = strings.TrimRight(path, "/")
   334  					}
   335  					dir = true
   336  					content = PString("")
   337  				} else {
   338  					content = &path
   339  				}
   340  			}
   341  			defer throttler.P(1)
   342  			params := &PutBlobInput{
   343  				Key:  path,
   344  				Body: bytes.NewReader([]byte(*content)),
   345  				Size: PUInt64(uint64(len(*content))),
   346  				Metadata: map[string]*string{
   347  					"name": aws.String(path + "+/#%00"),
   348  				},
   349  				DirBlob: dir,
   350  			}
   351  
   352  			_, err := cloud.PutBlob(params)
    353  			if err != nil {
         				mu.Lock()
    354  				globalErr = err
         				mu.Unlock()
    355  			}
   356  			t.Assert(err, IsNil)
   357  		}(path, c)
   358  	}
   359  	throttler.V(100)
   360  	throttler = make(semaphore, 100)
   361  	throttler.P(100)
   362  	t.Assert(globalErr, IsNil)
   363  
   364  	// double check, except on AWS S3, because there we sometimes
   365  	// hit 404 NoSuchBucket and there's no way to distinguish that
   366  	// from 404 KeyNotFound
   367  	if !hasEnv("AWS") {
   368  		for path, c := range env {
   369  			throttler.V(1)
   370  			go func(path string, content *string) {
   371  				defer throttler.P(1)
   372  				params := &HeadBlobInput{Key: path}
   373  				res, err := cloud.HeadBlob(params)
   374  				t.Assert(err, IsNil)
   375  				if content != nil {
   376  					t.Assert(res.Size, Equals, uint64(len(*content)))
   377  				} else if strings.HasSuffix(path, "/") || path == "zero" {
   378  					t.Assert(res.Size, Equals, uint64(0))
   379  				} else {
   380  					t.Assert(res.Size, Equals, uint64(len(path)))
   381  				}
   382  			}(path, c)
   383  		}
   384  		throttler.V(100)
   385  		t.Assert(globalErr, IsNil)
   386  	}
   387  }
   388  
   389  func (s *GoofysTest) setupEnv(t *C, env map[string]*string, public bool) {
   390  	if public {
   391  		if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
   392  			s3.config.ACL = "public-read"
   393  		} else {
   394  			t.Error("Not S3 backend")
   395  		}
   396  	}
   397  
   398  	_, err := s.cloud.MakeBucket(&MakeBucketInput{})
   399  	t.Assert(err, IsNil)
   400  
   401  	if !s.emulator {
   402  		//time.Sleep(time.Second)
   403  	}
   404  
   405  	s.setupBlobs(s.cloud, t, env)
   406  
   407  	t.Log("setupEnv done")
   408  }
   409  
   410  func (s *GoofysTest) setupDefaultEnv(t *C, public bool) {
   411  	s.env = map[string]*string{
   412  		"file1":           nil,
   413  		"file2":           nil,
   414  		"dir1/file3":      nil,
   415  		"dir2/dir3/":      nil,
   416  		"dir2/dir3/file4": nil,
   417  		"dir4/":           nil,
   418  		"dir4/file5":      nil,
   419  		"empty_dir/":      nil,
   420  		"empty_dir2/":     nil,
   421  		"zero":            PString(""),
   422  	}
   423  
   424  	s.setupEnv(t, s.env, public)
   425  }
   426  
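         // SetUpTest selects the backend from the CLOUD env var (s3, gcs, azblob,
         // adlv1, adlv2). With MOUNT=false it creates and seeds a throwaway bucket
         // named goofys-test-<random>; any other MOUNT value names an existing
         // bucket. For s3, the AWS, GCS and MINIO env vars select real-service
         // credentials in selectTestConfig; with none of them set, the tests
         // target a local emulator at 127.0.0.1:8080.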
   427  func (s *GoofysTest) SetUpTest(t *C) {
   428  	log.Infof("Starting at %v", time.Now())
   429  
   430  	var bucket string
   431  	mount := os.Getenv("MOUNT")
   432  
   433  	if mount != "false" {
   434  		bucket = mount
   435  	} else {
   436  		bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
   437  	}
   438  	uid, gid := MyUserAndGroup()
   439  	flags := &FlagStorage{
   440  		DirMode:  0700,
   441  		FileMode: 0700,
   442  		Uid:      uint32(uid),
   443  		Gid:      uint32(gid),
   444  	}
   445  
   446  	cloud := os.Getenv("CLOUD")
   447  
   448  	if cloud == "s3" {
   449  		s.emulator = !hasEnv("AWS")
   450  		s.waitForEmulator(t)
   451  
   452  		conf := s.selectTestConfig(t, flags)
   453  		flags.Backend = &conf
   454  
   455  		s3, err := NewS3(bucket, flags, &conf)
   456  		t.Assert(err, IsNil)
   457  
   458  		s.cloud = s3
   459  		s3.aws = hasEnv("AWS")
   460  		if s3.aws {
   461  			s.cloud = &S3BucketEventualConsistency{s3}
   462  		}
   463  
   464  		if !hasEnv("MINIO") {
   465  			s3.Handlers.Sign.Clear()
   466  			s3.Handlers.Sign.PushBack(SignV2)
   467  			s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
   468  		}
   469  		_, err = s3.ListBuckets(nil)
   470  		t.Assert(err, IsNil)
   471  
   472  	} else if cloud == "gcs" {
   473  		conf := s.selectTestConfig(t, flags)
   474  		flags.Backend = &conf
   475  
   476  		var err error
   477  		s.cloud, err = NewGCS3(bucket, flags, &conf)
   478  		t.Assert(s.cloud, NotNil)
   479  		t.Assert(err, IsNil)
   480  	} else if cloud == "azblob" {
   481  		config, err := AzureBlobConfig(os.Getenv("ENDPOINT"), "", "blob")
   482  		t.Assert(err, IsNil)
   483  
   484  		if config.Endpoint == AzuriteEndpoint {
   485  			s.azurite = true
   486  			s.emulator = true
   487  			s.waitForEmulator(t)
   488  		}
   489  
   490  		// Azurite's SAS is buggy, ex: https://github.com/Azure/Azurite/issues/216
   491  		if os.Getenv("SAS_EXPIRE") != "" {
   492  			expire, err := time.ParseDuration(os.Getenv("SAS_EXPIRE"))
   493  			t.Assert(err, IsNil)
   494  
   495  			config.TokenRenewBuffer = expire / 2
   496  			credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
   497  			t.Assert(err, IsNil)
   498  
   499  			// test sas token config
   500  			config.SasToken = func() (string, error) {
   501  				sasQueryParams, err := azblob.AccountSASSignatureValues{
   502  					Protocol:   azblob.SASProtocolHTTPSandHTTP,
   503  					StartTime:  time.Now().UTC().Add(-1 * time.Hour),
   504  					ExpiryTime: time.Now().UTC().Add(expire),
   505  					Services:   azblob.AccountSASServices{Blob: true}.String(),
   506  					ResourceTypes: azblob.AccountSASResourceTypes{
   507  						Service:   true,
   508  						Container: true,
   509  						Object:    true,
   510  					}.String(),
   511  					Permissions: azblob.AccountSASPermissions{
   512  						Read:   true,
   513  						Write:  true,
   514  						Delete: true,
   515  						List:   true,
   516  						Create: true,
   517  					}.String(),
   518  				}.NewSASQueryParameters(credential)
   519  				if err != nil {
   520  					return "", err
   521  				}
   522  				return sasQueryParams.Encode(), nil
   523  			}
   524  		}
   525  
   526  		flags.Backend = &config
   527  
   528  		s.cloud, err = NewAZBlob(bucket, &config)
   529  		t.Assert(err, IsNil)
   530  		t.Assert(s.cloud, NotNil)
   531  	} else if cloud == "adlv1" {
   532  		cred := azureauth.NewClientCredentialsConfig(
   533  			os.Getenv("ADLV1_CLIENT_ID"),
   534  			os.Getenv("ADLV1_CLIENT_CREDENTIAL"),
   535  			os.Getenv("ADLV1_TENANT_ID"))
   536  		auth, err := cred.Authorizer()
   537  		t.Assert(err, IsNil)
   538  
   539  		config := ADLv1Config{
   540  			Endpoint:   os.Getenv("ENDPOINT"),
   541  			Authorizer: auth,
   542  		}
   543  		config.Init()
   544  
   545  		flags.Backend = &config
   546  
   547  		s.cloud, err = NewADLv1(bucket, flags, &config)
   548  		t.Assert(err, IsNil)
   549  		t.Assert(s.cloud, NotNil)
   550  	} else if cloud == "adlv2" {
   551  		var err error
   552  		var auth autorest.Authorizer
   553  
   554  		if os.Getenv("AZURE_STORAGE_ACCOUNT") != "" && os.Getenv("AZURE_STORAGE_KEY") != "" {
   555  			auth = &AZBlobConfig{
   556  				AccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"),
   557  				AccountKey:  os.Getenv("AZURE_STORAGE_KEY"),
   558  			}
   559  		} else {
   560  			cred := azureauth.NewClientCredentialsConfig(
   561  				os.Getenv("ADLV2_CLIENT_ID"),
   562  				os.Getenv("ADLV2_CLIENT_CREDENTIAL"),
   563  				os.Getenv("ADLV2_TENANT_ID"))
   564  			cred.Resource = azure.PublicCloud.ResourceIdentifiers.Storage
   565  			auth, err = cred.Authorizer()
   566  			t.Assert(err, IsNil)
   567  		}
   568  
   569  		config := ADLv2Config{
   570  			Endpoint:   os.Getenv("ENDPOINT"),
   571  			Authorizer: auth,
   572  		}
   573  
   574  		flags.Backend = &config
   575  
   576  		s.cloud, err = NewADLv2(bucket, flags, &config)
   577  		t.Assert(err, IsNil)
   578  		t.Assert(s.cloud, NotNil)
   579  	} else {
   580  		t.Fatal("Unsupported backend")
   581  	}
   582  
   583  	if mount == "false" {
   584  		s.removeBucket = append(s.removeBucket, s.cloud)
   585  		s.setupDefaultEnv(t, false)
   586  	} else {
   587  		_, err := s.cloud.MakeBucket(&MakeBucketInput{})
   588  		if err == fuse.EEXIST {
   589  			err = nil
   590  		}
   591  		t.Assert(err, IsNil)
   592  	}
   593  
   594  	s.fs = NewGoofys(context.Background(), bucket, flags)
   595  	t.Assert(s.fs, NotNil)
   596  
   597  	s.ctx = context.Background()
   598  
   599  	if hasEnv("GCS") {
   600  		flags.Endpoint = "http://storage.googleapis.com"
   601  	}
   602  }
   603  
   604  func (s *GoofysTest) getRoot(t *C) (inode *Inode) {
   605  	inode = s.fs.inodes[fuseops.RootInodeID]
   606  	t.Assert(inode, NotNil)
   607  	return
   608  }
   609  
   610  func (s *GoofysTest) TestGetRootInode(t *C) {
   611  	root := s.getRoot(t)
   612  	t.Assert(root.Id, Equals, fuseops.InodeID(fuseops.RootInodeID))
   613  }
   614  
   615  func (s *GoofysTest) TestGetRootAttributes(t *C) {
   616  	_, err := s.getRoot(t).GetAttributes()
   617  	t.Assert(err, IsNil)
   618  }
   619  
   620  func (s *GoofysTest) ForgetInode(t *C, inode fuseops.InodeID) {
   621  	err := s.fs.ForgetInode(s.ctx, &fuseops.ForgetInodeOp{Inode: inode})
   622  	t.Assert(err, IsNil)
   623  }
   624  
   625  func (s *GoofysTest) LookUpInode(t *C, name string) (in *Inode, err error) {
   626  	parent := s.getRoot(t)
   627  
   628  	for {
   629  		idx := strings.Index(name, "/")
   630  		if idx == -1 {
   631  			break
   632  		}
   633  
   634  		dirName := name[0:idx]
   635  		name = name[idx+1:]
   636  
   637  		lookup := fuseops.LookUpInodeOp{
   638  			Parent: parent.Id,
   639  			Name:   dirName,
   640  		}
   641  
   642  		err = s.fs.LookUpInode(nil, &lookup)
   643  		if err != nil {
   644  			return
   645  		}
   646  		parent = s.fs.inodes[lookup.Entry.Child]
   647  	}
   648  
   649  	lookup := fuseops.LookUpInodeOp{
   650  		Parent: parent.Id,
   651  		Name:   name,
   652  	}
   653  
   654  	err = s.fs.LookUpInode(nil, &lookup)
   655  	if err != nil {
   656  		return
   657  	}
   658  	in = s.fs.inodes[lookup.Entry.Child]
   659  	return
   660  }
   661  
   662  func (s *GoofysTest) TestSetup(t *C) {
   663  }
   664  
   665  func (s *GoofysTest) TestLookUpInode(t *C) {
   666  	_, err := s.LookUpInode(t, "file1")
   667  	t.Assert(err, IsNil)
   668  
   669  	_, err = s.LookUpInode(t, "fileNotFound")
   670  	t.Assert(err, Equals, fuse.ENOENT)
   671  
   672  	_, err = s.LookUpInode(t, "dir1/file3")
   673  	t.Assert(err, IsNil)
   674  
   675  	_, err = s.LookUpInode(t, "dir2/dir3")
   676  	t.Assert(err, IsNil)
   677  
   678  	_, err = s.LookUpInode(t, "dir2/dir3/file4")
   679  	t.Assert(err, IsNil)
   680  
   681  	_, err = s.LookUpInode(t, "empty_dir")
   682  	t.Assert(err, IsNil)
   683  }
   684  
   685  func (s *GoofysTest) TestPanicWrapper(t *C) {
   686  	fs := FusePanicLogger{s.fs}
   687  	err := fs.GetInodeAttributes(nil, &fuseops.GetInodeAttributesOp{
   688  		Inode: 1234,
   689  	})
   690  	t.Assert(err, Equals, fuse.EIO)
   691  }
   692  
   693  func (s *GoofysTest) TestGetInodeAttributes(t *C) {
   694  	inode, err := s.getRoot(t).LookUp("file1")
   695  	t.Assert(err, IsNil)
   696  
   697  	attr, err := inode.GetAttributes()
   698  	t.Assert(err, IsNil)
   699  	t.Assert(attr.Size, Equals, uint64(len("file1")))
   700  }
   701  
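         // readDirFully drains a DirHandle following the fuse ReadDir offset
         // contract: offset 0 must yield ".", offset 1 "..", and offsets 2..n the
         // real entries, until ReadDir returns nil. The two dot entries are
         // asserted but not included in the returned slice.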
   702  func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []DirHandleEntry) {
   703  	dh.mu.Lock()
   704  	defer dh.mu.Unlock()
   705  
   706  	en, err := dh.ReadDir(fuseops.DirOffset(0))
   707  	t.Assert(err, IsNil)
   708  	t.Assert(en, NotNil)
   709  	t.Assert(en.Name, Equals, ".")
   710  
   711  	en, err = dh.ReadDir(fuseops.DirOffset(1))
   712  	t.Assert(err, IsNil)
   713  	t.Assert(en, NotNil)
   714  	t.Assert(en.Name, Equals, "..")
   715  
   716  	for i := fuseops.DirOffset(2); ; i++ {
   717  		en, err = dh.ReadDir(i)
   718  		t.Assert(err, IsNil)
   719  
   720  		if en == nil {
   721  			return
   722  		}
   723  
   724  		entries = append(entries, *en)
   725  	}
   726  }
   727  
   728  func namesOf(entries []DirHandleEntry) (names []string) {
   729  	for _, en := range entries {
   730  		names = append(names, en.Name)
   731  	}
   732  	return
   733  }
   734  
   735  func (s *GoofysTest) assertEntries(t *C, in *Inode, names []string) {
   736  	dh := in.OpenDir()
   737  	defer dh.CloseDir()
   738  
   739  	t.Assert(namesOf(s.readDirFully(t, dh)), DeepEquals, names)
   740  }
   741  
   742  func (s *GoofysTest) readDirIntoCache(t *C, inode fuseops.InodeID) {
   743  	openDirOp := fuseops.OpenDirOp{Inode: inode}
   744  	err := s.fs.OpenDir(nil, &openDirOp)
   745  	t.Assert(err, IsNil)
   746  
   747  	readDirOp := fuseops.ReadDirOp{
   748  		Inode:  inode,
   749  		Handle: openDirOp.Handle,
   750  		Dst:    make([]byte, 8*1024),
   751  	}
   752  
   753  	err = s.fs.ReadDir(nil, &readDirOp)
   754  	t.Assert(err, IsNil)
   755  }
   756  
   757  func (s *GoofysTest) TestReadDirCacheLookup(t *C) {
   758  	s.fs.flags.StatCacheTTL = 1 * time.Minute
   759  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
   760  
   761  	s.readDirIntoCache(t, fuseops.RootInodeID)
   762  	s.disableS3()
   763  
   764  	// should be cached so lookup should not need to talk to s3
   765  	entries := []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"}
   766  	for _, en := range entries {
   767  		err := s.fs.LookUpInode(nil, &fuseops.LookUpInodeOp{
   768  			Parent: fuseops.RootInodeID,
   769  			Name:   en,
   770  		})
   771  		t.Assert(err, IsNil)
   772  	}
   773  }
   774  
   775  func (s *GoofysTest) TestReadDirWithExternalChanges(t *C) {
   776  	s.fs.flags.TypeCacheTTL = time.Second
   777  
   778  	dir1, err := s.LookUpInode(t, "dir1")
   779  	t.Assert(err, IsNil)
   780  
   781  	defaultEntries := []string{
   782  		"dir1", "dir2", "dir4", "empty_dir",
   783  		"empty_dir2", "file1", "file2", "zero"}
   784  	s.assertEntries(t, s.getRoot(t), defaultEntries)
   785  	// dir1 has file3 and nothing else.
   786  	s.assertEntries(t, dir1, []string{"file3"})
   787  
   788  	// Do the following 'external' changes in s3 without involving goofys.
   789  	// - Remove file1, add file3.
   790  	// - Remove dir1/file3. Given that dir1 has just this one file,
   791  	//   we are effectively removing dir1 as well.
   792  	s.removeBlob(s.cloud, t, "file1")
   793  	s.setupBlobs(s.cloud, t, map[string]*string{"file3": nil})
   794  	s.removeBlob(s.cloud, t, "dir1/file3")
   795  
   796  	time.Sleep(s.fs.flags.TypeCacheTTL)
   797  	// newEntries = `defaultEntries` - dir1 - file1 + file3.
   798  	newEntries := []string{
   799  		"dir2", "dir4", "empty_dir", "empty_dir2",
   800  		"file2", "file3", "zero"}
   801  	if s.cloud.Capabilities().DirBlob {
   802  		// dir1 is not automatically deleted
   803  		newEntries = append([]string{"dir1"}, newEntries...)
   804  	}
   805  	s.assertEntries(t, s.getRoot(t), newEntries)
   806  }
   807  
   808  func (s *GoofysTest) TestReadDir(t *C) {
   809  	// test listing /
   810  	dh := s.getRoot(t).OpenDir()
   811  	defer dh.CloseDir()
   812  
   813  	s.assertEntries(t, s.getRoot(t), []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"})
   814  
   815  	// test listing dir1/
   816  	in, err := s.LookUpInode(t, "dir1")
   817  	t.Assert(err, IsNil)
   818  	s.assertEntries(t, in, []string{"file3"})
   819  
   820  	// test listing dir2/
   821  	in, err = s.LookUpInode(t, "dir2")
   822  	t.Assert(err, IsNil)
   823  	s.assertEntries(t, in, []string{"dir3"})
   824  
   825  	// test listing dir2/dir3/
   826  	in, err = s.LookUpInode(t, "dir2/dir3")
   827  	t.Assert(err, IsNil)
   828  	s.assertEntries(t, in, []string{"file4"})
   829  }
   830  
   831  func (s *GoofysTest) TestReadFiles(t *C) {
   832  	parent := s.getRoot(t)
   833  	dh := parent.OpenDir()
   834  	defer dh.CloseDir()
   835  
   836  	var entries []*DirHandleEntry
   837  
   838  	dh.mu.Lock()
   839  	for i := fuseops.DirOffset(0); ; i++ {
   840  		en, err := dh.ReadDir(i)
   841  		t.Assert(err, IsNil)
   842  
   843  		if en == nil {
   844  			break
   845  		}
   846  
   847  		entries = append(entries, en)
   848  	}
   849  	dh.mu.Unlock()
   850  
   851  	for _, en := range entries {
   852  		if en.Type == fuseutil.DT_File {
   853  			in, err := parent.LookUp(en.Name)
   854  			t.Assert(err, IsNil)
   855  
   856  			fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
   857  			t.Assert(err, IsNil)
   858  
   859  			buf := make([]byte, 4096)
   860  
   861  			nread, err := fh.ReadFile(0, buf)
   862  			if en.Name == "zero" {
   863  				t.Assert(nread, Equals, 0)
   864  			} else {
   865  				t.Assert(nread, Equals, len(en.Name))
   866  				buf = buf[0:nread]
   867  				t.Assert(string(buf), Equals, en.Name)
   868  			}
    869  		}
   872  	}
   873  }
   874  
   875  func (s *GoofysTest) TestReadOffset(t *C) {
   876  	root := s.getRoot(t)
   877  	f := "file1"
   878  
   879  	in, err := root.LookUp(f)
   880  	t.Assert(err, IsNil)
   881  
   882  	fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
   883  	t.Assert(err, IsNil)
   884  
   885  	buf := make([]byte, 4096)
   886  
   887  	nread, err := fh.ReadFile(1, buf)
   888  	t.Assert(err, IsNil)
   889  	t.Assert(nread, Equals, len(f)-1)
   890  	t.Assert(string(buf[0:nread]), DeepEquals, f[1:])
   891  
   892  	r := rand.New(rand.NewSource(time.Now().UnixNano()))
   893  
   894  	for i := 0; i < 3; i++ {
   895  		off := r.Int31n(int32(len(f)))
   896  		nread, err = fh.ReadFile(int64(off), buf)
   897  		t.Assert(err, IsNil)
   898  		t.Assert(nread, Equals, len(f)-int(off))
   899  		t.Assert(string(buf[0:nread]), DeepEquals, f[off:])
   900  	}
   901  }
   902  
   903  func (s *GoofysTest) TestCreateFiles(t *C) {
   904  	fileName := "testCreateFile"
   905  
   906  	_, fh := s.getRoot(t).Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())})
   907  
   908  	err := fh.FlushFile()
   909  	t.Assert(err, IsNil)
   910  
   911  	resp, err := s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   912  	t.Assert(err, IsNil)
   913  	t.Assert(resp.HeadBlobOutput.Size, DeepEquals, uint64(0))
   914  	defer resp.Body.Close()
   915  
   916  	_, err = s.getRoot(t).LookUp(fileName)
   917  	t.Assert(err, IsNil)
   918  
   919  	fileName = "testCreateFile2"
   920  	s.testWriteFile(t, fileName, 1, 128*1024)
   921  
   922  	inode, err := s.getRoot(t).LookUp(fileName)
   923  	t.Assert(err, IsNil)
   924  
   925  	fh, err = inode.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
   926  	t.Assert(err, IsNil)
   927  
   928  	err = fh.FlushFile()
   929  	t.Assert(err, IsNil)
   930  
   931  	resp, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   932  	t.Assert(err, IsNil)
   933  	// ADLv1 doesn't return size when we do a GET
   934  	if _, adlv1 := s.cloud.(*ADLv1); !adlv1 {
   935  		t.Assert(resp.HeadBlobOutput.Size, Equals, uint64(1))
   936  	}
   937  	defer resp.Body.Close()
   938  }
   939  
   940  func (s *GoofysTest) TestUnlink(t *C) {
   941  	fileName := "file1"
   942  
   943  	err := s.getRoot(t).Unlink(fileName)
   944  	t.Assert(err, IsNil)
   945  
   946  	// make sure that it's gone from s3
   947  	_, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   948  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
   949  }
   950  
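         // FileHandleReader adapts a FileHandle to io.ReadSeeker (SEEK_END is not
         // supported) so stock io helpers can drive goofys reads. A usage sketch,
         // assuming fh is an open *FileHandle:
         //
         //	fr := &FileHandleReader{fs: s.fs, fh: fh}
         //	n, err := io.Copy(ioutil.Discard, io.LimitReader(fr, 4096))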
   951  type FileHandleReader struct {
   952  	fs     *Goofys
   953  	fh     *FileHandle
   954  	offset int64
   955  }
   956  
   957  func (r *FileHandleReader) Read(p []byte) (nread int, err error) {
   958  	nread, err = r.fh.ReadFile(r.offset, p)
   959  	r.offset += int64(nread)
   960  	return
   961  }
   962  
   963  func (r *FileHandleReader) Seek(offset int64, whence int) (int64, error) {
   964  	switch whence {
   965  	case 0:
   966  		r.offset = offset
   967  	case 1:
   968  		r.offset += offset
   969  	default:
   970  		panic(fmt.Sprintf("unsupported whence: %v", whence))
   971  	}
   972  
   973  	return r.offset, nil
   974  }
   975  
   976  func (s *GoofysTest) testWriteFile(t *C, fileName string, size int64, write_size int) {
   977  	s.testWriteFileAt(t, fileName, int64(0), size, write_size)
   978  }
   979  
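         // testWriteFileAt streams size bytes from SeqReader (a deterministic
         // sequential byte source defined elsewhere in this package) through a
         // file handle in write_size chunks starting at offset, flushes, and then
         // verifies the blob with CompareReader twice: once with default-sized
         // reads and once with exact 4KB reads to cover the aligned-read path.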
   980  func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, write_size int) {
   981  	var fh *FileHandle
   982  	root := s.getRoot(t)
   983  
   984  	lookup := fuseops.LookUpInodeOp{
   985  		Parent: root.Id,
   986  		Name:   fileName,
   987  	}
   988  	err := s.fs.LookUpInode(nil, &lookup)
   989  	if err != nil {
   990  		if err == fuse.ENOENT {
   991  			create := fuseops.CreateFileOp{
   992  				Parent: root.Id,
   993  				Name:   fileName,
   994  			}
   995  			err = s.fs.CreateFile(nil, &create)
   996  			t.Assert(err, IsNil)
   997  
   998  			fh = s.fs.fileHandles[create.Handle]
   999  		} else {
  1000  			t.Assert(err, IsNil)
  1001  		}
  1002  	} else {
  1003  		in := s.fs.inodes[lookup.Entry.Child]
  1004  		fh, err = in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
  1005  		t.Assert(err, IsNil)
  1006  	}
  1007  
  1008  	buf := make([]byte, write_size)
  1009  	nwritten := offset
  1010  
  1011  	src := io.LimitReader(&SeqReader{}, size)
  1012  
  1013  	for {
  1014  		nread, err := src.Read(buf)
  1015  		if err == io.EOF {
  1016  			t.Assert(nwritten, Equals, size)
  1017  			break
  1018  		}
  1019  		t.Assert(err, IsNil)
  1020  
  1021  		err = fh.WriteFile(nwritten, buf[:nread])
  1022  		t.Assert(err, IsNil)
  1023  		nwritten += int64(nread)
  1024  	}
  1025  
  1026  	err = fh.FlushFile()
  1027  	t.Assert(err, IsNil)
  1028  
  1029  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: fileName})
  1030  	t.Assert(err, IsNil)
  1031  	t.Assert(resp.Size, Equals, uint64(size+offset))
  1032  
  1033  	fr := &FileHandleReader{s.fs, fh, offset}
  1034  	diff, err := CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 0)
  1035  	t.Assert(err, IsNil)
  1036  	t.Assert(diff, Equals, -1)
  1037  	t.Assert(fr.offset, Equals, size)
  1038  
  1039  	err = fh.FlushFile()
  1040  	t.Assert(err, IsNil)
  1041  
   1042  	// read again with exactly 4KB reads to catch the aligned-read case
  1043  	fr = &FileHandleReader{s.fs, fh, offset}
  1044  	diff, err = CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 4096)
  1045  	t.Assert(err, IsNil)
  1046  	t.Assert(diff, Equals, -1)
  1047  	t.Assert(fr.offset, Equals, size)
  1048  
  1049  	fh.Release()
  1050  }
  1051  
  1052  func (s *GoofysTest) TestWriteLargeFile(t *C) {
  1053  	s.testWriteFile(t, "testLargeFile", int64(READAHEAD_CHUNK)+1024*1024, 128*1024)
  1054  	s.testWriteFile(t, "testLargeFile2", int64(READAHEAD_CHUNK), 128*1024)
  1055  	s.testWriteFile(t, "testLargeFile3", int64(READAHEAD_CHUNK)+1, 128*1024)
  1056  }
  1057  
  1058  func (s *GoofysTest) TestWriteReallyLargeFile(t *C) {
  1059  	s.testWriteFile(t, "testLargeFile", 512*1024*1024+1, 128*1024)
  1060  }
  1061  
  1062  func (s *GoofysTest) TestWriteReplicatorThrottle(t *C) {
  1063  	s.fs.replicators = Ticket{Total: 1}.Init()
  1064  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
  1065  }
  1066  
  1067  func (s *GoofysTest) TestReadWriteMinimumMemory(t *C) {
  1068  	if _, ok := s.cloud.(*ADLv1); ok {
  1069  		s.fs.bufferPool.maxBuffers = 4
  1070  	} else {
  1071  		s.fs.bufferPool.maxBuffers = 2
  1072  	}
  1073  	s.fs.bufferPool.computedMaxbuffers = s.fs.bufferPool.maxBuffers
  1074  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
  1075  }
  1076  
  1077  func (s *GoofysTest) TestWriteManyFilesFile(t *C) {
  1078  	var files sync.WaitGroup
  1079  
  1080  	for i := 0; i < 21; i++ {
  1081  		files.Add(1)
  1082  		fileName := "testSmallFile" + strconv.Itoa(i)
  1083  		go func() {
  1084  			defer files.Done()
  1085  			s.testWriteFile(t, fileName, 1, 128*1024)
  1086  		}()
  1087  	}
  1088  
  1089  	files.Wait()
  1090  }
  1091  
  1092  func (s *GoofysTest) testWriteFileNonAlign(t *C) {
  1093  	s.testWriteFile(t, "testWriteFileNonAlign", 6*1024*1024, 128*1024+1)
  1094  }
  1095  
  1096  func (s *GoofysTest) TestReadRandom(t *C) {
  1097  	size := int64(21 * 1024 * 1024)
  1098  
  1099  	s.testWriteFile(t, "testLargeFile", size, 128*1024)
  1100  	in, err := s.LookUpInode(t, "testLargeFile")
  1101  	t.Assert(err, IsNil)
  1102  
  1103  	fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
  1104  	t.Assert(err, IsNil)
  1105  	fr := &FileHandleReader{s.fs, fh, 0}
  1106  
  1107  	src := rand.NewSource(time.Now().UnixNano())
  1108  	truth := &SeqReader{}
  1109  
  1110  	for i := 0; i < 10; i++ {
  1111  		offset := src.Int63() % (size / 2)
  1112  
  1113  		fr.Seek(offset, 0)
  1114  		truth.Seek(offset, 0)
  1115  
  1116  		// read 5MB+1 from that offset
  1117  		nread := int64(5*1024*1024 + 1)
  1118  		CompareReader(io.LimitReader(fr, nread), io.LimitReader(truth, nread), 0)
  1119  	}
  1120  }
  1121  
  1122  func (s *GoofysTest) TestMkDir(t *C) {
  1123  	_, err := s.LookUpInode(t, "new_dir/file")
  1124  	t.Assert(err, Equals, fuse.ENOENT)
  1125  
  1126  	dirName := "new_dir"
  1127  	inode, err := s.getRoot(t).MkDir(dirName)
  1128  	t.Assert(err, IsNil)
  1129  	t.Assert(*inode.FullName(), Equals, dirName)
  1130  
  1131  	_, err = s.LookUpInode(t, dirName)
  1132  	t.Assert(err, IsNil)
  1133  
  1134  	fileName := "file"
  1135  	_, fh := inode.Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())})
  1136  
  1137  	err = fh.FlushFile()
  1138  	t.Assert(err, IsNil)
  1139  
  1140  	_, err = s.LookUpInode(t, dirName+"/"+fileName)
  1141  	t.Assert(err, IsNil)
  1142  }
  1143  
  1144  func (s *GoofysTest) TestRmDir(t *C) {
  1145  	root := s.getRoot(t)
  1146  
  1147  	err := root.RmDir("dir1")
  1148  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1149  
  1150  	err = root.RmDir("dir2")
  1151  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1152  
  1153  	err = root.RmDir("empty_dir")
  1154  	t.Assert(err, IsNil)
  1155  
  1156  }
  1157  
  1158  func (s *GoofysTest) TestRenamePreserveMetadata(t *C) {
  1159  	if _, ok := s.cloud.(*ADLv1); ok {
  1160  		t.Skip("ADLv1 doesn't support metadata")
  1161  	}
  1162  	root := s.getRoot(t)
  1163  
  1164  	from, to := "file1", "new_file"
  1165  
  1166  	metadata := make(map[string]*string)
  1167  	metadata["foo"] = aws.String("bar")
  1168  
  1169  	_, err := s.cloud.CopyBlob(&CopyBlobInput{
  1170  		Source:      from,
  1171  		Destination: from,
  1172  		Metadata:    metadata,
  1173  	})
  1174  	t.Assert(err, IsNil)
  1175  
  1176  	err = root.Rename(from, root, to)
  1177  	t.Assert(err, IsNil)
  1178  
  1179  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1180  	t.Assert(err, IsNil)
  1181  	t.Assert(resp.Metadata["foo"], NotNil)
  1182  	t.Assert(*resp.Metadata["foo"], Equals, "bar")
  1183  }
  1184  
  1185  func (s *GoofysTest) TestRenameLarge(t *C) {
  1186  	s.testWriteFile(t, "large_file", 21*1024*1024, 128*1024)
  1187  
  1188  	root := s.getRoot(t)
  1189  
  1190  	from, to := "large_file", "large_file2"
  1191  	err := root.Rename(from, root, to)
  1192  	t.Assert(err, IsNil)
  1193  }
  1194  
  1195  func (s *GoofysTest) TestRenameToExisting(t *C) {
  1196  	root := s.getRoot(t)
  1197  
  1198  	// cache these 2 files first
  1199  	_, err := s.LookUpInode(t, "file1")
  1200  	t.Assert(err, IsNil)
  1201  
  1202  	_, err = s.LookUpInode(t, "file2")
  1203  	t.Assert(err, IsNil)
  1204  
  1205  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1206  		OldParent: root.Id,
  1207  		NewParent: root.Id,
  1208  		OldName:   "file1",
  1209  		NewName:   "file2",
  1210  	})
  1211  	t.Assert(err, IsNil)
  1212  
  1213  	file1 := root.findChild("file1")
  1214  	t.Assert(file1, IsNil)
  1215  
  1216  	file2 := root.findChild("file2")
  1217  	t.Assert(file2, NotNil)
  1218  	t.Assert(*file2.Name, Equals, "file2")
  1219  }
  1220  
  1221  func (s *GoofysTest) TestBackendListPagination(t *C) {
  1222  	if _, ok := s.cloud.(*ADLv1); ok {
  1223  		t.Skip("ADLv1 doesn't have pagination")
  1224  	}
  1225  	if s.azurite {
  1226  		// https://github.com/Azure/Azurite/issues/262
  1227  		t.Skip("Azurite doesn't support pagination")
  1228  	}
  1229  
  1230  	var itemsPerPage int
  1231  	switch s.cloud.Delegate().(type) {
  1232  	case *S3Backend, *GCS3:
  1233  		itemsPerPage = 1000
  1234  	case *AZBlob, *ADLv2:
  1235  		itemsPerPage = 5000
  1236  	default:
  1237  		t.Fatalf("unknown backend: %T", s.cloud)
  1238  	}
  1239  
  1240  	root := s.getRoot(t)
  1241  	root.dir.mountPrefix = "this_test/"
  1242  
  1243  	blobs := make(map[string]*string)
  1244  	expect := make([]string, 0)
  1245  	for i := 0; i < itemsPerPage+1; i++ {
  1246  		b := fmt.Sprintf("%08v", i)
  1247  		blobs["this_test/"+b] = nil
  1248  		expect = append(expect, b)
  1249  	}
  1250  
  1251  	switch s.cloud.(type) {
  1252  	case *ADLv1, *ADLv2:
  1253  		// these backends don't support parallel delete so I
  1254  		// am doing this here
  1255  		defer func() {
  1256  			var wg sync.WaitGroup
  1257  
   1258  			for b := range blobs {
  1259  				SmallActionsGate.Take(1, true)
  1260  				wg.Add(1)
  1261  
  1262  				go func(key string) {
  1263  					// ignore the error here,
  1264  					// anything we didn't cleanup
  1265  					// will be handled by teardown
  1266  					_, _ = s.cloud.DeleteBlob(&DeleteBlobInput{key})
  1267  					SmallActionsGate.Return(1)
  1268  					wg.Done()
  1269  				}(b)
  1270  			}
  1271  
  1272  			wg.Wait()
  1273  		}()
  1274  	}
  1275  
  1276  	s.setupBlobs(s.cloud, t, blobs)
  1277  
  1278  	dh := root.OpenDir()
  1279  	defer dh.CloseDir()
  1280  
  1281  	children := namesOf(s.readDirFully(t, dh))
  1282  	t.Assert(children, DeepEquals, expect)
  1283  }
  1284  
  1285  func (s *GoofysTest) TestBackendListPrefix(t *C) {
  1286  	res, err := s.cloud.ListBlobs(&ListBlobsInput{
  1287  		Prefix:    PString("random"),
  1288  		Delimiter: PString("/"),
  1289  	})
  1290  	t.Assert(err, IsNil)
  1291  	t.Assert(len(res.Prefixes), Equals, 0)
  1292  	t.Assert(len(res.Items), Equals, 0)
  1293  
  1294  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1295  		Prefix:    PString("empty_dir"),
  1296  		Delimiter: PString("/"),
  1297  	})
  1298  	t.Assert(err, IsNil)
  1299  	t.Assert(len(res.Prefixes), Not(Equals), 0)
  1300  	t.Assert(*res.Prefixes[0].Prefix, Equals, "empty_dir/")
  1301  	t.Assert(len(res.Items), Equals, 0)
  1302  
  1303  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1304  		Prefix:    PString("empty_dir/"),
  1305  		Delimiter: PString("/"),
  1306  	})
  1307  	t.Assert(err, IsNil)
  1308  	t.Assert(len(res.Prefixes), Equals, 0)
  1309  	t.Assert(len(res.Items), Equals, 1)
  1310  	t.Assert(*res.Items[0].Key, Equals, "empty_dir/")
  1311  
  1312  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1313  		Prefix:    PString("file1"),
  1314  		Delimiter: PString("/"),
  1315  	})
  1316  	t.Assert(err, IsNil)
  1317  	t.Assert(len(res.Prefixes), Equals, 0)
  1318  	t.Assert(len(res.Items), Equals, 1)
  1319  	t.Assert(*res.Items[0].Key, Equals, "file1")
  1320  
  1321  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1322  		Prefix:    PString("file1/"),
  1323  		Delimiter: PString("/"),
  1324  	})
  1325  	t.Assert(err, IsNil)
  1326  	t.Assert(len(res.Prefixes), Equals, 0)
  1327  	t.Assert(len(res.Items), Equals, 0)
  1328  
  1329  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1330  		Prefix:    PString("dir2/"),
  1331  		Delimiter: PString("/"),
  1332  	})
  1333  	t.Assert(err, IsNil)
  1334  	t.Assert(len(res.Prefixes), Equals, 1)
  1335  	t.Assert(*res.Prefixes[0].Prefix, Equals, "dir2/dir3/")
  1336  	if s.cloud.Capabilities().DirBlob {
  1337  		t.Assert(len(res.Items), Equals, 1)
  1338  		t.Assert(*res.Items[0].Key, Equals, "dir2/")
  1339  	} else {
  1340  		t.Assert(len(res.Items), Equals, 0)
  1341  	}
  1342  
  1343  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1344  		Prefix:    PString("dir2/dir3/"),
  1345  		Delimiter: PString("/"),
  1346  	})
  1347  	t.Assert(err, IsNil)
  1348  	t.Assert(len(res.Prefixes), Equals, 0)
  1349  	t.Assert(len(res.Items), Equals, 2)
  1350  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/")
  1351  	t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4")
  1352  
  1353  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1354  		Prefix: PString("dir2/"),
  1355  	})
  1356  	t.Assert(err, IsNil)
  1357  	t.Assert(len(res.Prefixes), Equals, 0)
  1358  	t.Assert(len(res.Items), Equals, 2)
  1359  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/")
  1360  	t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4")
  1361  
  1362  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1363  		Prefix: PString("dir2/dir3/file4"),
  1364  	})
  1365  	t.Assert(err, IsNil)
  1366  	t.Assert(len(res.Prefixes), Equals, 0)
  1367  	t.Assert(len(res.Items), Equals, 1)
  1368  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/file4")
  1369  }
  1370  
  1371  func (s *GoofysTest) TestRenameDir(t *C) {
  1372  	s.fs.flags.StatCacheTTL = 0
  1373  
  1374  	root := s.getRoot(t)
  1375  
  1376  	err := root.Rename("empty_dir", root, "dir1")
  1377  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1378  
  1379  	err = root.Rename("empty_dir", root, "new_dir")
  1380  	t.Assert(err, IsNil)
  1381  
  1382  	dir2, err := s.LookUpInode(t, "dir2")
  1383  	t.Assert(err, IsNil)
  1384  	t.Assert(dir2, NotNil)
  1385  
  1386  	_, err = s.LookUpInode(t, "new_dir2")
  1387  	t.Assert(err, Equals, fuse.ENOENT)
  1388  
  1389  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1390  		OldParent: root.Id,
  1391  		NewParent: root.Id,
  1392  		OldName:   "dir2",
  1393  		NewName:   "new_dir2",
  1394  	})
  1395  	t.Assert(err, IsNil)
  1396  
  1397  	_, err = s.LookUpInode(t, "dir2/dir3")
  1398  	t.Assert(err, Equals, fuse.ENOENT)
  1399  
  1400  	_, err = s.LookUpInode(t, "dir2/dir3/file4")
  1401  	t.Assert(err, Equals, fuse.ENOENT)
  1402  
  1403  	new_dir2, err := s.LookUpInode(t, "new_dir2")
  1404  	t.Assert(err, IsNil)
  1405  	t.Assert(new_dir2, NotNil)
  1406  	t.Assert(dir2.Id, Equals, new_dir2.Id)
  1407  
  1408  	old, err := s.LookUpInode(t, "new_dir2/dir3/file4")
  1409  	t.Assert(err, IsNil)
  1410  	t.Assert(old, NotNil)
  1411  
  1412  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1413  		OldParent: root.Id,
  1414  		NewParent: root.Id,
  1415  		OldName:   "new_dir2",
  1416  		NewName:   "new_dir3",
  1417  	})
  1418  	t.Assert(err, IsNil)
  1419  
  1420  	new, err := s.LookUpInode(t, "new_dir3/dir3/file4")
  1421  	t.Assert(err, IsNil)
  1422  	t.Assert(new, NotNil)
  1423  	t.Assert(old.Id, Equals, new.Id)
  1424  
  1425  	_, err = s.LookUpInode(t, "new_dir2/dir3")
  1426  	t.Assert(err, Equals, fuse.ENOENT)
  1427  
  1428  	_, err = s.LookUpInode(t, "new_dir2/dir3/file4")
  1429  	t.Assert(err, Equals, fuse.ENOENT)
  1430  }
  1431  
  1432  func (s *GoofysTest) TestRename(t *C) {
  1433  	root := s.getRoot(t)
  1434  
  1435  	from, to := "empty_dir", "file1"
  1436  	err := root.Rename(from, root, to)
  1437  	t.Assert(err, Equals, fuse.ENOTDIR)
  1438  
  1439  	from, to = "file1", "empty_dir"
  1440  	err = root.Rename(from, root, to)
  1441  	t.Assert(err, Equals, syscall.EISDIR)
  1442  
  1443  	from, to = "file1", "new_file"
  1444  	err = root.Rename(from, root, to)
  1445  	t.Assert(err, IsNil)
  1446  
  1447  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1448  	t.Assert(err, IsNil)
  1449  
  1450  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from})
  1451  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
  1452  
  1453  	from, to = "file3", "new_file2"
  1454  	dir, _ := s.LookUpInode(t, "dir1")
  1455  	err = dir.Rename(from, root, to)
  1456  	t.Assert(err, IsNil)
  1457  
  1458  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1459  	t.Assert(err, IsNil)
  1460  
  1461  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from})
  1462  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
  1463  
  1464  	from, to = "no_such_file", "new_file"
  1465  	err = root.Rename(from, root, to)
  1466  	t.Assert(err, Equals, fuse.ENOENT)
  1467  
  1468  	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
  1469  		if !hasEnv("GCS") {
   1470  			// not really a rename, but it can be used by rename
  1471  			from, to = s.fs.bucket+"/file2", "new_file"
  1472  			_, err = s3.copyObjectMultipart(int64(len("file2")), from, to, "", nil, nil, nil)
  1473  			t.Assert(err, IsNil)
  1474  		}
  1475  	}
  1476  }
  1477  
  1478  func (s *GoofysTest) TestConcurrentRefDeref(t *C) {
  1479  	root := s.getRoot(t)
  1480  
  1481  	lookupOp := fuseops.LookUpInodeOp{
  1482  		Parent: root.Id,
  1483  		Name:   "file1",
  1484  	}
  1485  
  1486  	for i := 0; i < 20; i++ {
  1487  		err := s.fs.LookUpInode(nil, &lookupOp)
  1488  		t.Assert(err, IsNil)
  1489  
  1490  		var wg sync.WaitGroup
  1491  
  1492  		wg.Add(2)
  1493  		go func() {
  1494  			// we want to yield to the forget goroutine so that it's run first
  1495  			// to trigger this bug
  1496  			if i%2 == 0 {
  1497  				runtime.Gosched()
  1498  			}
  1499  			s.fs.LookUpInode(nil, &lookupOp)
  1500  			wg.Done()
  1501  		}()
  1502  		go func() {
  1503  			s.fs.ForgetInode(nil, &fuseops.ForgetInodeOp{
  1504  				Inode: lookupOp.Entry.Child,
  1505  				N:     1,
  1506  			})
  1507  			wg.Done()
  1508  		}()
  1509  
  1510  		wg.Wait()
  1511  	}
  1512  }
  1513  
  1514  func hasEnv(env string) bool {
  1515  	v := os.Getenv(env)
  1516  
  1517  	return !(v == "" || v == "0" || v == "false")
  1518  }
  1519  
  1520  func isTravis() bool {
  1521  	return hasEnv("TRAVIS")
  1522  }
  1523  
  1524  func isCatfs() bool {
  1525  	return hasEnv("CATFS")
  1526  }
  1527  
  1528  func (s *GoofysTest) mount(t *C, mountPoint string) {
  1529  	err := os.MkdirAll(mountPoint, 0700)
  1530  	t.Assert(err, IsNil)
  1531  
  1532  	server := fuseutil.NewFileSystemServer(s.fs)
  1533  
  1534  	if isCatfs() {
  1535  		s.fs.flags.MountOptions = make(map[string]string)
  1536  		s.fs.flags.MountOptions["allow_other"] = ""
  1537  	}
  1538  
  1539  	// Mount the file system.
  1540  	mountCfg := &fuse.MountConfig{
  1541  		FSName:                  s.fs.bucket,
  1542  		Options:                 s.fs.flags.MountOptions,
  1543  		ErrorLogger:             GetStdLogger(NewLogger("fuse"), logrus.ErrorLevel),
  1544  		DisableWritebackCaching: true,
  1545  	}
  1546  	if fuseLog.Level == logrus.DebugLevel {
  1547  		mountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel)
  1548  	}
  1549  
  1550  	_, err = fuse.Mount(mountPoint, server, mountCfg)
  1551  	t.Assert(err, IsNil)
  1552  
  1553  	if isCatfs() {
  1554  		cacheDir := mountPoint + "-cache"
  1555  		err := os.MkdirAll(cacheDir, 0700)
  1556  		t.Assert(err, IsNil)
  1557  
  1558  		catfs := exec.Command("catfs", "--test", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
  1559  		_, err = catfs.Output()
  1560  		if err != nil {
  1561  			if ee, ok := err.(*exec.ExitError); ok {
  1562  				panic(ee.Stderr)
  1563  			}
  1564  		}
  1565  
  1566  		catfs = exec.Command("catfs", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
  1567  
  1568  		if isTravis() {
  1569  			logger := NewLogger("catfs")
  1570  			lvl := logrus.InfoLevel
  1571  			logger.Formatter.(*LogHandle).Lvl = &lvl
  1572  			w := logger.Writer()
  1573  
  1574  			catfs.Stdout = w
  1575  			catfs.Stderr = w
  1576  
  1577  			catfs.Env = append(catfs.Env, "RUST_LOG=debug")
  1578  		}
  1579  
  1580  		err = catfs.Start()
  1581  		t.Assert(err, IsNil)
  1582  
  1583  		time.Sleep(time.Second)
  1584  	}
  1585  }
  1586  
  1587  func (s *GoofysTest) umount(t *C, mountPoint string) {
  1588  	var err error
  1589  	for i := 0; i < 10; i++ {
  1590  		err = fuse.Unmount(mountPoint)
  1591  		if err != nil {
  1592  			time.Sleep(100 * time.Millisecond)
  1593  		} else {
  1594  			break
  1595  		}
  1596  	}
  1597  	t.Assert(err, IsNil)
  1598  
  1599  	os.Remove(mountPoint)
  1600  	if isCatfs() {
  1601  		cacheDir := mountPoint + "-cache"
  1602  		os.Remove(cacheDir)
  1603  	}
  1604  }
  1605  
  1606  func (s *GoofysTest) runFuseTest(t *C, mountPoint string, umount bool, cmdArgs ...string) {
  1607  	s.mount(t, mountPoint)
  1608  
  1609  	if umount {
  1610  		defer s.umount(t, mountPoint)
  1611  	}
  1612  
  1613  	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
  1614  	cmd.Env = append(cmd.Env, os.Environ()...)
  1615  	cmd.Env = append(cmd.Env, "FAST=true")
  1616  	cmd.Env = append(cmd.Env, "CLEANUP=false")
  1617  
  1618  	if isTravis() {
  1619  		logger := NewLogger("test")
  1620  		lvl := logrus.InfoLevel
  1621  		logger.Formatter.(*LogHandle).Lvl = &lvl
  1622  		w := logger.Writer()
  1623  
  1624  		cmd.Stdout = w
  1625  		cmd.Stderr = w
  1626  	}
  1627  
  1628  	err := cmd.Run()
  1629  	t.Assert(err, IsNil)
  1630  }
  1631  
  1632  func (s *GoofysTest) TestFuse(t *C) {
  1633  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1634  
  1635  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1636  }
  1637  
  1638  func (s *GoofysTest) TestFuseWithTTL(t *C) {
   1639  	s.fs.flags.StatCacheTTL = 60 * time.Second
  1640  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1641  
  1642  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1643  }
  1644  
  1645  func (s *GoofysTest) TestCheap(t *C) {
  1646  	s.fs.flags.Cheap = true
  1647  	s.TestLookUpInode(t)
  1648  	s.TestWriteLargeFile(t)
  1649  }
  1650  
  1651  func (s *GoofysTest) TestExplicitDir(t *C) {
  1652  	s.fs.flags.ExplicitDir = true
  1653  	s.testExplicitDir(t)
  1654  }
  1655  
  1656  func (s *GoofysTest) TestExplicitDirAndCheap(t *C) {
  1657  	s.fs.flags.ExplicitDir = true
  1658  	s.fs.flags.Cheap = true
  1659  	s.testExplicitDir(t)
  1660  }
  1661  
  1662  func (s *GoofysTest) testExplicitDir(t *C) {
  1663  	if s.cloud.Capabilities().DirBlob {
  1664  		t.Skip("only for backends without dir blob")
  1665  	}
  1666  
  1667  	_, err := s.LookUpInode(t, "file1")
  1668  	t.Assert(err, IsNil)
  1669  
  1670  	_, err = s.LookUpInode(t, "fileNotFound")
  1671  	t.Assert(err, Equals, fuse.ENOENT)
  1672  
  1673  	// dir1/ doesn't exist so we shouldn't be able to see it
  1674  	_, err = s.LookUpInode(t, "dir1/file3")
  1675  	t.Assert(err, Equals, fuse.ENOENT)
  1676  
  1677  	_, err = s.LookUpInode(t, "dir4/file5")
  1678  	t.Assert(err, IsNil)
  1679  
  1680  	_, err = s.LookUpInode(t, "empty_dir")
  1681  	t.Assert(err, IsNil)
  1682  }
  1683  
  1684  func (s *GoofysTest) TestBenchLs(t *C) {
  1685  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1686  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1687  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1688  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "ls")
  1689  }
  1690  
  1691  func (s *GoofysTest) TestBenchCreate(t *C) {
  1692  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1693  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1694  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1695  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create")
  1696  }
  1697  
  1698  func (s *GoofysTest) TestBenchCreateParallel(t *C) {
  1699  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1700  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1701  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1702  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create_parallel")
  1703  }
  1704  
  1705  func (s *GoofysTest) TestBenchIO(t *C) {
  1706  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1707  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1708  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1709  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "io")
  1710  }
  1711  
  1712  func (s *GoofysTest) TestBenchFindTree(t *C) {
  1713  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1714  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1715  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1716  
  1717  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "find")
  1718  }
  1719  
  1720  func (s *GoofysTest) TestIssue231(t *C) {
  1721  	if isTravis() {
  1722  		t.Skip("disable in travis, not sure if it has enough memory")
  1723  	}
  1724  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1725  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue231")
  1726  }
  1727  
  1728  func (s *GoofysTest) TestChmod(t *C) {
  1729  	root := s.getRoot(t)
  1730  
  1731  	lookupOp := fuseops.LookUpInodeOp{
  1732  		Parent: root.Id,
  1733  		Name:   "file1",
  1734  	}
  1735  
  1736  	err := s.fs.LookUpInode(nil, &lookupOp)
  1737  	t.Assert(err, IsNil)
  1738  
  1739  	targetMode := os.FileMode(0777)
  1740  	setOp := fuseops.SetInodeAttributesOp{Inode: lookupOp.Entry.Child, Mode: &targetMode}
  1741  
  1742  	err = s.fs.SetInodeAttributes(s.ctx, &setOp)
  1743  	t.Assert(err, IsNil)
  1744  	t.Assert(setOp.Attributes, NotNil)
  1745  }
  1746  
  1747  func (s *GoofysTest) TestIssue64(t *C) {
  1748  	/*
  1749  		mountPoint := "/tmp/mnt" + s.fs.bucket
  1750  		log.Level = logrus.DebugLevel
  1751  
  1752  		err := os.MkdirAll(mountPoint, 0700)
  1753  		t.Assert(err, IsNil)
  1754  
  1755  		defer os.Remove(mountPoint)
  1756  
  1757  		s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue64")
  1758  	*/
  1759  }
  1760  
  1761  func (s *GoofysTest) TestIssue69Fuse(t *C) {
  1762  	s.fs.flags.StatCacheTTL = 0
  1763  
  1764  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1765  
  1766  	s.mount(t, mountPoint)
  1767  
  1768  	defer func() {
  1769  		err := os.Chdir("/")
  1770  		t.Assert(err, IsNil)
  1771  
  1772  		s.umount(t, mountPoint)
  1773  	}()
  1774  
  1775  	err := os.Chdir(mountPoint)
  1776  	t.Assert(err, IsNil)
  1777  
  1778  	_, err = os.Stat("dir1")
  1779  	t.Assert(err, IsNil)
  1780  
  1781  	err = os.Remove("dir1/file3")
  1782  	t.Assert(err, IsNil)
  1783  
	// we don't really care about the error code, but it should be a PathError
  1785  	os.Stat("dir1")
  1786  	os.Stat("dir1")
  1787  }
  1788  
  1789  func (s *GoofysTest) TestGetMimeType(t *C) {
	// the option to use mime types is not turned on
  1791  	mime := s.fs.flags.GetMimeType("foo.css")
  1792  	t.Assert(mime, IsNil)
  1793  
  1794  	s.fs.flags.UseContentType = true
  1795  
  1796  	mime = s.fs.flags.GetMimeType("foo.css")
  1797  	t.Assert(mime, NotNil)
  1798  	t.Assert(*mime, Equals, "text/css")
  1799  
  1800  	mime = s.fs.flags.GetMimeType("foo")
  1801  	t.Assert(mime, IsNil)
  1802  
  1803  	mime = s.fs.flags.GetMimeType("foo.")
  1804  	t.Assert(mime, IsNil)
  1805  
  1806  	mime = s.fs.flags.GetMimeType("foo.unknownExtension")
  1807  	t.Assert(mime, IsNil)
  1808  }
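
// extensionOf is a minimal sketch (an illustration, not goofys' actual
// implementation) of the extension-splitting rule the assertions above
// exercise: "foo", "foo." and unknown extensions all yield no mime type.
func extensionOf(name string) string {
	idx := strings.LastIndex(name, ".")
	if idx < 0 || idx == len(name)-1 {
		// no extension, or the name ends with "."
		return ""
	}
	// e.g. "css" for "foo.css"; an unknown extension would then
	// fail the content-type lookup and also yield no mime type
	return name[idx+1:]
}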
  1809  
  1810  func (s *GoofysTest) TestPutMimeType(t *C) {
  1811  	if _, ok := s.cloud.(*ADLv1); ok {
  1812  		// ADLv1 doesn't support content-type
  1813  		t.Skip("ADLv1 doesn't support content-type")
  1814  	}
  1815  
  1816  	s.fs.flags.UseContentType = true
  1817  
  1818  	root := s.getRoot(t)
  1819  	jpg := "test.jpg"
  1820  	jpg2 := "test2.jpg"
  1821  	file := "test"
  1822  
  1823  	s.testWriteFile(t, jpg, 10, 128)
  1824  
  1825  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: jpg})
  1826  	t.Assert(err, IsNil)
  1827  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1828  
  1829  	err = root.Rename(jpg, root, file)
  1830  	t.Assert(err, IsNil)
  1831  
  1832  	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: file})
  1833  	t.Assert(err, IsNil)
  1834  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1835  
  1836  	err = root.Rename(file, root, jpg2)
  1837  	t.Assert(err, IsNil)
  1838  
  1839  	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: jpg2})
  1840  	t.Assert(err, IsNil)
  1841  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1842  }
  1843  
  1844  func (s *GoofysTest) TestBucketPrefixSlash(t *C) {
  1845  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2", s.fs.flags)
  1846  	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
  1847  
  1848  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2///", s.fs.flags)
  1849  	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
  1850  }
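
// normalizePrefix is an illustrative helper (not the code under test)
// showing the normalization asserted above: for a non-empty prefix, any
// run of trailing slashes collapses to exactly one, so both "dir2" and
// "dir2///" become "dir2/".
func normalizePrefix(prefix string) string {
	return strings.TrimRight(prefix, "/") + "/"
}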
  1851  
  1852  func (s *GoofysTest) TestFuseWithPrefix(t *C) {
  1853  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1854  
  1855  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":testprefix", s.fs.flags)
  1856  
  1857  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1858  }
  1859  
  1860  func (s *GoofysTest) TestRenameCache(t *C) {
  1861  	root := s.getRoot(t)
	s.fs.flags.StatCacheTTL = 60 * time.Second
  1863  
  1864  	lookupOp1 := fuseops.LookUpInodeOp{
  1865  		Parent: root.Id,
  1866  		Name:   "file1",
  1867  	}
  1868  
  1869  	lookupOp2 := lookupOp1
  1870  	lookupOp2.Name = "newfile"
  1871  
  1872  	err := s.fs.LookUpInode(nil, &lookupOp1)
  1873  	t.Assert(err, IsNil)
  1874  
  1875  	err = s.fs.LookUpInode(nil, &lookupOp2)
  1876  	t.Assert(err, Equals, fuse.ENOENT)
  1877  
  1878  	renameOp := fuseops.RenameOp{
  1879  		OldParent: root.Id,
  1880  		NewParent: root.Id,
  1881  		OldName:   "file1",
  1882  		NewName:   "newfile",
  1883  	}
  1884  
  1885  	err = s.fs.Rename(nil, &renameOp)
  1886  	t.Assert(err, IsNil)
  1887  
  1888  	lookupOp1.Entry = fuseops.ChildInodeEntry{}
  1889  	lookupOp2.Entry = fuseops.ChildInodeEntry{}
  1890  
  1891  	err = s.fs.LookUpInode(nil, &lookupOp1)
  1892  	t.Assert(err, Equals, fuse.ENOENT)
  1893  
  1894  	err = s.fs.LookUpInode(nil, &lookupOp2)
  1895  	t.Assert(err, IsNil)
  1896  }
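
// renameInCache is an illustrative sketch (not goofys' actual code) of
// the cache behavior asserted above: a rename moves the cached entry
// from the old name to the new one, so looking up the old name misses
// and looking up the new name hits.
func renameInCache(cache map[string]*Inode, oldName string, newName string) {
	if in, ok := cache[oldName]; ok {
		delete(cache, oldName)
		cache[newName] = in
	}
}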
  1897  
  1898  func (s *GoofysTest) anonymous(t *C) {
	// On Azure this fails because we re-create the bucket with
	// the same name right away; anonymous access is also not
	// implemented yet in our Azure backend anyway
  1902  	var s3 *S3Backend
  1903  	var ok bool
  1904  	if s3, ok = s.cloud.Delegate().(*S3Backend); !ok {
  1905  		t.Skip("only for S3")
  1906  	}
  1907  
  1908  	err := s.deleteBucket(s.cloud)
  1909  	t.Assert(err, IsNil)
  1910  
	// use a different bucket name to prevent 409 Conflict from
	// the bucket deletion above
  1913  	s.fs.bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
  1914  	s3.bucket = s.fs.bucket
  1915  	s.setupDefaultEnv(t, true)
  1916  
  1917  	s.fs = NewGoofys(context.Background(), s.fs.bucket, s.fs.flags)
  1918  	t.Assert(s.fs, NotNil)
  1919  
	// should have been auto-detected by the S3 backend
  1921  	cloud := s.getRoot(t).dir.cloud
  1922  	t.Assert(cloud, NotNil)
  1923  	s3, ok = cloud.Delegate().(*S3Backend)
  1924  	t.Assert(ok, Equals, true)
  1925  
  1926  	s3.awsConfig.Credentials = credentials.AnonymousCredentials
  1927  	s3.newS3()
  1928  }
  1929  
  1930  func (s *GoofysTest) disableS3() {
  1931  	time.Sleep(1 * time.Second) // wait for any background goroutines to finish
  1932  	s.fs.inodes[fuseops.RootInodeID].dir.cloud = nil
  1933  }
  1934  
  1935  func (s *GoofysTest) TestWriteAnonymous(t *C) {
  1936  	s.anonymous(t)
  1937  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1938  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1939  
  1940  	fileName := "test"
  1941  
  1942  	createOp := fuseops.CreateFileOp{
  1943  		Parent: s.getRoot(t).Id,
  1944  		Name:   fileName,
  1945  	}
  1946  
  1947  	err := s.fs.CreateFile(s.ctx, &createOp)
  1948  	t.Assert(err, IsNil)
  1949  
  1950  	err = s.fs.FlushFile(s.ctx, &fuseops.FlushFileOp{
  1951  		Handle: createOp.Handle,
  1952  		Inode:  createOp.Entry.Child,
  1953  	})
  1954  	t.Assert(err, Equals, syscall.EACCES)
  1955  
  1956  	err = s.fs.ReleaseFileHandle(s.ctx, &fuseops.ReleaseFileHandleOp{Handle: createOp.Handle})
  1957  	t.Assert(err, IsNil)
  1958  
  1959  	err = s.fs.LookUpInode(s.ctx, &fuseops.LookUpInodeOp{
  1960  		Parent: s.getRoot(t).Id,
  1961  		Name:   fileName,
  1962  	})
  1963  	t.Assert(err, Equals, fuse.ENOENT)
	// BUG! the file shouldn't exist; see the test below for details.
	// This behaves as expected only because we are bypassing the
	// Linux VFS in this test
  1967  }
  1968  
  1969  func (s *GoofysTest) TestWriteAnonymousFuse(t *C) {
  1970  	s.anonymous(t)
  1971  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1972  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1973  
  1974  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1975  
  1976  	s.mount(t, mountPoint)
  1977  	defer s.umount(t, mountPoint)
  1978  
  1979  	err := ioutil.WriteFile(mountPoint+"/test", []byte(""), 0600)
  1980  	t.Assert(err, NotNil)
  1981  	pathErr, ok := err.(*os.PathError)
  1982  	t.Assert(ok, Equals, true)
  1983  	t.Assert(pathErr.Err, Equals, syscall.EACCES)
  1984  
  1985  	_, err = os.Stat(mountPoint + "/test")
  1986  	t.Assert(err, IsNil)
  1987  	// BUG! the file shouldn't exist, the condition below should hold instead
  1988  	// see comment in Goofys.FlushFile
  1989  	// pathErr, ok = err.(*os.PathError)
  1990  	// t.Assert(ok, Equals, true)
  1991  	// t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  1992  
  1993  	_, err = ioutil.ReadFile(mountPoint + "/test")
  1994  	t.Assert(err, NotNil)
  1995  	pathErr, ok = err.(*os.PathError)
  1996  	t.Assert(ok, Equals, true)
  1997  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  1998  
	// reading the file and getting ENOENT causes the kernel to
	// invalidate the entry; failing at open is not sufficient, we
	// have to fail at read (which means that if the application
	// uses splice(2) it won't get to us, so this wouldn't work)
  2003  	_, err = os.Stat(mountPoint + "/test")
  2004  	t.Assert(err, NotNil)
  2005  	pathErr, ok = err.(*os.PathError)
  2006  	t.Assert(ok, Equals, true)
  2007  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2008  }
  2009  
  2010  func (s *GoofysTest) TestWriteSyncWriteFuse(t *C) {
  2011  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2012  
  2013  	s.mount(t, mountPoint)
  2014  	defer s.umount(t, mountPoint)
  2015  
  2016  	var f *os.File
  2017  	var n int
  2018  	var err error
  2019  
  2020  	defer func() {
  2021  		if err != nil {
  2022  			f.Close()
  2023  		}
  2024  	}()
  2025  
  2026  	f, err = os.Create(mountPoint + "/TestWriteSyncWrite")
  2027  	t.Assert(err, IsNil)
  2028  
  2029  	n, err = f.Write([]byte("hello\n"))
  2030  	t.Assert(err, IsNil)
  2031  	t.Assert(n, Equals, 6)
  2032  
  2033  	err = f.Sync()
  2034  	t.Assert(err, IsNil)
  2035  
  2036  	n, err = f.Write([]byte("world\n"))
  2037  	t.Assert(err, IsNil)
  2038  	t.Assert(n, Equals, 6)
  2039  
  2040  	err = f.Close()
  2041  	t.Assert(err, IsNil)
  2042  }
  2043  
  2044  func (s *GoofysTest) TestIssue156(t *C) {
  2045  	_, err := s.LookUpInode(t, "\xae\x8a-")
	// S3Proxy and AWS S3 return different errors
  2047  	// https://github.com/andrewgaul/s3proxy/issues/201
  2048  	t.Assert(err, NotNil)
  2049  }
  2050  
  2051  func (s *GoofysTest) TestIssue162(t *C) {
  2052  	if s.azurite {
  2053  		t.Skip("https://github.com/Azure/Azurite/issues/221")
  2054  	}
  2055  
  2056  	params := &PutBlobInput{
  2057  		Key:  "dir1/lör 006.jpg",
  2058  		Body: bytes.NewReader([]byte("foo")),
  2059  		Size: PUInt64(3),
  2060  	}
  2061  	_, err := s.cloud.PutBlob(params)
  2062  	t.Assert(err, IsNil)
  2063  
  2064  	dir, err := s.LookUpInode(t, "dir1")
  2065  	t.Assert(err, IsNil)
  2066  
  2067  	err = dir.Rename("lör 006.jpg", dir, "myfile.jpg")
  2068  	t.Assert(err, IsNil)
  2069  
	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "dir1/myfile.jpg"})
	t.Assert(err, IsNil)
	t.Assert(resp.Size, Equals, uint64(3))
  2072  }
  2073  
  2074  func (s *GoofysTest) TestXAttrGet(t *C) {
  2075  	if _, ok := s.cloud.(*ADLv1); ok {
  2076  		t.Skip("ADLv1 doesn't support metadata")
  2077  	}
  2078  
  2079  	_, checkETag := s.cloud.Delegate().(*S3Backend)
  2080  
  2081  	file1, err := s.LookUpInode(t, "file1")
  2082  	t.Assert(err, IsNil)
  2083  
  2084  	names, err := file1.ListXattr()
  2085  	t.Assert(err, IsNil)
  2086  	sort.Strings(names)
  2087  	t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class", "user.name"})
  2088  
  2089  	_, err = file1.GetXattr("user.foobar")
	// xattr.IsNotExist seems broken on recent versions of macOS
  2091  	t.Assert(err, Equals, syscall.ENODATA)
  2092  
  2093  	if checkETag {
  2094  		value, err := file1.GetXattr("s3.etag")
  2095  		t.Assert(err, IsNil)
  2096  		// md5sum of "file1"
  2097  		t.Assert(string(value), Equals, "\"826e8142e6baabe8af779f5f490cf5f5\"")
  2098  	}
  2099  
  2100  	value, err := file1.GetXattr("user.name")
  2101  	t.Assert(err, IsNil)
  2102  	t.Assert(string(value), Equals, "file1+/#\x00")
  2103  
  2104  	dir1, err := s.LookUpInode(t, "dir1")
  2105  	t.Assert(err, IsNil)
  2106  
  2107  	// list dir1 to populate file3 in cache, then get file3's xattr
  2108  	lookup := fuseops.LookUpInodeOp{
  2109  		Parent: fuseops.RootInodeID,
  2110  		Name:   "dir1",
  2111  	}
  2112  	err = s.fs.LookUpInode(nil, &lookup)
  2113  	t.Assert(err, IsNil)
  2114  
  2115  	s.readDirIntoCache(t, lookup.Entry.Child)
  2116  
  2117  	dir1 = s.fs.inodes[lookup.Entry.Child]
  2118  	file3 := dir1.findChild("file3")
  2119  	t.Assert(file3, NotNil)
  2120  	t.Assert(file3.userMetadata, IsNil)
  2121  
  2122  	if checkETag {
  2123  		value, err = file3.GetXattr("s3.etag")
  2124  		t.Assert(err, IsNil)
  2125  		// md5sum of "dir1/file3"
  2126  		t.Assert(string(value), Equals, "\"5cd67e0e59fb85be91a515afe0f4bb24\"")
  2127  	}
  2128  
  2129  	// ensure that we get the dir blob instead of list
  2130  	s.fs.flags.Cheap = true
  2131  
  2132  	emptyDir2, err := s.LookUpInode(t, "empty_dir2")
  2133  	t.Assert(err, IsNil)
  2134  
  2135  	names, err = emptyDir2.ListXattr()
  2136  	t.Assert(err, IsNil)
  2137  	sort.Strings(names)
  2138  	t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class", "user.name"})
  2139  
  2140  	emptyDir, err := s.LookUpInode(t, "empty_dir")
  2141  	t.Assert(err, IsNil)
  2142  
  2143  	if checkETag {
  2144  		value, err = emptyDir.GetXattr("s3.etag")
  2145  		t.Assert(err, IsNil)
  2146  		// dir blobs are empty
  2147  		t.Assert(string(value), Equals, "\"d41d8cd98f00b204e9800998ecf8427e\"")
  2148  	}
  2149  
  2150  	if !s.cloud.Capabilities().DirBlob {
  2151  		// implicit dir blobs don't have s3.etag at all
  2152  		names, err = dir1.ListXattr()
  2153  		t.Assert(err, IsNil)
  2154  		t.Assert(names, HasLen, 0)
  2155  
  2156  		value, err = dir1.GetXattr("s3.etag")
  2157  		t.Assert(err, Equals, syscall.ENODATA)
  2158  	}
  2159  
  2160  	// s3proxy doesn't support storage class yet
  2161  	if hasEnv("AWS") {
  2162  		cloud := s.getRoot(t).dir.cloud
  2163  		s3, ok := cloud.Delegate().(*S3Backend)
  2164  		t.Assert(ok, Equals, true)
  2165  		s3.config.StorageClass = "STANDARD_IA"
  2166  
  2167  		s.testWriteFile(t, "ia", 1, 128*1024)
  2168  
  2169  		ia, err := s.LookUpInode(t, "ia")
  2170  		t.Assert(err, IsNil)
  2171  
		names, err = ia.ListXattr()
		t.Assert(err, IsNil)
		t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})
  2174  
  2175  		value, err = ia.GetXattr("s3.storage-class")
  2176  		t.Assert(err, IsNil)
		// files smaller than 128KB fall back to STANDARD
  2178  		t.Assert(string(value), Equals, "STANDARD")
  2179  
  2180  		s.testWriteFile(t, "ia", 128*1024, 128*1024)
  2181  		time.Sleep(100 * time.Millisecond)
  2182  
		names, err = ia.ListXattr()
		t.Assert(err, IsNil)
		t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})
  2185  
  2186  		value, err = ia.GetXattr("s3.storage-class")
  2187  		t.Assert(err, IsNil)
  2188  		t.Assert(string(value), Equals, "STANDARD_IA")
  2189  	}
  2190  }
  2191  
  2192  func (s *GoofysTest) TestClientForkExec(t *C) {
  2193  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2194  	s.mount(t, mountPoint)
  2195  	defer s.umount(t, mountPoint)
  2196  	file := mountPoint + "/TestClientForkExec"
  2197  
  2198  	// Create new file.
  2199  	fh, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0600)
  2200  	t.Assert(err, IsNil)
  2201  	defer func() { // Defer close file if it's not already closed.
  2202  		if fh != nil {
  2203  			fh.Close()
  2204  		}
  2205  	}()
  2206  	// Write to file.
  2207  	_, err = fh.WriteString("1.1;")
  2208  	t.Assert(err, IsNil)
  2209  	// The `Command` is run via fork+exec.
  2210  	// So all the file descriptors are copied over to the child process.
  2211  	// The child process 'closes' the files before exiting. This should
  2212  	// not result in goofys failing file operations invoked from the test.
  2213  	someCmd := exec.Command("echo", "hello")
  2214  	err = someCmd.Run()
  2215  	t.Assert(err, IsNil)
  2216  	// One more write.
  2217  	_, err = fh.WriteString("1.2;")
  2218  	t.Assert(err, IsNil)
  2219  	// Close file.
  2220  	err = fh.Close()
  2221  	t.Assert(err, IsNil)
  2222  	fh = nil
  2223  	// Check file content.
  2224  	content, err := ioutil.ReadFile(file)
  2225  	t.Assert(err, IsNil)
  2226  	t.Assert(string(content), Equals, "1.1;1.2;")
  2227  
	// Repeat the same exercise, but now with an existing file.
	fh, err = os.OpenFile(file, os.O_RDWR, 0600)
	t.Assert(err, IsNil)
	// Write to file.
	_, err = fh.WriteString("2.1;")
	t.Assert(err, IsNil)
  2232  	// fork+exec.
  2233  	someCmd = exec.Command("echo", "hello")
  2234  	err = someCmd.Run()
  2235  	t.Assert(err, IsNil)
  2236  	// One more write.
  2237  	_, err = fh.WriteString("2.2;")
  2238  	t.Assert(err, IsNil)
  2239  	// Close file.
  2240  	err = fh.Close()
  2241  	t.Assert(err, IsNil)
  2242  	fh = nil
  2243  	// Verify that the file is updated as per the new write.
  2244  	content, err = ioutil.ReadFile(file)
  2245  	t.Assert(err, IsNil)
  2246  	t.Assert(string(content), Equals, "2.1;2.2;")
  2247  }
  2248  
  2249  func (s *GoofysTest) TestXAttrGetCached(t *C) {
  2250  	if _, ok := s.cloud.(*ADLv1); ok {
  2251  		t.Skip("ADLv1 doesn't support metadata")
  2252  	}
  2253  
  2254  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2255  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2256  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2257  	s.disableS3()
  2258  
  2259  	in, err := s.LookUpInode(t, "file1")
  2260  	t.Assert(err, IsNil)
  2261  	t.Assert(in.userMetadata, IsNil)
  2262  
  2263  	_, err = in.GetXattr("s3.etag")
  2264  	t.Assert(err, IsNil)
  2265  }
  2266  
  2267  func (s *GoofysTest) TestXAttrCopied(t *C) {
  2268  	if _, ok := s.cloud.(*ADLv1); ok {
  2269  		t.Skip("ADLv1 doesn't support metadata")
  2270  	}
  2271  
  2272  	root := s.getRoot(t)
  2273  
  2274  	err := root.Rename("file1", root, "file0")
  2275  	t.Assert(err, IsNil)
  2276  
  2277  	in, err := s.LookUpInode(t, "file0")
  2278  	t.Assert(err, IsNil)
  2279  
  2280  	_, err = in.GetXattr("user.name")
  2281  	t.Assert(err, IsNil)
  2282  }
  2283  
  2284  func (s *GoofysTest) TestXAttrRemove(t *C) {
  2285  	if _, ok := s.cloud.(*ADLv1); ok {
  2286  		t.Skip("ADLv1 doesn't support metadata")
  2287  	}
  2288  
  2289  	in, err := s.LookUpInode(t, "file1")
  2290  	t.Assert(err, IsNil)
  2291  
  2292  	_, err = in.GetXattr("user.name")
  2293  	t.Assert(err, IsNil)
  2294  
  2295  	err = in.RemoveXattr("user.name")
  2296  	t.Assert(err, IsNil)
  2297  
  2298  	_, err = in.GetXattr("user.name")
  2299  	t.Assert(err, Equals, syscall.ENODATA)
  2300  }
  2301  
  2302  func (s *GoofysTest) TestXAttrSet(t *C) {
  2303  	if _, ok := s.cloud.(*ADLv1); ok {
  2304  		t.Skip("ADLv1 doesn't support metadata")
  2305  	}
  2306  
  2307  	in, err := s.LookUpInode(t, "file1")
  2308  	t.Assert(err, IsNil)
  2309  
  2310  	err = in.SetXattr("user.bar", []byte("hello"), xattr.REPLACE)
  2311  	t.Assert(err, Equals, syscall.ENODATA)
  2312  
  2313  	err = in.SetXattr("user.bar", []byte("hello"), xattr.CREATE)
  2314  	t.Assert(err, IsNil)
  2315  
  2316  	err = in.SetXattr("user.bar", []byte("hello"), xattr.CREATE)
  2317  	t.Assert(err, Equals, syscall.EEXIST)
  2318  
  2319  	in, err = s.LookUpInode(t, "file1")
  2320  	t.Assert(err, IsNil)
  2321  
  2322  	value, err := in.GetXattr("user.bar")
  2323  	t.Assert(err, IsNil)
  2324  	t.Assert(string(value), Equals, "hello")
  2325  
  2326  	value = []byte("file1+%/#\x00")
  2327  
  2328  	err = in.SetXattr("user.bar", value, xattr.REPLACE)
  2329  	t.Assert(err, IsNil)
  2330  
  2331  	in, err = s.LookUpInode(t, "file1")
  2332  	t.Assert(err, IsNil)
  2333  
  2334  	value2, err := in.GetXattr("user.bar")
  2335  	t.Assert(err, IsNil)
  2336  	t.Assert(value2, DeepEquals, value)
  2337  
  2338  	// setting with flag = 0 always works
  2339  	err = in.SetXattr("user.bar", []byte("world"), 0)
  2340  	t.Assert(err, IsNil)
  2341  
  2342  	err = in.SetXattr("user.baz", []byte("world"), 0)
  2343  	t.Assert(err, IsNil)
  2344  
  2345  	value, err = in.GetXattr("user.bar")
  2346  	t.Assert(err, IsNil)
  2347  
  2348  	value2, err = in.GetXattr("user.baz")
  2349  	t.Assert(err, IsNil)
  2350  
  2351  	t.Assert(value2, DeepEquals, value)
  2352  	t.Assert(string(value2), DeepEquals, "world")
  2353  }
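
// xattrSetOutcome is a minimal sketch (an illustration, not goofys'
// implementation) of the setxattr(2) flag semantics asserted above:
// CREATE fails with EEXIST if the attribute already exists, REPLACE
// fails with ENODATA if it does not, and flags == 0 always succeeds.
func xattrSetOutcome(exists bool, create bool, replace bool) error {
	if create && exists {
		return syscall.EEXIST
	}
	if replace && !exists {
		return syscall.ENODATA
	}
	return nil
}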
  2354  
  2355  func (s *GoofysTest) TestCreateRenameBeforeCloseFuse(t *C) {
  2356  	if s.azurite {
  2357  		// Azurite returns 400 when copy source doesn't exist
  2358  		// https://github.com/Azure/Azurite/issues/219
  2359  		// so our code to ignore ENOENT fails
  2360  		t.Skip("https://github.com/Azure/Azurite/issues/219")
  2361  	}
  2362  
  2363  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2364  
  2365  	s.mount(t, mountPoint)
  2366  	defer s.umount(t, mountPoint)
  2367  
  2368  	from := mountPoint + "/newfile"
  2369  	to := mountPoint + "/newfile2"
  2370  
  2371  	fh, err := os.Create(from)
  2372  	t.Assert(err, IsNil)
  2373  	defer func() {
  2374  		// close the file if the test failed so we can unmount
  2375  		if fh != nil {
  2376  			fh.Close()
  2377  		}
  2378  	}()
  2379  
  2380  	_, err = fh.WriteString("hello world")
  2381  	t.Assert(err, IsNil)
  2382  
  2383  	err = os.Rename(from, to)
  2384  	t.Assert(err, IsNil)
  2385  
  2386  	err = fh.Close()
  2387  	t.Assert(err, IsNil)
  2388  	fh = nil
  2389  
  2390  	_, err = os.Stat(from)
  2391  	t.Assert(err, NotNil)
  2392  	pathErr, ok := err.(*os.PathError)
  2393  	t.Assert(ok, Equals, true)
  2394  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2395  
  2396  	content, err := ioutil.ReadFile(to)
  2397  	t.Assert(err, IsNil)
  2398  	t.Assert(string(content), Equals, "hello world")
  2399  }
  2400  
  2401  func (s *GoofysTest) TestRenameBeforeCloseFuse(t *C) {
  2402  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2403  
  2404  	s.mount(t, mountPoint)
  2405  	defer s.umount(t, mountPoint)
  2406  
  2407  	from := mountPoint + "/newfile"
  2408  	to := mountPoint + "/newfile2"
  2409  
  2410  	err := ioutil.WriteFile(from, []byte(""), 0600)
  2411  	t.Assert(err, IsNil)
  2412  
  2413  	fh, err := os.OpenFile(from, os.O_WRONLY, 0600)
  2414  	t.Assert(err, IsNil)
  2415  	defer func() {
  2416  		// close the file if the test failed so we can unmount
  2417  		if fh != nil {
  2418  			fh.Close()
  2419  		}
  2420  	}()
  2421  
  2422  	_, err = fh.WriteString("hello world")
  2423  	t.Assert(err, IsNil)
  2424  
  2425  	err = os.Rename(from, to)
  2426  	t.Assert(err, IsNil)
  2427  
  2428  	err = fh.Close()
  2429  	t.Assert(err, IsNil)
  2430  	fh = nil
  2431  
  2432  	_, err = os.Stat(from)
  2433  	t.Assert(err, NotNil)
  2434  	pathErr, ok := err.(*os.PathError)
  2435  	t.Assert(ok, Equals, true)
  2436  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2437  
  2438  	content, err := ioutil.ReadFile(to)
  2439  	t.Assert(err, IsNil)
  2440  	t.Assert(string(content), Equals, "hello world")
  2441  }
  2442  
  2443  func (s *GoofysTest) TestInodeInsert(t *C) {
  2444  	root := s.getRoot(t)
  2445  
  2446  	in := NewInode(s.fs, root, aws.String("2"))
  2447  	in.Attributes = InodeAttributes{}
  2448  	root.insertChild(in)
  2449  	t.Assert(*root.dir.Children[2].Name, Equals, "2")
  2450  
  2451  	in = NewInode(s.fs, root, aws.String("1"))
  2452  	in.Attributes = InodeAttributes{}
  2453  	root.insertChild(in)
  2454  	t.Assert(*root.dir.Children[2].Name, Equals, "1")
  2455  	t.Assert(*root.dir.Children[3].Name, Equals, "2")
  2456  
  2457  	in = NewInode(s.fs, root, aws.String("4"))
  2458  	in.Attributes = InodeAttributes{}
  2459  	root.insertChild(in)
  2460  	t.Assert(*root.dir.Children[2].Name, Equals, "1")
  2461  	t.Assert(*root.dir.Children[3].Name, Equals, "2")
  2462  	t.Assert(*root.dir.Children[4].Name, Equals, "4")
  2463  
  2464  	inode := root.findChild("1")
  2465  	t.Assert(inode, NotNil)
  2466  	t.Assert(*inode.Name, Equals, "1")
  2467  
  2468  	inode = root.findChild("2")
  2469  	t.Assert(inode, NotNil)
  2470  	t.Assert(*inode.Name, Equals, "2")
  2471  
  2472  	inode = root.findChild("4")
  2473  	t.Assert(inode, NotNil)
  2474  	t.Assert(*inode.Name, Equals, "4")
  2475  
  2476  	inode = root.findChild("0")
  2477  	t.Assert(inode, IsNil)
  2478  
  2479  	inode = root.findChild("3")
  2480  	t.Assert(inode, IsNil)
  2481  
	// each removal shifts later entries down: index 3 removes "2",
	// then index 2 twice removes "1" and "4", leaving only "." and ".."
	root.removeChild(root.dir.Children[3])
	root.removeChild(root.dir.Children[2])
	root.removeChild(root.dir.Children[2])
	t.Assert(len(root.dir.Children), Equals, 2)
  2486  }
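
// insertSorted is a minimal sketch (an illustration; goofys' insertChild
// has its own implementation) of the sorted-insert behavior the test
// above relies on: children stay ordered by name so that findChild can
// use binary search.
func insertSorted(names []string, name string) []string {
	i := sort.SearchStrings(names, name)
	names = append(names, "")    // grow by one
	copy(names[i+1:], names[i:]) // shift the tail right
	names[i] = name
	return names
}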
  2487  
  2488  func (s *GoofysTest) TestReadDirSlurpHeuristic(t *C) {
  2489  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  2490  		t.Skip("only for S3")
  2491  	}
  2492  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2493  
  2494  	s.setupBlobs(s.cloud, t, map[string]*string{"dir2isafile": nil})
  2495  
  2496  	root := s.getRoot(t).dir
  2497  	t.Assert(root.seqOpenDirScore, Equals, uint8(0))
  2498  	s.assertEntries(t, s.getRoot(t), []string{
  2499  		"dir1", "dir2", "dir2isafile", "dir4", "empty_dir",
  2500  		"empty_dir2", "file1", "file2", "zero"})
  2501  
  2502  	dir1, err := s.LookUpInode(t, "dir1")
  2503  	t.Assert(err, IsNil)
  2504  	dh1 := dir1.OpenDir()
  2505  	defer dh1.CloseDir()
  2506  	score := root.seqOpenDirScore
  2507  
  2508  	dir2, err := s.LookUpInode(t, "dir2")
  2509  	t.Assert(err, IsNil)
  2510  	dh2 := dir2.OpenDir()
  2511  	defer dh2.CloseDir()
  2512  	t.Assert(root.seqOpenDirScore, Equals, score+1)
  2513  
	dir4, err := s.LookUpInode(t, "dir4")
	t.Assert(err, IsNil)
	dh4 := dir4.OpenDir()
	defer dh4.CloseDir()
	t.Assert(root.seqOpenDirScore, Equals, score+2)
  2519  }
  2520  
  2521  func (s *GoofysTest) TestReadDirSlurpSubtree(t *C) {
  2522  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  2523  		t.Skip("only for S3")
  2524  	}
  2525  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2526  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2527  
  2528  	s.getRoot(t).dir.seqOpenDirScore = 2
  2529  	in, err := s.LookUpInode(t, "dir2")
  2530  	t.Assert(err, IsNil)
  2531  	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(2))
  2532  
  2533  	s.readDirIntoCache(t, in.Id)
  2534  	// should have incremented the score
  2535  	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(3))
  2536  
  2537  	// reading dir2 should cause dir2/dir3 to have cached readdir
  2538  	s.disableS3()
  2539  
  2540  	in, err = s.LookUpInode(t, "dir2/dir3")
  2541  	t.Assert(err, IsNil)
  2542  
  2543  	s.assertEntries(t, in, []string{"file4"})
  2544  }
  2545  
  2546  func (s *GoofysTest) TestReadDirCached(t *C) {
  2547  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2548  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2549  
  2550  	s.getRoot(t).dir.seqOpenDirScore = 2
  2551  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2552  	s.disableS3()
  2553  
  2554  	dh := s.getRoot(t).OpenDir()
  2555  
  2556  	entries := s.readDirFully(t, dh)
  2557  	dirs := make([]string, 0)
  2558  	files := make([]string, 0)
  2559  	noMoreDir := false
  2560  
  2561  	for _, en := range entries {
  2562  		if en.Type == fuseutil.DT_Directory {
  2563  			t.Assert(noMoreDir, Equals, false)
  2564  			dirs = append(dirs, en.Name)
  2565  		} else {
  2566  			files = append(files, en.Name)
  2567  			noMoreDir = true
  2568  		}
  2569  	}
  2570  
  2571  	t.Assert(dirs, DeepEquals, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2"})
  2572  	t.Assert(files, DeepEquals, []string{"file1", "file2", "zero"})
  2573  }
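
// dirsBeforeFiles is an illustrative comparator (not the actual readdir
// code) capturing the ordering asserted above: all directories come
// before all files, and entries of the same kind sort by name.
func dirsBeforeFiles(nameA string, dirA bool, nameB string, dirB bool) bool {
	if dirA != dirB {
		return dirA
	}
	return nameA < nameB
}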
  2574  
  2575  func (s *GoofysTest) TestReadDirLookUp(t *C) {
  2576  	s.getRoot(t).dir.seqOpenDirScore = 2
  2577  
  2578  	var wg sync.WaitGroup
  2579  	for i := 0; i < 10; i++ {
  2580  		wg.Add(2)
  2581  		go func() {
  2582  			defer wg.Done()
  2583  			s.readDirIntoCache(t, fuseops.RootInodeID)
  2584  		}()
  2585  		go func() {
  2586  			defer wg.Done()
  2587  
  2588  			lookup := fuseops.LookUpInodeOp{
  2589  				Parent: fuseops.RootInodeID,
  2590  				Name:   "file1",
  2591  			}
  2592  			err := s.fs.LookUpInode(nil, &lookup)
  2593  			t.Assert(err, IsNil)
  2594  		}()
  2595  	}
  2596  	wg.Wait()
  2597  }
  2598  
  2599  func (s *GoofysTest) writeSeekWriteFuse(t *C, file string, fh *os.File, first string, second string, third string) {
  2600  	fi, err := os.Stat(file)
  2601  	t.Assert(err, IsNil)
  2602  
  2603  	defer func() {
  2604  		// close the file if the test failed so we can unmount
  2605  		if fh != nil {
  2606  			fh.Close()
  2607  		}
  2608  	}()
  2609  
  2610  	_, err = fh.WriteString(first)
  2611  	t.Assert(err, IsNil)
  2612  
	off, err := fh.Seek(int64(len(second)), io.SeekCurrent)
  2614  	t.Assert(err, IsNil)
  2615  	t.Assert(off, Equals, int64(len(first)+len(second)))
  2616  
  2617  	_, err = fh.WriteString(third)
  2618  	t.Assert(err, IsNil)
  2619  
	off, err = fh.Seek(int64(len(first)), io.SeekStart)
  2621  	t.Assert(err, IsNil)
  2622  	t.Assert(off, Equals, int64(len(first)))
  2623  
  2624  	_, err = fh.WriteString(second)
  2625  	t.Assert(err, IsNil)
  2626  
  2627  	err = fh.Close()
  2628  	t.Assert(err, IsNil)
  2629  	fh = nil
  2630  
  2631  	content, err := ioutil.ReadFile(file)
  2632  	t.Assert(err, IsNil)
  2633  	t.Assert(string(content), Equals, first+second+third)
  2634  
  2635  	fi2, err := os.Stat(file)
  2636  	t.Assert(err, IsNil)
  2637  	t.Assert(fi.Mode(), Equals, fi2.Mode())
  2638  }
  2639  
  2640  func (s *GoofysTest) TestWriteSeekWriteFuse(t *C) {
  2641  	if !isCatfs() {
  2642  		t.Skip("only works with CATFS=true")
  2643  	}
  2644  
  2645  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2646  	s.mount(t, mountPoint)
  2647  	defer s.umount(t, mountPoint)
  2648  
  2649  	file := mountPoint + "/newfile"
  2650  
  2651  	fh, err := os.Create(file)
  2652  	t.Assert(err, IsNil)
  2653  
  2654  	s.writeSeekWriteFuse(t, file, fh, "hello", " ", "world")
  2655  
  2656  	fh, err = os.OpenFile(file, os.O_WRONLY, 0600)
  2657  	t.Assert(err, IsNil)
  2658  
  2659  	s.writeSeekWriteFuse(t, file, fh, "", "never", "minding")
  2660  }
  2661  
  2662  func (s *GoofysTest) TestDirMtimeCreate(t *C) {
  2663  	root := s.getRoot(t)
  2664  
  2665  	attr, _ := root.GetAttributes()
  2666  	m1 := attr.Mtime
  2667  	time.Sleep(time.Second)
  2668  
  2669  	_, _ = root.Create("foo", fuseops.OpMetadata{uint32(os.Getpid())})
  2670  	attr2, _ := root.GetAttributes()
  2671  	m2 := attr2.Mtime
  2672  
  2673  	t.Assert(m1.Before(m2), Equals, true)
  2674  }
  2675  
  2676  func (s *GoofysTest) TestDirMtimeLs(t *C) {
  2677  	root := s.getRoot(t)
  2678  
  2679  	attr, _ := root.GetAttributes()
  2680  	m1 := attr.Mtime
  2681  	time.Sleep(3 * time.Second)
  2682  
  2683  	params := &PutBlobInput{
  2684  		Key:  "newfile",
  2685  		Body: bytes.NewReader([]byte("foo")),
  2686  		Size: PUInt64(3),
  2687  	}
  2688  	_, err := s.cloud.PutBlob(params)
  2689  	t.Assert(err, IsNil)
  2690  
  2691  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2692  
  2693  	attr2, _ := root.GetAttributes()
  2694  	m2 := attr2.Mtime
  2695  
  2696  	t.Assert(m1.Before(m2), Equals, true)
  2697  }
  2698  
  2699  func (s *GoofysTest) TestRenameOverwrite(t *C) {
  2700  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2701  	s.mount(t, mountPoint)
  2702  	defer s.umount(t, mountPoint)
  2703  
  2704  	file := mountPoint + "/newfile"
  2705  	rename := mountPoint + "/file1"
  2706  
  2707  	fh, err := os.Create(file)
  2708  	t.Assert(err, IsNil)
  2709  
  2710  	err = fh.Close()
  2711  	t.Assert(err, IsNil)
  2712  
  2713  	err = os.Rename(file, rename)
  2714  	t.Assert(err, IsNil)
  2715  }
  2716  
  2717  func (s *GoofysTest) TestRead403(t *C) {
  2718  	// anonymous only works in S3 for now
  2719  	cloud := s.getRoot(t).dir.cloud
  2720  	s3, ok := cloud.Delegate().(*S3Backend)
  2721  	if !ok {
  2722  		t.Skip("only for S3")
  2723  	}
  2724  
  2725  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2726  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2727  
	// cache the inode first so we don't get 403 when we look it up
  2729  	in, err := s.LookUpInode(t, "file1")
  2730  	t.Assert(err, IsNil)
  2731  
  2732  	fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
  2733  	t.Assert(err, IsNil)
  2734  
  2735  	s3.awsConfig.Credentials = credentials.AnonymousCredentials
  2736  	s3.newS3()
  2737  
	// fake-enable read-ahead
  2739  	fh.seqReadAmount = uint64(READAHEAD_CHUNK)
  2740  
  2741  	buf := make([]byte, 5)
  2742  
  2743  	_, err = fh.ReadFile(0, buf)
  2744  	t.Assert(err, Equals, syscall.EACCES)
  2745  
  2746  	// now that the S3 GET has failed, try again, see
  2747  	// https://github.com/kahing/goofys/pull/243
  2748  	_, err = fh.ReadFile(0, buf)
  2749  	t.Assert(err, Equals, syscall.EACCES)
  2750  }
  2751  
  2752  func (s *GoofysTest) TestRmdirWithDiropen(t *C) {
  2753  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2754  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2755  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2756  
  2757  	s.mount(t, mountPoint)
  2758  	defer s.umount(t, mountPoint)
  2759  
  2760  	err := os.MkdirAll(mountPoint+"/dir2/dir4", 0700)
  2761  	t.Assert(err, IsNil)
  2762  	err = os.MkdirAll(mountPoint+"/dir2/dir5", 0700)
  2763  	t.Assert(err, IsNil)
  2764  
	// 1. open dir5
  2766  	dir := mountPoint + "/dir2/dir5"
  2767  	fh, err := os.Open(dir)
  2768  	t.Assert(err, IsNil)
  2769  	defer fh.Close()
  2770  
	cmd1 := exec.Command("ls", mountPoint+"/dir2")
	out1, err1 := cmd1.Output()
	if err1 != nil {
		if ee, ok := err1.(*exec.ExitError); ok {
			panic(ee.Stderr)
		}
	}
	t.Assert(string(out1), DeepEquals, "dir3\ndir4\ndir5\n")
  2780  
	// 2. rm -rf dir5
  2782  	cmd := exec.Command("rm", "-rf", dir)
  2783  	_, err = cmd.Output()
  2784  	if err != nil {
  2785  		if ee, ok := err.(*exec.ExitError); ok {
  2786  			panic(ee.Stderr)
  2787  		}
  2788  	}
  2789  
	// 3. readdir dir2
  2791  	fh1, err := os.Open(mountPoint + "/dir2")
  2792  	t.Assert(err, IsNil)
  2793  	defer func() {
  2794  		// close the file if the test failed so we can unmount
  2795  		if fh1 != nil {
  2796  			fh1.Close()
  2797  		}
  2798  	}()
  2799  
  2800  	names, err := fh1.Readdirnames(0)
  2801  	t.Assert(err, IsNil)
  2802  	t.Assert(names, DeepEquals, []string{"dir3", "dir4"})
  2803  
  2804  	cmd = exec.Command("ls", mountPoint+"/dir2")
  2805  	out, err := cmd.Output()
  2806  	if err != nil {
  2807  		if ee, ok := err.(*exec.ExitError); ok {
  2808  			panic(ee.Stderr)
  2809  		}
  2810  	}
  2811  
	t.Assert(string(out), DeepEquals, "dir3\ndir4\n")
  2813  
  2814  	err = fh1.Close()
  2815  	t.Assert(err, IsNil)
  2816  
	// 4. reset env
  2818  	err = fh.Close()
  2819  	t.Assert(err, IsNil)
  2820  
  2821  	err = os.RemoveAll(mountPoint + "/dir2/dir4")
	t.Assert(err, IsNil)
}
  2825  
  2826  func (s *GoofysTest) TestDirMTime(t *C) {
  2827  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2828  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2829  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  2830  	s.fs.flags.Cheap = true
  2831  
  2832  	root := s.getRoot(t)
  2833  	t.Assert(time.Time{}.Before(root.Attributes.Mtime), Equals, true)
  2834  
	file1, err := s.LookUpInode(t, "file1")
  2836  	t.Assert(err, IsNil)
  2837  
  2838  	// take mtime from a blob as init time because when we test against
  2839  	// real cloud, server time can be way off from local time
  2840  	initTime := file1.Attributes.Mtime
  2841  
  2842  	dir1, err := s.LookUpInode(t, "dir1")
  2843  	t.Assert(err, IsNil)
  2844  
  2845  	attr1, _ := dir1.GetAttributes()
  2846  	m1 := attr1.Mtime
  2847  	if !s.cloud.Capabilities().DirBlob {
  2848  		// dir1 doesn't have a dir blob, so should take root's mtime
  2849  		t.Assert(m1, Equals, root.Attributes.Mtime)
  2850  	}
  2851  
  2852  	time.Sleep(2 * time.Second)
  2853  
  2854  	dir2, err := dir1.MkDir("dir2")
  2855  	t.Assert(err, IsNil)
  2856  
  2857  	attr2, _ := dir2.GetAttributes()
  2858  	m2 := attr2.Mtime
  2859  	t.Assert(m1.Add(2*time.Second).Before(m2), Equals, true)
  2860  
  2861  	// dir1 didn't have an explicit mtime, so it should update now
  2862  	// that we did a mkdir inside it
  2863  	attr1, _ = dir1.GetAttributes()
  2864  	m1 = attr1.Mtime
  2865  	t.Assert(m1, Equals, m2)
  2866  
  2867  	// we never added the inode so this will do the lookup again
  2868  	dir2, err = dir1.LookUp("dir2")
  2869  	t.Assert(err, IsNil)
  2870  
  2871  	// the new time comes from S3 which only has seconds
  2872  	// granularity
  2873  	attr2, _ = dir2.GetAttributes()
  2874  	t.Assert(m2, Not(Equals), attr2.Mtime)
  2875  	t.Assert(initTime.Add(time.Second).Before(attr2.Mtime), Equals, true)
  2876  
  2877  	// different dir2
  2878  	dir2, err = s.LookUpInode(t, "dir2")
  2879  	t.Assert(err, IsNil)
  2880  
  2881  	attr2, _ = dir2.GetAttributes()
  2882  	m2 = attr2.Mtime
  2883  
  2884  	// this fails because we are listing dir/, which means we
  2885  	// don't actually see the dir blob dir2/dir3/ (it's returned
  2886  	// as common prefix), so we can't get dir3's mtime
  2887  	if false {
  2888  		// dir2/dir3/ exists and has mtime
  2889  		s.readDirIntoCache(t, dir2.Id)
  2890  		dir3, err := s.LookUpInode(t, "dir2/dir3")
  2891  		t.Assert(err, IsNil)
  2892  
  2893  		attr3, _ := dir3.GetAttributes()
  2894  		// setupDefaultEnv is before mounting
  2895  		t.Assert(attr3.Mtime.Before(m2), Equals, true)
  2896  	}
  2897  
  2898  	time.Sleep(time.Second)
  2899  
  2900  	params := &PutBlobInput{
  2901  		Key:  "dir2/newfile",
  2902  		Body: bytes.NewReader([]byte("foo")),
  2903  		Size: PUInt64(3),
  2904  	}
  2905  	_, err = s.cloud.PutBlob(params)
  2906  	t.Assert(err, IsNil)
  2907  
  2908  	s.readDirIntoCache(t, dir2.Id)
  2909  
  2910  	newfile, err := dir2.LookUp("newfile")
  2911  	t.Assert(err, IsNil)
  2912  
  2913  	attr2New, _ := dir2.GetAttributes()
  2914  	// mtime should reflect that of the latest object
	// GCS can return nanosecond resolution, so truncate to seconds to compare
  2916  	t.Assert(attr2New.Mtime.Unix(), Equals, newfile.Attributes.Mtime.Unix())
  2917  	t.Assert(m2.Before(attr2New.Mtime), Equals, true)
  2918  }
  2919  
  2920  func (s *GoofysTest) TestDirMTimeNoTTL(t *C) {
  2921  	if s.cloud.Capabilities().DirBlob {
  2922  		t.Skip("Tests for behavior without dir blob")
  2923  	}
  2924  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  2925  	s.fs.flags.Cheap = true
  2926  
  2927  	dir2, err := s.LookUpInode(t, "dir2")
  2928  	t.Assert(err, IsNil)
  2929  
  2930  	attr2, _ := dir2.GetAttributes()
  2931  	m2 := attr2.Mtime
  2932  
  2933  	// dir2/dir3/ exists and has mtime
  2934  	s.readDirIntoCache(t, dir2.Id)
  2935  	dir3, err := s.LookUpInode(t, "dir2/dir3")
  2936  	t.Assert(err, IsNil)
  2937  
  2938  	attr3, _ := dir3.GetAttributes()
	// setupDefaultEnv runs before mounting, but we can't really
	// compare the times here since dir3's mtime is S3 server time
	// and dir2's is local time
  2942  	t.Assert(attr3.Mtime, Not(Equals), m2)
  2943  }
  2944  
  2945  func (s *GoofysTest) TestIssue326(t *C) {
  2946  	root := s.getRoot(t)
  2947  	_, err := root.MkDir("folder@name.something")
  2948  	t.Assert(err, IsNil)
  2949  	_, err = root.MkDir("folder#1#")
  2950  	t.Assert(err, IsNil)
  2951  
  2952  	s.readDirIntoCache(t, root.Id)
  2953  	s.assertEntries(t, root, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2",
  2954  		"file1", "file2", "folder#1#", "folder@name.something", "zero"})
  2955  }
  2956  
  2957  func (s *GoofysTest) TestSlurpFileAndDir(t *C) {
  2958  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  2959  		t.Skip("only for S3")
  2960  	}
  2961  	prefix := "TestSlurpFileAndDir/"
  2962  	// fileAndDir is both a file and a directory, and we are
  2963  	// slurping them together as part of our listing optimization
  2964  	blobs := []string{
  2965  		prefix + "fileAndDir",
  2966  		prefix + "fileAndDir/a",
  2967  	}
  2968  
  2969  	for _, b := range blobs {
  2970  		params := &PutBlobInput{
  2971  			Key:  b,
  2972  			Body: bytes.NewReader([]byte("foo")),
  2973  			Size: PUInt64(3),
  2974  		}
  2975  		_, err := s.cloud.PutBlob(params)
  2976  		t.Assert(err, IsNil)
  2977  	}
  2978  
  2979  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2980  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2981  
  2982  	in, err := s.LookUpInode(t, prefix[0:len(prefix)-1])
  2983  	t.Assert(err, IsNil)
  2984  	t.Assert(in.dir, NotNil)
  2985  
  2986  	s.getRoot(t).dir.seqOpenDirScore = 2
  2987  	s.readDirIntoCache(t, in.Id)
  2988  
  2989  	// should have slurped these
  2990  	in = in.findChild("fileAndDir")
  2991  	t.Assert(in, NotNil)
  2992  	t.Assert(in.dir, NotNil)
  2993  
  2994  	in = in.findChild("a")
  2995  	t.Assert(in, NotNil)
  2996  
	// because of slurping we've decided that this is a directory;
	// lookup must _not_ talk to S3 again, because otherwise an S3
	// race may make us decide it's a file again
  3000  	s.disableS3()
  3001  	in, err = s.LookUpInode(t, prefix+"fileAndDir")
  3002  	t.Assert(err, IsNil)
  3003  
  3004  	s.assertEntries(t, in, []string{"a"})
  3005  }
  3006  
  3007  func (s *GoofysTest) TestAzureDirBlob(t *C) {
  3008  	if _, ok := s.cloud.(*AZBlob); !ok {
  3009  		t.Skip("only for Azure blob")
  3010  	}
  3011  
  3012  	fakedir := []string{"dir2", "dir3"}
  3013  
  3014  	for _, d := range fakedir {
  3015  		params := &PutBlobInput{
  3016  			Key:  "azuredir/" + d,
  3017  			Body: bytes.NewReader([]byte("")),
  3018  			Metadata: map[string]*string{
  3019  				AzureDirBlobMetadataKey: PString("true"),
  3020  			},
  3021  			Size: PUInt64(0),
  3022  		}
  3023  		_, err := s.cloud.PutBlob(params)
  3024  		t.Assert(err, IsNil)
  3025  	}
  3026  
  3027  	defer func() {
		// because our listing changes dir3 to dir3/, test
		// cleanup cannot delete the blob, so we need to
		// clean it up here
  3031  		for _, d := range fakedir {
  3032  			_, err := s.cloud.DeleteBlob(&DeleteBlobInput{Key: "azuredir/" + d})
  3033  			t.Assert(err, IsNil)
  3034  		}
  3035  	}()
  3036  
  3037  	s.setupBlobs(s.cloud, t, map[string]*string{
  3038  		// "azuredir/dir" would have gone here
  3039  		"azuredir/dir3,/":           nil,
  3040  		"azuredir/dir3/file1":       nil,
  3041  		"azuredir/dir345_is_a_file": nil,
  3042  	})
  3043  
  3044  	head, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir3"})
  3045  	t.Assert(err, IsNil)
  3046  	t.Assert(head.IsDirBlob, Equals, true)
  3047  
  3048  	head, err = s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir345_is_a_file"})
  3049  	t.Assert(err, IsNil)
  3050  	t.Assert(head.IsDirBlob, Equals, false)
  3051  
  3052  	list, err := s.cloud.ListBlobs(&ListBlobsInput{Prefix: PString("azuredir/")})
  3053  	t.Assert(err, IsNil)
  3054  
	// for flat listing, we rename `dir3` to `dir3/` and add it to Items;
	// `dir3` normally sorts before `dir3,/`, but after the rename `dir3/` should
	// sort after `dir3,/`
  3058  	t.Assert(len(list.Items), Equals, 5)
  3059  	t.Assert(*list.Items[0].Key, Equals, "azuredir/dir2/")
  3060  	t.Assert(*list.Items[1].Key, Equals, "azuredir/dir3,/")
  3061  	t.Assert(*list.Items[2].Key, Equals, "azuredir/dir3/")
  3062  	t.Assert(*list.Items[3].Key, Equals, "azuredir/dir3/file1")
  3063  	t.Assert(*list.Items[4].Key, Equals, "azuredir/dir345_is_a_file")
  3064  	t.Assert(sort.IsSorted(sortBlobItemOutput(list.Items)), Equals, true)
  3065  
  3066  	list, err = s.cloud.ListBlobs(&ListBlobsInput{
  3067  		Prefix:    PString("azuredir/"),
  3068  		Delimiter: PString("/"),
  3069  	})
  3070  	t.Assert(err, IsNil)
  3071  
  3072  	// for delimited listing, we remove `dir3` from items and add `dir3/` to prefixes,
  3073  	// which should already be there
  3074  	t.Assert(len(list.Items), Equals, 1)
  3075  	t.Assert(*list.Items[0].Key, Equals, "azuredir/dir345_is_a_file")
  3076  
  3077  	t.Assert(len(list.Prefixes), Equals, 3)
  3078  	t.Assert(*list.Prefixes[0].Prefix, Equals, "azuredir/dir2/")
  3079  	t.Assert(*list.Prefixes[1].Prefix, Equals, "azuredir/dir3,/")
  3080  	t.Assert(*list.Prefixes[2].Prefix, Equals, "azuredir/dir3/")
  3081  
  3082  	// finally check that we are reading them in correctly
  3083  	in, err := s.LookUpInode(t, "azuredir")
  3084  	t.Assert(err, IsNil)
  3085  
  3086  	s.assertEntries(t, in, []string{"dir2", "dir3", "dir3,", "dir345_is_a_file"})
  3087  }
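
// The listing order asserted above follows plain byte comparison: ','
// (0x2C) sorts before '/' (0x2F), so after the rename "dir3,/" sorts
// before "dir3/", even though bare "dir3" would have sorted before
// "dir3,/". A tiny self-contained illustration:
func azureDirSortExample() []string {
	keys := []string{"azuredir/dir3/", "azuredir/dir3,/", "azuredir/dir2/"}
	sort.Strings(keys) // yields dir2/, dir3,/, dir3/
	return keys
}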
  3088  
  3089  func (s *GoofysTest) TestReadDirLarge(t *C) {
  3090  	root := s.getRoot(t)
  3091  	root.dir.mountPrefix = "empty_dir"
  3092  
  3093  	blobs := make(map[string]*string)
  3094  	expect := make([]string, 0)
  3095  	for i := 0; i < 998; i++ {
  3096  		blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil
  3097  		expect = append(expect, fmt.Sprintf("%04vd", i))
  3098  	}
  3099  	blobs["empty_dir/0998f"] = nil
  3100  	blobs["empty_dir/0999f"] = nil
  3101  	blobs["empty_dir/1000f"] = nil
  3102  	expect = append(expect, "0998f")
  3103  	expect = append(expect, "0999f")
  3104  	expect = append(expect, "1000f")
  3105  
  3106  	for i := 1001; i < 1003; i++ {
  3107  		blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil
  3108  		expect = append(expect, fmt.Sprintf("%04vd", i))
  3109  	}
  3110  
  3111  	s.setupBlobs(s.cloud, t, blobs)
  3112  
  3113  	dh := root.OpenDir()
  3114  	defer dh.CloseDir()
  3115  
  3116  	children := namesOf(s.readDirFully(t, dh))
  3117  	sort.Strings(children)
  3118  
  3119  	t.Assert(children, DeepEquals, expect)
  3120  }
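
// nameFor shows the key naming scheme used above (an illustration only):
// the index is zero-padded to four digits and suffixed with "d" for keys
// that are directories or "f" for plain files, so the combined set of
// more than 1000 entries still sorts lexically and exercises paginated
// listing.
func nameFor(i int, isDir bool) string {
	suffix := "f"
	if isDir {
		suffix = "d"
	}
	return fmt.Sprintf("%04v%v", i, suffix)
}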
  3121  
  3122  func (s *GoofysTest) newBackend(t *C, bucket string, createBucket bool) (cloud StorageBackend) {
  3123  	var err error
  3124  	switch s.cloud.Delegate().(type) {
  3125  	case *S3Backend:
  3126  		config, _ := s.fs.flags.Backend.(*S3Config)
  3127  		s3, err := NewS3(bucket, s.fs.flags, config)
  3128  		t.Assert(err, IsNil)
  3129  
  3130  		s3.aws = hasEnv("AWS")
  3131  
  3132  		if !hasEnv("MINIO") {
  3133  			s3.Handlers.Sign.Clear()
  3134  			s3.Handlers.Sign.PushBack(SignV2)
  3135  			s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
  3136  		}
  3137  
  3138  		if s3.aws {
  3139  			cloud = &S3BucketEventualConsistency{s3}
  3140  		} else {
  3141  			cloud = s3
  3142  		}
  3143  	case *GCS3:
  3144  		config, _ := s.fs.flags.Backend.(*S3Config)
  3145  		cloud, err = NewGCS3(bucket, s.fs.flags, config)
  3146  		t.Assert(err, IsNil)
  3147  	case *AZBlob:
  3148  		config, _ := s.fs.flags.Backend.(*AZBlobConfig)
  3149  		cloud, err = NewAZBlob(bucket, config)
  3150  		t.Assert(err, IsNil)
  3151  	case *ADLv1:
  3152  		config, _ := s.fs.flags.Backend.(*ADLv1Config)
  3153  		cloud, err = NewADLv1(bucket, s.fs.flags, config)
  3154  		t.Assert(err, IsNil)
  3155  	case *ADLv2:
  3156  		config, _ := s.fs.flags.Backend.(*ADLv2Config)
  3157  		cloud, err = NewADLv2(bucket, s.fs.flags, config)
  3158  		t.Assert(err, IsNil)
  3159  	default:
  3160  		t.Fatal("unknown backend")
  3161  	}
  3162  
  3163  	if createBucket {
  3164  		_, err = cloud.MakeBucket(&MakeBucketInput{})
  3165  		t.Assert(err, IsNil)
  3166  
  3167  		s.removeBucket = append(s.removeBucket, cloud)
  3168  	}
  3169  
  3170  	return
  3171  }
  3172  
  3173  func (s *GoofysTest) TestVFS(t *C) {
  3174  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3175  	cloud2 := s.newBackend(t, bucket, true)
  3176  
  3177  	// "mount" this 2nd cloud
  3178  	in, err := s.LookUpInode(t, "dir4")
  3179  	t.Assert(in, NotNil)
  3180  	t.Assert(err, IsNil)
  3181  
  3182  	in.dir.cloud = cloud2
  3183  	in.dir.mountPrefix = "cloud2Prefix/"
  3184  
  3185  	rootCloud, rootPath := in.cloud()
  3186  	t.Assert(rootCloud, NotNil)
  3187  	t.Assert(rootCloud == cloud2, Equals, true)
  3188  	t.Assert(rootPath, Equals, "cloud2Prefix")
  3189  
  3190  	// the mount would shadow dir4/file5
  3191  	_, err = in.LookUp("file5")
  3192  	t.Assert(err, Equals, fuse.ENOENT)
  3193  
  3194  	_, fh := in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
  3195  	err = fh.FlushFile()
  3196  	t.Assert(err, IsNil)
  3197  
  3198  	resp, err := cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile"})
  3199  	t.Assert(err, IsNil)
  3200  	defer resp.Body.Close()
  3201  
  3202  	err = s.getRoot(t).Rename("file1", in, "file2")
  3203  	t.Assert(err, Equals, syscall.EINVAL)
  3204  
  3205  	_, err = in.MkDir("subdir")
  3206  	t.Assert(err, IsNil)
  3207  
  3208  	subdirKey := "cloud2Prefix/subdir"
  3209  	if !cloud2.Capabilities().DirBlob {
  3210  		subdirKey += "/"
  3211  	}
  3212  
  3213  	_, err = cloud2.HeadBlob(&HeadBlobInput{Key: subdirKey})
  3214  	t.Assert(err, IsNil)
  3215  
  3216  	subdir, err := s.LookUpInode(t, "dir4/subdir")
  3217  	t.Assert(err, IsNil)
  3218  	t.Assert(subdir, NotNil)
  3219  	t.Assert(subdir.dir, NotNil)
  3220  	t.Assert(subdir.dir.cloud, IsNil)
  3221  
  3222  	subdirCloud, subdirPath := subdir.cloud()
  3223  	t.Assert(subdirCloud, NotNil)
  3224  	t.Assert(subdirCloud == cloud2, Equals, true)
  3225  	t.Assert(subdirPath, Equals, "cloud2Prefix/subdir")
  3226  
  3227  	// create another file inside subdir to make sure that our
  3228  	// mount check is correct for dir inside the root
  3229  	_, fh = subdir.Create("testfile2", fuseops.OpMetadata{uint32(os.Getpid())})
  3230  	err = fh.FlushFile()
  3231  	t.Assert(err, IsNil)
  3232  
  3233  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3234  	t.Assert(err, IsNil)
  3235  	defer resp.Body.Close()
  3236  
  3237  	err = subdir.Rename("testfile2", in, "testfile2")
  3238  	t.Assert(err, IsNil)
  3239  
  3240  	_, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3241  	t.Assert(err, Equals, fuse.ENOENT)
  3242  
  3243  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"})
  3244  	t.Assert(err, IsNil)
  3245  	defer resp.Body.Close()
  3246  
  3247  	err = in.Rename("testfile2", subdir, "testfile2")
  3248  	t.Assert(err, IsNil)
  3249  
  3250  	_, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"})
  3251  	t.Assert(err, Equals, fuse.ENOENT)
  3252  
  3253  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3254  	t.Assert(err, IsNil)
  3255  	defer resp.Body.Close()
  3256  }
  3257  
  3258  func (s *GoofysTest) TestMountsList(t *C) {
  3259  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3260  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  3261  
  3262  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3263  	cloud := s.newBackend(t, bucket, true)
  3264  
  3265  	root := s.getRoot(t)
  3266  	rootCloud := root.dir.cloud
  3267  
  3268  	s.fs.MountAll([]*Mount{
  3269  		&Mount{"dir4/cloud1", cloud, "", false},
  3270  	})
  3271  
  3272  	in, err := s.LookUpInode(t, "dir4")
  3273  	t.Assert(in, NotNil)
  3274  	t.Assert(err, IsNil)
  3275  	t.Assert(int(in.Id), Equals, 2)
  3276  
  3277  	s.readDirIntoCache(t, in.Id)
	// ensure that listing lists both mounts and the root bucket in one go
  3279  	root.dir.cloud = nil
  3280  
  3281  	s.assertEntries(t, in, []string{"cloud1", "file5"})
  3282  
  3283  	c1, err := s.LookUpInode(t, "dir4/cloud1")
  3284  	t.Assert(err, IsNil)
  3285  	t.Assert(*c1.Name, Equals, "cloud1")
  3286  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3287  	t.Assert(int(c1.Id), Equals, 3)
  3288  
  3289  	// pretend we've passed the normal cache ttl
  3290  	s.fs.flags.TypeCacheTTL = 0
  3291  	s.fs.flags.StatCacheTTL = 0
  3292  
  3293  	// listing root again should not overwrite the mounts
  3294  	root.dir.cloud = rootCloud
  3295  
  3296  	s.readDirIntoCache(t, in.Parent.Id)
  3297  	s.assertEntries(t, in, []string{"cloud1", "file5"})
  3298  
  3299  	c1, err = s.LookUpInode(t, "dir4/cloud1")
  3300  	t.Assert(err, IsNil)
  3301  	t.Assert(*c1.Name, Equals, "cloud1")
  3302  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3303  	t.Assert(int(c1.Id), Equals, 3)
  3304  }
  3305  
  3306  func (s *GoofysTest) TestMountsNewDir(t *C) {
  3307  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3308  	cloud := s.newBackend(t, bucket, true)
  3309  
  3310  	_, err := s.LookUpInode(t, "dir5")
  3311  	t.Assert(err, NotNil)
  3312  	t.Assert(err, Equals, fuse.ENOENT)
  3313  
  3314  	s.fs.MountAll([]*Mount{
  3315  		&Mount{"dir5/cloud1", cloud, "", false},
  3316  	})
  3317  
  3318  	in, err := s.LookUpInode(t, "dir5")
  3319  	t.Assert(err, IsNil)
  3320  	t.Assert(in.isDir(), Equals, true)
  3321  
  3322  	c1, err := s.LookUpInode(t, "dir5/cloud1")
  3323  	t.Assert(err, IsNil)
  3324  	t.Assert(c1.isDir(), Equals, true)
  3325  	t.Assert(c1.dir.cloud, Equals, cloud)
  3326  }
  3327  
  3328  func (s *GoofysTest) TestMountsNewMounts(t *C) {
  3329  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3330  	cloud := s.newBackend(t, bucket, true)
  3331  
  3332  	// "mount" this 2nd cloud
  3333  	in, err := s.LookUpInode(t, "dir4")
  3334  	t.Assert(in, NotNil)
  3335  	t.Assert(err, IsNil)
  3336  
  3337  	s.fs.MountAll([]*Mount{
  3338  		&Mount{"dir4/cloud1", cloud, "", false},
  3339  	})
  3340  
  3341  	s.readDirIntoCache(t, in.Id)
  3342  
  3343  	c1, err := s.LookUpInode(t, "dir4/cloud1")
  3344  	t.Assert(err, IsNil)
  3345  	t.Assert(*c1.Name, Equals, "cloud1")
  3346  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3347  
  3348  	_, err = s.LookUpInode(t, "dir4/cloud2")
  3349  	t.Assert(err, Equals, fuse.ENOENT)
  3350  
  3351  	s.fs.MountAll([]*Mount{
  3352  		&Mount{"dir4/cloud1", cloud, "", false},
  3353  		&Mount{"dir4/cloud2", cloud, "cloudprefix", false},
  3354  	})
  3355  
  3356  	c2, err := s.LookUpInode(t, "dir4/cloud2")
  3357  	t.Assert(err, IsNil)
  3358  	t.Assert(*c2.Name, Equals, "cloud2")
  3359  	t.Assert(c2.dir.cloud == cloud, Equals, true)
  3360  	t.Assert(c2.dir.mountPrefix, Equals, "cloudprefix")
  3361  }
  3362  
  3363  func (s *GoofysTest) TestMountsError(t *C) {
  3364  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3365  	var cloud StorageBackend
  3366  	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
		// S3Backend can't detect that the bucket doesn't exist,
		// because HEADing an object always returns 404 NotFound
		// (instead of NoSuchBucket)
  3370  		flags := *s3.flags
  3371  		config := *s3.config
  3372  		flags.Endpoint = "0.0.0.0:0"
  3373  		var err error
  3374  		cloud, err = NewS3(bucket, &flags, &config)
  3375  		t.Assert(err, IsNil)
  3376  	} else if _, ok := s.cloud.(*ADLv1); ok {
  3377  		config, _ := s.fs.flags.Backend.(*ADLv1Config)
  3378  		config.Authorizer = nil
  3379  
  3380  		var err error
  3381  		cloud, err = NewADLv1(bucket, s.fs.flags, config)
  3382  		t.Assert(err, IsNil)
  3383  	} else if _, ok := s.cloud.(*ADLv2); ok {
		// ADLv2 currently doesn't detect that the bucket doesn't exist
  3385  		cloud = s.newBackend(t, bucket, false)
  3386  		adlCloud, _ := cloud.(*ADLv2)
  3387  		auth := adlCloud.client.BaseClient.Authorizer
  3388  		adlCloud.client.BaseClient.Authorizer = nil
  3389  		defer func() {
  3390  			adlCloud.client.BaseClient.Authorizer = auth
  3391  		}()
  3392  	} else {
  3393  		cloud = s.newBackend(t, bucket, false)
  3394  	}
  3395  
  3396  	s.fs.MountAll([]*Mount{
  3397  		&Mount{"dir4/newerror", StorageBackendInitError{
  3398  			fmt.Errorf("foo"),
  3399  			Capabilities{},
  3400  		}, "errprefix1", false},
  3401  		&Mount{"dir4/initerror", &StorageBackendInitWrapper{
  3402  			StorageBackend: cloud,
  3403  			initKey:        "foobar",
  3404  		}, "errprefix2", false},
  3405  	})
  3406  
  3407  	errfile, err := s.LookUpInode(t, "dir4/newerror/"+INIT_ERR_BLOB)
  3408  	t.Assert(err, IsNil)
  3409  	t.Assert(errfile.isDir(), Equals, false)
  3410  
  3411  	_, err = s.LookUpInode(t, "dir4/newerror/not_there")
  3412  	t.Assert(err, Equals, fuse.ENOENT)
  3413  
  3414  	errfile, err = s.LookUpInode(t, "dir4/initerror/"+INIT_ERR_BLOB)
  3415  	t.Assert(err, IsNil)
  3416  	t.Assert(errfile.isDir(), Equals, false)
  3417  
  3418  	_, err = s.LookUpInode(t, "dir4/initerror/not_there")
  3419  	t.Assert(err, Equals, fuse.ENOENT)
  3420  
  3421  	in, err := s.LookUpInode(t, "dir4/initerror")
  3422  	t.Assert(err, IsNil)
  3423  	t.Assert(in, NotNil)
  3424  
  3425  	t.Assert(in.dir.cloud.Capabilities().Name, Equals, cloud.Capabilities().Name)
  3426  }
  3427  
  3428  func (s *GoofysTest) TestMountsMultiLevel(t *C) {
  3429  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3430  
  3431  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3432  	cloud := s.newBackend(t, bucket, true)
  3433  
  3434  	s.fs.MountAll([]*Mount{
  3435  		&Mount{"dir4/sub/dir", cloud, "", false},
  3436  	})
  3437  
  3438  	sub, err := s.LookUpInode(t, "dir4/sub")
  3439  	t.Assert(err, IsNil)
  3440  	t.Assert(sub.isDir(), Equals, true)
  3441  
  3442  	s.assertEntries(t, sub, []string{"dir"})
  3443  }
  3444  
  3445  func (s *GoofysTest) TestMountsNested(t *C) {
  3446  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3447  	cloud := s.newBackend(t, bucket, true)
  3448  	s.testMountsNested(t, cloud, []*Mount{
  3449  		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
  3450  		&Mount{"dir5/in/", cloud, "b/", false},
  3451  	})
  3452  }
  3453  
  3454  // test that mount order doesn't matter for nested mounts
  3455  func (s *GoofysTest) TestMountsNestedReversed(t *C) {
  3456  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3457  	cloud := s.newBackend(t, bucket, true)
  3458  	s.testMountsNested(t, cloud, []*Mount{
  3459  		&Mount{"dir5/in/", cloud, "b/", false},
  3460  		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
  3461  	})
  3462  }
  3463  
  3464  func (s *GoofysTest) testMountsNested(t *C, cloud StorageBackend,
  3465  	mounts []*Mount) {
  3466  
  3467  	_, err := s.LookUpInode(t, "dir5")
  3468  	t.Assert(err, NotNil)
  3469  	t.Assert(err, Equals, fuse.ENOENT)
  3470  
  3471  	s.fs.MountAll(mounts)
  3472  
  3473  	in, err := s.LookUpInode(t, "dir5")
  3474  	t.Assert(err, IsNil)
  3475  
  3476  	s.readDirIntoCache(t, in.Id)
  3477  
3478  	// make sure none of the intermediate dirs expire
  3479  	time.Sleep(time.Second)
  3480  	dir_in, err := s.LookUpInode(t, "dir5/in")
  3481  	t.Assert(err, IsNil)
  3482  	t.Assert(*dir_in.Name, Equals, "in")
  3483  
  3484  	s.readDirIntoCache(t, dir_in.Id)
  3485  
  3486  	dir_a, err := s.LookUpInode(t, "dir5/in/a")
  3487  	t.Assert(err, IsNil)
  3488  	t.Assert(*dir_a.Name, Equals, "a")
  3489  
  3490  	s.assertEntries(t, dir_a, []string{"dir"})
  3491  
  3492  	dir_dir, err := s.LookUpInode(t, "dir5/in/a/dir")
  3493  	t.Assert(err, IsNil)
  3494  	t.Assert(*dir_dir.Name, Equals, "dir")
  3495  	t.Assert(dir_dir.dir.cloud == cloud, Equals, true)
  3496  
  3497  	_, fh := dir_in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
  3498  	err = fh.FlushFile()
  3499  	t.Assert(err, IsNil)
  3500  
  3501  	resp, err := cloud.GetBlob(&GetBlobInput{Key: "b/testfile"})
  3502  	t.Assert(err, IsNil)
  3503  	defer resp.Body.Close()
  3504  
  3505  	_, fh = dir_dir.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
  3506  	err = fh.FlushFile()
  3507  	t.Assert(err, IsNil)
  3508  
  3509  	resp, err = cloud.GetBlob(&GetBlobInput{Key: "a/dir/testfile"})
  3510  	t.Assert(err, IsNil)
  3511  	defer resp.Body.Close()
  3512  
  3513  	s.assertEntries(t, in, []string{"in"})
  3514  }
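
// testMountsNested only asserts that GetBlob succeeds; when the content also
// matters, the body has to be drained and closed. A small, hedged helper for
// that pattern (illustrative; the tests above don't use it):
func exampleReadBlob(cloud StorageBackend, key string) (string, error) {
	resp, err := cloud.GetBlob(&GetBlobInput{Key: key})
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	return string(data), err
}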
  3515  
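// verifyFileData reads path under mountPoint and asserts on the result: a
// non-nil content means the file must exist and match (modulo surrounding
// whitespace); nil means the read must fail with "no such file or directory".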
  3516  func verifyFileData(t *C, mountPoint string, path string, content *string) {
  3517  	if !strings.HasSuffix(mountPoint, "/") {
  3518  		mountPoint = mountPoint + "/"
  3519  	}
  3520  	path = mountPoint + path
  3521  	data, err := ioutil.ReadFile(path)
  3522  	comment := Commentf("failed while verifying %v", path)
  3523  	if content != nil {
  3524  		t.Assert(err, IsNil, comment)
  3525  		t.Assert(strings.TrimSpace(string(data)), Equals, *content, comment)
  3526  	} else {
  3527  		t.Assert(err, Not(IsNil), comment)
  3528  		t.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true, comment)
  3529  	}
  3530  }
  3531  
  3532  func (s *GoofysTest) TestNestedMountUnmountSimple(t *C) {
  3533  	childBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3534  	childCloud := s.newBackend(t, childBucket, true)
  3535  
  3536  	parFileContent := "parent"
  3537  	childFileContent := "child"
  3538  	parEnv := map[string]*string{
  3539  		"childmnt/x/in_child_and_par": &parFileContent,
  3540  		"childmnt/x/in_par_only":      &parFileContent,
  3541  		"nonchildmnt/something":       &parFileContent,
  3542  	}
  3543  	childEnv := map[string]*string{
  3544  		"x/in_child_only":    &childFileContent,
  3545  		"x/in_child_and_par": &childFileContent,
  3546  	}
  3547  	s.setupBlobs(s.cloud, t, parEnv)
  3548  	s.setupBlobs(childCloud, t, childEnv)
  3549  
  3550  	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
  3551  	s.mount(t, rootMountPath)
  3552  	defer s.umount(t, rootMountPath)
  3553  	// Files under /tmp/fusetesting/ should all be from goofys root.
  3554  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
  3555  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
  3556  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3557  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
  3558  
  3559  	childMount := &Mount{"childmnt", childCloud, "", false}
  3560  	s.fs.Mount(childMount)
  3561  	// Now files under /tmp/fusetesting/childmnt should be from childBucket
  3562  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", nil)
  3563  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &childFileContent)
  3564  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", &childFileContent)
  3565  	// /tmp/fusetesting/nonchildmnt should be from parent bucket.
  3566  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3567  
  3568  	s.fs.Unmount(childMount.name)
  3569  	// Child is unmounted. So files under /tmp/fusetesting/ should all be from goofys root.
  3570  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
  3571  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
  3572  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3573  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
  3574  }
  3575  
  3576  func (s *GoofysTest) TestUnmountBucketWithChild(t *C) {
  3577  	// This bucket will be mounted at ${goofysroot}/c
  3578  	cBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3579  	cCloud := s.newBackend(t, cBucket, true)
  3580  
  3581  	// This bucket will be mounted at ${goofysroot}/c/c
  3582  	ccBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3583  	ccCloud := s.newBackend(t, ccBucket, true)
  3584  
  3585  	pFileContent := "parent"
  3586  	cFileContent := "child"
  3587  	ccFileContent := "childchild"
  3588  	pEnv := map[string]*string{
  3589  		"c/c/x/foo": &pFileContent,
  3590  	}
  3591  	cEnv := map[string]*string{
  3592  		"c/x/foo": &cFileContent,
  3593  	}
  3594  	ccEnv := map[string]*string{
  3595  		"x/foo": &ccFileContent,
  3596  	}
  3597  
  3598  	s.setupBlobs(s.cloud, t, pEnv)
  3599  	s.setupBlobs(cCloud, t, cEnv)
  3600  	s.setupBlobs(ccCloud, t, ccEnv)
  3601  
  3602  	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
  3603  	s.mount(t, rootMountPath)
  3604  	defer s.umount(t, rootMountPath)
3605  	// c/c/x/foo should come from the root mount.
  3606  	verifyFileData(t, rootMountPath, "c/c/x/foo", &pFileContent)
  3607  
  3608  	cMount := &Mount{"c", cCloud, "", false}
  3609  	s.fs.Mount(cMount)
3610  	// c/c/x/foo should come from the "c" mount.
  3611  	verifyFileData(t, rootMountPath, "c/c/x/foo", &cFileContent)
  3612  
  3613  	ccMount := &Mount{"c/c", ccCloud, "", false}
  3614  	s.fs.Mount(ccMount)
3615  	// c/c/x/foo should come from the "c/c" mount.
  3616  	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
  3617  
  3618  	s.fs.Unmount(cMount.name)
3619  	// c/c/x/foo should still come from the "c/c" mount.
  3620  	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
  3621  }
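
// The sequence above depends on the most specific mount winning, and on a
// child mount surviving its parent's unmount. A hedged sketch of
// longest-prefix resolution over mount names (illustrative only; goofys
// resolves mounts through the inode tree rather than a scan like this):
func exampleResolveMount(mounts []*Mount, path string) *Mount {
	var best *Mount
	for _, m := range mounts {
		if path == m.name || strings.HasPrefix(path, m.name+"/") {
			// prefer the longest (most specific) matching mount name
			if best == nil || len(m.name) > len(best.name) {
				best = m
			}
		}
	}
	return best
}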
  3622  
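// TestRmImplicitDir removes a directory that exists only implicitly (as a
// prefix of other keys, with no directory marker blob) while the process cwd
// is inside it, then checks that the root listing no longer shows dir2.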
  3623  func (s *GoofysTest) TestRmImplicitDir(t *C) {
  3624  	mountPoint := "/tmp/mnt" + s.fs.bucket
  3625  
  3626  	s.mount(t, mountPoint)
  3627  	defer s.umount(t, mountPoint)
  3628  
  3629  	defer os.Chdir("/")
  3630  
  3631  	dir, err := os.Open(mountPoint + "/dir2")
  3632  	t.Assert(err, IsNil)
  3633  	defer dir.Close()
  3634  
  3635  	err = dir.Chdir()
  3636  	t.Assert(err, IsNil)
  3637  
  3638  	err = os.RemoveAll(mountPoint + "/dir2")
  3639  	t.Assert(err, IsNil)
  3640  
  3641  	root, err := os.Open(mountPoint)
  3642  	t.Assert(err, IsNil)
  3643  	defer root.Close()
  3644  
  3645  	files, err := root.Readdirnames(0)
  3646  	t.Assert(err, IsNil)
  3647  	t.Assert(files, DeepEquals, []string{
  3648  		"dir1", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero",
  3649  	})
  3650  }
  3651  
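// TestMount mounts the test bucket and then blocks until SIGINT/SIGTERM,
// which makes it useful for manually poking at a live mount; set MOUNT=false
// to skip it.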
  3652  func (s *GoofysTest) TestMount(t *C) {
  3653  	if os.Getenv("MOUNT") == "false" {
  3654  		t.Skip("Not mounting")
  3655  	}
  3656  
  3657  	mountPoint := "/tmp/mnt" + s.fs.bucket
  3658  
  3659  	s.mount(t, mountPoint)
  3660  	defer s.umount(t, mountPoint)
  3661  
  3662  	log.Printf("Mounted at %v", mountPoint)
  3663  
  3664  	c := make(chan os.Signal, 2)
  3665  	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
  3666  	<-c
  3667  }
  3668  
  3669  // Checks if 2 sorted lists are equal. Returns a helpful error if they differ.
  3670  func checkSortedListsAreEqual(l1, l2 []string) error {
  3671  	i1, i2 := 0, 0
  3672  	onlyl1, onlyl2 := []string{}, []string{}
  3673  	for i1 < len(l1) && i2 < len(l2) {
  3674  		if l1[i1] == l2[i2] {
  3675  			i1++
  3676  			i2++
  3677  		} else if l1[i1] < l2[i2] {
  3678  			onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
  3679  			i1++
  3680  		} else {
  3681  			onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
  3682  			i2++
  3683  		}
  3684  
  3685  	}
  3686  	for ; i1 < len(l1); i1++ {
  3687  		onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
  3688  	}
  3689  	for ; i2 < len(l2); i2++ {
  3690  		onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
  3691  	}
  3692  
  3693  	if len(onlyl1)+len(onlyl2) == 0 {
  3694  		return nil
  3695  	}
  3696  	toString := func(l []string) string {
  3697  		ret := []string{}
  3698  		// The list can contain a lot of elements. Show only ten and say
  3699  		// "and x more".
  3700  		for i := 0; i < len(l) && i < 10; i++ {
  3701  			ret = append(ret, l[i])
  3702  		}
  3703  		if len(ret) < len(l) {
  3704  			ret = append(ret, fmt.Sprintf("and %d more", len(l)-len(ret)))
  3705  		}
  3706  		return strings.Join(ret, ", ")
  3707  	}
  3708  	return fmt.Errorf("only l1: %+v, only l2: %+v",
  3709  		toString(onlyl1), toString(onlyl2))
  3710  }
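
// A quick, hedged illustration of checkSortedListsAreEqual's report format:
// differing elements are rendered as "index:value" and grouped by the list
// they are unique to. The inputs here are made up.
func exampleCheckSortedLists() {
	err := checkSortedListsAreEqual(
		[]string{"a", "b", "c"},
		[]string{"a", "c", "d"},
	)
	// prints: only l1: 1:b, only l2: 2:d
	fmt.Println(err)
}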
  3711  
  3712  func (s *GoofysTest) TestReadDirDash(t *C) {
  3713  	if s.azurite {
3714  		t.Skip("azurite doesn't paginate")
  3715  	}
  3716  	root := s.getRoot(t)
  3717  	root.dir.mountPrefix = "prefix"
  3718  
  3719  	// SETUP
  3720  	// Add the following blobs
  3721  	// - prefix/2019/1
  3722  	// - prefix/2019-0000 to prefix/2019-4999
  3723  	// - prefix/20190000 to prefix/20194999
3724  	// Fetching these will take 3 pages in Azure (page size 5k) and 11 pages
3725  	// in Amazon (page size 1k).
  3726  	// This setup will verify that we paginate and return results correctly before and after
  3727  	// seeing all contents that have a '-' ('-' < '/'). For more context read the comments in
  3728  	// dir.go::listBlobsSafe.
  3729  	blobs := make(map[string]*string)
  3730  	expect := []string{"2019"}
  3731  	blobs["prefix/2019/1"] = nil
  3732  	for i := 0; i < 5000; i++ {
  3733  		name := fmt.Sprintf("2019-%04d", i)
  3734  		expect = append(expect, name)
  3735  		blobs["prefix/"+name] = nil
  3736  	}
  3737  	for i := 0; i < 5000; i++ {
  3738  		name := fmt.Sprintf("2019%04d", i)
  3739  		expect = append(expect, name)
  3740  		blobs["prefix/"+name] = nil
  3741  	}
  3742  	s.setupBlobs(s.cloud, t, blobs)
  3743  
  3744  	// Read the directory and verify its contents.
  3745  	dh := root.OpenDir()
  3746  	defer dh.CloseDir()
  3747  
  3748  	children := namesOf(s.readDirFully(t, dh))
  3749  	t.Assert(checkSortedListsAreEqual(children, expect), IsNil)
  3750  }
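
// The corner TestReadDirDash exercises comes from byte ordering: '-' (0x2d)
// sorts before '/' (0x2f), which sorts before '0' (0x30), so "2019-..." keys
// list before "2019/..." keys, which list before "2019...." keys. A tiny
// self-contained check of that ordering:
func exampleDashSlashOrdering() bool {
	keys := []string{"20190000", "2019/1", "2019-0000"}
	sort.Strings(keys)
	// after sorting: ["2019-0000", "2019/1", "20190000"]
	return keys[0] == "2019-0000" && keys[1] == "2019/1"
}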
  3751  
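// TestIssue474 guards against re-listing an expired directory dropping the
// children that an earlier sibling listing had slurped in (see the comments
// inside the test; the issue number comes from the test name).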
  3752  func (s *GoofysTest) TestIssue474(t *C) {
  3753  	s.fs.flags.TypeCacheTTL = 1 * time.Second
  3754  	s.fs.flags.Cheap = true
  3755  
  3756  	p := "this_test/"
  3757  	root := s.getRoot(t)
  3758  	root.dir.mountPrefix = "this_test/"
  3759  	root.dir.seqOpenDirScore = 2
  3760  
  3761  	blobs := make(map[string]*string)
  3762  
  3763  	in := []string{
  3764  		"1/a/b",
  3765  		"2/c/d",
  3766  	}
  3767  
  3768  	for _, s := range in {
  3769  		blobs[p+s] = nil
  3770  	}
  3771  
  3772  	s.setupBlobs(s.cloud, t, blobs)
  3773  
  3774  	dir1, err := s.LookUpInode(t, "1")
  3775  	t.Assert(err, IsNil)
  3776  	// this would list 1/ and slurp in 2/c/d at the same time
  3777  	s.assertEntries(t, dir1, []string{"a"})
  3778  
3779  	// 2/ will expire and require re-listing. Ensure that we don't
3780  	// remove any of its children as stale while we update it.
  3781  	time.Sleep(time.Second)
  3782  
  3783  	dir2, err := s.LookUpInode(t, "2")
  3784  	t.Assert(err, IsNil)
  3785  	s.assertEntries(t, dir2, []string{"c"})
  3786  }