github.com/adrianjagielak/goofys@v0.24.1-0.20230810095418-94919a5d2254/internal/goofys_test.go

     1  // Copyright 2015 - 2017 Ka-Hing Cheung
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package internal
    16  
    17  import (
    18  	. "github.com/kahing/goofys/api/common"
    19  
    20  	"bufio"
    21  	"bytes"
    22  	"fmt"
    23  	"io"
    24  	"io/ioutil"
    25  	"math/rand"
    26  	"net"
    27  	"os"
    28  	"os/exec"
    29  	"os/signal"
    30  	"os/user"
    31  	"reflect"
    32  	"runtime"
    33  	"sort"
    34  	"strconv"
    35  	"strings"
    36  	"sync"
    37  	"syscall"
    38  	"testing"
    39  	"time"
    40  
    41  	"context"
    42  
    43  	"github.com/aws/aws-sdk-go/aws"
    44  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    45  	"github.com/aws/aws-sdk-go/aws/credentials"
    46  
    47  	"github.com/Azure/azure-storage-blob-go/azblob"
    48  	"github.com/Azure/go-autorest/autorest"
    49  	"github.com/Azure/go-autorest/autorest/azure"
    50  	azureauth "github.com/Azure/go-autorest/autorest/azure/auth"
    51  
    52  	"golang.org/x/sys/unix"
    53  
    54  	"github.com/jacobsa/fuse"
    55  	"github.com/jacobsa/fuse/fuseops"
    56  	"github.com/jacobsa/fuse/fuseutil"
    57  
    58  	"github.com/sirupsen/logrus"
    59  
    60  	"runtime/debug"
    61  
    62  	. "gopkg.in/check.v1"
    63  )
    64  
    65  // so I don't get complaints about unused imports
    66  var ignored = logrus.DebugLevel
    67  
    68  const PerTestTimeout = 10 * time.Minute
    69  
    70  func currentUid() uint32 {
    71  	user, err := user.Current()
    72  	if err != nil {
    73  		panic(err)
    74  	}
    75  
    76  	uid, err := strconv.ParseUint(user.Uid, 10, 32)
    77  	if err != nil {
    78  		panic(err)
    79  	}
    80  
    81  	return uint32(uid)
    82  }
    83  
    84  func currentGid() uint32 {
    85  	user, err := user.Current()
    86  	if err != nil {
    87  		panic(err)
    88  	}
    89  
    90  	gid, err := strconv.ParseUint(user.Gid, 10, 32)
    91  	if err != nil {
    92  		panic(err)
    93  	}
    94  
    95  	return uint32(gid)
    96  }
    97  
    98  type GoofysTest struct {
    99  	fs        *Goofys
   100  	ctx       context.Context
   101  	awsConfig *aws.Config
   102  	cloud     StorageBackend
   103  	emulator  bool
   104  	azurite   bool
   105  
   106  	removeBucket []StorageBackend
   107  
   108  	env map[string]*string
   109  
   110  	timeout chan int
   111  }
   112  
   113  func Test(t *testing.T) {
   114  	TestingT(t)
   115  }
   116  
   117  var _ = Suite(&GoofysTest{})
   118  
   119  func logOutput(t *C, tag string, r io.ReadCloser) {
   120  	in := bufio.NewScanner(r)
   121  
   122  	for in.Scan() {
   123  		t.Log(tag, in.Text())
   124  	}
   125  }
   126  
   127  func waitFor(t *C, addr string) (err error) {
   128  	// wait for it to listen on port
   129  	for i := 0; i < 10; i++ {
   130  		var conn net.Conn
   131  		conn, err = net.Dial("tcp", addr)
   132  		if err == nil {
   133  			// we are done!
   134  			conn.Close()
   135  			return
   136  		} else {
   137  			t.Logf("Could not connect: %v", err)
   138  			time.Sleep(100 * time.Millisecond)
   139  		}
   140  	}
   141  
   142  	return
   143  }
   144  
   145  func (t *GoofysTest) deleteBlobsParallelly(cloud StorageBackend, blobs []string) error {
   146  	sem := make(semaphore, 100)
   147  	sem.P(100)
   148  	var err error
   149  	for _, blobOuter := range blobs {
   150  		sem.V(1)
   151  		go func(blob string) {
   152  			defer sem.P(1)
   153  			_, localerr := cloud.DeleteBlob(&DeleteBlobInput{blob})
   154  			if localerr != nil && localerr != syscall.ENOENT {
   155  				err = localerr
   156  			}
   157  		}(blobOuter)
   158  		if err != nil {
   159  			break
   160  		}
   161  	}
   162  	sem.V(100)
   163  	return err
   164  }
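
        // The helper above is this suite's throttling idiom: the package's
        // channel-backed semaphore is filled with P(n) and drained with V(n),
        // so V(1) before each goroutine bounds concurrency, and a final
        // V(total) waits for everything to finish. (The write to the shared
        // err is racy; the tests tolerate that.) Below is a minimal
        // standalone sketch of the same pattern; runThrottled is an assumed,
        // illustrative name and is not used by the suite:
        func runThrottled(tasks []func(), concurrency int) {
        	sem := make(semaphore, concurrency)
        	sem.P(concurrency) // deposit all tokens
        	for _, task := range tasks {
        		sem.V(1) // take a token; blocks while `concurrency` goroutines run
        		go func(f func()) {
        			defer sem.P(1) // return the token when done
        			f()
        		}(task)
        	}
        	sem.V(concurrency) // taking back every token waits for all tasks
        }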
   165  
   166  // groupByDecresingDepths takes a slice of path strings and returns the paths as
   167  // groups where each group has the same `depth` - depth(a/b/c)=2, depth(a/b/)=1
   168  // The groups are returned in decreasing order of depths.
   169  // - Inp: [] Out: []
   170  // - Inp: ["a/b1/", "a/b/c1", "a/b2", "a/b/c2"]
   171  //   Out: [["a/b/c1", "a/b/c2"], ["a/b1/", "a/b2"]]
   172  // - Inp: ["a/b1/", "z/a/b/c1", "a/b2", "z/a/b/c2"]
   173  //   Out: [["z/a/b/c1", "z/a/b/c2"], ["a/b1/", "a/b2"]]
   174  func groupByDecresingDepths(items []string) [][]string {
   175  	depthToGroup := map[int][]string{}
   176  	for _, item := range items {
   177  		depth := len(strings.Split(strings.TrimRight(item, "/"), "/"))
   178  		if _, ok := depthToGroup[depth]; !ok {
   179  			depthToGroup[depth] = []string{}
   180  		}
   181  		depthToGroup[depth] = append(depthToGroup[depth], item)
   182  	}
   183  	decreasingDepths := []int{}
   184  	for depth := range depthToGroup {
   185  		decreasingDepths = append(decreasingDepths, depth)
   186  	}
   187  	sort.Sort(sort.Reverse(sort.IntSlice(decreasingDepths)))
   188  	ret := [][]string{}
   189  	for _, depth := range decreasingDepths {
   190  		group := depthToGroup[depth]
   191  		ret = append(ret, group)
   192  	}
   193  	return ret
   194  }
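
        // Illustrative note (not used by the tests): the helper counts path
        // segments, one more than the separator-count convention in the doc
        // comment above, but the resulting ordering is identical. For example:
        //
        //   groups := groupByDecresingDepths([]string{"a/b1/", "a/b/c1", "a/b2", "a/b/c2"})
        //   // groups == [][]string{{"a/b/c1", "a/b/c2"}, {"a/b1/", "a/b2"}}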
   195  
   196  func (t *GoofysTest) DeleteADLBlobs(cloud StorageBackend, items []string) error {
   197  	// If we delete a directory that's not empty, ADL{v1|v2} returns failure. That can
   198  	// happen if we want to delete both "dir1" and "dir1/file" but delete them
   199  	// in the wrong order.
   200  	// So we group the items to delete into multiple groups. All items in a group
   201  	// will have the same depth - depth(/a/b/c) = 2, depth(/a/b/) = 1.
   202  	// We then iterate over the groups in descending order of depth and delete them in parallel.
   203  	for _, group := range groupByDecresingDepths(items) {
   204  		err := t.deleteBlobsParallelly(cloud, group)
   205  		if err != nil {
   206  			return err
   207  		}
   208  	}
   209  	return nil
   210  }
   211  
   212  func (s *GoofysTest) selectTestConfig(t *C, flags *FlagStorage) (conf S3Config) {
   213  	(&conf).Init()
   214  
   215  	if hasEnv("AWS") {
   216  		if isTravis() {
   217  			conf.Region = "us-east-1"
   218  		} else {
   219  			conf.Region = "us-west-2"
   220  		}
   221  		profile := os.Getenv("AWS")
   222  		if profile != "" {
   223  			if profile != "-" {
   224  				conf.Profile = profile
   225  			} else {
   226  				conf.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
   227  				conf.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
   228  			}
   229  		}
   230  
   231  		conf.BucketOwner = os.Getenv("BUCKET_OWNER")
   232  		if conf.BucketOwner == "" {
   233  			panic("BUCKET_OWNER is required on AWS")
   234  		}
   235  	} else if hasEnv("GCS") {
   236  		conf.Region = "us-west1"
   237  		conf.Profile = os.Getenv("GCS")
   238  		flags.Endpoint = "http://storage.googleapis.com"
   239  	} else if hasEnv("MINIO") {
   240  		conf.Region = "us-east-1"
   241  		conf.AccessKey = "Q3AM3UQ867SPQQA43P2F"
   242  		conf.SecretKey = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
   243  		flags.Endpoint = "https://play.minio.io:9000"
   244  	} else {
   245  		s.emulator = true
   246  
   247  		conf.Region = "us-west-2"
   248  		conf.AccessKey = "foo"
   249  		conf.SecretKey = "bar"
   250  		flags.Endpoint = "http://127.0.0.1:8080"
   251  	}
   252  
   253  	return
   254  }
   255  
   256  func (s *GoofysTest) waitForEmulator(t *C) {
   257  	if s.emulator {
   258  		addr := "127.0.0.1:8080"
   259  
   260  		err := waitFor(t, addr)
   261  		t.Assert(err, IsNil)
   262  	}
   263  }
   264  
   265  func (s *GoofysTest) SetUpSuite(t *C) {
   266  }
   267  
   268  func (s *GoofysTest) deleteBucket(cloud StorageBackend) error {
   269  	param := &ListBlobsInput{}
   270  
   271  	// Azure needs special handling.
   272  	azureKeysToRemove := make([]string, 0)
   273  	for {
   274  		resp, err := cloud.ListBlobs(param)
   275  		if err != nil {
   276  			return err
   277  		}
   278  
   279  		keysToRemove := []string{}
   280  		for _, o := range resp.Items {
   281  			keysToRemove = append(keysToRemove, *o.Key)
   282  		}
   283  		if len(keysToRemove) != 0 {
   284  			switch cloud.(type) {
   285  			case *ADLv1, *ADLv2, *AZBlob:
   286  			// ADLv{1|2} and AZBlob (sometimes) support directories, so a dir can be removed only
   287  				// after the dir is empty. So we will remove the blobs in reverse depth order via
   288  				// DeleteADLBlobs after this for loop.
   289  				azureKeysToRemove = append(azureKeysToRemove, keysToRemove...)
   290  			default:
   291  				_, err = cloud.DeleteBlobs(&DeleteBlobsInput{Items: keysToRemove})
   292  				if err != nil {
   293  					return err
   294  				}
   295  			}
   296  		}
   297  		if resp.IsTruncated {
   298  			param.ContinuationToken = resp.NextContinuationToken
   299  		} else {
   300  			break
   301  		}
   302  	}
   303  
   304  	if len(azureKeysToRemove) != 0 {
   305  		err := s.DeleteADLBlobs(cloud, azureKeysToRemove)
   306  		if err != nil {
   307  			return err
   308  		}
   309  	}
   310  
   311  	_, err := cloud.RemoveBucket(&RemoveBucketInput{})
   312  	return err
   313  }
   314  
   315  func (s *GoofysTest) TearDownTest(t *C) {
   316  	close(s.timeout)
   317  	s.timeout = nil
   318  
   319  	for _, cloud := range s.removeBucket {
   320  		err := s.deleteBucket(cloud)
   321  		t.Assert(err, IsNil)
   322  	}
   323  	s.removeBucket = nil
   324  }
   325  
   326  func (s *GoofysTest) removeBlob(cloud StorageBackend, t *C, blobPath string) {
   327  	params := &DeleteBlobInput{
   328  		Key: blobPath,
   329  	}
   330  	_, err := cloud.DeleteBlob(params)
   331  	t.Assert(err, IsNil)
   332  }
   333  
   334  func (s *GoofysTest) setupBlobs(cloud StorageBackend, t *C, env map[string]*string) {
   335  
   336  	// concurrency = 100
   337  	throttler := make(semaphore, 100)
   338  	throttler.P(100)
   339  
   340  	var globalErr error
   341  	for path, c := range env {
   342  		throttler.V(1)
   343  		go func(path string, content *string) {
   344  			dir := false
   345  			if content == nil {
   346  				if strings.HasSuffix(path, "/") {
   347  					if cloud.Capabilities().DirBlob {
   348  						path = strings.TrimRight(path, "/")
   349  					}
   350  					dir = true
   351  					content = PString("")
   352  				} else {
   353  					content = &path
   354  				}
   355  			}
   356  			defer throttler.P(1)
   357  			params := &PutBlobInput{
   358  				Key:  path,
   359  				Body: bytes.NewReader([]byte(*content)),
   360  				Size: PUInt64(uint64(len(*content))),
   361  				Metadata: map[string]*string{
   362  					"name": aws.String(path + "+/#%00"),
   363  				},
   364  				DirBlob: dir,
   365  			}
   366  
   367  			_, err := cloud.PutBlob(params)
   368  			if err != nil {
   369  				globalErr = err
   370  			}
   371  			t.Assert(err, IsNil)
   372  		}(path, c)
   373  	}
   374  	throttler.V(100)
   375  	throttler = make(semaphore, 100)
   376  	throttler.P(100)
   377  	t.Assert(globalErr, IsNil)
   378  
   379  	// double check, except on AWS S3, because there we sometimes
   380  	// hit 404 NoSuchBucket and there's no way to distinguish that
   381  	// from 404 KeyNotFound
   382  	if !hasEnv("AWS") {
   383  		for path, c := range env {
   384  			throttler.V(1)
   385  			go func(path string, content *string) {
   386  				defer throttler.P(1)
   387  				params := &HeadBlobInput{Key: path}
   388  				res, err := cloud.HeadBlob(params)
   389  				t.Assert(err, IsNil)
   390  				if content != nil {
   391  					t.Assert(res.Size, Equals, uint64(len(*content)))
   392  				} else if strings.HasSuffix(path, "/") || path == "zero" {
   393  					t.Assert(res.Size, Equals, uint64(0))
   394  				} else {
   395  					t.Assert(res.Size, Equals, uint64(len(path)))
   396  				}
   397  			}(path, c)
   398  		}
   399  		throttler.V(100)
   400  		t.Assert(globalErr, IsNil)
   401  	}
   402  }
   403  
   404  func (s *GoofysTest) setupEnv(t *C, env map[string]*string, public bool) {
   405  	if public {
   406  		if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
   407  			s3.config.ACL = "public-read"
   408  		} else {
   409  			t.Error("Not S3 backend")
   410  		}
   411  	}
   412  
   413  	_, err := s.cloud.MakeBucket(&MakeBucketInput{})
   414  	t.Assert(err, IsNil)
   415  
   416  	if !s.emulator {
   417  		//time.Sleep(time.Second)
   418  	}
   419  
   420  	s.setupBlobs(s.cloud, t, env)
   421  
   422  	t.Log("setupEnv done")
   423  }
   424  
   425  func (s *GoofysTest) setupDefaultEnv(t *C, public bool) {
   426  	s.env = map[string]*string{
   427  		"file1":           nil,
   428  		"file2":           nil,
   429  		"dir1/file3":      nil,
   430  		"dir2/dir3/":      nil,
   431  		"dir2/dir3/file4": nil,
   432  		"dir4/":           nil,
   433  		"dir4/file5":      nil,
   434  		"empty_dir/":      nil,
   435  		"empty_dir2/":     nil,
   436  		"zero":            PString(""),
   437  	}
   438  
   439  	s.setupEnv(t, s.env, public)
   440  }
   441  
   442  func (s *GoofysTest) setUpTestTimeout(t *C, timeout time.Duration) {
   443  	if s.timeout != nil {
   444  		close(s.timeout)
   445  	}
   446  	s.timeout = make(chan int)
   447  	debug.SetTraceback("all")
   448  	started := time.Now()
   449  
   450  	go func() {
   451  		select {
   452  		case _, ok := <-s.timeout:
   453  			if !ok {
   454  				return
   455  			}
   456  		case <-time.After(timeout):
   457  			panic(fmt.Sprintf("timeout %v reached. Started %v now %v",
   458  				timeout, started, time.Now()))
   459  		}
   460  	}()
   461  }
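
        // setUpTestTimeout races a timer against s.timeout, which
        // TearDownTest closes. A minimal standalone sketch of the same
        // watchdog pattern (startWatchdog is an assumed, illustrative name
        // and is not used by the suite):
        func startWatchdog(timeout time.Duration) (stop func()) {
        	done := make(chan struct{})
        	go func() {
        		select {
        		case <-done:
        			// finished in time
        		case <-time.After(timeout):
        			panic("test timed out")
        		}
        	}()
        	return func() { close(done) }
        }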
   462  
   463  func (s *GoofysTest) SetUpTest(t *C) {
   464  	log.Infof("Starting at %v", time.Now())
   465  
   466  	s.setUpTestTimeout(t, PerTestTimeout)
   467  
   468  	var bucket string
   469  	mount := os.Getenv("MOUNT")
   470  
   471  	if mount != "false" {
   472  		bucket = mount
   473  	} else {
   474  		bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
   475  	}
   476  	uid, gid := MyUserAndGroup()
   477  	flags := &FlagStorage{
   478  		DirMode:     0700,
   479  		FileMode:    0700,
   480  		Uid:         uint32(uid),
   481  		Gid:         uint32(gid),
   482  		HTTPTimeout: 30 * time.Second,
   483  	}
   484  
   485  	cloud := os.Getenv("CLOUD")
   486  
   487  	if cloud == "s3" {
   488  		s.emulator = !hasEnv("AWS")
   489  		s.waitForEmulator(t)
   490  
   491  		conf := s.selectTestConfig(t, flags)
   492  		flags.Backend = &conf
   493  
   494  		s3, err := NewS3(bucket, flags, &conf)
   495  		t.Assert(err, IsNil)
   496  
   497  		s.cloud = s3
   498  		s3.aws = hasEnv("AWS")
   499  		if s3.aws {
   500  			s.cloud = NewS3BucketEventualConsistency(s3)
   501  		}
   502  
   503  		if s.emulator {
   504  			s3.Handlers.Sign.Clear()
   505  			s3.Handlers.Sign.PushBack(SignV2)
   506  			s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
   507  		}
   508  		_, err = s3.ListBuckets(nil)
   509  		t.Assert(err, IsNil)
   510  
   511  	} else if cloud == "gcs3" {
   512  		conf := s.selectTestConfig(t, flags)
   513  		flags.Backend = &conf
   514  
   515  		var err error
   516  		s.cloud, err = NewGCS3(bucket, flags, &conf)
   517  		t.Assert(s.cloud, NotNil)
   518  		t.Assert(err, IsNil)
   519  	} else if cloud == "azblob" {
   520  		config, err := AzureBlobConfig(os.Getenv("ENDPOINT"), "", "blob")
   521  		t.Assert(err, IsNil)
   522  
   523  		if config.Endpoint == AzuriteEndpoint {
   524  			s.azurite = true
   525  			s.emulator = true
   526  			s.waitForEmulator(t)
   527  		}
   528  
   529  		// Azurite's SAS is buggy, ex: https://github.com/Azure/Azurite/issues/216
   530  		if os.Getenv("SAS_EXPIRE") != "" {
   531  			expire, err := time.ParseDuration(os.Getenv("SAS_EXPIRE"))
   532  			t.Assert(err, IsNil)
   533  
   534  			config.TokenRenewBuffer = expire / 2
   535  			credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
   536  			t.Assert(err, IsNil)
   537  
   538  			// test sas token config
   539  			config.SasToken = func() (string, error) {
   540  				sasQueryParams, err := azblob.AccountSASSignatureValues{
   541  					Protocol:   azblob.SASProtocolHTTPSandHTTP,
   542  					StartTime:  time.Now().UTC().Add(-1 * time.Hour),
   543  					ExpiryTime: time.Now().UTC().Add(expire),
   544  					Services:   azblob.AccountSASServices{Blob: true}.String(),
   545  					ResourceTypes: azblob.AccountSASResourceTypes{
   546  						Service:   true,
   547  						Container: true,
   548  						Object:    true,
   549  					}.String(),
   550  					Permissions: azblob.AccountSASPermissions{
   551  						Read:   true,
   552  						Write:  true,
   553  						Delete: true,
   554  						List:   true,
   555  						Create: true,
   556  					}.String(),
   557  				}.NewSASQueryParameters(credential)
   558  				if err != nil {
   559  					return "", err
   560  				}
   561  				return sasQueryParams.Encode(), nil
   562  			}
   563  		}
   564  
   565  		flags.Backend = &config
   566  
   567  		s.cloud, err = NewAZBlob(bucket, &config)
   568  		t.Assert(err, IsNil)
   569  		t.Assert(s.cloud, NotNil)
   570  	} else if cloud == "adlv1" {
   571  		cred := azureauth.NewClientCredentialsConfig(
   572  			os.Getenv("ADLV1_CLIENT_ID"),
   573  			os.Getenv("ADLV1_CLIENT_CREDENTIAL"),
   574  			os.Getenv("ADLV1_TENANT_ID"))
   575  		auth, err := cred.Authorizer()
   576  		t.Assert(err, IsNil)
   577  
   578  		config := ADLv1Config{
   579  			Endpoint:   os.Getenv("ENDPOINT"),
   580  			Authorizer: auth,
   581  		}
   582  		config.Init()
   583  
   584  		flags.Backend = &config
   585  
   586  		s.cloud, err = NewADLv1(bucket, flags, &config)
   587  		t.Assert(err, IsNil)
   588  		t.Assert(s.cloud, NotNil)
   589  	} else if cloud == "adlv2" {
   590  		var err error
   591  		var auth autorest.Authorizer
   592  
   593  		if os.Getenv("AZURE_STORAGE_ACCOUNT") != "" && os.Getenv("AZURE_STORAGE_KEY") != "" {
   594  			auth = &AZBlobConfig{
   595  				AccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"),
   596  				AccountKey:  os.Getenv("AZURE_STORAGE_KEY"),
   597  			}
   598  		} else {
   599  			cred := azureauth.NewClientCredentialsConfig(
   600  				os.Getenv("ADLV2_CLIENT_ID"),
   601  				os.Getenv("ADLV2_CLIENT_CREDENTIAL"),
   602  				os.Getenv("ADLV2_TENANT_ID"))
   603  			cred.Resource = azure.PublicCloud.ResourceIdentifiers.Storage
   604  			auth, err = cred.Authorizer()
   605  			t.Assert(err, IsNil)
   606  		}
   607  
   608  		config := ADLv2Config{
   609  			Endpoint:   os.Getenv("ENDPOINT"),
   610  			Authorizer: auth,
   611  		}
   612  
   613  		flags.Backend = &config
   614  
   615  		s.cloud, err = NewADLv2(bucket, flags, &config)
   616  		t.Assert(err, IsNil)
   617  		t.Assert(s.cloud, NotNil)
   618  	} else if cloud == "gcs" {
   619  		config := NewGCSConfig()
   620  		t.Assert(config, NotNil)
   621  
   622  		flags.Backend = config
   623  		var err error
   624  		s.cloud, err = NewGCS(bucket, config)
   625  		t.Assert(err, IsNil)
   626  		t.Assert(s.cloud, NotNil)
   627  	} else {
   628  		t.Fatal("Unsupported backend")
   629  	}
   630  
   631  	if mount == "false" {
   632  		s.removeBucket = append(s.removeBucket, s.cloud)
   633  		s.setupDefaultEnv(t, false)
   634  	} else {
   635  		_, err := s.cloud.MakeBucket(&MakeBucketInput{})
   636  		if err == fuse.EEXIST {
   637  			err = nil
   638  		}
   639  		t.Assert(err, IsNil)
   640  	}
   641  
   642  	if hasEnv("AWS") {
   643  		s.fs = newGoofys(context.Background(), bucket, flags,
   644  			func(bucket string, flags *FlagStorage) (StorageBackend, error) {
   645  				cloud, err := NewBackend(bucket, flags)
   646  				if err != nil {
   647  					return nil, err
   648  				}
   649  
   650  				return NewS3BucketEventualConsistency(cloud.(*S3Backend)), nil
   651  			})
   652  	} else {
   653  		s.fs = NewGoofys(context.Background(), bucket, flags)
   654  	}
   655  	t.Assert(s.fs, NotNil)
   656  
   657  	s.ctx = context.Background()
   658  
   659  	if hasEnv("GCS") {
   660  		flags.Endpoint = "http://storage.googleapis.com"
   661  	}
   662  }
   663  
   664  func (s *GoofysTest) getRoot(t *C) (inode *Inode) {
   665  	inode = s.fs.inodes[fuseops.RootInodeID]
   666  	t.Assert(inode, NotNil)
   667  	return
   668  }
   669  
   670  func (s *GoofysTest) TestGetRootInode(t *C) {
   671  	root := s.getRoot(t)
   672  	t.Assert(root.Id, Equals, fuseops.InodeID(fuseops.RootInodeID))
   673  }
   674  
   675  func (s *GoofysTest) TestGetRootAttributes(t *C) {
   676  	_, err := s.getRoot(t).GetAttributes()
   677  	t.Assert(err, IsNil)
   678  }
   679  
   680  func (s *GoofysTest) ForgetInode(t *C, inode fuseops.InodeID) {
   681  	err := s.fs.ForgetInode(s.ctx, &fuseops.ForgetInodeOp{Inode: inode})
   682  	t.Assert(err, IsNil)
   683  }
   684  
   685  func (s *GoofysTest) LookUpInode(t *C, name string) (in *Inode, err error) {
   686  	parent := s.getRoot(t)
   687  
   688  	for {
   689  		idx := strings.Index(name, "/")
   690  		if idx == -1 {
   691  			break
   692  		}
   693  
   694  		dirName := name[0:idx]
   695  		name = name[idx+1:]
   696  
   697  		lookup := fuseops.LookUpInodeOp{
   698  			Parent: parent.Id,
   699  			Name:   dirName,
   700  		}
   701  
   702  		err = s.fs.LookUpInode(nil, &lookup)
   703  		if err != nil {
   704  			return
   705  		}
   706  		parent = s.fs.inodes[lookup.Entry.Child]
   707  	}
   708  
   709  	lookup := fuseops.LookUpInodeOp{
   710  		Parent: parent.Id,
   711  		Name:   name,
   712  	}
   713  
   714  	err = s.fs.LookUpInode(nil, &lookup)
   715  	if err != nil {
   716  		return
   717  	}
   718  	in = s.fs.inodes[lookup.Entry.Child]
   719  	return
   720  }
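
        // The helper above resolves a slash-separated path one component at
        // a time with LookUpInodeOp, mirroring the per-segment lookups the
        // kernel would issue. Typical use in the tests below (illustrative):
        //
        //   in, err := s.LookUpInode(t, "dir2/dir3/file4")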
   721  
   722  func (s *GoofysTest) TestSetup(t *C) {
   723  }
   724  
   725  func (s *GoofysTest) TestLookUpInode(t *C) {
   726  	_, err := s.LookUpInode(t, "file1")
   727  	t.Assert(err, IsNil)
   728  
   729  	_, err = s.LookUpInode(t, "fileNotFound")
   730  	t.Assert(err, Equals, fuse.ENOENT)
   731  
   732  	_, err = s.LookUpInode(t, "dir1/file3")
   733  	t.Assert(err, IsNil)
   734  
   735  	_, err = s.LookUpInode(t, "dir2/dir3")
   736  	t.Assert(err, IsNil)
   737  
   738  	_, err = s.LookUpInode(t, "dir2/dir3/file4")
   739  	t.Assert(err, IsNil)
   740  
   741  	_, err = s.LookUpInode(t, "empty_dir")
   742  	t.Assert(err, IsNil)
   743  }
   744  
   745  func (s *GoofysTest) TestPanicWrapper(t *C) {
   746  	debug.SetTraceback("single")
   747  
   748  	fs := FusePanicLogger{s.fs}
   749  	err := fs.GetInodeAttributes(nil, &fuseops.GetInodeAttributesOp{
   750  		Inode: 1234,
   751  	})
   752  	t.Assert(err, Equals, fuse.EIO)
   753  }
   754  
   755  func (s *GoofysTest) TestGetInodeAttributes(t *C) {
   756  	inode, err := s.getRoot(t).LookUp("file1")
   757  	t.Assert(err, IsNil)
   758  
   759  	attr, err := inode.GetAttributes()
   760  	t.Assert(err, IsNil)
   761  	t.Assert(attr.Size, Equals, uint64(len("file1")))
   762  }
   763  
   764  func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []DirHandleEntry) {
   765  	dh.mu.Lock()
   766  	defer dh.mu.Unlock()
   767  
   768  	en, err := dh.ReadDir(fuseops.DirOffset(0))
   769  	t.Assert(err, IsNil)
   770  	t.Assert(en, NotNil)
   771  	t.Assert(en.Name, Equals, ".")
   772  
   773  	en, err = dh.ReadDir(fuseops.DirOffset(1))
   774  	t.Assert(err, IsNil)
   775  	t.Assert(en, NotNil)
   776  	t.Assert(en.Name, Equals, "..")
   777  
   778  	for i := fuseops.DirOffset(2); ; i++ {
   779  		en, err = dh.ReadDir(i)
   780  		t.Assert(err, IsNil)
   781  
   782  		if en == nil {
   783  			return
   784  		}
   785  
   786  		entries = append(entries, *en)
   787  	}
   788  }
   789  
   790  func namesOf(entries []DirHandleEntry) (names []string) {
   791  	for _, en := range entries {
   792  		names = append(names, en.Name)
   793  	}
   794  	return
   795  }
   796  
   797  func (s *GoofysTest) assertEntries(t *C, in *Inode, names []string) {
   798  	dh := in.OpenDir()
   799  	defer dh.CloseDir()
   800  
   801  	t.Assert(namesOf(s.readDirFully(t, dh)), DeepEquals, names)
   802  }
   803  
   804  func (s *GoofysTest) readDirIntoCache(t *C, inode fuseops.InodeID) {
   805  	openDirOp := fuseops.OpenDirOp{Inode: inode}
   806  	err := s.fs.OpenDir(nil, &openDirOp)
   807  	t.Assert(err, IsNil)
   808  
   809  	readDirOp := fuseops.ReadDirOp{
   810  		Inode:  inode,
   811  		Handle: openDirOp.Handle,
   812  		Dst:    make([]byte, 8*1024),
   813  	}
   814  
   815  	err = s.fs.ReadDir(nil, &readDirOp)
   816  	t.Assert(err, IsNil)
   817  }
   818  
   819  func (s *GoofysTest) TestReadDirCacheLookup(t *C) {
   820  	s.fs.flags.StatCacheTTL = 1 * time.Minute
   821  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
   822  
   823  	s.readDirIntoCache(t, fuseops.RootInodeID)
   824  	s.disableS3()
   825  
   826  	// should be cached so lookup should not need to talk to s3
   827  	entries := []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"}
   828  	for _, en := range entries {
   829  		err := s.fs.LookUpInode(nil, &fuseops.LookUpInodeOp{
   830  			Parent: fuseops.RootInodeID,
   831  			Name:   en,
   832  		})
   833  		t.Assert(err, IsNil)
   834  	}
   835  }
   836  
   837  func (s *GoofysTest) TestReadDirWithExternalChanges(t *C) {
   838  	s.fs.flags.TypeCacheTTL = time.Second
   839  
   840  	dir1, err := s.LookUpInode(t, "dir1")
   841  	t.Assert(err, IsNil)
   842  
   843  	defaultEntries := []string{
   844  		"dir1", "dir2", "dir4", "empty_dir",
   845  		"empty_dir2", "file1", "file2", "zero"}
   846  	s.assertEntries(t, s.getRoot(t), defaultEntries)
   847  	// dir1 has file3 and nothing else.
   848  	s.assertEntries(t, dir1, []string{"file3"})
   849  
   850  	// Do the following 'external' changes in s3 without involving goofys.
   851  	// - Remove file1, add file3.
   852  	// - Remove dir1/file3. Given that dir1 has just this one file,
   853  	//   we are effectively removing dir1 as well.
   854  	s.removeBlob(s.cloud, t, "file1")
   855  	s.setupBlobs(s.cloud, t, map[string]*string{"file3": nil})
   856  	s.removeBlob(s.cloud, t, "dir1/file3")
   857  
   858  	time.Sleep(s.fs.flags.TypeCacheTTL)
   859  	// newEntries = `defaultEntries` - dir1 - file1 + file3.
   860  	newEntries := []string{
   861  		"dir2", "dir4", "empty_dir", "empty_dir2",
   862  		"file2", "file3", "zero"}
   863  	if s.cloud.Capabilities().DirBlob {
   864  		// dir1 is not automatically deleted
   865  		newEntries = append([]string{"dir1"}, newEntries...)
   866  	}
   867  	s.assertEntries(t, s.getRoot(t), newEntries)
   868  }
   869  
   870  func (s *GoofysTest) TestReadDir(t *C) {
   871  	// test listing /
   872  	dh := s.getRoot(t).OpenDir()
   873  	defer dh.CloseDir()
   874  
   875  	s.assertEntries(t, s.getRoot(t), []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"})
   876  
   877  	// test listing dir1/
   878  	in, err := s.LookUpInode(t, "dir1")
   879  	t.Assert(err, IsNil)
   880  	s.assertEntries(t, in, []string{"file3"})
   881  
   882  	// test listing dir2/
   883  	in, err = s.LookUpInode(t, "dir2")
   884  	t.Assert(err, IsNil)
   885  	s.assertEntries(t, in, []string{"dir3"})
   886  
   887  	// test listing dir2/dir3/
   888  	in, err = s.LookUpInode(t, "dir2/dir3")
   889  	t.Assert(err, IsNil)
   890  	s.assertEntries(t, in, []string{"file4"})
   891  }
   892  
   893  func (s *GoofysTest) TestReadFiles(t *C) {
   894  	parent := s.getRoot(t)
   895  	dh := parent.OpenDir()
   896  	defer dh.CloseDir()
   897  
   898  	var entries []*DirHandleEntry
   899  
   900  	dh.mu.Lock()
   901  	for i := fuseops.DirOffset(0); ; i++ {
   902  		en, err := dh.ReadDir(i)
   903  		t.Assert(err, IsNil)
   904  
   905  		if en == nil {
   906  			break
   907  		}
   908  
   909  		entries = append(entries, en)
   910  	}
   911  	dh.mu.Unlock()
   912  
   913  	for _, en := range entries {
   914  		if en.Type == fuseutil.DT_File {
   915  			in, err := parent.LookUp(en.Name)
   916  			t.Assert(err, IsNil)
   917  
   918  			fh, err := in.OpenFile(fuseops.OpContext{uint32(os.Getpid())})
   919  			t.Assert(err, IsNil)
   920  
   921  			buf := make([]byte, 4096)
   922  
   923  			nread, err := fh.ReadFile(0, buf)
   924  			if en.Name == "zero" {
   925  				t.Assert(nread, Equals, 0)
   926  			} else {
   927  				t.Assert(nread, Equals, len(en.Name))
   928  				buf = buf[0:nread]
   929  				t.Assert(string(buf), Equals, en.Name)
   930  			}
   931  		} else {
   932  
   933  		}
   934  	}
   935  }
   936  
   937  func (s *GoofysTest) TestReadOffset(t *C) {
   938  	root := s.getRoot(t)
   939  	f := "file1"
   940  
   941  	in, err := root.LookUp(f)
   942  	t.Assert(err, IsNil)
   943  
   944  	fh, err := in.OpenFile(fuseops.OpContext{uint32(os.Getpid())})
   945  	t.Assert(err, IsNil)
   946  
   947  	buf := make([]byte, 4096)
   948  
   949  	nread, err := fh.ReadFile(1, buf)
   950  	t.Assert(err, IsNil)
   951  	t.Assert(nread, Equals, len(f)-1)
   952  	t.Assert(string(buf[0:nread]), DeepEquals, f[1:])
   953  
   954  	r := rand.New(rand.NewSource(time.Now().UnixNano()))
   955  
   956  	for i := 0; i < 3; i++ {
   957  		off := r.Int31n(int32(len(f)))
   958  		nread, err = fh.ReadFile(int64(off), buf)
   959  		t.Assert(err, IsNil)
   960  		t.Assert(nread, Equals, len(f)-int(off))
   961  		t.Assert(string(buf[0:nread]), DeepEquals, f[off:])
   962  	}
   963  }
   964  
   965  func (s *GoofysTest) TestCreateFiles(t *C) {
   966  	fileName := "testCreateFile"
   967  
   968  	_, fh := s.getRoot(t).Create(fileName, fuseops.OpContext{uint32(os.Getpid())})
   969  
   970  	err := fh.FlushFile()
   971  	t.Assert(err, IsNil)
   972  
   973  	resp, err := s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   974  	t.Assert(err, IsNil)
   975  	t.Assert(resp.HeadBlobOutput.Size, DeepEquals, uint64(0))
   976  	defer resp.Body.Close()
   977  
   978  	_, err = s.getRoot(t).LookUp(fileName)
   979  	t.Assert(err, IsNil)
   980  
   981  	fileName = "testCreateFile2"
   982  	s.testWriteFile(t, fileName, 1, 128*1024)
   983  
   984  	inode, err := s.getRoot(t).LookUp(fileName)
   985  	t.Assert(err, IsNil)
   986  
   987  	fh, err = inode.OpenFile(fuseops.OpContext{uint32(os.Getpid())})
   988  	t.Assert(err, IsNil)
   989  
   990  	err = fh.FlushFile()
   991  	t.Assert(err, IsNil)
   992  
   993  	resp, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
   994  	t.Assert(err, IsNil)
   995  	// ADLv1 doesn't return size when we do a GET
   996  	if _, adlv1 := s.cloud.(*ADLv1); !adlv1 {
   997  		t.Assert(resp.HeadBlobOutput.Size, Equals, uint64(1))
   998  	}
   999  	defer resp.Body.Close()
  1000  }
  1001  
  1002  func (s *GoofysTest) TestRenameWithSpecialChar(t *C) {
  1003  	fileName := "foo+"
  1004  	s.testWriteFile(t, fileName, 1, 128*1024)
  1005  
  1006  	inode, err := s.getRoot(t).LookUp(fileName)
  1007  	t.Assert(err, IsNil)
  1008  
  1009  	fh, err := inode.OpenFile(fuseops.OpContext{uint32(os.Getpid())})
  1010  	t.Assert(err, IsNil)
  1011  
  1012  	err = fh.FlushFile()
  1013  	t.Assert(err, IsNil)
  1014  
  1015  	resp, err := s.cloud.GetBlob(&GetBlobInput{Key: fileName})
  1016  	t.Assert(err, IsNil)
  1017  	// ADLv1 doesn't return size when we do a GET
  1018  	if _, adlv1 := s.cloud.(*ADLv1); !adlv1 {
  1019  		t.Assert(resp.HeadBlobOutput.Size, Equals, uint64(1))
  1020  	}
  1021  	defer resp.Body.Close()
  1022  
  1023  	root := s.getRoot(t)
  1024  	err = root.Rename(fileName, root, "foo")
  1025  	t.Assert(err, IsNil)
  1026  }
  1027  
  1028  func (s *GoofysTest) TestUnlink(t *C) {
  1029  	fileName := "file1"
  1030  
  1031  	err := s.getRoot(t).Unlink(fileName)
  1032  	t.Assert(err, IsNil)
  1033  
  1034  	// make sure that it's gone from s3
  1035  	_, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
  1036  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
  1037  }
  1038  
  1039  type FileHandleReader struct {
  1040  	fs     *Goofys
  1041  	fh     *FileHandle
  1042  	offset int64
  1043  }
  1044  
  1045  func (r *FileHandleReader) Read(p []byte) (nread int, err error) {
  1046  	nread, err = r.fh.ReadFile(r.offset, p)
  1047  	r.offset += int64(nread)
  1048  	return
  1049  }
  1050  
  1051  func (r *FileHandleReader) Seek(offset int64, whence int) (int64, error) {
  1052  	switch whence {
  1053  	case 0:
  1054  		r.offset = offset
  1055  	case 1:
  1056  		r.offset += offset
  1057  	default:
  1058  		panic(fmt.Sprintf("unsupported whence: %v", whence))
  1059  	}
  1060  
  1061  	return r.offset, nil
  1062  }
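
        // FileHandleReader adapts a goofys FileHandle to io.Reader and
        // io.Seeker so the tests can drive it with io.LimitReader and
        // CompareReader, e.g. (illustrative only):
        //
        //   fr := &FileHandleReader{s.fs, fh, 0}
        //   _, _ = io.Copy(ioutil.Discard, io.LimitReader(fr, 4096))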
  1063  
  1064  func (s *GoofysTest) testWriteFile(t *C, fileName string, size int64, write_size int) {
  1065  	s.testWriteFileAt(t, fileName, int64(0), size, write_size)
  1066  }
  1067  
  1068  func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, write_size int) {
  1069  	var fh *FileHandle
  1070  	root := s.getRoot(t)
  1071  
  1072  	lookup := fuseops.LookUpInodeOp{
  1073  		Parent: root.Id,
  1074  		Name:   fileName,
  1075  	}
  1076  	err := s.fs.LookUpInode(nil, &lookup)
  1077  	if err != nil {
  1078  		if err == fuse.ENOENT {
  1079  			create := fuseops.CreateFileOp{
  1080  				Parent: root.Id,
  1081  				Name:   fileName,
  1082  			}
  1083  			err = s.fs.CreateFile(nil, &create)
  1084  			t.Assert(err, IsNil)
  1085  
  1086  			fh = s.fs.fileHandles[create.Handle]
  1087  		} else {
  1088  			t.Assert(err, IsNil)
  1089  		}
  1090  	} else {
  1091  		in := s.fs.inodes[lookup.Entry.Child]
  1092  		fh, err = in.OpenFile(fuseops.OpContext{uint32(os.Getpid())})
  1093  		t.Assert(err, IsNil)
  1094  	}
  1095  
  1096  	buf := make([]byte, write_size)
  1097  	nwritten := offset
  1098  
  1099  	src := io.LimitReader(&SeqReader{}, size)
  1100  
  1101  	for {
  1102  		nread, err := src.Read(buf)
  1103  		if err == io.EOF {
  1104  			t.Assert(nwritten, Equals, size)
  1105  			break
  1106  		}
  1107  		t.Assert(err, IsNil)
  1108  
  1109  		err = fh.WriteFile(nwritten, buf[:nread])
  1110  		t.Assert(err, IsNil)
  1111  		nwritten += int64(nread)
  1112  	}
  1113  
  1114  	err = fh.FlushFile()
  1115  	t.Assert(err, IsNil)
  1116  
  1117  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: fileName})
  1118  	t.Assert(err, IsNil)
  1119  	t.Assert(resp.Size, Equals, uint64(size+offset))
  1120  
  1121  	fr := &FileHandleReader{s.fs, fh, offset}
  1122  	diff, err := CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 0)
  1123  	t.Assert(err, IsNil)
  1124  	t.Assert(diff, Equals, -1)
  1125  	t.Assert(fr.offset, Equals, size)
  1126  
  1127  	err = fh.FlushFile()
  1128  	t.Assert(err, IsNil)
  1129  
  1130  	// read again with exactly 4KB reads to catch the aligned read case
  1131  	fr = &FileHandleReader{s.fs, fh, offset}
  1132  	diff, err = CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 4096)
  1133  	t.Assert(err, IsNil)
  1134  	t.Assert(diff, Equals, -1)
  1135  	t.Assert(fr.offset, Equals, size)
  1136  
  1137  	fh.Release()
  1138  }
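
        // Judging by the assertions above, CompareReader returns -1 when the
        // two streams are byte-identical; anything else is treated as a
        // mismatch. The tests rely on that to check that written data
        // round-trips through the backend unchanged.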
  1139  
  1140  func (s *GoofysTest) TestWriteLargeFile(t *C) {
  1141  	s.testWriteFile(t, "testLargeFile", int64(READAHEAD_CHUNK)+1024*1024, 128*1024)
  1142  	s.testWriteFile(t, "testLargeFile2", int64(READAHEAD_CHUNK), 128*1024)
  1143  	s.testWriteFile(t, "testLargeFile3", int64(READAHEAD_CHUNK)+1, 128*1024)
  1144  }
  1145  
  1146  func (s *GoofysTest) TestWriteReallyLargeFile(t *C) {
  1147  	if _, ok := s.cloud.(*S3Backend); ok && s.emulator {
  1148  		t.Skip("seems to be OOM'ing S3proxy 1.8.0")
  1149  	}
  1150  	s.testWriteFile(t, "testLargeFile", 512*1024*1024+1, 128*1024)
  1151  }
  1152  
  1153  func (s *GoofysTest) TestWriteReplicatorThrottle(t *C) {
  1154  	s.fs.replicators = Ticket{Total: 1}.Init()
  1155  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
  1156  }
  1157  
  1158  func (s *GoofysTest) TestReadWriteMinimumMemory(t *C) {
  1159  	if _, ok := s.cloud.(*ADLv1); ok {
  1160  		s.fs.bufferPool.maxBuffers = 4
  1161  	} else {
  1162  		s.fs.bufferPool.maxBuffers = 2
  1163  	}
  1164  	s.fs.bufferPool.computedMaxbuffers = s.fs.bufferPool.maxBuffers
  1165  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
  1166  }
  1167  
  1168  func (s *GoofysTest) TestWriteManyFilesFile(t *C) {
  1169  	var files sync.WaitGroup
  1170  
  1171  	for i := 0; i < 21; i++ {
  1172  		files.Add(1)
  1173  		fileName := "testSmallFile" + strconv.Itoa(i)
  1174  		go func() {
  1175  			defer files.Done()
  1176  			s.testWriteFile(t, fileName, 1, 128*1024)
  1177  		}()
  1178  	}
  1179  
  1180  	files.Wait()
  1181  }
  1182  
  1183  func (s *GoofysTest) testWriteFileNonAlign(t *C) {
  1184  	s.testWriteFile(t, "testWriteFileNonAlign", 6*1024*1024, 128*1024+1)
  1185  }
  1186  
  1187  func (s *GoofysTest) TestReadRandom(t *C) {
  1188  	size := int64(21 * 1024 * 1024)
  1189  
  1190  	s.testWriteFile(t, "testLargeFile", size, 128*1024)
  1191  	in, err := s.LookUpInode(t, "testLargeFile")
  1192  	t.Assert(err, IsNil)
  1193  
  1194  	fh, err := in.OpenFile(fuseops.OpContext{uint32(os.Getpid())})
  1195  	t.Assert(err, IsNil)
  1196  	fr := &FileHandleReader{s.fs, fh, 0}
  1197  
  1198  	src := rand.NewSource(time.Now().UnixNano())
  1199  	truth := &SeqReader{}
  1200  
  1201  	for i := 0; i < 10; i++ {
  1202  		offset := src.Int63() % (size / 2)
  1203  
  1204  		fr.Seek(offset, 0)
  1205  		truth.Seek(offset, 0)
  1206  
  1207  		// read 5MB+1 from that offset
  1208  		nread := int64(5*1024*1024 + 1)
  1209  		diff, err := CompareReader(io.LimitReader(fr, nread), io.LimitReader(truth, nread), 0)
        		t.Assert(err, IsNil)
        		t.Assert(diff, Equals, -1)
  1210  	}
  1211  }
  1212  
  1213  func (s *GoofysTest) TestMkDir(t *C) {
  1214  	_, err := s.LookUpInode(t, "new_dir/file")
  1215  	t.Assert(err, Equals, fuse.ENOENT)
  1216  
  1217  	dirName := "new_dir"
  1218  	inode, err := s.getRoot(t).MkDir(dirName)
  1219  	t.Assert(err, IsNil)
  1220  	t.Assert(*inode.FullName(), Equals, dirName)
  1221  
  1222  	_, err = s.LookUpInode(t, dirName)
  1223  	t.Assert(err, IsNil)
  1224  
  1225  	fileName := "file"
  1226  	_, fh := inode.Create(fileName, fuseops.OpContext{uint32(os.Getpid())})
  1227  
  1228  	err = fh.FlushFile()
  1229  	t.Assert(err, IsNil)
  1230  
  1231  	_, err = s.LookUpInode(t, dirName+"/"+fileName)
  1232  	t.Assert(err, IsNil)
  1233  }
  1234  
  1235  func (s *GoofysTest) TestRmDir(t *C) {
  1236  	root := s.getRoot(t)
  1237  
  1238  	err := root.RmDir("dir1")
  1239  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1240  
  1241  	err = root.RmDir("dir2")
  1242  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1243  
  1244  	err = root.RmDir("empty_dir")
  1245  	t.Assert(err, IsNil)
  1246  
  1247  }
  1248  
  1249  func (s *GoofysTest) TestRenamePreserveMetadata(t *C) {
  1250  	if _, ok := s.cloud.(*ADLv1); ok {
  1251  		t.Skip("ADLv1 doesn't support metadata")
  1252  	}
  1253  	root := s.getRoot(t)
  1254  
  1255  	from, to := "file1", "new_file"
  1256  
  1257  	metadata := make(map[string]*string)
  1258  	metadata["foo"] = aws.String("bar")
  1259  
  1260  	_, err := s.cloud.CopyBlob(&CopyBlobInput{
  1261  		Source:      from,
  1262  		Destination: from,
  1263  		Metadata:    metadata,
  1264  	})
  1265  	t.Assert(err, IsNil)
  1266  
  1267  	err = root.Rename(from, root, to)
  1268  	t.Assert(err, IsNil)
  1269  
  1270  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1271  	t.Assert(err, IsNil)
  1272  	t.Assert(resp.Metadata["foo"], NotNil)
  1273  	t.Assert(*resp.Metadata["foo"], Equals, "bar")
  1274  }
  1275  
  1276  func (s *GoofysTest) TestRenameLarge(t *C) {
  1277  	fileSize := int64(2 * 1024 * 1024 * 1024)
  1278  	// AWS S3 can timeout when renaming large file
  1279  	if _, ok := s.cloud.(*S3Backend); ok && s.emulator {
  1280  		// S3proxy runs out of memory on truly large files. We
  1281  		// want to use a large file to test timeout issues
  1282  		// which wouldn't happen on s3proxy anyway
  1283  		fileSize = 21 * 1024 * 1024
  1284  	}
  1285  
  1286  	s.testWriteFile(t, "large_file", fileSize, 128*1024)
  1287  
  1288  	root := s.getRoot(t)
  1289  
  1290  	from, to := "large_file", "large_file2"
  1291  	err := root.Rename(from, root, to)
  1292  	t.Assert(err, IsNil)
  1293  }
  1294  
  1295  func (s *GoofysTest) TestRenameToExisting(t *C) {
  1296  	root := s.getRoot(t)
  1297  
  1298  	// cache these 2 files first
  1299  	_, err := s.LookUpInode(t, "file1")
  1300  	t.Assert(err, IsNil)
  1301  
  1302  	_, err = s.LookUpInode(t, "file2")
  1303  	t.Assert(err, IsNil)
  1304  
  1305  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1306  		OldParent: root.Id,
  1307  		NewParent: root.Id,
  1308  		OldName:   "file1",
  1309  		NewName:   "file2",
  1310  	})
  1311  	t.Assert(err, IsNil)
  1312  
  1313  	file1 := root.findChild("file1")
  1314  	t.Assert(file1, IsNil)
  1315  
  1316  	file2 := root.findChild("file2")
  1317  	t.Assert(file2, NotNil)
  1318  	t.Assert(*file2.Name, Equals, "file2")
  1319  }
  1320  
  1321  func (s *GoofysTest) TestBackendListPagination(t *C) {
  1322  	if _, ok := s.cloud.(*ADLv1); ok {
  1323  		t.Skip("ADLv1 doesn't have pagination")
  1324  	}
  1325  	if s.azurite {
  1326  		// https://github.com/Azure/Azurite/issues/262
  1327  		t.Skip("Azurite doesn't support pagination")
  1328  	}
  1329  
  1330  	var itemsPerPage int
  1331  	switch s.cloud.Delegate().(type) {
  1332  	case *S3Backend, *GCS3:
  1333  		itemsPerPage = 1000
  1334  	case *AZBlob, *ADLv2:
  1335  		itemsPerPage = 5000
  1336  	case *GCSBackend:
  1337  		itemsPerPage = 1000
  1338  	default:
  1339  		t.Fatalf("unknown backend: %T", s.cloud)
  1340  	}
  1341  
  1342  	root := s.getRoot(t)
  1343  	root.dir.mountPrefix = "this_test/"
  1344  
  1345  	blobs := make(map[string]*string)
  1346  	expect := make([]string, 0)
  1347  	for i := 0; i < itemsPerPage+1; i++ {
  1348  		b := fmt.Sprintf("%08v", i)
  1349  		blobs["this_test/"+b] = nil
  1350  		expect = append(expect, b)
  1351  	}
  1352  
  1353  	switch s.cloud.(type) {
  1354  	case *ADLv1, *ADLv2:
  1355  		// these backends don't support parallel delete, so we
  1356  		// clean up the blobs here ourselves
  1357  		defer func() {
  1358  			var wg sync.WaitGroup
  1359  
  1360  			for b := range blobs {
  1361  				SmallActionsGate.Take(1, true)
  1362  				wg.Add(1)
  1363  
  1364  				go func(key string) {
  1365  					// ignore the error here,
  1366  					// anything we didn't cleanup
  1367  					// will be handled by teardown
  1368  					_, _ = s.cloud.DeleteBlob(&DeleteBlobInput{key})
  1369  					SmallActionsGate.Return(1)
  1370  					wg.Done()
  1371  				}(b)
  1372  			}
  1373  
  1374  			wg.Wait()
  1375  		}()
  1376  	}
  1377  
  1378  	s.setupBlobs(s.cloud, t, blobs)
  1379  
  1380  	dh := root.OpenDir()
  1381  	defer dh.CloseDir()
  1382  
  1383  	children := namesOf(s.readDirFully(t, dh))
  1384  	t.Assert(children, DeepEquals, expect)
  1385  }
  1386  
  1387  func (s *GoofysTest) TestBackendListPrefix(t *C) {
  1388  	res, err := s.cloud.ListBlobs(&ListBlobsInput{
  1389  		Prefix:    PString("random"),
  1390  		Delimiter: PString("/"),
  1391  	})
  1392  	t.Assert(err, IsNil)
  1393  	t.Assert(len(res.Prefixes), Equals, 0)
  1394  	t.Assert(len(res.Items), Equals, 0)
  1395  
  1396  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1397  		Prefix:    PString("empty_dir"),
  1398  		Delimiter: PString("/"),
  1399  	})
  1400  	t.Assert(err, IsNil)
  1401  	t.Assert(len(res.Prefixes), Not(Equals), 0)
  1402  	t.Assert(*res.Prefixes[0].Prefix, Equals, "empty_dir/")
  1403  	t.Assert(len(res.Items), Equals, 0)
  1404  
  1405  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1406  		Prefix:    PString("empty_dir/"),
  1407  		Delimiter: PString("/"),
  1408  	})
  1409  	t.Assert(err, IsNil)
  1410  	t.Assert(len(res.Prefixes), Equals, 0)
  1411  	t.Assert(len(res.Items), Equals, 1)
  1412  	t.Assert(*res.Items[0].Key, Equals, "empty_dir/")
  1413  
  1414  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1415  		Prefix:    PString("file1"),
  1416  		Delimiter: PString("/"),
  1417  	})
  1418  	t.Assert(err, IsNil)
  1419  	t.Assert(len(res.Prefixes), Equals, 0)
  1420  	t.Assert(len(res.Items), Equals, 1)
  1421  	t.Assert(*res.Items[0].Key, Equals, "file1")
  1422  
  1423  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1424  		Prefix:    PString("file1/"),
  1425  		Delimiter: PString("/"),
  1426  	})
  1427  	t.Assert(err, IsNil)
  1428  	t.Assert(len(res.Prefixes), Equals, 0)
  1429  	t.Assert(len(res.Items), Equals, 0)
  1430  
  1431  	// ListBlobs:
  1432  	// - Case1: If the prefix foo/ is not added explicitly, then ListBlobs foo/ might or might not return foo/.
  1433  	//   In the test setup dir2 is not explicitly created.
  1434  	// - Case2: Else, ListBlobs foo/ must return foo/.
  1435  	//   In the test setup dir2/dir3 is explicitly created.
  1436  
  1437  	// ListBlobs:Case1
  1438  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1439  		Prefix:    PString("dir2/"),
  1440  		Delimiter: PString("/"),
  1441  	})
  1442  	t.Assert(err, IsNil)
  1443  	t.Assert(len(res.Prefixes), Equals, 1)
  1444  	t.Assert(*res.Prefixes[0].Prefix, Equals, "dir2/dir3/")
  1445  	if len(res.Items) == 1 {
  1446  		// azblob(with hierarchical ns on), adlv1, adlv2.
  1447  		t.Assert(*res.Items[0].Key, Equals, "dir2/")
  1448  	} else {
  1449  		// s3, azblob(with hierarchical ns off)
  1450  		t.Assert(len(res.Items), Equals, 0)
  1451  	}
  1452  
  1453  	// ListBlobs:Case2
  1454  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1455  		Prefix:    PString("dir2/dir3/"),
  1456  		Delimiter: PString("/"),
  1457  	})
  1458  	t.Assert(err, IsNil)
  1459  	t.Assert(len(res.Prefixes), Equals, 0)
  1460  	t.Assert(len(res.Items), Equals, 2)
  1461  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/")
  1462  	t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4")
  1463  
  1464  	// ListBlobs:Case1
  1465  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1466  		Prefix: PString("dir2/"),
  1467  	})
  1468  	t.Assert(err, IsNil)
  1469  	t.Assert(len(res.Prefixes), Equals, 0)
  1470  	if len(res.Items) == 3 {
  1471  		// azblob(with hierarchical ns on), adlv1, adlv2.
  1472  		t.Assert(*res.Items[0].Key, Equals, "dir2/")
  1473  		t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/")
  1474  		t.Assert(*res.Items[2].Key, Equals, "dir2/dir3/file4")
  1475  	} else {
  1476  		// s3, azblob(with hierarchical ns off)
  1477  		t.Assert(len(res.Items), Equals, 2)
  1478  		t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/")
  1479  		t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4")
  1480  	}
  1481  
  1482  	res, err = s.cloud.ListBlobs(&ListBlobsInput{
  1483  		Prefix: PString("dir2/dir3/file4"),
  1484  	})
  1485  	t.Assert(err, IsNil)
  1486  	t.Assert(len(res.Prefixes), Equals, 0)
  1487  	t.Assert(len(res.Items), Equals, 1)
  1488  	t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/file4")
  1489  }
  1490  
  1491  func (s *GoofysTest) TestRenameDir(t *C) {
  1492  	s.fs.flags.StatCacheTTL = 0
  1493  
  1494  	root := s.getRoot(t)
  1495  
  1496  	err := root.Rename("empty_dir", root, "dir1")
  1497  	t.Assert(err, Equals, fuse.ENOTEMPTY)
  1498  
  1499  	err = root.Rename("empty_dir", root, "new_dir")
  1500  	t.Assert(err, IsNil)
  1501  
  1502  	dir2, err := s.LookUpInode(t, "dir2")
  1503  	t.Assert(err, IsNil)
  1504  	t.Assert(dir2, NotNil)
  1505  
  1506  	_, err = s.LookUpInode(t, "new_dir2")
  1507  	t.Assert(err, Equals, fuse.ENOENT)
  1508  
  1509  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1510  		OldParent: root.Id,
  1511  		NewParent: root.Id,
  1512  		OldName:   "dir2",
  1513  		NewName:   "new_dir2",
  1514  	})
  1515  	t.Assert(err, IsNil)
  1516  
  1517  	_, err = s.LookUpInode(t, "dir2/dir3")
  1518  	t.Assert(err, Equals, fuse.ENOENT)
  1519  
  1520  	_, err = s.LookUpInode(t, "dir2/dir3/file4")
  1521  	t.Assert(err, Equals, fuse.ENOENT)
  1522  
  1523  	new_dir2, err := s.LookUpInode(t, "new_dir2")
  1524  	t.Assert(err, IsNil)
  1525  	t.Assert(new_dir2, NotNil)
  1526  	t.Assert(dir2.Id, Equals, new_dir2.Id)
  1527  
  1528  	old, err := s.LookUpInode(t, "new_dir2/dir3/file4")
  1529  	t.Assert(err, IsNil)
  1530  	t.Assert(old, NotNil)
  1531  
  1532  	err = s.fs.Rename(nil, &fuseops.RenameOp{
  1533  		OldParent: root.Id,
  1534  		NewParent: root.Id,
  1535  		OldName:   "new_dir2",
  1536  		NewName:   "new_dir3",
  1537  	})
  1538  	t.Assert(err, IsNil)
  1539  
  1540  	new, err := s.LookUpInode(t, "new_dir3/dir3/file4")
  1541  	t.Assert(err, IsNil)
  1542  	t.Assert(new, NotNil)
  1543  	t.Assert(old.Id, Equals, new.Id)
  1544  
  1545  	_, err = s.LookUpInode(t, "new_dir2/dir3")
  1546  	t.Assert(err, Equals, fuse.ENOENT)
  1547  
  1548  	_, err = s.LookUpInode(t, "new_dir2/dir3/file4")
  1549  	t.Assert(err, Equals, fuse.ENOENT)
  1550  }
  1551  
  1552  func (s *GoofysTest) TestRename(t *C) {
  1553  	root := s.getRoot(t)
  1554  
  1555  	from, to := "empty_dir", "file1"
  1556  	err := root.Rename(from, root, to)
  1557  	t.Assert(err, Equals, fuse.ENOTDIR)
  1558  
  1559  	from, to = "file1", "empty_dir"
  1560  	err = root.Rename(from, root, to)
  1561  	t.Assert(err, Equals, syscall.EISDIR)
  1562  
  1563  	from, to = "file1", "new_file"
  1564  	err = root.Rename(from, root, to)
  1565  	t.Assert(err, IsNil)
  1566  
  1567  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1568  	t.Assert(err, IsNil)
  1569  
  1570  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from})
  1571  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
  1572  
  1573  	from, to = "file3", "new_file2"
  1574  	dir, _ := s.LookUpInode(t, "dir1")
  1575  	err = dir.Rename(from, root, to)
  1576  	t.Assert(err, IsNil)
  1577  
  1578  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to})
  1579  	t.Assert(err, IsNil)
  1580  
  1581  	_, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from})
  1582  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
  1583  
  1584  	from, to = "no_such_file", "new_file"
  1585  	err = root.Rename(from, root, to)
  1586  	t.Assert(err, Equals, fuse.ENOENT)
  1587  
  1588  	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
  1589  		if !hasEnv("GCS") {
  1590  			// not really a rename, but the rename path can use this
  1591  			from, to = s.fs.bucket+"/file2", "new_file"
  1592  			_, err = s3.copyObjectMultipart(int64(len("file2")), from, to, "", nil, nil, nil)
  1593  			t.Assert(err, IsNil)
  1594  		}
  1595  	}
  1596  }
  1597  
  1598  func (s *GoofysTest) TestConcurrentRefDeref(t *C) {
  1599  	root := s.getRoot(t)
  1600  
  1601  	lookupOp := fuseops.LookUpInodeOp{
  1602  		Parent: root.Id,
  1603  		Name:   "file1",
  1604  	}
  1605  
  1606  	for i := 0; i < 20; i++ {
  1607  		err := s.fs.LookUpInode(nil, &lookupOp)
  1608  		t.Assert(err, IsNil)
  1609  
  1610  		var wg sync.WaitGroup
  1611  
  1612  		wg.Add(2)
  1613  		go func() {
  1614  			// we want to yield to the forget goroutine so that it's run first
  1615  			// to trigger this bug
  1616  			if i%2 == 0 {
  1617  				runtime.Gosched()
  1618  			}
  1619  			s.fs.LookUpInode(nil, &lookupOp)
  1620  			wg.Done()
  1621  		}()
  1622  		go func() {
  1623  			s.fs.ForgetInode(nil, &fuseops.ForgetInodeOp{
  1624  				Inode: lookupOp.Entry.Child,
  1625  				N:     1,
  1626  			})
  1627  			wg.Done()
  1628  		}()
  1629  
  1630  		wg.Wait()
  1631  	}
  1632  }
  1633  
  1634  func hasEnv(env string) bool {
  1635  	v := os.Getenv(env)
  1636  
  1637  	return !(v == "" || v == "0" || v == "false")
  1638  }
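
        // hasEnv treats "", "0" and "false" as unset, so switches like AWS,
        // GCS, TRAVIS and CATFS can be disabled explicitly as well as by
        // omission.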
  1639  
  1640  func isTravis() bool {
  1641  	return hasEnv("TRAVIS")
  1642  }
  1643  
  1644  func isCatfs() bool {
  1645  	return hasEnv("CATFS")
  1646  }
  1647  
  1648  func (s *GoofysTest) mount(t *C, mountPoint string) {
  1649  	err := os.MkdirAll(mountPoint, 0700)
  1650  	t.Assert(err, IsNil)
  1651  
  1652  	server := fuseutil.NewFileSystemServer(s.fs)
  1653  
  1654  	if isCatfs() {
  1655  		s.fs.flags.MountOptions = make(map[string]string)
  1656  		s.fs.flags.MountOptions["allow_other"] = ""
  1657  	}
  1658  
  1659  	// Mount the file system.
  1660  	mountCfg := &fuse.MountConfig{
  1661  		FSName:                  s.fs.bucket,
  1662  		Subtype:                 "goofys",
  1663  		Options:                 s.fs.flags.MountOptions,
  1664  		ErrorLogger:             GetStdLogger(NewLogger("fuse"), logrus.ErrorLevel),
  1665  		DisableWritebackCaching: true,
  1666  	}
  1667  	mountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel)
  1668  
  1669  	_, err = fuse.Mount(mountPoint, server, mountCfg)
  1670  	t.Assert(err, IsNil)
  1671  
  1672  	if isCatfs() {
  1673  		cacheDir := mountPoint + "-cache"
  1674  		err := os.MkdirAll(cacheDir, 0700)
  1675  		t.Assert(err, IsNil)
  1676  
  1677  		catfs := exec.Command("catfs", "--test", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
  1678  		_, err = catfs.Output()
  1679  		if err != nil {
  1680  			if ee, ok := err.(*exec.ExitError); ok {
  1681  				panic(ee.Stderr)
  1682  			}
  1683  		}
  1684  
  1685  		catfs = exec.Command("catfs", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
  1686  
  1687  		if isTravis() {
  1688  			logger := NewLogger("catfs")
  1689  			lvl := logrus.InfoLevel
  1690  			logger.Formatter.(*LogHandle).Lvl = &lvl
  1691  			w := logger.Writer()
  1692  
  1693  			catfs.Stdout = w
  1694  			catfs.Stderr = w
  1695  
  1696  			catfs.Env = append(catfs.Env, "RUST_LOG=debug")
  1697  		}
  1698  
  1699  		err = catfs.Start()
  1700  		t.Assert(err, IsNil)
  1701  
  1702  		time.Sleep(time.Second)
  1703  	}
  1704  }
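
        // Note on the catfs setup above: passing the same directory as both
        // source and target ("catfs -- mountPoint cacheDir mountPoint")
        // stacks the cache over the goofys mount in place, so the fuse tests
        // transparently exercise catfs whenever CATFS is set.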
  1705  
  1706  func (s *GoofysTest) umount(t *C, mountPoint string) {
  1707  	var err error
  1708  	for i := 0; i < 10; i++ {
  1709  		err = fuse.Unmount(mountPoint)
  1710  		if err != nil {
  1711  			time.Sleep(100 * time.Millisecond)
  1712  		} else {
  1713  			break
  1714  		}
  1715  	}
  1716  	t.Assert(err, IsNil)
  1717  
  1718  	os.Remove(mountPoint)
  1719  	if isCatfs() {
  1720  		cacheDir := mountPoint + "-cache"
  1721  		os.Remove(cacheDir)
  1722  	}
  1723  }
  1724  
  1725  func (s *GoofysTest) runFuseTest(t *C, mountPoint string, umount bool, cmdArgs ...string) {
  1726  	s.mount(t, mountPoint)
  1727  
  1728  	if umount {
  1729  		defer s.umount(t, mountPoint)
  1730  	}
  1731  
  1732  	// if command starts with ./ or ../ then we are executing a
  1733  	// relative path and cannot do chdir
  1734  	chdir := cmdArgs[0][0] != '.'
  1735  
  1736  	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
  1737  	cmd.Env = append(cmd.Env, os.Environ()...)
  1738  	cmd.Env = append(cmd.Env, "FAST=true")
  1739  	cmd.Env = append(cmd.Env, "CLEANUP=false")
  1740  
  1741  	if isTravis() {
  1742  		logger := NewLogger("test")
  1743  		lvl := logrus.InfoLevel
  1744  		logger.Formatter.(*LogHandle).Lvl = &lvl
  1745  		w := logger.Writer()
  1746  
  1747  		cmd.Stdout = w
  1748  		cmd.Stderr = w
  1749  	}
  1750  
  1751  	if chdir {
  1752  		oldCwd, err := os.Getwd()
  1753  		t.Assert(err, IsNil)
  1754  
  1755  		err = os.Chdir(mountPoint)
  1756  		t.Assert(err, IsNil)
  1757  
  1758  		defer os.Chdir(oldCwd)
  1759  	}
  1760  
  1761  	err := cmd.Run()
  1762  	t.Assert(err, IsNil)
  1763  }
  1764  
  1765  func (s *GoofysTest) TestFuse(t *C) {
  1766  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1767  
  1768  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1769  }
  1770  
  1771  func (s *GoofysTest) TestFuseWithTTL(t *C) {
  1772  	s.fs.flags.StatCacheTTL = 60 * time.Second
  1773  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1774  
  1775  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1776  }
  1777  
  1778  func (s *GoofysTest) TestCheap(t *C) {
  1779  	s.fs.flags.Cheap = true
  1780  	s.TestLookUpInode(t)
  1781  	s.TestWriteLargeFile(t)
  1782  }
  1783  
  1784  func (s *GoofysTest) TestExplicitDir(t *C) {
  1785  	s.fs.flags.ExplicitDir = true
  1786  	s.testExplicitDir(t)
  1787  }
  1788  
  1789  func (s *GoofysTest) TestExplicitDirAndCheap(t *C) {
  1790  	s.fs.flags.ExplicitDir = true
  1791  	s.fs.flags.Cheap = true
  1792  	s.testExplicitDir(t)
  1793  }
  1794  
  1795  func (s *GoofysTest) testExplicitDir(t *C) {
  1796  	if s.cloud.Capabilities().DirBlob {
  1797  		t.Skip("only for backends without dir blob")
  1798  	}
  1799  
  1800  	_, err := s.LookUpInode(t, "file1")
  1801  	t.Assert(err, IsNil)
  1802  
  1803  	_, err = s.LookUpInode(t, "fileNotFound")
  1804  	t.Assert(err, Equals, fuse.ENOENT)
  1805  
  1806  	// dir1/ doesn't exist so we shouldn't be able to see it
  1807  	_, err = s.LookUpInode(t, "dir1/file3")
  1808  	t.Assert(err, Equals, fuse.ENOENT)
  1809  
  1810  	_, err = s.LookUpInode(t, "dir4/file5")
  1811  	t.Assert(err, IsNil)
  1812  
  1813  	_, err = s.LookUpInode(t, "empty_dir")
  1814  	t.Assert(err, IsNil)
  1815  }
  1816  
  1817  func (s *GoofysTest) TestBenchLs(t *C) {
  1818  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1819  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1820  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1821  	s.setUpTestTimeout(t, 20*time.Minute)
  1822  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "ls")
  1823  }
  1824  
  1825  func (s *GoofysTest) TestBenchCreate(t *C) {
  1826  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1827  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1828  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1829  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create")
  1830  }
  1831  
  1832  func (s *GoofysTest) TestBenchCreateParallel(t *C) {
  1833  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1834  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1835  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1836  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create_parallel")
  1837  }
  1838  
  1839  func (s *GoofysTest) TestBenchIO(t *C) {
  1840  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1841  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1842  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1843  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "io")
  1844  }
  1845  
  1846  func (s *GoofysTest) TestBenchFindTree(t *C) {
  1847  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1848  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1849  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1850  
  1851  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "find")
  1852  }
  1853  
  1854  func (s *GoofysTest) TestIssue231(t *C) {
  1855  	if isTravis() {
  1856  		t.Skip("disable in travis, not sure if it has enough memory")
  1857  	}
  1858  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1859  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue231")
  1860  }
  1861  
  1862  func (s *GoofysTest) TestChmod(t *C) {
  1863  	root := s.getRoot(t)
  1864  
  1865  	lookupOp := fuseops.LookUpInodeOp{
  1866  		Parent: root.Id,
  1867  		Name:   "file1",
  1868  	}
  1869  
  1870  	err := s.fs.LookUpInode(nil, &lookupOp)
  1871  	t.Assert(err, IsNil)
  1872  
  1873  	targetMode := os.FileMode(0777)
  1874  	setOp := fuseops.SetInodeAttributesOp{Inode: lookupOp.Entry.Child, Mode: &targetMode}
  1875  
  1876  	err = s.fs.SetInodeAttributes(s.ctx, &setOp)
  1877  	t.Assert(err, IsNil)
  1878  	t.Assert(setOp.Attributes, NotNil)
  1879  }
  1880  
  1881  func (s *GoofysTest) TestIssue64(t *C) {
  1882  	/*
  1883  		mountPoint := "/tmp/mnt" + s.fs.bucket
  1884  		log.Level = logrus.DebugLevel
  1885  
  1886  		err := os.MkdirAll(mountPoint, 0700)
  1887  		t.Assert(err, IsNil)
  1888  
  1889  		defer os.Remove(mountPoint)
  1890  
  1891  		s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue64")
  1892  	*/
  1893  }
  1894  
  1895  func (s *GoofysTest) TestIssue69Fuse(t *C) {
  1896  	s.fs.flags.StatCacheTTL = 0
  1897  
  1898  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1899  
  1900  	s.mount(t, mountPoint)
  1901  
  1902  	defer func() {
  1903  		err := os.Chdir("/")
  1904  		t.Assert(err, IsNil)
  1905  
  1906  		s.umount(t, mountPoint)
  1907  	}()
  1908  
  1909  	err := os.Chdir(mountPoint)
  1910  	t.Assert(err, IsNil)
  1911  
  1912  	_, err = os.Stat("dir1")
  1913  	t.Assert(err, IsNil)
  1914  
  1915  	err = os.Remove("dir1/file3")
  1916  	t.Assert(err, IsNil)
  1917  
  1918  	// don't really care about error code, but it should be a PathError
  1919  	os.Stat("dir1")
  1920  	os.Stat("dir1")
  1921  }
  1922  
  1923  func (s *GoofysTest) TestGetMimeType(t *C) {
  1924  	// option to use mime type not turned on
  1925  	mime := s.fs.flags.GetMimeType("foo.css")
  1926  	t.Assert(mime, IsNil)
  1927  
  1928  	s.fs.flags.UseContentType = true
  1929  
  1930  	mime = s.fs.flags.GetMimeType("foo.css")
  1931  	t.Assert(mime, NotNil)
  1932  	t.Assert(*mime, Equals, "text/css")
  1933  
  1934  	mime = s.fs.flags.GetMimeType("foo")
  1935  	t.Assert(mime, IsNil)
  1936  
  1937  	mime = s.fs.flags.GetMimeType("foo.")
  1938  	t.Assert(mime, IsNil)
  1939  
  1940  	mime = s.fs.flags.GetMimeType("foo.unknownExtension")
  1941  	t.Assert(mime, IsNil)
  1942  }
  1943  
  1944  func (s *GoofysTest) TestPutMimeType(t *C) {
  1945  	if _, ok := s.cloud.(*ADLv1); ok {
  1946  		// ADLv1 doesn't support content-type
  1947  		t.Skip("ADLv1 doesn't support content-type")
  1948  	}
  1949  
  1950  	s.fs.flags.UseContentType = true
  1951  
  1952  	root := s.getRoot(t)
  1953  	jpg := "test.jpg"
  1954  	jpg2 := "test2.jpg"
  1955  	file := "test"
  1956  
  1957  	s.testWriteFile(t, jpg, 10, 128)
  1958  
  1959  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: jpg})
  1960  	t.Assert(err, IsNil)
  1961  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1962  
  1963  	err = root.Rename(jpg, root, file)
  1964  	t.Assert(err, IsNil)
  1965  
  1966  	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: file})
  1967  	t.Assert(err, IsNil)
  1968  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1969  
  1970  	err = root.Rename(file, root, jpg2)
  1971  	t.Assert(err, IsNil)
  1972  
  1973  	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: jpg2})
  1974  	t.Assert(err, IsNil)
  1975  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1976  }
  1977  
  1978  func (s *GoofysTest) TestBucketPrefixSlash(t *C) {
  1979  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2", s.fs.flags)
  1980  	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
  1981  
  1982  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2///", s.fs.flags)
  1983  	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
  1984  }
  1985  
  1986  func (s *GoofysTest) TestFuseWithPrefix(t *C) {
  1987  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1988  
  1989  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":testprefix", s.fs.flags)
  1990  
  1991  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1992  }
  1993  
  1994  func (s *GoofysTest) TestRenameCache(t *C) {
  1995  	root := s.getRoot(t)
  1996  	s.fs.flags.StatCacheTTL = time.Minute
  1997  
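        	// with a long stat cache TTL, the lookups below are served from
        	// cache, so they only pass if Rename fixes up the cached entries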
  1998  	lookupOp1 := fuseops.LookUpInodeOp{
  1999  		Parent: root.Id,
  2000  		Name:   "file1",
  2001  	}
  2002  
  2003  	lookupOp2 := lookupOp1
  2004  	lookupOp2.Name = "newfile"
  2005  
  2006  	err := s.fs.LookUpInode(nil, &lookupOp1)
  2007  	t.Assert(err, IsNil)
  2008  
  2009  	err = s.fs.LookUpInode(nil, &lookupOp2)
  2010  	t.Assert(err, Equals, fuse.ENOENT)
  2011  
  2012  	renameOp := fuseops.RenameOp{
  2013  		OldParent: root.Id,
  2014  		NewParent: root.Id,
  2015  		OldName:   "file1",
  2016  		NewName:   "newfile",
  2017  	}
  2018  
  2019  	err = s.fs.Rename(nil, &renameOp)
  2020  	t.Assert(err, IsNil)
  2021  
  2022  	lookupOp1.Entry = fuseops.ChildInodeEntry{}
  2023  	lookupOp2.Entry = fuseops.ChildInodeEntry{}
  2024  
  2025  	err = s.fs.LookUpInode(nil, &lookupOp1)
  2026  	t.Assert(err, Equals, fuse.ENOENT)
  2027  
  2028  	err = s.fs.LookUpInode(nil, &lookupOp2)
  2029  	t.Assert(err, IsNil)
  2030  }
  2031  
  2032  func (s *GoofysTest) anonymous(t *C) {
  2033  	// On azure this fails because we re-create the bucket with
  2034  	// the same name right away. Besides, anonymous access is not
  2035  	// implemented in our azure backend yet anyway.
  2036  	var s3 *S3Backend
  2037  	var ok bool
  2038  	if s3, ok = s.cloud.Delegate().(*S3Backend); !ok {
  2039  		t.Skip("only for S3")
  2040  	}
  2041  
  2042  	err := s.deleteBucket(s.cloud)
  2043  	t.Assert(err, IsNil)
  2044  
  2045  	// use a different bucket name to prevent 409 Conflict from
  2046  	// delete bucket above
  2047  	s.fs.bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
  2048  	s3.bucket = s.fs.bucket
  2049  	s.setupDefaultEnv(t, true)
  2050  
  2051  	s.fs = NewGoofys(context.Background(), s.fs.bucket, s.fs.flags)
  2052  	t.Assert(s.fs, NotNil)
  2053  
  2054  	// should have auto-detected by S3 backend
  2055  	cloud := s.getRoot(t).dir.cloud
  2056  	t.Assert(cloud, NotNil)
  2057  	s3, ok = cloud.Delegate().(*S3Backend)
  2058  	t.Assert(ok, Equals, true)
  2059  
  2060  	s3.awsConfig.Credentials = credentials.AnonymousCredentials
  2061  	s3.newS3()
  2062  }
  2063  
  2064  func (s *GoofysTest) disableS3() {
  2065  	time.Sleep(1 * time.Second) // wait for any background goroutines to finish
  2066  	dir := s.fs.inodes[fuseops.RootInodeID].dir
  2067  	dir.cloud = StorageBackendInitError{
  2068  		fmt.Errorf("cloud disabled"),
  2069  		*dir.cloud.Capabilities(),
  2070  	}
  2071  }
  2072  
  2073  func (s *GoofysTest) TestWriteAnonymous(t *C) {
  2074  	s.anonymous(t)
  2075  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2076  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2077  
  2078  	fileName := "test"
  2079  
  2080  	createOp := fuseops.CreateFileOp{
  2081  		Parent: s.getRoot(t).Id,
  2082  		Name:   fileName,
  2083  	}
  2084  
  2085  	err := s.fs.CreateFile(s.ctx, &createOp)
  2086  	t.Assert(err, IsNil)
  2087  
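        	// CreateFile only touches local state; the anonymous credentials are
        	// not rejected until flush, when goofys actually writes to S3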
  2088  	err = s.fs.FlushFile(s.ctx, &fuseops.FlushFileOp{
  2089  		Handle: createOp.Handle,
  2090  		Inode:  createOp.Entry.Child,
  2091  	})
  2092  	t.Assert(err, Equals, syscall.EACCES)
  2093  
  2094  	err = s.fs.ReleaseFileHandle(s.ctx, &fuseops.ReleaseFileHandleOp{Handle: createOp.Handle})
  2095  	t.Assert(err, IsNil)
  2096  
  2097  	err = s.fs.LookUpInode(s.ctx, &fuseops.LookUpInodeOp{
  2098  		Parent: s.getRoot(t).Id,
  2099  		Name:   fileName,
  2100  	})
  2101  	t.Assert(err, Equals, fuse.ENOENT)
  2102  	// BUG! the file shouldn't exist; see the test below for details.
  2103  	// This behaves as expected only because we are bypassing the
  2104  	// Linux VFS in this test.
  2105  }
  2106  
  2107  func (s *GoofysTest) TestWriteAnonymousFuse(t *C) {
  2108  	s.anonymous(t)
  2109  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2110  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2111  
  2112  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2113  
  2114  	s.mount(t, mountPoint)
  2115  	defer s.umount(t, mountPoint)
  2116  
  2117  	err := ioutil.WriteFile(mountPoint+"/test", []byte(""), 0600)
  2118  	t.Assert(err, NotNil)
  2119  	pathErr, ok := err.(*os.PathError)
  2120  	t.Assert(ok, Equals, true)
  2121  	t.Assert(pathErr.Err, Equals, syscall.EACCES)
  2122  
  2123  	_, err = os.Stat(mountPoint + "/test")
  2124  	t.Assert(err, IsNil)
  2125  	// BUG! the file shouldn't exist, the condition below should hold instead
  2126  	// see comment in Goofys.FlushFile
  2127  	// pathErr, ok = err.(*os.PathError)
  2128  	// t.Assert(ok, Equals, true)
  2129  	// t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2130  
  2131  	_, err = ioutil.ReadFile(mountPoint + "/test")
  2132  	t.Assert(err, NotNil)
  2133  	pathErr, ok = err.(*os.PathError)
  2134  	t.Assert(ok, Equals, true)
  2135  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2136  
  2137  	// reading the file and getting ENOENT causes the kernel to
  2138  	// invalidate the entry. Failing at open is not sufficient; we
  2139  	// have to fail at read (which means that if the application
  2140  	// uses splice(2) the read never reaches us and this wouldn't work)
  2141  	_, err = os.Stat(mountPoint + "/test")
  2142  	t.Assert(err, NotNil)
  2143  	pathErr, ok = err.(*os.PathError)
  2144  	t.Assert(ok, Equals, true)
  2145  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2146  }
  2147  
  2148  func (s *GoofysTest) TestWriteSyncWriteFuse(t *C) {
  2149  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2150  
  2151  	s.mount(t, mountPoint)
  2152  	defer s.umount(t, mountPoint)
  2153  
  2154  	var f *os.File
  2155  	var n int
  2156  	var err error
  2157  
  2158  	defer func() {
  2159  		if err != nil {
  2160  			f.Close()
  2161  		}
  2162  	}()
  2163  
  2164  	f, err = os.Create(mountPoint + "/TestWriteSyncWrite")
  2165  	t.Assert(err, IsNil)
  2166  
  2167  	n, err = f.Write([]byte("hello\n"))
  2168  	t.Assert(err, IsNil)
  2169  	t.Assert(n, Equals, 6)
  2170  
  2171  	err = f.Sync()
  2172  	t.Assert(err, IsNil)
  2173  
  2174  	n, err = f.Write([]byte("world\n"))
  2175  	t.Assert(err, IsNil)
  2176  	t.Assert(n, Equals, 6)
  2177  
  2178  	err = f.Close()
  2179  	t.Assert(err, IsNil)
  2180  }
  2181  
  2182  func (s *GoofysTest) TestIssue156(t *C) {
  2183  	_, err := s.LookUpInode(t, "\xae\x8a-")
  2184  	// S3Proxy and aws s3 return different errors
  2185  	// https://github.com/andrewgaul/s3proxy/issues/201
  2186  	t.Assert(err, NotNil)
  2187  }
  2188  
  2189  func (s *GoofysTest) TestIssue162(t *C) {
  2190  	if s.azurite {
  2191  		t.Skip("https://github.com/Azure/Azurite/issues/221")
  2192  	}
  2193  
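        	// exercise renaming a blob whose key contains non-ASCII characters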
  2194  	params := &PutBlobInput{
  2195  		Key:  "dir1/lör 006.jpg",
  2196  		Body: bytes.NewReader([]byte("foo")),
  2197  		Size: PUInt64(3),
  2198  	}
  2199  	_, err := s.cloud.PutBlob(params)
  2200  	t.Assert(err, IsNil)
  2201  
  2202  	dir, err := s.LookUpInode(t, "dir1")
  2203  	t.Assert(err, IsNil)
  2204  
  2205  	err = dir.Rename("lör 006.jpg", dir, "myfile.jpg")
  2206  	t.Assert(err, IsNil)
  2207  
  2208  	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "dir1/myfile.jpg"})
        	t.Assert(err, IsNil)
  2209  	t.Assert(resp.Size, Equals, uint64(3))
  2210  }
  2211  
  2212  func (s *GoofysTest) TestXAttrGet(t *C) {
  2213  	if _, ok := s.cloud.(*ADLv1); ok {
  2214  		t.Skip("ADLv1 doesn't support metadata")
  2215  	}
  2216  
  2217  	_, checkETag := s.cloud.Delegate().(*S3Backend)
  2218  	xattrPrefix := s.cloud.Capabilities().Name + "."
  2219  
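        	// backend attributes (etag, storage class) are namespaced by the
        	// backend name (e.g. "s3."), object metadata by "user."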
  2220  	file1, err := s.LookUpInode(t, "file1")
  2221  	t.Assert(err, IsNil)
  2222  
  2223  	names, err := file1.ListXattr()
  2224  	t.Assert(err, IsNil)
  2225  	expectedXattrs := []string{
  2226  		xattrPrefix + "etag",
  2227  		xattrPrefix + "storage-class",
  2228  		"user.name",
  2229  	}
  2230  	sort.Strings(expectedXattrs)
  2231  	t.Assert(names, DeepEquals, expectedXattrs)
  2232  
  2233  	_, err = file1.GetXattr("user.foobar")
  2234  	t.Assert(err, Equals, unix.ENODATA)
  2235  
  2236  	if checkETag {
  2237  		value, err := file1.GetXattr("s3.etag")
  2238  		t.Assert(err, IsNil)
  2239  		// md5sum of "file1"
  2240  		t.Assert(string(value), Equals, "\"826e8142e6baabe8af779f5f490cf5f5\"")
  2241  	}
  2242  
  2243  	value, err := file1.GetXattr("user.name")
  2244  	t.Assert(err, IsNil)
  2245  	t.Assert(string(value), Equals, "file1+/#\x00")
  2246  
  2247  	dir1, err := s.LookUpInode(t, "dir1")
  2248  	t.Assert(err, IsNil)
  2249  
  2250  	if !s.cloud.Capabilities().DirBlob {
  2251  		// implicit dir blobs don't have s3.etag at all
  2252  		names, err = dir1.ListXattr()
  2253  		t.Assert(err, IsNil)
  2254  		t.Assert(len(names), Equals, 0, Commentf("names: %v", names))
  2255  
  2256  		value, err = dir1.GetXattr(xattrPrefix + "etag")
  2257  		t.Assert(err, Equals, syscall.ENODATA)
  2258  	}
  2259  
  2260  	// list dir1 to populate file3 in cache, then get file3's xattr
  2261  	lookup := fuseops.LookUpInodeOp{
  2262  		Parent: fuseops.RootInodeID,
  2263  		Name:   "dir1",
  2264  	}
  2265  	err = s.fs.LookUpInode(nil, &lookup)
  2266  	t.Assert(err, IsNil)
  2267  
  2268  	s.readDirIntoCache(t, lookup.Entry.Child)
  2269  
  2270  	dir1 = s.fs.inodes[lookup.Entry.Child]
  2271  	file3 := dir1.findChild("file3")
  2272  	t.Assert(file3, NotNil)
  2273  	t.Assert(file3.userMetadata, IsNil)
  2274  
  2275  	if checkETag {
  2276  		value, err = file3.GetXattr("s3.etag")
  2277  		t.Assert(err, IsNil)
  2278  		// md5sum of "dir1/file3"
  2279  		t.Assert(string(value), Equals, "\"5cd67e0e59fb85be91a515afe0f4bb24\"")
  2280  	}
  2281  
  2282  	// ensure that we get the dir blob instead of list
  2283  	s.fs.flags.Cheap = true
  2284  
  2285  	emptyDir2, err := s.LookUpInode(t, "empty_dir2")
  2286  	t.Assert(err, IsNil)
  2287  
  2288  	names, err = emptyDir2.ListXattr()
  2289  	t.Assert(err, IsNil)
  2290  	sort.Strings(names)
  2291  	t.Assert(names, DeepEquals, expectedXattrs)
  2292  
  2293  	emptyDir, err := s.LookUpInode(t, "empty_dir")
  2294  	t.Assert(err, IsNil)
  2295  
  2296  	if checkETag {
  2297  		value, err = emptyDir.GetXattr("s3.etag")
  2298  		t.Assert(err, IsNil)
  2299  		// dir blobs are empty
  2300  		t.Assert(string(value), Equals, "\"d41d8cd98f00b204e9800998ecf8427e\"")
  2301  	}
  2302  
  2303  	// s3proxy doesn't support storage class yet
  2304  	if hasEnv("AWS") {
  2305  		cloud := s.getRoot(t).dir.cloud
  2306  		s3, ok := cloud.Delegate().(*S3Backend)
  2307  		t.Assert(ok, Equals, true)
  2308  		s3.config.StorageClass = "STANDARD_IA"
  2309  
  2310  		s.testWriteFile(t, "ia", 1, 128*1024)
  2311  
  2312  		ia, err := s.LookUpInode(t, "ia")
  2313  		t.Assert(err, IsNil)
  2314  
  2315  		names, err = ia.ListXattr()
        		t.Assert(err, IsNil)
  2316  		t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})
  2317  
  2318  		value, err = ia.GetXattr("s3.storage-class")
  2319  		t.Assert(err, IsNil)
  2320  		// smaller than 128KB falls back to standard
  2321  		t.Assert(string(value), Equals, "STANDARD")
  2322  
  2323  		s.testWriteFile(t, "ia", 128*1024, 128*1024)
  2324  		time.Sleep(100 * time.Millisecond)
  2325  
  2326  		names, err = ia.ListXattr()
        		t.Assert(err, IsNil)
  2327  		t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})
  2328  
  2329  		value, err = ia.GetXattr("s3.storage-class")
  2330  		t.Assert(err, IsNil)
  2331  		t.Assert(string(value), Equals, "STANDARD_IA")
  2332  	}
  2333  }
  2334  
  2335  func (s *GoofysTest) TestClientForkExec(t *C) {
  2336  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2337  	s.mount(t, mountPoint)
  2338  	defer s.umount(t, mountPoint)
  2339  	file := mountPoint + "/TestClientForkExec"
  2340  
  2341  	// Create new file.
  2342  	fh, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0600)
  2343  	t.Assert(err, IsNil)
  2344  	defer func() { // Defer close file if it's not already closed.
  2345  		if fh != nil {
  2346  			fh.Close()
  2347  		}
  2348  	}()
  2349  	// Write to file.
  2350  	_, err = fh.WriteString("1.1;")
  2351  	t.Assert(err, IsNil)
  2352  	// The `Command` is run via fork+exec.
  2353  	// So all the file descriptors are copied over to the child process.
  2354  	// The child process 'closes' the files before exiting. This should
  2355  	// not result in goofys failing file operations invoked from the test.
  2356  	someCmd := exec.Command("echo", "hello")
  2357  	err = someCmd.Run()
  2358  	t.Assert(err, IsNil)
  2359  	// One more write.
  2360  	_, err = fh.WriteString("1.2;")
  2361  	t.Assert(err, IsNil)
  2362  	// Close file.
  2363  	err = fh.Close()
  2364  	t.Assert(err, IsNil)
  2365  	fh = nil
  2366  	// Check file content.
  2367  	content, err := ioutil.ReadFile(file)
  2368  	t.Assert(err, IsNil)
  2369  	t.Assert(string(content), Equals, "1.1;1.2;")
  2370  
  2371  	// Repeat the same exercise, but now with an existing file.
  2372  	fh, err = os.OpenFile(file, os.O_RDWR, 0600)
        	t.Assert(err, IsNil)
  2373  	// Write to file.
  2374  	_, err = fh.WriteString("2.1;")
        	t.Assert(err, IsNil)
  2375  	// fork+exec.
  2376  	someCmd = exec.Command("echo", "hello")
  2377  	err = someCmd.Run()
  2378  	t.Assert(err, IsNil)
  2379  	// One more write.
  2380  	_, err = fh.WriteString("2.2;")
  2381  	t.Assert(err, IsNil)
  2382  	// Close file.
  2383  	err = fh.Close()
  2384  	t.Assert(err, IsNil)
  2385  	fh = nil
  2386  	// Verify that the file is updated as per the new write.
  2387  	content, err = ioutil.ReadFile(file)
  2388  	t.Assert(err, IsNil)
  2389  	t.Assert(string(content), Equals, "2.1;2.2;")
  2390  }
  2391  
  2392  func (s *GoofysTest) TestXAttrGetCached(t *C) {
  2393  	if _, ok := s.cloud.(*ADLv1); ok {
  2394  		t.Skip("ADLv1 doesn't support metadata")
  2395  	}
  2396  
  2397  	xattrPrefix := s.cloud.Capabilities().Name + "."
  2398  
  2399  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2400  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2401  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2402  	s.disableS3()
  2403  
  2404  	in, err := s.LookUpInode(t, "file1")
  2405  	t.Assert(err, IsNil)
  2406  	t.Assert(in.userMetadata, IsNil)
  2407  
  2408  	_, err = in.GetXattr(xattrPrefix + "etag")
  2409  	t.Assert(err, IsNil)
  2410  }
  2411  
  2412  func (s *GoofysTest) TestXAttrCopied(t *C) {
  2413  	if _, ok := s.cloud.(*ADLv1); ok {
  2414  		t.Skip("ADLv1 doesn't support metadata")
  2415  	}
  2416  
  2417  	root := s.getRoot(t)
  2418  
  2419  	err := root.Rename("file1", root, "file0")
  2420  	t.Assert(err, IsNil)
  2421  
  2422  	in, err := s.LookUpInode(t, "file0")
  2423  	t.Assert(err, IsNil)
  2424  
  2425  	_, err = in.GetXattr("user.name")
  2426  	t.Assert(err, IsNil)
  2427  }
  2428  
  2429  func (s *GoofysTest) TestXAttrRemove(t *C) {
  2430  	if _, ok := s.cloud.(*ADLv1); ok {
  2431  		t.Skip("ADLv1 doesn't support metadata")
  2432  	}
  2433  
  2434  	in, err := s.LookUpInode(t, "file1")
  2435  	t.Assert(err, IsNil)
  2436  
  2437  	_, err = in.GetXattr("user.name")
  2438  	t.Assert(err, IsNil)
  2439  
  2440  	err = in.RemoveXattr("user.name")
  2441  	t.Assert(err, IsNil)
  2442  
  2443  	_, err = in.GetXattr("user.name")
  2444  	t.Assert(err, Equals, syscall.ENODATA)
  2445  }
  2446  
  2447  func (s *GoofysTest) TestXAttrFuse(t *C) {
  2448  	if _, ok := s.cloud.(*ADLv1); ok {
  2449  		t.Skip("ADLv1 doesn't support metadata")
  2450  	}
  2451  
  2452  	_, checkETag := s.cloud.Delegate().(*S3Backend)
  2453  	xattrPrefix := s.cloud.Capabilities().Name + "."
  2454  
  2455  	//fuseLog.Level = logrus.DebugLevel
  2456  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2457  	s.mount(t, mountPoint)
  2458  	defer s.umount(t, mountPoint)
  2459  
  2460  	expectedXattrs := []string{
  2461  		xattrPrefix + "etag",
  2462  		xattrPrefix + "storage-class",
  2463  		"user.name",
  2464  	}
  2465  	sort.Strings(expectedXattrs)
  2466  
  2467  	var expectedXattrsStr string
  2468  	for _, x := range expectedXattrs {
  2469  		expectedXattrsStr += x + "\x00"
  2470  	}
  2471  	var buf [1024]byte
  2472  
  2473  	// error if size is too small (but not zero)
  2474  	_, err := unix.Listxattr(mountPoint+"/file1", buf[:1])
  2475  	t.Assert(err, Equals, unix.ERANGE)
  2476  
  2477  	// a 0-length buffer means interrogate the required buffer size
  2478  	nbytes, err := unix.Listxattr(mountPoint+"/file1", nil)
  2479  	t.Assert(err, IsNil)
  2480  	t.Assert(nbytes, Equals, len(expectedXattrsStr))
  2481  
  2482  	nbytes, err = unix.Listxattr(mountPoint+"/file1", buf[:nbytes])
  2483  	t.Assert(err, IsNil)
  2484  	t.Assert(nbytes, Equals, len(expectedXattrsStr))
  2485  	t.Assert(string(buf[:nbytes]), Equals, expectedXattrsStr)
  2486  
  2487  	_, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:1])
  2488  	t.Assert(err, Equals, unix.ERANGE)
  2489  
  2490  	nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", nil)
  2491  	t.Assert(err, IsNil)
  2492  	t.Assert(nbytes, Equals, 9)
  2493  
  2494  	nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:nbytes])
  2495  	t.Assert(err, IsNil)
  2496  	t.Assert(nbytes, Equals, 9)
  2497  	t.Assert(string(buf[:nbytes]), Equals, "file1+/#\x00")
  2498  
  2499  	if !s.cloud.Capabilities().DirBlob {
  2500  		// dir1 has no xattrs
  2501  		nbytes, err = unix.Listxattr(mountPoint+"/dir1", nil)
  2502  		t.Assert(err, IsNil)
  2503  		t.Assert(nbytes, Equals, 0)
  2504  
  2505  		nbytes, err = unix.Listxattr(mountPoint+"/dir1", buf[:1])
  2506  		t.Assert(err, IsNil)
  2507  		t.Assert(nbytes, Equals, 0)
  2508  	}
  2509  
  2510  	if checkETag {
  2511  		_, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:1])
  2512  		t.Assert(err, Equals, unix.ERANGE)
  2513  
  2514  		nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", nil)
  2515  		t.Assert(err, IsNil)
  2516  		// 32 bytes md5 plus quotes
  2517  		t.Assert(nbytes, Equals, 34)
  2518  
  2519  		nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:nbytes])
  2520  		t.Assert(err, IsNil)
  2521  		t.Assert(nbytes, Equals, 34)
  2522  		t.Assert(string(buf[:nbytes]), Equals,
  2523  			"\"826e8142e6baabe8af779f5f490cf5f5\"")
  2524  	}
  2525  }
  2526  
  2527  func (s *GoofysTest) TestXAttrSet(t *C) {
  2528  	if _, ok := s.cloud.(*ADLv1); ok {
  2529  		t.Skip("ADLv1 doesn't support metadata")
  2530  	}
  2531  
  2532  	in, err := s.LookUpInode(t, "file1")
  2533  	t.Assert(err, IsNil)
  2534  
  2535  	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_REPLACE)
  2536  	t.Assert(err, Equals, syscall.ENODATA)
  2537  
  2538  	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE)
  2539  	t.Assert(err, IsNil)
  2540  
  2541  	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE)
  2542  	t.Assert(err, Equals, syscall.EEXIST)
  2543  
  2544  	in, err = s.LookUpInode(t, "file1")
  2545  	t.Assert(err, IsNil)
  2546  
  2547  	value, err := in.GetXattr("user.bar")
  2548  	t.Assert(err, IsNil)
  2549  	t.Assert(string(value), Equals, "hello")
  2550  
  2551  	value = []byte("file1+%/#\x00")
  2552  
  2553  	err = in.SetXattr("user.bar", value, unix.XATTR_REPLACE)
  2554  	t.Assert(err, IsNil)
  2555  
  2556  	in, err = s.LookUpInode(t, "file1")
  2557  	t.Assert(err, IsNil)
  2558  
  2559  	value2, err := in.GetXattr("user.bar")
  2560  	t.Assert(err, IsNil)
  2561  	t.Assert(value2, DeepEquals, value)
  2562  
  2563  	// setting with flag = 0 always works
  2564  	err = in.SetXattr("user.bar", []byte("world"), 0)
  2565  	t.Assert(err, IsNil)
  2566  
  2567  	err = in.SetXattr("user.baz", []byte("world"), 0)
  2568  	t.Assert(err, IsNil)
  2569  
  2570  	value, err = in.GetXattr("user.bar")
  2571  	t.Assert(err, IsNil)
  2572  
  2573  	value2, err = in.GetXattr("user.baz")
  2574  	t.Assert(err, IsNil)
  2575  
  2576  	t.Assert(value2, DeepEquals, value)
  2577  	t.Assert(string(value2), DeepEquals, "world")
  2578  
  2579  	err = in.SetXattr("s3.bar", []byte("hello"), unix.XATTR_CREATE)
  2580  	t.Assert(err, Equals, syscall.EPERM)
  2581  }
  2582  
  2583  func (s *GoofysTest) TestPythonCopyTree(t *C) {
  2584  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2585  
  2586  	s.runFuseTest(t, mountPoint, true, "python", "-c",
  2587  		"import shutil; shutil.copytree('dir2', 'dir5')",
  2588  		mountPoint)
  2589  }
  2590  
  2591  func (s *GoofysTest) TestCreateRenameBeforeCloseFuse(t *C) {
  2592  	if s.azurite {
  2593  		// Azurite returns 400 when copy source doesn't exist
  2594  		// https://github.com/Azure/Azurite/issues/219
  2595  		// so our code to ignore ENOENT fails
  2596  		t.Skip("https://github.com/Azure/Azurite/issues/219")
  2597  	}
  2598  
  2599  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2600  
  2601  	s.mount(t, mountPoint)
  2602  	defer s.umount(t, mountPoint)
  2603  
  2604  	from := mountPoint + "/newfile"
  2605  	to := mountPoint + "/newfile2"
  2606  
  2607  	fh, err := os.Create(from)
  2608  	t.Assert(err, IsNil)
  2609  	defer func() {
  2610  		// close the file if the test failed so we can unmount
  2611  		if fh != nil {
  2612  			fh.Close()
  2613  		}
  2614  	}()
  2615  
  2616  	_, err = fh.WriteString("hello world")
  2617  	t.Assert(err, IsNil)
  2618  
  2619  	err = os.Rename(from, to)
  2620  	t.Assert(err, IsNil)
  2621  
  2622  	err = fh.Close()
  2623  	t.Assert(err, IsNil)
  2624  	fh = nil
  2625  
  2626  	_, err = os.Stat(from)
  2627  	t.Assert(err, NotNil)
  2628  	pathErr, ok := err.(*os.PathError)
  2629  	t.Assert(ok, Equals, true)
  2630  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2631  
  2632  	content, err := ioutil.ReadFile(to)
  2633  	t.Assert(err, IsNil)
  2634  	t.Assert(string(content), Equals, "hello world")
  2635  }
  2636  
  2637  func (s *GoofysTest) TestRenameBeforeCloseFuse(t *C) {
  2638  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2639  
  2640  	s.mount(t, mountPoint)
  2641  	defer s.umount(t, mountPoint)
  2642  
  2643  	from := mountPoint + "/newfile"
  2644  	to := mountPoint + "/newfile2"
  2645  
  2646  	err := ioutil.WriteFile(from, []byte(""), 0600)
  2647  	t.Assert(err, IsNil)
  2648  
  2649  	fh, err := os.OpenFile(from, os.O_WRONLY, 0600)
  2650  	t.Assert(err, IsNil)
  2651  	defer func() {
  2652  		// close the file if the test failed so we can unmount
  2653  		if fh != nil {
  2654  			fh.Close()
  2655  		}
  2656  	}()
  2657  
  2658  	_, err = fh.WriteString("hello world")
  2659  	t.Assert(err, IsNil)
  2660  
  2661  	err = os.Rename(from, to)
  2662  	t.Assert(err, IsNil)
  2663  
  2664  	err = fh.Close()
  2665  	t.Assert(err, IsNil)
  2666  	fh = nil
  2667  
  2668  	_, err = os.Stat(from)
  2669  	t.Assert(err, NotNil)
  2670  	pathErr, ok := err.(*os.PathError)
  2671  	t.Assert(ok, Equals, true)
  2672  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  2673  
  2674  	content, err := ioutil.ReadFile(to)
  2675  	t.Assert(err, IsNil)
  2676  	t.Assert(string(content), Equals, "hello world")
  2677  }
  2678  
  2679  func (s *GoofysTest) TestInodeInsert(t *C) {
  2680  	root := s.getRoot(t)
  2681  
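        	// the first two entries of dir.Children appear to be "." and "..",
        	// so the first inserted child lands at index 2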
  2682  	in := NewInode(s.fs, root, aws.String("2"))
  2683  	in.Attributes = InodeAttributes{}
  2684  	root.insertChild(in)
  2685  	t.Assert(*root.dir.Children[2].Name, Equals, "2")
  2686  
  2687  	in = NewInode(s.fs, root, aws.String("1"))
  2688  	in.Attributes = InodeAttributes{}
  2689  	root.insertChild(in)
  2690  	t.Assert(*root.dir.Children[2].Name, Equals, "1")
  2691  	t.Assert(*root.dir.Children[3].Name, Equals, "2")
  2692  
  2693  	in = NewInode(s.fs, root, aws.String("4"))
  2694  	in.Attributes = InodeAttributes{}
  2695  	root.insertChild(in)
  2696  	t.Assert(*root.dir.Children[2].Name, Equals, "1")
  2697  	t.Assert(*root.dir.Children[3].Name, Equals, "2")
  2698  	t.Assert(*root.dir.Children[4].Name, Equals, "4")
  2699  
  2700  	inode := root.findChild("1")
  2701  	t.Assert(inode, NotNil)
  2702  	t.Assert(*inode.Name, Equals, "1")
  2703  
  2704  	inode = root.findChild("2")
  2705  	t.Assert(inode, NotNil)
  2706  	t.Assert(*inode.Name, Equals, "2")
  2707  
  2708  	inode = root.findChild("4")
  2709  	t.Assert(inode, NotNil)
  2710  	t.Assert(*inode.Name, Equals, "4")
  2711  
  2712  	inode = root.findChild("0")
  2713  	t.Assert(inode, IsNil)
  2714  
  2715  	inode = root.findChild("3")
  2716  	t.Assert(inode, IsNil)
  2717  
  2718  	root.removeChild(root.dir.Children[3])
  2719  	root.removeChild(root.dir.Children[2])
  2720  	root.removeChild(root.dir.Children[2])
  2721  	t.Assert(len(root.dir.Children), Equals, 2)
  2722  }
  2723  
  2724  func (s *GoofysTest) TestReadDirSlurpHeuristic(t *C) {
  2725  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  2726  		t.Skip("only for S3")
  2727  	}
  2728  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2729  
  2730  	s.setupBlobs(s.cloud, t, map[string]*string{"dir2isafile": nil})
  2731  
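        	// seqOpenDirScore counts directories opened in listing order; it is
        	// the heuristic goofys uses to decide when to slurp a whole subtree
        	// with one LIST, and the assertions below track it going up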
  2732  	root := s.getRoot(t).dir
  2733  	t.Assert(root.seqOpenDirScore, Equals, uint8(0))
  2734  	s.assertEntries(t, s.getRoot(t), []string{
  2735  		"dir1", "dir2", "dir2isafile", "dir4", "empty_dir",
  2736  		"empty_dir2", "file1", "file2", "zero"})
  2737  
  2738  	dir1, err := s.LookUpInode(t, "dir1")
  2739  	t.Assert(err, IsNil)
  2740  	dh1 := dir1.OpenDir()
  2741  	defer dh1.CloseDir()
  2742  	score := root.seqOpenDirScore
  2743  
  2744  	dir2, err := s.LookUpInode(t, "dir2")
  2745  	t.Assert(err, IsNil)
  2746  	dh2 := dir2.OpenDir()
  2747  	defer dh2.CloseDir()
  2748  	t.Assert(root.seqOpenDirScore, Equals, score+1)
  2749  
  2750  	dir4, err := s.LookUpInode(t, "dir4")
  2751  	t.Assert(err, IsNil)
  2752  	dh3 := dir4.OpenDir()
  2753  	defer dh3.CloseDir()
  2754  	t.Assert(root.seqOpenDirScore, Equals, score+2)
  2755  }
  2756  
  2757  func (s *GoofysTest) TestReadDirSlurpSubtree(t *C) {
  2758  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  2759  		t.Skip("only for S3")
  2760  	}
  2761  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2762  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2763  
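        	// pretend we've already seen two sequential directory opens so the
        	// next readdir triggers slurping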
  2764  	s.getRoot(t).dir.seqOpenDirScore = 2
  2765  	in, err := s.LookUpInode(t, "dir2")
  2766  	t.Assert(err, IsNil)
  2767  	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(2))
  2768  
  2769  	s.readDirIntoCache(t, in.Id)
  2770  	// should have incremented the score
  2771  	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(3))
  2772  
  2773  	// reading dir2 should cause dir2/dir3 to have cached readdir
  2774  	s.disableS3()
  2775  
  2776  	in, err = s.LookUpInode(t, "dir2/dir3")
  2777  	t.Assert(err, IsNil)
  2778  
  2779  	s.assertEntries(t, in, []string{"file4"})
  2780  }
  2781  
  2782  func (s *GoofysTest) TestReadDirCached(t *C) {
  2783  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2784  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2785  
  2786  	s.getRoot(t).dir.seqOpenDirScore = 2
  2787  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2788  	s.disableS3()
  2789  
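        	// the test expects every directory to sort before any file; noMoreDir
        	// flips once the first file is seen and must never see a dir again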
  2790  	dh := s.getRoot(t).OpenDir()
  2791  
  2792  	entries := s.readDirFully(t, dh)
  2793  	dirs := make([]string, 0)
  2794  	files := make([]string, 0)
  2795  	noMoreDir := false
  2796  
  2797  	for _, en := range entries {
  2798  		if en.Type == fuseutil.DT_Directory {
  2799  			t.Assert(noMoreDir, Equals, false)
  2800  			dirs = append(dirs, en.Name)
  2801  		} else {
  2802  			files = append(files, en.Name)
  2803  			noMoreDir = true
  2804  		}
  2805  	}
  2806  
  2807  	t.Assert(dirs, DeepEquals, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2"})
  2808  	t.Assert(files, DeepEquals, []string{"file1", "file2", "zero"})
  2809  }
  2810  
  2811  func (s *GoofysTest) TestReadDirLookUp(t *C) {
  2812  	s.getRoot(t).dir.seqOpenDirScore = 2
  2813  
  2814  	var wg sync.WaitGroup
  2815  	for i := 0; i < 10; i++ {
  2816  		wg.Add(2)
  2817  		go func() {
  2818  			defer wg.Done()
  2819  			s.readDirIntoCache(t, fuseops.RootInodeID)
  2820  		}()
  2821  		go func() {
  2822  			defer wg.Done()
  2823  
  2824  			lookup := fuseops.LookUpInodeOp{
  2825  				Parent: fuseops.RootInodeID,
  2826  				Name:   "file1",
  2827  			}
  2828  			err := s.fs.LookUpInode(nil, &lookup)
  2829  			t.Assert(err, IsNil)
  2830  		}()
  2831  	}
  2832  	wg.Wait()
  2833  }
  2834  
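        // writeSeekWriteFuse writes `first`, seeks forward past a gap the size of
        // `second`, writes `third`, then seeks back and fills in `second`; the file
        // must end up as first+second+third with its mode unchanged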
  2835  func (s *GoofysTest) writeSeekWriteFuse(t *C, file string, fh *os.File, first string, second string, third string) {
  2836  	fi, err := os.Stat(file)
  2837  	t.Assert(err, IsNil)
  2838  
  2839  	defer func() {
  2840  		// close the file if the test failed so we can unmount
  2841  		if fh != nil {
  2842  			fh.Close()
  2843  		}
  2844  	}()
  2845  
  2846  	_, err = fh.WriteString(first)
  2847  	t.Assert(err, IsNil)
  2848  
  2849  	off, err := fh.Seek(int64(len(second)), io.SeekCurrent)
  2850  	t.Assert(err, IsNil)
  2851  	t.Assert(off, Equals, int64(len(first)+len(second)))
  2852  
  2853  	_, err = fh.WriteString(third)
  2854  	t.Assert(err, IsNil)
  2855  
  2856  	off, err = fh.Seek(int64(len(first)), io.SeekStart)
  2857  	t.Assert(err, IsNil)
  2858  	t.Assert(off, Equals, int64(len(first)))
  2859  
  2860  	_, err = fh.WriteString(second)
  2861  	t.Assert(err, IsNil)
  2862  
  2863  	err = fh.Close()
  2864  	t.Assert(err, IsNil)
  2865  	fh = nil
  2866  
  2867  	content, err := ioutil.ReadFile(file)
  2868  	t.Assert(err, IsNil)
  2869  	t.Assert(string(content), Equals, first+second+third)
  2870  
  2871  	fi2, err := os.Stat(file)
  2872  	t.Assert(err, IsNil)
  2873  	t.Assert(fi.Mode(), Equals, fi2.Mode())
  2874  }
  2875  
  2876  func (s *GoofysTest) TestWriteSeekWriteFuse(t *C) {
  2877  	if !isCatfs() {
  2878  		t.Skip("only works with CATFS=true")
  2879  	}
  2880  
  2881  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2882  	s.mount(t, mountPoint)
  2883  	defer s.umount(t, mountPoint)
  2884  
  2885  	file := mountPoint + "/newfile"
  2886  
  2887  	fh, err := os.Create(file)
  2888  	t.Assert(err, IsNil)
  2889  
  2890  	s.writeSeekWriteFuse(t, file, fh, "hello", " ", "world")
  2891  
  2892  	fh, err = os.OpenFile(file, os.O_WRONLY, 0600)
  2893  	t.Assert(err, IsNil)
  2894  
  2895  	s.writeSeekWriteFuse(t, file, fh, "", "never", "minding")
  2896  }
  2897  
  2898  func (s *GoofysTest) TestDirMtimeCreate(t *C) {
  2899  	root := s.getRoot(t)
  2900  
  2901  	attr, _ := root.GetAttributes()
  2902  	m1 := attr.Mtime
  2903  	time.Sleep(time.Second)
  2904  
  2905  	_, _ = root.Create("foo", fuseops.OpContext{Pid: uint32(os.Getpid())})
  2906  	attr2, _ := root.GetAttributes()
  2907  	m2 := attr2.Mtime
  2908  
  2909  	t.Assert(m1.Before(m2), Equals, true)
  2910  }
  2911  
  2912  func (s *GoofysTest) TestDirMtimeLs(t *C) {
  2913  	root := s.getRoot(t)
  2914  
  2915  	attr, _ := root.GetAttributes()
  2916  	m1 := attr.Mtime
  2917  	time.Sleep(3 * time.Second)
  2918  
  2919  	params := &PutBlobInput{
  2920  		Key:  "newfile",
  2921  		Body: bytes.NewReader([]byte("foo")),
  2922  		Size: PUInt64(3),
  2923  	}
  2924  	_, err := s.cloud.PutBlob(params)
  2925  	t.Assert(err, IsNil)
  2926  
  2927  	s.readDirIntoCache(t, fuseops.RootInodeID)
  2928  
  2929  	attr2, _ := root.GetAttributes()
  2930  	m2 := attr2.Mtime
  2931  
  2932  	t.Assert(m1.Before(m2), Equals, true)
  2933  }
  2934  
  2935  func (s *GoofysTest) TestRenameOverwrite(t *C) {
  2936  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2937  	s.mount(t, mountPoint)
  2938  	defer s.umount(t, mountPoint)
  2939  
  2940  	file := mountPoint + "/newfile"
  2941  	rename := mountPoint + "/file1"
  2942  
  2943  	fh, err := os.Create(file)
  2944  	t.Assert(err, IsNil)
  2945  
  2946  	err = fh.Close()
  2947  	t.Assert(err, IsNil)
  2948  
  2949  	err = os.Rename(file, rename)
  2950  	t.Assert(err, IsNil)
  2951  }
  2952  
  2953  func (s *GoofysTest) TestRead403(t *C) {
  2954  	// anonymous only works in S3 for now
  2955  	cloud := s.getRoot(t).dir.cloud
  2956  	s3, ok := cloud.Delegate().(*S3Backend)
  2957  	if !ok {
  2958  		t.Skip("only for S3")
  2959  	}
  2960  
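        	// verify that a 403 during read-ahead surfaces as EACCES, and that a
        	// second read fails the same way instead of tripping over the failed
        	// read-ahead buffers (see the PR linked below)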
  2961  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2962  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2963  
  2964  	// cache the inode first so we don't get 403 when we lookup
  2965  	in, err := s.LookUpInode(t, "file1")
  2966  	t.Assert(err, IsNil)
  2967  
  2968  	fh, err := in.OpenFile(fuseops.OpContext{Pid: uint32(os.Getpid())})
  2969  	t.Assert(err, IsNil)
  2970  
  2971  	s3.awsConfig.Credentials = credentials.AnonymousCredentials
  2972  	s3.newS3()
  2973  
  2974  	// fake enable read-ahead
  2975  	fh.seqReadAmount = uint64(READAHEAD_CHUNK)
  2976  
  2977  	buf := make([]byte, 5)
  2978  
  2979  	_, err = fh.ReadFile(0, buf)
  2980  	t.Assert(err, Equals, syscall.EACCES)
  2981  
  2982  	// now that the S3 GET has failed, try again, see
  2983  	// https://github.com/kahing/goofys/pull/243
  2984  	_, err = fh.ReadFile(0, buf)
  2985  	t.Assert(err, Equals, syscall.EACCES)
  2986  }
  2987  
  2988  func (s *GoofysTest) TestRmdirWithDiropen(t *C) {
  2989  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2990  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2991  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2992  
  2993  	s.mount(t, mountPoint)
  2994  	defer s.umount(t, mountPoint)
  2995  
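        	// deleting a directory while a handle to it is still open must not
        	// corrupt the parent directory's listing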
  2996  	err := os.MkdirAll(mountPoint+"/dir2/dir4", 0700)
  2997  	t.Assert(err, IsNil)
  2998  	err = os.MkdirAll(mountPoint+"/dir2/dir5", 0700)
  2999  	t.Assert(err, IsNil)
  3000  
  3001  	// 1. open dir5
  3002  	dir := mountPoint + "/dir2/dir5"
  3003  	fh, err := os.Open(dir)
  3004  	t.Assert(err, IsNil)
  3005  	defer fh.Close()
  3006  
  3007  	cmd1 := exec.Command("ls", mountPoint+"/dir2")
  3008  	out1, err1 := cmd1.Output()
  3009  	if err1 != nil {
  3010  		if ee, ok := err1.(*exec.ExitError); ok {
  3011  			panic(string(ee.Stderr))
  3012  		}
  3013  		panic(err1)
  3014  	}
  3015  	t.Assert(string(out1), DeepEquals, ""+"dir3\n"+"dir4\n"+"dir5\n")
  3016  
  3017  	// 2. rm -rf dir5
  3018  	cmd := exec.Command("rm", "-rf", dir)
  3019  	_, err = cmd.Output()
  3020  	if ee, ok := err.(*exec.ExitError); ok {
  3021  		panic(string(ee.Stderr))
  3022  	} else if err != nil {
  3023  		panic(err)
  3024  	}
  3025  
  3026  	// 3. readdir dir2
  3027  	fh1, err := os.Open(mountPoint + "/dir2")
  3028  	t.Assert(err, IsNil)
  3029  	defer func() {
  3030  		// close the file if the test failed so we can unmount
  3031  		if fh1 != nil {
  3032  			fh1.Close()
  3033  		}
  3034  	}()
  3035  
  3036  	names, err := fh1.Readdirnames(0)
  3037  	t.Assert(err, IsNil)
  3038  	t.Assert(names, DeepEquals, []string{"dir3", "dir4"})
  3039  
  3040  	cmd = exec.Command("ls", mountPoint+"/dir2")
  3041  	out, err := cmd.Output()
  3042  	if ee, ok := err.(*exec.ExitError); ok {
  3043  		panic(string(ee.Stderr))
  3044  	} else if err != nil {
  3045  		panic(err)
  3046  	}
  3047  
  3048  	t.Assert(string(out), DeepEquals, ""+"dir3\n"+"dir4\n")
  3049  
  3050  	err = fh1.Close()
  3051  	t.Assert(err, IsNil)
  3052  
  3053  	// 4. reset env
  3054  	err = fh.Close()
  3055  	t.Assert(err, IsNil)
  3056  
  3057  	err = os.RemoveAll(mountPoint + "/dir2/dir4")
  3058  	t.Assert(err, IsNil)
  3060  }
  3061  
  3062  func (s *GoofysTest) TestDirMTime(t *C) {
  3063  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  3064  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3065  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  3066  	s.fs.flags.Cheap = true
  3067  
  3068  	root := s.getRoot(t)
  3069  	t.Assert(time.Time{}.Before(root.Attributes.Mtime), Equals, true)
  3070  
  3071  	in, err := s.LookUpInode(t, "dir1")
  3072  	t.Assert(err, IsNil)
  3073  
  3074  	// take the mtime of a blob as the init time because when we test
  3075  	// against a real cloud, server time can be way off from local time
  3076  	initTime := in.Attributes.Mtime
  3077  
  3078  	dir1, err := s.LookUpInode(t, "dir1")
  3079  	t.Assert(err, IsNil)
  3080  
  3081  	attr1, _ := dir1.GetAttributes()
  3082  	m1 := attr1.Mtime
  3083  	if !s.cloud.Capabilities().DirBlob {
  3084  		// dir1 doesn't have a dir blob, so should take root's mtime
  3085  		t.Assert(m1, Equals, root.Attributes.Mtime)
  3086  	}
  3087  
  3088  	time.Sleep(2 * time.Second)
  3089  
  3090  	dir2, err := dir1.MkDir("dir2")
  3091  	t.Assert(err, IsNil)
  3092  
  3093  	attr2, _ := dir2.GetAttributes()
  3094  	m2 := attr2.Mtime
  3095  	t.Assert(m1.Add(2*time.Second).Before(m2), Equals, true)
  3096  
  3097  	// dir1 didn't have an explicit mtime, so it should update now
  3098  	// that we did a mkdir inside it
  3099  	attr1, _ = dir1.GetAttributes()
  3100  	m1 = attr1.Mtime
  3101  	t.Assert(m1, Equals, m2)
  3102  
  3103  	// we never added the inode so this will do the lookup again
  3104  	dir2, err = dir1.LookUp("dir2")
  3105  	t.Assert(err, IsNil)
  3106  
  3107  	// the new time comes from S3 which only has seconds
  3108  	// granularity
  3109  	attr2, _ = dir2.GetAttributes()
  3110  	t.Assert(m2, Not(Equals), attr2.Mtime)
  3111  	t.Assert(initTime.Add(time.Second).Before(attr2.Mtime), Equals, true)
  3112  
  3113  	// different dir2
  3114  	dir2, err = s.LookUpInode(t, "dir2")
  3115  	t.Assert(err, IsNil)
  3116  
  3117  	attr2, _ = dir2.GetAttributes()
  3118  	m2 = attr2.Mtime
  3119  
  3120  	// this fails because we are listing dir/, which means we
  3121  	// don't actually see the dir blob dir2/dir3/ (it's returned
  3122  	// as common prefix), so we can't get dir3's mtime
  3123  	if false {
  3124  		// dir2/dir3/ exists and has mtime
  3125  		s.readDirIntoCache(t, dir2.Id)
  3126  		dir3, err := s.LookUpInode(t, "dir2/dir3")
  3127  		t.Assert(err, IsNil)
  3128  
  3129  		attr3, _ := dir3.GetAttributes()
  3130  		// setupDefaultEnv is before mounting
  3131  		t.Assert(attr3.Mtime.Before(m2), Equals, true)
  3132  	}
  3133  
  3134  	time.Sleep(time.Second)
  3135  
  3136  	params := &PutBlobInput{
  3137  		Key:  "dir2/newfile",
  3138  		Body: bytes.NewReader([]byte("foo")),
  3139  		Size: PUInt64(3),
  3140  	}
  3141  	_, err = s.cloud.PutBlob(params)
  3142  	t.Assert(err, IsNil)
  3143  
  3144  	s.readDirIntoCache(t, dir2.Id)
  3145  
  3146  	newfile, err := dir2.LookUp("newfile")
  3147  	t.Assert(err, IsNil)
  3148  
  3149  	attr2New, _ := dir2.GetAttributes()
  3150  	// mtime should reflect that of the latest object
  3151  	// GCS can return nanosecond resolution, so truncate to seconds to compare
  3152  	t.Assert(attr2New.Mtime.Unix(), Equals, newfile.Attributes.Mtime.Unix())
  3153  	t.Assert(m2.Before(attr2New.Mtime), Equals, true)
  3154  }
  3155  
  3156  func (s *GoofysTest) TestDirMTimeNoTTL(t *C) {
  3157  	if s.cloud.Capabilities().DirBlob {
  3158  		t.Skip("Tests for behavior without dir blob")
  3159  	}
  3160  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  3161  	s.fs.flags.Cheap = true
  3162  
  3163  	dir2, err := s.LookUpInode(t, "dir2")
  3164  	t.Assert(err, IsNil)
  3165  
  3166  	attr2, _ := dir2.GetAttributes()
  3167  	m2 := attr2.Mtime
  3168  
  3169  	// dir2/dir3/ exists and has mtime
  3170  	s.readDirIntoCache(t, dir2.Id)
  3171  	dir3, err := s.LookUpInode(t, "dir2/dir3")
  3172  	t.Assert(err, IsNil)
  3173  
  3174  	attr3, _ := dir3.GetAttributes()
  3175  	// setupDefaultEnv is before mounting but we can't really
  3176  	// compare the time here since dir3 is s3 server time and dir2
  3177  	// is local time
  3178  	t.Assert(attr3.Mtime, Not(Equals), m2)
  3179  }
  3180  
  3181  func (s *GoofysTest) TestIssue326(t *C) {
  3182  	root := s.getRoot(t)
  3183  	_, err := root.MkDir("folder@name.something")
  3184  	t.Assert(err, IsNil)
  3185  	_, err = root.MkDir("folder#1#")
  3186  	t.Assert(err, IsNil)
  3187  
  3188  	s.readDirIntoCache(t, root.Id)
  3189  	s.assertEntries(t, root, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2",
  3190  		"file1", "file2", "folder#1#", "folder@name.something", "zero"})
  3191  }
  3192  
  3193  func (s *GoofysTest) TestSlurpFileAndDir(t *C) {
  3194  	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
  3195  		t.Skip("only for S3")
  3196  	}
  3197  	prefix := "TestSlurpFileAndDir/"
  3198  	// fileAndDir is both a file and a directory, and we are
  3199  	// slurping them together as part of our listing optimization
  3200  	blobs := []string{
  3201  		prefix + "fileAndDir",
  3202  		prefix + "fileAndDir/a",
  3203  	}
  3204  
  3205  	for _, b := range blobs {
  3206  		params := &PutBlobInput{
  3207  			Key:  b,
  3208  			Body: bytes.NewReader([]byte("foo")),
  3209  			Size: PUInt64(3),
  3210  		}
  3211  		_, err := s.cloud.PutBlob(params)
  3212  		t.Assert(err, IsNil)
  3213  	}
  3214  
  3215  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3216  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  3217  
  3218  	in, err := s.LookUpInode(t, prefix[0:len(prefix)-1])
  3219  	t.Assert(err, IsNil)
  3220  	t.Assert(in.dir, NotNil)
  3221  
  3222  	s.getRoot(t).dir.seqOpenDirScore = 2
  3223  	s.readDirIntoCache(t, in.Id)
  3224  
  3225  	// should have slurped these
  3226  	in = in.findChild("fileAndDir")
  3227  	t.Assert(in, NotNil)
  3228  	t.Assert(in.dir, NotNil)
  3229  
  3230  	in = in.findChild("a")
  3231  	t.Assert(in, NotNil)
  3232  
  3233  	// because of slurping we've decided that this is a directory;
  3234  	// lookup must _not_ talk to S3 again, or an S3 race may make us
  3235  	// decide it's a file again
  3236  	s.disableS3()
  3237  	in, err = s.LookUpInode(t, prefix+"fileAndDir")
  3238  	t.Assert(err, IsNil)
  3239  
  3240  	s.assertEntries(t, in, []string{"a"})
  3241  }
  3242  
  3243  func (s *GoofysTest) TestAzureDirBlob(t *C) {
  3244  	if _, ok := s.cloud.(*AZBlob); !ok {
  3245  		t.Skip("only for Azure blob")
  3246  	}
  3247  
  3248  	fakedir := []string{"dir2", "dir3"}
  3249  
  3250  	for _, d := range fakedir {
  3251  		params := &PutBlobInput{
  3252  			Key:  "azuredir/" + d,
  3253  			Body: bytes.NewReader([]byte("")),
  3254  			Metadata: map[string]*string{
  3255  				AzureDirBlobMetadataKey: PString("true"),
  3256  			},
  3257  			Size: PUInt64(0),
  3258  		}
  3259  		_, err := s.cloud.PutBlob(params)
  3260  		t.Assert(err, IsNil)
  3261  	}
  3262  
  3263  	defer func() {
  3264  		// because our listing changes dir3 to dir3/, test
  3265  		// cleanup cannot delete the blob, so we need to
  3266  		// clean up here
  3267  		for _, d := range fakedir {
  3268  			_, err := s.cloud.DeleteBlob(&DeleteBlobInput{Key: "azuredir/" + d})
  3269  			t.Assert(err, IsNil)
  3270  		}
  3271  	}()
  3272  
  3273  	s.setupBlobs(s.cloud, t, map[string]*string{
  3274  		// "azuredir/dir" would have gone here
  3275  		"azuredir/dir3,/":           nil,
  3276  		"azuredir/dir3/file1":       nil,
  3277  		"azuredir/dir345_is_a_file": nil,
  3278  	})
  3279  
  3280  	head, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir3"})
  3281  	t.Assert(err, IsNil)
  3282  	t.Assert(head.IsDirBlob, Equals, true)
  3283  
  3284  	head, err = s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir345_is_a_file"})
  3285  	t.Assert(err, IsNil)
  3286  	t.Assert(head.IsDirBlob, Equals, false)
  3287  
  3288  	list, err := s.cloud.ListBlobs(&ListBlobsInput{Prefix: PString("azuredir/")})
  3289  	t.Assert(err, IsNil)
  3290  
  3291  	// for flat listing, we rename `dir3` to `dir3/` and add it to Items,
  3292  	// `dir3` normally sorts before `dir3,/`, but after the rename `dir3/` should
  3293  	// sort after `dir3,/`
  3294  	t.Assert(len(list.Items), Equals, 5)
  3295  	t.Assert(*list.Items[0].Key, Equals, "azuredir/dir2/")
  3296  	t.Assert(*list.Items[1].Key, Equals, "azuredir/dir3,/")
  3297  	t.Assert(*list.Items[2].Key, Equals, "azuredir/dir3/")
  3298  	t.Assert(*list.Items[3].Key, Equals, "azuredir/dir3/file1")
  3299  	t.Assert(*list.Items[4].Key, Equals, "azuredir/dir345_is_a_file")
  3300  	t.Assert(sort.IsSorted(sortBlobItemOutput(list.Items)), Equals, true)
  3301  
  3302  	list, err = s.cloud.ListBlobs(&ListBlobsInput{
  3303  		Prefix:    PString("azuredir/"),
  3304  		Delimiter: PString("/"),
  3305  	})
  3306  	t.Assert(err, IsNil)
  3307  
  3308  	// for delimited listing, we remove `dir3` from items and add `dir3/` to prefixes,
  3309  	// which should already be there
  3310  	t.Assert(len(list.Items), Equals, 1)
  3311  	t.Assert(*list.Items[0].Key, Equals, "azuredir/dir345_is_a_file")
  3312  
  3313  	t.Assert(len(list.Prefixes), Equals, 3)
  3314  	t.Assert(*list.Prefixes[0].Prefix, Equals, "azuredir/dir2/")
  3315  	t.Assert(*list.Prefixes[1].Prefix, Equals, "azuredir/dir3,/")
  3316  	t.Assert(*list.Prefixes[2].Prefix, Equals, "azuredir/dir3/")
  3317  
  3318  	// finally check that we are reading them in correctly
  3319  	in, err := s.LookUpInode(t, "azuredir")
  3320  	t.Assert(err, IsNil)
  3321  
  3322  	s.assertEntries(t, in, []string{"dir2", "dir3", "dir3,", "dir345_is_a_file"})
  3323  }
  3324  
  3325  func (s *GoofysTest) TestReadDirLarge(t *C) {
  3326  	root := s.getRoot(t)
  3327  	root.dir.mountPrefix = "empty_dir"
  3328  
  3329  	blobs := make(map[string]*string)
  3330  	expect := make([]string, 0)
  3331  	for i := 0; i < 998; i++ {
  3332  		blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil
  3333  		expect = append(expect, fmt.Sprintf("%04vd", i))
  3334  	}
  3335  	blobs["empty_dir/0998f"] = nil
  3336  	blobs["empty_dir/0999f"] = nil
  3337  	blobs["empty_dir/1000f"] = nil
  3338  	expect = append(expect, "0998f")
  3339  	expect = append(expect, "0999f")
  3340  	expect = append(expect, "1000f")
  3341  
  3342  	for i := 1001; i < 1003; i++ {
  3343  		blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil
  3344  		expect = append(expect, fmt.Sprintf("%04vd", i))
  3345  	}
  3346  
  3347  	s.setupBlobs(s.cloud, t, blobs)
  3348  
  3349  	dh := root.OpenDir()
  3350  	defer dh.CloseDir()
  3351  
  3352  	children := namesOf(s.readDirFully(t, dh))
  3353  	sort.Strings(children)
  3354  
  3355  	t.Assert(children, DeepEquals, expect)
  3356  }
  3357  
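        // newBackend creates another backend of the same type as the one under
        // test; with createBucket it also creates the bucket and records it in
        // s.removeBucket so it can be cleaned up later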
  3358  func (s *GoofysTest) newBackend(t *C, bucket string, createBucket bool) (cloud StorageBackend) {
  3359  	var err error
  3360  	switch s.cloud.Delegate().(type) {
  3361  	case *S3Backend:
  3362  		config, _ := s.fs.flags.Backend.(*S3Config)
  3363  		s3, err := NewS3(bucket, s.fs.flags, config)
  3364  		t.Assert(err, IsNil)
  3365  
  3366  		s3.aws = hasEnv("AWS")
  3367  
  3368  		if s.emulator {
  3369  			s3.Handlers.Sign.Clear()
  3370  			s3.Handlers.Sign.PushBack(SignV2)
  3371  			s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
  3372  		}
  3373  
  3374  		if s3.aws {
  3375  			cloud = NewS3BucketEventualConsistency(s3)
  3376  		} else {
  3377  			cloud = s3
  3378  		}
  3379  	case *GCS3:
  3380  		config, _ := s.fs.flags.Backend.(*S3Config)
  3381  		cloud, err = NewGCS3(bucket, s.fs.flags, config)
  3382  		t.Assert(err, IsNil)
  3383  	case *AZBlob:
  3384  		config, _ := s.fs.flags.Backend.(*AZBlobConfig)
  3385  		cloud, err = NewAZBlob(bucket, config)
  3386  		t.Assert(err, IsNil)
  3387  	case *ADLv1:
  3388  		config, _ := s.fs.flags.Backend.(*ADLv1Config)
  3389  		cloud, err = NewADLv1(bucket, s.fs.flags, config)
  3390  		t.Assert(err, IsNil)
  3391  	case *ADLv2:
  3392  		config, _ := s.fs.flags.Backend.(*ADLv2Config)
  3393  		cloud, err = NewADLv2(bucket, s.fs.flags, config)
  3394  		t.Assert(err, IsNil)
  3395  	case *GCSBackend:
  3396  		config, _ := s.fs.flags.Backend.(*GCSConfig)
  3397  		cloud, err = NewGCS(bucket, config)
  3398  		t.Assert(err, IsNil)
  3399  	default:
  3400  		t.Fatal("unknown backend")
  3401  	}
  3402  
  3403  	if createBucket {
  3404  		_, err = cloud.MakeBucket(&MakeBucketInput{})
  3405  		t.Assert(err, IsNil)
  3406  
  3407  		s.removeBucket = append(s.removeBucket, cloud)
  3408  	}
  3409  
  3410  	return
  3411  }
  3412  
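        // TestVFS grafts a second backend onto dir4 and checks that lookups,
        // creates, mkdir, and renames resolve against the mounted cloud, and that
        // renaming across the mount boundary fails with EINVAL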
  3413  func (s *GoofysTest) TestVFS(t *C) {
  3414  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3415  	cloud2 := s.newBackend(t, bucket, true)
  3416  
  3417  	// "mount" this 2nd cloud
  3418  	in, err := s.LookUpInode(t, "dir4")
  3419  	t.Assert(in, NotNil)
  3420  	t.Assert(err, IsNil)
  3421  
  3422  	in.dir.cloud = cloud2
  3423  	in.dir.mountPrefix = "cloud2Prefix/"
  3424  
  3425  	rootCloud, rootPath := in.cloud()
  3426  	t.Assert(rootCloud, NotNil)
  3427  	t.Assert(rootCloud == cloud2, Equals, true)
  3428  	t.Assert(rootPath, Equals, "cloud2Prefix")
  3429  
  3430  	// the mount would shadow dir4/file5
  3431  	_, err = in.LookUp("file5")
  3432  	t.Assert(err, Equals, fuse.ENOENT)
  3433  
  3434  	_, fh := in.Create("testfile", fuseops.OpContext{Pid: uint32(os.Getpid())})
  3435  	err = fh.FlushFile()
  3436  	t.Assert(err, IsNil)
  3437  
  3438  	resp, err := cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile"})
  3439  	t.Assert(err, IsNil)
  3440  	defer resp.Body.Close()
  3441  
  3442  	err = s.getRoot(t).Rename("file1", in, "file2")
  3443  	t.Assert(err, Equals, syscall.EINVAL)
  3444  
  3445  	_, err = in.MkDir("subdir")
  3446  	t.Assert(err, IsNil)
  3447  
  3448  	subdirKey := "cloud2Prefix/subdir"
  3449  	if !cloud2.Capabilities().DirBlob {
  3450  		subdirKey += "/"
  3451  	}
  3452  
  3453  	_, err = cloud2.HeadBlob(&HeadBlobInput{Key: subdirKey})
  3454  	t.Assert(err, IsNil)
  3455  
  3456  	subdir, err := s.LookUpInode(t, "dir4/subdir")
  3457  	t.Assert(err, IsNil)
  3458  	t.Assert(subdir, NotNil)
  3459  	t.Assert(subdir.dir, NotNil)
  3460  	t.Assert(subdir.dir.cloud, IsNil)
  3461  
  3462  	subdirCloud, subdirPath := subdir.cloud()
  3463  	t.Assert(subdirCloud, NotNil)
  3464  	t.Assert(subdirCloud == cloud2, Equals, true)
  3465  	t.Assert(subdirPath, Equals, "cloud2Prefix/subdir")
  3466  
  3467  	// create another file inside subdir to make sure that our
  3468  	// mount check is correct for a dir inside the root
  3469  	_, fh = subdir.Create("testfile2", fuseops.OpContext{uint32(os.Getpid())})
  3470  	err = fh.FlushFile()
  3471  	t.Assert(err, IsNil)
  3472  
  3473  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3474  	t.Assert(err, IsNil)
  3475  	defer resp.Body.Close()
  3476  
  3477  	err = subdir.Rename("testfile2", in, "testfile2")
  3478  	t.Assert(err, IsNil)
  3479  
  3480  	_, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3481  	t.Assert(err, Equals, fuse.ENOENT)
  3482  
  3483  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"})
  3484  	t.Assert(err, IsNil)
  3485  	defer resp.Body.Close()
  3486  
  3487  	err = in.Rename("testfile2", subdir, "testfile2")
  3488  	t.Assert(err, IsNil)
  3489  
  3490  	_, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"})
  3491  	t.Assert(err, Equals, fuse.ENOENT)
  3492  
  3493  	resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"})
  3494  	t.Assert(err, IsNil)
  3495  	defer resp.Body.Close()
  3496  }
  3497  
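        // TestMountsList verifies that one listing merges mounted backends
        // with the root bucket's own entries, and that re-listing after the
        // cache TTL expires doesn't clobber the mounts.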
  3498  func (s *GoofysTest) TestMountsList(t *C) {
  3499  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3500  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  3501  
  3502  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3503  	cloud := s.newBackend(t, bucket, true)
  3504  
  3505  	root := s.getRoot(t)
  3506  	rootCloud := root.dir.cloud
  3507  
  3508  	s.fs.MountAll([]*Mount{
  3509  		&Mount{"dir4/cloud1", cloud, "", false},
  3510  	})
  3511  
  3512  	in, err := s.LookUpInode(t, "dir4")
  3513  	t.Assert(in, NotNil)
  3514  	t.Assert(err, IsNil)
  3515  	t.Assert(int(in.Id), Equals, 2)
  3516  
  3517  	s.readDirIntoCache(t, in.Id)
  3518  	// ensure that a single listing covers both the mounts and the root bucket
  3519  	root.dir.cloud = nil
  3520  
  3521  	s.assertEntries(t, in, []string{"cloud1", "file5"})
  3522  
  3523  	c1, err := s.LookUpInode(t, "dir4/cloud1")
  3524  	t.Assert(err, IsNil)
  3525  	t.Assert(*c1.Name, Equals, "cloud1")
  3526  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3527  	t.Assert(int(c1.Id), Equals, 3)
  3528  
  3529  	// pretend we've passed the normal cache ttl
  3530  	s.fs.flags.TypeCacheTTL = 0
  3531  	s.fs.flags.StatCacheTTL = 0
  3532  
  3533  	// listing root again should not overwrite the mounts
  3534  	root.dir.cloud = rootCloud
  3535  
  3536  	s.readDirIntoCache(t, in.Parent.Id)
  3537  	s.assertEntries(t, in, []string{"cloud1", "file5"})
  3538  
  3539  	c1, err = s.LookUpInode(t, "dir4/cloud1")
  3540  	t.Assert(err, IsNil)
  3541  	t.Assert(*c1.Name, Equals, "cloud1")
  3542  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3543  	t.Assert(int(c1.Id), Equals, 3)
  3544  }
  3545  
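        // TestMountsNewDir verifies that mounting under a directory that
        // doesn't exist in the root bucket (dir5) materializes the
        // intermediate directory.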
  3546  func (s *GoofysTest) TestMountsNewDir(t *C) {
  3547  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3548  	cloud := s.newBackend(t, bucket, true)
  3549  
  3550  	_, err := s.LookUpInode(t, "dir5")
  3551  	t.Assert(err, NotNil)
  3552  	t.Assert(err, Equals, fuse.ENOENT)
  3553  
  3554  	s.fs.MountAll([]*Mount{
  3555  		&Mount{"dir5/cloud1", cloud, "", false},
  3556  	})
  3557  
  3558  	in, err := s.LookUpInode(t, "dir5")
  3559  	t.Assert(err, IsNil)
  3560  	t.Assert(in.isDir(), Equals, true)
  3561  
  3562  	c1, err := s.LookUpInode(t, "dir5/cloud1")
  3563  	t.Assert(err, IsNil)
  3564  	t.Assert(c1.isDir(), Equals, true)
  3565  	t.Assert(c1.dir.cloud, Equals, cloud)
  3566  }
  3567  
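        // TestMountsNewMounts verifies that a later MountAll call can add
        // mounts incrementally without disturbing existing ones.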
  3568  func (s *GoofysTest) TestMountsNewMounts(t *C) {
  3569  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3570  	cloud := s.newBackend(t, bucket, true)
  3571  
  3572  	// "mount" this 2nd cloud
  3573  	in, err := s.LookUpInode(t, "dir4")
  3574  	t.Assert(in, NotNil)
  3575  	t.Assert(err, IsNil)
  3576  
  3577  	s.fs.MountAll([]*Mount{
  3578  		&Mount{"dir4/cloud1", cloud, "", false},
  3579  	})
  3580  
  3581  	s.readDirIntoCache(t, in.Id)
  3582  
  3583  	c1, err := s.LookUpInode(t, "dir4/cloud1")
  3584  	t.Assert(err, IsNil)
  3585  	t.Assert(*c1.Name, Equals, "cloud1")
  3586  	t.Assert(c1.dir.cloud == cloud, Equals, true)
  3587  
  3588  	_, err = s.LookUpInode(t, "dir4/cloud2")
  3589  	t.Assert(err, Equals, fuse.ENOENT)
  3590  
  3591  	s.fs.MountAll([]*Mount{
  3592  		&Mount{"dir4/cloud1", cloud, "", false},
  3593  		&Mount{"dir4/cloud2", cloud, "cloudprefix", false},
  3594  	})
  3595  
  3596  	c2, err := s.LookUpInode(t, "dir4/cloud2")
  3597  	t.Assert(err, IsNil)
  3598  	t.Assert(*c2.Name, Equals, "cloud2")
  3599  	t.Assert(c2.dir.cloud == cloud, Equals, true)
  3600  	t.Assert(c2.dir.mountPrefix, Equals, "cloudprefix")
  3601  }
  3602  
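        // TestMountsError mounts backends broken in backend-specific ways
        // and verifies that only the special INIT_ERR_BLOB entry is
        // visible under each failed mount.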
  3603  func (s *GoofysTest) TestMountsError(t *C) {
  3604  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3605  	var cloud StorageBackend
  3606  	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
  3607  		// S3Backend can't detect that a bucket doesn't exist,
  3608  		// because HEADing an object always returns 404 NotFound
  3609  		// (instead of NoSuchBucket)
  3610  		flags := *s3.flags
  3611  		config := *s3.config
  3612  		flags.Endpoint = "0.0.0.0:0"
  3613  		var err error
  3614  		cloud, err = NewS3(bucket, &flags, &config)
  3615  		t.Assert(err, IsNil)
  3616  	} else if _, ok := s.cloud.(*ADLv1); ok {
  3617  		config, _ := s.fs.flags.Backend.(*ADLv1Config)
  3618  		config.Authorizer = nil
  3619  
  3620  		var err error
  3621  		cloud, err = NewADLv1(bucket, s.fs.flags, config)
  3622  		t.Assert(err, IsNil)
  3623  	} else if _, ok := s.cloud.(*ADLv2); ok {
  3624  		// ADLv2 currently doesn't detect that the bucket doesn't exist
  3625  		cloud = s.newBackend(t, bucket, false)
  3626  		adlCloud, _ := cloud.(*ADLv2)
  3627  		auth := adlCloud.client.BaseClient.Authorizer
  3628  		adlCloud.client.BaseClient.Authorizer = nil
  3629  		defer func() {
  3630  			adlCloud.client.BaseClient.Authorizer = auth
  3631  		}()
  3632  	} else if _, ok := s.cloud.(*GCSBackend); ok {
  3633  		// We'll trigger a failure on the GCS mount by using an unauthenticated client to mount a private bucket
  3634  		defaultCreds := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
  3635  		os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")
  3636  
  3637  		defer func() {
  3638  			os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", defaultCreds)
  3639  		}()
  3640  
  3641  		var err error
  3642  		config := NewGCSConfig()
  3643  		cloud, err = NewGCS(s.fs.bucket, config)
  3644  		t.Assert(err, IsNil)
  3645  	} else {
  3646  		cloud = s.newBackend(t, bucket, false)
  3647  	}
  3648  
  3649  	s.fs.MountAll([]*Mount{
  3650  		&Mount{"dir4/newerror", StorageBackendInitError{
  3651  			fmt.Errorf("foo"),
  3652  			Capabilities{},
  3653  		}, "errprefix1", false},
  3654  		&Mount{"dir4/initerror", &StorageBackendInitWrapper{
  3655  			StorageBackend: cloud,
  3656  			initKey:        "foobar",
  3657  		}, "errprefix2", false},
  3658  	})
  3659  
  3660  	errfile, err := s.LookUpInode(t, "dir4/newerror/"+INIT_ERR_BLOB)
  3661  	t.Assert(err, IsNil)
  3662  	t.Assert(errfile.isDir(), Equals, false)
  3663  
  3664  	_, err = s.LookUpInode(t, "dir4/newerror/not_there")
  3665  	t.Assert(err, Equals, fuse.ENOENT)
  3666  
  3667  	errfile, err = s.LookUpInode(t, "dir4/initerror/"+INIT_ERR_BLOB)
  3668  	t.Assert(err, IsNil)
  3669  	t.Assert(errfile.isDir(), Equals, false)
  3670  
  3671  	_, err = s.LookUpInode(t, "dir4/initerror/not_there")
  3672  	t.Assert(err, Equals, fuse.ENOENT)
  3673  
  3674  	in, err := s.LookUpInode(t, "dir4/initerror")
  3675  	t.Assert(err, IsNil)
  3676  	t.Assert(in, NotNil)
  3677  
  3678  	t.Assert(in.dir.cloud.Capabilities().Name, Equals, cloud.Capabilities().Name)
  3679  }
  3680  
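        // TestMountsMultiLevel verifies that mounting several levels deep
        // (dir4/sub/dir) creates all the intermediate directories.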
  3681  func (s *GoofysTest) TestMountsMultiLevel(t *C) {
  3682  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  3683  
  3684  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3685  	cloud := s.newBackend(t, bucket, true)
  3686  
  3687  	s.fs.MountAll([]*Mount{
  3688  		&Mount{"dir4/sub/dir", cloud, "", false},
  3689  	})
  3690  
  3691  	sub, err := s.LookUpInode(t, "dir4/sub")
  3692  	t.Assert(err, IsNil)
  3693  	t.Assert(sub.isDir(), Equals, true)
  3694  
  3695  	s.assertEntries(t, sub, []string{"dir"})
  3696  }
  3697  
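        // test nested mounts, mounting the outer mount first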
  3698  func (s *GoofysTest) TestMountsNested(t *C) {
  3699  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3700  	cloud := s.newBackend(t, bucket, true)
  3701  	s.testMountsNested(t, cloud, []*Mount{
  3702  		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
  3703  		&Mount{"dir5/in/", cloud, "b/", false},
  3704  	})
  3705  }
  3706  
  3707  // test that mount order doesn't matter for nested mounts
  3708  func (s *GoofysTest) TestMountsNestedReversed(t *C) {
  3709  	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3710  	cloud := s.newBackend(t, bucket, true)
  3711  	s.testMountsNested(t, cloud, []*Mount{
  3712  		&Mount{"dir5/in/", cloud, "b/", false},
  3713  		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
  3714  	})
  3715  }
  3716  
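        // testMountsNested exercises a mount nested inside another mount
        // (dir5/in and dir5/in/a/dir) and checks that files written through
        // each land under the correct prefix.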
  3717  func (s *GoofysTest) testMountsNested(t *C, cloud StorageBackend,
  3718  	mounts []*Mount) {
  3719  
  3720  	_, err := s.LookUpInode(t, "dir5")
  3721  	t.Assert(err, NotNil)
  3722  	t.Assert(err, Equals, fuse.ENOENT)
  3723  
  3724  	s.fs.MountAll(mounts)
  3725  
  3726  	in, err := s.LookUpInode(t, "dir5")
  3727  	t.Assert(err, IsNil)
  3728  
  3729  	s.readDirIntoCache(t, in.Id)
  3730  
  3731  	// make sure all the intermediate dirs never expire
  3732  	time.Sleep(time.Second)
  3733  	dir_in, err := s.LookUpInode(t, "dir5/in")
  3734  	t.Assert(err, IsNil)
  3735  	t.Assert(*dir_in.Name, Equals, "in")
  3736  
  3737  	s.readDirIntoCache(t, dir_in.Id)
  3738  
  3739  	dir_a, err := s.LookUpInode(t, "dir5/in/a")
  3740  	t.Assert(err, IsNil)
  3741  	t.Assert(*dir_a.Name, Equals, "a")
  3742  
  3743  	s.assertEntries(t, dir_a, []string{"dir"})
  3744  
  3745  	dir_dir, err := s.LookUpInode(t, "dir5/in/a/dir")
  3746  	t.Assert(err, IsNil)
  3747  	t.Assert(*dir_dir.Name, Equals, "dir")
  3748  	t.Assert(dir_dir.dir.cloud == cloud, Equals, true)
  3749  
  3750  	_, fh := dir_in.Create("testfile", fuseops.OpContext{uint32(os.Getpid())})
  3751  	err = fh.FlushFile()
  3752  	t.Assert(err, IsNil)
  3753  
  3754  	resp, err := cloud.GetBlob(&GetBlobInput{Key: "b/testfile"})
  3755  	t.Assert(err, IsNil)
  3756  	defer resp.Body.Close()
  3757  
  3758  	_, fh = dir_dir.Create("testfile", fuseops.OpContext{uint32(os.Getpid())})
  3759  	err = fh.FlushFile()
  3760  	t.Assert(err, IsNil)
  3761  
  3762  	resp, err = cloud.GetBlob(&GetBlobInput{Key: "a/dir/testfile"})
  3763  	t.Assert(err, IsNil)
  3764  	defer resp.Body.Close()
  3765  
  3766  	s.assertEntries(t, in, []string{"in"})
  3767  }
  3768  
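        // verifyFileData asserts that the file at path (relative to
        // mountPoint) contains content after trimming whitespace; a nil
        // content asserts that the file does not exist.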
  3769  func verifyFileData(t *C, mountPoint string, path string, content *string) {
  3770  	if !strings.HasSuffix(mountPoint, "/") {
  3771  		mountPoint = mountPoint + "/"
  3772  	}
  3773  	path = mountPoint + path
  3774  	data, err := ioutil.ReadFile(path)
  3775  	comment := Commentf("failed while verifying %v", path)
  3776  	if content != nil {
  3777  		t.Assert(err, IsNil, comment)
  3778  		t.Assert(strings.TrimSpace(string(data)), Equals, *content, comment)
  3779  	} else {
  3780  		t.Assert(err, Not(IsNil), comment)
  3781  		t.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true, comment)
  3782  	}
  3783  }
  3784  
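        // TestNestedMountUnmountSimple mounts a child bucket over childmnt,
        // checks that it shadows the parent's blobs, then unmounts it and
        // checks that the parent's view is restored.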
  3785  func (s *GoofysTest) TestNestedMountUnmountSimple(t *C) {
  3786  	childBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3787  	childCloud := s.newBackend(t, childBucket, true)
  3788  
  3789  	parFileContent := "parent"
  3790  	childFileContent := "child"
  3791  	parEnv := map[string]*string{
  3792  		"childmnt/x/in_child_and_par": &parFileContent,
  3793  		"childmnt/x/in_par_only":      &parFileContent,
  3794  		"nonchildmnt/something":       &parFileContent,
  3795  	}
  3796  	childEnv := map[string]*string{
  3797  		"x/in_child_only":    &childFileContent,
  3798  		"x/in_child_and_par": &childFileContent,
  3799  	}
  3800  	s.setupBlobs(s.cloud, t, parEnv)
  3801  	s.setupBlobs(childCloud, t, childEnv)
  3802  
  3803  	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
  3804  	s.mount(t, rootMountPath)
  3805  	defer s.umount(t, rootMountPath)
  3806  	// Files under /tmp/fusetesting/ should all be from goofys root.
  3807  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
  3808  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
  3809  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3810  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
  3811  
  3812  	childMount := &Mount{"childmnt", childCloud, "", false}
  3813  	s.fs.Mount(childMount)
  3814  	// Now files under /tmp/fusetesting/childmnt should be from childBucket
  3815  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", nil)
  3816  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &childFileContent)
  3817  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", &childFileContent)
  3818  	// /tmp/fusetesting/nonchildmnt should be from parent bucket.
  3819  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3820  
  3821  	s.fs.Unmount(childMount.name)
  3822  	// Child is unmounted. So files under /tmp/fusetesting/ should all be from goofys root.
  3823  	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
  3824  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
  3825  	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
  3826  	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
  3827  }
  3828  
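        // TestUnmountBucketWithChild verifies that unmounting "c" leaves
        // the deeper "c/c" mount intact.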
  3829  func (s *GoofysTest) TestUnmountBucketWithChild(t *C) {
  3830  	// This bucket will be mounted at ${goofysroot}/c
  3831  	cBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3832  	cCloud := s.newBackend(t, cBucket, true)
  3833  
  3834  	// This bucket will be mounted at ${goofysroot}/c/c
  3835  	ccBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
  3836  	ccCloud := s.newBackend(t, ccBucket, true)
  3837  
  3838  	pFileContent := "parent"
  3839  	cFileContent := "child"
  3840  	ccFileContent := "childchild"
  3841  	pEnv := map[string]*string{
  3842  		"c/c/x/foo": &pFileContent,
  3843  	}
  3844  	cEnv := map[string]*string{
  3845  		"c/x/foo": &cFileContent,
  3846  	}
  3847  	ccEnv := map[string]*string{
  3848  		"x/foo": &ccFileContent,
  3849  	}
  3850  
  3851  	s.setupBlobs(s.cloud, t, pEnv)
  3852  	s.setupBlobs(cCloud, t, cEnv)
  3853  	s.setupBlobs(ccCloud, t, ccEnv)
  3854  
  3855  	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
  3856  	s.mount(t, rootMountPath)
  3857  	defer s.umount(t, rootMountPath)
  3858  	// c/c/x/foo should come from the root mount.
  3859  	verifyFileData(t, rootMountPath, "c/c/x/foo", &pFileContent)
  3860  
  3861  	cMount := &Mount{"c", cCloud, "", false}
  3862  	s.fs.Mount(cMount)
  3863  	// c/c/x/foo should come from the "c" mount.
  3864  	verifyFileData(t, rootMountPath, "c/c/x/foo", &cFileContent)
  3865  
  3866  	ccMount := &Mount{"c/c", ccCloud, "", false}
  3867  	s.fs.Mount(ccMount)
  3868  	// c/c/x/foo should come from the "c/c" mount.
  3869  	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
  3870  
  3871  	s.fs.Unmount(cMount.name)
  3872  	// c/c/x/foo should still come from the "c/c" mount.
  3873  	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
  3874  }
  3875  
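        // TestRmImplicitDir removes an implicit directory (dir2) through
        // the FUSE mount while the working directory is inside it, then
        // checks the root listing.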
  3876  func (s *GoofysTest) TestRmImplicitDir(t *C) {
  3877  	mountPoint := "/tmp/mnt" + s.fs.bucket
  3878  
  3879  	s.mount(t, mountPoint)
  3880  	defer s.umount(t, mountPoint)
  3881  
  3882  	defer os.Chdir("/")
  3883  
  3884  	dir, err := os.Open(mountPoint + "/dir2")
  3885  	t.Assert(err, IsNil)
  3886  	defer dir.Close()
  3887  
  3888  	err = dir.Chdir()
  3889  	t.Assert(err, IsNil)
  3890  
  3891  	err = os.RemoveAll(mountPoint + "/dir2")
  3892  	t.Assert(err, IsNil)
  3893  
  3894  	root, err := os.Open(mountPoint)
  3895  	t.Assert(err, IsNil)
  3896  	defer root.Close()
  3897  
  3898  	files, err := root.Readdirnames(0)
  3899  	t.Assert(err, IsNil)
  3900  	t.Assert(files, DeepEquals, []string{
  3901  		"dir1", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero",
  3902  	})
  3903  }
  3904  
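        // TestMount mounts the filesystem and blocks until interrupted;
        // it is for manual inspection rather than assertions, and is
        // skipped when MOUNT=false.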
  3905  func (s *GoofysTest) TestMount(t *C) {
  3906  	if os.Getenv("MOUNT") == "false" {
  3907  		t.Skip("Not mounting")
  3908  	}
  3909  
  3910  	mountPoint := "/tmp/mnt" + s.fs.bucket
  3911  
  3912  	s.mount(t, mountPoint)
  3913  	defer s.umount(t, mountPoint)
  3914  
  3915  	log.Printf("Mounted at %v", mountPoint)
  3916  
  3917  	c := make(chan os.Signal, 2)
  3918  	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
  3919  	<-c
  3920  }
  3921  
  3922  // Checks if 2 sorted lists are equal. Returns a helpful error if they differ.
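        // For example, checkSortedListsAreEqual([]string{"a", "b"},
        // []string{"a", "c"}) reports `only l1: 1:b, only l2: 1:c`.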
  3923  func checkSortedListsAreEqual(l1, l2 []string) error {
  3924  	i1, i2 := 0, 0
  3925  	onlyl1, onlyl2 := []string{}, []string{}
  3926  	for i1 < len(l1) && i2 < len(l2) {
  3927  		if l1[i1] == l2[i2] {
  3928  			i1++
  3929  			i2++
  3930  		} else if l1[i1] < l2[i2] {
  3931  			onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
  3932  			i1++
  3933  		} else {
  3934  			onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
  3935  			i2++
  3936  		}
  3937  
  3938  	}
  3939  	for ; i1 < len(l1); i1++ {
  3940  		onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
  3941  	}
  3942  	for ; i2 < len(l2); i2++ {
  3943  		onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
  3944  	}
  3945  
  3946  	if len(onlyl1)+len(onlyl2) == 0 {
  3947  		return nil
  3948  	}
  3949  	toString := func(l []string) string {
  3950  		ret := []string{}
  3951  		// The list can contain a lot of elements. Show only ten and say
  3952  		// "and x more".
  3953  		for i := 0; i < len(l) && i < 10; i++ {
  3954  			ret = append(ret, l[i])
  3955  		}
  3956  		if len(ret) < len(l) {
  3957  			ret = append(ret, fmt.Sprintf("and %d more", len(l)-len(ret)))
  3958  		}
  3959  		return strings.Join(ret, ", ")
  3960  	}
  3961  	return fmt.Errorf("only l1: %+v, only l2: %+v",
  3962  		toString(onlyl1), toString(onlyl2))
  3963  }
  3964  
  3965  func (s *GoofysTest) TestReadDirDash(t *C) {
  3966  	if s.azurite {
  3967  		t.Skip("azurite doesn't support pagination")
  3968  	}
  3969  	root := s.getRoot(t)
  3970  	root.dir.mountPrefix = "prefix"
  3971  
  3972  	// SETUP
  3973  	// Add the following blobs:
  3974  	// - prefix/2019/1
  3975  	// - prefix/2019-0000 to prefix/2019-4999
  3976  	// - prefix/20190000 to prefix/20194999
  3977  	// Fetching this result needs 3 pages on Azure (page size 5k) and 11 pages
  3978  	// on Amazon (page size 1k).
  3979  	// This setup verifies that we paginate and return results correctly before and after
  3980  	// seeing all contents that have a '-' ('-' < '/'). For more context read the comments in
  3981  	// dir.go::listBlobsSafe.
  3982  	blobs := make(map[string]*string)
  3983  	expect := []string{"2019"}
  3984  	blobs["prefix/2019/1"] = nil
  3985  	for i := 0; i < 5000; i++ {
  3986  		name := fmt.Sprintf("2019-%04d", i)
  3987  		expect = append(expect, name)
  3988  		blobs["prefix/"+name] = nil
  3989  	}
  3990  	for i := 0; i < 5000; i++ {
  3991  		name := fmt.Sprintf("2019%04d", i)
  3992  		expect = append(expect, name)
  3993  		blobs["prefix/"+name] = nil
  3994  	}
  3995  	s.setupBlobs(s.cloud, t, blobs)
  3996  
  3997  	// Read the directory and verify its contents.
  3998  	dh := root.OpenDir()
  3999  	defer dh.CloseDir()
  4000  
  4001  	children := namesOf(s.readDirFully(t, dh))
  4002  	t.Assert(checkSortedListsAreEqual(children, expect), IsNil)
  4003  }
  4004  
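        // TestWriteListFlush checks that a file that was created but not
        // yet flushed still shows up in its directory listing, and that
        // the listing doesn't invalidate the live inode.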
  4005  func (s *GoofysTest) TestWriteListFlush(t *C) {
  4006  	root := s.getRoot(t)
  4007  	root.dir.mountPrefix = "this_test/"
  4008  
  4009  	dir, err := root.MkDir("dir")
  4010  	t.Assert(err, IsNil)
  4011  	s.fs.insertInode(root, dir)
  4012  
  4013  	in, fh := dir.Create("file1", fuseops.OpContext{})
  4014  	t.Assert(in, NotNil)
  4015  	t.Assert(fh, NotNil)
  4016  	s.fs.insertInode(dir, in)
  4017  
  4018  	s.assertEntries(t, dir, []string{"file1"})
  4019  
  4020  	// in should still be valid
  4021  	t.Assert(in.Parent, NotNil)
  4022  	t.Assert(in.Parent, Equals, dir)
  4023  	fh.FlushFile()
  4024  
  4025  	s.assertEntries(t, dir, []string{"file1"})
  4026  }
  4027  
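        // includes is a gocheck Checker that succeeds when the obtained
        // array, slice, or string contains an element DeepEquals to the
        // expected value, e.g. t.Assert(names, Not(includes{}), "deleted").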
  4028  type includes struct{}
  4029  
  4030  func (c includes) Info() *CheckerInfo {
  4031  	return &CheckerInfo{Name: "includes", Params: []string{"obtained", "expected"}}
  4032  }
  4033  
  4034  func (c includes) Check(params []interface{}, names []string) (res bool, error string) {
  4035  	arr := reflect.ValueOf(params[0])
  4036  	switch arr.Kind() {
  4037  	case reflect.Array, reflect.Slice, reflect.String:
  4038  	default:
  4039  		panic(fmt.Sprintf("%v is not an array", names[0]))
  4040  	}
  4041  
  4042  	for i := 0; i < arr.Len(); i++ {
  4043  		v := arr.Index(i).Interface()
  4044  		res, error = DeepEquals.Check([]interface{}{v, params[1]}, names)
  4045  		if res {
  4046  			return
  4047  		}
  4048  		// no match at this index: clear the error message and keep scanning
  4049  		error = ""
  4050  	}
  4053  	return
  4054  }
  4055  
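        // TestWriteUnlinkFlush checks that flushing a file that was
        // unlinked while open succeeds without resurrecting the directory
        // entry (S3 is disabled so a flush that tried to upload would fail).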
  4056  func (s *GoofysTest) TestWriteUnlinkFlush(t *C) {
  4057  	root := s.getRoot(t)
  4058  
  4059  	dir, err := root.MkDir("dir")
  4060  	t.Assert(err, IsNil)
  4061  	s.fs.insertInode(root, dir)
  4062  
  4063  	in, fh := dir.Create("deleted", fuseops.OpContext{})
  4064  	t.Assert(in, NotNil)
  4065  	t.Assert(fh, NotNil)
  4066  	s.fs.insertInode(dir, in)
  4067  
  4068  	err = dir.Unlink("deleted")
  4069  	t.Assert(err, IsNil)
  4070  
  4071  	s.disableS3()
  4072  	err = fh.FlushFile()
  4073  	t.Assert(err, IsNil)
  4074  
  4075  	dh := dir.OpenDir()
  4076  	defer dh.CloseDir()
  4077  	t.Assert(namesOf(s.readDirFully(t, dh)), Not(includes{}), "deleted")
  4078  }
  4079  
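        // TestIssue474 is a regression test: listing 1/ also slurps in
        // 2/c/d, and the later re-listing of the expired 2/ must not drop
        // the slurped-in child.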
  4080  func (s *GoofysTest) TestIssue474(t *C) {
  4081  	s.fs.flags.TypeCacheTTL = 1 * time.Second
  4082  	s.fs.flags.Cheap = true
  4083  
  4084  	p := "this_test/"
  4085  	root := s.getRoot(t)
  4086  	root.dir.mountPrefix = p
  4087  	root.dir.seqOpenDirScore = 2
  4088  
  4089  	blobs := make(map[string]*string)
  4090  
  4091  	in := []string{
  4092  		"1/a/b",
  4093  		"2/c/d",
  4094  	}
  4095  
  4096  	for _, key := range in {
  4097  		blobs[p+key] = nil
  4098  	}
  4099  
  4100  	s.setupBlobs(s.cloud, t, blobs)
  4101  
  4102  	dir1, err := s.LookUpInode(t, "1")
  4103  	t.Assert(err, IsNil)
  4104  	// this would list 1/ and slurp in 2/c/d at the same time
  4105  	s.assertEntries(t, dir1, []string{"a"})
  4106  
  4107  	// 2/ will expire and require re-listing. ensure that we don't
  4108  	// remove any children as stale while we update
  4109  	time.Sleep(time.Second)
  4110  
  4111  	dir2, err := s.LookUpInode(t, "2")
  4112  	t.Assert(err, IsNil)
  4113  	s.assertEntries(t, dir2, []string{"c"})
  4114  }
  4115  
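        // TestReadExternalChangesFuse overwrites file1 directly in the
        // cloud and checks that reads through the mount pick up the update
        // once the stat cache TTL passes, and then come from cache.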
  4116  func (s *GoofysTest) TestReadExternalChangesFuse(t *C) {
  4117  	s.fs.flags.StatCacheTTL = 1 * time.Second
  4118  
  4119  	mountPoint := "/tmp/mnt" + s.fs.bucket
  4120  
  4121  	s.mount(t, mountPoint)
  4122  	defer s.umount(t, mountPoint)
  4123  
  4124  	file := "file1"
  4125  	filePath := mountPoint + "/file1"
  4126  
  4127  	buf, err := ioutil.ReadFile(filePath)
  4128  	t.Assert(err, IsNil)
  4129  	t.Assert(string(buf), Equals, file)
  4130  
  4131  	update := "file2"
  4132  	_, err = s.cloud.PutBlob(&PutBlobInput{
  4133  		Key:  file,
  4134  		Body: bytes.NewReader([]byte(update)),
  4135  		Size: PUInt64(uint64(len(update))),
  4136  	})
  4137  	t.Assert(err, IsNil)
  4138  
  4139  	time.Sleep(1 * time.Second)
  4140  
  4141  	buf, err = ioutil.ReadFile(filePath)
  4142  	t.Assert(err, IsNil)
  4143  	t.Assert(string(buf), Equals, update)
  4144  
  4145  	// the next read shouldn't talk to the cloud
  4146  	root := s.getRoot(t)
  4147  	root.dir.cloud = &StorageBackendInitError{
  4148  		syscall.EINVAL, *root.dir.cloud.Capabilities(),
  4149  	}
  4150  
  4151  	buf, err = ioutil.ReadFile(filePath)
  4152  	t.Assert(err, IsNil)
  4153  	t.Assert(string(buf), Equals, update)
  4154  }
  4155  
  4156  func (s *GoofysTest) TestReadMyOwnWriteFuse(t *C) {
  4157  	s.testReadMyOwnWriteFuse(t, false)
  4158  }
  4159  
  4160  func (s *GoofysTest) TestReadMyOwnWriteExternalChangesFuse(t *C) {
  4161  	s.testReadMyOwnWriteFuse(t, true)
  4162  }
  4163  
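        // testReadMyOwnWriteFuse writes through the mount and reads the
        // file back before and after flushing, optionally racing an
        // external update, to exercise the page-cache keep/invalidate
        // behavior.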
  4164  func (s *GoofysTest) testReadMyOwnWriteFuse(t *C, externalUpdate bool) {
  4165  	s.fs.flags.StatCacheTTL = 1 * time.Second
  4166  
  4167  	mountPoint := "/tmp/mnt" + s.fs.bucket
  4168  
  4169  	s.mount(t, mountPoint)
  4170  	defer s.umount(t, mountPoint)
  4171  
  4172  	file := "file1"
  4173  	filePath := mountPoint + "/file1"
  4174  
  4175  	buf, err := ioutil.ReadFile(filePath)
  4176  	t.Assert(err, IsNil)
  4177  	t.Assert(string(buf), Equals, file)
  4178  
  4179  	if externalUpdate {
  4180  		update := "file2"
  4181  		_, err = s.cloud.PutBlob(&PutBlobInput{
  4182  			Key:  file,
  4183  			Body: bytes.NewReader([]byte(update)),
  4184  			Size: PUInt64(uint64(len(update))),
  4185  		})
  4186  		t.Assert(err, IsNil)
  4187  
  4188  		time.Sleep(s.fs.flags.StatCacheTTL)
  4189  	}
  4190  
  4191  	fh, err := os.Create(filePath)
  4192  	t.Assert(err, IsNil)
  4193  
  4194  	_, err = fh.WriteString("file3")
  4195  	t.Assert(err, IsNil)
  4196  	// we can't flush yet because if we did, we would be reading
  4197  	// the new copy from the cloud, and that's not the point of
  4198  	// this test
  4199  	defer func() {
  4200  		// want fh to be late-binding because we re-use the variable
  4201  		fh.Close()
  4202  	}()
  4203  
  4204  	buf, err = ioutil.ReadFile(filePath)
  4205  	t.Assert(err, IsNil)
  4206  	if externalUpdate {
  4207  		// if there was an external update, we had set
  4208  		// KeepPageCache to false on os.Create above, which
  4209  		// causes our write to not be in cache, so the read
  4210  		// here will go to the cloud
  4211  		t.Assert(string(buf), Equals, "file2")
  4212  	} else {
  4213  		t.Assert(string(buf), Equals, "file3")
  4214  	}
  4215  
  4216  	err = fh.Close()
  4217  	t.Assert(err, IsNil)
  4218  
  4219  	time.Sleep(s.fs.flags.StatCacheTTL)
  4220  
  4221  	root := s.getRoot(t)
  4222  	cloud := &TestBackend{root.dir.cloud, nil}
  4223  	root.dir.cloud = cloud
  4224  
  4225  	fh, err = os.Open(filePath)
  4226  	t.Assert(err, IsNil)
  4227  
  4228  	if !externalUpdate {
  4229  		// we flushed and the TTL expired; the next lookup should
  4230  		// realize nothing changed and NOT invalidate the cache.
  4231  		// Except on ADLv1 and GCS: PUT there doesn't return the
  4232  		// mtime, so the open above will think the file was
  4233  		// updated and not re-use the cache
  4234  		_, adlv1 := s.cloud.(*ADLv1)
  4235  		_, isGCS := s.cloud.(*GCSBackend)
  4236  		if !adlv1 && !isGCS {
  4237  			cloud.err = fuse.EINVAL
  4238  		}
  4239  	} else {
  4240  		// if there was an externalUpdate, we wrote our own
  4241  		// update with KeepPageCache=false, so we should read
  4242  		// from the cloud here
  4243  	}
  4244  
  4245  	buf, err = ioutil.ReadAll(fh)
  4246  	t.Assert(err, IsNil)
  4247  	t.Assert(string(buf), Equals, "file3")
  4248  }
  4249  
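        // TestReadMyOwnNewFileFuse would verify reading back a brand-new,
        // unflushed file; the read-back is disabled below because the
        // kernel cache can't currently serve our own update.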
  4250  func (s *GoofysTest) TestReadMyOwnNewFileFuse(t *C) {
  4251  	s.fs.flags.StatCacheTTL = 1 * time.Second
  4252  	s.fs.flags.TypeCacheTTL = 1 * time.Second
  4253  
  4254  	mountPoint := "/tmp/mnt" + s.fs.bucket
  4255  
  4256  	s.mount(t, mountPoint)
  4257  	defer s.umount(t, mountPoint)
  4258  
  4259  	filePath := mountPoint + "/filex"
  4260  
  4261  	// jacobsa/fuse doesn't support setting OpenKeepCache on
  4262  	// CreateFile, but even after manually setting it in
  4263  	// fuse/conversions.go, we still receive read ops instead of
  4264  	// having them handled by the kernel
  4265  
  4266  	fh, err := os.Create(filePath)
  4267  	t.Assert(err, IsNil)
  4268  
  4269  	_, err = fh.WriteString("filex")
  4270  	t.Assert(err, IsNil)
  4271  	// we can't flush yet because if we did, we would be reading
  4272  	// the new copy from the cloud, and that's not the point of
  4273  	// this test
  4274  	defer fh.Close()
  4275  
  4276  	// disabled: we can't actually read back our own update
  4277  	//buf, err := ioutil.ReadFile(filePath)
  4278  	//t.Assert(err, IsNil)
  4279  	//t.Assert(string(buf), Equals, "filex")
  4280  }