github.com/sagansystems/goofys-app@v0.19.1-0.20180410053237-b2302fdf5af9/internal/goofys_test.go

     1  // Copyright 2015 - 2017 Ka-Hing Cheung
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package internal
    16  
    17  import (
    18  	"bufio"
    19  	"bytes"
    20  	"fmt"
    21  	"io"
    22  	"io/ioutil"
    23  	"math/rand"
    24  	"net"
    25  	"os"
    26  	"os/exec"
    27  	"os/user"
    28  	"runtime"
    29  	"sort"
    30  	"strconv"
    31  	"strings"
    32  	"sync"
    33  	"syscall"
    34  	"testing"
    35  	"time"
    36  
    37  	"golang.org/x/net/context"
    38  
    39  	"github.com/aws/aws-sdk-go/aws"
    40  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    41  	"github.com/aws/aws-sdk-go/aws/credentials"
    42  	"github.com/aws/aws-sdk-go/aws/session"
    43  	"github.com/aws/aws-sdk-go/service/s3"
    44  
    45  	"github.com/kahing/go-xattr"
    46  
    47  	"github.com/jacobsa/fuse"
    48  	"github.com/jacobsa/fuse/fuseops"
    49  	"github.com/jacobsa/fuse/fuseutil"
    50  
    51  	"github.com/sirupsen/logrus"
    52  
    53  	. "gopkg.in/check.v1"
    54  )
    55  
     56  // so I don't get complaints about unused imports
    57  var ignored = logrus.DebugLevel
    58  
    59  func currentUid() uint32 {
    60  	user, err := user.Current()
    61  	if err != nil {
    62  		panic(err)
    63  	}
    64  
    65  	uid, err := strconv.ParseUint(user.Uid, 10, 32)
    66  	if err != nil {
    67  		panic(err)
    68  	}
    69  
    70  	return uint32(uid)
    71  }
    72  
    73  func currentGid() uint32 {
    74  	user, err := user.Current()
    75  	if err != nil {
    76  		panic(err)
    77  	}
    78  
    79  	gid, err := strconv.ParseUint(user.Gid, 10, 32)
    80  	if err != nil {
    81  		panic(err)
    82  	}
    83  
    84  	return uint32(gid)
    85  }
    86  
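         // GoofysTest holds the state shared across the suite: the Goofys
         // filesystem under test, the S3 client used to verify results out of
         // band, and the map of objects that seeds each test bucket.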
    87  type GoofysTest struct {
    88  	fs        *Goofys
    89  	ctx       context.Context
    90  	awsConfig *aws.Config
    91  	s3        *s3.S3
    92  	sess      *session.Session
    93  	env       map[string]io.ReadSeeker
    94  }
    95  
    96  func Test(t *testing.T) {
    97  	TestingT(t)
    98  }
    99  
   100  var _ = Suite(&GoofysTest{})
   101  
   102  func logOutput(t *C, tag string, r io.ReadCloser) {
   103  	in := bufio.NewScanner(r)
   104  
   105  	for in.Scan() {
   106  		t.Log(tag, in.Text())
   107  	}
   108  }
   109  
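         // waitFor polls addr until a TCP connection succeeds, retrying up to
         // 10 times with a 100ms pause, and returns the last dial error.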
   110  func waitFor(t *C, addr string) (err error) {
   111  	// wait for it to listen on port
   112  	for i := 0; i < 10; i++ {
   113  		var conn net.Conn
   114  		conn, err = net.Dial("tcp", addr)
   115  		if err == nil {
   116  			// we are done!
   117  			conn.Close()
   118  			return
   119  		} else {
    120  			t.Logf("Could not connect: %v", err)
   121  			time.Sleep(100 * time.Millisecond)
   122  		}
   123  	}
   124  
   125  	return
   126  }
   127  
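         // selectTestConfig returns the aws.Config for the backend chosen via
         // the AWS, GCS or MINIO environment variables; by default it expects a
         // local S3-compatible server (s3proxy, going by the workarounds
         // elsewhere in this file) listening on 127.0.0.1:8080.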
   128  func selectTestConfig(t *C) *aws.Config {
   129  	if hasEnv("AWS") {
   130  		return &aws.Config{
   131  			Region:     aws.String("us-west-2"),
   132  			DisableSSL: aws.Bool(true),
   133  			//LogLevel:         aws.LogLevel(aws.LogDebug | aws.LogDebugWithSigning),
   134  			S3ForcePathStyle: aws.Bool(true),
   135  		}
   136  	} else if hasEnv("GCS") {
   137  		return &aws.Config{
   138  			Region:      aws.String("us-west1"),
   139  			Endpoint:    aws.String("http://storage.googleapis.com"),
   140  			Credentials: credentials.NewSharedCredentials("", os.Getenv("GCS")),
   141  			//LogLevel:         aws.LogLevel(aws.LogDebug | aws.LogDebugWithSigning),
   142  			S3ForcePathStyle: aws.Bool(true),
   143  		}
   144  	} else if hasEnv("MINIO") {
   145  		return &aws.Config{
   146  			Credentials: credentials.NewStaticCredentials("Q3AM3UQ867SPQQA43P2F",
   147  				"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
   148  			Region: aws.String("us-east-1"),
   149  			//LogLevel:         aws.LogLevel(aws.LogDebug | aws.LogDebugWithSigning),
   150  			S3ForcePathStyle: aws.Bool(true),
   151  			Endpoint:         aws.String("https://play.minio.io:9000"),
   152  		}
   153  	} else {
   154  		addr := "127.0.0.1:8080"
   155  
   156  		err := waitFor(t, addr)
   157  		t.Assert(err, IsNil)
   158  
   159  		return &aws.Config{
   160  			//Credentials: credentials.AnonymousCredentials,
   161  			Credentials:      credentials.NewStaticCredentials("foo", "bar", ""),
   162  			Region:           aws.String("us-west-2"),
   163  			Endpoint:         aws.String("http://" + addr),
   164  			DisableSSL:       aws.Bool(true),
   165  			S3ForcePathStyle: aws.Bool(true),
   166  			MaxRetries:       aws.Int(0),
   167  			//Logger: t,
   168  			//LogLevel: aws.LogLevel(aws.LogDebug),
   169  			//LogLevel: aws.LogLevel(aws.LogDebug | aws.LogDebugWithHTTPBody),
   170  		}
   171  	}
   172  }
   173  
   174  func (s *GoofysTest) SetUpSuite(t *C) {
   175  }
   176  
   177  func (s *GoofysTest) deleteBucket(t *C) {
   178  	resp, err := s.s3.ListObjects(&s3.ListObjectsInput{Bucket: &s.fs.bucket})
   179  	t.Assert(err, IsNil)
   180  
   181  	if hasEnv("GCS") {
   182  		// GCS does not have multi-delete
   183  		var wg sync.WaitGroup
   184  
   185  		for _, o := range resp.Contents {
   186  			wg.Add(1)
   187  			key := *o.Key
    188  			go func() {
    189  				defer wg.Done()
    190  				_, err := s.s3.DeleteObject(&s3.DeleteObjectInput{
    191  					Bucket: &s.fs.bucket,
    192  					Key:    &key,
    193  				})
    194  				t.Assert(err, IsNil)
    195  			}()
   196  		}
   197  		wg.Wait()
   198  	} else {
   199  		num_objs := len(resp.Contents)
   200  
   201  		var items s3.Delete
   202  		var objs = make([]*s3.ObjectIdentifier, num_objs)
   203  
   204  		for i, o := range resp.Contents {
   205  			objs[i] = &s3.ObjectIdentifier{Key: aws.String(*o.Key)}
   206  		}
   207  
    208  		// add the list of objects to delete to the s3.Delete request
   209  		items.SetObjects(objs)
   210  		_, err = s.s3.DeleteObjects(&s3.DeleteObjectsInput{Bucket: &s.fs.bucket, Delete: &items})
   211  		t.Assert(err, IsNil)
   212  	}
   213  
   214  	s.s3.DeleteBucket(&s3.DeleteBucketInput{Bucket: &s.fs.bucket})
   215  }
   216  
   217  func (s *GoofysTest) TearDownSuite(t *C) {
   218  	s.deleteBucket(t)
   219  }
   220  
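         // setupEnv creates bucket and uploads every entry in env. A nil reader
         // is replaced by the key itself as the body, or by an empty body for
         // keys ending in "/" (directory placeholders).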
   221  func (s *GoofysTest) setupEnv(t *C, bucket string, env map[string]io.ReadSeeker, public bool) {
   222  	param := s3.CreateBucketInput{
   223  		Bucket: &bucket,
   224  		//ACL: aws.String(s3.BucketCannedACLPrivate),
   225  	}
   226  	if public {
   227  		param.ACL = aws.String("public-read")
   228  	}
   229  	_, err := s.s3.CreateBucket(&param)
   230  	t.Assert(err, IsNil)
   231  
   232  	for path, r := range env {
   233  		if r == nil {
   234  			if strings.HasSuffix(path, "/") {
   235  				r = bytes.NewReader([]byte{})
   236  			} else {
   237  				r = bytes.NewReader([]byte(path))
   238  			}
   239  		}
   240  
   241  		params := &s3.PutObjectInput{
   242  			Bucket: &bucket,
   243  			Key:    &path,
   244  			Body:   r,
   245  			Metadata: map[string]*string{
   246  				"name": aws.String(path + "+/#%00"),
   247  			},
   248  		}
   249  
   250  		_, err := s.s3.PutObject(params)
   251  		t.Assert(err, IsNil)
   252  	}
   253  
   254  	// double check
   255  	for path := range env {
   256  		params := &s3.HeadObjectInput{Bucket: &bucket, Key: &path}
   257  		_, err := s.s3.HeadObject(params)
   258  		t.Assert(err, IsNil)
   259  	}
   260  
   261  	t.Log("setupEnv done")
   262  }
   263  
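         // setupDefaultEnv creates a randomly named bucket populated with the
         // canonical file/directory layout that most tests assume.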
   264  func (s *GoofysTest) setupDefaultEnv(t *C, public bool) (bucket string) {
   265  	s.env = map[string]io.ReadSeeker{
   266  		"file1":           nil,
   267  		"file2":           nil,
   268  		"dir1/file3":      nil,
   269  		"dir2/dir3/":      nil,
   270  		"dir2/dir3/file4": nil,
   271  		"dir4/":           nil,
   272  		"dir4/file5":      nil,
   273  		"empty_dir/":      nil,
   274  		"empty_dir2/":     nil,
   275  		"zero":            bytes.NewReader([]byte{}),
   276  	}
   277  
   278  	bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
   279  	s.setupEnv(t, bucket, s.env, public)
   280  	return bucket
   281  }
   282  
   283  func (s *GoofysTest) SetUpTest(t *C) {
   284  	s.awsConfig = selectTestConfig(t)
   285  	s.sess = session.New(s.awsConfig)
   286  	s.s3 = s3.New(s.sess)
   287  
   288  	if !hasEnv("MINIO") {
   289  		s.s3.Handlers.Sign.Clear()
   290  		s.s3.Handlers.Sign.PushBack(SignV2)
   291  		s.s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
   292  	}
   293  
   294  	_, err := s.s3.ListBuckets(nil)
   295  	t.Assert(err, IsNil)
   296  
   297  	bucket := s.setupDefaultEnv(t, false)
   298  
   299  	s.ctx = context.Background()
   300  
   301  	uid, gid := MyUserAndGroup()
   302  	flags := &FlagStorage{
   303  		StorageClass: "STANDARD",
   304  		DirMode:      0700,
   305  		FileMode:     0700,
   306  		Uid:          uint32(uid),
   307  		Gid:          uint32(gid),
   308  	}
   309  	if hasEnv("GCS") {
   310  		flags.Endpoint = "http://storage.googleapis.com"
   311  	}
   312  	s.fs = NewGoofys(context.Background(), bucket, s.awsConfig, flags)
   313  	t.Assert(s.fs, NotNil)
   314  }
   315  
   316  func (s *GoofysTest) getRoot(t *C) (inode *Inode) {
   317  	inode = s.fs.inodes[fuseops.RootInodeID]
   318  	t.Assert(inode, NotNil)
   319  	return
   320  }
   321  
   322  func (s *GoofysTest) TestGetRootInode(t *C) {
   323  	root := s.getRoot(t)
   324  	t.Assert(root.Id, Equals, fuseops.InodeID(fuseops.RootInodeID))
   325  }
   326  
   327  func (s *GoofysTest) TestGetRootAttributes(t *C) {
   328  	_, err := s.getRoot(t).GetAttributes()
   329  	t.Assert(err, IsNil)
   330  }
   331  
   332  func (s *GoofysTest) ForgetInode(t *C, inode fuseops.InodeID) {
   333  	err := s.fs.ForgetInode(s.ctx, &fuseops.ForgetInodeOp{Inode: inode})
   334  	t.Assert(err, IsNil)
   335  }
   336  
   337  func (s *GoofysTest) LookUpInode(t *C, name string) (in *Inode, err error) {
   338  	parent := s.getRoot(t)
   339  
   340  	for {
   341  		idx := strings.Index(name, "/")
   342  		if idx == -1 {
   343  			break
   344  		}
   345  
   346  		dirName := name[0:idx]
   347  		name = name[idx+1:]
   348  
   349  		lookup := fuseops.LookUpInodeOp{
   350  			Parent: parent.Id,
   351  			Name:   dirName,
   352  		}
   353  
   354  		err = s.fs.LookUpInode(nil, &lookup)
   355  		if err != nil {
   356  			return
   357  		}
   358  		parent = s.fs.inodes[lookup.Entry.Child]
   359  	}
   360  
   361  	lookup := fuseops.LookUpInodeOp{
   362  		Parent: parent.Id,
   363  		Name:   name,
   364  	}
   365  
   366  	err = s.fs.LookUpInode(nil, &lookup)
   367  	if err != nil {
   368  		return
   369  	}
   370  	in = s.fs.inodes[lookup.Entry.Child]
   371  	return
   372  }
   373  
   374  func (s *GoofysTest) TestLookUpInode(t *C) {
   375  	_, err := s.LookUpInode(t, "file1")
   376  	t.Assert(err, IsNil)
   377  
   378  	_, err = s.LookUpInode(t, "fileNotFound")
   379  	t.Assert(err, Equals, fuse.ENOENT)
   380  
   381  	_, err = s.LookUpInode(t, "dir1/file3")
   382  	t.Assert(err, IsNil)
   383  
   384  	_, err = s.LookUpInode(t, "dir2/dir3")
   385  	t.Assert(err, IsNil)
   386  
   387  	_, err = s.LookUpInode(t, "dir2/dir3/file4")
   388  	t.Assert(err, IsNil)
   389  
   390  	_, err = s.LookUpInode(t, "empty_dir")
   391  	t.Assert(err, IsNil)
   392  }
   393  
   394  func (s *GoofysTest) TestGetInodeAttributes(t *C) {
   395  	inode, err := s.getRoot(t).LookUp("file1")
   396  	t.Assert(err, IsNil)
   397  
   398  	attr, err := inode.GetAttributes()
   399  	t.Assert(err, IsNil)
   400  	t.Assert(attr.Size, Equals, uint64(len("file1")))
   401  }
   402  
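         // readDirFully drains dh, asserting that "." and ".." come first, and
         // returns the remaining entries.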
   403  func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []DirHandleEntry) {
   404  	dh.mu.Lock()
   405  	defer dh.mu.Unlock()
   406  
   407  	en, err := dh.ReadDir(fuseops.DirOffset(0))
   408  	t.Assert(err, IsNil)
   409  	t.Assert(en, NotNil)
   410  	t.Assert(*en.Name, Equals, ".")
   411  
   412  	en, err = dh.ReadDir(fuseops.DirOffset(1))
   413  	t.Assert(err, IsNil)
   414  	t.Assert(en, NotNil)
   415  	t.Assert(*en.Name, Equals, "..")
   416  
   417  	for i := fuseops.DirOffset(2); ; i++ {
   418  		en, err = dh.ReadDir(i)
   419  		t.Assert(err, IsNil)
   420  
   421  		if en == nil {
   422  			return
   423  		}
   424  
   425  		entries = append(entries, *en)
   426  	}
   427  }
   428  
   429  func namesOf(entries []DirHandleEntry) (names []string) {
   430  	for _, en := range entries {
   431  		names = append(names, *en.Name)
   432  	}
   433  	return
   434  }
   435  
   436  func (s *GoofysTest) assertEntries(t *C, in *Inode, names []string) {
   437  	dh := in.OpenDir()
   438  	defer dh.CloseDir()
   439  
   440  	t.Assert(namesOf(s.readDirFully(t, dh)), DeepEquals, names)
   441  }
   442  
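         // readDirIntoCache lists inode through the fuse OpenDir/ReadDir ops so
         // that its listing is cached for later lookups.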
   443  func (s *GoofysTest) readDirIntoCache(t *C, inode fuseops.InodeID) {
   444  	openDirOp := fuseops.OpenDirOp{Inode: inode}
   445  	err := s.fs.OpenDir(nil, &openDirOp)
   446  	t.Assert(err, IsNil)
   447  
   448  	readDirOp := fuseops.ReadDirOp{
   449  		Inode:  inode,
   450  		Handle: openDirOp.Handle,
   451  		Dst:    make([]byte, 8*1024),
   452  	}
   453  
   454  	err = s.fs.ReadDir(nil, &readDirOp)
   455  	t.Assert(err, IsNil)
   456  }
   457  
   458  func (s *GoofysTest) TestReadDirCacheLookup(t *C) {
   459  	s.fs.flags.StatCacheTTL = 1 * time.Minute
   460  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
   461  
   462  	s.readDirIntoCache(t, fuseops.RootInodeID)
   463  	s.disableS3()
   464  
   465  	// should be cached so lookup should not need to talk to s3
   466  	entries := []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"}
   467  	for _, en := range entries {
   468  		err := s.fs.LookUpInode(nil, &fuseops.LookUpInodeOp{
   469  			Parent: fuseops.RootInodeID,
   470  			Name:   en,
   471  		})
   472  		t.Assert(err, IsNil)
   473  	}
   474  }
   475  
   476  func (s *GoofysTest) TestReadDir(t *C) {
   477  	// test listing /
   478  	dh := s.getRoot(t).OpenDir()
   479  	defer dh.CloseDir()
   480  
   481  	s.assertEntries(t, s.getRoot(t), []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"})
   482  
   483  	// test listing dir1/
   484  	in, err := s.LookUpInode(t, "dir1")
   485  	t.Assert(err, IsNil)
   486  	s.assertEntries(t, in, []string{"file3"})
   487  
   488  	// test listing dir2/
   489  	in, err = s.LookUpInode(t, "dir2")
   490  	t.Assert(err, IsNil)
   491  	s.assertEntries(t, in, []string{"dir3"})
   492  
   493  	// test listing dir2/dir3/
   494  	in, err = in.LookUp("dir3")
   495  	t.Assert(err, IsNil)
   496  	s.assertEntries(t, in, []string{"file4"})
   497  }
   498  
   499  func (s *GoofysTest) TestReadFiles(t *C) {
   500  	parent := s.getRoot(t)
   501  	dh := parent.OpenDir()
   502  	defer dh.CloseDir()
   503  
   504  	var entries []*DirHandleEntry
   505  
   506  	dh.mu.Lock()
   507  	for i := fuseops.DirOffset(0); ; i++ {
   508  		en, err := dh.ReadDir(i)
   509  		t.Assert(err, IsNil)
   510  
   511  		if en == nil {
   512  			break
   513  		}
   514  
   515  		entries = append(entries, en)
   516  	}
   517  	dh.mu.Unlock()
   518  
   519  	for _, en := range entries {
   520  		if en.Type == fuseutil.DT_File {
   521  			in, err := parent.LookUp(*en.Name)
   522  			t.Assert(err, IsNil)
   523  
   524  			fh, err := in.OpenFile()
   525  			t.Assert(err, IsNil)
   526  
   527  			buf := make([]byte, 4096)
   528  
   529  			nread, err := fh.ReadFile(0, buf)
   530  			if *en.Name == "zero" {
   531  				t.Assert(nread, Equals, 0)
   532  			} else {
   533  				t.Assert(nread, Equals, len(*en.Name))
   534  				buf = buf[0:nread]
   535  				t.Assert(string(buf), Equals, *en.Name)
   536  			}
    537  		}
   540  	}
   541  }
   542  
   543  func (s *GoofysTest) TestReadOffset(t *C) {
   544  	root := s.getRoot(t)
   545  	f := "file1"
   546  
   547  	in, err := root.LookUp(f)
   548  	t.Assert(err, IsNil)
   549  
   550  	fh, err := in.OpenFile()
   551  	t.Assert(err, IsNil)
   552  
   553  	buf := make([]byte, 4096)
   554  
   555  	nread, err := fh.ReadFile(1, buf)
   556  	t.Assert(err, IsNil)
   557  	t.Assert(nread, Equals, len(f)-1)
   558  	t.Assert(string(buf[0:nread]), DeepEquals, f[1:])
   559  
   560  	r := rand.New(rand.NewSource(time.Now().UnixNano()))
   561  
   562  	for i := 0; i < 3; i++ {
   563  		off := r.Int31n(int32(len(f)))
   564  		nread, err = fh.ReadFile(int64(off), buf)
   565  		t.Assert(err, IsNil)
   566  		t.Assert(nread, Equals, len(f)-int(off))
   567  		t.Assert(string(buf[0:nread]), DeepEquals, f[off:])
   568  	}
   569  }
   570  
   571  func (s *GoofysTest) TestCreateFiles(t *C) {
   572  	fileName := "testCreateFile"
   573  
   574  	_, fh := s.getRoot(t).Create(fileName)
   575  
   576  	err := fh.FlushFile()
   577  	t.Assert(err, IsNil)
   578  
   579  	resp, err := s.s3.GetObject(&s3.GetObjectInput{Bucket: &s.fs.bucket, Key: &fileName})
   580  	t.Assert(err, IsNil)
   581  	t.Assert(*resp.ContentLength, DeepEquals, int64(0))
   582  	defer resp.Body.Close()
   583  
   584  	_, err = s.getRoot(t).LookUp(fileName)
   585  	t.Assert(err, IsNil)
   586  
   587  	fileName = "testCreateFile2"
   588  	s.testWriteFile(t, fileName, 1, 128*1024)
   589  
   590  	inode, err := s.getRoot(t).LookUp(fileName)
   591  	t.Assert(err, IsNil)
   592  
   593  	fh, err = inode.OpenFile()
   594  	t.Assert(err, IsNil)
   595  
   596  	err = fh.FlushFile()
   597  	t.Assert(err, IsNil)
   598  
   599  	resp, err = s.s3.GetObject(&s3.GetObjectInput{Bucket: &s.fs.bucket, Key: &fileName})
   600  	t.Assert(err, IsNil)
   601  	t.Assert(*resp.ContentLength, Equals, int64(1))
   602  	defer resp.Body.Close()
   603  }
   604  
   605  func (s *GoofysTest) TestUnlink(t *C) {
   606  	fileName := "file1"
   607  
   608  	err := s.getRoot(t).Unlink(fileName)
   609  	t.Assert(err, IsNil)
   610  
   611  	// make sure that it's gone from s3
   612  	_, err = s.s3.GetObject(&s3.GetObjectInput{Bucket: &s.fs.bucket, Key: &fileName})
   613  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
   614  }
   615  
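         // FileHandleReader adapts a goofys FileHandle to io.ReadSeeker so its
         // contents can be compared against a reference reader.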
   616  type FileHandleReader struct {
   617  	fs     *Goofys
   618  	fh     *FileHandle
   619  	offset int64
   620  }
   621  
   622  func (r *FileHandleReader) Read(p []byte) (nread int, err error) {
   623  	nread, err = r.fh.ReadFile(r.offset, p)
   624  	r.offset += int64(nread)
   625  	return
   626  }
   627  
   628  func (r *FileHandleReader) Seek(offset int64, whence int) (int64, error) {
   629  	switch whence {
   630  	case 0:
   631  		r.offset = offset
   632  	case 1:
   633  		r.offset += offset
   634  	default:
   635  		panic(fmt.Sprintf("unsupported whence: %v", whence))
   636  	}
   637  
   638  	return r.offset, nil
   639  }
   640  
   641  func (s *GoofysTest) testWriteFile(t *C, fileName string, size int64, write_size int) {
   642  	s.testWriteFileAt(t, fileName, int64(0), size, write_size)
   643  }
   644  
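         // testWriteFileAt writes size bytes of sequential data starting at
         // offset, in chunks of write_size, flushes the file, and verifies the
         // object's ContentLength and contents against a fresh SeqReader.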
   645  func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, write_size int) {
   646  	var fh *FileHandle
   647  
   648  	if offset == 0 {
   649  		_, fh = s.getRoot(t).Create(fileName)
   650  	} else {
   651  		in, err := s.getRoot(t).LookUp(fileName)
   652  		t.Assert(err, IsNil)
   653  
   654  		fh, err = in.OpenFile()
   655  		t.Assert(err, IsNil)
   656  	}
   657  
   658  	buf := make([]byte, write_size)
   659  	nwritten := offset
   660  
   661  	src := io.LimitReader(&SeqReader{}, size)
   662  
   663  	for {
   664  		nread, err := src.Read(buf)
   665  		if err == io.EOF {
   666  			t.Assert(nwritten, Equals, size)
   667  			break
   668  		}
   669  		t.Assert(err, IsNil)
   670  
   671  		err = fh.WriteFile(nwritten, buf[:nread])
   672  		t.Assert(err, IsNil)
   673  		nwritten += int64(nread)
   674  	}
   675  
   676  	err := fh.FlushFile()
   677  	t.Assert(err, IsNil)
   678  
   679  	resp, err := s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &fileName})
   680  	t.Assert(err, IsNil)
   681  	t.Assert(*resp.ContentLength, DeepEquals, size+offset)
   682  
   683  	fr := &FileHandleReader{s.fs, fh, offset}
   684  	diff, err := CompareReader(fr, io.LimitReader(&SeqReader{offset}, size))
   685  	t.Assert(err, IsNil)
   686  	t.Assert(diff, Equals, -1)
   687  	t.Assert(fr.offset, Equals, size)
   688  
   689  	fh.Release()
   690  }
   691  
   692  func (s *GoofysTest) TestWriteLargeFile(t *C) {
   693  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
   694  	s.testWriteFile(t, "testLargeFile2", 20*1024*1024, 128*1024)
   695  }
   696  
   697  func (s *GoofysTest) TestWriteReplicatorThrottle(t *C) {
   698  	s.fs.replicators = Ticket{Total: 1}.Init()
   699  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
   700  }
   701  
   702  func (s *GoofysTest) TestReadWriteMinimumMemory(t *C) {
   703  	s.fs.bufferPool.maxBuffers = 2
   704  	s.fs.bufferPool.computedMaxbuffers = s.fs.bufferPool.maxBuffers
   705  	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
   706  }
   707  
   708  func (s *GoofysTest) TestWriteManyFilesFile(t *C) {
   709  	var files sync.WaitGroup
   710  
   711  	for i := 0; i < 21; i++ {
   712  		files.Add(1)
   713  		fileName := "testSmallFile" + strconv.Itoa(i)
   714  		go func() {
   715  			defer files.Done()
   716  			s.testWriteFile(t, fileName, 1, 128*1024)
   717  		}()
   718  	}
   719  
   720  	files.Wait()
   721  }
   722  
   723  func (s *GoofysTest) testWriteFileNonAlign(t *C) {
   724  	s.testWriteFile(t, "testWriteFileNonAlign", 6*1024*1024, 128*1024+1)
   725  }
   726  
   727  func (s *GoofysTest) TestReadRandom(t *C) {
   728  	size := int64(21 * 1024 * 1024)
   729  
   730  	s.testWriteFile(t, "testLargeFile", size, 128*1024)
   731  	in, err := s.LookUpInode(t, "testLargeFile")
   732  	t.Assert(err, IsNil)
   733  
   734  	fh, err := in.OpenFile()
   735  	t.Assert(err, IsNil)
   736  	fr := &FileHandleReader{s.fs, fh, 0}
   737  
   738  	src := rand.NewSource(time.Now().UnixNano())
   739  	truth := &SeqReader{}
   740  
   741  	for i := 0; i < 10; i++ {
   742  		offset := src.Int63() % (size / 2)
   743  
   744  		fr.Seek(offset, 0)
   745  		truth.Seek(offset, 0)
   746  
   747  		// read 5MB+1 from that offset
   748  		nread := int64(5*1024*1024 + 1)
   749  		CompareReader(io.LimitReader(fr, nread), io.LimitReader(truth, nread))
   750  	}
   751  }
   752  
   753  func (s *GoofysTest) TestMkDir(t *C) {
   754  	_, err := s.LookUpInode(t, "new_dir/file")
   755  	t.Assert(err, Equals, fuse.ENOENT)
   756  
   757  	dirName := "new_dir"
   758  	inode, err := s.getRoot(t).MkDir(dirName)
   759  	t.Assert(err, IsNil)
   760  	t.Assert(*inode.FullName(), Equals, dirName)
   761  
   762  	_, err = s.LookUpInode(t, dirName)
   763  	t.Assert(err, IsNil)
   764  
   765  	fileName := "file"
   766  	_, fh := inode.Create(fileName)
   767  
   768  	err = fh.FlushFile()
   769  	t.Assert(err, IsNil)
   770  
   771  	_, err = s.LookUpInode(t, dirName+"/"+fileName)
   772  	t.Assert(err, IsNil)
   773  }
   774  
   775  func (s *GoofysTest) TestRmDir(t *C) {
   776  	root := s.getRoot(t)
   777  
   778  	err := root.RmDir("dir1")
   779  	t.Assert(err, Equals, fuse.ENOTEMPTY)
   780  
   781  	err = root.RmDir("dir2")
   782  	t.Assert(err, Equals, fuse.ENOTEMPTY)
   783  
   784  	err = root.RmDir("empty_dir")
   785  	t.Assert(err, IsNil)
   786  
   787  }
   788  
   789  func (s *GoofysTest) TestRenamePreserveMetadata(t *C) {
   790  	root := s.getRoot(t)
   791  
   792  	from, to := "file1", "new_file"
   793  
   794  	metadata := make(map[string]*string)
   795  	metadata["foo"] = aws.String("bar")
   796  
   797  	_, err := s.s3.CopyObject(&s3.CopyObjectInput{
   798  		Bucket:            &s.fs.bucket,
   799  		CopySource:        aws.String(s.fs.bucket + "/" + from),
   800  		Key:               &from,
   801  		Metadata:          metadata,
   802  		MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
   803  	})
   804  	t.Assert(err, IsNil)
   805  
   806  	err = root.Rename(from, root, to)
   807  	t.Assert(err, IsNil)
   808  
   809  	resp, err := s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &to})
   810  	t.Assert(err, IsNil)
   811  	t.Assert(resp.Metadata["Foo"], NotNil)
   812  	t.Assert(*resp.Metadata["Foo"], Equals, "bar")
   813  }
   814  
   815  func (s *GoofysTest) TestRenameLarge(t *C) {
   816  	s.testWriteFile(t, "large_file", 21*1024*1024, 128*1024)
   817  
   818  	root := s.getRoot(t)
   819  
   820  	from, to := "large_file", "large_file2"
   821  	err := root.Rename(from, root, to)
   822  	t.Assert(err, IsNil)
   823  }
   824  
   825  func (s *GoofysTest) TestRenameToExisting(t *C) {
   826  	root := s.getRoot(t)
   827  
   828  	// cache these 2 files first
   829  	_, err := s.LookUpInode(t, "file1")
   830  	t.Assert(err, IsNil)
   831  
   832  	_, err = s.LookUpInode(t, "file2")
   833  	t.Assert(err, IsNil)
   834  
   835  	err = s.fs.Rename(nil, &fuseops.RenameOp{
   836  		OldParent: root.Id,
   837  		NewParent: root.Id,
   838  		OldName:   "file1",
   839  		NewName:   "file2",
   840  	})
   841  	t.Assert(err, IsNil)
   842  
   843  	file1 := root.findChild("file1")
   844  	t.Assert(file1, IsNil)
   845  
   846  	file2 := root.findChild("file2")
   847  	t.Assert(file2, NotNil)
   848  	t.Assert(*file2.Name, Equals, "file2")
   849  }
   850  
   851  func (s *GoofysTest) TestRename(t *C) {
   852  	root := s.getRoot(t)
   853  	from, to := "dir1", "new_dir"
   854  	err := root.Rename(from, root, to)
   855  	t.Assert(err, Equals, fuse.ENOTEMPTY)
   856  
   857  	dir2, err := root.LookUp("dir2")
   858  	t.Assert(err, IsNil)
   859  
   860  	from, to = "dir3", "new_dir"
   861  	err = dir2.Rename(from, root, to)
   862  	t.Assert(err, Equals, fuse.ENOTEMPTY)
   863  
   864  	from, to = "empty_dir", "dir1"
   865  	err = root.Rename(from, root, to)
   866  	t.Assert(err, Equals, fuse.ENOTEMPTY)
   867  
   868  	from, to = "empty_dir", "file1"
   869  	err = root.Rename(from, root, to)
   870  	t.Assert(err, Equals, fuse.ENOTDIR)
   871  
   872  	from, to = "file1", "empty_dir"
   873  	err = root.Rename(from, root, to)
   874  	t.Assert(err, Equals, syscall.EISDIR)
   875  
   876  	from, to = "empty_dir", "new_dir"
   877  	err = root.Rename(from, root, to)
   878  	t.Assert(err, IsNil)
   879  
   880  	from, to = "file1", "new_file"
   881  	err = root.Rename(from, root, to)
   882  	t.Assert(err, IsNil)
   883  
   884  	_, err = s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &to})
   885  	t.Assert(err, IsNil)
   886  
   887  	_, err = s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &from})
   888  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
   889  
   890  	from, to = "file3", "new_file"
   891  	dir, _ := s.LookUpInode(t, "dir1")
   892  	err = dir.Rename(from, root, to)
   893  	t.Assert(err, IsNil)
   894  
   895  	_, err = s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &to})
   896  	t.Assert(err, IsNil)
   897  
   898  	_, err = s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &from})
   899  	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
   900  
   901  	from, to = "no_such_file", "new_file"
   902  	err = root.Rename(from, root, to)
   903  	t.Assert(err, Equals, fuse.ENOENT)
   904  
   905  	if !hasEnv("GCS") {
    906  		// not really a rename, but rename uses this code path
   907  		from, to = s.fs.bucket+"/file2", "new_file"
   908  		err = copyObjectMultipart(s.fs, int64(len("file2")), from, to, "", nil, nil)
   909  		t.Assert(err, IsNil)
   910  	}
   911  }
   912  
   913  func (s *GoofysTest) TestConcurrentRefDeref(t *C) {
   914  	root := s.getRoot(t)
   915  
   916  	lookupOp := fuseops.LookUpInodeOp{
   917  		Parent: root.Id,
   918  		Name:   "file1",
   919  	}
   920  
   921  	for i := 0; i < 20; i++ {
   922  		err := s.fs.LookUpInode(nil, &lookupOp)
   923  		t.Assert(err, IsNil)
   924  
   925  		var wg sync.WaitGroup
   926  
   927  		wg.Add(2)
   928  		go func() {
   929  			// we want to yield to the forget goroutine so that it's run first
   930  			// to trigger this bug
   931  			if i%2 == 0 {
   932  				runtime.Gosched()
   933  			}
   934  			s.fs.LookUpInode(nil, &lookupOp)
   935  			wg.Done()
   936  		}()
   937  		go func() {
   938  			s.fs.ForgetInode(nil, &fuseops.ForgetInodeOp{
   939  				Inode: lookupOp.Entry.Child,
   940  				N:     1,
   941  			})
   942  			wg.Done()
   943  		}()
   944  
   945  		wg.Wait()
   946  	}
   947  }
   948  
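         // hasEnv reports whether the environment variable env is set to
         // something other than "", "0" or "false".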
   949  func hasEnv(env string) bool {
   950  	v := os.Getenv(env)
   951  
   952  	return !(v == "" || v == "0" || v == "false")
   953  }
   954  
   955  func isTravis() bool {
   956  	return hasEnv("TRAVIS")
   957  }
   958  
   959  func isCatfs() bool {
   960  	return hasEnv("CATFS")
   961  }
   962  
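         // mount serves s.fs at mountPoint; when CATFS is set it additionally
         // starts catfs with mountPoint as both source and target, layering a
         // cache over the mount.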
   963  func (s *GoofysTest) mount(t *C, mountPoint string) {
   964  	err := os.MkdirAll(mountPoint, 0700)
   965  	t.Assert(err, IsNil)
   966  
   967  	server := fuseutil.NewFileSystemServer(s.fs)
   968  
   969  	if isCatfs() {
   970  		s.fs.flags.MountOptions = make(map[string]string)
   971  		s.fs.flags.MountOptions["allow_other"] = ""
   972  	}
   973  
   974  	// Mount the file system.
   975  	mountCfg := &fuse.MountConfig{
   976  		FSName:                  s.fs.bucket,
   977  		Options:                 s.fs.flags.MountOptions,
   978  		ErrorLogger:             GetStdLogger(NewLogger("fuse"), logrus.ErrorLevel),
   979  		DisableWritebackCaching: true,
   980  	}
   981  
   982  	_, err = fuse.Mount(mountPoint, server, mountCfg)
   983  	t.Assert(err, IsNil)
   984  
   985  	if isCatfs() {
   986  		cacheDir := mountPoint + "-cache"
   987  		err := os.MkdirAll(cacheDir, 0700)
   988  		t.Assert(err, IsNil)
   989  
   990  		catfs := exec.Command("catfs", "--test", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
   991  		_, err = catfs.Output()
   992  		if err != nil {
   993  			if ee, ok := err.(*exec.ExitError); ok {
    994  				panic(string(ee.Stderr))
   995  			}
   996  		}
   997  
   998  		catfs = exec.Command("catfs", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
   999  
  1000  		if isTravis() {
  1001  			logger := NewLogger("catfs")
  1002  			lvl := logrus.InfoLevel
  1003  			logger.Formatter.(*LogHandle).Lvl = &lvl
  1004  			w := logger.Writer()
  1005  
  1006  			catfs.Stdout = w
  1007  			catfs.Stderr = w
  1008  
   1009  			catfs.Env = append(os.Environ(), "RUST_LOG=debug")
  1010  		}
  1011  
  1012  		err = catfs.Start()
  1013  		t.Assert(err, IsNil)
  1014  
  1015  		time.Sleep(time.Second)
  1016  	}
  1017  }
  1018  
  1019  func (s *GoofysTest) umount(t *C, mountPoint string) {
  1020  	err := fuse.Unmount(mountPoint)
  1021  	if err != nil {
  1022  		time.Sleep(100 * time.Millisecond)
  1023  		err = fuse.Unmount(mountPoint)
  1024  		t.Assert(err, IsNil)
  1025  	}
  1026  
  1027  	os.Remove(mountPoint)
  1028  	if isCatfs() {
  1029  		cacheDir := mountPoint + "-cache"
  1030  		os.Remove(cacheDir)
  1031  	}
  1032  }
  1033  
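         // runFuseTest mounts the filesystem, runs cmdArgs against it, and
         // optionally unmounts when the command finishes; on Travis the
         // command's output is forwarded to the test logger.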
  1034  func (s *GoofysTest) runFuseTest(t *C, mountPoint string, umount bool, cmdArgs ...string) {
  1035  	s.mount(t, mountPoint)
  1036  
  1037  	if umount {
  1038  		defer s.umount(t, mountPoint)
  1039  	}
  1040  
  1041  	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
  1042  	cmd.Env = append(cmd.Env, os.Environ()...)
  1043  	cmd.Env = append(cmd.Env, "TRAVIS=true")
  1044  	cmd.Env = append(cmd.Env, "FAST=true")
  1045  
  1046  	if isTravis() {
  1047  		logger := NewLogger("test")
  1048  		lvl := logrus.InfoLevel
  1049  		logger.Formatter.(*LogHandle).Lvl = &lvl
  1050  		w := logger.Writer()
  1051  
  1052  		cmd.Stdout = w
  1053  		cmd.Stderr = w
  1054  	}
  1055  
  1056  	err := cmd.Run()
  1057  	t.Assert(err, IsNil)
  1058  }
  1059  
  1060  func (s *GoofysTest) TestFuse(t *C) {
  1061  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1062  
  1063  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1064  }
  1065  
  1066  func (s *GoofysTest) TestFuseWithTTL(t *C) {
   1067  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1068  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1069  
  1070  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1071  }
  1072  
  1073  func (s *GoofysTest) TestCheap(t *C) {
  1074  	s.fs.flags.Cheap = true
  1075  	s.TestLookUpInode(t)
  1076  	s.TestWriteLargeFile(t)
  1077  }
  1078  
  1079  func (s *GoofysTest) TestExplicitDir(t *C) {
  1080  	s.fs.flags.ExplicitDir = true
  1081  	s.testExplicitDir(t)
  1082  }
  1083  
  1084  func (s *GoofysTest) TestExplicitDirAndCheap(t *C) {
  1085  	s.fs.flags.ExplicitDir = true
  1086  	s.fs.flags.Cheap = true
  1087  	s.testExplicitDir(t)
  1088  }
  1089  
  1090  func (s *GoofysTest) testExplicitDir(t *C) {
  1091  
  1092  	_, err := s.LookUpInode(t, "file1")
  1093  	t.Assert(err, IsNil)
  1094  
  1095  	_, err = s.LookUpInode(t, "fileNotFound")
  1096  	t.Assert(err, Equals, fuse.ENOENT)
  1097  
  1098  	// dir1/ doesn't exist so we shouldn't be able to see it
  1099  	_, err = s.LookUpInode(t, "dir1/file3")
  1100  	t.Assert(err, Equals, fuse.ENOENT)
  1101  
  1102  	_, err = s.LookUpInode(t, "dir4/file5")
  1103  	t.Assert(err, IsNil)
  1104  
  1105  	_, err = s.LookUpInode(t, "empty_dir")
  1106  	t.Assert(err, IsNil)
  1107  }
  1108  
  1109  func (s *GoofysTest) TestBenchLs(t *C) {
  1110  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1111  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "ls")
  1112  }
  1113  
  1114  func (s *GoofysTest) TestBenchCreate(t *C) {
  1115  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1116  
  1117  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create")
  1118  }
  1119  
  1120  func (s *GoofysTest) TestBenchCreateParallel(t *C) {
  1121  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1122  
  1123  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create_parallel")
  1124  }
  1125  
  1126  func (s *GoofysTest) TestBenchIO(t *C) {
  1127  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1128  
  1129  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "io")
  1130  }
  1131  
  1132  func (s *GoofysTest) TestBenchFindTree(t *C) {
  1133  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1134  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1135  
  1136  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "find")
  1137  }
  1138  
  1139  func (s *GoofysTest) TestIssue231(t *C) {
  1140  	if isTravis() {
  1141  		t.Skip("disable in travis, not sure if it has enough memory")
  1142  	}
  1143  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1144  	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue231")
  1145  }
  1146  
  1147  func (s *GoofysTest) TestChmod(t *C) {
  1148  	root := s.getRoot(t)
  1149  
  1150  	lookupOp := fuseops.LookUpInodeOp{
  1151  		Parent: root.Id,
  1152  		Name:   "file1",
  1153  	}
  1154  
  1155  	err := s.fs.LookUpInode(nil, &lookupOp)
  1156  	t.Assert(err, IsNil)
  1157  
  1158  	targetMode := os.FileMode(0777)
  1159  	setOp := fuseops.SetInodeAttributesOp{Inode: lookupOp.Entry.Child, Mode: &targetMode}
  1160  
  1161  	err = s.fs.SetInodeAttributes(s.ctx, &setOp)
  1162  	t.Assert(err, IsNil)
  1163  	t.Assert(setOp.Attributes, NotNil)
  1164  }
  1165  
  1166  func (s *GoofysTest) TestIssue64(t *C) {
  1167  	/*
  1168  		mountPoint := "/tmp/mnt" + s.fs.bucket
  1169  		log.Level = logrus.DebugLevel
  1170  
  1171  		err := os.MkdirAll(mountPoint, 0700)
  1172  		t.Assert(err, IsNil)
  1173  
  1174  		defer os.Remove(mountPoint)
  1175  
  1176  		s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue64")
  1177  	*/
  1178  }
  1179  
  1180  func (s *GoofysTest) TestIssue69Fuse(t *C) {
  1181  	s.fs.flags.StatCacheTTL = 0
  1182  
  1183  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1184  
  1185  	s.mount(t, mountPoint)
  1186  
  1187  	defer func() {
  1188  		err := os.Chdir("/")
  1189  		t.Assert(err, IsNil)
  1190  
  1191  		s.umount(t, mountPoint)
  1192  	}()
  1193  
  1194  	err := os.Chdir(mountPoint)
  1195  	t.Assert(err, IsNil)
  1196  
  1197  	_, err = os.Stat("dir1")
  1198  	t.Assert(err, IsNil)
  1199  
  1200  	err = os.Remove("dir1/file3")
  1201  	t.Assert(err, IsNil)
  1202  
  1203  	// don't really care about error code, but it should be a PathError
  1204  	os.Stat("dir1")
  1205  	os.Stat("dir1")
  1206  }
  1207  
  1208  func (s *GoofysTest) TestGetMimeType(t *C) {
  1209  	// option to use mime type not turned on
  1210  	mime := s.fs.getMimeType("foo.css")
  1211  	t.Assert(mime, IsNil)
  1212  
  1213  	s.fs.flags.UseContentType = true
  1214  
  1215  	mime = s.fs.getMimeType("foo.css")
  1216  	t.Assert(mime, NotNil)
  1217  	t.Assert(*mime, Equals, "text/css")
  1218  
  1219  	mime = s.fs.getMimeType("foo")
  1220  	t.Assert(mime, IsNil)
  1221  
  1222  	mime = s.fs.getMimeType("foo.")
  1223  	t.Assert(mime, IsNil)
  1224  
  1225  	mime = s.fs.getMimeType("foo.unknownExtension")
  1226  	t.Assert(mime, IsNil)
  1227  }
  1228  
  1229  func (s *GoofysTest) TestPutMimeType(t *C) {
  1230  	s.fs.flags.UseContentType = true
  1231  
  1232  	root := s.getRoot(t)
  1233  	jpg := "test.jpg"
  1234  	jpg2 := "test2.jpg"
  1235  	file := "test"
  1236  
  1237  	s.testWriteFile(t, jpg, 0, 0)
  1238  
  1239  	resp, err := s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &jpg})
  1240  	t.Assert(err, IsNil)
  1241  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1242  
  1243  	err = root.Rename(jpg, root, file)
  1244  	t.Assert(err, IsNil)
  1245  
  1246  	resp, err = s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &file})
  1247  	t.Assert(err, IsNil)
  1248  	if hasEnv("AWS") {
  1249  		t.Assert(*resp.ContentType, Equals, "binary/octet-stream")
  1250  	} else if hasEnv("GCS") {
  1251  		t.Assert(*resp.ContentType, Equals, "application/octet-stream")
  1252  	} else {
  1253  		// workaround s3proxy https://github.com/andrewgaul/s3proxy/issues/179
  1254  		t.Assert(*resp.ContentType, Equals, "application/unknown")
  1255  	}
  1256  
  1257  	err = root.Rename(file, root, jpg2)
  1258  	t.Assert(err, IsNil)
  1259  
  1260  	resp, err = s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: &jpg2})
  1261  	t.Assert(err, IsNil)
  1262  	t.Assert(*resp.ContentType, Equals, "image/jpeg")
  1263  }
  1264  
  1265  func (s *GoofysTest) TestBucketPrefixSlash(t *C) {
  1266  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2", s.awsConfig, s.fs.flags)
  1267  	t.Assert(s.fs.prefix, Equals, "dir2/")
  1268  
  1269  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2///", s.awsConfig, s.fs.flags)
  1270  	t.Assert(s.fs.prefix, Equals, "dir2/")
  1271  }
  1272  
  1273  func (s *GoofysTest) TestFuseWithPrefix(t *C) {
  1274  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1275  
  1276  	s.fs = NewGoofys(context.Background(), s.fs.bucket+":testprefix", s.awsConfig, s.fs.flags)
  1277  
  1278  	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
  1279  }
  1280  
  1281  func (s *GoofysTest) TestRenameCache(t *C) {
  1282  	root := s.getRoot(t)
   1283  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1284  
  1285  	lookupOp1 := fuseops.LookUpInodeOp{
  1286  		Parent: root.Id,
  1287  		Name:   "file1",
  1288  	}
  1289  
  1290  	lookupOp2 := lookupOp1
  1291  	lookupOp2.Name = "newfile"
  1292  
  1293  	err := s.fs.LookUpInode(nil, &lookupOp1)
  1294  	t.Assert(err, IsNil)
  1295  
  1296  	err = s.fs.LookUpInode(nil, &lookupOp2)
  1297  	t.Assert(err, Equals, fuse.ENOENT)
  1298  
  1299  	renameOp := fuseops.RenameOp{
  1300  		OldParent: root.Id,
  1301  		NewParent: root.Id,
  1302  		OldName:   "file1",
  1303  		NewName:   "newfile",
  1304  	}
  1305  
  1306  	err = s.fs.Rename(nil, &renameOp)
  1307  	t.Assert(err, IsNil)
  1308  
  1309  	lookupOp1.Entry = fuseops.ChildInodeEntry{}
  1310  	lookupOp2.Entry = fuseops.ChildInodeEntry{}
  1311  
  1312  	err = s.fs.LookUpInode(nil, &lookupOp1)
  1313  	t.Assert(err, Equals, fuse.ENOENT)
  1314  
  1315  	err = s.fs.LookUpInode(nil, &lookupOp2)
  1316  	t.Assert(err, IsNil)
  1317  }
  1318  
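         // anonymous replaces the test bucket with a public-read copy and
         // points the filesystem at it using anonymous credentials.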
  1319  func (s *GoofysTest) anonymous(t *C) {
  1320  	// delete the original bucket
  1321  	s.deleteBucket(t)
  1322  
  1323  	bucket := s.setupDefaultEnv(t, true)
  1324  
  1325  	s.fs = NewGoofys(context.Background(), bucket, s.awsConfig, s.fs.flags)
  1326  	t.Assert(s.fs, NotNil)
  1327  
  1328  	// should have auto-detected within NewGoofys, but doing this here to ensure
  1329  	// we are using anonymous credentials
  1330  	s.fs.awsConfig = selectTestConfig(t)
  1331  	s.fs.awsConfig.Credentials = credentials.AnonymousCredentials
  1332  	s.fs.sess = session.New(s.fs.awsConfig)
  1333  	s.fs.s3 = s.fs.newS3()
  1334  }
  1335  
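         // disableS3 waits briefly for background goroutines to finish, then
         // nils out the S3 client so that any further S3 access panics, proving
         // the operation under test is served entirely from cache.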
  1336  func (s *GoofysTest) disableS3() *s3.S3 {
  1337  	time.Sleep(1 * time.Second) // wait for any background goroutines to finish
  1338  	s3 := s.fs.s3
  1339  	s.fs.s3 = nil
  1340  	return s3
  1341  }
  1342  
  1343  func (s *GoofysTest) TestWriteAnonymous(t *C) {
  1344  	s.anonymous(t)
  1345  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1346  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1347  
  1348  	fileName := "test"
  1349  
  1350  	createOp := fuseops.CreateFileOp{
  1351  		Parent: s.getRoot(t).Id,
  1352  		Name:   fileName,
  1353  	}
  1354  
  1355  	err := s.fs.CreateFile(s.ctx, &createOp)
  1356  	t.Assert(err, IsNil)
  1357  
  1358  	err = s.fs.FlushFile(s.ctx, &fuseops.FlushFileOp{
  1359  		Handle: createOp.Handle,
  1360  		Inode:  createOp.Entry.Child,
  1361  	})
  1362  	t.Assert(err, Equals, syscall.EACCES)
  1363  
  1364  	err = s.fs.ReleaseFileHandle(s.ctx, &fuseops.ReleaseFileHandleOp{Handle: createOp.Handle})
  1365  	t.Assert(err, IsNil)
  1366  
  1367  	err = s.fs.LookUpInode(s.ctx, &fuseops.LookUpInodeOp{
  1368  		Parent: s.getRoot(t).Id,
  1369  		Name:   fileName,
  1370  	})
  1371  	t.Assert(err, Equals, fuse.ENOENT)
  1372  	// BUG! the file shouldn't exist, see test below for comment,
  1373  	// this behaves as expected only because we are bypassing
  1374  	// linux vfs in this test
  1375  }
  1376  
  1377  func (s *GoofysTest) TestWriteAnonymousFuse(t *C) {
  1378  	s.anonymous(t)
  1379  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1380  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1381  
  1382  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1383  
  1384  	s.mount(t, mountPoint)
  1385  	defer s.umount(t, mountPoint)
  1386  
  1387  	err := ioutil.WriteFile(mountPoint+"/test", []byte(""), 0600)
  1388  	t.Assert(err, NotNil)
  1389  	pathErr, ok := err.(*os.PathError)
  1390  	t.Assert(ok, Equals, true)
  1391  	t.Assert(pathErr.Err, Equals, syscall.EACCES)
  1392  
  1393  	_, err = os.Stat(mountPoint + "/test")
  1394  	t.Assert(err, IsNil)
  1395  	// BUG! the file shouldn't exist, the condition below should hold instead
  1396  	// see comment in Goofys.FlushFile
  1397  	// pathErr, ok = err.(*os.PathError)
  1398  	// t.Assert(ok, Equals, true)
  1399  	// t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  1400  
  1401  	_, err = ioutil.ReadFile(mountPoint + "/test")
  1402  	t.Assert(err, NotNil)
  1403  	pathErr, ok = err.(*os.PathError)
  1404  	t.Assert(ok, Equals, true)
  1405  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  1406  
   1407  	// reading the file and getting ENOENT causes the kernel to
   1408  	// invalidate the entry. Failing at open is not sufficient; we
   1409  	// have to fail at read (which means that if the application
   1410  	// uses splice(2) the read won't reach us and this wouldn't work)
  1411  	_, err = os.Stat(mountPoint + "/test")
  1412  	t.Assert(err, NotNil)
  1413  	pathErr, ok = err.(*os.PathError)
  1414  	t.Assert(ok, Equals, true)
  1415  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  1416  }
  1417  
  1418  func (s *GoofysTest) TestWriteSyncWriteFuse(t *C) {
  1419  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1420  
  1421  	s.mount(t, mountPoint)
  1422  	defer s.umount(t, mountPoint)
  1423  
  1424  	var f *os.File
  1425  	var n int
  1426  	var err error
  1427  
  1428  	defer func() {
  1429  		if err != nil {
  1430  			f.Close()
  1431  		}
  1432  	}()
  1433  
  1434  	f, err = os.Create(mountPoint + "/TestWriteSyncWrite")
  1435  	t.Assert(err, IsNil)
  1436  
  1437  	n, err = f.Write([]byte("hello\n"))
  1438  	t.Assert(err, IsNil)
  1439  	t.Assert(n, Equals, 6)
  1440  
  1441  	err = f.Sync()
  1442  	t.Assert(err, IsNil)
  1443  
  1444  	n, err = f.Write([]byte("world\n"))
  1445  	t.Assert(err, IsNil)
  1446  	t.Assert(n, Equals, 6)
  1447  
  1448  	err = f.Close()
  1449  	t.Assert(err, IsNil)
  1450  }
  1451  
  1452  func (s *GoofysTest) TestIssue156(t *C) {
  1453  	_, err := s.LookUpInode(t, "\xae\x8a-")
  1454  	// S3Proxy and aws s3 return different errors
  1455  	// https://github.com/andrewgaul/s3proxy/issues/201
  1456  	t.Assert(err, NotNil)
  1457  }
  1458  
  1459  func (s *GoofysTest) TestIssue162(t *C) {
  1460  	params := &s3.PutObjectInput{
  1461  		Bucket: &s.fs.bucket,
  1462  		Key:    aws.String("dir1/lör 006.jpg"),
  1463  		Body:   bytes.NewReader([]byte("foo")),
  1464  	}
  1465  	_, err := s.s3.PutObject(params)
  1466  	t.Assert(err, IsNil)
  1467  
  1468  	dir, err := s.LookUpInode(t, "dir1")
  1469  	t.Assert(err, IsNil)
  1470  
  1471  	err = dir.Rename("lör 006.jpg", dir, "myfile.jpg")
  1472  	t.Assert(err, IsNil)
  1473  
   1474  	resp, err := s.s3.HeadObject(&s3.HeadObjectInput{Bucket: &s.fs.bucket, Key: aws.String("dir1/myfile.jpg")})
         	t.Assert(err, IsNil)
   1475  	t.Assert(*resp.ContentLength, Equals, int64(3))
  1476  }
  1477  
  1478  func (s *GoofysTest) TestXAttrGet(t *C) {
  1479  	file1, err := s.LookUpInode(t, "file1")
  1480  	t.Assert(err, IsNil)
  1481  
  1482  	names, err := file1.ListXattr()
  1483  	t.Assert(err, IsNil)
  1484  	sort.Strings(names)
  1485  	t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class", "user.name"})
  1486  
  1487  	_, err = file1.GetXattr("user.foobar")
  1488  	t.Assert(xattr.IsNotExist(err), Equals, true)
  1489  
  1490  	value, err := file1.GetXattr("s3.etag")
  1491  	t.Assert(err, IsNil)
  1492  	// md5sum of "file1"
  1493  	t.Assert(string(value), Equals, "\"826e8142e6baabe8af779f5f490cf5f5\"")
  1494  
  1495  	value, err = file1.GetXattr("user.name")
  1496  	t.Assert(err, IsNil)
  1497  	t.Assert(string(value), Equals, "file1+/#\x00")
  1498  
  1499  	dir1, err := s.LookUpInode(t, "dir1")
  1500  	t.Assert(err, IsNil)
  1501  
  1502  	// list dir1 to populate file3 in cache, then get file3's xattr
  1503  	lookup := fuseops.LookUpInodeOp{
  1504  		Parent: fuseops.RootInodeID,
  1505  		Name:   "dir1",
  1506  	}
  1507  	err = s.fs.LookUpInode(nil, &lookup)
  1508  	t.Assert(err, IsNil)
  1509  
  1510  	s.readDirIntoCache(t, lookup.Entry.Child)
  1511  
  1512  	dir1 = s.fs.inodes[lookup.Entry.Child]
  1513  	file3 := dir1.findChild("file3")
  1514  	t.Assert(file3, NotNil)
  1515  	t.Assert(file3.userMetadata, IsNil)
  1516  
  1517  	value, err = file3.GetXattr("s3.etag")
  1518  	t.Assert(err, IsNil)
  1519  	// md5sum of "dir1/file3"
  1520  	t.Assert(string(value), Equals, "\"5cd67e0e59fb85be91a515afe0f4bb24\"")
  1521  
  1522  	emptyDir2, err := s.LookUpInode(t, "empty_dir2")
  1523  	t.Assert(err, IsNil)
  1524  
  1525  	names, err = emptyDir2.ListXattr()
  1526  	t.Assert(err, IsNil)
  1527  	sort.Strings(names)
  1528  	t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class", "user.name"})
  1529  
  1530  	emptyDir, err := s.LookUpInode(t, "empty_dir")
  1531  	t.Assert(err, IsNil)
  1532  
  1533  	value, err = emptyDir.GetXattr("s3.etag")
  1534  	t.Assert(err, IsNil)
  1535  	// dir blobs are empty
  1536  	t.Assert(string(value), Equals, "\"d41d8cd98f00b204e9800998ecf8427e\"")
  1537  
  1538  	// implicit dir blobs don't have s3.etag at all
  1539  	names, err = dir1.ListXattr()
  1540  	t.Assert(err, IsNil)
  1541  	t.Assert(names, HasLen, 0)
  1542  
  1543  	value, err = dir1.GetXattr("s3.etag")
  1544  	t.Assert(err, Equals, syscall.ENODATA)
  1545  
  1546  	// s3proxy doesn't support storage class yet
  1547  	if hasEnv("AWS") {
  1548  		s.fs.flags.StorageClass = "STANDARD_IA"
  1549  
  1550  		s.testWriteFile(t, "ia", 1, 128*1024)
  1551  
  1552  		ia, err := s.LookUpInode(t, "ia")
  1553  		t.Assert(err, IsNil)
  1554  
   1555  		names, err = ia.ListXattr()
         		t.Assert(err, IsNil)
   1556  		t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})
  1557  
  1558  		value, err = ia.GetXattr("s3.storage-class")
  1559  		t.Assert(err, IsNil)
  1560  		t.Assert(string(value), Equals, "STANDARD_IA")
  1561  	}
  1562  }
  1563  
  1564  func (s *GoofysTest) TestXAttrGetCached(t *C) {
  1565  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1566  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1567  	s.readDirIntoCache(t, fuseops.RootInodeID)
  1568  	s.disableS3()
  1569  
  1570  	in, err := s.LookUpInode(t, "file1")
  1571  	t.Assert(err, IsNil)
  1572  	t.Assert(in.userMetadata, IsNil)
  1573  
  1574  	_, err = in.GetXattr("s3.etag")
  1575  	t.Assert(err, IsNil)
  1576  }
  1577  
  1578  func (s *GoofysTest) TestXAttrCopied(t *C) {
  1579  	root := s.getRoot(t)
  1580  
  1581  	err := root.Rename("file1", root, "file0")
  1582  	t.Assert(err, IsNil)
  1583  
  1584  	in, err := s.LookUpInode(t, "file0")
  1585  	t.Assert(err, IsNil)
  1586  
  1587  	_, err = in.GetXattr("user.name")
  1588  	t.Assert(err, IsNil)
  1589  }
  1590  
  1591  func (s *GoofysTest) TestXAttrRemove(t *C) {
  1592  	in, err := s.LookUpInode(t, "file1")
  1593  	t.Assert(err, IsNil)
  1594  
  1595  	_, err = in.GetXattr("user.name")
  1596  	t.Assert(err, IsNil)
  1597  
  1598  	err = in.RemoveXattr("user.name")
  1599  	t.Assert(err, IsNil)
  1600  
  1601  	_, err = in.GetXattr("user.name")
  1602  	t.Assert(err, Equals, syscall.ENODATA)
  1603  }
  1604  
  1605  func (s *GoofysTest) TestXAttrSet(t *C) {
  1606  	in, err := s.LookUpInode(t, "file1")
  1607  	t.Assert(err, IsNil)
  1608  
  1609  	err = in.SetXattr("user.bar", []byte("hello"), xattr.REPLACE)
  1610  	t.Assert(err, Equals, syscall.ENODATA)
  1611  
  1612  	err = in.SetXattr("user.bar", []byte("hello"), xattr.CREATE)
  1613  	t.Assert(err, IsNil)
  1614  
  1615  	err = in.SetXattr("user.bar", []byte("hello"), xattr.CREATE)
  1616  	t.Assert(err, Equals, syscall.EEXIST)
  1617  
  1618  	in, err = s.LookUpInode(t, "file1")
  1619  	t.Assert(err, IsNil)
  1620  
  1621  	value, err := in.GetXattr("user.bar")
  1622  	t.Assert(err, IsNil)
  1623  	t.Assert(string(value), Equals, "hello")
  1624  
  1625  	value = []byte("file1+%/#\x00")
  1626  
  1627  	err = in.SetXattr("user.bar", value, xattr.REPLACE)
  1628  	t.Assert(err, IsNil)
  1629  
  1630  	in, err = s.LookUpInode(t, "file1")
  1631  	t.Assert(err, IsNil)
  1632  
  1633  	value2, err := in.GetXattr("user.bar")
  1634  	t.Assert(err, IsNil)
  1635  	t.Assert(value2, DeepEquals, value)
  1636  
  1637  	// setting with flag = 0 always works
  1638  	err = in.SetXattr("user.bar", []byte("world"), 0)
  1639  	t.Assert(err, IsNil)
  1640  
  1641  	err = in.SetXattr("user.baz", []byte("world"), 0)
  1642  	t.Assert(err, IsNil)
  1643  
  1644  	value, err = in.GetXattr("user.bar")
  1645  	t.Assert(err, IsNil)
  1646  
  1647  	value2, err = in.GetXattr("user.baz")
  1648  	t.Assert(err, IsNil)
  1649  
  1650  	t.Assert(value2, DeepEquals, value)
  1651  	t.Assert(string(value2), DeepEquals, "world")
  1652  }
  1653  
  1654  func (s *GoofysTest) TestCreateRenameBeforeCloseFuse(t *C) {
  1655  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1656  
  1657  	s.mount(t, mountPoint)
  1658  	defer s.umount(t, mountPoint)
  1659  
  1660  	from := mountPoint + "/newfile"
  1661  	to := mountPoint + "/newfile2"
  1662  
  1663  	fh, err := os.Create(from)
  1664  	t.Assert(err, IsNil)
  1665  	defer func() {
  1666  		// close the file if the test failed so we can unmount
  1667  		if fh != nil {
  1668  			fh.Close()
  1669  		}
  1670  	}()
  1671  
  1672  	_, err = fh.WriteString("hello world")
  1673  	t.Assert(err, IsNil)
  1674  
  1675  	err = os.Rename(from, to)
  1676  	t.Assert(err, IsNil)
  1677  
  1678  	err = fh.Close()
  1679  	t.Assert(err, IsNil)
  1680  	fh = nil
  1681  
  1682  	_, err = os.Stat(from)
  1683  	t.Assert(err, NotNil)
  1684  	pathErr, ok := err.(*os.PathError)
  1685  	t.Assert(ok, Equals, true)
  1686  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  1687  
  1688  	content, err := ioutil.ReadFile(to)
  1689  	t.Assert(err, IsNil)
  1690  	t.Assert(string(content), Equals, "hello world")
  1691  }
  1692  
  1693  func (s *GoofysTest) TestRenameBeforeCloseFuse(t *C) {
  1694  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1695  
  1696  	s.mount(t, mountPoint)
  1697  	defer s.umount(t, mountPoint)
  1698  
  1699  	from := mountPoint + "/newfile"
  1700  	to := mountPoint + "/newfile2"
  1701  
  1702  	err := ioutil.WriteFile(from, []byte(""), 0600)
  1703  	t.Assert(err, IsNil)
  1704  
  1705  	fh, err := os.OpenFile(from, os.O_WRONLY, 0600)
  1706  	t.Assert(err, IsNil)
  1707  	defer func() {
  1708  		// close the file if the test failed so we can unmount
  1709  		if fh != nil {
  1710  			fh.Close()
  1711  		}
  1712  	}()
  1713  
  1714  	_, err = fh.WriteString("hello world")
  1715  	t.Assert(err, IsNil)
  1716  
  1717  	err = os.Rename(from, to)
  1718  	t.Assert(err, IsNil)
  1719  
  1720  	err = fh.Close()
  1721  	t.Assert(err, IsNil)
  1722  	fh = nil
  1723  
  1724  	_, err = os.Stat(from)
  1725  	t.Assert(err, NotNil)
  1726  	pathErr, ok := err.(*os.PathError)
  1727  	t.Assert(ok, Equals, true)
  1728  	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
  1729  
  1730  	content, err := ioutil.ReadFile(to)
  1731  	t.Assert(err, IsNil)
  1732  	t.Assert(string(content), Equals, "hello world")
  1733  }
  1734  
  1735  func (s *GoofysTest) TestInodeInsert(t *C) {
  1736  	root := s.getRoot(t)
  1737  
  1738  	in := NewInode(s.fs, root, aws.String("2"), aws.String("2"))
  1739  	in.Attributes = InodeAttributes{}
  1740  	root.insertChild(in)
  1741  	t.Assert(*root.dir.Children[0].Name, Equals, "2")
  1742  
  1743  	in = NewInode(s.fs, root, aws.String("1"), aws.String("1"))
  1744  	in.Attributes = InodeAttributes{}
  1745  	root.insertChild(in)
  1746  	t.Assert(*root.dir.Children[0].Name, Equals, "1")
  1747  	t.Assert(*root.dir.Children[1].Name, Equals, "2")
  1748  
  1749  	in = NewInode(s.fs, root, aws.String("4"), aws.String("4"))
  1750  	in.Attributes = InodeAttributes{}
  1751  	root.insertChild(in)
  1752  	t.Assert(*root.dir.Children[0].Name, Equals, "1")
  1753  	t.Assert(*root.dir.Children[1].Name, Equals, "2")
  1754  	t.Assert(*root.dir.Children[2].Name, Equals, "4")
  1755  
  1756  	inode := root.findChild("1")
  1757  	t.Assert(inode, NotNil)
  1758  	t.Assert(*inode.Name, Equals, "1")
  1759  
  1760  	inode = root.findChild("2")
  1761  	t.Assert(inode, NotNil)
  1762  	t.Assert(*inode.Name, Equals, "2")
  1763  
  1764  	inode = root.findChild("4")
  1765  	t.Assert(inode, NotNil)
  1766  	t.Assert(*inode.Name, Equals, "4")
  1767  
  1768  	inode = root.findChild("0")
  1769  	t.Assert(inode, IsNil)
  1770  
  1771  	inode = root.findChild("3")
  1772  	t.Assert(inode, IsNil)
  1773  
  1774  	root.removeChild(root.dir.Children[1])
  1775  	root.removeChild(root.dir.Children[0])
  1776  	root.removeChild(root.dir.Children[0])
  1777  	t.Assert(len(root.dir.Children), Equals, 0)
  1778  }
  1779  
  1780  func (s *GoofysTest) TestReadDirSlurpSubtree(t *C) {
  1781  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1782  
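        	// a seqOpenDirScore of 2 marks this as a sequential directory
        	// walk, which is what lets ReadDir slurp the whole subtree in one
        	// listing (the assertions below with S3 disabled depend on that)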
  1783  	s.getRoot(t).dir.seqOpenDirScore = 2
  1784  	in, err := s.LookUpInode(t, "dir2")
  1785  	t.Assert(err, IsNil)
  1786  
  1787  	s.readDirIntoCache(t, in.Id)
  1788  
  1789  	in, err = s.LookUpInode(t, "dir2/dir3")
  1790  	t.Assert(err, IsNil)
  1791  
  1792  	// reading dir2 should cause dir2/dir3 to have a cached readdir
  1793  	s.disableS3()
  1794  
  1795  	s.assertEntries(t, in, []string{"file4"})
  1796  }
  1797  
  1798  func (s *GoofysTest) TestReadDirCached(t *C) {
  1799  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1800  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1801  
  1802  	s.getRoot(t).dir.seqOpenDirScore = 2
  1803  	s.readDirIntoCache(t, fuseops.RootInodeID)
  1804  	s.disableS3()
  1805  
  1806  	dh := s.getRoot(t).OpenDir()
  1807  
  1808  	entries := s.readDirFully(t, dh)
  1809  	dirs := make([]string, 0)
  1810  	files := make([]string, 0)
  1811  	noMoreDir := false
  1812  
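        	// directories come back before any files in a cached readdir;
        	// once the first file shows up (noMoreDir) no directory may follow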
  1813  	for _, en := range entries {
  1814  		if en.Type == fuseutil.DT_Directory {
  1815  			t.Assert(noMoreDir, Equals, false)
  1816  			dirs = append(dirs, *en.Name)
  1817  		} else {
  1818  			files = append(files, *en.Name)
  1819  			noMoreDir = true
  1820  		}
  1821  	}
  1822  
  1823  	t.Assert(dirs, DeepEquals, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2"})
  1824  	t.Assert(files, DeepEquals, []string{"file1", "file2", "zero"})
  1825  }
  1826  
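        // TestReadDirLookUp races readdir against lookup of the same parent
        // as a smoke test for concurrency bugs between the two paths. Note
        // that the goroutines are not joined, so their assertions may still
        // be running when the test returns.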
  1827  func (s *GoofysTest) TestReadDirLookUp(t *C) {
  1828  	s.getRoot(t).dir.seqOpenDirScore = 2
  1829  	for i := 0; i < 10; i++ {
  1830  		go s.readDirIntoCache(t, fuseops.RootInodeID)
  1831  		go func() {
  1832  			lookup := fuseops.LookUpInodeOp{
  1833  				Parent: fuseops.RootInodeID,
  1834  				Name:   "file1",
  1835  			}
  1836  			err := s.fs.LookUpInode(nil, &lookup)
  1837  			t.Assert(err, IsNil)
  1838  		}()
  1839  	}
  1840  }
  1841  
  1842  func (s *GoofysTest) writeSeekWriteFuse(t *C, file string, fh *os.File, first string, second string, third string) {
  1843  	fi, err := os.Stat(file)
  1844  	t.Assert(err, IsNil)
  1845  
  1846  	defer func() {
  1847  		// close the file if the test failed so we can unmount
  1848  		if fh != nil {
  1849  			fh.Close()
  1850  		}
  1851  	}()
  1852  
  1853  	_, err = fh.WriteString(first)
  1854  	t.Assert(err, IsNil)
  1855  
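        	// whence 1 == io.SeekCurrent: skip over len(second) bytes,
        	// leaving a gap that is filled in further down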
  1856  	off, err := fh.Seek(int64(len(second)), 1)
  1857  	t.Assert(err, IsNil)
  1858  	t.Assert(off, Equals, int64(len(first)+len(second)))
  1859  
  1860  	_, err = fh.WriteString(third)
  1861  	t.Assert(err, IsNil)
  1862  
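        	// whence 0 == io.SeekStart: seek back and fill the gap with second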
  1863  	off, err = fh.Seek(int64(len(first)), 0)
  1864  	t.Assert(err, IsNil)
  1865  	t.Assert(off, Equals, int64(len(first)))
  1866  
  1867  	_, err = fh.WriteString(second)
  1868  	t.Assert(err, IsNil)
  1869  
  1870  	err = fh.Close()
  1871  	t.Assert(err, IsNil)
  1872  	fh = nil
  1873  
  1874  	content, err := ioutil.ReadFile(file)
  1875  	t.Assert(err, IsNil)
  1876  	t.Assert(string(content), Equals, first+second+third)
  1877  
  1878  	fi2, err := os.Stat(file)
  1879  	t.Assert(err, IsNil)
  1880  	t.Assert(fi.Mode(), Equals, fi2.Mode())
  1881  }
  1882  
  1883  func (s *GoofysTest) TestWriteSeekWriteFuse(t *C) {
  1884  	if !isCatfs() {
  1885  		t.Skip("only works with CATFS=true")
  1886  	}
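        	// goofys by itself only supports sequential writes; the
        	// seek-back-and-fill pattern in writeSeekWriteFuse relies on catfs
        	// caching the file locally, hence the skip above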
  1887  
  1888  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1889  	s.mount(t, mountPoint)
  1890  	defer s.umount(t, mountPoint)
  1891  
  1892  	file := mountPoint + "/newfile"
  1893  
  1894  	fh, err := os.Create(file)
  1895  	t.Assert(err, IsNil)
  1896  
  1897  	s.writeSeekWriteFuse(t, file, fh, "hello", " ", "world")
  1898  
  1899  	fh, err = os.OpenFile(file, os.O_WRONLY, 0600)
  1900  	t.Assert(err, IsNil)
  1901  
  1902  	s.writeSeekWriteFuse(t, file, fh, "", "never", "minding")
  1903  }
  1904  
  1905  func (s *GoofysTest) TestDirMtimeCreate(t *C) {
  1906  	root := s.getRoot(t)
  1907  
  1908  	attr, _ := root.GetAttributes()
  1909  	m1 := attr.Mtime
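        	// sleep so the mtime bump from the Create below is strictly
        	// later than m1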
  1910  	time.Sleep(time.Second)
  1911  
  1912  	_, _ = root.Create("foo")
  1913  	attr2, _ := root.GetAttributes()
  1914  	m2 := attr2.Mtime
  1915  
  1916  	t.Assert(m1.Before(m2), Equals, true)
  1917  }
  1918  
  1919  func (s *GoofysTest) TestDirMtimeLs(t *C) {
  1920  	root := s.getRoot(t)
  1921  
  1922  	attr, _ := root.GetAttributes()
  1923  	m1 := attr.Mtime
  1924  	time.Sleep(time.Second)
  1925  
  1926  	params := &s3.PutObjectInput{
  1927  		Bucket: &s.fs.bucket,
  1928  		Key:    aws.String("newfile"),
  1929  		Body:   bytes.NewReader([]byte("foo")),
  1930  	}
  1931  	_, err := s.s3.PutObject(params)
  1932  	t.Assert(err, IsNil)
  1933  
  1934  	s.readDirIntoCache(t, fuseops.RootInodeID)
  1935  
  1936  	attr2, _ := root.GetAttributes()
  1937  	m2 := attr2.Mtime
  1938  
  1939  	t.Assert(m1.Before(m2), Equals, true)
  1940  }
  1941  
  1942  func (s *GoofysTest) TestRenameOverwrite(t *C) {
  1943  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1944  	s.mount(t, mountPoint)
  1945  	defer s.umount(t, mountPoint)
  1946  
  1947  	file := mountPoint + "/newfile"
  1948  	rename := mountPoint + "/file1"
  1949  
  1950  	fh, err := os.Create(file)
  1951  	t.Assert(err, IsNil)
  1952  
  1953  	err = fh.Close()
  1954  	t.Assert(err, IsNil)
  1955  
  1956  	err = os.Rename(file, rename)
  1957  	t.Assert(err, IsNil)
  1958  }
  1959  
  1960  func (s *GoofysTest) TestRead403(t *C) {
  1961  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1962  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1963  
  1964  	// cache the inode first so we don't get a 403 when we look it up
  1965  	in, err := s.LookUpInode(t, "file1")
  1966  	t.Assert(err, IsNil)
  1967  
  1968  	fh, err := in.OpenFile()
  1969  	t.Assert(err, IsNil)
  1970  
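        	// swap in anonymous credentials so the GET below is rejected
        	// with a 403, which should surface as EACCES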
  1971  	s.fs.awsConfig.Credentials = credentials.AnonymousCredentials
  1972  	s.fs.sess = session.New(s.fs.awsConfig)
  1973  	s.fs.s3 = s.fs.newS3()
  1974  
  1975  	// fake-enable read-ahead so ReadFile goes down the read-ahead path
  1976  	fh.seqReadAmount = uint64(READAHEAD_CHUNK)
  1977  
  1978  	buf := make([]byte, 5)
  1979  
  1980  	_, err = fh.ReadFile(0, buf)
  1981  	t.Assert(err, Equals, syscall.EACCES)
  1982  
  1983  	// now that the S3 GET has failed, try again, see
  1984  	// https://github.com/kahing/goofys/pull/243
  1985  	_, err = fh.ReadFile(0, buf)
  1986  	t.Assert(err, Equals, syscall.EACCES)
  1987  }
  1988  
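        // TestRmdirWithDiropen removes a directory that still has an open
        // handle and checks that the parent's listing (via both Readdirnames
        // and ls) no longer shows it.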
  1989  func (s *GoofysTest) TestRmdirWithDiropen(t *C) {
  1990  	mountPoint := "/tmp/mnt" + s.fs.bucket
  1991  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  1992  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  1993  
  1994  	s.mount(t, mountPoint)
  1995  	defer s.umount(t, mountPoint)
  1996  
  1997  	err := os.MkdirAll(mountPoint+"/dir2/dir4", 0700)
  1998  	t.Assert(err, IsNil)
  1999  	err = os.MkdirAll(mountPoint+"/dir2/dir5", 0700)
  2000  	t.Assert(err, IsNil)
  2001  
  2002  	// 1. open dir5
  2003  	dir := mountPoint + "/dir2/dir5"
  2004  	fh, err := os.Open(dir)
  2005  	t.Assert(err, IsNil)
  2006  	defer fh.Close()
  2007  
  2008  	cmd1 := exec.Command("ls", mountPoint+"/dir2")
  2010  	out1, err1 := cmd1.Output()
  2011  	if err1 != nil {
  2012  		if ee, ok := err1.(*exec.ExitError); ok {
  2013  			panic(ee.Stderr)
  2014  		}
  2015  	}
  2016  	t.Assert(string(out1), DeepEquals, ""+"dir3\n"+"dir4\n"+"dir5\n")
  2017  
  2018  	// 2. rm -rf dir5
  2019  	cmd := exec.Command("rm", "-rf", dir)
  2020  	_, err = cmd.Output()
  2021  	if err != nil {
  2022  		if ee, ok := err.(*exec.ExitError); ok {
  2023  			panic(ee.Stderr)
  2024  		}
  2025  	}
  2026  
  2027  	// 3. readdir dir2
  2028  	fh1, err := os.Open(mountPoint + "/dir2")
  2029  	t.Assert(err, IsNil)
  2030  	defer func() {
  2031  		// close the file if the test failed so we can unmount
  2032  		if fh1 != nil {
  2033  			fh1.Close()
  2034  		}
  2035  	}()
  2036  
  2037  	names, err := fh1.Readdirnames(0)
  2038  	t.Assert(err, IsNil)
  2039  	t.Assert(names, DeepEquals, []string{"dir3", "dir4"})
  2040  
  2041  	cmd = exec.Command("ls", mountPoint+"/dir2")
  2042  	out, err := cmd.Output()
  2043  	if err != nil {
  2044  		if ee, ok := err.(*exec.ExitError); ok {
  2045  			panic(ee.Stderr)
  2046  		}
  2047  	}
  2048  
  2049  	t.Assert(string(out), DeepEquals, ""+"dir3\n"+"dir4\n")
  2050  
  2051  	err = fh1.Close()
  2052  	t.Assert(err, IsNil)
  2053  
  2054  	// 4. reset env
  2055  	err = fh.Close()
  2056  	t.Assert(err, IsNil)
  2057  
  2058  	err = os.RemoveAll(mountPoint + "/dir2/dir4")
  2059  	t.Assert(err, IsNil)
  2060  
  2061  }
  2062  
  2063  func (s *GoofysTest) TestDirMTime(t *C) {
  2064  	s.fs.flags.StatCacheTTL = 1 * time.Minute
  2065  	s.fs.flags.TypeCacheTTL = 1 * time.Minute
  2066  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  2067  	s.fs.flags.Cheap = true
  2068  
  2069  	root := s.getRoot(t)
  2070  	t.Assert(time.Time{}.Before(root.Attributes.Mtime), Equals, true)
  2071  
  2072  	dir1, err := s.LookUpInode(t, "dir1")
  2073  	t.Assert(err, IsNil)
  2074  
  2075  	attr1, _ := dir1.GetAttributes()
  2076  	m1 := attr1.Mtime
  2077  	// dir1 doesn't have a dir blob, so it should take root's mtime
  2078  	t.Assert(m1, Equals, root.Attributes.Mtime)
  2079  
  2080  	time.Sleep(2 * time.Second)
  2081  
  2082  	dir2, err := dir1.MkDir("dir2")
  2083  	t.Assert(err, IsNil)
  2084  
  2085  	attr2, _ := dir2.GetAttributes()
  2086  	m2 := attr2.Mtime
  2087  	t.Assert(m1.Add(2*time.Second).Before(m2), Equals, true)
  2088  
  2089  	// dir1 didn't have an explicit mtime, so it should update now
  2090  	// that we did a mkdir inside it
  2091  	attr1, _ = dir1.GetAttributes()
  2092  	m1 = attr1.Mtime
  2093  	t.Assert(m1, Equals, m2)
  2094  
  2095  	// simulate forget inode so we will retrieve the inode again
  2096  	dir1.removeChild(dir2)
  2097  
  2098  	dir2, err = dir1.LookUp("dir2")
  2099  	t.Assert(err, IsNil)
  2100  
  2101  	// the new time comes from S3, which only has
  2102  	// second granularity
  2103  	attr2, _ = dir2.GetAttributes()
  2104  	t.Assert(m2, Not(Equals), attr2.Mtime)
  2105  	t.Assert(root.Attributes.Mtime.Add(time.Second).Before(attr2.Mtime), Equals, true)
  2106  
  2107  	// different dir2
  2108  	dir2, err = s.LookUpInode(t, "dir2")
  2109  	t.Assert(err, IsNil)
  2110  
  2111  	attr2, _ = dir2.GetAttributes()
  2112  	m2 = attr2.Mtime
  2113  
  2114  	// this fails because we are listing dir/, which means we
  2115  	// don't actually see the dir blob dir2/dir3/ (it's returned
  2116  	// as a common prefix), so we can't get dir3's mtime
  2117  	if false {
  2118  		// dir2/dir3/ exists and has mtime
  2119  		s.readDirIntoCache(t, dir2.Id)
  2120  		dir3, err := s.LookUpInode(t, "dir2/dir3")
  2121  		t.Assert(err, IsNil)
  2122  
  2123  		attr3, _ := dir3.GetAttributes()
  2124  		// setupDefaultEnv created these objects before mounting, so dir3's mtime predates m2
  2125  		t.Assert(attr3.Mtime.Before(m2), Equals, true)
  2126  	}
  2127  
  2128  	time.Sleep(time.Second)
  2129  
  2130  	params := &s3.PutObjectInput{
  2131  		Bucket: &s.fs.bucket,
  2132  		Key:    aws.String("dir2/newfile"),
  2133  		Body:   bytes.NewReader([]byte("foo")),
  2134  	}
  2135  	_, err = s.s3.PutObject(params)
  2136  	t.Assert(err, IsNil)
  2137  
  2138  	s.readDirIntoCache(t, dir2.Id)
  2139  
  2140  	newfile, err := dir2.LookUp("newfile")
  2141  	t.Assert(err, IsNil)
  2142  
  2143  	attr2New, _ := dir2.GetAttributes()
  2144  	// mtime should reflect that of the latest object
  2145  	// GCS can return nanosecond resolution, so truncate to seconds for the comparison
  2146  	t.Assert(attr2New.Mtime.Unix(), Equals, newfile.Attributes.Mtime.Unix())
  2147  	t.Assert(m2.Before(attr2New.Mtime), Equals, true)
  2148  }
  2149  
  2150  func (s *GoofysTest) TestDirMTimeNoTTL(t *C) {
  2151  	// enable cheap to ensure GET dir/ will come back before LIST dir/
  2152  	s.fs.flags.Cheap = true
  2153  
  2154  	dir2, err := s.LookUpInode(t, "dir2")
  2155  	t.Assert(err, IsNil)
  2156  
  2157  	attr2, _ := dir2.GetAttributes()
  2158  	m2 := attr2.Mtime
  2159  
  2160  	// dir2/dir3/ exists and has mtime
  2161  	s.readDirIntoCache(t, dir2.Id)
  2162  	dir3, err := s.LookUpInode(t, "dir2/dir3")
  2163  	t.Assert(err, IsNil)
  2164  
  2165  	attr3, _ := dir3.GetAttributes()
  2166  	// setupDefaultEnv created these objects before mounting, so dir3's mtime predates m2
  2167  	t.Assert(attr3.Mtime.Before(m2), Equals, true)
  2168  }
  2169  
  2170  func (s *GoofysTest) TestStatCacheTTL(t *C) {
  2171  	s.fs.flags.StatCacheTTL = 3 * time.Second
  2172  	s.fs.flags.TypeCacheTTL = 3 * time.Second
  2173  
  2174  	mountPoint := "/tmp/mnt" + s.fs.bucket
  2175  	s.mount(t, mountPoint)
  2176  	defer s.umount(t, mountPoint)
  2177  
  2178  	key := "empty_dir/newfile"
  2179  	file := mountPoint + "/" + key
  2180  
  2181  	fh, err := os.Create(file)
  2182  	t.Assert(err, IsNil)
  2183  
  2184  	err = fh.Close()
  2185  	t.Assert(err, IsNil)
  2186  
  2187  	_, err = s.s3.DeleteObject(&s3.DeleteObjectInput{
  2188  		Bucket: &s.fs.bucket,
  2189  		Key:    &key,
  2190  	})
  2191  	t.Assert(err, IsNil)
  2192  
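        	// wait for the stat/type cache to expire so the Readdirnames
        	// below goes back to S3 and notices the object was deleted
        	// behind goofys' back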
  2193  	time.Sleep(3 * time.Second)
  2194  
  2195  	dh, err := os.Open(mountPoint + "/empty_dir")
  2196  	t.Assert(err, IsNil)
  2197  	defer func() {
  2198  		// close the file if the test failed so we can unmount
  2199  		if dh != nil {
  2200  			dh.Close()
  2201  		}
  2202  	}()
  2203  
  2204  	names, err := dh.Readdirnames(0)
  2205  	t.Assert(err, IsNil)
  2206  	t.Assert(names, DeepEquals, []string{})
  2207  }