github.com/djmaze/goofys@v0.24.2/internal/goofys_test.go (about) 1 // Copyright 2015 - 2017 Ka-Hing Cheung 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package internal 16 17 import ( 18 . "github.com/djmaze/goofys/api/common" 19 20 "bufio" 21 "bytes" 22 "fmt" 23 "io" 24 "io/ioutil" 25 "math/rand" 26 "net" 27 "os" 28 "os/exec" 29 "os/signal" 30 "os/user" 31 "reflect" 32 "runtime" 33 "sort" 34 "strconv" 35 "strings" 36 "sync" 37 "syscall" 38 "testing" 39 "time" 40 41 "context" 42 43 "github.com/aws/aws-sdk-go/aws" 44 "github.com/aws/aws-sdk-go/aws/corehandlers" 45 "github.com/aws/aws-sdk-go/aws/credentials" 46 47 "github.com/Azure/azure-storage-blob-go/azblob" 48 "github.com/Azure/go-autorest/autorest" 49 "github.com/Azure/go-autorest/autorest/azure" 50 azureauth "github.com/Azure/go-autorest/autorest/azure/auth" 51 52 "golang.org/x/sys/unix" 53 54 "github.com/jacobsa/fuse" 55 "github.com/jacobsa/fuse/fuseops" 56 "github.com/jacobsa/fuse/fuseutil" 57 58 "github.com/sirupsen/logrus" 59 60 "runtime/debug" 61 62 . 
"gopkg.in/check.v1" 63 ) 64 65 // so I don't get complains about unused imports 66 var ignored = logrus.DebugLevel 67 68 const PerTestTimeout = 10 * time.Minute 69 70 func currentUid() uint32 { 71 user, err := user.Current() 72 if err != nil { 73 panic(err) 74 } 75 76 uid, err := strconv.ParseUint(user.Uid, 10, 32) 77 if err != nil { 78 panic(err) 79 } 80 81 return uint32(uid) 82 } 83 84 func currentGid() uint32 { 85 user, err := user.Current() 86 if err != nil { 87 panic(err) 88 } 89 90 gid, err := strconv.ParseUint(user.Gid, 10, 32) 91 if err != nil { 92 panic(err) 93 } 94 95 return uint32(gid) 96 } 97 98 type GoofysTest struct { 99 fs *Goofys 100 ctx context.Context 101 awsConfig *aws.Config 102 cloud StorageBackend 103 emulator bool 104 azurite bool 105 106 removeBucket []StorageBackend 107 108 env map[string]*string 109 110 timeout chan int 111 } 112 113 func Test(t *testing.T) { 114 TestingT(t) 115 } 116 117 var _ = Suite(&GoofysTest{}) 118 119 func logOutput(t *C, tag string, r io.ReadCloser) { 120 in := bufio.NewScanner(r) 121 122 for in.Scan() { 123 t.Log(tag, in.Text()) 124 } 125 } 126 127 func waitFor(t *C, addr string) (err error) { 128 // wait for it to listen on port 129 for i := 0; i < 10; i++ { 130 var conn net.Conn 131 conn, err = net.Dial("tcp", addr) 132 if err == nil { 133 // we are done! 
134 conn.Close() 135 return 136 } else { 137 t.Logf("Cound not connect: %v", err) 138 time.Sleep(100 * time.Millisecond) 139 } 140 } 141 142 return 143 } 144 145 func (t *GoofysTest) deleteBlobsParallelly(cloud StorageBackend, blobs []string) error { 146 sem := make(semaphore, 100) 147 sem.P(100) 148 var err error 149 for _, blobOuter := range blobs { 150 sem.V(1) 151 go func(blob string) { 152 defer sem.P(1) 153 _, localerr := cloud.DeleteBlob(&DeleteBlobInput{blob}) 154 if localerr != nil && localerr != syscall.ENOENT { 155 err = localerr 156 } 157 }(blobOuter) 158 if err != nil { 159 break 160 } 161 } 162 sem.V(100) 163 return err 164 } 165 166 // groupByDecresingDepths takes a slice of path strings and returns the paths as 167 // groups where each group has the same `depth` - depth(a/b/c)=2, depth(a/b/)=1 168 // The groups are returned in decreasing order of depths. 169 // - Inp: [] Out: [] 170 // - Inp: ["a/b1/", "a/b/c1", "a/b2", "a/b/c2"] 171 // Out: [["a/b/c1", "a/b/c2"], ["a/b1/", "a/b2"]] 172 // - Inp: ["a/b1/", "z/a/b/c1", "a/b2", "z/a/b/c2"] 173 // Out: [["z/a/b/c1", "z/a/b/c2"], ["a/b1/", "a/b2"] 174 func groupByDecresingDepths(items []string) [][]string { 175 depthToGroup := map[int][]string{} 176 for _, item := range items { 177 depth := len(strings.Split(strings.TrimRight(item, "/"), "/")) 178 if _, ok := depthToGroup[depth]; !ok { 179 depthToGroup[depth] = []string{} 180 } 181 depthToGroup[depth] = append(depthToGroup[depth], item) 182 } 183 decreasingDepths := []int{} 184 for depth := range depthToGroup { 185 decreasingDepths = append(decreasingDepths, depth) 186 } 187 sort.Sort(sort.Reverse(sort.IntSlice(decreasingDepths))) 188 ret := [][]string{} 189 for _, depth := range decreasingDepths { 190 group, _ := depthToGroup[depth] 191 ret = append(ret, group) 192 } 193 return ret 194 } 195 196 func (t *GoofysTest) DeleteADLBlobs(cloud StorageBackend, items []string) error { 197 // If we delete a directory that's not empty, ADL{v1|v2} returns failure. 
That can 198 // happen if we want to delete both "dir1" and "dir1/file" but delete them 199 // in the wrong order. 200 // So we group the items to delete into multiple groups. All items in a group 201 // will have the same depth - depth(/a/b/c) = 2, depth(/a/b/) = 1. 202 // We then iterate over the groups in desc order of depth and delete them parallelly. 203 for _, group := range groupByDecresingDepths(items) { 204 err := t.deleteBlobsParallelly(cloud, group) 205 if err != nil { 206 return err 207 } 208 } 209 return nil 210 } 211 212 func (s *GoofysTest) selectTestConfig(t *C, flags *FlagStorage) (conf S3Config) { 213 (&conf).Init() 214 215 if hasEnv("AWS") { 216 if isTravis() { 217 conf.Region = "us-east-1" 218 } else { 219 conf.Region = "us-west-2" 220 } 221 profile := os.Getenv("AWS") 222 if profile != "" { 223 if profile != "-" { 224 conf.Profile = profile 225 } else { 226 conf.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") 227 conf.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") 228 } 229 } 230 231 conf.BucketOwner = os.Getenv("BUCKET_OWNER") 232 if conf.BucketOwner == "" { 233 panic("BUCKET_OWNER is required on AWS") 234 } 235 } else if hasEnv("GCS") { 236 conf.Region = "us-west1" 237 conf.Profile = os.Getenv("GCS") 238 flags.Endpoint = "http://storage.googleapis.com" 239 } else if hasEnv("MINIO") { 240 conf.Region = "us-east-1" 241 conf.AccessKey = "Q3AM3UQ867SPQQA43P2F" 242 conf.SecretKey = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" 243 flags.Endpoint = "https://play.minio.io:9000" 244 } else { 245 s.emulator = true 246 247 conf.Region = "us-west-2" 248 conf.AccessKey = "foo" 249 conf.SecretKey = "bar" 250 flags.Endpoint = "http://127.0.0.1:8080" 251 } 252 253 return 254 } 255 256 func (s *GoofysTest) waitForEmulator(t *C) { 257 if s.emulator { 258 addr := "127.0.0.1:8080" 259 260 err := waitFor(t, addr) 261 t.Assert(err, IsNil) 262 } 263 } 264 265 func (s *GoofysTest) SetUpSuite(t *C) { 266 } 267 268 func (s *GoofysTest) deleteBucket(cloud StorageBackend) 
error { 269 param := &ListBlobsInput{} 270 271 // Azure need special handling. 272 azureKeysToRemove := make([]string, 0) 273 for { 274 resp, err := cloud.ListBlobs(param) 275 if err != nil { 276 return err 277 } 278 279 keysToRemove := []string{} 280 for _, o := range resp.Items { 281 keysToRemove = append(keysToRemove, *o.Key) 282 } 283 if len(keysToRemove) != 0 { 284 switch cloud.(type) { 285 case *ADLv1, *ADLv2, *AZBlob: 286 // ADLV{1|2} and AZBlob (sometimes) supports directories. => dir can be removed only 287 // after the dir is empty. So we will remove the blobs in reverse depth order via 288 // DeleteADLBlobs after this for loop. 289 azureKeysToRemove = append(azureKeysToRemove, keysToRemove...) 290 default: 291 _, err = cloud.DeleteBlobs(&DeleteBlobsInput{Items: keysToRemove}) 292 if err != nil { 293 return err 294 } 295 } 296 } 297 if resp.IsTruncated { 298 param.ContinuationToken = resp.NextContinuationToken 299 } else { 300 break 301 } 302 } 303 304 if len(azureKeysToRemove) != 0 { 305 err := s.DeleteADLBlobs(cloud, azureKeysToRemove) 306 if err != nil { 307 return err 308 } 309 } 310 311 _, err := cloud.RemoveBucket(&RemoveBucketInput{}) 312 return err 313 } 314 315 func (s *GoofysTest) TearDownTest(t *C) { 316 close(s.timeout) 317 s.timeout = nil 318 319 for _, cloud := range s.removeBucket { 320 err := s.deleteBucket(cloud) 321 t.Assert(err, IsNil) 322 } 323 s.removeBucket = nil 324 } 325 326 func (s *GoofysTest) removeBlob(cloud StorageBackend, t *C, blobPath string) { 327 params := &DeleteBlobInput{ 328 Key: blobPath, 329 } 330 _, err := cloud.DeleteBlob(params) 331 t.Assert(err, IsNil) 332 } 333 334 func (s *GoofysTest) setupBlobs(cloud StorageBackend, t *C, env map[string]*string) { 335 336 // concurrency = 100 337 throttler := make(semaphore, 100) 338 throttler.P(100) 339 340 var globalErr error 341 for path, c := range env { 342 throttler.V(1) 343 go func(path string, content *string) { 344 dir := false 345 if content == nil { 346 if 
strings.HasSuffix(path, "/") { 347 if cloud.Capabilities().DirBlob { 348 path = strings.TrimRight(path, "/") 349 } 350 dir = true 351 content = PString("") 352 } else { 353 content = &path 354 } 355 } 356 defer throttler.P(1) 357 params := &PutBlobInput{ 358 Key: path, 359 Body: bytes.NewReader([]byte(*content)), 360 Size: PUInt64(uint64(len(*content))), 361 Metadata: map[string]*string{ 362 "name": aws.String(path + "+/#%00"), 363 }, 364 DirBlob: dir, 365 } 366 367 _, err := cloud.PutBlob(params) 368 if err != nil { 369 globalErr = err 370 } 371 t.Assert(err, IsNil) 372 }(path, c) 373 } 374 throttler.V(100) 375 throttler = make(semaphore, 100) 376 throttler.P(100) 377 t.Assert(globalErr, IsNil) 378 379 // double check, except on AWS S3, because there we sometimes 380 // hit 404 NoSuchBucket and there's no way to distinguish that 381 // from 404 KeyNotFound 382 if !hasEnv("AWS") { 383 for path, c := range env { 384 throttler.V(1) 385 go func(path string, content *string) { 386 defer throttler.P(1) 387 params := &HeadBlobInput{Key: path} 388 res, err := cloud.HeadBlob(params) 389 t.Assert(err, IsNil) 390 if content != nil { 391 t.Assert(res.Size, Equals, uint64(len(*content))) 392 } else if strings.HasSuffix(path, "/") || path == "zero" { 393 t.Assert(res.Size, Equals, uint64(0)) 394 } else { 395 t.Assert(res.Size, Equals, uint64(len(path))) 396 } 397 }(path, c) 398 } 399 throttler.V(100) 400 t.Assert(globalErr, IsNil) 401 } 402 } 403 404 func (s *GoofysTest) setupEnv(t *C, env map[string]*string, public bool) { 405 if public { 406 if s3, ok := s.cloud.Delegate().(*S3Backend); ok { 407 s3.config.ACL = "public-read" 408 } else { 409 t.Error("Not S3 backend") 410 } 411 } 412 413 _, err := s.cloud.MakeBucket(&MakeBucketInput{}) 414 t.Assert(err, IsNil) 415 416 if !s.emulator { 417 //time.Sleep(time.Second) 418 } 419 420 s.setupBlobs(s.cloud, t, env) 421 422 t.Log("setupEnv done") 423 } 424 425 func (s *GoofysTest) setupDefaultEnv(t *C, public bool) { 426 s.env = 
map[string]*string{ 427 "file1": nil, 428 "file2": nil, 429 "dir1/file3": nil, 430 "dir2/dir3/": nil, 431 "dir2/dir3/file4": nil, 432 "dir4/": nil, 433 "dir4/file5": nil, 434 "empty_dir/": nil, 435 "empty_dir2/": nil, 436 "zero": PString(""), 437 } 438 439 s.setupEnv(t, s.env, public) 440 } 441 442 func (s *GoofysTest) setUpTestTimeout(t *C, timeout time.Duration) { 443 if s.timeout != nil { 444 close(s.timeout) 445 } 446 s.timeout = make(chan int) 447 debug.SetTraceback("all") 448 started := time.Now() 449 450 go func() { 451 select { 452 case _, ok := <-s.timeout: 453 if !ok { 454 return 455 } 456 case <-time.After(timeout): 457 panic(fmt.Sprintf("timeout %v reached. Started %v now %v", 458 timeout, started, time.Now())) 459 } 460 }() 461 } 462 463 func (s *GoofysTest) SetUpTest(t *C) { 464 log.Infof("Starting at %v", time.Now()) 465 466 s.setUpTestTimeout(t, PerTestTimeout) 467 468 var bucket string 469 mount := os.Getenv("MOUNT") 470 471 if mount != "false" { 472 bucket = mount 473 } else { 474 bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16) 475 } 476 uid, gid := MyUserAndGroup() 477 flags := &FlagStorage{ 478 DirMode: 0700, 479 FileMode: 0700, 480 Uid: uint32(uid), 481 Gid: uint32(gid), 482 HTTPTimeout: 30 * time.Second, 483 } 484 485 cloud := os.Getenv("CLOUD") 486 487 if cloud == "s3" { 488 s.emulator = !hasEnv("AWS") 489 s.waitForEmulator(t) 490 491 conf := s.selectTestConfig(t, flags) 492 flags.Backend = &conf 493 494 s3, err := NewS3(bucket, flags, &conf) 495 t.Assert(err, IsNil) 496 497 s.cloud = s3 498 s3.aws = hasEnv("AWS") 499 if s3.aws { 500 s.cloud = NewS3BucketEventualConsistency(s3) 501 } 502 503 if s.emulator { 504 s3.Handlers.Sign.Clear() 505 s3.Handlers.Sign.PushBack(SignV2) 506 s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) 507 } 508 _, err = s3.ListBuckets(nil) 509 t.Assert(err, IsNil) 510 511 } else if cloud == "gcs3" { 512 conf := s.selectTestConfig(t, flags) 513 flags.Backend = &conf 514 515 var err error 
516 s.cloud, err = NewGCS3(bucket, flags, &conf) 517 t.Assert(s.cloud, NotNil) 518 t.Assert(err, IsNil) 519 } else if cloud == "azblob" { 520 config, err := AzureBlobConfig(os.Getenv("ENDPOINT"), "", "blob") 521 t.Assert(err, IsNil) 522 523 if config.Endpoint == AzuriteEndpoint { 524 s.azurite = true 525 s.emulator = true 526 s.waitForEmulator(t) 527 } 528 529 // Azurite's SAS is buggy, ex: https://github.com/Azure/Azurite/issues/216 530 if os.Getenv("SAS_EXPIRE") != "" { 531 expire, err := time.ParseDuration(os.Getenv("SAS_EXPIRE")) 532 t.Assert(err, IsNil) 533 534 config.TokenRenewBuffer = expire / 2 535 credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey) 536 t.Assert(err, IsNil) 537 538 // test sas token config 539 config.SasToken = func() (string, error) { 540 sasQueryParams, err := azblob.AccountSASSignatureValues{ 541 Protocol: azblob.SASProtocolHTTPSandHTTP, 542 StartTime: time.Now().UTC().Add(-1 * time.Hour), 543 ExpiryTime: time.Now().UTC().Add(expire), 544 Services: azblob.AccountSASServices{Blob: true}.String(), 545 ResourceTypes: azblob.AccountSASResourceTypes{ 546 Service: true, 547 Container: true, 548 Object: true, 549 }.String(), 550 Permissions: azblob.AccountSASPermissions{ 551 Read: true, 552 Write: true, 553 Delete: true, 554 List: true, 555 Create: true, 556 }.String(), 557 }.NewSASQueryParameters(credential) 558 if err != nil { 559 return "", err 560 } 561 return sasQueryParams.Encode(), nil 562 } 563 } 564 565 flags.Backend = &config 566 567 s.cloud, err = NewAZBlob(bucket, &config) 568 t.Assert(err, IsNil) 569 t.Assert(s.cloud, NotNil) 570 } else if cloud == "adlv1" { 571 cred := azureauth.NewClientCredentialsConfig( 572 os.Getenv("ADLV1_CLIENT_ID"), 573 os.Getenv("ADLV1_CLIENT_CREDENTIAL"), 574 os.Getenv("ADLV1_TENANT_ID")) 575 auth, err := cred.Authorizer() 576 t.Assert(err, IsNil) 577 578 config := ADLv1Config{ 579 Endpoint: os.Getenv("ENDPOINT"), 580 Authorizer: auth, 581 } 582 config.Init() 583 584 
flags.Backend = &config 585 586 s.cloud, err = NewADLv1(bucket, flags, &config) 587 t.Assert(err, IsNil) 588 t.Assert(s.cloud, NotNil) 589 } else if cloud == "adlv2" { 590 var err error 591 var auth autorest.Authorizer 592 593 if os.Getenv("AZURE_STORAGE_ACCOUNT") != "" && os.Getenv("AZURE_STORAGE_KEY") != "" { 594 auth = &AZBlobConfig{ 595 AccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"), 596 AccountKey: os.Getenv("AZURE_STORAGE_KEY"), 597 } 598 } else { 599 cred := azureauth.NewClientCredentialsConfig( 600 os.Getenv("ADLV2_CLIENT_ID"), 601 os.Getenv("ADLV2_CLIENT_CREDENTIAL"), 602 os.Getenv("ADLV2_TENANT_ID")) 603 cred.Resource = azure.PublicCloud.ResourceIdentifiers.Storage 604 auth, err = cred.Authorizer() 605 t.Assert(err, IsNil) 606 } 607 608 config := ADLv2Config{ 609 Endpoint: os.Getenv("ENDPOINT"), 610 Authorizer: auth, 611 } 612 613 flags.Backend = &config 614 615 s.cloud, err = NewADLv2(bucket, flags, &config) 616 t.Assert(err, IsNil) 617 t.Assert(s.cloud, NotNil) 618 } else if cloud == "gcs" { 619 config := NewGCSConfig() 620 t.Assert(config, NotNil) 621 622 flags.Backend = config 623 var err error 624 s.cloud, err = NewGCS(bucket, config) 625 t.Assert(err, IsNil) 626 t.Assert(s.cloud, NotNil) 627 } else { 628 t.Fatal("Unsupported backend") 629 } 630 631 if mount == "false" { 632 s.removeBucket = append(s.removeBucket, s.cloud) 633 s.setupDefaultEnv(t, false) 634 } else { 635 _, err := s.cloud.MakeBucket(&MakeBucketInput{}) 636 if err == fuse.EEXIST { 637 err = nil 638 } 639 t.Assert(err, IsNil) 640 } 641 642 if hasEnv("AWS") { 643 s.fs = newGoofys(context.Background(), bucket, flags, 644 func(bucket string, flags *FlagStorage) (StorageBackend, error) { 645 cloud, err := NewBackend(bucket, flags) 646 if err != nil { 647 return nil, err 648 } 649 650 return NewS3BucketEventualConsistency(cloud.(*S3Backend)), nil 651 }) 652 } else { 653 s.fs = NewGoofys(context.Background(), bucket, flags) 654 } 655 t.Assert(s.fs, NotNil) 656 657 s.ctx = 
context.Background() 658 659 if hasEnv("GCS") { 660 flags.Endpoint = "http://storage.googleapis.com" 661 } 662 } 663 664 func (s *GoofysTest) getRoot(t *C) (inode *Inode) { 665 inode = s.fs.inodes[fuseops.RootInodeID] 666 t.Assert(inode, NotNil) 667 return 668 } 669 670 func (s *GoofysTest) TestGetRootInode(t *C) { 671 root := s.getRoot(t) 672 t.Assert(root.Id, Equals, fuseops.InodeID(fuseops.RootInodeID)) 673 } 674 675 func (s *GoofysTest) TestGetRootAttributes(t *C) { 676 _, err := s.getRoot(t).GetAttributes() 677 t.Assert(err, IsNil) 678 } 679 680 func (s *GoofysTest) ForgetInode(t *C, inode fuseops.InodeID) { 681 err := s.fs.ForgetInode(s.ctx, &fuseops.ForgetInodeOp{Inode: inode}) 682 t.Assert(err, IsNil) 683 } 684 685 func (s *GoofysTest) LookUpInode(t *C, name string) (in *Inode, err error) { 686 parent := s.getRoot(t) 687 688 for { 689 idx := strings.Index(name, "/") 690 if idx == -1 { 691 break 692 } 693 694 dirName := name[0:idx] 695 name = name[idx+1:] 696 697 lookup := fuseops.LookUpInodeOp{ 698 Parent: parent.Id, 699 Name: dirName, 700 } 701 702 err = s.fs.LookUpInode(nil, &lookup) 703 if err != nil { 704 return 705 } 706 parent = s.fs.inodes[lookup.Entry.Child] 707 } 708 709 lookup := fuseops.LookUpInodeOp{ 710 Parent: parent.Id, 711 Name: name, 712 } 713 714 err = s.fs.LookUpInode(nil, &lookup) 715 if err != nil { 716 return 717 } 718 in = s.fs.inodes[lookup.Entry.Child] 719 return 720 } 721 722 func (s *GoofysTest) TestSetup(t *C) { 723 } 724 725 func (s *GoofysTest) TestLookUpInode(t *C) { 726 _, err := s.LookUpInode(t, "file1") 727 t.Assert(err, IsNil) 728 729 _, err = s.LookUpInode(t, "fileNotFound") 730 t.Assert(err, Equals, fuse.ENOENT) 731 732 _, err = s.LookUpInode(t, "dir1/file3") 733 t.Assert(err, IsNil) 734 735 _, err = s.LookUpInode(t, "dir2/dir3") 736 t.Assert(err, IsNil) 737 738 _, err = s.LookUpInode(t, "dir2/dir3/file4") 739 t.Assert(err, IsNil) 740 741 _, err = s.LookUpInode(t, "empty_dir") 742 t.Assert(err, IsNil) 743 } 744 745 func 
(s *GoofysTest) TestPanicWrapper(t *C) { 746 debug.SetTraceback("single") 747 748 fs := FusePanicLogger{s.fs} 749 err := fs.GetInodeAttributes(nil, &fuseops.GetInodeAttributesOp{ 750 Inode: 1234, 751 }) 752 t.Assert(err, Equals, fuse.EIO) 753 } 754 755 func (s *GoofysTest) TestGetInodeAttributes(t *C) { 756 inode, err := s.getRoot(t).LookUp("file1") 757 t.Assert(err, IsNil) 758 759 attr, err := inode.GetAttributes() 760 t.Assert(err, IsNil) 761 t.Assert(attr.Size, Equals, uint64(len("file1"))) 762 } 763 764 func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []DirHandleEntry) { 765 dh.mu.Lock() 766 defer dh.mu.Unlock() 767 768 en, err := dh.ReadDir(fuseops.DirOffset(0)) 769 t.Assert(err, IsNil) 770 t.Assert(en, NotNil) 771 t.Assert(en.Name, Equals, ".") 772 773 en, err = dh.ReadDir(fuseops.DirOffset(1)) 774 t.Assert(err, IsNil) 775 t.Assert(en, NotNil) 776 t.Assert(en.Name, Equals, "..") 777 778 for i := fuseops.DirOffset(2); ; i++ { 779 en, err = dh.ReadDir(i) 780 t.Assert(err, IsNil) 781 782 if en == nil { 783 return 784 } 785 786 entries = append(entries, *en) 787 } 788 } 789 790 func namesOf(entries []DirHandleEntry) (names []string) { 791 for _, en := range entries { 792 names = append(names, en.Name) 793 } 794 return 795 } 796 797 func (s *GoofysTest) assertEntries(t *C, in *Inode, names []string) { 798 dh := in.OpenDir() 799 defer dh.CloseDir() 800 801 t.Assert(namesOf(s.readDirFully(t, dh)), DeepEquals, names) 802 } 803 804 func (s *GoofysTest) readDirIntoCache(t *C, inode fuseops.InodeID) { 805 openDirOp := fuseops.OpenDirOp{Inode: inode} 806 err := s.fs.OpenDir(nil, &openDirOp) 807 t.Assert(err, IsNil) 808 809 readDirOp := fuseops.ReadDirOp{ 810 Inode: inode, 811 Handle: openDirOp.Handle, 812 Dst: make([]byte, 8*1024), 813 } 814 815 err = s.fs.ReadDir(nil, &readDirOp) 816 t.Assert(err, IsNil) 817 } 818 819 func (s *GoofysTest) TestReadDirCacheLookup(t *C) { 820 s.fs.flags.StatCacheTTL = 1 * time.Minute 821 s.fs.flags.TypeCacheTTL = 1 * 
time.Minute 822 823 s.readDirIntoCache(t, fuseops.RootInodeID) 824 s.disableS3() 825 826 // should be cached so lookup should not need to talk to s3 827 entries := []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"} 828 for _, en := range entries { 829 err := s.fs.LookUpInode(nil, &fuseops.LookUpInodeOp{ 830 Parent: fuseops.RootInodeID, 831 Name: en, 832 }) 833 t.Assert(err, IsNil) 834 } 835 } 836 837 func (s *GoofysTest) TestReadDirWithExternalChanges(t *C) { 838 s.fs.flags.TypeCacheTTL = time.Second 839 840 dir1, err := s.LookUpInode(t, "dir1") 841 t.Assert(err, IsNil) 842 843 defaultEntries := []string{ 844 "dir1", "dir2", "dir4", "empty_dir", 845 "empty_dir2", "file1", "file2", "zero"} 846 s.assertEntries(t, s.getRoot(t), defaultEntries) 847 // dir1 has file3 and nothing else. 848 s.assertEntries(t, dir1, []string{"file3"}) 849 850 // Do the following 'external' changes in s3 without involving goofys. 851 // - Remove file1, add file3. 852 // - Remove dir1/file3. Given that dir1 has just this one file, 853 // we are effectively removing dir1 as well. 854 s.removeBlob(s.cloud, t, "file1") 855 s.setupBlobs(s.cloud, t, map[string]*string{"file3": nil}) 856 s.removeBlob(s.cloud, t, "dir1/file3") 857 858 time.Sleep(s.fs.flags.TypeCacheTTL) 859 // newEntries = `defaultEntries` - dir1 - file1 + file3. 860 newEntries := []string{ 861 "dir2", "dir4", "empty_dir", "empty_dir2", 862 "file2", "file3", "zero"} 863 if s.cloud.Capabilities().DirBlob { 864 // dir1 is not automatically deleted 865 newEntries = append([]string{"dir1"}, newEntries...) 
866 } 867 s.assertEntries(t, s.getRoot(t), newEntries) 868 } 869 870 func (s *GoofysTest) TestReadDir(t *C) { 871 // test listing / 872 dh := s.getRoot(t).OpenDir() 873 defer dh.CloseDir() 874 875 s.assertEntries(t, s.getRoot(t), []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"}) 876 877 // test listing dir1/ 878 in, err := s.LookUpInode(t, "dir1") 879 t.Assert(err, IsNil) 880 s.assertEntries(t, in, []string{"file3"}) 881 882 // test listing dir2/ 883 in, err = s.LookUpInode(t, "dir2") 884 t.Assert(err, IsNil) 885 s.assertEntries(t, in, []string{"dir3"}) 886 887 // test listing dir2/dir3/ 888 in, err = s.LookUpInode(t, "dir2/dir3") 889 t.Assert(err, IsNil) 890 s.assertEntries(t, in, []string{"file4"}) 891 } 892 893 func (s *GoofysTest) TestReadFiles(t *C) { 894 parent := s.getRoot(t) 895 dh := parent.OpenDir() 896 defer dh.CloseDir() 897 898 var entries []*DirHandleEntry 899 900 dh.mu.Lock() 901 for i := fuseops.DirOffset(0); ; i++ { 902 en, err := dh.ReadDir(i) 903 t.Assert(err, IsNil) 904 905 if en == nil { 906 break 907 } 908 909 entries = append(entries, en) 910 } 911 dh.mu.Unlock() 912 913 for _, en := range entries { 914 if en.Type == fuseutil.DT_File { 915 in, err := parent.LookUp(en.Name) 916 t.Assert(err, IsNil) 917 918 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 919 t.Assert(err, IsNil) 920 921 buf := make([]byte, 4096) 922 923 nread, err := fh.ReadFile(0, buf) 924 if en.Name == "zero" { 925 t.Assert(nread, Equals, 0) 926 } else { 927 t.Assert(nread, Equals, len(en.Name)) 928 buf = buf[0:nread] 929 t.Assert(string(buf), Equals, en.Name) 930 } 931 } else { 932 933 } 934 } 935 } 936 937 func (s *GoofysTest) TestReadOffset(t *C) { 938 root := s.getRoot(t) 939 f := "file1" 940 941 in, err := root.LookUp(f) 942 t.Assert(err, IsNil) 943 944 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 945 t.Assert(err, IsNil) 946 947 buf := make([]byte, 4096) 948 949 nread, err := fh.ReadFile(1, buf) 950 
t.Assert(err, IsNil)
	t.Assert(nread, Equals, len(f)-1)
	t.Assert(string(buf[0:nread]), DeepEquals, f[1:])

	r := rand.New(rand.NewSource(time.Now().UnixNano()))

	for i := 0; i < 3; i++ {
		off := r.Int31n(int32(len(f)))
		nread, err = fh.ReadFile(int64(off), buf)
		t.Assert(err, IsNil)
		t.Assert(nread, Equals, len(f)-int(off))
		t.Assert(string(buf[0:nread]), DeepEquals, f[off:])
	}
}

func (s *GoofysTest) TestCreateFiles(t *C) {
	fileName := "testCreateFile"

	_, fh := s.getRoot(t).Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())})

	err := fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err := s.cloud.GetBlob(&GetBlobInput{Key: fileName})
	t.Assert(err, IsNil)
	t.Assert(resp.HeadBlobOutput.Size, DeepEquals, uint64(0))
	defer resp.Body.Close()

	_, err = s.getRoot(t).LookUp(fileName)
	t.Assert(err, IsNil)

	fileName = "testCreateFile2"
	s.testWriteFile(t, fileName, 1, 128*1024)

	inode, err := s.getRoot(t).LookUp(fileName)
	t.Assert(err, IsNil)

	fh, err = inode.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
	t.Assert(err, IsNil)

	err = fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
	t.Assert(err, IsNil)
	// ADLv1 doesn't return size when we do a GET
	if _, adlv1 := s.cloud.(*ADLv1); !adlv1 {
		t.Assert(resp.HeadBlobOutput.Size, Equals, uint64(1))
	}
	defer resp.Body.Close()
}

func (s *GoofysTest) TestUnlink(t *C) {
	fileName := "file1"

	err := s.getRoot(t).Unlink(fileName)
	t.Assert(err, IsNil)

	// make sure that it's gone from s3
	_, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName})
	t.Assert(mapAwsError(err), Equals, fuse.ENOENT)
}

// FileHandleReader adapts a goofys FileHandle to io.Reader/io.Seeker so it
// can be fed to CompareReader.
type FileHandleReader struct {
	fs     *Goofys
	fh     *FileHandle
	offset int64
}

func (r *FileHandleReader) Read(p []byte) (nread int, err error) {
	nread, err = r.fh.ReadFile(r.offset, p)
	r.offset += int64(nread)
	return
}

// Seek supports only SEEK_SET (0) and SEEK_CUR (1); anything else panics.
func (r *FileHandleReader) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case 0:
		r.offset = offset
	case 1:
		r.offset += offset
	default:
		panic(fmt.Sprintf("unsupported whence: %v", whence))
	}

	return r.offset, nil
}

func (s *GoofysTest) testWriteFile(t *C, fileName string, size int64, write_size int) {
	s.testWriteFileAt(t, fileName, int64(0), size, write_size)
}

// testWriteFileAt writes `size` bytes of SeqReader data to fileName starting
// at `offset` in chunks of write_size, flushes, then reads it back twice
// (unaligned and 4KB-aligned) and verifies the content.
func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, write_size int) {
	var fh *FileHandle
	root := s.getRoot(t)

	lookup := fuseops.LookUpInodeOp{
		Parent: root.Id,
		Name:   fileName,
	}
	err := s.fs.LookUpInode(nil, &lookup)
	if err != nil {
		if err == fuse.ENOENT {
			create := fuseops.CreateFileOp{
				Parent: root.Id,
				Name:   fileName,
			}
			err = s.fs.CreateFile(nil, &create)
			t.Assert(err, IsNil)

			fh = s.fs.fileHandles[create.Handle]
		} else {
			t.Assert(err, IsNil)
		}
	} else {
		in := s.fs.inodes[lookup.Entry.Child]
		fh, err = in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
		t.Assert(err, IsNil)
	}

	buf := make([]byte, write_size)
	nwritten := offset

	src := io.LimitReader(&SeqReader{}, size)

	for {
		nread, err := src.Read(buf)
		if err == io.EOF {
			t.Assert(nwritten, Equals, size)
			break
		}
		t.Assert(err, IsNil)

		err = fh.WriteFile(nwritten, buf[:nread])
		t.Assert(err, IsNil)
		nwritten += int64(nread)
	}

	err = fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: fileName})
	t.Assert(err, IsNil)
	t.Assert(resp.Size, Equals, uint64(size+offset))

	fr := &FileHandleReader{s.fs, fh, offset}
	diff, err := CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 0)
	t.Assert(err, IsNil)
	t.Assert(diff, Equals, -1)
	t.Assert(fr.offset, Equals, size)

	err = fh.FlushFile()
	t.Assert(err, IsNil)

	// read again with exact 4KB to catch aligned read case
	fr = &FileHandleReader{s.fs, fh, offset}
	diff, err = CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 4096)
	t.Assert(err, IsNil)
	t.Assert(diff, Equals, -1)
	t.Assert(fr.offset, Equals, size)

	fh.Release()
}

func (s *GoofysTest) TestWriteLargeFile(t *C) {
	s.testWriteFile(t, "testLargeFile", int64(READAHEAD_CHUNK)+1024*1024, 128*1024)
	s.testWriteFile(t, "testLargeFile2", int64(READAHEAD_CHUNK), 128*1024)
	s.testWriteFile(t, "testLargeFile3", int64(READAHEAD_CHUNK)+1, 128*1024)
}

func (s *GoofysTest) TestWriteReallyLargeFile(t *C) {
	if _, ok := s.cloud.(*S3Backend); ok && s.emulator {
		t.Skip("seems to be OOM'ing S3proxy 1.8.0")
	}
	s.testWriteFile(t, "testLargeFile", 512*1024*1024+1, 128*1024)
}

func (s *GoofysTest) TestWriteReplicatorThrottle(t *C) {
	s.fs.replicators = Ticket{Total: 1}.Init()
	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
}

func (s *GoofysTest) TestReadWriteMinimumMemory(t *C) {
	if _, ok := s.cloud.(*ADLv1); ok {
		s.fs.bufferPool.maxBuffers = 4
	} else {
		s.fs.bufferPool.maxBuffers = 2
	}
	s.fs.bufferPool.computedMaxbuffers = s.fs.bufferPool.maxBuffers
	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
}

func (s *GoofysTest) TestWriteManyFilesFile(t *C) {
	var files sync.WaitGroup

	for i := 0; i < 21; i++ {
		files.Add(1)
		fileName := "testSmallFile" + strconv.Itoa(i)
		go func() {
			defer files.Done()
			s.testWriteFile(t, fileName, 1, 128*1024)
		}()
	}

	files.Wait()
}

func (s *GoofysTest)
testWriteFileNonAlign(t *C) { 1158 s.testWriteFile(t, "testWriteFileNonAlign", 6*1024*1024, 128*1024+1) 1159 } 1160 1161 func (s *GoofysTest) TestReadRandom(t *C) { 1162 size := int64(21 * 1024 * 1024) 1163 1164 s.testWriteFile(t, "testLargeFile", size, 128*1024) 1165 in, err := s.LookUpInode(t, "testLargeFile") 1166 t.Assert(err, IsNil) 1167 1168 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 1169 t.Assert(err, IsNil) 1170 fr := &FileHandleReader{s.fs, fh, 0} 1171 1172 src := rand.NewSource(time.Now().UnixNano()) 1173 truth := &SeqReader{} 1174 1175 for i := 0; i < 10; i++ { 1176 offset := src.Int63() % (size / 2) 1177 1178 fr.Seek(offset, 0) 1179 truth.Seek(offset, 0) 1180 1181 // read 5MB+1 from that offset 1182 nread := int64(5*1024*1024 + 1) 1183 CompareReader(io.LimitReader(fr, nread), io.LimitReader(truth, nread), 0) 1184 } 1185 } 1186 1187 func (s *GoofysTest) TestMkDir(t *C) { 1188 _, err := s.LookUpInode(t, "new_dir/file") 1189 t.Assert(err, Equals, fuse.ENOENT) 1190 1191 dirName := "new_dir" 1192 inode, err := s.getRoot(t).MkDir(dirName) 1193 t.Assert(err, IsNil) 1194 t.Assert(*inode.FullName(), Equals, dirName) 1195 1196 _, err = s.LookUpInode(t, dirName) 1197 t.Assert(err, IsNil) 1198 1199 fileName := "file" 1200 _, fh := inode.Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())}) 1201 1202 err = fh.FlushFile() 1203 t.Assert(err, IsNil) 1204 1205 _, err = s.LookUpInode(t, dirName+"/"+fileName) 1206 t.Assert(err, IsNil) 1207 } 1208 1209 func (s *GoofysTest) TestRmDir(t *C) { 1210 root := s.getRoot(t) 1211 1212 err := root.RmDir("dir1") 1213 t.Assert(err, Equals, fuse.ENOTEMPTY) 1214 1215 err = root.RmDir("dir2") 1216 t.Assert(err, Equals, fuse.ENOTEMPTY) 1217 1218 err = root.RmDir("empty_dir") 1219 t.Assert(err, IsNil) 1220 1221 } 1222 1223 func (s *GoofysTest) TestRenamePreserveMetadata(t *C) { 1224 if _, ok := s.cloud.(*ADLv1); ok { 1225 t.Skip("ADLv1 doesn't support metadata") 1226 } 1227 root := s.getRoot(t) 1228 1229 from, to 
:= "file1", "new_file" 1230 1231 metadata := make(map[string]*string) 1232 metadata["foo"] = aws.String("bar") 1233 1234 _, err := s.cloud.CopyBlob(&CopyBlobInput{ 1235 Source: from, 1236 Destination: from, 1237 Metadata: metadata, 1238 }) 1239 t.Assert(err, IsNil) 1240 1241 err = root.Rename(from, root, to) 1242 t.Assert(err, IsNil) 1243 1244 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: to}) 1245 t.Assert(err, IsNil) 1246 t.Assert(resp.Metadata["foo"], NotNil) 1247 t.Assert(*resp.Metadata["foo"], Equals, "bar") 1248 } 1249 1250 func (s *GoofysTest) TestRenameLarge(t *C) { 1251 fileSize := int64(2 * 1024 * 1024 * 1024) 1252 // AWS S3 can timeout when renaming large file 1253 if _, ok := s.cloud.(*S3Backend); ok && s.emulator { 1254 // S3proxy runs out of memory on truly large files. We 1255 // want to use a large file to test timeout issues 1256 // which wouldn't happen on s3proxy anyway 1257 fileSize = 21 * 1024 * 1024 1258 } 1259 1260 s.testWriteFile(t, "large_file", fileSize, 128*1024) 1261 1262 root := s.getRoot(t) 1263 1264 from, to := "large_file", "large_file2" 1265 err := root.Rename(from, root, to) 1266 t.Assert(err, IsNil) 1267 } 1268 1269 func (s *GoofysTest) TestRenameToExisting(t *C) { 1270 root := s.getRoot(t) 1271 1272 // cache these 2 files first 1273 _, err := s.LookUpInode(t, "file1") 1274 t.Assert(err, IsNil) 1275 1276 _, err = s.LookUpInode(t, "file2") 1277 t.Assert(err, IsNil) 1278 1279 err = s.fs.Rename(nil, &fuseops.RenameOp{ 1280 OldParent: root.Id, 1281 NewParent: root.Id, 1282 OldName: "file1", 1283 NewName: "file2", 1284 }) 1285 t.Assert(err, IsNil) 1286 1287 file1 := root.findChild("file1") 1288 t.Assert(file1, IsNil) 1289 1290 file2 := root.findChild("file2") 1291 t.Assert(file2, NotNil) 1292 t.Assert(*file2.Name, Equals, "file2") 1293 } 1294 1295 func (s *GoofysTest) TestBackendListPagination(t *C) { 1296 if _, ok := s.cloud.(*ADLv1); ok { 1297 t.Skip("ADLv1 doesn't have pagination") 1298 } 1299 if s.azurite { 1300 // 
https://github.com/Azure/Azurite/issues/262 1301 t.Skip("Azurite doesn't support pagination") 1302 } 1303 1304 var itemsPerPage int 1305 switch s.cloud.Delegate().(type) { 1306 case *S3Backend, *GCS3: 1307 itemsPerPage = 1000 1308 case *AZBlob, *ADLv2: 1309 itemsPerPage = 5000 1310 case *GCSBackend: 1311 itemsPerPage = 1000 1312 default: 1313 t.Fatalf("unknown backend: %T", s.cloud) 1314 } 1315 1316 root := s.getRoot(t) 1317 root.dir.mountPrefix = "this_test/" 1318 1319 blobs := make(map[string]*string) 1320 expect := make([]string, 0) 1321 for i := 0; i < itemsPerPage+1; i++ { 1322 b := fmt.Sprintf("%08v", i) 1323 blobs["this_test/"+b] = nil 1324 expect = append(expect, b) 1325 } 1326 1327 switch s.cloud.(type) { 1328 case *ADLv1, *ADLv2: 1329 // these backends don't support parallel delete so I 1330 // am doing this here 1331 defer func() { 1332 var wg sync.WaitGroup 1333 1334 for b, _ := range blobs { 1335 SmallActionsGate.Take(1, true) 1336 wg.Add(1) 1337 1338 go func(key string) { 1339 // ignore the error here, 1340 // anything we didn't cleanup 1341 // will be handled by teardown 1342 _, _ = s.cloud.DeleteBlob(&DeleteBlobInput{key}) 1343 SmallActionsGate.Return(1) 1344 wg.Done() 1345 }(b) 1346 } 1347 1348 wg.Wait() 1349 }() 1350 } 1351 1352 s.setupBlobs(s.cloud, t, blobs) 1353 1354 dh := root.OpenDir() 1355 defer dh.CloseDir() 1356 1357 children := namesOf(s.readDirFully(t, dh)) 1358 t.Assert(children, DeepEquals, expect) 1359 } 1360 1361 func (s *GoofysTest) TestBackendListPrefix(t *C) { 1362 res, err := s.cloud.ListBlobs(&ListBlobsInput{ 1363 Prefix: PString("random"), 1364 Delimiter: PString("/"), 1365 }) 1366 t.Assert(err, IsNil) 1367 t.Assert(len(res.Prefixes), Equals, 0) 1368 t.Assert(len(res.Items), Equals, 0) 1369 1370 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1371 Prefix: PString("empty_dir"), 1372 Delimiter: PString("/"), 1373 }) 1374 t.Assert(err, IsNil) 1375 t.Assert(len(res.Prefixes), Not(Equals), 0) 1376 t.Assert(*res.Prefixes[0].Prefix, 
Equals, "empty_dir/") 1377 t.Assert(len(res.Items), Equals, 0) 1378 1379 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1380 Prefix: PString("empty_dir/"), 1381 Delimiter: PString("/"), 1382 }) 1383 t.Assert(err, IsNil) 1384 t.Assert(len(res.Prefixes), Equals, 0) 1385 t.Assert(len(res.Items), Equals, 1) 1386 t.Assert(*res.Items[0].Key, Equals, "empty_dir/") 1387 1388 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1389 Prefix: PString("file1"), 1390 Delimiter: PString("/"), 1391 }) 1392 t.Assert(err, IsNil) 1393 t.Assert(len(res.Prefixes), Equals, 0) 1394 t.Assert(len(res.Items), Equals, 1) 1395 t.Assert(*res.Items[0].Key, Equals, "file1") 1396 1397 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1398 Prefix: PString("file1/"), 1399 Delimiter: PString("/"), 1400 }) 1401 t.Assert(err, IsNil) 1402 t.Assert(len(res.Prefixes), Equals, 0) 1403 t.Assert(len(res.Items), Equals, 0) 1404 1405 // ListBlobs: 1406 // - Case1: If the prefix foo/ is not added explicitly, then ListBlobs foo/ might or might not return foo/. 1407 // In the test setup dir2 is not expliticly created. 1408 // - Case2: Else, ListBlobs foo/ must return foo/ 1409 // In the test setup dir2/dir3 is expliticly created. 1410 1411 // ListBlobs:Case1 1412 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1413 Prefix: PString("dir2/"), 1414 Delimiter: PString("/"), 1415 }) 1416 t.Assert(err, IsNil) 1417 t.Assert(len(res.Prefixes), Equals, 1) 1418 t.Assert(*res.Prefixes[0].Prefix, Equals, "dir2/dir3/") 1419 if len(res.Items) == 1 { 1420 // azblob(with hierarchial ns on), adlv1, adlv2. 
1421 t.Assert(*res.Items[0].Key, Equals, "dir2/") 1422 } else { 1423 // s3, azblob(with hierarchial ns off) 1424 t.Assert(len(res.Items), Equals, 0) 1425 } 1426 1427 // ListBlobs:Case2 1428 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1429 Prefix: PString("dir2/dir3/"), 1430 Delimiter: PString("/"), 1431 }) 1432 t.Assert(err, IsNil) 1433 t.Assert(len(res.Prefixes), Equals, 0) 1434 t.Assert(len(res.Items), Equals, 2) 1435 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/") 1436 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4") 1437 1438 // ListBlobs:Case1 1439 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1440 Prefix: PString("dir2/"), 1441 }) 1442 t.Assert(err, IsNil) 1443 t.Assert(len(res.Prefixes), Equals, 0) 1444 if len(res.Items) == 3 { 1445 // azblob(with hierarchial ns on), adlv1, adlv2. 1446 t.Assert(*res.Items[0].Key, Equals, "dir2/") 1447 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/") 1448 t.Assert(*res.Items[2].Key, Equals, "dir2/dir3/file4") 1449 } else { 1450 // s3, azblob(with hierarchial ns off) 1451 t.Assert(len(res.Items), Equals, 2) 1452 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/") 1453 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4") 1454 } 1455 1456 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1457 Prefix: PString("dir2/dir3/file4"), 1458 }) 1459 t.Assert(err, IsNil) 1460 t.Assert(len(res.Prefixes), Equals, 0) 1461 t.Assert(len(res.Items), Equals, 1) 1462 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/file4") 1463 } 1464 1465 func (s *GoofysTest) TestRenameDir(t *C) { 1466 s.fs.flags.StatCacheTTL = 0 1467 1468 root := s.getRoot(t) 1469 1470 err := root.Rename("empty_dir", root, "dir1") 1471 t.Assert(err, Equals, fuse.ENOTEMPTY) 1472 1473 err = root.Rename("empty_dir", root, "new_dir") 1474 t.Assert(err, IsNil) 1475 1476 dir2, err := s.LookUpInode(t, "dir2") 1477 t.Assert(err, IsNil) 1478 t.Assert(dir2, NotNil) 1479 1480 _, err = s.LookUpInode(t, "new_dir2") 1481 t.Assert(err, Equals, fuse.ENOENT) 1482 1483 err = 
s.fs.Rename(nil, &fuseops.RenameOp{ 1484 OldParent: root.Id, 1485 NewParent: root.Id, 1486 OldName: "dir2", 1487 NewName: "new_dir2", 1488 }) 1489 t.Assert(err, IsNil) 1490 1491 _, err = s.LookUpInode(t, "dir2/dir3") 1492 t.Assert(err, Equals, fuse.ENOENT) 1493 1494 _, err = s.LookUpInode(t, "dir2/dir3/file4") 1495 t.Assert(err, Equals, fuse.ENOENT) 1496 1497 new_dir2, err := s.LookUpInode(t, "new_dir2") 1498 t.Assert(err, IsNil) 1499 t.Assert(new_dir2, NotNil) 1500 t.Assert(dir2.Id, Equals, new_dir2.Id) 1501 1502 old, err := s.LookUpInode(t, "new_dir2/dir3/file4") 1503 t.Assert(err, IsNil) 1504 t.Assert(old, NotNil) 1505 1506 err = s.fs.Rename(nil, &fuseops.RenameOp{ 1507 OldParent: root.Id, 1508 NewParent: root.Id, 1509 OldName: "new_dir2", 1510 NewName: "new_dir3", 1511 }) 1512 t.Assert(err, IsNil) 1513 1514 new, err := s.LookUpInode(t, "new_dir3/dir3/file4") 1515 t.Assert(err, IsNil) 1516 t.Assert(new, NotNil) 1517 t.Assert(old.Id, Equals, new.Id) 1518 1519 _, err = s.LookUpInode(t, "new_dir2/dir3") 1520 t.Assert(err, Equals, fuse.ENOENT) 1521 1522 _, err = s.LookUpInode(t, "new_dir2/dir3/file4") 1523 t.Assert(err, Equals, fuse.ENOENT) 1524 } 1525 1526 func (s *GoofysTest) TestRename(t *C) { 1527 root := s.getRoot(t) 1528 1529 from, to := "empty_dir", "file1" 1530 err := root.Rename(from, root, to) 1531 t.Assert(err, Equals, fuse.ENOTDIR) 1532 1533 from, to = "file1", "empty_dir" 1534 err = root.Rename(from, root, to) 1535 t.Assert(err, Equals, syscall.EISDIR) 1536 1537 from, to = "file1", "new_file" 1538 err = root.Rename(from, root, to) 1539 t.Assert(err, IsNil) 1540 1541 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to}) 1542 t.Assert(err, IsNil) 1543 1544 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from}) 1545 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 1546 1547 from, to = "file3", "new_file2" 1548 dir, _ := s.LookUpInode(t, "dir1") 1549 err = dir.Rename(from, root, to) 1550 t.Assert(err, IsNil) 1551 1552 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: 
to}) 1553 t.Assert(err, IsNil) 1554 1555 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from}) 1556 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 1557 1558 from, to = "no_such_file", "new_file" 1559 err = root.Rename(from, root, to) 1560 t.Assert(err, Equals, fuse.ENOENT) 1561 1562 if s3, ok := s.cloud.Delegate().(*S3Backend); ok { 1563 if !hasEnv("GCS") { 1564 // not really rename but can be used by rename 1565 from, to = s.fs.bucket+"/file2", "new_file" 1566 _, err = s3.copyObjectMultipart(int64(len("file2")), from, to, "", nil, nil, nil) 1567 t.Assert(err, IsNil) 1568 } 1569 } 1570 } 1571 1572 func (s *GoofysTest) TestConcurrentRefDeref(t *C) { 1573 root := s.getRoot(t) 1574 1575 lookupOp := fuseops.LookUpInodeOp{ 1576 Parent: root.Id, 1577 Name: "file1", 1578 } 1579 1580 for i := 0; i < 20; i++ { 1581 err := s.fs.LookUpInode(nil, &lookupOp) 1582 t.Assert(err, IsNil) 1583 1584 var wg sync.WaitGroup 1585 1586 wg.Add(2) 1587 go func() { 1588 // we want to yield to the forget goroutine so that it's run first 1589 // to trigger this bug 1590 if i%2 == 0 { 1591 runtime.Gosched() 1592 } 1593 s.fs.LookUpInode(nil, &lookupOp) 1594 wg.Done() 1595 }() 1596 go func() { 1597 s.fs.ForgetInode(nil, &fuseops.ForgetInodeOp{ 1598 Inode: lookupOp.Entry.Child, 1599 N: 1, 1600 }) 1601 wg.Done() 1602 }() 1603 1604 wg.Wait() 1605 } 1606 } 1607 1608 func hasEnv(env string) bool { 1609 v := os.Getenv(env) 1610 1611 return !(v == "" || v == "0" || v == "false") 1612 } 1613 1614 func isTravis() bool { 1615 return hasEnv("TRAVIS") 1616 } 1617 1618 func isCatfs() bool { 1619 return hasEnv("CATFS") 1620 } 1621 1622 func (s *GoofysTest) mount(t *C, mountPoint string) { 1623 err := os.MkdirAll(mountPoint, 0700) 1624 t.Assert(err, IsNil) 1625 1626 server := fuseutil.NewFileSystemServer(s.fs) 1627 1628 if isCatfs() { 1629 s.fs.flags.MountOptions = make(map[string]string) 1630 s.fs.flags.MountOptions["allow_other"] = "" 1631 } 1632 1633 // Mount the file system. 
1634 mountCfg := &fuse.MountConfig{ 1635 FSName: s.fs.bucket, 1636 Options: s.fs.flags.MountOptions, 1637 ErrorLogger: GetStdLogger(NewLogger("fuse"), logrus.ErrorLevel), 1638 DisableWritebackCaching: true, 1639 } 1640 mountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel) 1641 1642 _, err = fuse.Mount(mountPoint, server, mountCfg) 1643 t.Assert(err, IsNil) 1644 1645 if isCatfs() { 1646 cacheDir := mountPoint + "-cache" 1647 err := os.MkdirAll(cacheDir, 0700) 1648 t.Assert(err, IsNil) 1649 1650 catfs := exec.Command("catfs", "--test", "-ononempty", "--", mountPoint, cacheDir, mountPoint) 1651 _, err = catfs.Output() 1652 if err != nil { 1653 if ee, ok := err.(*exec.ExitError); ok { 1654 panic(ee.Stderr) 1655 } 1656 } 1657 1658 catfs = exec.Command("catfs", "-ononempty", "--", mountPoint, cacheDir, mountPoint) 1659 1660 if isTravis() { 1661 logger := NewLogger("catfs") 1662 lvl := logrus.InfoLevel 1663 logger.Formatter.(*LogHandle).Lvl = &lvl 1664 w := logger.Writer() 1665 1666 catfs.Stdout = w 1667 catfs.Stderr = w 1668 1669 catfs.Env = append(catfs.Env, "RUST_LOG=debug") 1670 } 1671 1672 err = catfs.Start() 1673 t.Assert(err, IsNil) 1674 1675 time.Sleep(time.Second) 1676 } 1677 } 1678 1679 func (s *GoofysTest) umount(t *C, mountPoint string) { 1680 var err error 1681 for i := 0; i < 10; i++ { 1682 err = fuse.Unmount(mountPoint) 1683 if err != nil { 1684 time.Sleep(100 * time.Millisecond) 1685 } else { 1686 break 1687 } 1688 } 1689 t.Assert(err, IsNil) 1690 1691 os.Remove(mountPoint) 1692 if isCatfs() { 1693 cacheDir := mountPoint + "-cache" 1694 os.Remove(cacheDir) 1695 } 1696 } 1697 1698 func (s *GoofysTest) runFuseTest(t *C, mountPoint string, umount bool, cmdArgs ...string) { 1699 s.mount(t, mountPoint) 1700 1701 if umount { 1702 defer s.umount(t, mountPoint) 1703 } 1704 1705 // if command starts with ./ or ../ then we are executing a 1706 // relative path and cannot do chdir 1707 chdir := cmdArgs[0][0] != '.' 
1708 1709 cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) 1710 cmd.Env = append(cmd.Env, os.Environ()...) 1711 cmd.Env = append(cmd.Env, "FAST=true") 1712 cmd.Env = append(cmd.Env, "CLEANUP=false") 1713 1714 if isTravis() { 1715 logger := NewLogger("test") 1716 lvl := logrus.InfoLevel 1717 logger.Formatter.(*LogHandle).Lvl = &lvl 1718 w := logger.Writer() 1719 1720 cmd.Stdout = w 1721 cmd.Stderr = w 1722 } 1723 1724 if chdir { 1725 oldCwd, err := os.Getwd() 1726 t.Assert(err, IsNil) 1727 1728 err = os.Chdir(mountPoint) 1729 t.Assert(err, IsNil) 1730 1731 defer os.Chdir(oldCwd) 1732 } 1733 1734 err := cmd.Run() 1735 t.Assert(err, IsNil) 1736 } 1737 1738 func (s *GoofysTest) TestFuse(t *C) { 1739 mountPoint := "/tmp/mnt" + s.fs.bucket 1740 1741 s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint) 1742 } 1743 1744 func (s *GoofysTest) TestFuseWithTTL(t *C) { 1745 s.fs.flags.StatCacheTTL = 60 * 1000 * 1000 * 1000 1746 mountPoint := "/tmp/mnt" + s.fs.bucket 1747 1748 s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint) 1749 } 1750 1751 func (s *GoofysTest) TestCheap(t *C) { 1752 s.fs.flags.Cheap = true 1753 s.TestLookUpInode(t) 1754 s.TestWriteLargeFile(t) 1755 } 1756 1757 func (s *GoofysTest) TestExplicitDir(t *C) { 1758 s.fs.flags.ExplicitDir = true 1759 s.testExplicitDir(t) 1760 } 1761 1762 func (s *GoofysTest) TestExplicitDirAndCheap(t *C) { 1763 s.fs.flags.ExplicitDir = true 1764 s.fs.flags.Cheap = true 1765 s.testExplicitDir(t) 1766 } 1767 1768 func (s *GoofysTest) testExplicitDir(t *C) { 1769 if s.cloud.Capabilities().DirBlob { 1770 t.Skip("only for backends without dir blob") 1771 } 1772 1773 _, err := s.LookUpInode(t, "file1") 1774 t.Assert(err, IsNil) 1775 1776 _, err = s.LookUpInode(t, "fileNotFound") 1777 t.Assert(err, Equals, fuse.ENOENT) 1778 1779 // dir1/ doesn't exist so we shouldn't be able to see it 1780 _, err = s.LookUpInode(t, "dir1/file3") 1781 t.Assert(err, Equals, fuse.ENOENT) 1782 1783 _, err = 
s.LookUpInode(t, "dir4/file5") 1784 t.Assert(err, IsNil) 1785 1786 _, err = s.LookUpInode(t, "empty_dir") 1787 t.Assert(err, IsNil) 1788 } 1789 1790 func (s *GoofysTest) TestBenchLs(t *C) { 1791 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1792 s.fs.flags.StatCacheTTL = 1 * time.Minute 1793 mountPoint := "/tmp/mnt" + s.fs.bucket 1794 s.setUpTestTimeout(t, 20*time.Minute) 1795 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "ls") 1796 } 1797 1798 func (s *GoofysTest) TestBenchCreate(t *C) { 1799 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1800 s.fs.flags.StatCacheTTL = 1 * time.Minute 1801 mountPoint := "/tmp/mnt" + s.fs.bucket 1802 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create") 1803 } 1804 1805 func (s *GoofysTest) TestBenchCreateParallel(t *C) { 1806 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1807 s.fs.flags.StatCacheTTL = 1 * time.Minute 1808 mountPoint := "/tmp/mnt" + s.fs.bucket 1809 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create_parallel") 1810 } 1811 1812 func (s *GoofysTest) TestBenchIO(t *C) { 1813 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1814 s.fs.flags.StatCacheTTL = 1 * time.Minute 1815 mountPoint := "/tmp/mnt" + s.fs.bucket 1816 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "io") 1817 } 1818 1819 func (s *GoofysTest) TestBenchFindTree(t *C) { 1820 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1821 s.fs.flags.StatCacheTTL = 1 * time.Minute 1822 mountPoint := "/tmp/mnt" + s.fs.bucket 1823 1824 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "find") 1825 } 1826 1827 func (s *GoofysTest) TestIssue231(t *C) { 1828 if isTravis() { 1829 t.Skip("disable in travis, not sure if it has enough memory") 1830 } 1831 mountPoint := "/tmp/mnt" + s.fs.bucket 1832 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue231") 1833 } 1834 1835 func (s *GoofysTest) TestChmod(t *C) { 1836 root := 
s.getRoot(t) 1837 1838 lookupOp := fuseops.LookUpInodeOp{ 1839 Parent: root.Id, 1840 Name: "file1", 1841 } 1842 1843 err := s.fs.LookUpInode(nil, &lookupOp) 1844 t.Assert(err, IsNil) 1845 1846 targetMode := os.FileMode(0777) 1847 setOp := fuseops.SetInodeAttributesOp{Inode: lookupOp.Entry.Child, Mode: &targetMode} 1848 1849 err = s.fs.SetInodeAttributes(s.ctx, &setOp) 1850 t.Assert(err, IsNil) 1851 t.Assert(setOp.Attributes, NotNil) 1852 } 1853 1854 func (s *GoofysTest) TestIssue64(t *C) { 1855 /* 1856 mountPoint := "/tmp/mnt" + s.fs.bucket 1857 log.Level = logrus.DebugLevel 1858 1859 err := os.MkdirAll(mountPoint, 0700) 1860 t.Assert(err, IsNil) 1861 1862 defer os.Remove(mountPoint) 1863 1864 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue64") 1865 */ 1866 } 1867 1868 func (s *GoofysTest) TestIssue69Fuse(t *C) { 1869 s.fs.flags.StatCacheTTL = 0 1870 1871 mountPoint := "/tmp/mnt" + s.fs.bucket 1872 1873 s.mount(t, mountPoint) 1874 1875 defer func() { 1876 err := os.Chdir("/") 1877 t.Assert(err, IsNil) 1878 1879 s.umount(t, mountPoint) 1880 }() 1881 1882 err := os.Chdir(mountPoint) 1883 t.Assert(err, IsNil) 1884 1885 _, err = os.Stat("dir1") 1886 t.Assert(err, IsNil) 1887 1888 err = os.Remove("dir1/file3") 1889 t.Assert(err, IsNil) 1890 1891 // don't really care about error code, but it should be a PathError 1892 os.Stat("dir1") 1893 os.Stat("dir1") 1894 } 1895 1896 func (s *GoofysTest) TestGetMimeType(t *C) { 1897 // option to use mime type not turned on 1898 mime := s.fs.flags.GetMimeType("foo.css") 1899 t.Assert(mime, IsNil) 1900 1901 s.fs.flags.UseContentType = true 1902 1903 mime = s.fs.flags.GetMimeType("foo.css") 1904 t.Assert(mime, NotNil) 1905 t.Assert(*mime, Equals, "text/css") 1906 1907 mime = s.fs.flags.GetMimeType("foo") 1908 t.Assert(mime, IsNil) 1909 1910 mime = s.fs.flags.GetMimeType("foo.") 1911 t.Assert(mime, IsNil) 1912 1913 mime = s.fs.flags.GetMimeType("foo.unknownExtension") 1914 t.Assert(mime, IsNil) 1915 } 
1916 1917 func (s *GoofysTest) TestPutMimeType(t *C) { 1918 if _, ok := s.cloud.(*ADLv1); ok { 1919 // ADLv1 doesn't support content-type 1920 t.Skip("ADLv1 doesn't support content-type") 1921 } 1922 1923 s.fs.flags.UseContentType = true 1924 1925 root := s.getRoot(t) 1926 jpg := "test.jpg" 1927 jpg2 := "test2.jpg" 1928 file := "test" 1929 1930 s.testWriteFile(t, jpg, 10, 128) 1931 1932 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: jpg}) 1933 t.Assert(err, IsNil) 1934 t.Assert(*resp.ContentType, Equals, "image/jpeg") 1935 1936 err = root.Rename(jpg, root, file) 1937 t.Assert(err, IsNil) 1938 1939 resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: file}) 1940 t.Assert(err, IsNil) 1941 t.Assert(*resp.ContentType, Equals, "image/jpeg") 1942 1943 err = root.Rename(file, root, jpg2) 1944 t.Assert(err, IsNil) 1945 1946 resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: jpg2}) 1947 t.Assert(err, IsNil) 1948 t.Assert(*resp.ContentType, Equals, "image/jpeg") 1949 } 1950 1951 func (s *GoofysTest) TestBucketPrefixSlash(t *C) { 1952 s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2", s.fs.flags) 1953 t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/") 1954 1955 s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2///", s.fs.flags) 1956 t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/") 1957 } 1958 1959 func (s *GoofysTest) TestFuseWithPrefix(t *C) { 1960 mountPoint := "/tmp/mnt" + s.fs.bucket 1961 1962 s.fs = NewGoofys(context.Background(), s.fs.bucket+":testprefix", s.fs.flags) 1963 1964 s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint) 1965 } 1966 1967 func (s *GoofysTest) TestRenameCache(t *C) { 1968 root := s.getRoot(t) 1969 s.fs.flags.StatCacheTTL = 60 * 1000 * 1000 * 1000 1970 1971 lookupOp1 := fuseops.LookUpInodeOp{ 1972 Parent: root.Id, 1973 Name: "file1", 1974 } 1975 1976 lookupOp2 := lookupOp1 1977 lookupOp2.Name = "newfile" 1978 1979 err := s.fs.LookUpInode(nil, &lookupOp1) 1980 t.Assert(err, IsNil) 1981 1982 err = 
s.fs.LookUpInode(nil, &lookupOp2) 1983 t.Assert(err, Equals, fuse.ENOENT) 1984 1985 renameOp := fuseops.RenameOp{ 1986 OldParent: root.Id, 1987 NewParent: root.Id, 1988 OldName: "file1", 1989 NewName: "newfile", 1990 } 1991 1992 err = s.fs.Rename(nil, &renameOp) 1993 t.Assert(err, IsNil) 1994 1995 lookupOp1.Entry = fuseops.ChildInodeEntry{} 1996 lookupOp2.Entry = fuseops.ChildInodeEntry{} 1997 1998 err = s.fs.LookUpInode(nil, &lookupOp1) 1999 t.Assert(err, Equals, fuse.ENOENT) 2000 2001 err = s.fs.LookUpInode(nil, &lookupOp2) 2002 t.Assert(err, IsNil) 2003 } 2004 2005 func (s *GoofysTest) anonymous(t *C) { 2006 // On azure this fails because we re-create the bucket with 2007 // the same name right away. And well anonymous access is not 2008 // implemented yet in our azure backend anyway 2009 var s3 *S3Backend 2010 var ok bool 2011 if s3, ok = s.cloud.Delegate().(*S3Backend); !ok { 2012 t.Skip("only for S3") 2013 } 2014 2015 err := s.deleteBucket(s.cloud) 2016 t.Assert(err, IsNil) 2017 2018 // use a different bucket name to prevent 409 Conflict from 2019 // delete bucket above 2020 s.fs.bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16) 2021 s3.bucket = s.fs.bucket 2022 s.setupDefaultEnv(t, true) 2023 2024 s.fs = NewGoofys(context.Background(), s.fs.bucket, s.fs.flags) 2025 t.Assert(s.fs, NotNil) 2026 2027 // should have auto-detected by S3 backend 2028 cloud := s.getRoot(t).dir.cloud 2029 t.Assert(cloud, NotNil) 2030 s3, ok = cloud.Delegate().(*S3Backend) 2031 t.Assert(ok, Equals, true) 2032 2033 s3.awsConfig.Credentials = credentials.AnonymousCredentials 2034 s3.newS3() 2035 } 2036 2037 func (s *GoofysTest) disableS3() { 2038 time.Sleep(1 * time.Second) // wait for any background goroutines to finish 2039 dir := s.fs.inodes[fuseops.RootInodeID].dir 2040 dir.cloud = StorageBackendInitError{ 2041 fmt.Errorf("cloud disabled"), 2042 *dir.cloud.Capabilities(), 2043 } 2044 } 2045 2046 func (s *GoofysTest) TestWriteAnonymous(t *C) { 2047 s.anonymous(t) 2048 
s.fs.flags.StatCacheTTL = 1 * time.Minute 2049 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2050 2051 fileName := "test" 2052 2053 createOp := fuseops.CreateFileOp{ 2054 Parent: s.getRoot(t).Id, 2055 Name: fileName, 2056 } 2057 2058 err := s.fs.CreateFile(s.ctx, &createOp) 2059 t.Assert(err, IsNil) 2060 2061 err = s.fs.FlushFile(s.ctx, &fuseops.FlushFileOp{ 2062 Handle: createOp.Handle, 2063 Inode: createOp.Entry.Child, 2064 }) 2065 t.Assert(err, Equals, syscall.EACCES) 2066 2067 err = s.fs.ReleaseFileHandle(s.ctx, &fuseops.ReleaseFileHandleOp{Handle: createOp.Handle}) 2068 t.Assert(err, IsNil) 2069 2070 err = s.fs.LookUpInode(s.ctx, &fuseops.LookUpInodeOp{ 2071 Parent: s.getRoot(t).Id, 2072 Name: fileName, 2073 }) 2074 t.Assert(err, Equals, fuse.ENOENT) 2075 // BUG! the file shouldn't exist, see test below for comment, 2076 // this behaves as expected only because we are bypassing 2077 // linux vfs in this test 2078 } 2079 2080 func (s *GoofysTest) TestWriteAnonymousFuse(t *C) { 2081 s.anonymous(t) 2082 s.fs.flags.StatCacheTTL = 1 * time.Minute 2083 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2084 2085 mountPoint := "/tmp/mnt" + s.fs.bucket 2086 2087 s.mount(t, mountPoint) 2088 defer s.umount(t, mountPoint) 2089 2090 err := ioutil.WriteFile(mountPoint+"/test", []byte(""), 0600) 2091 t.Assert(err, NotNil) 2092 pathErr, ok := err.(*os.PathError) 2093 t.Assert(ok, Equals, true) 2094 t.Assert(pathErr.Err, Equals, syscall.EACCES) 2095 2096 _, err = os.Stat(mountPoint + "/test") 2097 t.Assert(err, IsNil) 2098 // BUG! 
the file shouldn't exist, the condition below should hold instead 2099 // see comment in Goofys.FlushFile 2100 // pathErr, ok = err.(*os.PathError) 2101 // t.Assert(ok, Equals, true) 2102 // t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2103 2104 _, err = ioutil.ReadFile(mountPoint + "/test") 2105 t.Assert(err, NotNil) 2106 pathErr, ok = err.(*os.PathError) 2107 t.Assert(ok, Equals, true) 2108 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2109 2110 // reading the file and getting ENOENT causes the kernel to 2111 // invalidate the entry, failing at open is not sufficient, we 2112 // have to fail at read (which means that if the application 2113 // uses splice(2) it won't get to us, so this wouldn't work 2114 _, err = os.Stat(mountPoint + "/test") 2115 t.Assert(err, NotNil) 2116 pathErr, ok = err.(*os.PathError) 2117 t.Assert(ok, Equals, true) 2118 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2119 } 2120 2121 func (s *GoofysTest) TestWriteSyncWriteFuse(t *C) { 2122 mountPoint := "/tmp/mnt" + s.fs.bucket 2123 2124 s.mount(t, mountPoint) 2125 defer s.umount(t, mountPoint) 2126 2127 var f *os.File 2128 var n int 2129 var err error 2130 2131 defer func() { 2132 if err != nil { 2133 f.Close() 2134 } 2135 }() 2136 2137 f, err = os.Create(mountPoint + "/TestWriteSyncWrite") 2138 t.Assert(err, IsNil) 2139 2140 n, err = f.Write([]byte("hello\n")) 2141 t.Assert(err, IsNil) 2142 t.Assert(n, Equals, 6) 2143 2144 err = f.Sync() 2145 t.Assert(err, IsNil) 2146 2147 n, err = f.Write([]byte("world\n")) 2148 t.Assert(err, IsNil) 2149 t.Assert(n, Equals, 6) 2150 2151 err = f.Close() 2152 t.Assert(err, IsNil) 2153 } 2154 2155 func (s *GoofysTest) TestIssue156(t *C) { 2156 _, err := s.LookUpInode(t, "\xae\x8a-") 2157 // S3Proxy and aws s3 return different errors 2158 // https://github.com/andrewgaul/s3proxy/issues/201 2159 t.Assert(err, NotNil) 2160 } 2161 2162 func (s *GoofysTest) TestIssue162(t *C) { 2163 if s.azurite { 2164 t.Skip("https://github.com/Azure/Azurite/issues/221") 2165 } 2166 
2167 params := &PutBlobInput{ 2168 Key: "dir1/lör 006.jpg", 2169 Body: bytes.NewReader([]byte("foo")), 2170 Size: PUInt64(3), 2171 } 2172 _, err := s.cloud.PutBlob(params) 2173 t.Assert(err, IsNil) 2174 2175 dir, err := s.LookUpInode(t, "dir1") 2176 t.Assert(err, IsNil) 2177 2178 err = dir.Rename("lör 006.jpg", dir, "myfile.jpg") 2179 t.Assert(err, IsNil) 2180 2181 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "dir1/myfile.jpg"}) 2182 t.Assert(resp.Size, Equals, uint64(3)) 2183 } 2184 2185 func (s *GoofysTest) TestXAttrGet(t *C) { 2186 if _, ok := s.cloud.(*ADLv1); ok { 2187 t.Skip("ADLv1 doesn't support metadata") 2188 } 2189 2190 _, checkETag := s.cloud.Delegate().(*S3Backend) 2191 xattrPrefix := s.cloud.Capabilities().Name + "." 2192 2193 file1, err := s.LookUpInode(t, "file1") 2194 t.Assert(err, IsNil) 2195 2196 names, err := file1.ListXattr() 2197 t.Assert(err, IsNil) 2198 expectedXattrs := []string{ 2199 xattrPrefix + "etag", 2200 xattrPrefix + "storage-class", 2201 "user.name", 2202 } 2203 sort.Strings(expectedXattrs) 2204 t.Assert(names, DeepEquals, expectedXattrs) 2205 2206 _, err = file1.GetXattr("user.foobar") 2207 t.Assert(err, Equals, unix.ENODATA) 2208 2209 if checkETag { 2210 value, err := file1.GetXattr("s3.etag") 2211 t.Assert(err, IsNil) 2212 // md5sum of "file1" 2213 t.Assert(string(value), Equals, "\"826e8142e6baabe8af779f5f490cf5f5\"") 2214 } 2215 2216 value, err := file1.GetXattr("user.name") 2217 t.Assert(err, IsNil) 2218 t.Assert(string(value), Equals, "file1+/#\x00") 2219 2220 dir1, err := s.LookUpInode(t, "dir1") 2221 t.Assert(err, IsNil) 2222 2223 if !s.cloud.Capabilities().DirBlob { 2224 // implicit dir blobs don't have s3.etag at all 2225 names, err = dir1.ListXattr() 2226 t.Assert(err, IsNil) 2227 t.Assert(len(names), Equals, 0, Commentf("names: %v", names)) 2228 2229 value, err = dir1.GetXattr(xattrPrefix + "etag") 2230 t.Assert(err, Equals, syscall.ENODATA) 2231 } 2232 2233 // list dir1 to populate file3 in cache, then get 
file3's xattr 2234 lookup := fuseops.LookUpInodeOp{ 2235 Parent: fuseops.RootInodeID, 2236 Name: "dir1", 2237 } 2238 err = s.fs.LookUpInode(nil, &lookup) 2239 t.Assert(err, IsNil) 2240 2241 s.readDirIntoCache(t, lookup.Entry.Child) 2242 2243 dir1 = s.fs.inodes[lookup.Entry.Child] 2244 file3 := dir1.findChild("file3") 2245 t.Assert(file3, NotNil) 2246 t.Assert(file3.userMetadata, IsNil) 2247 2248 if checkETag { 2249 value, err = file3.GetXattr("s3.etag") 2250 t.Assert(err, IsNil) 2251 // md5sum of "dir1/file3" 2252 t.Assert(string(value), Equals, "\"5cd67e0e59fb85be91a515afe0f4bb24\"") 2253 } 2254 2255 // ensure that we get the dir blob instead of list 2256 s.fs.flags.Cheap = true 2257 2258 emptyDir2, err := s.LookUpInode(t, "empty_dir2") 2259 t.Assert(err, IsNil) 2260 2261 names, err = emptyDir2.ListXattr() 2262 t.Assert(err, IsNil) 2263 sort.Strings(names) 2264 t.Assert(names, DeepEquals, expectedXattrs) 2265 2266 emptyDir, err := s.LookUpInode(t, "empty_dir") 2267 t.Assert(err, IsNil) 2268 2269 if checkETag { 2270 value, err = emptyDir.GetXattr("s3.etag") 2271 t.Assert(err, IsNil) 2272 // dir blobs are empty 2273 t.Assert(string(value), Equals, "\"d41d8cd98f00b204e9800998ecf8427e\"") 2274 } 2275 2276 // s3proxy doesn't support storage class yet 2277 if hasEnv("AWS") { 2278 cloud := s.getRoot(t).dir.cloud 2279 s3, ok := cloud.Delegate().(*S3Backend) 2280 t.Assert(ok, Equals, true) 2281 s3.config.StorageClass = "STANDARD_IA" 2282 2283 s.testWriteFile(t, "ia", 1, 128*1024) 2284 2285 ia, err := s.LookUpInode(t, "ia") 2286 t.Assert(err, IsNil) 2287 2288 names, err = ia.ListXattr() 2289 t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"}) 2290 2291 value, err = ia.GetXattr("s3.storage-class") 2292 t.Assert(err, IsNil) 2293 // smaller than 128KB falls back to standard 2294 t.Assert(string(value), Equals, "STANDARD") 2295 2296 s.testWriteFile(t, "ia", 128*1024, 128*1024) 2297 time.Sleep(100 * time.Millisecond) 2298 2299 names, err = ia.ListXattr() 2300 
t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"}) 2301 2302 value, err = ia.GetXattr("s3.storage-class") 2303 t.Assert(err, IsNil) 2304 t.Assert(string(value), Equals, "STANDARD_IA") 2305 } 2306 } 2307 2308 func (s *GoofysTest) TestClientForkExec(t *C) { 2309 mountPoint := "/tmp/mnt" + s.fs.bucket 2310 s.mount(t, mountPoint) 2311 defer s.umount(t, mountPoint) 2312 file := mountPoint + "/TestClientForkExec" 2313 2314 // Create new file. 2315 fh, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0600) 2316 t.Assert(err, IsNil) 2317 defer func() { // Defer close file if it's not already closed. 2318 if fh != nil { 2319 fh.Close() 2320 } 2321 }() 2322 // Write to file. 2323 _, err = fh.WriteString("1.1;") 2324 t.Assert(err, IsNil) 2325 // The `Command` is run via fork+exec. 2326 // So all the file descriptors are copied over to the child process. 2327 // The child process 'closes' the files before exiting. This should 2328 // not result in goofys failing file operations invoked from the test. 2329 someCmd := exec.Command("echo", "hello") 2330 err = someCmd.Run() 2331 t.Assert(err, IsNil) 2332 // One more write. 2333 _, err = fh.WriteString("1.2;") 2334 t.Assert(err, IsNil) 2335 // Close file. 2336 err = fh.Close() 2337 t.Assert(err, IsNil) 2338 fh = nil 2339 // Check file content. 2340 content, err := ioutil.ReadFile(file) 2341 t.Assert(err, IsNil) 2342 t.Assert(string(content), Equals, "1.1;1.2;") 2343 2344 // Repeat the same excercise, but now with an existing file. 2345 fh, err = os.OpenFile(file, os.O_RDWR, 0600) 2346 // Write to file. 2347 _, err = fh.WriteString("2.1;") 2348 // fork+exec. 2349 someCmd = exec.Command("echo", "hello") 2350 err = someCmd.Run() 2351 t.Assert(err, IsNil) 2352 // One more write. 2353 _, err = fh.WriteString("2.2;") 2354 t.Assert(err, IsNil) 2355 // Close file. 2356 err = fh.Close() 2357 t.Assert(err, IsNil) 2358 fh = nil 2359 // Verify that the file is updated as per the new write. 
2360 content, err = ioutil.ReadFile(file) 2361 t.Assert(err, IsNil) 2362 t.Assert(string(content), Equals, "2.1;2.2;") 2363 } 2364 2365 func (s *GoofysTest) TestXAttrGetCached(t *C) { 2366 if _, ok := s.cloud.(*ADLv1); ok { 2367 t.Skip("ADLv1 doesn't support metadata") 2368 } 2369 2370 xattrPrefix := s.cloud.Capabilities().Name + "." 2371 2372 s.fs.flags.StatCacheTTL = 1 * time.Minute 2373 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2374 s.readDirIntoCache(t, fuseops.RootInodeID) 2375 s.disableS3() 2376 2377 in, err := s.LookUpInode(t, "file1") 2378 t.Assert(err, IsNil) 2379 t.Assert(in.userMetadata, IsNil) 2380 2381 _, err = in.GetXattr(xattrPrefix + "etag") 2382 t.Assert(err, IsNil) 2383 } 2384 2385 func (s *GoofysTest) TestXAttrCopied(t *C) { 2386 if _, ok := s.cloud.(*ADLv1); ok { 2387 t.Skip("ADLv1 doesn't support metadata") 2388 } 2389 2390 root := s.getRoot(t) 2391 2392 err := root.Rename("file1", root, "file0") 2393 t.Assert(err, IsNil) 2394 2395 in, err := s.LookUpInode(t, "file0") 2396 t.Assert(err, IsNil) 2397 2398 _, err = in.GetXattr("user.name") 2399 t.Assert(err, IsNil) 2400 } 2401 2402 func (s *GoofysTest) TestXAttrRemove(t *C) { 2403 if _, ok := s.cloud.(*ADLv1); ok { 2404 t.Skip("ADLv1 doesn't support metadata") 2405 } 2406 2407 in, err := s.LookUpInode(t, "file1") 2408 t.Assert(err, IsNil) 2409 2410 _, err = in.GetXattr("user.name") 2411 t.Assert(err, IsNil) 2412 2413 err = in.RemoveXattr("user.name") 2414 t.Assert(err, IsNil) 2415 2416 _, err = in.GetXattr("user.name") 2417 t.Assert(err, Equals, syscall.ENODATA) 2418 } 2419 2420 func (s *GoofysTest) TestXAttrFuse(t *C) { 2421 if _, ok := s.cloud.(*ADLv1); ok { 2422 t.Skip("ADLv1 doesn't support metadata") 2423 } 2424 2425 _, checkETag := s.cloud.Delegate().(*S3Backend) 2426 xattrPrefix := s.cloud.Capabilities().Name + "." 
2427 2428 //fuseLog.Level = logrus.DebugLevel 2429 mountPoint := "/tmp/mnt" + s.fs.bucket 2430 s.mount(t, mountPoint) 2431 defer s.umount(t, mountPoint) 2432 2433 expectedXattrs := []string{ 2434 xattrPrefix + "etag", 2435 xattrPrefix + "storage-class", 2436 "user.name", 2437 } 2438 sort.Strings(expectedXattrs) 2439 2440 var expectedXattrsStr string 2441 for _, x := range expectedXattrs { 2442 expectedXattrsStr += x + "\x00" 2443 } 2444 var buf [1024]byte 2445 2446 // error if size is too small (but not zero) 2447 _, err := unix.Listxattr(mountPoint+"/file1", buf[:1]) 2448 t.Assert(err, Equals, unix.ERANGE) 2449 2450 // 0 len buffer means interogate the size of buffer 2451 nbytes, err := unix.Listxattr(mountPoint+"/file1", nil) 2452 t.Assert(err, Equals, nil) 2453 t.Assert(nbytes, Equals, len(expectedXattrsStr)) 2454 2455 nbytes, err = unix.Listxattr(mountPoint+"/file1", buf[:nbytes]) 2456 t.Assert(err, IsNil) 2457 t.Assert(nbytes, Equals, len(expectedXattrsStr)) 2458 t.Assert(string(buf[:nbytes]), Equals, expectedXattrsStr) 2459 2460 _, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:1]) 2461 t.Assert(err, Equals, unix.ERANGE) 2462 2463 nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", nil) 2464 t.Assert(err, IsNil) 2465 t.Assert(nbytes, Equals, 9) 2466 2467 nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:nbytes]) 2468 t.Assert(err, IsNil) 2469 t.Assert(nbytes, Equals, 9) 2470 t.Assert(string(buf[:nbytes]), Equals, "file1+/#\x00") 2471 2472 if !s.cloud.Capabilities().DirBlob { 2473 // dir1 has no xattrs 2474 nbytes, err = unix.Listxattr(mountPoint+"/dir1", nil) 2475 t.Assert(err, IsNil) 2476 t.Assert(nbytes, Equals, 0) 2477 2478 nbytes, err = unix.Listxattr(mountPoint+"/dir1", buf[:1]) 2479 t.Assert(err, IsNil) 2480 t.Assert(nbytes, Equals, 0) 2481 } 2482 2483 if checkETag { 2484 _, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:1]) 2485 t.Assert(err, Equals, unix.ERANGE) 2486 2487 nbytes, err = 
unix.Getxattr(mountPoint+"/file1", "s3.etag", nil) 2488 t.Assert(err, IsNil) 2489 // 32 bytes md5 plus quotes 2490 t.Assert(nbytes, Equals, 34) 2491 2492 nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:nbytes]) 2493 t.Assert(err, IsNil) 2494 t.Assert(nbytes, Equals, 34) 2495 t.Assert(string(buf[:nbytes]), Equals, 2496 "\"826e8142e6baabe8af779f5f490cf5f5\"") 2497 } 2498 } 2499 2500 func (s *GoofysTest) TestXAttrSet(t *C) { 2501 if _, ok := s.cloud.(*ADLv1); ok { 2502 t.Skip("ADLv1 doesn't support metadata") 2503 } 2504 2505 in, err := s.LookUpInode(t, "file1") 2506 t.Assert(err, IsNil) 2507 2508 err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_REPLACE) 2509 t.Assert(err, Equals, syscall.ENODATA) 2510 2511 err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE) 2512 t.Assert(err, IsNil) 2513 2514 err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE) 2515 t.Assert(err, Equals, syscall.EEXIST) 2516 2517 in, err = s.LookUpInode(t, "file1") 2518 t.Assert(err, IsNil) 2519 2520 value, err := in.GetXattr("user.bar") 2521 t.Assert(err, IsNil) 2522 t.Assert(string(value), Equals, "hello") 2523 2524 value = []byte("file1+%/#\x00") 2525 2526 err = in.SetXattr("user.bar", value, unix.XATTR_REPLACE) 2527 t.Assert(err, IsNil) 2528 2529 in, err = s.LookUpInode(t, "file1") 2530 t.Assert(err, IsNil) 2531 2532 value2, err := in.GetXattr("user.bar") 2533 t.Assert(err, IsNil) 2534 t.Assert(value2, DeepEquals, value) 2535 2536 // setting with flag = 0 always works 2537 err = in.SetXattr("user.bar", []byte("world"), 0) 2538 t.Assert(err, IsNil) 2539 2540 err = in.SetXattr("user.baz", []byte("world"), 0) 2541 t.Assert(err, IsNil) 2542 2543 value, err = in.GetXattr("user.bar") 2544 t.Assert(err, IsNil) 2545 2546 value2, err = in.GetXattr("user.baz") 2547 t.Assert(err, IsNil) 2548 2549 t.Assert(value2, DeepEquals, value) 2550 t.Assert(string(value2), DeepEquals, "world") 2551 2552 err = in.SetXattr("s3.bar", []byte("hello"), unix.XATTR_CREATE) 
2553 t.Assert(err, Equals, syscall.EPERM) 2554 } 2555 2556 func (s *GoofysTest) TestPythonCopyTree(t *C) { 2557 mountPoint := "/tmp/mnt" + s.fs.bucket 2558 2559 s.runFuseTest(t, mountPoint, true, "python", "-c", 2560 "import shutil; shutil.copytree('dir2', 'dir5')", 2561 mountPoint) 2562 } 2563 2564 func (s *GoofysTest) TestCreateRenameBeforeCloseFuse(t *C) { 2565 if s.azurite { 2566 // Azurite returns 400 when copy source doesn't exist 2567 // https://github.com/Azure/Azurite/issues/219 2568 // so our code to ignore ENOENT fails 2569 t.Skip("https://github.com/Azure/Azurite/issues/219") 2570 } 2571 2572 mountPoint := "/tmp/mnt" + s.fs.bucket 2573 2574 s.mount(t, mountPoint) 2575 defer s.umount(t, mountPoint) 2576 2577 from := mountPoint + "/newfile" 2578 to := mountPoint + "/newfile2" 2579 2580 fh, err := os.Create(from) 2581 t.Assert(err, IsNil) 2582 defer func() { 2583 // close the file if the test failed so we can unmount 2584 if fh != nil { 2585 fh.Close() 2586 } 2587 }() 2588 2589 _, err = fh.WriteString("hello world") 2590 t.Assert(err, IsNil) 2591 2592 err = os.Rename(from, to) 2593 t.Assert(err, IsNil) 2594 2595 err = fh.Close() 2596 t.Assert(err, IsNil) 2597 fh = nil 2598 2599 _, err = os.Stat(from) 2600 t.Assert(err, NotNil) 2601 pathErr, ok := err.(*os.PathError) 2602 t.Assert(ok, Equals, true) 2603 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2604 2605 content, err := ioutil.ReadFile(to) 2606 t.Assert(err, IsNil) 2607 t.Assert(string(content), Equals, "hello world") 2608 } 2609 2610 func (s *GoofysTest) TestRenameBeforeCloseFuse(t *C) { 2611 mountPoint := "/tmp/mnt" + s.fs.bucket 2612 2613 s.mount(t, mountPoint) 2614 defer s.umount(t, mountPoint) 2615 2616 from := mountPoint + "/newfile" 2617 to := mountPoint + "/newfile2" 2618 2619 err := ioutil.WriteFile(from, []byte(""), 0600) 2620 t.Assert(err, IsNil) 2621 2622 fh, err := os.OpenFile(from, os.O_WRONLY, 0600) 2623 t.Assert(err, IsNil) 2624 defer func() { 2625 // close the file if the test failed so we 
can unmount 2626 if fh != nil { 2627 fh.Close() 2628 } 2629 }() 2630 2631 _, err = fh.WriteString("hello world") 2632 t.Assert(err, IsNil) 2633 2634 err = os.Rename(from, to) 2635 t.Assert(err, IsNil) 2636 2637 err = fh.Close() 2638 t.Assert(err, IsNil) 2639 fh = nil 2640 2641 _, err = os.Stat(from) 2642 t.Assert(err, NotNil) 2643 pathErr, ok := err.(*os.PathError) 2644 t.Assert(ok, Equals, true) 2645 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2646 2647 content, err := ioutil.ReadFile(to) 2648 t.Assert(err, IsNil) 2649 t.Assert(string(content), Equals, "hello world") 2650 } 2651 2652 func (s *GoofysTest) TestInodeInsert(t *C) { 2653 root := s.getRoot(t) 2654 2655 in := NewInode(s.fs, root, aws.String("2")) 2656 in.Attributes = InodeAttributes{} 2657 root.insertChild(in) 2658 t.Assert(*root.dir.Children[2].Name, Equals, "2") 2659 2660 in = NewInode(s.fs, root, aws.String("1")) 2661 in.Attributes = InodeAttributes{} 2662 root.insertChild(in) 2663 t.Assert(*root.dir.Children[2].Name, Equals, "1") 2664 t.Assert(*root.dir.Children[3].Name, Equals, "2") 2665 2666 in = NewInode(s.fs, root, aws.String("4")) 2667 in.Attributes = InodeAttributes{} 2668 root.insertChild(in) 2669 t.Assert(*root.dir.Children[2].Name, Equals, "1") 2670 t.Assert(*root.dir.Children[3].Name, Equals, "2") 2671 t.Assert(*root.dir.Children[4].Name, Equals, "4") 2672 2673 inode := root.findChild("1") 2674 t.Assert(inode, NotNil) 2675 t.Assert(*inode.Name, Equals, "1") 2676 2677 inode = root.findChild("2") 2678 t.Assert(inode, NotNil) 2679 t.Assert(*inode.Name, Equals, "2") 2680 2681 inode = root.findChild("4") 2682 t.Assert(inode, NotNil) 2683 t.Assert(*inode.Name, Equals, "4") 2684 2685 inode = root.findChild("0") 2686 t.Assert(inode, IsNil) 2687 2688 inode = root.findChild("3") 2689 t.Assert(inode, IsNil) 2690 2691 root.removeChild(root.dir.Children[3]) 2692 root.removeChild(root.dir.Children[2]) 2693 root.removeChild(root.dir.Children[2]) 2694 t.Assert(len(root.dir.Children), Equals, 2) 2695 } 2696 
2697 func (s *GoofysTest) TestReadDirSlurpHeuristic(t *C) { 2698 if _, ok := s.cloud.Delegate().(*S3Backend); !ok { 2699 t.Skip("only for S3") 2700 } 2701 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2702 2703 s.setupBlobs(s.cloud, t, map[string]*string{"dir2isafile": nil}) 2704 2705 root := s.getRoot(t).dir 2706 t.Assert(root.seqOpenDirScore, Equals, uint8(0)) 2707 s.assertEntries(t, s.getRoot(t), []string{ 2708 "dir1", "dir2", "dir2isafile", "dir4", "empty_dir", 2709 "empty_dir2", "file1", "file2", "zero"}) 2710 2711 dir1, err := s.LookUpInode(t, "dir1") 2712 t.Assert(err, IsNil) 2713 dh1 := dir1.OpenDir() 2714 defer dh1.CloseDir() 2715 score := root.seqOpenDirScore 2716 2717 dir2, err := s.LookUpInode(t, "dir2") 2718 t.Assert(err, IsNil) 2719 dh2 := dir2.OpenDir() 2720 defer dh2.CloseDir() 2721 t.Assert(root.seqOpenDirScore, Equals, score+1) 2722 2723 dir3, err := s.LookUpInode(t, "dir4") 2724 t.Assert(err, IsNil) 2725 dh3 := dir3.OpenDir() 2726 defer dh3.CloseDir() 2727 t.Assert(root.seqOpenDirScore, Equals, score+2) 2728 } 2729 2730 func (s *GoofysTest) TestReadDirSlurpSubtree(t *C) { 2731 if _, ok := s.cloud.Delegate().(*S3Backend); !ok { 2732 t.Skip("only for S3") 2733 } 2734 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2735 s.fs.flags.StatCacheTTL = 1 * time.Minute 2736 2737 s.getRoot(t).dir.seqOpenDirScore = 2 2738 in, err := s.LookUpInode(t, "dir2") 2739 t.Assert(err, IsNil) 2740 t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(2)) 2741 2742 s.readDirIntoCache(t, in.Id) 2743 // should have incremented the score 2744 t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(3)) 2745 2746 // reading dir2 should cause dir2/dir3 to have cached readdir 2747 s.disableS3() 2748 2749 in, err = s.LookUpInode(t, "dir2/dir3") 2750 t.Assert(err, IsNil) 2751 2752 s.assertEntries(t, in, []string{"file4"}) 2753 } 2754 2755 func (s *GoofysTest) TestReadDirCached(t *C) { 2756 s.fs.flags.StatCacheTTL = 1 * time.Minute 2757 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2758 
2759 s.getRoot(t).dir.seqOpenDirScore = 2 2760 s.readDirIntoCache(t, fuseops.RootInodeID) 2761 s.disableS3() 2762 2763 dh := s.getRoot(t).OpenDir() 2764 2765 entries := s.readDirFully(t, dh) 2766 dirs := make([]string, 0) 2767 files := make([]string, 0) 2768 noMoreDir := false 2769 2770 for _, en := range entries { 2771 if en.Type == fuseutil.DT_Directory { 2772 t.Assert(noMoreDir, Equals, false) 2773 dirs = append(dirs, en.Name) 2774 } else { 2775 files = append(files, en.Name) 2776 noMoreDir = true 2777 } 2778 } 2779 2780 t.Assert(dirs, DeepEquals, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2"}) 2781 t.Assert(files, DeepEquals, []string{"file1", "file2", "zero"}) 2782 } 2783 2784 func (s *GoofysTest) TestReadDirLookUp(t *C) { 2785 s.getRoot(t).dir.seqOpenDirScore = 2 2786 2787 var wg sync.WaitGroup 2788 for i := 0; i < 10; i++ { 2789 wg.Add(2) 2790 go func() { 2791 defer wg.Done() 2792 s.readDirIntoCache(t, fuseops.RootInodeID) 2793 }() 2794 go func() { 2795 defer wg.Done() 2796 2797 lookup := fuseops.LookUpInodeOp{ 2798 Parent: fuseops.RootInodeID, 2799 Name: "file1", 2800 } 2801 err := s.fs.LookUpInode(nil, &lookup) 2802 t.Assert(err, IsNil) 2803 }() 2804 } 2805 wg.Wait() 2806 } 2807 2808 func (s *GoofysTest) writeSeekWriteFuse(t *C, file string, fh *os.File, first string, second string, third string) { 2809 fi, err := os.Stat(file) 2810 t.Assert(err, IsNil) 2811 2812 defer func() { 2813 // close the file if the test failed so we can unmount 2814 if fh != nil { 2815 fh.Close() 2816 } 2817 }() 2818 2819 _, err = fh.WriteString(first) 2820 t.Assert(err, IsNil) 2821 2822 off, err := fh.Seek(int64(len(second)), 1) 2823 t.Assert(err, IsNil) 2824 t.Assert(off, Equals, int64(len(first)+len(second))) 2825 2826 _, err = fh.WriteString(third) 2827 t.Assert(err, IsNil) 2828 2829 off, err = fh.Seek(int64(len(first)), 0) 2830 t.Assert(err, IsNil) 2831 t.Assert(off, Equals, int64(len(first))) 2832 2833 _, err = fh.WriteString(second) 2834 t.Assert(err, IsNil) 
2835 2836 err = fh.Close() 2837 t.Assert(err, IsNil) 2838 fh = nil 2839 2840 content, err := ioutil.ReadFile(file) 2841 t.Assert(err, IsNil) 2842 t.Assert(string(content), Equals, first+second+third) 2843 2844 fi2, err := os.Stat(file) 2845 t.Assert(err, IsNil) 2846 t.Assert(fi.Mode(), Equals, fi2.Mode()) 2847 } 2848 2849 func (s *GoofysTest) TestWriteSeekWriteFuse(t *C) { 2850 if !isCatfs() { 2851 t.Skip("only works with CATFS=true") 2852 } 2853 2854 mountPoint := "/tmp/mnt" + s.fs.bucket 2855 s.mount(t, mountPoint) 2856 defer s.umount(t, mountPoint) 2857 2858 file := mountPoint + "/newfile" 2859 2860 fh, err := os.Create(file) 2861 t.Assert(err, IsNil) 2862 2863 s.writeSeekWriteFuse(t, file, fh, "hello", " ", "world") 2864 2865 fh, err = os.OpenFile(file, os.O_WRONLY, 0600) 2866 t.Assert(err, IsNil) 2867 2868 s.writeSeekWriteFuse(t, file, fh, "", "never", "minding") 2869 } 2870 2871 func (s *GoofysTest) TestDirMtimeCreate(t *C) { 2872 root := s.getRoot(t) 2873 2874 attr, _ := root.GetAttributes() 2875 m1 := attr.Mtime 2876 time.Sleep(time.Second) 2877 2878 _, _ = root.Create("foo", fuseops.OpMetadata{uint32(os.Getpid())}) 2879 attr2, _ := root.GetAttributes() 2880 m2 := attr2.Mtime 2881 2882 t.Assert(m1.Before(m2), Equals, true) 2883 } 2884 2885 func (s *GoofysTest) TestDirMtimeLs(t *C) { 2886 root := s.getRoot(t) 2887 2888 attr, _ := root.GetAttributes() 2889 m1 := attr.Mtime 2890 time.Sleep(3 * time.Second) 2891 2892 params := &PutBlobInput{ 2893 Key: "newfile", 2894 Body: bytes.NewReader([]byte("foo")), 2895 Size: PUInt64(3), 2896 } 2897 _, err := s.cloud.PutBlob(params) 2898 t.Assert(err, IsNil) 2899 2900 s.readDirIntoCache(t, fuseops.RootInodeID) 2901 2902 attr2, _ := root.GetAttributes() 2903 m2 := attr2.Mtime 2904 2905 t.Assert(m1.Before(m2), Equals, true) 2906 } 2907 2908 func (s *GoofysTest) TestRenameOverwrite(t *C) { 2909 mountPoint := "/tmp/mnt" + s.fs.bucket 2910 s.mount(t, mountPoint) 2911 defer s.umount(t, mountPoint) 2912 2913 file := mountPoint + 
"/newfile" 2914 rename := mountPoint + "/file1" 2915 2916 fh, err := os.Create(file) 2917 t.Assert(err, IsNil) 2918 2919 err = fh.Close() 2920 t.Assert(err, IsNil) 2921 2922 err = os.Rename(file, rename) 2923 t.Assert(err, IsNil) 2924 } 2925 2926 func (s *GoofysTest) TestRead403(t *C) { 2927 // anonymous only works in S3 for now 2928 cloud := s.getRoot(t).dir.cloud 2929 s3, ok := cloud.Delegate().(*S3Backend) 2930 if !ok { 2931 t.Skip("only for S3") 2932 } 2933 2934 s.fs.flags.StatCacheTTL = 1 * time.Minute 2935 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2936 2937 // cache the inode first so we don't get 403 when we lookup 2938 in, err := s.LookUpInode(t, "file1") 2939 t.Assert(err, IsNil) 2940 2941 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 2942 t.Assert(err, IsNil) 2943 2944 s3.awsConfig.Credentials = credentials.AnonymousCredentials 2945 s3.newS3() 2946 2947 // fake enable read-ahead 2948 fh.seqReadAmount = uint64(READAHEAD_CHUNK) 2949 2950 buf := make([]byte, 5) 2951 2952 _, err = fh.ReadFile(0, buf) 2953 t.Assert(err, Equals, syscall.EACCES) 2954 2955 // now that the S3 GET has failed, try again, see 2956 // https://github.com/djmaze/goofys/pull/243 2957 _, err = fh.ReadFile(0, buf) 2958 t.Assert(err, Equals, syscall.EACCES) 2959 } 2960 2961 func (s *GoofysTest) TestRmdirWithDiropen(t *C) { 2962 mountPoint := "/tmp/mnt" + s.fs.bucket 2963 s.fs.flags.StatCacheTTL = 1 * time.Minute 2964 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2965 2966 s.mount(t, mountPoint) 2967 defer s.umount(t, mountPoint) 2968 2969 err := os.MkdirAll(mountPoint+"/dir2/dir4", 0700) 2970 t.Assert(err, IsNil) 2971 err = os.MkdirAll(mountPoint+"/dir2/dir5", 0700) 2972 t.Assert(err, IsNil) 2973 2974 //1, open dir5 2975 dir := mountPoint + "/dir2/dir5" 2976 fh, err := os.Open(dir) 2977 t.Assert(err, IsNil) 2978 defer fh.Close() 2979 2980 cmd1 := exec.Command("ls", mountPoint+"/dir2") 2981 //out, err := cmd.Output() 2982 out1, err1 := cmd1.Output() 2983 if err1 != nil { 2984 
if ee, ok := err.(*exec.ExitError); ok { 2985 panic(ee.Stderr) 2986 } 2987 } 2988 t.Assert(string(out1), DeepEquals, ""+"dir3\n"+"dir4\n"+"dir5\n") 2989 2990 //2, rm -rf dir5 2991 cmd := exec.Command("rm", "-rf", dir) 2992 _, err = cmd.Output() 2993 if err != nil { 2994 if ee, ok := err.(*exec.ExitError); ok { 2995 panic(ee.Stderr) 2996 } 2997 } 2998 2999 //3, readdir dir2 3000 fh1, err := os.Open(mountPoint + "/dir2") 3001 t.Assert(err, IsNil) 3002 defer func() { 3003 // close the file if the test failed so we can unmount 3004 if fh1 != nil { 3005 fh1.Close() 3006 } 3007 }() 3008 3009 names, err := fh1.Readdirnames(0) 3010 t.Assert(err, IsNil) 3011 t.Assert(names, DeepEquals, []string{"dir3", "dir4"}) 3012 3013 cmd = exec.Command("ls", mountPoint+"/dir2") 3014 out, err := cmd.Output() 3015 if err != nil { 3016 if ee, ok := err.(*exec.ExitError); ok { 3017 panic(ee.Stderr) 3018 } 3019 } 3020 3021 t.Assert(string(out), DeepEquals, ""+"dir3\n"+"dir4\n") 3022 3023 err = fh1.Close() 3024 t.Assert(err, IsNil) 3025 3026 // 4,reset env 3027 err = fh.Close() 3028 t.Assert(err, IsNil) 3029 3030 err = os.RemoveAll(mountPoint + "/dir2/dir4") 3031 t.Assert(err, IsNil) 3032 3033 } 3034 3035 func (s *GoofysTest) TestDirMTime(t *C) { 3036 s.fs.flags.StatCacheTTL = 1 * time.Minute 3037 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3038 // enable cheap to ensure GET dir/ will come back before LIST dir/ 3039 s.fs.flags.Cheap = true 3040 3041 root := s.getRoot(t) 3042 t.Assert(time.Time{}.Before(root.Attributes.Mtime), Equals, true) 3043 3044 file1, err := s.LookUpInode(t, "dir1") 3045 t.Assert(err, IsNil) 3046 3047 // take mtime from a blob as init time because when we test against 3048 // real cloud, server time can be way off from local time 3049 initTime := file1.Attributes.Mtime 3050 3051 dir1, err := s.LookUpInode(t, "dir1") 3052 t.Assert(err, IsNil) 3053 3054 attr1, _ := dir1.GetAttributes() 3055 m1 := attr1.Mtime 3056 if !s.cloud.Capabilities().DirBlob { 3057 // dir1 doesn't have 
a dir blob, so should take root's mtime 3058 t.Assert(m1, Equals, root.Attributes.Mtime) 3059 } 3060 3061 time.Sleep(2 * time.Second) 3062 3063 dir2, err := dir1.MkDir("dir2") 3064 t.Assert(err, IsNil) 3065 3066 attr2, _ := dir2.GetAttributes() 3067 m2 := attr2.Mtime 3068 t.Assert(m1.Add(2*time.Second).Before(m2), Equals, true) 3069 3070 // dir1 didn't have an explicit mtime, so it should update now 3071 // that we did a mkdir inside it 3072 attr1, _ = dir1.GetAttributes() 3073 m1 = attr1.Mtime 3074 t.Assert(m1, Equals, m2) 3075 3076 // we never added the inode so this will do the lookup again 3077 dir2, err = dir1.LookUp("dir2") 3078 t.Assert(err, IsNil) 3079 3080 // the new time comes from S3 which only has seconds 3081 // granularity 3082 attr2, _ = dir2.GetAttributes() 3083 t.Assert(m2, Not(Equals), attr2.Mtime) 3084 t.Assert(initTime.Add(time.Second).Before(attr2.Mtime), Equals, true) 3085 3086 // different dir2 3087 dir2, err = s.LookUpInode(t, "dir2") 3088 t.Assert(err, IsNil) 3089 3090 attr2, _ = dir2.GetAttributes() 3091 m2 = attr2.Mtime 3092 3093 // this fails because we are listing dir/, which means we 3094 // don't actually see the dir blob dir2/dir3/ (it's returned 3095 // as common prefix), so we can't get dir3's mtime 3096 if false { 3097 // dir2/dir3/ exists and has mtime 3098 s.readDirIntoCache(t, dir2.Id) 3099 dir3, err := s.LookUpInode(t, "dir2/dir3") 3100 t.Assert(err, IsNil) 3101 3102 attr3, _ := dir3.GetAttributes() 3103 // setupDefaultEnv is before mounting 3104 t.Assert(attr3.Mtime.Before(m2), Equals, true) 3105 } 3106 3107 time.Sleep(time.Second) 3108 3109 params := &PutBlobInput{ 3110 Key: "dir2/newfile", 3111 Body: bytes.NewReader([]byte("foo")), 3112 Size: PUInt64(3), 3113 } 3114 _, err = s.cloud.PutBlob(params) 3115 t.Assert(err, IsNil) 3116 3117 s.readDirIntoCache(t, dir2.Id) 3118 3119 newfile, err := dir2.LookUp("newfile") 3120 t.Assert(err, IsNil) 3121 3122 attr2New, _ := dir2.GetAttributes() 3123 // mtime should reflect that of the 
latest object 3124 // GCS can return nano second resolution so truncate to second for compare 3125 t.Assert(attr2New.Mtime.Unix(), Equals, newfile.Attributes.Mtime.Unix()) 3126 t.Assert(m2.Before(attr2New.Mtime), Equals, true) 3127 } 3128 3129 func (s *GoofysTest) TestDirMTimeNoTTL(t *C) { 3130 if s.cloud.Capabilities().DirBlob { 3131 t.Skip("Tests for behavior without dir blob") 3132 } 3133 // enable cheap to ensure GET dir/ will come back before LIST dir/ 3134 s.fs.flags.Cheap = true 3135 3136 dir2, err := s.LookUpInode(t, "dir2") 3137 t.Assert(err, IsNil) 3138 3139 attr2, _ := dir2.GetAttributes() 3140 m2 := attr2.Mtime 3141 3142 // dir2/dir3/ exists and has mtime 3143 s.readDirIntoCache(t, dir2.Id) 3144 dir3, err := s.LookUpInode(t, "dir2/dir3") 3145 t.Assert(err, IsNil) 3146 3147 attr3, _ := dir3.GetAttributes() 3148 // setupDefaultEnv is before mounting but we can't really 3149 // compare the time here since dir3 is s3 server time and dir2 3150 // is local time 3151 t.Assert(attr3.Mtime, Not(Equals), m2) 3152 } 3153 3154 func (s *GoofysTest) TestIssue326(t *C) { 3155 root := s.getRoot(t) 3156 _, err := root.MkDir("folder@name.something") 3157 t.Assert(err, IsNil) 3158 _, err = root.MkDir("folder#1#") 3159 t.Assert(err, IsNil) 3160 3161 s.readDirIntoCache(t, root.Id) 3162 s.assertEntries(t, root, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", 3163 "file1", "file2", "folder#1#", "folder@name.something", "zero"}) 3164 } 3165 3166 func (s *GoofysTest) TestSlurpFileAndDir(t *C) { 3167 if _, ok := s.cloud.Delegate().(*S3Backend); !ok { 3168 t.Skip("only for S3") 3169 } 3170 prefix := "TestSlurpFileAndDir/" 3171 // fileAndDir is both a file and a directory, and we are 3172 // slurping them together as part of our listing optimization 3173 blobs := []string{ 3174 prefix + "fileAndDir", 3175 prefix + "fileAndDir/a", 3176 } 3177 3178 for _, b := range blobs { 3179 params := &PutBlobInput{ 3180 Key: b, 3181 Body: bytes.NewReader([]byte("foo")), 3182 Size: 
PUInt64(3), 3183 } 3184 _, err := s.cloud.PutBlob(params) 3185 t.Assert(err, IsNil) 3186 } 3187 3188 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3189 s.fs.flags.StatCacheTTL = 1 * time.Minute 3190 3191 in, err := s.LookUpInode(t, prefix[0:len(prefix)-1]) 3192 t.Assert(err, IsNil) 3193 t.Assert(in.dir, NotNil) 3194 3195 s.getRoot(t).dir.seqOpenDirScore = 2 3196 s.readDirIntoCache(t, in.Id) 3197 3198 // should have slurped these 3199 in = in.findChild("fileAndDir") 3200 t.Assert(in, NotNil) 3201 t.Assert(in.dir, NotNil) 3202 3203 in = in.findChild("a") 3204 t.Assert(in, NotNil) 3205 3206 // because of slurping we've decided that this is a directory, 3207 // lookup must _not_ talk to S3 again because otherwise we may 3208 // decide it's a file again because of S3 race 3209 s.disableS3() 3210 in, err = s.LookUpInode(t, prefix+"fileAndDir") 3211 t.Assert(err, IsNil) 3212 3213 s.assertEntries(t, in, []string{"a"}) 3214 } 3215 3216 func (s *GoofysTest) TestAzureDirBlob(t *C) { 3217 if _, ok := s.cloud.(*AZBlob); !ok { 3218 t.Skip("only for Azure blob") 3219 } 3220 3221 fakedir := []string{"dir2", "dir3"} 3222 3223 for _, d := range fakedir { 3224 params := &PutBlobInput{ 3225 Key: "azuredir/" + d, 3226 Body: bytes.NewReader([]byte("")), 3227 Metadata: map[string]*string{ 3228 AzureDirBlobMetadataKey: PString("true"), 3229 }, 3230 Size: PUInt64(0), 3231 } 3232 _, err := s.cloud.PutBlob(params) 3233 t.Assert(err, IsNil) 3234 } 3235 3236 defer func() { 3237 // because our listing changes dir3 to dir3/, test 3238 // cleanup could not delete the blob so we wneed to 3239 // clean up 3240 for _, d := range fakedir { 3241 _, err := s.cloud.DeleteBlob(&DeleteBlobInput{Key: "azuredir/" + d}) 3242 t.Assert(err, IsNil) 3243 } 3244 }() 3245 3246 s.setupBlobs(s.cloud, t, map[string]*string{ 3247 // "azuredir/dir" would have gone here 3248 "azuredir/dir3,/": nil, 3249 "azuredir/dir3/file1": nil, 3250 "azuredir/dir345_is_a_file": nil, 3251 }) 3252 3253 head, err := 
s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir3"}) 3254 t.Assert(err, IsNil) 3255 t.Assert(head.IsDirBlob, Equals, true) 3256 3257 head, err = s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir345_is_a_file"}) 3258 t.Assert(err, IsNil) 3259 t.Assert(head.IsDirBlob, Equals, false) 3260 3261 list, err := s.cloud.ListBlobs(&ListBlobsInput{Prefix: PString("azuredir/")}) 3262 t.Assert(err, IsNil) 3263 3264 // for flat listing, we rename `dir3` to `dir3/` and add it to Items, 3265 // `dir3` normally sorts before `dir3./`, but after the rename `dir3/` should 3266 // sort after `dir3./` 3267 t.Assert(len(list.Items), Equals, 5) 3268 t.Assert(*list.Items[0].Key, Equals, "azuredir/dir2/") 3269 t.Assert(*list.Items[1].Key, Equals, "azuredir/dir3,/") 3270 t.Assert(*list.Items[2].Key, Equals, "azuredir/dir3/") 3271 t.Assert(*list.Items[3].Key, Equals, "azuredir/dir3/file1") 3272 t.Assert(*list.Items[4].Key, Equals, "azuredir/dir345_is_a_file") 3273 t.Assert(sort.IsSorted(sortBlobItemOutput(list.Items)), Equals, true) 3274 3275 list, err = s.cloud.ListBlobs(&ListBlobsInput{ 3276 Prefix: PString("azuredir/"), 3277 Delimiter: PString("/"), 3278 }) 3279 t.Assert(err, IsNil) 3280 3281 // for delimited listing, we remove `dir3` from items and add `dir3/` to prefixes, 3282 // which should already be there 3283 t.Assert(len(list.Items), Equals, 1) 3284 t.Assert(*list.Items[0].Key, Equals, "azuredir/dir345_is_a_file") 3285 3286 t.Assert(len(list.Prefixes), Equals, 3) 3287 t.Assert(*list.Prefixes[0].Prefix, Equals, "azuredir/dir2/") 3288 t.Assert(*list.Prefixes[1].Prefix, Equals, "azuredir/dir3,/") 3289 t.Assert(*list.Prefixes[2].Prefix, Equals, "azuredir/dir3/") 3290 3291 // finally check that we are reading them in correctly 3292 in, err := s.LookUpInode(t, "azuredir") 3293 t.Assert(err, IsNil) 3294 3295 s.assertEntries(t, in, []string{"dir2", "dir3", "dir3,", "dir345_is_a_file"}) 3296 } 3297 3298 func (s *GoofysTest) TestReadDirLarge(t *C) { 3299 root := s.getRoot(t) 3300 
root.dir.mountPrefix = "empty_dir" 3301 3302 blobs := make(map[string]*string) 3303 expect := make([]string, 0) 3304 for i := 0; i < 998; i++ { 3305 blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil 3306 expect = append(expect, fmt.Sprintf("%04vd", i)) 3307 } 3308 blobs["empty_dir/0998f"] = nil 3309 blobs["empty_dir/0999f"] = nil 3310 blobs["empty_dir/1000f"] = nil 3311 expect = append(expect, "0998f") 3312 expect = append(expect, "0999f") 3313 expect = append(expect, "1000f") 3314 3315 for i := 1001; i < 1003; i++ { 3316 blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil 3317 expect = append(expect, fmt.Sprintf("%04vd", i)) 3318 } 3319 3320 s.setupBlobs(s.cloud, t, blobs) 3321 3322 dh := root.OpenDir() 3323 defer dh.CloseDir() 3324 3325 children := namesOf(s.readDirFully(t, dh)) 3326 sort.Strings(children) 3327 3328 t.Assert(children, DeepEquals, expect) 3329 } 3330 3331 func (s *GoofysTest) newBackend(t *C, bucket string, createBucket bool) (cloud StorageBackend) { 3332 var err error 3333 switch s.cloud.Delegate().(type) { 3334 case *S3Backend: 3335 config, _ := s.fs.flags.Backend.(*S3Config) 3336 s3, err := NewS3(bucket, s.fs.flags, config) 3337 t.Assert(err, IsNil) 3338 3339 s3.aws = hasEnv("AWS") 3340 3341 if s.emulator { 3342 s3.Handlers.Sign.Clear() 3343 s3.Handlers.Sign.PushBack(SignV2) 3344 s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) 3345 } 3346 3347 if s3.aws { 3348 cloud = NewS3BucketEventualConsistency(s3) 3349 } else { 3350 cloud = s3 3351 } 3352 case *GCS3: 3353 config, _ := s.fs.flags.Backend.(*S3Config) 3354 cloud, err = NewGCS3(bucket, s.fs.flags, config) 3355 t.Assert(err, IsNil) 3356 case *AZBlob: 3357 config, _ := s.fs.flags.Backend.(*AZBlobConfig) 3358 cloud, err = NewAZBlob(bucket, config) 3359 t.Assert(err, IsNil) 3360 case *ADLv1: 3361 config, _ := s.fs.flags.Backend.(*ADLv1Config) 3362 cloud, err = NewADLv1(bucket, s.fs.flags, config) 3363 t.Assert(err, IsNil) 3364 case *ADLv2: 3365 config, _ := 
s.fs.flags.Backend.(*ADLv2Config) 3366 cloud, err = NewADLv2(bucket, s.fs.flags, config) 3367 t.Assert(err, IsNil) 3368 case *GCSBackend: 3369 config, _ := s.fs.flags.Backend.(*GCSConfig) 3370 cloud, err = NewGCS(bucket, config) 3371 t.Assert(err, IsNil) 3372 default: 3373 t.Fatal("unknown backend") 3374 } 3375 3376 if createBucket { 3377 _, err = cloud.MakeBucket(&MakeBucketInput{}) 3378 t.Assert(err, IsNil) 3379 3380 s.removeBucket = append(s.removeBucket, cloud) 3381 } 3382 3383 return 3384 } 3385 3386 func (s *GoofysTest) TestVFS(t *C) { 3387 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3388 cloud2 := s.newBackend(t, bucket, true) 3389 3390 // "mount" this 2nd cloud 3391 in, err := s.LookUpInode(t, "dir4") 3392 t.Assert(in, NotNil) 3393 t.Assert(err, IsNil) 3394 3395 in.dir.cloud = cloud2 3396 in.dir.mountPrefix = "cloud2Prefix/" 3397 3398 rootCloud, rootPath := in.cloud() 3399 t.Assert(rootCloud, NotNil) 3400 t.Assert(rootCloud == cloud2, Equals, true) 3401 t.Assert(rootPath, Equals, "cloud2Prefix") 3402 3403 // the mount would shadow dir4/file5 3404 _, err = in.LookUp("file5") 3405 t.Assert(err, Equals, fuse.ENOENT) 3406 3407 _, fh := in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())}) 3408 err = fh.FlushFile() 3409 t.Assert(err, IsNil) 3410 3411 resp, err := cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile"}) 3412 t.Assert(err, IsNil) 3413 defer resp.Body.Close() 3414 3415 err = s.getRoot(t).Rename("file1", in, "file2") 3416 t.Assert(err, Equals, syscall.EINVAL) 3417 3418 _, err = in.MkDir("subdir") 3419 t.Assert(err, IsNil) 3420 3421 subdirKey := "cloud2Prefix/subdir" 3422 if !cloud2.Capabilities().DirBlob { 3423 subdirKey += "/" 3424 } 3425 3426 _, err = cloud2.HeadBlob(&HeadBlobInput{Key: subdirKey}) 3427 t.Assert(err, IsNil) 3428 3429 subdir, err := s.LookUpInode(t, "dir4/subdir") 3430 t.Assert(err, IsNil) 3431 t.Assert(subdir, NotNil) 3432 t.Assert(subdir.dir, NotNil) 3433 t.Assert(subdir.dir.cloud, IsNil) 3434 3435 
subdirCloud, subdirPath := subdir.cloud() 3436 t.Assert(subdirCloud, NotNil) 3437 t.Assert(subdirCloud == cloud2, Equals, true) 3438 t.Assert(subdirPath, Equals, "cloud2Prefix/subdir") 3439 3440 // create another file inside subdir to make sure that our 3441 // mount check is correct for dir inside the root 3442 _, fh = subdir.Create("testfile2", fuseops.OpMetadata{uint32(os.Getpid())}) 3443 err = fh.FlushFile() 3444 t.Assert(err, IsNil) 3445 3446 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3447 t.Assert(err, IsNil) 3448 defer resp.Body.Close() 3449 3450 err = subdir.Rename("testfile2", in, "testfile2") 3451 t.Assert(err, IsNil) 3452 3453 _, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3454 t.Assert(err, Equals, fuse.ENOENT) 3455 3456 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"}) 3457 t.Assert(err, IsNil) 3458 defer resp.Body.Close() 3459 3460 err = in.Rename("testfile2", subdir, "testfile2") 3461 t.Assert(err, IsNil) 3462 3463 _, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"}) 3464 t.Assert(err, Equals, fuse.ENOENT) 3465 3466 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3467 t.Assert(err, IsNil) 3468 defer resp.Body.Close() 3469 } 3470 3471 func (s *GoofysTest) TestMountsList(t *C) { 3472 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3473 s.fs.flags.StatCacheTTL = 1 * time.Minute 3474 3475 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3476 cloud := s.newBackend(t, bucket, true) 3477 3478 root := s.getRoot(t) 3479 rootCloud := root.dir.cloud 3480 3481 s.fs.MountAll([]*Mount{ 3482 &Mount{"dir4/cloud1", cloud, "", false}, 3483 }) 3484 3485 in, err := s.LookUpInode(t, "dir4") 3486 t.Assert(in, NotNil) 3487 t.Assert(err, IsNil) 3488 t.Assert(int(in.Id), Equals, 2) 3489 3490 s.readDirIntoCache(t, in.Id) 3491 // ensure that listing is listing mounts and root bucket in one go 3492 root.dir.cloud = nil 3493 3494 
	s.assertEntries(t, in, []string{"cloud1", "file5"})

	c1, err := s.LookUpInode(t, "dir4/cloud1")
	t.Assert(err, IsNil)
	t.Assert(*c1.Name, Equals, "cloud1")
	t.Assert(c1.dir.cloud == cloud, Equals, true)
	t.Assert(int(c1.Id), Equals, 3)

	// pretend we've passed the normal cache ttl
	s.fs.flags.TypeCacheTTL = 0
	s.fs.flags.StatCacheTTL = 0

	// listing root again should not overwrite the mounts
	root.dir.cloud = rootCloud

	s.readDirIntoCache(t, in.Parent.Id)
	s.assertEntries(t, in, []string{"cloud1", "file5"})

	// the mount inode must survive re-listing with the same identity
	c1, err = s.LookUpInode(t, "dir4/cloud1")
	t.Assert(err, IsNil)
	t.Assert(*c1.Name, Equals, "cloud1")
	t.Assert(c1.dir.cloud == cloud, Equals, true)
	t.Assert(int(c1.Id), Equals, 3)
}

// TestMountsNewDir verifies that mounting under a directory that does not
// exist in the root bucket (dir5) implicitly creates the intermediate
// directory inode.
func (s *GoofysTest) TestMountsNewDir(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cloud := s.newBackend(t, bucket, true)

	// dir5 must not exist before the mount
	_, err := s.LookUpInode(t, "dir5")
	t.Assert(err, NotNil)
	t.Assert(err, Equals, fuse.ENOENT)

	s.fs.MountAll([]*Mount{
		&Mount{"dir5/cloud1", cloud, "", false},
	})

	in, err := s.LookUpInode(t, "dir5")
	t.Assert(err, IsNil)
	t.Assert(in.isDir(), Equals, true)

	c1, err := s.LookUpInode(t, "dir5/cloud1")
	t.Assert(err, IsNil)
	t.Assert(c1.isDir(), Equals, true)
	t.Assert(c1.dir.cloud, Equals, cloud)
}

// TestMountsNewMounts verifies that a second MountAll call can add new
// mounts (dir4/cloud2) without disturbing the ones already present
// (dir4/cloud1), and that mount prefixes are applied.
func (s *GoofysTest) TestMountsNewMounts(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cloud := s.newBackend(t, bucket, true)

	// "mount" this 2nd cloud
	in, err := s.LookUpInode(t, "dir4")
	t.Assert(in, NotNil)
	t.Assert(err, IsNil)

	s.fs.MountAll([]*Mount{
		&Mount{"dir4/cloud1", cloud, "", false},
	})

	s.readDirIntoCache(t, in.Id)

	c1, err := s.LookUpInode(t, "dir4/cloud1")
	t.Assert(err, IsNil)
	t.Assert(*c1.Name, Equals, "cloud1")
	t.Assert(c1.dir.cloud == cloud, Equals, true)

	// cloud2 is not mounted yet
	_, err = s.LookUpInode(t, "dir4/cloud2")
	t.Assert(err, Equals, fuse.ENOENT)

	s.fs.MountAll([]*Mount{
		&Mount{"dir4/cloud1", cloud, "", false},
		&Mount{"dir4/cloud2", cloud, "cloudprefix", false},
	})

	c2, err := s.LookUpInode(t, "dir4/cloud2")
	t.Assert(err, IsNil)
	t.Assert(*c2.Name, Equals, "cloud2")
	t.Assert(c2.dir.cloud == cloud, Equals, true)
	t.Assert(c2.dir.mountPrefix, Equals, "cloudprefix")
}

// TestMountsError mounts two deliberately broken backends and verifies that
// instead of failing the whole filesystem, each broken mount exposes a
// single INIT_ERR_BLOB file describing the failure. How the broken backend
// is constructed depends on which backend the suite is running against.
func (s *GoofysTest) TestMountsError(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	var cloud StorageBackend
	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
		// S3Backend can't detect bucket doesn't exist because
		// HEAD an object always return 404 NotFound (instead
		// of NoSuchBucket)
		flags := *s3.flags
		config := *s3.config
		// unroutable endpoint forces the init failure
		flags.Endpoint = "0.0.0.0:0"
		var err error
		cloud, err = NewS3(bucket, &flags, &config)
		t.Assert(err, IsNil)
	} else if _, ok := s.cloud.(*ADLv1); ok {
		// break auth to force the init failure
		config, _ := s.fs.flags.Backend.(*ADLv1Config)
		config.Authorizer = nil

		var err error
		cloud, err = NewADLv1(bucket, s.fs.flags, config)
		t.Assert(err, IsNil)
	} else if _, ok := s.cloud.(*ADLv2); ok {
		// ADLv2 currently doesn't detect bucket doesn't exist
		cloud = s.newBackend(t, bucket, false)
		adlCloud, _ := cloud.(*ADLv2)
		auth := adlCloud.client.BaseClient.Authorizer
		adlCloud.client.BaseClient.Authorizer = nil
		defer func() {
			adlCloud.client.BaseClient.Authorizer = auth
		}()
	} else if _, ok := s.cloud.(*GCSBackend); ok {
		// We'll trigger a failure on GCS mount by using an unauthenticated client to mount to a private bucket
		defaultCreds := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
		os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")

		defer func() {
			os.Setenv("GOOGLE_APPLICATION_CREDENTIALS",
				defaultCreds)
		}()

		var err error
		config := NewGCSConfig()
		cloud, err = NewGCS(s.fs.bucket, config)
		t.Assert(err, IsNil)
	} else {
		cloud = s.newBackend(t, bucket, false)
	}

	s.fs.MountAll([]*Mount{
		// a backend whose constructor already failed
		&Mount{"dir4/newerror", StorageBackendInitError{
			fmt.Errorf("foo"),
			Capabilities{},
		}, "errprefix1", false},
		// a backend that will fail lazily on first use
		&Mount{"dir4/initerror", &StorageBackendInitWrapper{
			StorageBackend: cloud,
			initKey:        "foobar",
		}, "errprefix2", false},
	})

	// each broken mount must contain exactly the error marker blob
	errfile, err := s.LookUpInode(t, "dir4/newerror/"+INIT_ERR_BLOB)
	t.Assert(err, IsNil)
	t.Assert(errfile.isDir(), Equals, false)

	_, err = s.LookUpInode(t, "dir4/newerror/not_there")
	t.Assert(err, Equals, fuse.ENOENT)

	errfile, err = s.LookUpInode(t, "dir4/initerror/"+INIT_ERR_BLOB)
	t.Assert(err, IsNil)
	t.Assert(errfile.isDir(), Equals, false)

	_, err = s.LookUpInode(t, "dir4/initerror/not_there")
	t.Assert(err, Equals, fuse.ENOENT)

	in, err := s.LookUpInode(t, "dir4/initerror")
	t.Assert(err, IsNil)
	t.Assert(in, NotNil)

	t.Assert(in.dir.cloud.Capabilities().Name, Equals, cloud.Capabilities().Name)
}

// TestMountsMultiLevel verifies that mounting at a multi-level path
// (dir4/sub/dir) creates all intermediate directories.
func (s *GoofysTest) TestMountsMultiLevel(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Minute

	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cloud := s.newBackend(t, bucket, true)

	s.fs.MountAll([]*Mount{
		&Mount{"dir4/sub/dir", cloud, "", false},
	})

	sub, err := s.LookUpInode(t, "dir4/sub")
	t.Assert(err, IsNil)
	t.Assert(sub.isDir(), Equals, true)

	s.assertEntries(t, sub, []string{"dir"})
}

// TestMountsNested mounts one backend at two nested points, inner first.
func (s *GoofysTest) TestMountsNested(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cloud := s.newBackend(t, bucket, true)
	s.testMountsNested(t, cloud, []*Mount{
		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
		&Mount{"dir5/in/", cloud, "b/", false},
	})
}

// test that mount order doesn't matter for nested mounts
func (s *GoofysTest) TestMountsNestedReversed(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cloud := s.newBackend(t, bucket, true)
	s.testMountsNested(t, cloud, []*Mount{
		&Mount{"dir5/in/", cloud, "b/", false},
		&Mount{"dir5/in/a/dir", cloud, "a/dir/", false},
	})
}

// testMountsNested is the shared body for the nested-mount tests: it applies
// the given mounts, checks every intermediate directory resolves, and then
// creates a file at each mount point to verify writes go to the right
// prefix ("b/" for dir5/in, "a/dir/" for dir5/in/a/dir).
func (s *GoofysTest) testMountsNested(t *C, cloud StorageBackend,
	mounts []*Mount) {

	_, err := s.LookUpInode(t, "dir5")
	t.Assert(err, NotNil)
	t.Assert(err, Equals, fuse.ENOENT)

	s.fs.MountAll(mounts)

	in, err := s.LookUpInode(t, "dir5")
	t.Assert(err, IsNil)

	s.readDirIntoCache(t, in.Id)

	// make sure all the intermediate dirs never expire
	time.Sleep(time.Second)
	dir_in, err := s.LookUpInode(t, "dir5/in")
	t.Assert(err, IsNil)
	t.Assert(*dir_in.Name, Equals, "in")

	s.readDirIntoCache(t, dir_in.Id)

	dir_a, err := s.LookUpInode(t, "dir5/in/a")
	t.Assert(err, IsNil)
	t.Assert(*dir_a.Name, Equals, "a")

	s.assertEntries(t, dir_a, []string{"dir"})

	dir_dir, err := s.LookUpInode(t, "dir5/in/a/dir")
	t.Assert(err, IsNil)
	t.Assert(*dir_dir.Name, Equals, "dir")
	t.Assert(dir_dir.dir.cloud == cloud, Equals, true)

	// write at the outer mount: must land under "b/"
	_, fh := dir_in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
	err = fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err := cloud.GetBlob(&GetBlobInput{Key: "b/testfile"})
	t.Assert(err, IsNil)
	defer resp.Body.Close()

	// write at the inner mount: must land under "a/dir/"
	_, fh = dir_dir.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
	err = fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err = cloud.GetBlob(&GetBlobInput{Key: "a/dir/testfile"})
	t.Assert(err, IsNil)
	defer resp.Body.Close()

	s.assertEntries(t, in, []string{"in"})
}

// verifyFileData asserts that mountPoint/path contains content (compared
// after trimming surrounding whitespace); when content is nil it asserts
// that the file does not exist.
func
verifyFileData(t *C, mountPoint string, path string, content *string) {
	if !strings.HasSuffix(mountPoint, "/") {
		mountPoint = mountPoint + "/"
	}
	path = mountPoint + path
	data, err := ioutil.ReadFile(path)
	comment := Commentf("failed while verifying %v", path)
	if content != nil {
		t.Assert(err, IsNil, comment)
		t.Assert(strings.TrimSpace(string(data)), Equals, *content, comment)
	} else {
		t.Assert(err, Not(IsNil), comment)
		t.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true, comment)
	}
}

// TestNestedMountUnmountSimple mounts a child bucket at "childmnt" inside a
// FUSE-mounted goofys root and verifies, through actual file reads, that the
// mount shadows the parent's blobs and that unmounting restores them.
func (s *GoofysTest) TestNestedMountUnmountSimple(t *C) {
	childBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	childCloud := s.newBackend(t, childBucket, true)

	parFileContent := "parent"
	childFileContent := "child"
	// parent bucket has blobs both under and outside the future mount point
	parEnv := map[string]*string{
		"childmnt/x/in_child_and_par": &parFileContent,
		"childmnt/x/in_par_only":      &parFileContent,
		"nonchildmnt/something":       &parFileContent,
	}
	childEnv := map[string]*string{
		"x/in_child_only":    &childFileContent,
		"x/in_child_and_par": &childFileContent,
	}
	s.setupBlobs(s.cloud, t, parEnv)
	s.setupBlobs(childCloud, t, childEnv)

	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
	s.mount(t, rootMountPath)
	defer s.umount(t, rootMountPath)
	// Files under /tmp/fusetesting/ should all be from goofys root.
	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)

	childMount := &Mount{"childmnt", childCloud, "", false}
	s.fs.Mount(childMount)
	// Now files under /tmp/fusetesting/childmnt should be from childBucket
	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", nil)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &childFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", &childFileContent)
	// /tmp/fusetesting/nonchildmnt should be from parent bucket.
	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)

	s.fs.Unmount(childMount.name)
	// Child is unmounted. So files under /tmp/fusetesting/ should all be from goofys root.
	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
}

// TestUnmountBucketWithChild stacks two mounts ("c" and "c/c") over the
// root bucket and verifies that unmounting the outer "c" mount leaves the
// inner "c/c" mount intact: the same path c/c/x/foo resolves to a different
// bucket at each stage.
func (s *GoofysTest) TestUnmountBucketWithChild(t *C) {
	// This bucket will be mounted at ${goofysroot}/c
	cBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cCloud := s.newBackend(t, cBucket, true)

	// This bucket will be mounted at ${goofysroot}/c/c
	ccBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	ccCloud := s.newBackend(t, ccBucket, true)

	pFileContent := "parent"
	cFileContent := "child"
	ccFileContent := "childchild"
	pEnv := map[string]*string{
		"c/c/x/foo": &pFileContent,
	}
	cEnv := map[string]*string{
		"c/x/foo": &cFileContent,
	}
	ccEnv := map[string]*string{
		"x/foo": &ccFileContent,
	}

	s.setupBlobs(s.cloud, t, pEnv)
	s.setupBlobs(cCloud, t, cEnv)
	s.setupBlobs(ccCloud, t, ccEnv)

	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
	s.mount(t, rootMountPath)
	defer s.umount(t, rootMountPath)
	// c/c/foo should come from root mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &pFileContent)

	cMount := &Mount{"c", cCloud, "", false}
	s.fs.Mount(cMount)
	// c/c/foo should come from "c" mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &cFileContent)

	ccMount := &Mount{"c/c", ccCloud, "", false}
	s.fs.Mount(ccMount)
	// c/c/foo should come from "c/c" mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)

	s.fs.Unmount(cMount.name)
	// c/c/foo should still come from "c/c" mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
}

// TestRmImplicitDir removes an implicit directory (dir2) through the FUSE
// mount with os.RemoveAll — while the process cwd is inside it — and
// verifies it disappears from the root listing.
func (s *GoofysTest) TestRmImplicitDir(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	// leave the soon-to-be-deleted cwd before the unmount runs
	defer os.Chdir("/")

	dir, err := os.Open(mountPoint + "/dir2")
	t.Assert(err, IsNil)
	defer dir.Close()

	err = dir.Chdir()
	t.Assert(err, IsNil)

	err = os.RemoveAll(mountPoint + "/dir2")
	t.Assert(err, IsNil)

	root, err := os.Open(mountPoint)
	t.Assert(err, IsNil)
	defer root.Close()

	files, err := root.Readdirnames(0)
	t.Assert(err, IsNil)
	t.Assert(files, DeepEquals, []string{
		"dir1", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero",
	})
}

// TestMount is an interactive helper rather than an automated test: it
// mounts the filesystem and blocks until SIGINT/SIGTERM. Skipped when the
// MOUNT environment variable is "false".
func (s *GoofysTest) TestMount(t *C) {
	if os.Getenv("MOUNT") == "false" {
		t.Skip("Not mounting")
	}

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	log.Printf("Mounted at %v", mountPoint)

	c := make(chan os.Signal, 2)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c
}

// Checks if 2 sorted lists are equal. Returns a helpful error if they differ.
func checkSortedListsAreEqual(l1, l2 []string) error {
	// merge-walk both sorted lists, collecting elements unique to each side
	i1, i2 := 0, 0
	onlyl1, onlyl2 := []string{}, []string{}
	for i1 < len(l1) && i2 < len(l2) {
		if l1[i1] == l2[i2] {
			i1++
			i2++
		} else if l1[i1] < l2[i2] {
			onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
			i1++
		} else {
			onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
			i2++
		}

	}
	// whatever remains in either list is unique to it
	for ; i1 < len(l1); i1++ {
		onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1]))
	}
	for ; i2 < len(l2); i2++ {
		onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2]))
	}

	if len(onlyl1)+len(onlyl2) == 0 {
		return nil
	}
	toString := func(l []string) string {
		ret := []string{}
		// The list can contain a lot of elements. Show only ten and say
		// "and x more".
		for i := 0; i < len(l) && i < 10; i++ {
			ret = append(ret, l[i])
		}
		if len(ret) < len(l) {
			ret = append(ret, fmt.Sprintf("and %d more", len(l)-len(ret)))
		}
		return strings.Join(ret, ", ")
	}
	return fmt.Errorf("only l1: %+v, only l2: %+v",
		toString(onlyl1), toString(onlyl2))
}

// TestReadDirDash lists a directory whose contents straddle the '-'/'/'
// ordering boundary across multiple listing pages.
func (s *GoofysTest) TestReadDirDash(t *C) {
	if s.azurite {
		t.Skip("ADLv1 doesn't have pagination")
	}
	root := s.getRoot(t)
	root.dir.mountPrefix = "prefix"

	// SETUP
	// Add the following blobs
	// - prefix/2019/1
	// - prefix/2019-0000 to prefix/2019-4999
	// - prefix/20190000 to prefix/20194999
	// Fetching this result will need 3 pages in azure (pagesize 5k) and 11 pages
	// in amazon (pagesize 1k)
	// This setup will verify that we paginate and return results correctly before and after
	// seeing all contents that have a '-' ('-' < '/'). For more context read the comments in
	// dir.go::listBlobsSafe.
	blobs := make(map[string]*string)
	expect := []string{"2019"}
	blobs["prefix/2019/1"] = nil
	for i := 0; i < 5000; i++ {
		name := fmt.Sprintf("2019-%04d", i)
		expect = append(expect, name)
		blobs["prefix/"+name] = nil
	}
	for i := 0; i < 5000; i++ {
		name := fmt.Sprintf("2019%04d", i)
		expect = append(expect, name)
		blobs["prefix/"+name] = nil
	}
	s.setupBlobs(s.cloud, t, blobs)

	// Read the directory and verify its contents.
	dh := root.OpenDir()
	defer dh.CloseDir()

	children := namesOf(s.readDirFully(t, dh))
	t.Assert(checkSortedListsAreEqual(children, expect), IsNil)
}

// TestWriteListFlush verifies that listing a directory while a newly
// created file is still unflushed neither loses the entry nor invalidates
// the inode, before and after the flush.
func (s *GoofysTest) TestWriteListFlush(t *C) {
	root := s.getRoot(t)
	root.dir.mountPrefix = "this_test/"

	dir, err := root.MkDir("dir")
	t.Assert(err, IsNil)
	s.fs.insertInode(root, dir)

	in, fh := dir.Create("file1", fuseops.OpMetadata{})
	t.Assert(in, NotNil)
	t.Assert(fh, NotNil)
	s.fs.insertInode(dir, in)

	// list before the file is flushed to the backend
	s.assertEntries(t, dir, []string{"file1"})

	// in should still be valid
	t.Assert(in.Parent, NotNil)
	t.Assert(in.Parent, Equals, dir)
	fh.FlushFile()

	s.assertEntries(t, dir, []string{"file1"})
}

// includes is a gocheck Checker that succeeds when the obtained
// array/slice/string contains an element deep-equal to the expected value.
type includes struct{}

func (c includes) Info() *CheckerInfo {
	return &CheckerInfo{Name: "includes", Params: []string{"obtained", "expected"}}
}

func (c includes) Check(params []interface{}, names []string) (res bool, error string) {
	arr := reflect.ValueOf(params[0])
	switch arr.Kind() {
	case reflect.Array, reflect.Slice, reflect.String:
	default:
		panic(fmt.Sprintf("%v is not an array", names[0]))
	}

	// return true on the first element that deep-equals the expected value
	for i := 0; i < arr.Len(); i++ {
		v := arr.Index(i).Interface()
		res, error = DeepEquals.Check([]interface{}{v, params[1]}, names)
		if res {
			return
		} else {
			// discard the per-element mismatch message
			error = ""
		}

		res = false
	}
	return
}

// TestWriteUnlinkFlush verifies that flushing a file handle whose file was
// already unlinked neither fails nor resurrects the directory entry (the
// flush happens with S3 access disabled, so nothing may be uploaded).
func (s *GoofysTest) TestWriteUnlinkFlush(t *C) {
	root := s.getRoot(t)

	dir, err := root.MkDir("dir")
	t.Assert(err, IsNil)
	s.fs.insertInode(root, dir)

	in, fh := dir.Create("deleted", fuseops.OpMetadata{})
	t.Assert(in, NotNil)
	t.Assert(fh, NotNil)
	s.fs.insertInode(dir, in)

	err = dir.Unlink("deleted")
	t.Assert(err, IsNil)

	// flush of the unlinked file must not reach the backend
	s.disableS3()
	err = fh.FlushFile()
	t.Assert(err, IsNil)

	dh := dir.OpenDir()
	defer dh.CloseDir()
	t.Assert(namesOf(s.readDirFully(t, dh)), Not(includes{}), "deleted")
}

// TestIssue474 is a regression test: re-listing an expired directory that
// was previously slurped in as a side effect of listing a sibling must not
// drop its children.
func (s *GoofysTest) TestIssue474(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Second
	s.fs.flags.Cheap = true

	p := "this_test/"
	root := s.getRoot(t)
	root.dir.mountPrefix = "this_test/"
	root.dir.seqOpenDirScore = 2

	blobs := make(map[string]*string)

	in := []string{
		"1/a/b",
		"2/c/d",
	}

	for _, s := range in {
		blobs[p+s] = nil
	}

	s.setupBlobs(s.cloud, t, blobs)

	dir1, err := s.LookUpInode(t, "1")
	t.Assert(err, IsNil)
	// this would list 1/ and slurp in 2/c/d at the same time
	s.assertEntries(t, dir1, []string{"a"})

	// 2/ will expire and require re-listing. ensure that we don't
	// remove any children as stale as we update
	time.Sleep(time.Second)

	dir2, err := s.LookUpInode(t, "2")
	t.Assert(err, IsNil)
	s.assertEntries(t, dir2, []string{"c"})
}

// TestReadExternalChangesFuse verifies that a blob updated directly in the
// backend becomes visible through the FUSE mount once StatCacheTTL passes,
// and is then served from cache without contacting the backend again.
func (s *GoofysTest) TestReadExternalChangesFuse(t *C) {
	s.fs.flags.StatCacheTTL = 1 * time.Second

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	file := "file1"
	filePath := mountPoint + "/file1"

	buf, err := ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, file)

	// update the blob behind goofys' back
	update := "file2"
	_, err = s.cloud.PutBlob(&PutBlobInput{
		Key:  file,
		Body: bytes.NewReader([]byte(update)),
		Size: PUInt64(uint64(len(update))),
	})
	t.Assert(err, IsNil)

	// wait out StatCacheTTL so the next read re-validates
	time.Sleep(1 * time.Second)

	buf, err = ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, update)

	// the next read shouldn't talk to cloud
	root := s.getRoot(t)
	root.dir.cloud = &StorageBackendInitError{
		syscall.EINVAL, *root.dir.cloud.Capabilities(),
	}

	buf, err = ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, update)
}

func (s *GoofysTest) TestReadMyOwnWriteFuse(t *C) {
	s.testReadMyOwnWriteFuse(t, false)
}

func (s *GoofysTest) TestReadMyOwnWriteExternalChangesFuse(t *C) {
	s.testReadMyOwnWriteFuse(t, true)
}

// testReadMyOwnWriteFuse checks page-cache behavior when reading back our
// own unflushed write through the kernel, optionally after an external
// update to the same blob (which disables reuse of the page cache).
func (s *GoofysTest) testReadMyOwnWriteFuse(t *C, externalUpdate bool) {
	s.fs.flags.StatCacheTTL = 1 * time.Second

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	file := "file1"
	filePath := mountPoint + "/file1"

	buf, err := ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, file)

	if externalUpdate {
		// update the blob behind goofys' back
		update := "file2"
		_, err = s.cloud.PutBlob(&PutBlobInput{
			Key:  file,
			Body: bytes.NewReader([]byte(update)),
			Size: PUInt64(uint64(len(update))),
		})
		t.Assert(err, IsNil)

		time.Sleep(s.fs.flags.StatCacheTTL)
	}

	fh, err := os.Create(filePath)
	t.Assert(err, IsNil)

	_, err = fh.WriteString("file3")
	t.Assert(err, IsNil)
	// we can't flush yet because if we did, we would be reading
	// the new copy from cloud and that's not the point of this
	// test
	defer func() {
		// want fh to be late-binding because we re-use the variable
		fh.Close()
	}()

	buf, err = ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	if externalUpdate {
		// if there was an external update, we had set
		// KeepPageCache to false on os.Create above, which
		// causes our write to not be in cache, and read here
		// will go to cloud
		t.Assert(string(buf), Equals, "file2")
	} else {
		t.Assert(string(buf), Equals, "file3")
	}

	err = fh.Close()
	t.Assert(err, IsNil)

	time.Sleep(s.fs.flags.StatCacheTTL)

	// wrap the backend so we can inject an error on further cloud access
	root := s.getRoot(t)
	cloud := &TestBackend{root.dir.cloud, nil}
	root.dir.cloud = cloud

	fh, err = os.Open(filePath)
	t.Assert(err, IsNil)

	if !externalUpdate {
		// we flushed and ttl expired, next lookup should
		// realize nothing is changed and NOT invalidate the
		// cache. Except ADLv1,GCS because PUT there doesn't
		// return the mtime, so the open above will think the
		// file is updated and not re-use cache
		_, adlv1 := s.cloud.(*ADLv1)
		_, isGCS := s.cloud.(*GCSBackend)
		if !adlv1 && !isGCS {
			// any cloud read from here on would fail the test
			cloud.err = fuse.EINVAL
		}
	} else {
		// if there was externalUpdate, we wrote our own
		// update with KeepPageCache=false, so we should read
		// from the cloud here
	}

	buf, err = ioutil.ReadAll(fh)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, "file3")
}

// TestReadMyOwnNewFileFuse creates a brand-new file through the mount and
// writes to it without flushing; the read-back check is disabled (see the
// comments below).
func (s *GoofysTest) TestReadMyOwnNewFileFuse(t *C) {
	s.fs.flags.StatCacheTTL = 1 * time.Second
	s.fs.flags.TypeCacheTTL = 1 * time.Second

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	filePath := mountPoint + "/filex"

	// jacobsa/fuse doesn't support setting OpenKeepCache on
	// CreateFile but even after manually setting it in
	// fuse/conversions.go, we still receive read ops instead of
	// being handled by kernel

	fh, err := os.Create(filePath)
	t.Assert(err, IsNil)

	_, err = fh.WriteString("filex")
	t.Assert(err, IsNil)
	// we can't flush yet because if we did, we would be reading
	// the new copy from cloud and that's not the point of this
	// test
	defer fh.Close()

	// disabled: we can't actually read back our own update
	//buf, err := ioutil.ReadFile(filePath)
	//t.Assert(err, IsNil)
	//t.Assert(string(buf), Equals, "filex")
}