github.com/maobaolong/goofys@v0.24.1-0.20200717030821-b50ef2d29ddf/internal/goofys_test.go (about) 1 // Copyright 2015 - 2017 Ka-Hing Cheung 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package internal 16 17 import ( 18 . "github.com/kahing/goofys/api/common" 19 20 "bufio" 21 "bytes" 22 "fmt" 23 "io" 24 "io/ioutil" 25 "math/rand" 26 "net" 27 "os" 28 "os/exec" 29 "os/signal" 30 "os/user" 31 "reflect" 32 "runtime" 33 "sort" 34 "strconv" 35 "strings" 36 "sync" 37 "syscall" 38 "testing" 39 "time" 40 41 "context" 42 43 "github.com/aws/aws-sdk-go/aws" 44 "github.com/aws/aws-sdk-go/aws/corehandlers" 45 "github.com/aws/aws-sdk-go/aws/credentials" 46 47 "github.com/Azure/azure-storage-blob-go/azblob" 48 "github.com/Azure/go-autorest/autorest" 49 "github.com/Azure/go-autorest/autorest/azure" 50 azureauth "github.com/Azure/go-autorest/autorest/azure/auth" 51 52 "golang.org/x/sys/unix" 53 54 "github.com/jacobsa/fuse" 55 "github.com/jacobsa/fuse/fuseops" 56 "github.com/jacobsa/fuse/fuseutil" 57 58 "github.com/sirupsen/logrus" 59 60 . 
"gopkg.in/check.v1" 61 "runtime/debug" 62 ) 63 64 const READAHEAD_CHUNK = uint32(20 * 1024 * 1024) 65 66 // so I don't get complains about unused imports 67 var ignored = logrus.DebugLevel 68 69 const PerTestTimeout = 10 * time.Minute 70 71 func currentUid() uint32 { 72 user, err := user.Current() 73 if err != nil { 74 panic(err) 75 } 76 77 uid, err := strconv.ParseUint(user.Uid, 10, 32) 78 if err != nil { 79 panic(err) 80 } 81 82 return uint32(uid) 83 } 84 85 func currentGid() uint32 { 86 user, err := user.Current() 87 if err != nil { 88 panic(err) 89 } 90 91 gid, err := strconv.ParseUint(user.Gid, 10, 32) 92 if err != nil { 93 panic(err) 94 } 95 96 return uint32(gid) 97 } 98 99 type GoofysTest struct { 100 fs *Goofys 101 ctx context.Context 102 awsConfig *aws.Config 103 cloud StorageBackend 104 emulator bool 105 azurite bool 106 107 removeBucket []StorageBackend 108 109 env map[string]*string 110 111 timeout chan int 112 } 113 114 func Test(t *testing.T) { 115 TestingT(t) 116 } 117 118 var _ = Suite(&GoofysTest{}) 119 120 func logOutput(t *C, tag string, r io.ReadCloser) { 121 in := bufio.NewScanner(r) 122 123 for in.Scan() { 124 t.Log(tag, in.Text()) 125 } 126 } 127 128 func waitFor(t *C, addr string) (err error) { 129 // wait for it to listen on port 130 for i := 0; i < 10; i++ { 131 var conn net.Conn 132 conn, err = net.Dial("tcp", addr) 133 if err == nil { 134 // we are done! 
135 conn.Close() 136 return 137 } else { 138 t.Logf("Cound not connect: %v", err) 139 time.Sleep(100 * time.Millisecond) 140 } 141 } 142 143 return 144 } 145 146 func (t *GoofysTest) deleteBlobsParallelly(cloud StorageBackend, blobs []string) error { 147 sem := make(semaphore, 100) 148 sem.P(100) 149 var err error 150 for _, blobOuter := range blobs { 151 sem.V(1) 152 go func(blob string) { 153 defer sem.P(1) 154 _, localerr := cloud.DeleteBlob(&DeleteBlobInput{blob}) 155 if localerr != nil && localerr != syscall.ENOENT { 156 err = localerr 157 } 158 }(blobOuter) 159 if err != nil { 160 break 161 } 162 } 163 sem.V(100) 164 return err 165 } 166 167 // groupByDecresingDepths takes a slice of path strings and returns the paths as 168 // groups where each group has the same `depth` - depth(a/b/c)=2, depth(a/b/)=1 169 // The groups are returned in decreasing order of depths. 170 // - Inp: [] Out: [] 171 // - Inp: ["a/b1/", "a/b/c1", "a/b2", "a/b/c2"] 172 // Out: [["a/b/c1", "a/b/c2"], ["a/b1/", "a/b2"]] 173 // - Inp: ["a/b1/", "z/a/b/c1", "a/b2", "z/a/b/c2"] 174 // Out: [["z/a/b/c1", "z/a/b/c2"], ["a/b1/", "a/b2"] 175 func groupByDecresingDepths(items []string) [][]string { 176 depthToGroup := map[int][]string{} 177 for _, item := range items { 178 depth := len(strings.Split(strings.TrimRight(item, "/"), "/")) 179 if _, ok := depthToGroup[depth]; !ok { 180 depthToGroup[depth] = []string{} 181 } 182 depthToGroup[depth] = append(depthToGroup[depth], item) 183 } 184 decreasingDepths := []int{} 185 for depth := range depthToGroup { 186 decreasingDepths = append(decreasingDepths, depth) 187 } 188 sort.Sort(sort.Reverse(sort.IntSlice(decreasingDepths))) 189 ret := [][]string{} 190 for _, depth := range decreasingDepths { 191 group, _ := depthToGroup[depth] 192 ret = append(ret, group) 193 } 194 return ret 195 } 196 197 func (t *GoofysTest) DeleteADLBlobs(cloud StorageBackend, items []string) error { 198 // If we delete a directory that's not empty, ADL{v1|v2} returns failure. 
That can 199 // happen if we want to delete both "dir1" and "dir1/file" but delete them 200 // in the wrong order. 201 // So we group the items to delete into multiple groups. All items in a group 202 // will have the same depth - depth(/a/b/c) = 2, depth(/a/b/) = 1. 203 // We then iterate over the groups in desc order of depth and delete them parallelly. 204 for _, group := range groupByDecresingDepths(items) { 205 err := t.deleteBlobsParallelly(cloud, group) 206 if err != nil { 207 return err 208 } 209 } 210 return nil 211 } 212 213 func (s *GoofysTest) selectTestConfig(t *C, flags *FlagStorage) (conf S3Config) { 214 (&conf).Init() 215 216 if hasEnv("AWS") { 217 if isTravis() { 218 conf.Region = "us-east-1" 219 } else { 220 conf.Region = "us-west-2" 221 } 222 profile := os.Getenv("AWS") 223 if profile != "" { 224 if profile != "-" { 225 conf.Profile = profile 226 } else { 227 conf.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") 228 conf.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") 229 } 230 } 231 232 conf.BucketOwner = os.Getenv("BUCKET_OWNER") 233 if conf.BucketOwner == "" { 234 panic("BUCKET_OWNER is required on AWS") 235 } 236 } else if hasEnv("GCS") { 237 conf.Region = "us-west1" 238 conf.Profile = os.Getenv("GCS") 239 flags.Endpoint = "http://storage.googleapis.com" 240 } else if hasEnv("MINIO") { 241 conf.Region = "us-east-1" 242 conf.AccessKey = "Q3AM3UQ867SPQQA43P2F" 243 conf.SecretKey = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" 244 flags.Endpoint = "https://play.minio.io:9000" 245 } else { 246 s.emulator = true 247 248 conf.Region = "us-west-2" 249 conf.AccessKey = "foo" 250 conf.SecretKey = "bar" 251 flags.Endpoint = "http://127.0.0.1:8080" 252 } 253 254 return 255 } 256 257 func (s *GoofysTest) waitForEmulator(t *C) { 258 if s.emulator { 259 addr := "127.0.0.1:8080" 260 261 err := waitFor(t, addr) 262 t.Assert(err, IsNil) 263 } 264 } 265 266 func (s *GoofysTest) SetUpSuite(t *C) { 267 } 268 269 func (s *GoofysTest) deleteBucket(cloud StorageBackend) 
error { 270 param := &ListBlobsInput{} 271 272 // Azure need special handling. 273 azureKeysToRemove := make([]string, 0) 274 for { 275 resp, err := cloud.ListBlobs(param) 276 if err != nil { 277 return err 278 } 279 280 keysToRemove := []string{} 281 for _, o := range resp.Items { 282 keysToRemove = append(keysToRemove, *o.Key) 283 } 284 if len(keysToRemove) != 0 { 285 switch cloud.(type) { 286 case *ADLv1, *ADLv2, *AZBlob: 287 // ADLV{1|2} and AZBlob (sometimes) supports directories. => dir can be removed only 288 // after the dir is empty. So we will remove the blobs in reverse depth order via 289 // DeleteADLBlobs after this for loop. 290 azureKeysToRemove = append(azureKeysToRemove, keysToRemove...) 291 default: 292 _, err = cloud.DeleteBlobs(&DeleteBlobsInput{Items: keysToRemove}) 293 if err != nil { 294 return err 295 } 296 } 297 } 298 if resp.IsTruncated { 299 param.ContinuationToken = resp.NextContinuationToken 300 } else { 301 break 302 } 303 } 304 305 if len(azureKeysToRemove) != 0 { 306 err := s.DeleteADLBlobs(cloud, azureKeysToRemove) 307 if err != nil { 308 return err 309 } 310 } 311 312 _, err := cloud.RemoveBucket(&RemoveBucketInput{}) 313 return err 314 } 315 316 func (s *GoofysTest) TearDownTest(t *C) { 317 close(s.timeout) 318 319 for _, cloud := range s.removeBucket { 320 err := s.deleteBucket(cloud) 321 t.Assert(err, IsNil) 322 } 323 s.removeBucket = nil 324 } 325 326 func (s *GoofysTest) removeBlob(cloud StorageBackend, t *C, blobPath string) { 327 params := &DeleteBlobInput{ 328 Key: blobPath, 329 } 330 _, err := cloud.DeleteBlob(params) 331 t.Assert(err, IsNil) 332 } 333 334 func (s *GoofysTest) setupBlobs(cloud StorageBackend, t *C, env map[string]*string) { 335 336 // concurrency = 100 337 throttler := make(semaphore, 100) 338 throttler.P(100) 339 340 var globalErr error 341 for path, c := range env { 342 throttler.V(1) 343 go func(path string, content *string) { 344 dir := false 345 if content == nil { 346 if strings.HasSuffix(path, "/") 
{ 347 if cloud.Capabilities().DirBlob { 348 path = strings.TrimRight(path, "/") 349 } 350 dir = true 351 content = PString("") 352 } else { 353 content = &path 354 } 355 } 356 defer throttler.P(1) 357 params := &PutBlobInput{ 358 Key: path, 359 Body: bytes.NewReader([]byte(*content)), 360 Size: PUInt64(uint64(len(*content))), 361 Metadata: map[string]*string{ 362 "name": aws.String(path + "+/#%00"), 363 }, 364 DirBlob: dir, 365 } 366 367 _, err := cloud.PutBlob(params) 368 if err != nil { 369 globalErr = err 370 } 371 t.Assert(err, IsNil) 372 }(path, c) 373 } 374 throttler.V(100) 375 throttler = make(semaphore, 100) 376 throttler.P(100) 377 t.Assert(globalErr, IsNil) 378 379 // double check, except on AWS S3, because there we sometimes 380 // hit 404 NoSuchBucket and there's no way to distinguish that 381 // from 404 KeyNotFound 382 if !hasEnv("AWS") { 383 for path, c := range env { 384 throttler.V(1) 385 go func(path string, content *string) { 386 defer throttler.P(1) 387 params := &HeadBlobInput{Key: path} 388 res, err := cloud.HeadBlob(params) 389 t.Assert(err, IsNil) 390 if content != nil { 391 t.Assert(res.Size, Equals, uint64(len(*content))) 392 } else if strings.HasSuffix(path, "/") || path == "zero" { 393 t.Assert(res.Size, Equals, uint64(0)) 394 } else { 395 t.Assert(res.Size, Equals, uint64(len(path))) 396 } 397 }(path, c) 398 } 399 throttler.V(100) 400 t.Assert(globalErr, IsNil) 401 } 402 } 403 404 func (s *GoofysTest) setupEnv(t *C, env map[string]*string, public bool) { 405 if public { 406 if s3, ok := s.cloud.Delegate().(*S3Backend); ok { 407 s3.config.ACL = "public-read" 408 } else { 409 t.Error("Not S3 backend") 410 } 411 } 412 413 _, err := s.cloud.MakeBucket(&MakeBucketInput{}) 414 t.Assert(err, IsNil) 415 416 if !s.emulator { 417 //time.Sleep(time.Second) 418 } 419 420 s.setupBlobs(s.cloud, t, env) 421 422 t.Log("setupEnv done") 423 } 424 425 func (s *GoofysTest) setupDefaultEnv(t *C, public bool) { 426 s.env = map[string]*string{ 427 "file1": 
nil, 428 "file2": nil, 429 "dir1/file3": nil, 430 "dir2/dir3/": nil, 431 "dir2/dir3/file4": nil, 432 "dir4/": nil, 433 "dir4/file5": nil, 434 "empty_dir/": nil, 435 "empty_dir2/": nil, 436 "zero": PString(""), 437 } 438 439 s.setupEnv(t, s.env, public) 440 } 441 442 func (s *GoofysTest) setUpTestTimeout(t *C) { 443 s.timeout = make(chan int) 444 debug.SetTraceback("all") 445 started := time.Now() 446 447 go func() { 448 select { 449 case _, ok := <-s.timeout: 450 if !ok { 451 return 452 } 453 case <-time.After(PerTestTimeout): 454 panic(fmt.Sprintf("timeout %v reached. Started %v now %v", 455 PerTestTimeout, started, time.Now())) 456 } 457 }() 458 } 459 460 func (s *GoofysTest) SetUpTest(t *C) { 461 log.Infof("Starting at %v", time.Now()) 462 463 s.setUpTestTimeout(t) 464 465 var bucket string 466 mount := os.Getenv("MOUNT") 467 468 if mount != "false" { 469 bucket = mount 470 } else { 471 bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16) 472 } 473 uid, gid := MyUserAndGroup() 474 flags := &FlagStorage{ 475 DirMode: 0700, 476 FileMode: 0700, 477 Uid: uint32(uid), 478 Gid: uint32(gid), 479 HTTPTimeout: 30 * time.Second, 480 } 481 482 cloud := os.Getenv("CLOUD") 483 484 if cloud == "s3" { 485 s.emulator = !hasEnv("AWS") 486 s.waitForEmulator(t) 487 488 conf := s.selectTestConfig(t, flags) 489 flags.Backend = &conf 490 491 s3, err := NewS3(bucket, flags, &conf) 492 t.Assert(err, IsNil) 493 494 s.cloud = s3 495 s3.aws = hasEnv("AWS") 496 if s3.aws { 497 s.cloud = NewS3BucketEventualConsistency(s3) 498 } 499 500 if s.emulator { 501 s3.Handlers.Sign.Clear() 502 s3.Handlers.Sign.PushBack(SignV2) 503 s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) 504 } 505 _, err = s3.ListBuckets(nil) 506 t.Assert(err, IsNil) 507 508 } else if cloud == "gcs" { 509 conf := s.selectTestConfig(t, flags) 510 flags.Backend = &conf 511 512 var err error 513 s.cloud, err = NewGCS3(bucket, flags, &conf) 514 t.Assert(s.cloud, NotNil) 515 t.Assert(err, IsNil) 516 } 
else if cloud == "azblob" { 517 config, err := AzureBlobConfig(os.Getenv("ENDPOINT"), "", "blob") 518 t.Assert(err, IsNil) 519 520 if config.Endpoint == AzuriteEndpoint { 521 s.azurite = true 522 s.emulator = true 523 s.waitForEmulator(t) 524 } 525 526 // Azurite's SAS is buggy, ex: https://github.com/Azure/Azurite/issues/216 527 if os.Getenv("SAS_EXPIRE") != "" { 528 expire, err := time.ParseDuration(os.Getenv("SAS_EXPIRE")) 529 t.Assert(err, IsNil) 530 531 config.TokenRenewBuffer = expire / 2 532 credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey) 533 t.Assert(err, IsNil) 534 535 // test sas token config 536 config.SasToken = func() (string, error) { 537 sasQueryParams, err := azblob.AccountSASSignatureValues{ 538 Protocol: azblob.SASProtocolHTTPSandHTTP, 539 StartTime: time.Now().UTC().Add(-1 * time.Hour), 540 ExpiryTime: time.Now().UTC().Add(expire), 541 Services: azblob.AccountSASServices{Blob: true}.String(), 542 ResourceTypes: azblob.AccountSASResourceTypes{ 543 Service: true, 544 Container: true, 545 Object: true, 546 }.String(), 547 Permissions: azblob.AccountSASPermissions{ 548 Read: true, 549 Write: true, 550 Delete: true, 551 List: true, 552 Create: true, 553 }.String(), 554 }.NewSASQueryParameters(credential) 555 if err != nil { 556 return "", err 557 } 558 return sasQueryParams.Encode(), nil 559 } 560 } 561 562 flags.Backend = &config 563 564 s.cloud, err = NewAZBlob(bucket, &config) 565 t.Assert(err, IsNil) 566 t.Assert(s.cloud, NotNil) 567 } else if cloud == "adlv1" { 568 cred := azureauth.NewClientCredentialsConfig( 569 os.Getenv("ADLV1_CLIENT_ID"), 570 os.Getenv("ADLV1_CLIENT_CREDENTIAL"), 571 os.Getenv("ADLV1_TENANT_ID")) 572 auth, err := cred.Authorizer() 573 t.Assert(err, IsNil) 574 575 config := ADLv1Config{ 576 Endpoint: os.Getenv("ENDPOINT"), 577 Authorizer: auth, 578 } 579 config.Init() 580 581 flags.Backend = &config 582 583 s.cloud, err = NewADLv1(bucket, flags, &config) 584 t.Assert(err, IsNil) 585 
t.Assert(s.cloud, NotNil) 586 } else if cloud == "adlv2" { 587 var err error 588 var auth autorest.Authorizer 589 590 if os.Getenv("AZURE_STORAGE_ACCOUNT") != "" && os.Getenv("AZURE_STORAGE_KEY") != "" { 591 auth = &AZBlobConfig{ 592 AccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"), 593 AccountKey: os.Getenv("AZURE_STORAGE_KEY"), 594 } 595 } else { 596 cred := azureauth.NewClientCredentialsConfig( 597 os.Getenv("ADLV2_CLIENT_ID"), 598 os.Getenv("ADLV2_CLIENT_CREDENTIAL"), 599 os.Getenv("ADLV2_TENANT_ID")) 600 cred.Resource = azure.PublicCloud.ResourceIdentifiers.Storage 601 auth, err = cred.Authorizer() 602 t.Assert(err, IsNil) 603 } 604 605 config := ADLv2Config{ 606 Endpoint: os.Getenv("ENDPOINT"), 607 Authorizer: auth, 608 } 609 610 flags.Backend = &config 611 612 s.cloud, err = NewADLv2(bucket, flags, &config) 613 t.Assert(err, IsNil) 614 t.Assert(s.cloud, NotNil) 615 } else { 616 t.Fatal("Unsupported backend") 617 } 618 619 if mount == "false" { 620 s.removeBucket = append(s.removeBucket, s.cloud) 621 s.setupDefaultEnv(t, false) 622 } else { 623 _, err := s.cloud.MakeBucket(&MakeBucketInput{}) 624 if err == fuse.EEXIST { 625 err = nil 626 } 627 t.Assert(err, IsNil) 628 } 629 630 if hasEnv("AWS") { 631 s.fs = newGoofys(context.Background(), bucket, flags, 632 func(bucket string, flags *FlagStorage) (StorageBackend, error) { 633 cloud, err := NewBackend(bucket, flags) 634 if err != nil { 635 return nil, err 636 } 637 638 return NewS3BucketEventualConsistency(cloud.(*S3Backend)), nil 639 }) 640 } else { 641 s.fs = NewGoofys(context.Background(), bucket, flags) 642 } 643 t.Assert(s.fs, NotNil) 644 645 s.ctx = context.Background() 646 647 if hasEnv("GCS") { 648 flags.Endpoint = "http://storage.googleapis.com" 649 } 650 } 651 652 func (s *GoofysTest) getRoot(t *C) (inode *Inode) { 653 inode = s.fs.inodes[fuseops.RootInodeID] 654 t.Assert(inode, NotNil) 655 return 656 } 657 658 func (s *GoofysTest) TestGetRootInode(t *C) { 659 root := s.getRoot(t) 660 
t.Assert(root.Id, Equals, fuseops.InodeID(fuseops.RootInodeID)) 661 } 662 663 func (s *GoofysTest) TestGetRootAttributes(t *C) { 664 _, err := s.getRoot(t).GetAttributes() 665 t.Assert(err, IsNil) 666 } 667 668 func (s *GoofysTest) ForgetInode(t *C, inode fuseops.InodeID) { 669 err := s.fs.ForgetInode(s.ctx, &fuseops.ForgetInodeOp{Inode: inode}) 670 t.Assert(err, IsNil) 671 } 672 673 func (s *GoofysTest) LookUpInode(t *C, name string) (in *Inode, err error) { 674 parent := s.getRoot(t) 675 676 for { 677 idx := strings.Index(name, "/") 678 if idx == -1 { 679 break 680 } 681 682 dirName := name[0:idx] 683 name = name[idx+1:] 684 685 lookup := fuseops.LookUpInodeOp{ 686 Parent: parent.Id, 687 Name: dirName, 688 } 689 690 err = s.fs.LookUpInode(nil, &lookup) 691 if err != nil { 692 return 693 } 694 parent = s.fs.inodes[lookup.Entry.Child] 695 } 696 697 lookup := fuseops.LookUpInodeOp{ 698 Parent: parent.Id, 699 Name: name, 700 } 701 702 err = s.fs.LookUpInode(nil, &lookup) 703 if err != nil { 704 return 705 } 706 in = s.fs.inodes[lookup.Entry.Child] 707 return 708 } 709 710 func (s *GoofysTest) TestSetup(t *C) { 711 } 712 713 func (s *GoofysTest) TestLookUpInode(t *C) { 714 _, err := s.LookUpInode(t, "file1") 715 t.Assert(err, IsNil) 716 717 _, err = s.LookUpInode(t, "fileNotFound") 718 t.Assert(err, Equals, fuse.ENOENT) 719 720 _, err = s.LookUpInode(t, "dir1/file3") 721 t.Assert(err, IsNil) 722 723 _, err = s.LookUpInode(t, "dir2/dir3") 724 t.Assert(err, IsNil) 725 726 _, err = s.LookUpInode(t, "dir2/dir3/file4") 727 t.Assert(err, IsNil) 728 729 _, err = s.LookUpInode(t, "empty_dir") 730 t.Assert(err, IsNil) 731 } 732 733 func (s *GoofysTest) TestPanicWrapper(t *C) { 734 debug.SetTraceback("single") 735 736 fs := FusePanicLogger{s.fs} 737 err := fs.GetInodeAttributes(nil, &fuseops.GetInodeAttributesOp{ 738 Inode: 1234, 739 }) 740 t.Assert(err, Equals, fuse.EIO) 741 } 742 743 func (s *GoofysTest) TestGetInodeAttributes(t *C) { 744 inode, err := 
s.getRoot(t).LookUp("file1") 745 t.Assert(err, IsNil) 746 747 attr, err := inode.GetAttributes() 748 t.Assert(err, IsNil) 749 t.Assert(attr.Size, Equals, uint64(len("file1"))) 750 } 751 752 func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []DirHandleEntry) { 753 dh.mu.Lock() 754 defer dh.mu.Unlock() 755 756 en, err := dh.ReadDir(fuseops.DirOffset(0)) 757 t.Assert(err, IsNil) 758 t.Assert(en, NotNil) 759 t.Assert(en.Name, Equals, ".") 760 761 en, err = dh.ReadDir(fuseops.DirOffset(1)) 762 t.Assert(err, IsNil) 763 t.Assert(en, NotNil) 764 t.Assert(en.Name, Equals, "..") 765 766 for i := fuseops.DirOffset(2); ; i++ { 767 en, err = dh.ReadDir(i) 768 t.Assert(err, IsNil) 769 770 if en == nil { 771 return 772 } 773 774 entries = append(entries, *en) 775 } 776 } 777 778 func namesOf(entries []DirHandleEntry) (names []string) { 779 for _, en := range entries { 780 names = append(names, en.Name) 781 } 782 return 783 } 784 785 func (s *GoofysTest) assertEntries(t *C, in *Inode, names []string) { 786 dh := in.OpenDir() 787 defer dh.CloseDir() 788 789 t.Assert(namesOf(s.readDirFully(t, dh)), DeepEquals, names) 790 } 791 792 func (s *GoofysTest) readDirIntoCache(t *C, inode fuseops.InodeID) { 793 openDirOp := fuseops.OpenDirOp{Inode: inode} 794 err := s.fs.OpenDir(nil, &openDirOp) 795 t.Assert(err, IsNil) 796 797 readDirOp := fuseops.ReadDirOp{ 798 Inode: inode, 799 Handle: openDirOp.Handle, 800 Dst: make([]byte, 8*1024), 801 } 802 803 err = s.fs.ReadDir(nil, &readDirOp) 804 t.Assert(err, IsNil) 805 } 806 807 func (s *GoofysTest) TestReadDirCacheLookup(t *C) { 808 s.fs.flags.StatCacheTTL = 1 * time.Minute 809 s.fs.flags.TypeCacheTTL = 1 * time.Minute 810 811 s.readDirIntoCache(t, fuseops.RootInodeID) 812 s.disableS3() 813 814 // should be cached so lookup should not need to talk to s3 815 entries := []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"} 816 for _, en := range entries { 817 err := s.fs.LookUpInode(nil, 
&fuseops.LookUpInodeOp{ 818 Parent: fuseops.RootInodeID, 819 Name: en, 820 }) 821 t.Assert(err, IsNil) 822 } 823 } 824 825 func (s *GoofysTest) TestReadDirWithExternalChanges(t *C) { 826 s.fs.flags.TypeCacheTTL = time.Second 827 828 dir1, err := s.LookUpInode(t, "dir1") 829 t.Assert(err, IsNil) 830 831 defaultEntries := []string{ 832 "dir1", "dir2", "dir4", "empty_dir", 833 "empty_dir2", "file1", "file2", "zero"} 834 s.assertEntries(t, s.getRoot(t), defaultEntries) 835 // dir1 has file3 and nothing else. 836 s.assertEntries(t, dir1, []string{"file3"}) 837 838 // Do the following 'external' changes in s3 without involving goofys. 839 // - Remove file1, add file3. 840 // - Remove dir1/file3. Given that dir1 has just this one file, 841 // we are effectively removing dir1 as well. 842 s.removeBlob(s.cloud, t, "file1") 843 s.setupBlobs(s.cloud, t, map[string]*string{"file3": nil}) 844 s.removeBlob(s.cloud, t, "dir1/file3") 845 846 time.Sleep(s.fs.flags.TypeCacheTTL) 847 // newEntries = `defaultEntries` - dir1 - file1 + file3. 848 newEntries := []string{ 849 "dir2", "dir4", "empty_dir", "empty_dir2", 850 "file2", "file3", "zero"} 851 if s.cloud.Capabilities().DirBlob { 852 // dir1 is not automatically deleted 853 newEntries = append([]string{"dir1"}, newEntries...) 
854 } 855 s.assertEntries(t, s.getRoot(t), newEntries) 856 } 857 858 func (s *GoofysTest) TestReadDir(t *C) { 859 // test listing / 860 dh := s.getRoot(t).OpenDir() 861 defer dh.CloseDir() 862 863 s.assertEntries(t, s.getRoot(t), []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"}) 864 865 // test listing dir1/ 866 in, err := s.LookUpInode(t, "dir1") 867 t.Assert(err, IsNil) 868 s.assertEntries(t, in, []string{"file3"}) 869 870 // test listing dir2/ 871 in, err = s.LookUpInode(t, "dir2") 872 t.Assert(err, IsNil) 873 s.assertEntries(t, in, []string{"dir3"}) 874 875 // test listing dir2/dir3/ 876 in, err = s.LookUpInode(t, "dir2/dir3") 877 t.Assert(err, IsNil) 878 s.assertEntries(t, in, []string{"file4"}) 879 } 880 881 func (s *GoofysTest) TestReadFiles(t *C) { 882 parent := s.getRoot(t) 883 dh := parent.OpenDir() 884 defer dh.CloseDir() 885 886 var entries []*DirHandleEntry 887 888 dh.mu.Lock() 889 for i := fuseops.DirOffset(0); ; i++ { 890 en, err := dh.ReadDir(i) 891 t.Assert(err, IsNil) 892 893 if en == nil { 894 break 895 } 896 897 entries = append(entries, en) 898 } 899 dh.mu.Unlock() 900 901 for _, en := range entries { 902 if en.Type == fuseutil.DT_File { 903 in, err := parent.LookUp(en.Name) 904 t.Assert(err, IsNil) 905 906 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 907 t.Assert(err, IsNil) 908 909 buf := make([]byte, 4096) 910 911 nread, err := fh.ReadFile(0, buf) 912 if en.Name == "zero" { 913 t.Assert(nread, Equals, 0) 914 } else { 915 t.Assert(nread, Equals, len(en.Name)) 916 buf = buf[0:nread] 917 t.Assert(string(buf), Equals, en.Name) 918 } 919 } else { 920 921 } 922 } 923 } 924 925 func (s *GoofysTest) TestReadOffset(t *C) { 926 root := s.getRoot(t) 927 f := "file1" 928 929 in, err := root.LookUp(f) 930 t.Assert(err, IsNil) 931 932 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 933 t.Assert(err, IsNil) 934 935 buf := make([]byte, 4096) 936 937 nread, err := fh.ReadFile(1, buf) 938 
t.Assert(err, IsNil) 939 t.Assert(nread, Equals, len(f)-1) 940 t.Assert(string(buf[0:nread]), DeepEquals, f[1:]) 941 942 r := rand.New(rand.NewSource(time.Now().UnixNano())) 943 944 for i := 0; i < 3; i++ { 945 off := r.Int31n(int32(len(f))) 946 nread, err = fh.ReadFile(int64(off), buf) 947 t.Assert(err, IsNil) 948 t.Assert(nread, Equals, len(f)-int(off)) 949 t.Assert(string(buf[0:nread]), DeepEquals, f[off:]) 950 } 951 } 952 953 func (s *GoofysTest) TestCreateFiles(t *C) { 954 fileName := "testCreateFile" 955 956 _, fh := s.getRoot(t).Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())}) 957 958 err := fh.FlushFile() 959 t.Assert(err, IsNil) 960 961 resp, err := s.cloud.GetBlob(&GetBlobInput{Key: fileName}) 962 t.Assert(err, IsNil) 963 t.Assert(resp.HeadBlobOutput.Size, DeepEquals, uint64(0)) 964 defer resp.Body.Close() 965 966 _, err = s.getRoot(t).LookUp(fileName) 967 t.Assert(err, IsNil) 968 969 fileName = "testCreateFile2" 970 s.testWriteFile(t, fileName, 1, 128*1024) 971 972 inode, err := s.getRoot(t).LookUp(fileName) 973 t.Assert(err, IsNil) 974 975 fh, err = inode.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 976 t.Assert(err, IsNil) 977 978 err = fh.FlushFile() 979 t.Assert(err, IsNil) 980 981 resp, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName}) 982 t.Assert(err, IsNil) 983 // ADLv1 doesn't return size when we do a GET 984 if _, adlv1 := s.cloud.(*ADLv1); !adlv1 { 985 t.Assert(resp.HeadBlobOutput.Size, Equals, uint64(1)) 986 } 987 defer resp.Body.Close() 988 } 989 990 func (s *GoofysTest) TestUnlink(t *C) { 991 fileName := "file1" 992 993 err := s.getRoot(t).Unlink(fileName) 994 t.Assert(err, IsNil) 995 996 // make sure that it's gone from s3 997 _, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName}) 998 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 999 } 1000 1001 type FileHandleReader struct { 1002 fs *Goofys 1003 fh *FileHandle 1004 offset int64 1005 } 1006 1007 func (r *FileHandleReader) Read(p []byte) (nread int, err error) { 1008 
nread, err = r.fh.ReadFile(r.offset, p) 1009 r.offset += int64(nread) 1010 return 1011 } 1012 1013 func (r *FileHandleReader) Seek(offset int64, whence int) (int64, error) { 1014 switch whence { 1015 case 0: 1016 r.offset = offset 1017 case 1: 1018 r.offset += offset 1019 default: 1020 panic(fmt.Sprintf("unsupported whence: %v", whence)) 1021 } 1022 1023 return r.offset, nil 1024 } 1025 1026 func (s *GoofysTest) testWriteFile(t *C, fileName string, size int64, write_size int) { 1027 s.testWriteFileAt(t, fileName, int64(0), size, write_size) 1028 } 1029 1030 func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, write_size int) { 1031 var fh *FileHandle 1032 root := s.getRoot(t) 1033 1034 lookup := fuseops.LookUpInodeOp{ 1035 Parent: root.Id, 1036 Name: fileName, 1037 } 1038 err := s.fs.LookUpInode(nil, &lookup) 1039 if err != nil { 1040 if err == fuse.ENOENT { 1041 create := fuseops.CreateFileOp{ 1042 Parent: root.Id, 1043 Name: fileName, 1044 } 1045 err = s.fs.CreateFile(nil, &create) 1046 t.Assert(err, IsNil) 1047 1048 fh = s.fs.fileHandles[create.Handle] 1049 } else { 1050 t.Assert(err, IsNil) 1051 } 1052 } else { 1053 in := s.fs.inodes[lookup.Entry.Child] 1054 fh, err = in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 1055 t.Assert(err, IsNil) 1056 } 1057 1058 buf := make([]byte, write_size) 1059 nwritten := offset 1060 1061 src := io.LimitReader(&SeqReader{}, size) 1062 1063 for { 1064 nread, err := src.Read(buf) 1065 if err == io.EOF { 1066 t.Assert(nwritten, Equals, size) 1067 break 1068 } 1069 t.Assert(err, IsNil) 1070 1071 err = fh.WriteFile(nwritten, buf[:nread]) 1072 t.Assert(err, IsNil) 1073 nwritten += int64(nread) 1074 } 1075 1076 err = fh.FlushFile() 1077 t.Assert(err, IsNil) 1078 1079 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: fileName}) 1080 t.Assert(err, IsNil) 1081 t.Assert(resp.Size, Equals, uint64(size+offset)) 1082 1083 fr := &FileHandleReader{s.fs, fh, offset} 1084 diff, err := CompareReader(fr, 
io.LimitReader(&SeqReader{offset}, size), 0) 1085 t.Assert(err, IsNil) 1086 t.Assert(diff, Equals, -1) 1087 t.Assert(fr.offset, Equals, size) 1088 1089 err = fh.FlushFile() 1090 t.Assert(err, IsNil) 1091 1092 // read again with exact 4KB to catch aligned read case 1093 fr = &FileHandleReader{s.fs, fh, offset} 1094 diff, err = CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 4096) 1095 t.Assert(err, IsNil) 1096 t.Assert(diff, Equals, -1) 1097 t.Assert(fr.offset, Equals, size) 1098 1099 fh.Release() 1100 } 1101 1102 func (s *GoofysTest) TestWriteLargeFile(t *C) { 1103 s.testWriteFile(t, "testLargeFile", int64(READAHEAD_CHUNK)+1024*1024, 128*1024) 1104 s.testWriteFile(t, "testLargeFile2", int64(READAHEAD_CHUNK), 128*1024) 1105 s.testWriteFile(t, "testLargeFile3", int64(READAHEAD_CHUNK)+1, 128*1024) 1106 } 1107 1108 func (s *GoofysTest) TestWriteReallyLargeFile(t *C) { 1109 s.testWriteFile(t, "testLargeFile", 512*1024*1024+1, 128*1024) 1110 } 1111 1112 func (s *GoofysTest) TestWriteReplicatorThrottle(t *C) { 1113 s.fs.replicators = Ticket{Total: 1}.Init() 1114 s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024) 1115 } 1116 1117 func (s *GoofysTest) TestReadWriteMinimumMemory(t *C) { 1118 if _, ok := s.cloud.(*ADLv1); ok { 1119 s.fs.bufferPool.maxBuffers = 4 1120 } else { 1121 s.fs.bufferPool.maxBuffers = 2 1122 } 1123 s.fs.bufferPool.computedMaxbuffers = s.fs.bufferPool.maxBuffers 1124 s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024) 1125 } 1126 1127 func (s *GoofysTest) TestWriteManyFilesFile(t *C) { 1128 var files sync.WaitGroup 1129 1130 for i := 0; i < 21; i++ { 1131 files.Add(1) 1132 fileName := "testSmallFile" + strconv.Itoa(i) 1133 go func() { 1134 defer files.Done() 1135 s.testWriteFile(t, fileName, 1, 128*1024) 1136 }() 1137 } 1138 1139 files.Wait() 1140 } 1141 1142 func (s *GoofysTest) testWriteFileNonAlign(t *C) { 1143 s.testWriteFile(t, "testWriteFileNonAlign", 6*1024*1024, 128*1024+1) 1144 } 1145 1146 func (s *GoofysTest) 
TestReadRandom(t *C) { 1147 size := int64(21 * 1024 * 1024) 1148 1149 s.testWriteFile(t, "testLargeFile", size, 128*1024) 1150 in, err := s.LookUpInode(t, "testLargeFile") 1151 t.Assert(err, IsNil) 1152 1153 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 1154 t.Assert(err, IsNil) 1155 fr := &FileHandleReader{s.fs, fh, 0} 1156 1157 src := rand.NewSource(time.Now().UnixNano()) 1158 truth := &SeqReader{} 1159 1160 for i := 0; i < 10; i++ { 1161 offset := src.Int63() % (size / 2) 1162 1163 fr.Seek(offset, 0) 1164 truth.Seek(offset, 0) 1165 1166 // read 5MB+1 from that offset 1167 nread := int64(5*1024*1024 + 1) 1168 CompareReader(io.LimitReader(fr, nread), io.LimitReader(truth, nread), 0) 1169 } 1170 } 1171 1172 func (s *GoofysTest) TestMkDir(t *C) { 1173 _, err := s.LookUpInode(t, "new_dir/file") 1174 t.Assert(err, Equals, fuse.ENOENT) 1175 1176 dirName := "new_dir" 1177 inode, err := s.getRoot(t).MkDir(dirName) 1178 t.Assert(err, IsNil) 1179 t.Assert(*inode.FullName(), Equals, dirName) 1180 1181 _, err = s.LookUpInode(t, dirName) 1182 t.Assert(err, IsNil) 1183 1184 fileName := "file" 1185 _, fh := inode.Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())}) 1186 1187 err = fh.FlushFile() 1188 t.Assert(err, IsNil) 1189 1190 _, err = s.LookUpInode(t, dirName+"/"+fileName) 1191 t.Assert(err, IsNil) 1192 } 1193 1194 func (s *GoofysTest) TestRmDir(t *C) { 1195 root := s.getRoot(t) 1196 1197 err := root.RmDir("dir1") 1198 t.Assert(err, Equals, fuse.ENOTEMPTY) 1199 1200 err = root.RmDir("dir2") 1201 t.Assert(err, Equals, fuse.ENOTEMPTY) 1202 1203 err = root.RmDir("empty_dir") 1204 t.Assert(err, IsNil) 1205 1206 } 1207 1208 func (s *GoofysTest) TestRenamePreserveMetadata(t *C) { 1209 if _, ok := s.cloud.(*ADLv1); ok { 1210 t.Skip("ADLv1 doesn't support metadata") 1211 } 1212 root := s.getRoot(t) 1213 1214 from, to := "file1", "new_file" 1215 1216 metadata := make(map[string]*string) 1217 metadata["foo"] = aws.String("bar") 1218 1219 _, err := 
s.cloud.CopyBlob(&CopyBlobInput{ 1220 Source: from, 1221 Destination: from, 1222 Metadata: metadata, 1223 }) 1224 t.Assert(err, IsNil) 1225 1226 err = root.Rename(from, root, to) 1227 t.Assert(err, IsNil) 1228 1229 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: to}) 1230 t.Assert(err, IsNil) 1231 t.Assert(resp.Metadata["foo"], NotNil) 1232 t.Assert(*resp.Metadata["foo"], Equals, "bar") 1233 } 1234 1235 func (s *GoofysTest) TestRenameLarge(t *C) { 1236 fileSize := int64(2 * 1024 * 1024 * 1024) 1237 // AWS S3 can timeout when renaming large file 1238 if _, ok := s.cloud.(*S3Backend); ok && s.emulator { 1239 // S3proxy runs out of memory on truly large files. We 1240 // want to use a large file to test timeout issues 1241 // which wouldn't happen on s3proxy anyway 1242 fileSize = 21 * 1024 * 1024 1243 } 1244 1245 s.testWriteFile(t, "large_file", fileSize, 128*1024) 1246 1247 root := s.getRoot(t) 1248 1249 from, to := "large_file", "large_file2" 1250 err := root.Rename(from, root, to) 1251 t.Assert(err, IsNil) 1252 } 1253 1254 func (s *GoofysTest) TestRenameToExisting(t *C) { 1255 root := s.getRoot(t) 1256 1257 // cache these 2 files first 1258 _, err := s.LookUpInode(t, "file1") 1259 t.Assert(err, IsNil) 1260 1261 _, err = s.LookUpInode(t, "file2") 1262 t.Assert(err, IsNil) 1263 1264 err = s.fs.Rename(nil, &fuseops.RenameOp{ 1265 OldParent: root.Id, 1266 NewParent: root.Id, 1267 OldName: "file1", 1268 NewName: "file2", 1269 }) 1270 t.Assert(err, IsNil) 1271 1272 file1 := root.findChild("file1") 1273 t.Assert(file1, IsNil) 1274 1275 file2 := root.findChild("file2") 1276 t.Assert(file2, NotNil) 1277 t.Assert(*file2.Name, Equals, "file2") 1278 } 1279 1280 func (s *GoofysTest) TestBackendListPagination(t *C) { 1281 if _, ok := s.cloud.(*ADLv1); ok { 1282 t.Skip("ADLv1 doesn't have pagination") 1283 } 1284 if s.azurite { 1285 // https://github.com/Azure/Azurite/issues/262 1286 t.Skip("Azurite doesn't support pagination") 1287 } 1288 1289 var itemsPerPage int 1290 switch 
s.cloud.Delegate().(type) { 1291 case *S3Backend, *GCS3: 1292 itemsPerPage = 1000 1293 case *AZBlob, *ADLv2: 1294 itemsPerPage = 5000 1295 default: 1296 t.Fatalf("unknown backend: %T", s.cloud) 1297 } 1298 1299 root := s.getRoot(t) 1300 root.dir.mountPrefix = "this_test/" 1301 1302 blobs := make(map[string]*string) 1303 expect := make([]string, 0) 1304 for i := 0; i < itemsPerPage+1; i++ { 1305 b := fmt.Sprintf("%08v", i) 1306 blobs["this_test/"+b] = nil 1307 expect = append(expect, b) 1308 } 1309 1310 switch s.cloud.(type) { 1311 case *ADLv1, *ADLv2: 1312 // these backends don't support parallel delete so I 1313 // am doing this here 1314 defer func() { 1315 var wg sync.WaitGroup 1316 1317 for b, _ := range blobs { 1318 SmallActionsGate.Take(1, true) 1319 wg.Add(1) 1320 1321 go func(key string) { 1322 // ignore the error here, 1323 // anything we didn't cleanup 1324 // will be handled by teardown 1325 _, _ = s.cloud.DeleteBlob(&DeleteBlobInput{key}) 1326 SmallActionsGate.Return(1) 1327 wg.Done() 1328 }(b) 1329 } 1330 1331 wg.Wait() 1332 }() 1333 } 1334 1335 s.setupBlobs(s.cloud, t, blobs) 1336 1337 dh := root.OpenDir() 1338 defer dh.CloseDir() 1339 1340 children := namesOf(s.readDirFully(t, dh)) 1341 t.Assert(children, DeepEquals, expect) 1342 } 1343 1344 func (s *GoofysTest) TestBackendListPrefix(t *C) { 1345 res, err := s.cloud.ListBlobs(&ListBlobsInput{ 1346 Prefix: PString("random"), 1347 Delimiter: PString("/"), 1348 }) 1349 t.Assert(err, IsNil) 1350 t.Assert(len(res.Prefixes), Equals, 0) 1351 t.Assert(len(res.Items), Equals, 0) 1352 1353 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1354 Prefix: PString("empty_dir"), 1355 Delimiter: PString("/"), 1356 }) 1357 t.Assert(err, IsNil) 1358 t.Assert(len(res.Prefixes), Not(Equals), 0) 1359 t.Assert(*res.Prefixes[0].Prefix, Equals, "empty_dir/") 1360 t.Assert(len(res.Items), Equals, 0) 1361 1362 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1363 Prefix: PString("empty_dir/"), 1364 Delimiter: PString("/"), 1365 }) 
1366 t.Assert(err, IsNil) 1367 t.Assert(len(res.Prefixes), Equals, 0) 1368 t.Assert(len(res.Items), Equals, 1) 1369 t.Assert(*res.Items[0].Key, Equals, "empty_dir/") 1370 1371 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1372 Prefix: PString("file1"), 1373 Delimiter: PString("/"), 1374 }) 1375 t.Assert(err, IsNil) 1376 t.Assert(len(res.Prefixes), Equals, 0) 1377 t.Assert(len(res.Items), Equals, 1) 1378 t.Assert(*res.Items[0].Key, Equals, "file1") 1379 1380 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1381 Prefix: PString("file1/"), 1382 Delimiter: PString("/"), 1383 }) 1384 t.Assert(err, IsNil) 1385 t.Assert(len(res.Prefixes), Equals, 0) 1386 t.Assert(len(res.Items), Equals, 0) 1387 1388 // ListBlobs: 1389 // - Case1: If the prefix foo/ is not added explicitly, then ListBlobs foo/ might or might not return foo/. 1390 // In the test setup dir2 is not expliticly created. 1391 // - Case2: Else, ListBlobs foo/ must return foo/ 1392 // In the test setup dir2/dir3 is expliticly created. 1393 1394 // ListBlobs:Case1 1395 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1396 Prefix: PString("dir2/"), 1397 Delimiter: PString("/"), 1398 }) 1399 t.Assert(err, IsNil) 1400 t.Assert(len(res.Prefixes), Equals, 1) 1401 t.Assert(*res.Prefixes[0].Prefix, Equals, "dir2/dir3/") 1402 if len(res.Items) == 1 { 1403 // azblob(with hierarchial ns on), adlv1, adlv2. 
1404 t.Assert(*res.Items[0].Key, Equals, "dir2/") 1405 } else { 1406 // s3, azblob(with hierarchial ns off) 1407 t.Assert(len(res.Items), Equals, 0) 1408 } 1409 1410 // ListBlobs:Case2 1411 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1412 Prefix: PString("dir2/dir3/"), 1413 Delimiter: PString("/"), 1414 }) 1415 t.Assert(err, IsNil) 1416 t.Assert(len(res.Prefixes), Equals, 0) 1417 t.Assert(len(res.Items), Equals, 2) 1418 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/") 1419 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4") 1420 1421 // ListBlobs:Case1 1422 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1423 Prefix: PString("dir2/"), 1424 }) 1425 t.Assert(err, IsNil) 1426 t.Assert(len(res.Prefixes), Equals, 0) 1427 if len(res.Items) == 3 { 1428 // azblob(with hierarchial ns on), adlv1, adlv2. 1429 t.Assert(*res.Items[0].Key, Equals, "dir2/") 1430 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/") 1431 t.Assert(*res.Items[2].Key, Equals, "dir2/dir3/file4") 1432 } else { 1433 // s3, azblob(with hierarchial ns off) 1434 t.Assert(len(res.Items), Equals, 2) 1435 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/") 1436 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4") 1437 } 1438 1439 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1440 Prefix: PString("dir2/dir3/file4"), 1441 }) 1442 t.Assert(err, IsNil) 1443 t.Assert(len(res.Prefixes), Equals, 0) 1444 t.Assert(len(res.Items), Equals, 1) 1445 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/file4") 1446 } 1447 1448 func (s *GoofysTest) TestRenameDir(t *C) { 1449 s.fs.flags.StatCacheTTL = 0 1450 1451 root := s.getRoot(t) 1452 1453 err := root.Rename("empty_dir", root, "dir1") 1454 t.Assert(err, Equals, fuse.ENOTEMPTY) 1455 1456 err = root.Rename("empty_dir", root, "new_dir") 1457 t.Assert(err, IsNil) 1458 1459 dir2, err := s.LookUpInode(t, "dir2") 1460 t.Assert(err, IsNil) 1461 t.Assert(dir2, NotNil) 1462 1463 _, err = s.LookUpInode(t, "new_dir2") 1464 t.Assert(err, Equals, fuse.ENOENT) 1465 1466 err = 
s.fs.Rename(nil, &fuseops.RenameOp{ 1467 OldParent: root.Id, 1468 NewParent: root.Id, 1469 OldName: "dir2", 1470 NewName: "new_dir2", 1471 }) 1472 t.Assert(err, IsNil) 1473 1474 _, err = s.LookUpInode(t, "dir2/dir3") 1475 t.Assert(err, Equals, fuse.ENOENT) 1476 1477 _, err = s.LookUpInode(t, "dir2/dir3/file4") 1478 t.Assert(err, Equals, fuse.ENOENT) 1479 1480 new_dir2, err := s.LookUpInode(t, "new_dir2") 1481 t.Assert(err, IsNil) 1482 t.Assert(new_dir2, NotNil) 1483 t.Assert(dir2.Id, Equals, new_dir2.Id) 1484 1485 old, err := s.LookUpInode(t, "new_dir2/dir3/file4") 1486 t.Assert(err, IsNil) 1487 t.Assert(old, NotNil) 1488 1489 err = s.fs.Rename(nil, &fuseops.RenameOp{ 1490 OldParent: root.Id, 1491 NewParent: root.Id, 1492 OldName: "new_dir2", 1493 NewName: "new_dir3", 1494 }) 1495 t.Assert(err, IsNil) 1496 1497 new, err := s.LookUpInode(t, "new_dir3/dir3/file4") 1498 t.Assert(err, IsNil) 1499 t.Assert(new, NotNil) 1500 t.Assert(old.Id, Equals, new.Id) 1501 1502 _, err = s.LookUpInode(t, "new_dir2/dir3") 1503 t.Assert(err, Equals, fuse.ENOENT) 1504 1505 _, err = s.LookUpInode(t, "new_dir2/dir3/file4") 1506 t.Assert(err, Equals, fuse.ENOENT) 1507 } 1508 1509 func (s *GoofysTest) TestRename(t *C) { 1510 root := s.getRoot(t) 1511 1512 from, to := "empty_dir", "file1" 1513 err := root.Rename(from, root, to) 1514 t.Assert(err, Equals, fuse.ENOTDIR) 1515 1516 from, to = "file1", "empty_dir" 1517 err = root.Rename(from, root, to) 1518 t.Assert(err, Equals, syscall.EISDIR) 1519 1520 from, to = "file1", "new_file" 1521 err = root.Rename(from, root, to) 1522 t.Assert(err, IsNil) 1523 1524 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to}) 1525 t.Assert(err, IsNil) 1526 1527 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from}) 1528 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 1529 1530 from, to = "file3", "new_file2" 1531 dir, _ := s.LookUpInode(t, "dir1") 1532 err = dir.Rename(from, root, to) 1533 t.Assert(err, IsNil) 1534 1535 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: 
to}) 1536 t.Assert(err, IsNil) 1537 1538 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from}) 1539 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 1540 1541 from, to = "no_such_file", "new_file" 1542 err = root.Rename(from, root, to) 1543 t.Assert(err, Equals, fuse.ENOENT) 1544 1545 if s3, ok := s.cloud.Delegate().(*S3Backend); ok { 1546 if !hasEnv("GCS") { 1547 // not really rename but can be used by rename 1548 from, to = s.fs.bucket+"/file2", "new_file" 1549 _, err = s3.copyObjectMultipart(int64(len("file2")), from, to, "", nil, nil, nil) 1550 t.Assert(err, IsNil) 1551 } 1552 } 1553 } 1554 1555 func (s *GoofysTest) TestConcurrentRefDeref(t *C) { 1556 root := s.getRoot(t) 1557 1558 lookupOp := fuseops.LookUpInodeOp{ 1559 Parent: root.Id, 1560 Name: "file1", 1561 } 1562 1563 for i := 0; i < 20; i++ { 1564 err := s.fs.LookUpInode(nil, &lookupOp) 1565 t.Assert(err, IsNil) 1566 1567 var wg sync.WaitGroup 1568 1569 wg.Add(2) 1570 go func() { 1571 // we want to yield to the forget goroutine so that it's run first 1572 // to trigger this bug 1573 if i%2 == 0 { 1574 runtime.Gosched() 1575 } 1576 s.fs.LookUpInode(nil, &lookupOp) 1577 wg.Done() 1578 }() 1579 go func() { 1580 s.fs.ForgetInode(nil, &fuseops.ForgetInodeOp{ 1581 Inode: lookupOp.Entry.Child, 1582 N: 1, 1583 }) 1584 wg.Done() 1585 }() 1586 1587 wg.Wait() 1588 } 1589 } 1590 1591 func hasEnv(env string) bool { 1592 v := os.Getenv(env) 1593 1594 return !(v == "" || v == "0" || v == "false") 1595 } 1596 1597 func isTravis() bool { 1598 return hasEnv("TRAVIS") 1599 } 1600 1601 func isCatfs() bool { 1602 return hasEnv("CATFS") 1603 } 1604 1605 func (s *GoofysTest) mount(t *C, mountPoint string) { 1606 err := os.MkdirAll(mountPoint, 0700) 1607 t.Assert(err, IsNil) 1608 1609 server := fuseutil.NewFileSystemServer(s.fs) 1610 1611 if isCatfs() { 1612 s.fs.flags.MountOptions = make(map[string]string) 1613 s.fs.flags.MountOptions["allow_other"] = "" 1614 } 1615 1616 // Mount the file system. 
1617 mountCfg := &fuse.MountConfig{ 1618 FSName: s.fs.bucket, 1619 Options: s.fs.flags.MountOptions, 1620 ErrorLogger: GetStdLogger(NewLogger("fuse"), logrus.ErrorLevel), 1621 DisableWritebackCaching: true, 1622 } 1623 mountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel) 1624 1625 _, err = fuse.Mount(mountPoint, server, mountCfg) 1626 t.Assert(err, IsNil) 1627 1628 if isCatfs() { 1629 cacheDir := mountPoint + "-cache" 1630 err := os.MkdirAll(cacheDir, 0700) 1631 t.Assert(err, IsNil) 1632 1633 catfs := exec.Command("catfs", "--test", "-ononempty", "--", mountPoint, cacheDir, mountPoint) 1634 _, err = catfs.Output() 1635 if err != nil { 1636 if ee, ok := err.(*exec.ExitError); ok { 1637 panic(ee.Stderr) 1638 } 1639 } 1640 1641 catfs = exec.Command("catfs", "-ononempty", "--", mountPoint, cacheDir, mountPoint) 1642 1643 if isTravis() { 1644 logger := NewLogger("catfs") 1645 lvl := logrus.InfoLevel 1646 logger.Formatter.(*LogHandle).Lvl = &lvl 1647 w := logger.Writer() 1648 1649 catfs.Stdout = w 1650 catfs.Stderr = w 1651 1652 catfs.Env = append(catfs.Env, "RUST_LOG=debug") 1653 } 1654 1655 err = catfs.Start() 1656 t.Assert(err, IsNil) 1657 1658 time.Sleep(time.Second) 1659 } 1660 } 1661 1662 func (s *GoofysTest) umount(t *C, mountPoint string) { 1663 var err error 1664 for i := 0; i < 10; i++ { 1665 err = fuse.Unmount(mountPoint) 1666 if err != nil { 1667 time.Sleep(100 * time.Millisecond) 1668 } else { 1669 break 1670 } 1671 } 1672 t.Assert(err, IsNil) 1673 1674 os.Remove(mountPoint) 1675 if isCatfs() { 1676 cacheDir := mountPoint + "-cache" 1677 os.Remove(cacheDir) 1678 } 1679 } 1680 1681 func (s *GoofysTest) runFuseTest(t *C, mountPoint string, umount bool, cmdArgs ...string) { 1682 s.mount(t, mountPoint) 1683 1684 if umount { 1685 defer s.umount(t, mountPoint) 1686 } 1687 1688 // if command starts with ./ or ../ then we are executing a 1689 // relative path and cannot do chdir 1690 chdir := cmdArgs[0][0] != '.' 
1691 1692 cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) 1693 cmd.Env = append(cmd.Env, os.Environ()...) 1694 cmd.Env = append(cmd.Env, "FAST=true") 1695 cmd.Env = append(cmd.Env, "CLEANUP=false") 1696 1697 if isTravis() { 1698 logger := NewLogger("test") 1699 lvl := logrus.InfoLevel 1700 logger.Formatter.(*LogHandle).Lvl = &lvl 1701 w := logger.Writer() 1702 1703 cmd.Stdout = w 1704 cmd.Stderr = w 1705 } 1706 1707 if chdir { 1708 oldCwd, err := os.Getwd() 1709 t.Assert(err, IsNil) 1710 1711 err = os.Chdir(mountPoint) 1712 t.Assert(err, IsNil) 1713 1714 defer os.Chdir(oldCwd) 1715 } 1716 1717 err := cmd.Run() 1718 t.Assert(err, IsNil) 1719 } 1720 1721 func (s *GoofysTest) TestFuse(t *C) { 1722 mountPoint := "/tmp/mnt" + s.fs.bucket 1723 1724 s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint) 1725 } 1726 1727 func (s *GoofysTest) TestFuseWithTTL(t *C) { 1728 s.fs.flags.StatCacheTTL = 60 * 1000 * 1000 * 1000 1729 mountPoint := "/tmp/mnt" + s.fs.bucket 1730 1731 s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint) 1732 } 1733 1734 func (s *GoofysTest) TestCheap(t *C) { 1735 s.fs.flags.Cheap = true 1736 s.TestLookUpInode(t) 1737 s.TestWriteLargeFile(t) 1738 } 1739 1740 func (s *GoofysTest) TestExplicitDir(t *C) { 1741 s.fs.flags.ExplicitDir = true 1742 s.testExplicitDir(t) 1743 } 1744 1745 func (s *GoofysTest) TestExplicitDirAndCheap(t *C) { 1746 s.fs.flags.ExplicitDir = true 1747 s.fs.flags.Cheap = true 1748 s.testExplicitDir(t) 1749 } 1750 1751 func (s *GoofysTest) testExplicitDir(t *C) { 1752 if s.cloud.Capabilities().DirBlob { 1753 t.Skip("only for backends without dir blob") 1754 } 1755 1756 _, err := s.LookUpInode(t, "file1") 1757 t.Assert(err, IsNil) 1758 1759 _, err = s.LookUpInode(t, "fileNotFound") 1760 t.Assert(err, Equals, fuse.ENOENT) 1761 1762 // dir1/ doesn't exist so we shouldn't be able to see it 1763 _, err = s.LookUpInode(t, "dir1/file3") 1764 t.Assert(err, Equals, fuse.ENOENT) 1765 1766 _, err = 
s.LookUpInode(t, "dir4/file5")
	t.Assert(err, IsNil)

	_, err = s.LookUpInode(t, "empty_dir")
	t.Assert(err, IsNil)
}

// TestBenchLs runs the external ls benchmark with warm caches.
func (s *GoofysTest) TestBenchLs(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Minute
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "ls")
}

// TestBenchCreate runs the external file-creation benchmark.
func (s *GoofysTest) TestBenchCreate(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Minute
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create")
}

// TestBenchCreateParallel runs the parallel file-creation benchmark.
func (s *GoofysTest) TestBenchCreateParallel(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Minute
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create_parallel")
}

// TestBenchIO runs the external read/write benchmark.
func (s *GoofysTest) TestBenchIO(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Minute
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "io")
}

// TestBenchFindTree runs the external find benchmark.
func (s *GoofysTest) TestBenchFindTree(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Minute
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "find")
}

func (s *GoofysTest) TestIssue231(t *C) {
	if isTravis() {
		t.Skip("disable in travis, not sure if it has enough memory")
	}
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue231")
}

// TestChmod verifies SetInodeAttributes accepts a mode change.
func (s *GoofysTest) TestChmod(t *C) {
	root := s.getRoot(t)

	lookupOp := fuseops.LookUpInodeOp{
		Parent: root.Id,
		Name:   "file1",
	}

	err := s.fs.LookUpInode(nil, &lookupOp)
	t.Assert(err, IsNil)

	targetMode := os.FileMode(0777)
	setOp := fuseops.SetInodeAttributesOp{Inode: lookupOp.Entry.Child, Mode: &targetMode}

	err = s.fs.SetInodeAttributes(s.ctx, &setOp)
	t.Assert(err, IsNil)
	t.Assert(setOp.Attributes, NotNil)
}

// TestIssue64 is kept as a placeholder; the original repro is disabled.
func (s *GoofysTest) TestIssue64(t *C) {
	/*
		mountPoint := "/tmp/mnt" + s.fs.bucket
		log.Level = logrus.DebugLevel

		err := os.MkdirAll(mountPoint, 0700)
		t.Assert(err, IsNil)

		defer os.Remove(mountPoint)

		s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue64")
	*/
}

// TestIssue69Fuse checks that stat of a directory still works after a
// file inside it was removed with caching disabled.
func (s *GoofysTest) TestIssue69Fuse(t *C) {
	s.fs.flags.StatCacheTTL = 0

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)

	defer func() {
		err := os.Chdir("/")
		t.Assert(err, IsNil)

		s.umount(t, mountPoint)
	}()

	err := os.Chdir(mountPoint)
	t.Assert(err, IsNil)

	_, err = os.Stat("dir1")
	t.Assert(err, IsNil)

	err = os.Remove("dir1/file3")
	t.Assert(err, IsNil)

	// don't really care about error code, but it should be a PathError
	os.Stat("dir1")
	os.Stat("dir1")
}

// TestGetMimeType checks mime-type detection gated on UseContentType.
func (s *GoofysTest) TestGetMimeType(t *C) {
	// option to use mime type not turned on
	mime := s.fs.flags.GetMimeType("foo.css")
	t.Assert(mime, IsNil)

	s.fs.flags.UseContentType = true

	mime = s.fs.flags.GetMimeType("foo.css")
	t.Assert(mime, NotNil)
	t.Assert(*mime, Equals, "text/css")

	mime = s.fs.flags.GetMimeType("foo")
	t.Assert(mime, IsNil)

	mime = s.fs.flags.GetMimeType("foo.")
	t.Assert(mime, IsNil)

	mime = s.fs.flags.GetMimeType("foo.unknownExtension")
	t.Assert(mime, IsNil)
}

// TestPutMimeType verifies the content-type set at write time survives
// renames (both away from and back to a recognized extension).
func (s *GoofysTest) TestPutMimeType(t *C) {
	if _, ok := s.cloud.(*ADLv1); ok {
		// ADLv1 doesn't support content-type
		t.Skip("ADLv1 doesn't support content-type")
	}

	s.fs.flags.UseContentType = true

	root := s.getRoot(t)
	jpg := "test.jpg"
	jpg2 := "test2.jpg"
	file := "test"

	s.testWriteFile(t, jpg, 10, 128)

	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: jpg})
	t.Assert(err, IsNil)
	t.Assert(*resp.ContentType, Equals, "image/jpeg")

	err = root.Rename(jpg, root, file)
	t.Assert(err, IsNil)

	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: file})
	t.Assert(err, IsNil)
	t.Assert(*resp.ContentType, Equals, "image/jpeg")

	err = root.Rename(file, root, jpg2)
	t.Assert(err, IsNil)

	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: jpg2})
	t.Assert(err, IsNil)
	t.Assert(*resp.ContentType, Equals, "image/jpeg")
}

// TestBucketPrefixSlash checks that bucket:prefix normalizes trailing
// slashes into a single one.
func (s *GoofysTest) TestBucketPrefixSlash(t *C) {
	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2", s.fs.flags)
	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")

	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2///", s.fs.flags)
	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
}

// TestFuseWithPrefix runs the fuse test script on a prefixed mount.
func (s *GoofysTest) TestFuseWithPrefix(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.fs = NewGoofys(context.Background(), s.fs.bucket+":testprefix", s.fs.flags)

	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
}

// TestRenameCache verifies the stat cache is updated (not just
// expired) by a rename.
func (s *GoofysTest) TestRenameCache(t *C) {
	root := s.getRoot(t)
	s.fs.flags.StatCacheTTL = 60 * 1000 * 1000 * 1000

	lookupOp1 := fuseops.LookUpInodeOp{
		Parent: root.Id,
		Name:   "file1",
	}

	lookupOp2 := lookupOp1
	lookupOp2.Name = "newfile"

	err := s.fs.LookUpInode(nil, &lookupOp1)
	t.Assert(err, IsNil)

	err = s.fs.LookUpInode(nil, &lookupOp2)
t.Assert(err, Equals, fuse.ENOENT)

	renameOp := fuseops.RenameOp{
		OldParent: root.Id,
		NewParent: root.Id,
		OldName:   "file1",
		NewName:   "newfile",
	}

	err = s.fs.Rename(nil, &renameOp)
	t.Assert(err, IsNil)

	// reset the cached entries and look both names up again
	lookupOp1.Entry = fuseops.ChildInodeEntry{}
	lookupOp2.Entry = fuseops.ChildInodeEntry{}

	err = s.fs.LookUpInode(nil, &lookupOp1)
	t.Assert(err, Equals, fuse.ENOENT)

	err = s.fs.LookUpInode(nil, &lookupOp2)
	t.Assert(err, IsNil)
}

// anonymous recreates the test bucket under a fresh name and switches
// the S3 backend to anonymous credentials.
func (s *GoofysTest) anonymous(t *C) {
	// On azure this fails because we re-create the bucket with
	// the same name right away. And well anonymous access is not
	// implemented yet in our azure backend anyway
	var s3 *S3Backend
	var ok bool
	if s3, ok = s.cloud.Delegate().(*S3Backend); !ok {
		t.Skip("only for S3")
	}

	err := s.deleteBucket(s.cloud)
	t.Assert(err, IsNil)

	// use a different bucket name to prevent 409 Conflict from
	// delete bucket above
	s.fs.bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
	s3.bucket = s.fs.bucket
	s.setupDefaultEnv(t, true)

	s.fs = NewGoofys(context.Background(), s.fs.bucket, s.fs.flags)
	t.Assert(s.fs, NotNil)

	// should have auto-detected by S3 backend
	cloud := s.getRoot(t).dir.cloud
	t.Assert(cloud, NotNil)
	s3, ok = cloud.Delegate().(*S3Backend)
	t.Assert(ok, Equals, true)

	s3.awsConfig.Credentials = credentials.AnonymousCredentials
	s3.newS3()
}

// disableS3 swaps the root's backend for one that always errors, so
// later operations must be served from cache.
func (s *GoofysTest) disableS3() {
	time.Sleep(1 * time.Second) // wait for any background goroutines to finish
	dir := s.fs.inodes[fuseops.RootInodeID].dir
	dir.cloud = StorageBackendInitError{
		fmt.Errorf("cloud disabled"),
		*dir.cloud.Capabilities(),
	}
}

// TestWriteAnonymous checks that flushing a file fails with EACCES
// under anonymous credentials.
func (s *GoofysTest) TestWriteAnonymous(t *C) {
	s.anonymous(t)
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	s.fs.flags.TypeCacheTTL = 1 * time.Minute

	fileName := "test"

	createOp := fuseops.CreateFileOp{
		Parent: s.getRoot(t).Id,
		Name:   fileName,
	}

	err := s.fs.CreateFile(s.ctx, &createOp)
	t.Assert(err, IsNil)

	err = s.fs.FlushFile(s.ctx, &fuseops.FlushFileOp{
		Handle: createOp.Handle,
		Inode:  createOp.Entry.Child,
	})
	t.Assert(err, Equals, syscall.EACCES)

	err = s.fs.ReleaseFileHandle(s.ctx, &fuseops.ReleaseFileHandleOp{Handle: createOp.Handle})
	t.Assert(err, IsNil)

	err = s.fs.LookUpInode(s.ctx, &fuseops.LookUpInodeOp{
		Parent: s.getRoot(t).Id,
		Name:   fileName,
	})
	t.Assert(err, Equals, fuse.ENOENT)
	// BUG! the file shouldn't exist, see test below for comment,
	// this behaves as expected only because we are bypassing
	// linux vfs in this test
}

// TestWriteAnonymousFuse is TestWriteAnonymous through a real mount,
// documenting the known stale-entry bug after a failed flush.
func (s *GoofysTest) TestWriteAnonymousFuse(t *C) {
	s.anonymous(t)
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	s.fs.flags.TypeCacheTTL = 1 * time.Minute

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	err := ioutil.WriteFile(mountPoint+"/test", []byte(""), 0600)
	t.Assert(err, NotNil)
	pathErr, ok := err.(*os.PathError)
	t.Assert(ok, Equals, true)
	t.Assert(pathErr.Err, Equals, syscall.EACCES)

	_, err = os.Stat(mountPoint + "/test")
	t.Assert(err, IsNil)
	// BUG! the file shouldn't exist, the condition below should hold instead
	// see comment in Goofys.FlushFile
	// pathErr, ok = err.(*os.PathError)
	// t.Assert(ok, Equals, true)
	// t.Assert(pathErr.Err, Equals, fuse.ENOENT)

	_, err = ioutil.ReadFile(mountPoint + "/test")
	t.Assert(err, NotNil)
	pathErr, ok = err.(*os.PathError)
	t.Assert(ok, Equals, true)
	t.Assert(pathErr.Err, Equals, fuse.ENOENT)

	// reading the file and getting ENOENT causes the kernel to
	// invalidate the entry, failing at open is not sufficient, we
	// have to fail at read (which means that if the application
	// uses splice(2) it won't get to us, so this wouldn't work
	_, err = os.Stat(mountPoint + "/test")
	t.Assert(err, NotNil)
	pathErr, ok = err.(*os.PathError)
	t.Assert(ok, Equals, true)
	t.Assert(pathErr.Err, Equals, fuse.ENOENT)
}

// TestWriteSyncWriteFuse writes, fsyncs, then writes again through a
// real mount.
func (s *GoofysTest) TestWriteSyncWriteFuse(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	var f *os.File
	var n int
	var err error

	defer func() {
		if err != nil {
			f.Close()
		}
	}()

	f, err = os.Create(mountPoint + "/TestWriteSyncWrite")
	t.Assert(err, IsNil)

	n, err = f.Write([]byte("hello\n"))
	t.Assert(err, IsNil)
	t.Assert(n, Equals, 6)

	err = f.Sync()
	t.Assert(err, IsNil)

	n, err = f.Write([]byte("world\n"))
	t.Assert(err, IsNil)
	t.Assert(n, Equals, 6)

	err = f.Close()
	t.Assert(err, IsNil)
}

// TestIssue156 looks up a key with non-UTF8 bytes.
func (s *GoofysTest) TestIssue156(t *C) {
	_, err := s.LookUpInode(t, "\xae\x8a-")
	// S3Proxy and aws s3 return different errors
	// https://github.com/andrewgaul/s3proxy/issues/201
	t.Assert(err, NotNil)
}

// TestIssue162 renames a key containing a non-ASCII character.
func (s *GoofysTest) TestIssue162(t *C) {
	if s.azurite {
		t.Skip("https://github.com/Azure/Azurite/issues/221")
	}

2149 params := &PutBlobInput{ 2150 Key: "dir1/lör 006.jpg", 2151 Body: bytes.NewReader([]byte("foo")), 2152 Size: PUInt64(3), 2153 } 2154 _, err := s.cloud.PutBlob(params) 2155 t.Assert(err, IsNil) 2156 2157 dir, err := s.LookUpInode(t, "dir1") 2158 t.Assert(err, IsNil) 2159 2160 err = dir.Rename("lör 006.jpg", dir, "myfile.jpg") 2161 t.Assert(err, IsNil) 2162 2163 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "dir1/myfile.jpg"}) 2164 t.Assert(resp.Size, Equals, uint64(3)) 2165 } 2166 2167 func (s *GoofysTest) TestXAttrGet(t *C) { 2168 if _, ok := s.cloud.(*ADLv1); ok { 2169 t.Skip("ADLv1 doesn't support metadata") 2170 } 2171 2172 _, checkETag := s.cloud.Delegate().(*S3Backend) 2173 xattrPrefix := s.cloud.Capabilities().Name + "." 2174 2175 file1, err := s.LookUpInode(t, "file1") 2176 t.Assert(err, IsNil) 2177 2178 names, err := file1.ListXattr() 2179 t.Assert(err, IsNil) 2180 expectedXattrs := []string{ 2181 xattrPrefix + "etag", 2182 xattrPrefix + "storage-class", 2183 "user.name", 2184 } 2185 sort.Strings(expectedXattrs) 2186 t.Assert(names, DeepEquals, expectedXattrs) 2187 2188 _, err = file1.GetXattr("user.foobar") 2189 t.Assert(err, Equals, unix.ENODATA) 2190 2191 if checkETag { 2192 value, err := file1.GetXattr("s3.etag") 2193 t.Assert(err, IsNil) 2194 // md5sum of "file1" 2195 t.Assert(string(value), Equals, "\"826e8142e6baabe8af779f5f490cf5f5\"") 2196 } 2197 2198 value, err := file1.GetXattr("user.name") 2199 t.Assert(err, IsNil) 2200 t.Assert(string(value), Equals, "file1+/#\x00") 2201 2202 dir1, err := s.LookUpInode(t, "dir1") 2203 t.Assert(err, IsNil) 2204 2205 if !s.cloud.Capabilities().DirBlob { 2206 // implicit dir blobs don't have s3.etag at all 2207 names, err = dir1.ListXattr() 2208 t.Assert(err, IsNil) 2209 t.Assert(len(names), Equals, 0, Commentf("names: %v", names)) 2210 2211 value, err = dir1.GetXattr(xattrPrefix + "etag") 2212 t.Assert(err, Equals, syscall.ENODATA) 2213 } 2214 2215 // list dir1 to populate file3 in cache, then get 
file3's xattr 2216 lookup := fuseops.LookUpInodeOp{ 2217 Parent: fuseops.RootInodeID, 2218 Name: "dir1", 2219 } 2220 err = s.fs.LookUpInode(nil, &lookup) 2221 t.Assert(err, IsNil) 2222 2223 s.readDirIntoCache(t, lookup.Entry.Child) 2224 2225 dir1 = s.fs.inodes[lookup.Entry.Child] 2226 file3 := dir1.findChild("file3") 2227 t.Assert(file3, NotNil) 2228 t.Assert(file3.userMetadata, IsNil) 2229 2230 if checkETag { 2231 value, err = file3.GetXattr("s3.etag") 2232 t.Assert(err, IsNil) 2233 // md5sum of "dir1/file3" 2234 t.Assert(string(value), Equals, "\"5cd67e0e59fb85be91a515afe0f4bb24\"") 2235 } 2236 2237 // ensure that we get the dir blob instead of list 2238 s.fs.flags.Cheap = true 2239 2240 emptyDir2, err := s.LookUpInode(t, "empty_dir2") 2241 t.Assert(err, IsNil) 2242 2243 names, err = emptyDir2.ListXattr() 2244 t.Assert(err, IsNil) 2245 sort.Strings(names) 2246 t.Assert(names, DeepEquals, expectedXattrs) 2247 2248 emptyDir, err := s.LookUpInode(t, "empty_dir") 2249 t.Assert(err, IsNil) 2250 2251 if checkETag { 2252 value, err = emptyDir.GetXattr("s3.etag") 2253 t.Assert(err, IsNil) 2254 // dir blobs are empty 2255 t.Assert(string(value), Equals, "\"d41d8cd98f00b204e9800998ecf8427e\"") 2256 } 2257 2258 // s3proxy doesn't support storage class yet 2259 if hasEnv("AWS") { 2260 cloud := s.getRoot(t).dir.cloud 2261 s3, ok := cloud.Delegate().(*S3Backend) 2262 t.Assert(ok, Equals, true) 2263 s3.config.StorageClass = "STANDARD_IA" 2264 2265 s.testWriteFile(t, "ia", 1, 128*1024) 2266 2267 ia, err := s.LookUpInode(t, "ia") 2268 t.Assert(err, IsNil) 2269 2270 names, err = ia.ListXattr() 2271 t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"}) 2272 2273 value, err = ia.GetXattr("s3.storage-class") 2274 t.Assert(err, IsNil) 2275 // smaller than 128KB falls back to standard 2276 t.Assert(string(value), Equals, "STANDARD") 2277 2278 s.testWriteFile(t, "ia", 128*1024, 128*1024) 2279 time.Sleep(100 * time.Millisecond) 2280 2281 names, err = ia.ListXattr() 2282 
t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"}) 2283 2284 value, err = ia.GetXattr("s3.storage-class") 2285 t.Assert(err, IsNil) 2286 t.Assert(string(value), Equals, "STANDARD_IA") 2287 } 2288 } 2289 2290 func (s *GoofysTest) TestClientForkExec(t *C) { 2291 mountPoint := "/tmp/mnt" + s.fs.bucket 2292 s.mount(t, mountPoint) 2293 defer s.umount(t, mountPoint) 2294 file := mountPoint + "/TestClientForkExec" 2295 2296 // Create new file. 2297 fh, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0600) 2298 t.Assert(err, IsNil) 2299 defer func() { // Defer close file if it's not already closed. 2300 if fh != nil { 2301 fh.Close() 2302 } 2303 }() 2304 // Write to file. 2305 _, err = fh.WriteString("1.1;") 2306 t.Assert(err, IsNil) 2307 // The `Command` is run via fork+exec. 2308 // So all the file descriptors are copied over to the child process. 2309 // The child process 'closes' the files before exiting. This should 2310 // not result in goofys failing file operations invoked from the test. 2311 someCmd := exec.Command("echo", "hello") 2312 err = someCmd.Run() 2313 t.Assert(err, IsNil) 2314 // One more write. 2315 _, err = fh.WriteString("1.2;") 2316 t.Assert(err, IsNil) 2317 // Close file. 2318 err = fh.Close() 2319 t.Assert(err, IsNil) 2320 fh = nil 2321 // Check file content. 2322 content, err := ioutil.ReadFile(file) 2323 t.Assert(err, IsNil) 2324 t.Assert(string(content), Equals, "1.1;1.2;") 2325 2326 // Repeat the same excercise, but now with an existing file. 2327 fh, err = os.OpenFile(file, os.O_RDWR, 0600) 2328 // Write to file. 2329 _, err = fh.WriteString("2.1;") 2330 // fork+exec. 2331 someCmd = exec.Command("echo", "hello") 2332 err = someCmd.Run() 2333 t.Assert(err, IsNil) 2334 // One more write. 2335 _, err = fh.WriteString("2.2;") 2336 t.Assert(err, IsNil) 2337 // Close file. 2338 err = fh.Close() 2339 t.Assert(err, IsNil) 2340 fh = nil 2341 // Verify that the file is updated as per the new write. 
2342 content, err = ioutil.ReadFile(file) 2343 t.Assert(err, IsNil) 2344 t.Assert(string(content), Equals, "2.1;2.2;") 2345 } 2346 2347 func (s *GoofysTest) TestXAttrGetCached(t *C) { 2348 if _, ok := s.cloud.(*ADLv1); ok { 2349 t.Skip("ADLv1 doesn't support metadata") 2350 } 2351 2352 xattrPrefix := s.cloud.Capabilities().Name + "." 2353 2354 s.fs.flags.StatCacheTTL = 1 * time.Minute 2355 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2356 s.readDirIntoCache(t, fuseops.RootInodeID) 2357 s.disableS3() 2358 2359 in, err := s.LookUpInode(t, "file1") 2360 t.Assert(err, IsNil) 2361 t.Assert(in.userMetadata, IsNil) 2362 2363 _, err = in.GetXattr(xattrPrefix + "etag") 2364 t.Assert(err, IsNil) 2365 } 2366 2367 func (s *GoofysTest) TestXAttrCopied(t *C) { 2368 if _, ok := s.cloud.(*ADLv1); ok { 2369 t.Skip("ADLv1 doesn't support metadata") 2370 } 2371 2372 root := s.getRoot(t) 2373 2374 err := root.Rename("file1", root, "file0") 2375 t.Assert(err, IsNil) 2376 2377 in, err := s.LookUpInode(t, "file0") 2378 t.Assert(err, IsNil) 2379 2380 _, err = in.GetXattr("user.name") 2381 t.Assert(err, IsNil) 2382 } 2383 2384 func (s *GoofysTest) TestXAttrRemove(t *C) { 2385 if _, ok := s.cloud.(*ADLv1); ok { 2386 t.Skip("ADLv1 doesn't support metadata") 2387 } 2388 2389 in, err := s.LookUpInode(t, "file1") 2390 t.Assert(err, IsNil) 2391 2392 _, err = in.GetXattr("user.name") 2393 t.Assert(err, IsNil) 2394 2395 err = in.RemoveXattr("user.name") 2396 t.Assert(err, IsNil) 2397 2398 _, err = in.GetXattr("user.name") 2399 t.Assert(err, Equals, syscall.ENODATA) 2400 } 2401 2402 func (s *GoofysTest) TestXAttrFuse(t *C) { 2403 if _, ok := s.cloud.(*ADLv1); ok { 2404 t.Skip("ADLv1 doesn't support metadata") 2405 } 2406 2407 _, checkETag := s.cloud.Delegate().(*S3Backend) 2408 xattrPrefix := s.cloud.Capabilities().Name + "." 
2409 2410 //fuseLog.Level = logrus.DebugLevel 2411 mountPoint := "/tmp/mnt" + s.fs.bucket 2412 s.mount(t, mountPoint) 2413 defer s.umount(t, mountPoint) 2414 2415 expectedXattrs := []string{ 2416 xattrPrefix + "etag", 2417 xattrPrefix + "storage-class", 2418 "user.name", 2419 } 2420 sort.Strings(expectedXattrs) 2421 2422 var expectedXattrsStr string 2423 for _, x := range expectedXattrs { 2424 expectedXattrsStr += x + "\x00" 2425 } 2426 var buf [1024]byte 2427 2428 // error if size is too small (but not zero) 2429 _, err := unix.Listxattr(mountPoint+"/file1", buf[:1]) 2430 t.Assert(err, Equals, unix.ERANGE) 2431 2432 // 0 len buffer means interogate the size of buffer 2433 nbytes, err := unix.Listxattr(mountPoint+"/file1", nil) 2434 t.Assert(err, Equals, nil) 2435 t.Assert(nbytes, Equals, len(expectedXattrsStr)) 2436 2437 nbytes, err = unix.Listxattr(mountPoint+"/file1", buf[:nbytes]) 2438 t.Assert(err, IsNil) 2439 t.Assert(nbytes, Equals, len(expectedXattrsStr)) 2440 t.Assert(string(buf[:nbytes]), Equals, expectedXattrsStr) 2441 2442 _, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:1]) 2443 t.Assert(err, Equals, unix.ERANGE) 2444 2445 nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", nil) 2446 t.Assert(err, IsNil) 2447 t.Assert(nbytes, Equals, 9) 2448 2449 nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:nbytes]) 2450 t.Assert(err, IsNil) 2451 t.Assert(nbytes, Equals, 9) 2452 t.Assert(string(buf[:nbytes]), Equals, "file1+/#\x00") 2453 2454 if !s.cloud.Capabilities().DirBlob { 2455 // dir1 has no xattrs 2456 nbytes, err = unix.Listxattr(mountPoint+"/dir1", nil) 2457 t.Assert(err, IsNil) 2458 t.Assert(nbytes, Equals, 0) 2459 2460 nbytes, err = unix.Listxattr(mountPoint+"/dir1", buf[:1]) 2461 t.Assert(err, IsNil) 2462 t.Assert(nbytes, Equals, 0) 2463 } 2464 2465 if checkETag { 2466 _, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:1]) 2467 t.Assert(err, Equals, unix.ERANGE) 2468 2469 nbytes, err = 
unix.Getxattr(mountPoint+"/file1", "s3.etag", nil) 2470 t.Assert(err, IsNil) 2471 // 32 bytes md5 plus quotes 2472 t.Assert(nbytes, Equals, 34) 2473 2474 nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:nbytes]) 2475 t.Assert(err, IsNil) 2476 t.Assert(nbytes, Equals, 34) 2477 t.Assert(string(buf[:nbytes]), Equals, 2478 "\"826e8142e6baabe8af779f5f490cf5f5\"") 2479 } 2480 } 2481 2482 func (s *GoofysTest) TestXAttrSet(t *C) { 2483 if _, ok := s.cloud.(*ADLv1); ok { 2484 t.Skip("ADLv1 doesn't support metadata") 2485 } 2486 2487 in, err := s.LookUpInode(t, "file1") 2488 t.Assert(err, IsNil) 2489 2490 err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_REPLACE) 2491 t.Assert(err, Equals, syscall.ENODATA) 2492 2493 err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE) 2494 t.Assert(err, IsNil) 2495 2496 err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE) 2497 t.Assert(err, Equals, syscall.EEXIST) 2498 2499 in, err = s.LookUpInode(t, "file1") 2500 t.Assert(err, IsNil) 2501 2502 value, err := in.GetXattr("user.bar") 2503 t.Assert(err, IsNil) 2504 t.Assert(string(value), Equals, "hello") 2505 2506 value = []byte("file1+%/#\x00") 2507 2508 err = in.SetXattr("user.bar", value, unix.XATTR_REPLACE) 2509 t.Assert(err, IsNil) 2510 2511 in, err = s.LookUpInode(t, "file1") 2512 t.Assert(err, IsNil) 2513 2514 value2, err := in.GetXattr("user.bar") 2515 t.Assert(err, IsNil) 2516 t.Assert(value2, DeepEquals, value) 2517 2518 // setting with flag = 0 always works 2519 err = in.SetXattr("user.bar", []byte("world"), 0) 2520 t.Assert(err, IsNil) 2521 2522 err = in.SetXattr("user.baz", []byte("world"), 0) 2523 t.Assert(err, IsNil) 2524 2525 value, err = in.GetXattr("user.bar") 2526 t.Assert(err, IsNil) 2527 2528 value2, err = in.GetXattr("user.baz") 2529 t.Assert(err, IsNil) 2530 2531 t.Assert(value2, DeepEquals, value) 2532 t.Assert(string(value2), DeepEquals, "world") 2533 2534 err = in.SetXattr("s3.bar", []byte("hello"), unix.XATTR_CREATE) 
2535 t.Assert(err, Equals, syscall.EPERM) 2536 } 2537 2538 func (s *GoofysTest) TestPythonCopyTree(t *C) { 2539 mountPoint := "/tmp/mnt" + s.fs.bucket 2540 2541 s.runFuseTest(t, mountPoint, true, "python", "-c", 2542 "import shutil; shutil.copytree('dir2', 'dir5')", 2543 mountPoint) 2544 } 2545 2546 func (s *GoofysTest) TestCreateRenameBeforeCloseFuse(t *C) { 2547 if s.azurite { 2548 // Azurite returns 400 when copy source doesn't exist 2549 // https://github.com/Azure/Azurite/issues/219 2550 // so our code to ignore ENOENT fails 2551 t.Skip("https://github.com/Azure/Azurite/issues/219") 2552 } 2553 2554 mountPoint := "/tmp/mnt" + s.fs.bucket 2555 2556 s.mount(t, mountPoint) 2557 defer s.umount(t, mountPoint) 2558 2559 from := mountPoint + "/newfile" 2560 to := mountPoint + "/newfile2" 2561 2562 fh, err := os.Create(from) 2563 t.Assert(err, IsNil) 2564 defer func() { 2565 // close the file if the test failed so we can unmount 2566 if fh != nil { 2567 fh.Close() 2568 } 2569 }() 2570 2571 _, err = fh.WriteString("hello world") 2572 t.Assert(err, IsNil) 2573 2574 err = os.Rename(from, to) 2575 t.Assert(err, IsNil) 2576 2577 err = fh.Close() 2578 t.Assert(err, IsNil) 2579 fh = nil 2580 2581 _, err = os.Stat(from) 2582 t.Assert(err, NotNil) 2583 pathErr, ok := err.(*os.PathError) 2584 t.Assert(ok, Equals, true) 2585 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2586 2587 content, err := ioutil.ReadFile(to) 2588 t.Assert(err, IsNil) 2589 t.Assert(string(content), Equals, "hello world") 2590 } 2591 2592 func (s *GoofysTest) TestRenameBeforeCloseFuse(t *C) { 2593 mountPoint := "/tmp/mnt" + s.fs.bucket 2594 2595 s.mount(t, mountPoint) 2596 defer s.umount(t, mountPoint) 2597 2598 from := mountPoint + "/newfile" 2599 to := mountPoint + "/newfile2" 2600 2601 err := ioutil.WriteFile(from, []byte(""), 0600) 2602 t.Assert(err, IsNil) 2603 2604 fh, err := os.OpenFile(from, os.O_WRONLY, 0600) 2605 t.Assert(err, IsNil) 2606 defer func() { 2607 // close the file if the test failed so we 
can unmount 2608 if fh != nil { 2609 fh.Close() 2610 } 2611 }() 2612 2613 _, err = fh.WriteString("hello world") 2614 t.Assert(err, IsNil) 2615 2616 err = os.Rename(from, to) 2617 t.Assert(err, IsNil) 2618 2619 err = fh.Close() 2620 t.Assert(err, IsNil) 2621 fh = nil 2622 2623 _, err = os.Stat(from) 2624 t.Assert(err, NotNil) 2625 pathErr, ok := err.(*os.PathError) 2626 t.Assert(ok, Equals, true) 2627 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2628 2629 content, err := ioutil.ReadFile(to) 2630 t.Assert(err, IsNil) 2631 t.Assert(string(content), Equals, "hello world") 2632 } 2633 2634 func (s *GoofysTest) TestInodeInsert(t *C) { 2635 root := s.getRoot(t) 2636 2637 in := NewInode(s.fs, root, aws.String("2")) 2638 in.Attributes = InodeAttributes{} 2639 root.insertChild(in) 2640 t.Assert(*root.dir.Children[2].Name, Equals, "2") 2641 2642 in = NewInode(s.fs, root, aws.String("1")) 2643 in.Attributes = InodeAttributes{} 2644 root.insertChild(in) 2645 t.Assert(*root.dir.Children[2].Name, Equals, "1") 2646 t.Assert(*root.dir.Children[3].Name, Equals, "2") 2647 2648 in = NewInode(s.fs, root, aws.String("4")) 2649 in.Attributes = InodeAttributes{} 2650 root.insertChild(in) 2651 t.Assert(*root.dir.Children[2].Name, Equals, "1") 2652 t.Assert(*root.dir.Children[3].Name, Equals, "2") 2653 t.Assert(*root.dir.Children[4].Name, Equals, "4") 2654 2655 inode := root.findChild("1") 2656 t.Assert(inode, NotNil) 2657 t.Assert(*inode.Name, Equals, "1") 2658 2659 inode = root.findChild("2") 2660 t.Assert(inode, NotNil) 2661 t.Assert(*inode.Name, Equals, "2") 2662 2663 inode = root.findChild("4") 2664 t.Assert(inode, NotNil) 2665 t.Assert(*inode.Name, Equals, "4") 2666 2667 inode = root.findChild("0") 2668 t.Assert(inode, IsNil) 2669 2670 inode = root.findChild("3") 2671 t.Assert(inode, IsNil) 2672 2673 root.removeChild(root.dir.Children[3]) 2674 root.removeChild(root.dir.Children[2]) 2675 root.removeChild(root.dir.Children[2]) 2676 t.Assert(len(root.dir.Children), Equals, 2) 2677 } 2678 
2679 func (s *GoofysTest) TestReadDirSlurpHeuristic(t *C) { 2680 if _, ok := s.cloud.Delegate().(*S3Backend); !ok { 2681 t.Skip("only for S3") 2682 } 2683 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2684 2685 s.setupBlobs(s.cloud, t, map[string]*string{"dir2isafile": nil}) 2686 2687 root := s.getRoot(t).dir 2688 t.Assert(root.seqOpenDirScore, Equals, uint8(0)) 2689 s.assertEntries(t, s.getRoot(t), []string{ 2690 "dir1", "dir2", "dir2isafile", "dir4", "empty_dir", 2691 "empty_dir2", "file1", "file2", "zero"}) 2692 2693 dir1, err := s.LookUpInode(t, "dir1") 2694 t.Assert(err, IsNil) 2695 dh1 := dir1.OpenDir() 2696 defer dh1.CloseDir() 2697 score := root.seqOpenDirScore 2698 2699 dir2, err := s.LookUpInode(t, "dir2") 2700 t.Assert(err, IsNil) 2701 dh2 := dir2.OpenDir() 2702 defer dh2.CloseDir() 2703 t.Assert(root.seqOpenDirScore, Equals, score+1) 2704 2705 dir3, err := s.LookUpInode(t, "dir4") 2706 t.Assert(err, IsNil) 2707 dh3 := dir3.OpenDir() 2708 defer dh3.CloseDir() 2709 t.Assert(root.seqOpenDirScore, Equals, score+2) 2710 } 2711 2712 func (s *GoofysTest) TestReadDirSlurpSubtree(t *C) { 2713 if _, ok := s.cloud.Delegate().(*S3Backend); !ok { 2714 t.Skip("only for S3") 2715 } 2716 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2717 s.fs.flags.StatCacheTTL = 1 * time.Minute 2718 2719 s.getRoot(t).dir.seqOpenDirScore = 2 2720 in, err := s.LookUpInode(t, "dir2") 2721 t.Assert(err, IsNil) 2722 t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(2)) 2723 2724 s.readDirIntoCache(t, in.Id) 2725 // should have incremented the score 2726 t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(3)) 2727 2728 // reading dir2 should cause dir2/dir3 to have cached readdir 2729 s.disableS3() 2730 2731 in, err = s.LookUpInode(t, "dir2/dir3") 2732 t.Assert(err, IsNil) 2733 2734 s.assertEntries(t, in, []string{"file4"}) 2735 } 2736 2737 func (s *GoofysTest) TestReadDirCached(t *C) { 2738 s.fs.flags.StatCacheTTL = 1 * time.Minute 2739 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2740 
2741 s.getRoot(t).dir.seqOpenDirScore = 2 2742 s.readDirIntoCache(t, fuseops.RootInodeID) 2743 s.disableS3() 2744 2745 dh := s.getRoot(t).OpenDir() 2746 2747 entries := s.readDirFully(t, dh) 2748 dirs := make([]string, 0) 2749 files := make([]string, 0) 2750 noMoreDir := false 2751 2752 for _, en := range entries { 2753 if en.Type == fuseutil.DT_Directory { 2754 t.Assert(noMoreDir, Equals, false) 2755 dirs = append(dirs, en.Name) 2756 } else { 2757 files = append(files, en.Name) 2758 noMoreDir = true 2759 } 2760 } 2761 2762 t.Assert(dirs, DeepEquals, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2"}) 2763 t.Assert(files, DeepEquals, []string{"file1", "file2", "zero"}) 2764 } 2765 2766 func (s *GoofysTest) TestReadDirLookUp(t *C) { 2767 s.getRoot(t).dir.seqOpenDirScore = 2 2768 2769 var wg sync.WaitGroup 2770 for i := 0; i < 10; i++ { 2771 wg.Add(2) 2772 go func() { 2773 defer wg.Done() 2774 s.readDirIntoCache(t, fuseops.RootInodeID) 2775 }() 2776 go func() { 2777 defer wg.Done() 2778 2779 lookup := fuseops.LookUpInodeOp{ 2780 Parent: fuseops.RootInodeID, 2781 Name: "file1", 2782 } 2783 err := s.fs.LookUpInode(nil, &lookup) 2784 t.Assert(err, IsNil) 2785 }() 2786 } 2787 wg.Wait() 2788 } 2789 2790 func (s *GoofysTest) writeSeekWriteFuse(t *C, file string, fh *os.File, first string, second string, third string) { 2791 fi, err := os.Stat(file) 2792 t.Assert(err, IsNil) 2793 2794 defer func() { 2795 // close the file if the test failed so we can unmount 2796 if fh != nil { 2797 fh.Close() 2798 } 2799 }() 2800 2801 _, err = fh.WriteString(first) 2802 t.Assert(err, IsNil) 2803 2804 off, err := fh.Seek(int64(len(second)), 1) 2805 t.Assert(err, IsNil) 2806 t.Assert(off, Equals, int64(len(first)+len(second))) 2807 2808 _, err = fh.WriteString(third) 2809 t.Assert(err, IsNil) 2810 2811 off, err = fh.Seek(int64(len(first)), 0) 2812 t.Assert(err, IsNil) 2813 t.Assert(off, Equals, int64(len(first))) 2814 2815 _, err = fh.WriteString(second) 2816 t.Assert(err, IsNil) 
2817 2818 err = fh.Close() 2819 t.Assert(err, IsNil) 2820 fh = nil 2821 2822 content, err := ioutil.ReadFile(file) 2823 t.Assert(err, IsNil) 2824 t.Assert(string(content), Equals, first+second+third) 2825 2826 fi2, err := os.Stat(file) 2827 t.Assert(err, IsNil) 2828 t.Assert(fi.Mode(), Equals, fi2.Mode()) 2829 } 2830 2831 func (s *GoofysTest) TestWriteSeekWriteFuse(t *C) { 2832 if !isCatfs() { 2833 t.Skip("only works with CATFS=true") 2834 } 2835 2836 mountPoint := "/tmp/mnt" + s.fs.bucket 2837 s.mount(t, mountPoint) 2838 defer s.umount(t, mountPoint) 2839 2840 file := mountPoint + "/newfile" 2841 2842 fh, err := os.Create(file) 2843 t.Assert(err, IsNil) 2844 2845 s.writeSeekWriteFuse(t, file, fh, "hello", " ", "world") 2846 2847 fh, err = os.OpenFile(file, os.O_WRONLY, 0600) 2848 t.Assert(err, IsNil) 2849 2850 s.writeSeekWriteFuse(t, file, fh, "", "never", "minding") 2851 } 2852 2853 func (s *GoofysTest) TestDirMtimeCreate(t *C) { 2854 root := s.getRoot(t) 2855 2856 attr, _ := root.GetAttributes() 2857 m1 := attr.Mtime 2858 time.Sleep(time.Second) 2859 2860 _, _ = root.Create("foo", fuseops.OpMetadata{uint32(os.Getpid())}) 2861 attr2, _ := root.GetAttributes() 2862 m2 := attr2.Mtime 2863 2864 t.Assert(m1.Before(m2), Equals, true) 2865 } 2866 2867 func (s *GoofysTest) TestDirMtimeLs(t *C) { 2868 root := s.getRoot(t) 2869 2870 attr, _ := root.GetAttributes() 2871 m1 := attr.Mtime 2872 time.Sleep(3 * time.Second) 2873 2874 params := &PutBlobInput{ 2875 Key: "newfile", 2876 Body: bytes.NewReader([]byte("foo")), 2877 Size: PUInt64(3), 2878 } 2879 _, err := s.cloud.PutBlob(params) 2880 t.Assert(err, IsNil) 2881 2882 s.readDirIntoCache(t, fuseops.RootInodeID) 2883 2884 attr2, _ := root.GetAttributes() 2885 m2 := attr2.Mtime 2886 2887 t.Assert(m1.Before(m2), Equals, true) 2888 } 2889 2890 func (s *GoofysTest) TestRenameOverwrite(t *C) { 2891 mountPoint := "/tmp/mnt" + s.fs.bucket 2892 s.mount(t, mountPoint) 2893 defer s.umount(t, mountPoint) 2894 2895 file := mountPoint + 
"/newfile" 2896 rename := mountPoint + "/file1" 2897 2898 fh, err := os.Create(file) 2899 t.Assert(err, IsNil) 2900 2901 err = fh.Close() 2902 t.Assert(err, IsNil) 2903 2904 err = os.Rename(file, rename) 2905 t.Assert(err, IsNil) 2906 } 2907 2908 func (s *GoofysTest) TestRead403(t *C) { 2909 // anonymous only works in S3 for now 2910 cloud := s.getRoot(t).dir.cloud 2911 s3, ok := cloud.Delegate().(*S3Backend) 2912 if !ok { 2913 t.Skip("only for S3") 2914 } 2915 2916 s.fs.flags.StatCacheTTL = 1 * time.Minute 2917 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2918 2919 // cache the inode first so we don't get 403 when we lookup 2920 in, err := s.LookUpInode(t, "file1") 2921 t.Assert(err, IsNil) 2922 2923 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 2924 t.Assert(err, IsNil) 2925 2926 s3.awsConfig.Credentials = credentials.AnonymousCredentials 2927 s3.newS3() 2928 2929 // fake enable read-ahead 2930 fh.seqReadAmount = uint64(READAHEAD_CHUNK) 2931 2932 buf := make([]byte, 5) 2933 2934 _, err = fh.ReadFile(0, buf) 2935 t.Assert(err, Equals, syscall.EACCES) 2936 2937 // now that the S3 GET has failed, try again, see 2938 // https://github.com/kahing/goofys/pull/243 2939 _, err = fh.ReadFile(0, buf) 2940 t.Assert(err, Equals, syscall.EACCES) 2941 } 2942 2943 func (s *GoofysTest) TestRmdirWithDiropen(t *C) { 2944 mountPoint := "/tmp/mnt" + s.fs.bucket 2945 s.fs.flags.StatCacheTTL = 1 * time.Minute 2946 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2947 2948 s.mount(t, mountPoint) 2949 defer s.umount(t, mountPoint) 2950 2951 err := os.MkdirAll(mountPoint+"/dir2/dir4", 0700) 2952 t.Assert(err, IsNil) 2953 err = os.MkdirAll(mountPoint+"/dir2/dir5", 0700) 2954 t.Assert(err, IsNil) 2955 2956 //1, open dir5 2957 dir := mountPoint + "/dir2/dir5" 2958 fh, err := os.Open(dir) 2959 t.Assert(err, IsNil) 2960 defer fh.Close() 2961 2962 cmd1 := exec.Command("ls", mountPoint+"/dir2") 2963 //out, err := cmd.Output() 2964 out1, err1 := cmd1.Output() 2965 if err1 != nil { 2966 
if ee, ok := err.(*exec.ExitError); ok { 2967 panic(ee.Stderr) 2968 } 2969 } 2970 t.Assert(string(out1), DeepEquals, ""+"dir3\n"+"dir4\n"+"dir5\n") 2971 2972 //2, rm -rf dir5 2973 cmd := exec.Command("rm", "-rf", dir) 2974 _, err = cmd.Output() 2975 if err != nil { 2976 if ee, ok := err.(*exec.ExitError); ok { 2977 panic(ee.Stderr) 2978 } 2979 } 2980 2981 //3, readdir dir2 2982 fh1, err := os.Open(mountPoint + "/dir2") 2983 t.Assert(err, IsNil) 2984 defer func() { 2985 // close the file if the test failed so we can unmount 2986 if fh1 != nil { 2987 fh1.Close() 2988 } 2989 }() 2990 2991 names, err := fh1.Readdirnames(0) 2992 t.Assert(err, IsNil) 2993 t.Assert(names, DeepEquals, []string{"dir3", "dir4"}) 2994 2995 cmd = exec.Command("ls", mountPoint+"/dir2") 2996 out, err := cmd.Output() 2997 if err != nil { 2998 if ee, ok := err.(*exec.ExitError); ok { 2999 panic(ee.Stderr) 3000 } 3001 } 3002 3003 t.Assert(string(out), DeepEquals, ""+"dir3\n"+"dir4\n") 3004 3005 err = fh1.Close() 3006 t.Assert(err, IsNil) 3007 3008 // 4,reset env 3009 err = fh.Close() 3010 t.Assert(err, IsNil) 3011 3012 err = os.RemoveAll(mountPoint + "/dir2/dir4") 3013 t.Assert(err, IsNil) 3014 3015 } 3016 3017 func (s *GoofysTest) TestDirMTime(t *C) { 3018 s.fs.flags.StatCacheTTL = 1 * time.Minute 3019 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3020 // enable cheap to ensure GET dir/ will come back before LIST dir/ 3021 s.fs.flags.Cheap = true 3022 3023 root := s.getRoot(t) 3024 t.Assert(time.Time{}.Before(root.Attributes.Mtime), Equals, true) 3025 3026 file1, err := s.LookUpInode(t, "dir1") 3027 t.Assert(err, IsNil) 3028 3029 // take mtime from a blob as init time because when we test against 3030 // real cloud, server time can be way off from local time 3031 initTime := file1.Attributes.Mtime 3032 3033 dir1, err := s.LookUpInode(t, "dir1") 3034 t.Assert(err, IsNil) 3035 3036 attr1, _ := dir1.GetAttributes() 3037 m1 := attr1.Mtime 3038 if !s.cloud.Capabilities().DirBlob { 3039 // dir1 doesn't have 
a dir blob, so should take root's mtime 3040 t.Assert(m1, Equals, root.Attributes.Mtime) 3041 } 3042 3043 time.Sleep(2 * time.Second) 3044 3045 dir2, err := dir1.MkDir("dir2") 3046 t.Assert(err, IsNil) 3047 3048 attr2, _ := dir2.GetAttributes() 3049 m2 := attr2.Mtime 3050 t.Assert(m1.Add(2*time.Second).Before(m2), Equals, true) 3051 3052 // dir1 didn't have an explicit mtime, so it should update now 3053 // that we did a mkdir inside it 3054 attr1, _ = dir1.GetAttributes() 3055 m1 = attr1.Mtime 3056 t.Assert(m1, Equals, m2) 3057 3058 // we never added the inode so this will do the lookup again 3059 dir2, err = dir1.LookUp("dir2") 3060 t.Assert(err, IsNil) 3061 3062 // the new time comes from S3 which only has seconds 3063 // granularity 3064 attr2, _ = dir2.GetAttributes() 3065 t.Assert(m2, Not(Equals), attr2.Mtime) 3066 t.Assert(initTime.Add(time.Second).Before(attr2.Mtime), Equals, true) 3067 3068 // different dir2 3069 dir2, err = s.LookUpInode(t, "dir2") 3070 t.Assert(err, IsNil) 3071 3072 attr2, _ = dir2.GetAttributes() 3073 m2 = attr2.Mtime 3074 3075 // this fails because we are listing dir/, which means we 3076 // don't actually see the dir blob dir2/dir3/ (it's returned 3077 // as common prefix), so we can't get dir3's mtime 3078 if false { 3079 // dir2/dir3/ exists and has mtime 3080 s.readDirIntoCache(t, dir2.Id) 3081 dir3, err := s.LookUpInode(t, "dir2/dir3") 3082 t.Assert(err, IsNil) 3083 3084 attr3, _ := dir3.GetAttributes() 3085 // setupDefaultEnv is before mounting 3086 t.Assert(attr3.Mtime.Before(m2), Equals, true) 3087 } 3088 3089 time.Sleep(time.Second) 3090 3091 params := &PutBlobInput{ 3092 Key: "dir2/newfile", 3093 Body: bytes.NewReader([]byte("foo")), 3094 Size: PUInt64(3), 3095 } 3096 _, err = s.cloud.PutBlob(params) 3097 t.Assert(err, IsNil) 3098 3099 s.readDirIntoCache(t, dir2.Id) 3100 3101 newfile, err := dir2.LookUp("newfile") 3102 t.Assert(err, IsNil) 3103 3104 attr2New, _ := dir2.GetAttributes() 3105 // mtime should reflect that of the 
latest object 3106 // GCS can return nano second resolution so truncate to second for compare 3107 t.Assert(attr2New.Mtime.Unix(), Equals, newfile.Attributes.Mtime.Unix()) 3108 t.Assert(m2.Before(attr2New.Mtime), Equals, true) 3109 } 3110 3111 func (s *GoofysTest) TestDirMTimeNoTTL(t *C) { 3112 if s.cloud.Capabilities().DirBlob { 3113 t.Skip("Tests for behavior without dir blob") 3114 } 3115 // enable cheap to ensure GET dir/ will come back before LIST dir/ 3116 s.fs.flags.Cheap = true 3117 3118 dir2, err := s.LookUpInode(t, "dir2") 3119 t.Assert(err, IsNil) 3120 3121 attr2, _ := dir2.GetAttributes() 3122 m2 := attr2.Mtime 3123 3124 // dir2/dir3/ exists and has mtime 3125 s.readDirIntoCache(t, dir2.Id) 3126 dir3, err := s.LookUpInode(t, "dir2/dir3") 3127 t.Assert(err, IsNil) 3128 3129 attr3, _ := dir3.GetAttributes() 3130 // setupDefaultEnv is before mounting but we can't really 3131 // compare the time here since dir3 is s3 server time and dir2 3132 // is local time 3133 t.Assert(attr3.Mtime, Not(Equals), m2) 3134 } 3135 3136 func (s *GoofysTest) TestIssue326(t *C) { 3137 root := s.getRoot(t) 3138 _, err := root.MkDir("folder@name.something") 3139 t.Assert(err, IsNil) 3140 _, err = root.MkDir("folder#1#") 3141 t.Assert(err, IsNil) 3142 3143 s.readDirIntoCache(t, root.Id) 3144 s.assertEntries(t, root, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", 3145 "file1", "file2", "folder#1#", "folder@name.something", "zero"}) 3146 } 3147 3148 func (s *GoofysTest) TestSlurpFileAndDir(t *C) { 3149 if _, ok := s.cloud.Delegate().(*S3Backend); !ok { 3150 t.Skip("only for S3") 3151 } 3152 prefix := "TestSlurpFileAndDir/" 3153 // fileAndDir is both a file and a directory, and we are 3154 // slurping them together as part of our listing optimization 3155 blobs := []string{ 3156 prefix + "fileAndDir", 3157 prefix + "fileAndDir/a", 3158 } 3159 3160 for _, b := range blobs { 3161 params := &PutBlobInput{ 3162 Key: b, 3163 Body: bytes.NewReader([]byte("foo")), 3164 Size: 
PUInt64(3), 3165 } 3166 _, err := s.cloud.PutBlob(params) 3167 t.Assert(err, IsNil) 3168 } 3169 3170 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3171 s.fs.flags.StatCacheTTL = 1 * time.Minute 3172 3173 in, err := s.LookUpInode(t, prefix[0:len(prefix)-1]) 3174 t.Assert(err, IsNil) 3175 t.Assert(in.dir, NotNil) 3176 3177 s.getRoot(t).dir.seqOpenDirScore = 2 3178 s.readDirIntoCache(t, in.Id) 3179 3180 // should have slurped these 3181 in = in.findChild("fileAndDir") 3182 t.Assert(in, NotNil) 3183 t.Assert(in.dir, NotNil) 3184 3185 in = in.findChild("a") 3186 t.Assert(in, NotNil) 3187 3188 // because of slurping we've decided that this is a directory, 3189 // lookup must _not_ talk to S3 again because otherwise we may 3190 // decide it's a file again because of S3 race 3191 s.disableS3() 3192 in, err = s.LookUpInode(t, prefix+"fileAndDir") 3193 t.Assert(err, IsNil) 3194 3195 s.assertEntries(t, in, []string{"a"}) 3196 } 3197 3198 func (s *GoofysTest) TestAzureDirBlob(t *C) { 3199 if _, ok := s.cloud.(*AZBlob); !ok { 3200 t.Skip("only for Azure blob") 3201 } 3202 3203 fakedir := []string{"dir2", "dir3"} 3204 3205 for _, d := range fakedir { 3206 params := &PutBlobInput{ 3207 Key: "azuredir/" + d, 3208 Body: bytes.NewReader([]byte("")), 3209 Metadata: map[string]*string{ 3210 AzureDirBlobMetadataKey: PString("true"), 3211 }, 3212 Size: PUInt64(0), 3213 } 3214 _, err := s.cloud.PutBlob(params) 3215 t.Assert(err, IsNil) 3216 } 3217 3218 defer func() { 3219 // because our listing changes dir3 to dir3/, test 3220 // cleanup could not delete the blob so we wneed to 3221 // clean up 3222 for _, d := range fakedir { 3223 _, err := s.cloud.DeleteBlob(&DeleteBlobInput{Key: "azuredir/" + d}) 3224 t.Assert(err, IsNil) 3225 } 3226 }() 3227 3228 s.setupBlobs(s.cloud, t, map[string]*string{ 3229 // "azuredir/dir" would have gone here 3230 "azuredir/dir3,/": nil, 3231 "azuredir/dir3/file1": nil, 3232 "azuredir/dir345_is_a_file": nil, 3233 }) 3234 3235 head, err := 
s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir3"}) 3236 t.Assert(err, IsNil) 3237 t.Assert(head.IsDirBlob, Equals, true) 3238 3239 head, err = s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir345_is_a_file"}) 3240 t.Assert(err, IsNil) 3241 t.Assert(head.IsDirBlob, Equals, false) 3242 3243 list, err := s.cloud.ListBlobs(&ListBlobsInput{Prefix: PString("azuredir/")}) 3244 t.Assert(err, IsNil) 3245 3246 // for flat listing, we rename `dir3` to `dir3/` and add it to Items, 3247 // `dir3` normally sorts before `dir3./`, but after the rename `dir3/` should 3248 // sort after `dir3./` 3249 t.Assert(len(list.Items), Equals, 5) 3250 t.Assert(*list.Items[0].Key, Equals, "azuredir/dir2/") 3251 t.Assert(*list.Items[1].Key, Equals, "azuredir/dir3,/") 3252 t.Assert(*list.Items[2].Key, Equals, "azuredir/dir3/") 3253 t.Assert(*list.Items[3].Key, Equals, "azuredir/dir3/file1") 3254 t.Assert(*list.Items[4].Key, Equals, "azuredir/dir345_is_a_file") 3255 t.Assert(sort.IsSorted(sortBlobItemOutput(list.Items)), Equals, true) 3256 3257 list, err = s.cloud.ListBlobs(&ListBlobsInput{ 3258 Prefix: PString("azuredir/"), 3259 Delimiter: PString("/"), 3260 }) 3261 t.Assert(err, IsNil) 3262 3263 // for delimited listing, we remove `dir3` from items and add `dir3/` to prefixes, 3264 // which should already be there 3265 t.Assert(len(list.Items), Equals, 1) 3266 t.Assert(*list.Items[0].Key, Equals, "azuredir/dir345_is_a_file") 3267 3268 t.Assert(len(list.Prefixes), Equals, 3) 3269 t.Assert(*list.Prefixes[0].Prefix, Equals, "azuredir/dir2/") 3270 t.Assert(*list.Prefixes[1].Prefix, Equals, "azuredir/dir3,/") 3271 t.Assert(*list.Prefixes[2].Prefix, Equals, "azuredir/dir3/") 3272 3273 // finally check that we are reading them in correctly 3274 in, err := s.LookUpInode(t, "azuredir") 3275 t.Assert(err, IsNil) 3276 3277 s.assertEntries(t, in, []string{"dir2", "dir3", "dir3,", "dir345_is_a_file"}) 3278 } 3279 3280 func (s *GoofysTest) TestReadDirLarge(t *C) { 3281 root := s.getRoot(t) 3282 
root.dir.mountPrefix = "empty_dir" 3283 3284 blobs := make(map[string]*string) 3285 expect := make([]string, 0) 3286 for i := 0; i < 998; i++ { 3287 blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil 3288 expect = append(expect, fmt.Sprintf("%04vd", i)) 3289 } 3290 blobs["empty_dir/0998f"] = nil 3291 blobs["empty_dir/0999f"] = nil 3292 blobs["empty_dir/1000f"] = nil 3293 expect = append(expect, "0998f") 3294 expect = append(expect, "0999f") 3295 expect = append(expect, "1000f") 3296 3297 for i := 1001; i < 1003; i++ { 3298 blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil 3299 expect = append(expect, fmt.Sprintf("%04vd", i)) 3300 } 3301 3302 s.setupBlobs(s.cloud, t, blobs) 3303 3304 dh := root.OpenDir() 3305 defer dh.CloseDir() 3306 3307 children := namesOf(s.readDirFully(t, dh)) 3308 sort.Strings(children) 3309 3310 t.Assert(children, DeepEquals, expect) 3311 } 3312 3313 func (s *GoofysTest) newBackend(t *C, bucket string, createBucket bool) (cloud StorageBackend) { 3314 var err error 3315 switch s.cloud.Delegate().(type) { 3316 case *S3Backend: 3317 config, _ := s.fs.flags.Backend.(*S3Config) 3318 s3, err := NewS3(bucket, s.fs.flags, config) 3319 t.Assert(err, IsNil) 3320 3321 s3.aws = hasEnv("AWS") 3322 3323 if s.emulator { 3324 s3.Handlers.Sign.Clear() 3325 s3.Handlers.Sign.PushBack(SignV2) 3326 s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) 3327 } 3328 3329 if s3.aws { 3330 cloud = NewS3BucketEventualConsistency(s3) 3331 } else { 3332 cloud = s3 3333 } 3334 case *GCS3: 3335 config, _ := s.fs.flags.Backend.(*S3Config) 3336 cloud, err = NewGCS3(bucket, s.fs.flags, config) 3337 t.Assert(err, IsNil) 3338 case *AZBlob: 3339 config, _ := s.fs.flags.Backend.(*AZBlobConfig) 3340 cloud, err = NewAZBlob(bucket, config) 3341 t.Assert(err, IsNil) 3342 case *ADLv1: 3343 config, _ := s.fs.flags.Backend.(*ADLv1Config) 3344 cloud, err = NewADLv1(bucket, s.fs.flags, config) 3345 t.Assert(err, IsNil) 3346 case *ADLv2: 3347 config, _ := 
s.fs.flags.Backend.(*ADLv2Config) 3348 cloud, err = NewADLv2(bucket, s.fs.flags, config) 3349 t.Assert(err, IsNil) 3350 default: 3351 t.Fatal("unknown backend") 3352 } 3353 3354 if createBucket { 3355 _, err = cloud.MakeBucket(&MakeBucketInput{}) 3356 t.Assert(err, IsNil) 3357 3358 s.removeBucket = append(s.removeBucket, cloud) 3359 } 3360 3361 return 3362 } 3363 3364 func (s *GoofysTest) TestVFS(t *C) { 3365 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3366 cloud2 := s.newBackend(t, bucket, true) 3367 3368 // "mount" this 2nd cloud 3369 in, err := s.LookUpInode(t, "dir4") 3370 t.Assert(in, NotNil) 3371 t.Assert(err, IsNil) 3372 3373 in.dir.cloud = cloud2 3374 in.dir.mountPrefix = "cloud2Prefix/" 3375 3376 rootCloud, rootPath := in.cloud() 3377 t.Assert(rootCloud, NotNil) 3378 t.Assert(rootCloud == cloud2, Equals, true) 3379 t.Assert(rootPath, Equals, "cloud2Prefix") 3380 3381 // the mount would shadow dir4/file5 3382 _, err = in.LookUp("file5") 3383 t.Assert(err, Equals, fuse.ENOENT) 3384 3385 _, fh := in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())}) 3386 err = fh.FlushFile() 3387 t.Assert(err, IsNil) 3388 3389 resp, err := cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile"}) 3390 t.Assert(err, IsNil) 3391 defer resp.Body.Close() 3392 3393 err = s.getRoot(t).Rename("file1", in, "file2") 3394 t.Assert(err, Equals, syscall.EINVAL) 3395 3396 _, err = in.MkDir("subdir") 3397 t.Assert(err, IsNil) 3398 3399 subdirKey := "cloud2Prefix/subdir" 3400 if !cloud2.Capabilities().DirBlob { 3401 subdirKey += "/" 3402 } 3403 3404 _, err = cloud2.HeadBlob(&HeadBlobInput{Key: subdirKey}) 3405 t.Assert(err, IsNil) 3406 3407 subdir, err := s.LookUpInode(t, "dir4/subdir") 3408 t.Assert(err, IsNil) 3409 t.Assert(subdir, NotNil) 3410 t.Assert(subdir.dir, NotNil) 3411 t.Assert(subdir.dir.cloud, IsNil) 3412 3413 subdirCloud, subdirPath := subdir.cloud() 3414 t.Assert(subdirCloud, NotNil) 3415 t.Assert(subdirCloud == cloud2, Equals, true) 3416 
t.Assert(subdirPath, Equals, "cloud2Prefix/subdir") 3417 3418 // create another file inside subdir to make sure that our 3419 // mount check is correct for dir inside the root 3420 _, fh = subdir.Create("testfile2", fuseops.OpMetadata{uint32(os.Getpid())}) 3421 err = fh.FlushFile() 3422 t.Assert(err, IsNil) 3423 3424 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3425 t.Assert(err, IsNil) 3426 defer resp.Body.Close() 3427 3428 err = subdir.Rename("testfile2", in, "testfile2") 3429 t.Assert(err, IsNil) 3430 3431 _, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3432 t.Assert(err, Equals, fuse.ENOENT) 3433 3434 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"}) 3435 t.Assert(err, IsNil) 3436 defer resp.Body.Close() 3437 3438 err = in.Rename("testfile2", subdir, "testfile2") 3439 t.Assert(err, IsNil) 3440 3441 _, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"}) 3442 t.Assert(err, Equals, fuse.ENOENT) 3443 3444 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3445 t.Assert(err, IsNil) 3446 defer resp.Body.Close() 3447 } 3448 3449 func (s *GoofysTest) TestMountsList(t *C) { 3450 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3451 s.fs.flags.StatCacheTTL = 1 * time.Minute 3452 3453 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3454 cloud := s.newBackend(t, bucket, true) 3455 3456 root := s.getRoot(t) 3457 rootCloud := root.dir.cloud 3458 3459 s.fs.MountAll([]*Mount{ 3460 &Mount{"dir4/cloud1", cloud, "", false}, 3461 }) 3462 3463 in, err := s.LookUpInode(t, "dir4") 3464 t.Assert(in, NotNil) 3465 t.Assert(err, IsNil) 3466 t.Assert(int(in.Id), Equals, 2) 3467 3468 s.readDirIntoCache(t, in.Id) 3469 // ensure that listing is listing mounts and root bucket in one go 3470 root.dir.cloud = nil 3471 3472 s.assertEntries(t, in, []string{"cloud1", "file5"}) 3473 3474 c1, err := s.LookUpInode(t, "dir4/cloud1") 3475 t.Assert(err, IsNil) 3476 
t.Assert(*c1.Name, Equals, "cloud1") 3477 t.Assert(c1.dir.cloud == cloud, Equals, true) 3478 t.Assert(int(c1.Id), Equals, 3) 3479 3480 // pretend we've passed the normal cache ttl 3481 s.fs.flags.TypeCacheTTL = 0 3482 s.fs.flags.StatCacheTTL = 0 3483 3484 // listing root again should not overwrite the mounts 3485 root.dir.cloud = rootCloud 3486 3487 s.readDirIntoCache(t, in.Parent.Id) 3488 s.assertEntries(t, in, []string{"cloud1", "file5"}) 3489 3490 c1, err = s.LookUpInode(t, "dir4/cloud1") 3491 t.Assert(err, IsNil) 3492 t.Assert(*c1.Name, Equals, "cloud1") 3493 t.Assert(c1.dir.cloud == cloud, Equals, true) 3494 t.Assert(int(c1.Id), Equals, 3) 3495 } 3496 3497 func (s *GoofysTest) TestMountsNewDir(t *C) { 3498 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3499 cloud := s.newBackend(t, bucket, true) 3500 3501 _, err := s.LookUpInode(t, "dir5") 3502 t.Assert(err, NotNil) 3503 t.Assert(err, Equals, fuse.ENOENT) 3504 3505 s.fs.MountAll([]*Mount{ 3506 &Mount{"dir5/cloud1", cloud, "", false}, 3507 }) 3508 3509 in, err := s.LookUpInode(t, "dir5") 3510 t.Assert(err, IsNil) 3511 t.Assert(in.isDir(), Equals, true) 3512 3513 c1, err := s.LookUpInode(t, "dir5/cloud1") 3514 t.Assert(err, IsNil) 3515 t.Assert(c1.isDir(), Equals, true) 3516 t.Assert(c1.dir.cloud, Equals, cloud) 3517 } 3518 3519 func (s *GoofysTest) TestMountsNewMounts(t *C) { 3520 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3521 cloud := s.newBackend(t, bucket, true) 3522 3523 // "mount" this 2nd cloud 3524 in, err := s.LookUpInode(t, "dir4") 3525 t.Assert(in, NotNil) 3526 t.Assert(err, IsNil) 3527 3528 s.fs.MountAll([]*Mount{ 3529 &Mount{"dir4/cloud1", cloud, "", false}, 3530 }) 3531 3532 s.readDirIntoCache(t, in.Id) 3533 3534 c1, err := s.LookUpInode(t, "dir4/cloud1") 3535 t.Assert(err, IsNil) 3536 t.Assert(*c1.Name, Equals, "cloud1") 3537 t.Assert(c1.dir.cloud == cloud, Equals, true) 3538 3539 _, err = s.LookUpInode(t, "dir4/cloud2") 3540 t.Assert(err, Equals, fuse.ENOENT) 3541 3542 
s.fs.MountAll([]*Mount{ 3543 &Mount{"dir4/cloud1", cloud, "", false}, 3544 &Mount{"dir4/cloud2", cloud, "cloudprefix", false}, 3545 }) 3546 3547 c2, err := s.LookUpInode(t, "dir4/cloud2") 3548 t.Assert(err, IsNil) 3549 t.Assert(*c2.Name, Equals, "cloud2") 3550 t.Assert(c2.dir.cloud == cloud, Equals, true) 3551 t.Assert(c2.dir.mountPrefix, Equals, "cloudprefix") 3552 } 3553 3554 func (s *GoofysTest) TestMountsError(t *C) { 3555 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3556 var cloud StorageBackend 3557 if s3, ok := s.cloud.Delegate().(*S3Backend); ok { 3558 // S3Backend can't detect bucket doesn't exist because 3559 // HEAD an object always return 404 NotFound (instead 3560 // of NoSuchBucket) 3561 flags := *s3.flags 3562 config := *s3.config 3563 flags.Endpoint = "0.0.0.0:0" 3564 var err error 3565 cloud, err = NewS3(bucket, &flags, &config) 3566 t.Assert(err, IsNil) 3567 } else if _, ok := s.cloud.(*ADLv1); ok { 3568 config, _ := s.fs.flags.Backend.(*ADLv1Config) 3569 config.Authorizer = nil 3570 3571 var err error 3572 cloud, err = NewADLv1(bucket, s.fs.flags, config) 3573 t.Assert(err, IsNil) 3574 } else if _, ok := s.cloud.(*ADLv2); ok { 3575 // ADLv2 currently doesn't detect bucket doesn't exist 3576 cloud = s.newBackend(t, bucket, false) 3577 adlCloud, _ := cloud.(*ADLv2) 3578 auth := adlCloud.client.BaseClient.Authorizer 3579 adlCloud.client.BaseClient.Authorizer = nil 3580 defer func() { 3581 adlCloud.client.BaseClient.Authorizer = auth 3582 }() 3583 } else { 3584 cloud = s.newBackend(t, bucket, false) 3585 } 3586 3587 s.fs.MountAll([]*Mount{ 3588 &Mount{"dir4/newerror", StorageBackendInitError{ 3589 fmt.Errorf("foo"), 3590 Capabilities{}, 3591 }, "errprefix1", false}, 3592 &Mount{"dir4/initerror", &StorageBackendInitWrapper{ 3593 StorageBackend: cloud, 3594 initKey: "foobar", 3595 }, "errprefix2", false}, 3596 }) 3597 3598 errfile, err := s.LookUpInode(t, "dir4/newerror/"+INIT_ERR_BLOB) 3599 t.Assert(err, IsNil) 3600 
t.Assert(errfile.isDir(), Equals, false) 3601 3602 _, err = s.LookUpInode(t, "dir4/newerror/not_there") 3603 t.Assert(err, Equals, fuse.ENOENT) 3604 3605 errfile, err = s.LookUpInode(t, "dir4/initerror/"+INIT_ERR_BLOB) 3606 t.Assert(err, IsNil) 3607 t.Assert(errfile.isDir(), Equals, false) 3608 3609 _, err = s.LookUpInode(t, "dir4/initerror/not_there") 3610 t.Assert(err, Equals, fuse.ENOENT) 3611 3612 in, err := s.LookUpInode(t, "dir4/initerror") 3613 t.Assert(err, IsNil) 3614 t.Assert(in, NotNil) 3615 3616 t.Assert(in.dir.cloud.Capabilities().Name, Equals, cloud.Capabilities().Name) 3617 } 3618 3619 func (s *GoofysTest) TestMountsMultiLevel(t *C) { 3620 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3621 3622 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3623 cloud := s.newBackend(t, bucket, true) 3624 3625 s.fs.MountAll([]*Mount{ 3626 &Mount{"dir4/sub/dir", cloud, "", false}, 3627 }) 3628 3629 sub, err := s.LookUpInode(t, "dir4/sub") 3630 t.Assert(err, IsNil) 3631 t.Assert(sub.isDir(), Equals, true) 3632 3633 s.assertEntries(t, sub, []string{"dir"}) 3634 } 3635 3636 func (s *GoofysTest) TestMountsNested(t *C) { 3637 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3638 cloud := s.newBackend(t, bucket, true) 3639 s.testMountsNested(t, cloud, []*Mount{ 3640 &Mount{"dir5/in/a/dir", cloud, "a/dir/", false}, 3641 &Mount{"dir5/in/", cloud, "b/", false}, 3642 }) 3643 } 3644 3645 // test that mount order doesn't matter for nested mounts 3646 func (s *GoofysTest) TestMountsNestedReversed(t *C) { 3647 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3648 cloud := s.newBackend(t, bucket, true) 3649 s.testMountsNested(t, cloud, []*Mount{ 3650 &Mount{"dir5/in/", cloud, "b/", false}, 3651 &Mount{"dir5/in/a/dir", cloud, "a/dir/", false}, 3652 }) 3653 } 3654 3655 func (s *GoofysTest) testMountsNested(t *C, cloud StorageBackend, 3656 mounts []*Mount) { 3657 3658 _, err := s.LookUpInode(t, "dir5") 3659 t.Assert(err, NotNil) 3660 t.Assert(err, Equals, 
fuse.ENOENT) 3661 3662 s.fs.MountAll(mounts) 3663 3664 in, err := s.LookUpInode(t, "dir5") 3665 t.Assert(err, IsNil) 3666 3667 s.readDirIntoCache(t, in.Id) 3668 3669 // make sure all the intermediate dirs never expire 3670 time.Sleep(time.Second) 3671 dir_in, err := s.LookUpInode(t, "dir5/in") 3672 t.Assert(err, IsNil) 3673 t.Assert(*dir_in.Name, Equals, "in") 3674 3675 s.readDirIntoCache(t, dir_in.Id) 3676 3677 dir_a, err := s.LookUpInode(t, "dir5/in/a") 3678 t.Assert(err, IsNil) 3679 t.Assert(*dir_a.Name, Equals, "a") 3680 3681 s.assertEntries(t, dir_a, []string{"dir"}) 3682 3683 dir_dir, err := s.LookUpInode(t, "dir5/in/a/dir") 3684 t.Assert(err, IsNil) 3685 t.Assert(*dir_dir.Name, Equals, "dir") 3686 t.Assert(dir_dir.dir.cloud == cloud, Equals, true) 3687 3688 _, fh := dir_in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())}) 3689 err = fh.FlushFile() 3690 t.Assert(err, IsNil) 3691 3692 resp, err := cloud.GetBlob(&GetBlobInput{Key: "b/testfile"}) 3693 t.Assert(err, IsNil) 3694 defer resp.Body.Close() 3695 3696 _, fh = dir_dir.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())}) 3697 err = fh.FlushFile() 3698 t.Assert(err, IsNil) 3699 3700 resp, err = cloud.GetBlob(&GetBlobInput{Key: "a/dir/testfile"}) 3701 t.Assert(err, IsNil) 3702 defer resp.Body.Close() 3703 3704 s.assertEntries(t, in, []string{"in"}) 3705 } 3706 3707 func verifyFileData(t *C, mountPoint string, path string, content *string) { 3708 if !strings.HasSuffix(mountPoint, "/") { 3709 mountPoint = mountPoint + "/" 3710 } 3711 path = mountPoint + path 3712 data, err := ioutil.ReadFile(path) 3713 comment := Commentf("failed while verifying %v", path) 3714 if content != nil { 3715 t.Assert(err, IsNil, comment) 3716 t.Assert(strings.TrimSpace(string(data)), Equals, *content, comment) 3717 } else { 3718 t.Assert(err, Not(IsNil), comment) 3719 t.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true, comment) 3720 } 3721 } 3722 3723 func (s *GoofysTest) 
TestNestedMountUnmountSimple(t *C) { 3724 childBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3725 childCloud := s.newBackend(t, childBucket, true) 3726 3727 parFileContent := "parent" 3728 childFileContent := "child" 3729 parEnv := map[string]*string{ 3730 "childmnt/x/in_child_and_par": &parFileContent, 3731 "childmnt/x/in_par_only": &parFileContent, 3732 "nonchildmnt/something": &parFileContent, 3733 } 3734 childEnv := map[string]*string{ 3735 "x/in_child_only": &childFileContent, 3736 "x/in_child_and_par": &childFileContent, 3737 } 3738 s.setupBlobs(s.cloud, t, parEnv) 3739 s.setupBlobs(childCloud, t, childEnv) 3740 3741 rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16) 3742 s.mount(t, rootMountPath) 3743 defer s.umount(t, rootMountPath) 3744 // Files under /tmp/fusetesting/ should all be from goofys root. 3745 verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent) 3746 verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent) 3747 verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent) 3748 verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil) 3749 3750 childMount := &Mount{"childmnt", childCloud, "", false} 3751 s.fs.Mount(childMount) 3752 // Now files under /tmp/fusetesting/childmnt should be from childBucket 3753 verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", nil) 3754 verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &childFileContent) 3755 verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", &childFileContent) 3756 // /tmp/fusetesting/nonchildmnt should be from parent bucket. 3757 verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent) 3758 3759 s.fs.Unmount(childMount.name) 3760 // Child is unmounted. So files under /tmp/fusetesting/ should all be from goofys root. 
3761 verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent) 3762 verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent) 3763 verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent) 3764 verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil) 3765 } 3766 3767 func (s *GoofysTest) TestUnmountBucketWithChild(t *C) { 3768 // This bucket will be mounted at ${goofysroot}/c 3769 cBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3770 cCloud := s.newBackend(t, cBucket, true) 3771 3772 // This bucket will be mounted at ${goofysroot}/c/c 3773 ccBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3774 ccCloud := s.newBackend(t, ccBucket, true) 3775 3776 pFileContent := "parent" 3777 cFileContent := "child" 3778 ccFileContent := "childchild" 3779 pEnv := map[string]*string{ 3780 "c/c/x/foo": &pFileContent, 3781 } 3782 cEnv := map[string]*string{ 3783 "c/x/foo": &cFileContent, 3784 } 3785 ccEnv := map[string]*string{ 3786 "x/foo": &ccFileContent, 3787 } 3788 3789 s.setupBlobs(s.cloud, t, pEnv) 3790 s.setupBlobs(cCloud, t, cEnv) 3791 s.setupBlobs(ccCloud, t, ccEnv) 3792 3793 rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16) 3794 s.mount(t, rootMountPath) 3795 defer s.umount(t, rootMountPath) 3796 // c/c/foo should come from root mount. 3797 verifyFileData(t, rootMountPath, "c/c/x/foo", &pFileContent) 3798 3799 cMount := &Mount{"c", cCloud, "", false} 3800 s.fs.Mount(cMount) 3801 // c/c/foo should come from "c" mount. 3802 verifyFileData(t, rootMountPath, "c/c/x/foo", &cFileContent) 3803 3804 ccMount := &Mount{"c/c", ccCloud, "", false} 3805 s.fs.Mount(ccMount) 3806 // c/c/foo should come from "c/c" mount. 3807 verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent) 3808 3809 s.fs.Unmount(cMount.name) 3810 // c/c/foo should still come from "c/c" mount. 
3811 verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent) 3812 } 3813 3814 func (s *GoofysTest) TestRmImplicitDir(t *C) { 3815 mountPoint := "/tmp/mnt" + s.fs.bucket 3816 3817 s.mount(t, mountPoint) 3818 defer s.umount(t, mountPoint) 3819 3820 defer os.Chdir("/") 3821 3822 dir, err := os.Open(mountPoint + "/dir2") 3823 t.Assert(err, IsNil) 3824 defer dir.Close() 3825 3826 err = dir.Chdir() 3827 t.Assert(err, IsNil) 3828 3829 err = os.RemoveAll(mountPoint + "/dir2") 3830 t.Assert(err, IsNil) 3831 3832 root, err := os.Open(mountPoint) 3833 t.Assert(err, IsNil) 3834 defer root.Close() 3835 3836 files, err := root.Readdirnames(0) 3837 t.Assert(err, IsNil) 3838 t.Assert(files, DeepEquals, []string{ 3839 "dir1", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero", 3840 }) 3841 } 3842 3843 func (s *GoofysTest) TestMount(t *C) { 3844 if os.Getenv("MOUNT") == "false" { 3845 t.Skip("Not mounting") 3846 } 3847 3848 mountPoint := "/tmp/mnt" + s.fs.bucket 3849 3850 s.mount(t, mountPoint) 3851 defer s.umount(t, mountPoint) 3852 3853 log.Printf("Mounted at %v", mountPoint) 3854 3855 c := make(chan os.Signal, 2) 3856 signal.Notify(c, os.Interrupt, syscall.SIGTERM) 3857 <-c 3858 } 3859 3860 // Checks if 2 sorted lists are equal. Returns a helpful error if they differ. 
3861 func checkSortedListsAreEqual(l1, l2 []string) error { 3862 i1, i2 := 0, 0 3863 onlyl1, onlyl2 := []string{}, []string{} 3864 for i1 < len(l1) && i2 < len(l2) { 3865 if l1[i1] == l2[i2] { 3866 i1++ 3867 i2++ 3868 } else if l1[i1] < l2[i2] { 3869 onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1])) 3870 i1++ 3871 } else { 3872 onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2])) 3873 i2++ 3874 } 3875 3876 } 3877 for ; i1 < len(l1); i1++ { 3878 onlyl1 = append(onlyl1, fmt.Sprintf("%d:%v", i1, l1[i1])) 3879 } 3880 for ; i2 < len(l2); i2++ { 3881 onlyl2 = append(onlyl2, fmt.Sprintf("%d:%v", i2, l2[i2])) 3882 } 3883 3884 if len(onlyl1)+len(onlyl2) == 0 { 3885 return nil 3886 } 3887 toString := func(l []string) string { 3888 ret := []string{} 3889 // The list can contain a lot of elements. Show only ten and say 3890 // "and x more". 3891 for i := 0; i < len(l) && i < 10; i++ { 3892 ret = append(ret, l[i]) 3893 } 3894 if len(ret) < len(l) { 3895 ret = append(ret, fmt.Sprintf("and %d more", len(l)-len(ret))) 3896 } 3897 return strings.Join(ret, ", ") 3898 } 3899 return fmt.Errorf("only l1: %+v, only l2: %+v", 3900 toString(onlyl1), toString(onlyl2)) 3901 } 3902 3903 func (s *GoofysTest) TestReadDirDash(t *C) { 3904 if s.azurite { 3905 t.Skip("ADLv1 doesn't have pagination") 3906 } 3907 root := s.getRoot(t) 3908 root.dir.mountPrefix = "prefix" 3909 3910 // SETUP 3911 // Add the following blobs 3912 // - prefix/2019/1 3913 // - prefix/2019-0000 to prefix/2019-4999 3914 // - prefix/20190000 to prefix/20194999 3915 // Fetching this result will need 3 pages in azure (pagesize 5k) and 11 pages 3916 // in amazon (pagesize 1k) 3917 // This setup will verify that we paginate and return results correctly before and after 3918 // seeing all contents that have a '-' ('-' < '/'). For more context read the comments in 3919 // dir.go::listBlobsSafe. 
3920 blobs := make(map[string]*string) 3921 expect := []string{"2019"} 3922 blobs["prefix/2019/1"] = nil 3923 for i := 0; i < 5000; i++ { 3924 name := fmt.Sprintf("2019-%04d", i) 3925 expect = append(expect, name) 3926 blobs["prefix/"+name] = nil 3927 } 3928 for i := 0; i < 5000; i++ { 3929 name := fmt.Sprintf("2019%04d", i) 3930 expect = append(expect, name) 3931 blobs["prefix/"+name] = nil 3932 } 3933 s.setupBlobs(s.cloud, t, blobs) 3934 3935 // Read the directory and verify its contents. 3936 dh := root.OpenDir() 3937 defer dh.CloseDir() 3938 3939 children := namesOf(s.readDirFully(t, dh)) 3940 t.Assert(checkSortedListsAreEqual(children, expect), IsNil) 3941 } 3942 3943 func (s *GoofysTest) TestWriteListFlush(t *C) { 3944 root := s.getRoot(t) 3945 root.dir.mountPrefix = "this_test/" 3946 3947 dir, err := root.MkDir("dir") 3948 t.Assert(err, IsNil) 3949 s.fs.insertInode(root, dir) 3950 3951 in, fh := dir.Create("file1", fuseops.OpMetadata{}) 3952 t.Assert(in, NotNil) 3953 t.Assert(fh, NotNil) 3954 s.fs.insertInode(dir, in) 3955 3956 s.assertEntries(t, dir, []string{"file1"}) 3957 3958 // in should still be valid 3959 t.Assert(in.Parent, NotNil) 3960 t.Assert(in.Parent, Equals, dir) 3961 fh.FlushFile() 3962 3963 s.assertEntries(t, dir, []string{"file1"}) 3964 } 3965 3966 type includes struct{} 3967 3968 func (c includes) Info() *CheckerInfo { 3969 return &CheckerInfo{Name: "includes", Params: []string{"obtained", "expected"}} 3970 } 3971 3972 func (c includes) Check(params []interface{}, names []string) (res bool, error string) { 3973 arr := reflect.ValueOf(params[0]) 3974 switch arr.Kind() { 3975 case reflect.Array, reflect.Slice, reflect.String: 3976 default: 3977 panic(fmt.Sprintf("%v is not an array", names[0])) 3978 } 3979 3980 for i := 0; i < arr.Len(); i++ { 3981 v := arr.Index(i).Interface() 3982 res, error = DeepEquals.Check([]interface{}{v, params[1]}, names) 3983 if res { 3984 return 3985 } else { 3986 error = "" 3987 } 3988 3989 res = false 3990 } 3991 
return 3992 } 3993 3994 func (s *GoofysTest) TestWriteUnlinkFlush(t *C) { 3995 root := s.getRoot(t) 3996 3997 dir, err := root.MkDir("dir") 3998 t.Assert(err, IsNil) 3999 s.fs.insertInode(root, dir) 4000 4001 in, fh := dir.Create("deleted", fuseops.OpMetadata{}) 4002 t.Assert(in, NotNil) 4003 t.Assert(fh, NotNil) 4004 s.fs.insertInode(dir, in) 4005 4006 err = dir.Unlink("deleted") 4007 t.Assert(err, IsNil) 4008 4009 s.disableS3() 4010 err = fh.FlushFile() 4011 t.Assert(err, IsNil) 4012 4013 dh := dir.OpenDir() 4014 defer dh.CloseDir() 4015 t.Assert(namesOf(s.readDirFully(t, dh)), Not(includes{}), "deleted") 4016 } 4017 4018 func (s *GoofysTest) TestIssue474(t *C) { 4019 s.fs.flags.TypeCacheTTL = 1 * time.Second 4020 s.fs.flags.Cheap = true 4021 4022 p := "this_test/" 4023 root := s.getRoot(t) 4024 root.dir.mountPrefix = "this_test/" 4025 root.dir.seqOpenDirScore = 2 4026 4027 blobs := make(map[string]*string) 4028 4029 in := []string{ 4030 "1/a/b", 4031 "2/c/d", 4032 } 4033 4034 for _, s := range in { 4035 blobs[p+s] = nil 4036 } 4037 4038 s.setupBlobs(s.cloud, t, blobs) 4039 4040 dir1, err := s.LookUpInode(t, "1") 4041 t.Assert(err, IsNil) 4042 // this would list 1/ and slurp in 2/c/d at the same time 4043 s.assertEntries(t, dir1, []string{"a"}) 4044 4045 // 2/ will expire and require re-listing. 
ensure that we don't 4046 // remove any children as stale as we update 4047 time.Sleep(time.Second) 4048 4049 dir2, err := s.LookUpInode(t, "2") 4050 t.Assert(err, IsNil) 4051 s.assertEntries(t, dir2, []string{"c"}) 4052 } 4053 4054 func (s *GoofysTest) TestReadExternalChangesFuse(t *C) { 4055 s.fs.flags.StatCacheTTL = 1 * time.Second 4056 4057 mountPoint := "/tmp/mnt" + s.fs.bucket 4058 4059 s.mount(t, mountPoint) 4060 defer s.umount(t, mountPoint) 4061 4062 file := "file1" 4063 filePath := mountPoint + "/file1" 4064 4065 buf, err := ioutil.ReadFile(filePath) 4066 t.Assert(err, IsNil) 4067 t.Assert(string(buf), Equals, file) 4068 4069 update := "file2" 4070 _, err = s.cloud.PutBlob(&PutBlobInput{ 4071 Key: file, 4072 Body: bytes.NewReader([]byte(update)), 4073 Size: PUInt64(uint64(len(update))), 4074 }) 4075 t.Assert(err, IsNil) 4076 4077 time.Sleep(1 * time.Second) 4078 4079 buf, err = ioutil.ReadFile(filePath) 4080 t.Assert(err, IsNil) 4081 t.Assert(string(buf), Equals, update) 4082 4083 // the next read shouldn't talk to cloud 4084 root := s.getRoot(t) 4085 root.dir.cloud = &StorageBackendInitError{ 4086 syscall.EINVAL, *root.dir.cloud.Capabilities(), 4087 } 4088 4089 buf, err = ioutil.ReadFile(filePath) 4090 t.Assert(err, IsNil) 4091 t.Assert(string(buf), Equals, update) 4092 } 4093 4094 func (s *GoofysTest) TestReadMyOwnWriteFuse(t *C) { 4095 s.testReadMyOwnWriteFuse(t, false) 4096 } 4097 4098 func (s *GoofysTest) TestReadMyOwnWriteExternalChangesFuse(t *C) { 4099 s.testReadMyOwnWriteFuse(t, true) 4100 } 4101 4102 func (s *GoofysTest) testReadMyOwnWriteFuse(t *C, externalUpdate bool) { 4103 s.fs.flags.StatCacheTTL = 1 * time.Second 4104 4105 mountPoint := "/tmp/mnt" + s.fs.bucket 4106 4107 s.mount(t, mountPoint) 4108 defer s.umount(t, mountPoint) 4109 4110 file := "file1" 4111 filePath := mountPoint + "/file1" 4112 4113 buf, err := ioutil.ReadFile(filePath) 4114 t.Assert(err, IsNil) 4115 t.Assert(string(buf), Equals, file) 4116 4117 if externalUpdate { 4118 
update := "file2" 4119 _, err = s.cloud.PutBlob(&PutBlobInput{ 4120 Key: file, 4121 Body: bytes.NewReader([]byte(update)), 4122 Size: PUInt64(uint64(len(update))), 4123 }) 4124 t.Assert(err, IsNil) 4125 4126 time.Sleep(s.fs.flags.StatCacheTTL) 4127 } 4128 4129 fh, err := os.Create(filePath) 4130 t.Assert(err, IsNil) 4131 4132 _, err = fh.WriteString("file3") 4133 t.Assert(err, IsNil) 4134 // we can't flush yet because if we did, we would be reading 4135 // the new copy from cloud and that's not the point of this 4136 // test 4137 defer func() { 4138 // want fh to be late-binding because we re-use the variable 4139 fh.Close() 4140 }() 4141 4142 buf, err = ioutil.ReadFile(filePath) 4143 t.Assert(err, IsNil) 4144 if externalUpdate { 4145 // if there was an external update, we had set 4146 // KeepPageCache to false on os.Create above, which 4147 // causes our write to not be in cache, and read here 4148 // will go to cloud 4149 t.Assert(string(buf), Equals, "file2") 4150 } else { 4151 t.Assert(string(buf), Equals, "file3") 4152 } 4153 4154 err = fh.Close() 4155 t.Assert(err, IsNil) 4156 4157 time.Sleep(s.fs.flags.StatCacheTTL) 4158 4159 root := s.getRoot(t) 4160 cloud := &TestBackend{root.dir.cloud, nil} 4161 root.dir.cloud = cloud 4162 4163 fh, err = os.Open(filePath) 4164 t.Assert(err, IsNil) 4165 4166 if !externalUpdate { 4167 // we flushed and ttl expired, next lookup should 4168 // realize nothing is changed and NOT invalidate the 4169 // cache. 
Except ADLv1 because PUT there doesn't 4170 // return the mtime, so the open above will think the 4171 // file is updated and not re-use cache 4172 if _, adlv1 := s.cloud.(*ADLv1); !adlv1 { 4173 cloud.err = fuse.EINVAL 4174 } 4175 } else { 4176 // if there was externalUpdate, we wrote our own 4177 // update with KeepPageCache=false, so we should read 4178 // from the cloud her 4179 } 4180 4181 buf, err = ioutil.ReadAll(fh) 4182 t.Assert(err, IsNil) 4183 t.Assert(string(buf), Equals, "file3") 4184 } 4185 4186 func (s *GoofysTest) TestReadMyOwnNewFileFuse(t *C) { 4187 s.fs.flags.StatCacheTTL = 1 * time.Second 4188 s.fs.flags.TypeCacheTTL = 1 * time.Second 4189 4190 mountPoint := "/tmp/mnt" + s.fs.bucket 4191 4192 s.mount(t, mountPoint) 4193 defer s.umount(t, mountPoint) 4194 4195 filePath := mountPoint + "/filex" 4196 4197 // jacobsa/fuse doesn't support setting OpenKeepCache on 4198 // CreateFile but even after manually setting in in 4199 // fuse/conversions.go, we still receive read ops instead of 4200 // being handled by kernel 4201 4202 fh, err := os.Create(filePath) 4203 t.Assert(err, IsNil) 4204 4205 _, err = fh.WriteString("filex") 4206 t.Assert(err, IsNil) 4207 // we can't flush yet because if we did, we would be reading 4208 // the new copy from cloud and that's not the point of this 4209 // test 4210 defer fh.Close() 4211 4212 // disabled: we can't actually read back our own update 4213 //buf, err := ioutil.ReadFile(filePath) 4214 //t.Assert(err, IsNil) 4215 //t.Assert(string(buf), Equals, "filex") 4216 }