github.com/knpwrs/goofys@v0.24.0/internal/goofys_test.go (about) 1 // Copyright 2015 - 2017 Ka-Hing Cheung 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package internal 16 17 import ( 18 . "github.com/kahing/goofys/api/common" 19 20 "bufio" 21 "bytes" 22 "fmt" 23 "io" 24 "io/ioutil" 25 "math/rand" 26 "net" 27 "os" 28 "os/exec" 29 "os/signal" 30 "os/user" 31 "reflect" 32 "runtime" 33 "sort" 34 "strconv" 35 "strings" 36 "sync" 37 "syscall" 38 "testing" 39 "time" 40 41 "context" 42 43 "github.com/aws/aws-sdk-go/aws" 44 "github.com/aws/aws-sdk-go/aws/corehandlers" 45 "github.com/aws/aws-sdk-go/aws/credentials" 46 47 "github.com/Azure/azure-storage-blob-go/azblob" 48 "github.com/Azure/go-autorest/autorest" 49 "github.com/Azure/go-autorest/autorest/azure" 50 azureauth "github.com/Azure/go-autorest/autorest/azure/auth" 51 52 "golang.org/x/sys/unix" 53 54 "github.com/jacobsa/fuse" 55 "github.com/jacobsa/fuse/fuseops" 56 "github.com/jacobsa/fuse/fuseutil" 57 58 "github.com/sirupsen/logrus" 59 60 . 
"gopkg.in/check.v1" 61 "runtime/debug" 62 ) 63 64 // so I don't get complains about unused imports 65 var ignored = logrus.DebugLevel 66 67 const PerTestTimeout = 10 * time.Minute 68 69 func currentUid() uint32 { 70 user, err := user.Current() 71 if err != nil { 72 panic(err) 73 } 74 75 uid, err := strconv.ParseUint(user.Uid, 10, 32) 76 if err != nil { 77 panic(err) 78 } 79 80 return uint32(uid) 81 } 82 83 func currentGid() uint32 { 84 user, err := user.Current() 85 if err != nil { 86 panic(err) 87 } 88 89 gid, err := strconv.ParseUint(user.Gid, 10, 32) 90 if err != nil { 91 panic(err) 92 } 93 94 return uint32(gid) 95 } 96 97 type GoofysTest struct { 98 fs *Goofys 99 ctx context.Context 100 awsConfig *aws.Config 101 cloud StorageBackend 102 emulator bool 103 azurite bool 104 105 removeBucket []StorageBackend 106 107 env map[string]*string 108 109 timeout chan int 110 } 111 112 func Test(t *testing.T) { 113 TestingT(t) 114 } 115 116 var _ = Suite(&GoofysTest{}) 117 118 func logOutput(t *C, tag string, r io.ReadCloser) { 119 in := bufio.NewScanner(r) 120 121 for in.Scan() { 122 t.Log(tag, in.Text()) 123 } 124 } 125 126 func waitFor(t *C, addr string) (err error) { 127 // wait for it to listen on port 128 for i := 0; i < 10; i++ { 129 var conn net.Conn 130 conn, err = net.Dial("tcp", addr) 131 if err == nil { 132 // we are done! 
133 conn.Close() 134 return 135 } else { 136 t.Logf("Cound not connect: %v", err) 137 time.Sleep(100 * time.Millisecond) 138 } 139 } 140 141 return 142 } 143 144 func (t *GoofysTest) deleteBlobsParallelly(cloud StorageBackend, blobs []string) error { 145 sem := make(semaphore, 100) 146 sem.P(100) 147 var err error 148 for _, blobOuter := range blobs { 149 sem.V(1) 150 go func(blob string) { 151 defer sem.P(1) 152 _, localerr := cloud.DeleteBlob(&DeleteBlobInput{blob}) 153 if localerr != nil && localerr != syscall.ENOENT { 154 err = localerr 155 } 156 }(blobOuter) 157 if err != nil { 158 break 159 } 160 } 161 sem.V(100) 162 return err 163 } 164 165 // groupByDecresingDepths takes a slice of path strings and returns the paths as 166 // groups where each group has the same `depth` - depth(a/b/c)=2, depth(a/b/)=1 167 // The groups are returned in decreasing order of depths. 168 // - Inp: [] Out: [] 169 // - Inp: ["a/b1/", "a/b/c1", "a/b2", "a/b/c2"] 170 // Out: [["a/b/c1", "a/b/c2"], ["a/b1/", "a/b2"]] 171 // - Inp: ["a/b1/", "z/a/b/c1", "a/b2", "z/a/b/c2"] 172 // Out: [["z/a/b/c1", "z/a/b/c2"], ["a/b1/", "a/b2"] 173 func groupByDecresingDepths(items []string) [][]string { 174 depthToGroup := map[int][]string{} 175 for _, item := range items { 176 depth := len(strings.Split(strings.TrimRight(item, "/"), "/")) 177 if _, ok := depthToGroup[depth]; !ok { 178 depthToGroup[depth] = []string{} 179 } 180 depthToGroup[depth] = append(depthToGroup[depth], item) 181 } 182 decreasingDepths := []int{} 183 for depth := range depthToGroup { 184 decreasingDepths = append(decreasingDepths, depth) 185 } 186 sort.Sort(sort.Reverse(sort.IntSlice(decreasingDepths))) 187 ret := [][]string{} 188 for _, depth := range decreasingDepths { 189 group, _ := depthToGroup[depth] 190 ret = append(ret, group) 191 } 192 return ret 193 } 194 195 func (t *GoofysTest) DeleteADLBlobs(cloud StorageBackend, items []string) error { 196 // If we delete a directory that's not empty, ADL{v1|v2} returns failure. 
That can 197 // happen if we want to delete both "dir1" and "dir1/file" but delete them 198 // in the wrong order. 199 // So we group the items to delete into multiple groups. All items in a group 200 // will have the same depth - depth(/a/b/c) = 2, depth(/a/b/) = 1. 201 // We then iterate over the groups in desc order of depth and delete them parallelly. 202 for _, group := range groupByDecresingDepths(items) { 203 err := t.deleteBlobsParallelly(cloud, group) 204 if err != nil { 205 return err 206 } 207 } 208 return nil 209 } 210 211 func (s *GoofysTest) selectTestConfig(t *C, flags *FlagStorage) (conf S3Config) { 212 (&conf).Init() 213 214 if hasEnv("AWS") { 215 if isTravis() { 216 conf.Region = "us-east-1" 217 } else { 218 conf.Region = "us-west-2" 219 } 220 profile := os.Getenv("AWS") 221 if profile != "" { 222 if profile != "-" { 223 conf.Profile = profile 224 } else { 225 conf.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") 226 conf.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") 227 } 228 } 229 230 conf.BucketOwner = os.Getenv("BUCKET_OWNER") 231 if conf.BucketOwner == "" { 232 panic("BUCKET_OWNER is required on AWS") 233 } 234 } else if hasEnv("GCS") { 235 conf.Region = "us-west1" 236 conf.Profile = os.Getenv("GCS") 237 flags.Endpoint = "http://storage.googleapis.com" 238 } else if hasEnv("MINIO") { 239 conf.Region = "us-east-1" 240 conf.AccessKey = "Q3AM3UQ867SPQQA43P2F" 241 conf.SecretKey = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" 242 flags.Endpoint = "https://play.minio.io:9000" 243 } else { 244 s.emulator = true 245 246 conf.Region = "us-west-2" 247 conf.AccessKey = "foo" 248 conf.SecretKey = "bar" 249 flags.Endpoint = "http://127.0.0.1:8080" 250 } 251 252 return 253 } 254 255 func (s *GoofysTest) waitForEmulator(t *C) { 256 if s.emulator { 257 addr := "127.0.0.1:8080" 258 259 err := waitFor(t, addr) 260 t.Assert(err, IsNil) 261 } 262 } 263 264 func (s *GoofysTest) SetUpSuite(t *C) { 265 } 266 267 func (s *GoofysTest) deleteBucket(cloud StorageBackend) 
error { 268 param := &ListBlobsInput{} 269 270 // Azure need special handling. 271 azureKeysToRemove := make([]string, 0) 272 for { 273 resp, err := cloud.ListBlobs(param) 274 if err != nil { 275 return err 276 } 277 278 keysToRemove := []string{} 279 for _, o := range resp.Items { 280 keysToRemove = append(keysToRemove, *o.Key) 281 } 282 if len(keysToRemove) != 0 { 283 switch cloud.(type) { 284 case *ADLv1, *ADLv2, *AZBlob: 285 // ADLV{1|2} and AZBlob (sometimes) supports directories. => dir can be removed only 286 // after the dir is empty. So we will remove the blobs in reverse depth order via 287 // DeleteADLBlobs after this for loop. 288 azureKeysToRemove = append(azureKeysToRemove, keysToRemove...) 289 default: 290 _, err = cloud.DeleteBlobs(&DeleteBlobsInput{Items: keysToRemove}) 291 if err != nil { 292 return err 293 } 294 } 295 } 296 if resp.IsTruncated { 297 param.ContinuationToken = resp.NextContinuationToken 298 } else { 299 break 300 } 301 } 302 303 if len(azureKeysToRemove) != 0 { 304 err := s.DeleteADLBlobs(cloud, azureKeysToRemove) 305 if err != nil { 306 return err 307 } 308 } 309 310 _, err := cloud.RemoveBucket(&RemoveBucketInput{}) 311 return err 312 } 313 314 func (s *GoofysTest) TearDownTest(t *C) { 315 close(s.timeout) 316 317 for _, cloud := range s.removeBucket { 318 err := s.deleteBucket(cloud) 319 t.Assert(err, IsNil) 320 } 321 s.removeBucket = nil 322 } 323 324 func (s *GoofysTest) removeBlob(cloud StorageBackend, t *C, blobPath string) { 325 params := &DeleteBlobInput{ 326 Key: blobPath, 327 } 328 _, err := cloud.DeleteBlob(params) 329 t.Assert(err, IsNil) 330 } 331 332 func (s *GoofysTest) setupBlobs(cloud StorageBackend, t *C, env map[string]*string) { 333 334 // concurrency = 100 335 throttler := make(semaphore, 100) 336 throttler.P(100) 337 338 var globalErr error 339 for path, c := range env { 340 throttler.V(1) 341 go func(path string, content *string) { 342 dir := false 343 if content == nil { 344 if strings.HasSuffix(path, "/") 
{ 345 if cloud.Capabilities().DirBlob { 346 path = strings.TrimRight(path, "/") 347 } 348 dir = true 349 content = PString("") 350 } else { 351 content = &path 352 } 353 } 354 defer throttler.P(1) 355 params := &PutBlobInput{ 356 Key: path, 357 Body: bytes.NewReader([]byte(*content)), 358 Size: PUInt64(uint64(len(*content))), 359 Metadata: map[string]*string{ 360 "name": aws.String(path + "+/#%00"), 361 }, 362 DirBlob: dir, 363 } 364 365 _, err := cloud.PutBlob(params) 366 if err != nil { 367 globalErr = err 368 } 369 t.Assert(err, IsNil) 370 }(path, c) 371 } 372 throttler.V(100) 373 throttler = make(semaphore, 100) 374 throttler.P(100) 375 t.Assert(globalErr, IsNil) 376 377 // double check, except on AWS S3, because there we sometimes 378 // hit 404 NoSuchBucket and there's no way to distinguish that 379 // from 404 KeyNotFound 380 if !hasEnv("AWS") { 381 for path, c := range env { 382 throttler.V(1) 383 go func(path string, content *string) { 384 defer throttler.P(1) 385 params := &HeadBlobInput{Key: path} 386 res, err := cloud.HeadBlob(params) 387 t.Assert(err, IsNil) 388 if content != nil { 389 t.Assert(res.Size, Equals, uint64(len(*content))) 390 } else if strings.HasSuffix(path, "/") || path == "zero" { 391 t.Assert(res.Size, Equals, uint64(0)) 392 } else { 393 t.Assert(res.Size, Equals, uint64(len(path))) 394 } 395 }(path, c) 396 } 397 throttler.V(100) 398 t.Assert(globalErr, IsNil) 399 } 400 } 401 402 func (s *GoofysTest) setupEnv(t *C, env map[string]*string, public bool) { 403 if public { 404 if s3, ok := s.cloud.Delegate().(*S3Backend); ok { 405 s3.config.ACL = "public-read" 406 } else { 407 t.Error("Not S3 backend") 408 } 409 } 410 411 _, err := s.cloud.MakeBucket(&MakeBucketInput{}) 412 t.Assert(err, IsNil) 413 414 if !s.emulator { 415 //time.Sleep(time.Second) 416 } 417 418 s.setupBlobs(s.cloud, t, env) 419 420 t.Log("setupEnv done") 421 } 422 423 func (s *GoofysTest) setupDefaultEnv(t *C, public bool) { 424 s.env = map[string]*string{ 425 "file1": 
// setupDefaultEnv creates the canonical test bucket layout used by
// most tests: two top-level files, nested dirs, empty dirs, and a
// zero-length blob. nil content means "content == blob name".
func (s *GoofysTest) setupDefaultEnv(t *C, public bool) {
	s.env = map[string]*string{
		"file1":           nil,
		"file2":           nil,
		"dir1/file3":      nil,
		"dir2/dir3/":      nil,
		"dir2/dir3/file4": nil,
		"dir4/":           nil,
		"dir4/file5":      nil,
		"empty_dir/":      nil,
		"empty_dir2/":     nil,
		"zero":            PString(""),
	}

	s.setupEnv(t, s.env, public)
}

// setUpTestTimeout starts a watchdog goroutine that panics (dumping
// all goroutine stacks, via SetTraceback("all")) if the test runs
// longer than PerTestTimeout. TearDownTest closes s.timeout to
// disarm it.
func (s *GoofysTest) setUpTestTimeout(t *C) {
	s.timeout = make(chan int)
	debug.SetTraceback("all")
	started := time.Now()

	go func() {
		select {
		case _, ok := <-s.timeout:
			if !ok {
				// channel closed by TearDownTest: test finished in time
				return
			}
		case <-time.After(PerTestTimeout):
			panic(fmt.Sprintf("timeout %v reached. Started %v now %v",
				PerTestTimeout, started, time.Now()))
		}
	}()
}

// SetUpTest selects and initializes the storage backend for each test
// based on the CLOUD env var (s3, gcs, azblob, adlv1, adlv2), creates
// a fresh randomly-named bucket (unless MOUNT overrides it), populates
// the default blobs, and builds the Goofys instance under test.
func (s *GoofysTest) SetUpTest(t *C) {
	log.Infof("Starting at %v", time.Now())

	s.setUpTestTimeout(t)

	var bucket string
	mount := os.Getenv("MOUNT")

	// MOUNT != "false" means: reuse an existing bucket named by MOUNT
	if mount != "false" {
		bucket = mount
	} else {
		bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16)
	}
	uid, gid := MyUserAndGroup()
	flags := &FlagStorage{
		DirMode:     0700,
		FileMode:    0700,
		Uid:         uint32(uid),
		Gid:         uint32(gid),
		HTTPTimeout: 30 * time.Second,
	}

	cloud := os.Getenv("CLOUD")

	if cloud == "s3" {
		// real AWS unless the AWS env var is unset, in which case we
		// talk to the local emulator
		s.emulator = !hasEnv("AWS")
		s.waitForEmulator(t)

		conf := s.selectTestConfig(t, flags)
		flags.Backend = &conf

		s3, err := NewS3(bucket, flags, &conf)
		t.Assert(err, IsNil)

		s.cloud = s3
		s3.aws = hasEnv("AWS")
		if s3.aws {
			// wrap to retry around AWS's eventual consistency
			s.cloud = NewS3BucketEventualConsistency(s3)
		}

		if s.emulator {
			// the emulator only understands V2 signatures
			s3.Handlers.Sign.Clear()
			s3.Handlers.Sign.PushBack(SignV2)
			s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		}
		_, err = s3.ListBuckets(nil)
		t.Assert(err, IsNil)

	} else if cloud == "gcs" {
		conf := s.selectTestConfig(t, flags)
		flags.Backend = &conf

		var err error
		s.cloud, err = NewGCS3(bucket, flags, &conf)
		t.Assert(s.cloud, NotNil)
		t.Assert(err, IsNil)
	} else if cloud == "azblob" {
		config, err := AzureBlobConfig(os.Getenv("ENDPOINT"), "", "blob")
		t.Assert(err, IsNil)

		if config.Endpoint == AzuriteEndpoint {
			s.azurite = true
			s.emulator = true
			s.waitForEmulator(t)
		}

		// Azurite's SAS is buggy, ex: https://github.com/Azure/Azurite/issues/216
		if os.Getenv("SAS_EXPIRE") != "" {
			expire, err := time.ParseDuration(os.Getenv("SAS_EXPIRE"))
			t.Assert(err, IsNil)

			config.TokenRenewBuffer = expire / 2
			credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
			t.Assert(err, IsNil)

			// test sas token config
			config.SasToken = func() (string, error) {
				sasQueryParams, err := azblob.AccountSASSignatureValues{
					Protocol:   azblob.SASProtocolHTTPSandHTTP,
					StartTime:  time.Now().UTC().Add(-1 * time.Hour),
					ExpiryTime: time.Now().UTC().Add(expire),
					Services:   azblob.AccountSASServices{Blob: true}.String(),
					ResourceTypes: azblob.AccountSASResourceTypes{
						Service:   true,
						Container: true,
						Object:    true,
					}.String(),
					Permissions: azblob.AccountSASPermissions{
						Read:   true,
						Write:  true,
						Delete: true,
						List:   true,
						Create: true,
					}.String(),
				}.NewSASQueryParameters(credential)
				if err != nil {
					return "", err
				}
				return sasQueryParams.Encode(), nil
			}
		}

		flags.Backend = &config

		s.cloud, err = NewAZBlob(bucket, &config)
		t.Assert(err, IsNil)
		t.Assert(s.cloud, NotNil)
	} else if cloud == "adlv1" {
		cred := azureauth.NewClientCredentialsConfig(
			os.Getenv("ADLV1_CLIENT_ID"),
			os.Getenv("ADLV1_CLIENT_CREDENTIAL"),
			os.Getenv("ADLV1_TENANT_ID"))
		auth, err := cred.Authorizer()
		t.Assert(err, IsNil)

		config := ADLv1Config{
			Endpoint:   os.Getenv("ENDPOINT"),
			Authorizer: auth,
		}
		config.Init()

		flags.Backend = &config

		s.cloud, err = NewADLv1(bucket, flags, &config)
		t.Assert(err, IsNil)
		t.Assert(s.cloud, NotNil)
	} else if cloud == "adlv2" {
		var err error
		var auth autorest.Authorizer

		// prefer shared-key auth when the storage account/key are set,
		// otherwise fall back to AAD client credentials
		if os.Getenv("AZURE_STORAGE_ACCOUNT") != "" && os.Getenv("AZURE_STORAGE_KEY") != "" {
			auth = &AZBlobConfig{
				AccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"),
				AccountKey:  os.Getenv("AZURE_STORAGE_KEY"),
			}
		} else {
			cred := azureauth.NewClientCredentialsConfig(
				os.Getenv("ADLV2_CLIENT_ID"),
				os.Getenv("ADLV2_CLIENT_CREDENTIAL"),
				os.Getenv("ADLV2_TENANT_ID"))
			cred.Resource = azure.PublicCloud.ResourceIdentifiers.Storage
			auth, err = cred.Authorizer()
			t.Assert(err, IsNil)
		}

		config := ADLv2Config{
			Endpoint:   os.Getenv("ENDPOINT"),
			Authorizer: auth,
		}

		flags.Backend = &config

		s.cloud, err = NewADLv2(bucket, flags, &config)
		t.Assert(err, IsNil)
		t.Assert(s.cloud, NotNil)
	} else {
		t.Fatal("Unsupported backend")
	}

	if mount == "false" {
		// we own the bucket: remember to delete it and fill in blobs
		s.removeBucket = append(s.removeBucket, s.cloud)
		s.setupDefaultEnv(t, false)
	} else {
		_, err := s.cloud.MakeBucket(&MakeBucketInput{})
		if err == fuse.EEXIST {
			// pre-existing bucket is fine when MOUNT names it
			err = nil
		}
		t.Assert(err, IsNil)
	}

	if hasEnv("AWS") {
		s.fs = newGoofys(context.Background(), bucket, flags,
			func(bucket string, flags *FlagStorage) (StorageBackend, error) {
				cloud, err := NewBackend(bucket, flags)
				if err != nil {
					return nil, err
				}

				return NewS3BucketEventualConsistency(cloud.(*S3Backend)), nil
			})
	} else {
		s.fs = NewGoofys(context.Background(), bucket, flags)
	}
	t.Assert(s.fs, NotNil)

	s.ctx = context.Background()

	if hasEnv("GCS") {
		flags.Endpoint = "http://storage.googleapis.com"
	}
}

// getRoot returns the root inode of the mounted file system.
func (s *GoofysTest) getRoot(t *C) (inode *Inode) {
	inode = s.fs.inodes[fuseops.RootInodeID]
	t.Assert(inode, NotNil)
	return
}
// TestGetRootInode verifies the root inode has the well-known FUSE
// root inode id.
func (s *GoofysTest) TestGetRootInode(t *C) {
	root := s.getRoot(t)
	t.Assert(root.Id, Equals, fuseops.InodeID(fuseops.RootInodeID))
}

// TestGetRootAttributes verifies attributes can be fetched for "/".
func (s *GoofysTest) TestGetRootAttributes(t *C) {
	_, err := s.getRoot(t).GetAttributes()
	t.Assert(err, IsNil)
}

// ForgetInode is a test helper that issues a FUSE ForgetInode op and
// asserts success.
func (s *GoofysTest) ForgetInode(t *C, inode fuseops.InodeID) {
	err := s.fs.ForgetInode(s.ctx, &fuseops.ForgetInodeOp{Inode: inode})
	t.Assert(err, IsNil)
}

// LookUpInode resolves a slash-separated path from the root by issuing
// one FUSE LookUpInode op per component, returning the final inode.
func (s *GoofysTest) LookUpInode(t *C, name string) (in *Inode, err error) {
	parent := s.getRoot(t)

	// walk intermediate directory components
	for {
		idx := strings.Index(name, "/")
		if idx == -1 {
			break
		}

		dirName := name[0:idx]
		name = name[idx+1:]

		lookup := fuseops.LookUpInodeOp{
			Parent: parent.Id,
			Name:   dirName,
		}

		err = s.fs.LookUpInode(nil, &lookup)
		if err != nil {
			return
		}
		parent = s.fs.inodes[lookup.Entry.Child]
	}

	// final component
	lookup := fuseops.LookUpInodeOp{
		Parent: parent.Id,
		Name:   name,
	}

	err = s.fs.LookUpInode(nil, &lookup)
	if err != nil {
		return
	}
	in = s.fs.inodes[lookup.Entry.Child]
	return
}

// TestSetup exists so SetUpTest/TearDownTest alone can be exercised.
func (s *GoofysTest) TestSetup(t *C) {
}

// TestLookUpInode checks path resolution for files, nested dirs, and
// a missing name (which must yield ENOENT).
func (s *GoofysTest) TestLookUpInode(t *C) {
	_, err := s.LookUpInode(t, "file1")
	t.Assert(err, IsNil)

	_, err = s.LookUpInode(t, "fileNotFound")
	t.Assert(err, Equals, fuse.ENOENT)

	_, err = s.LookUpInode(t, "dir1/file3")
	t.Assert(err, IsNil)

	_, err = s.LookUpInode(t, "dir2/dir3")
	t.Assert(err, IsNil)

	_, err = s.LookUpInode(t, "dir2/dir3/file4")
	t.Assert(err, IsNil)

	_, err = s.LookUpInode(t, "empty_dir")
	t.Assert(err, IsNil)
}

// TestPanicWrapper checks that FusePanicLogger converts a panic
// (lookup of a bogus inode id) into EIO instead of crashing.
func (s *GoofysTest) TestPanicWrapper(t *C) {
	debug.SetTraceback("single")

	fs := FusePanicLogger{s.fs}
	err := fs.GetInodeAttributes(nil, &fuseops.GetInodeAttributesOp{
		Inode: 1234,
	})
	t.Assert(err, Equals, fuse.EIO)
}

// TestGetInodeAttributes verifies file1's size equals len("file1"),
// per the setupBlobs convention that content defaults to the name.
func (s *GoofysTest) TestGetInodeAttributes(t *C) {
	inode, err := s.getRoot(t).LookUp("file1")
	t.Assert(err, IsNil)

	attr, err := inode.GetAttributes()
	t.Assert(err, IsNil)
	t.Assert(attr.Size, Equals, uint64(len("file1")))
}

// readDirFully reads all entries from dh, asserting the first two are
// "." and "..", and returns the rest.
func (s *GoofysTest) readDirFully(t *C, dh *DirHandle) (entries []DirHandleEntry) {
	dh.mu.Lock()
	defer dh.mu.Unlock()

	en, err := dh.ReadDir(fuseops.DirOffset(0))
	t.Assert(err, IsNil)
	t.Assert(en, NotNil)
	t.Assert(en.Name, Equals, ".")

	en, err = dh.ReadDir(fuseops.DirOffset(1))
	t.Assert(err, IsNil)
	t.Assert(en, NotNil)
	t.Assert(en.Name, Equals, "..")

	for i := fuseops.DirOffset(2); ; i++ {
		en, err = dh.ReadDir(i)
		t.Assert(err, IsNil)

		if en == nil {
			return
		}

		entries = append(entries, *en)
	}
}

// namesOf projects directory entries to their names.
func namesOf(entries []DirHandleEntry) (names []string) {
	for _, en := range entries {
		names = append(names, en.Name)
	}
	return
}

// assertEntries asserts that listing in yields exactly names, in order.
func (s *GoofysTest) assertEntries(t *C, in *Inode, names []string) {
	dh := in.OpenDir()
	defer dh.CloseDir()

	t.Assert(namesOf(s.readDirFully(t, dh)), DeepEquals, names)
}

// readDirIntoCache lists inode through the FUSE layer so its entries
// land in the directory/stat caches.
func (s *GoofysTest) readDirIntoCache(t *C, inode fuseops.InodeID) {
	openDirOp := fuseops.OpenDirOp{Inode: inode}
	err := s.fs.OpenDir(nil, &openDirOp)
	t.Assert(err, IsNil)

	readDirOp := fuseops.ReadDirOp{
		Inode:  inode,
		Handle: openDirOp.Handle,
		Dst:    make([]byte, 8*1024),
	}

	err = s.fs.ReadDir(nil, &readDirOp)
	t.Assert(err, IsNil)
}
// TestReadDirCacheLookup verifies that after one ReadDir the entries
// are served from cache: S3 is disabled and lookups must still succeed.
func (s *GoofysTest) TestReadDirCacheLookup(t *C) {
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	s.fs.flags.TypeCacheTTL = 1 * time.Minute

	s.readDirIntoCache(t, fuseops.RootInodeID)
	s.disableS3()

	// should be cached so lookup should not need to talk to s3
	entries := []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"}
	for _, en := range entries {
		err := s.fs.LookUpInode(nil, &fuseops.LookUpInodeOp{
			Parent: fuseops.RootInodeID,
			Name:   en,
		})
		t.Assert(err, IsNil)
	}
}

// TestReadDirWithExternalChanges mutates the bucket behind goofys's
// back and checks that listings converge after TypeCacheTTL expires.
func (s *GoofysTest) TestReadDirWithExternalChanges(t *C) {
	s.fs.flags.TypeCacheTTL = time.Second

	dir1, err := s.LookUpInode(t, "dir1")
	t.Assert(err, IsNil)

	defaultEntries := []string{
		"dir1", "dir2", "dir4", "empty_dir",
		"empty_dir2", "file1", "file2", "zero"}
	s.assertEntries(t, s.getRoot(t), defaultEntries)
	// dir1 has file3 and nothing else.
	s.assertEntries(t, dir1, []string{"file3"})

	// Do the following 'external' changes in s3 without involving goofys.
	// - Remove file1, add file3.
	// - Remove dir1/file3. Given that dir1 has just this one file,
	//   we are effectively removing dir1 as well.
	s.removeBlob(s.cloud, t, "file1")
	s.setupBlobs(s.cloud, t, map[string]*string{"file3": nil})
	s.removeBlob(s.cloud, t, "dir1/file3")

	// wait out the type cache so the next listing refetches
	time.Sleep(s.fs.flags.TypeCacheTTL)
	// newEntries = `defaultEntries` - dir1 - file1 + file3.
	newEntries := []string{
		"dir2", "dir4", "empty_dir", "empty_dir2",
		"file2", "file3", "zero"}
	if s.cloud.Capabilities().DirBlob {
		// dir1 is not automatically deleted
		newEntries = append([]string{"dir1"}, newEntries...)
	}
	s.assertEntries(t, s.getRoot(t), newEntries)
}

// TestReadDir checks listings of the root and each nested directory
// against the canonical setupDefaultEnv layout.
func (s *GoofysTest) TestReadDir(t *C) {
	// test listing /
	dh := s.getRoot(t).OpenDir()
	defer dh.CloseDir()

	s.assertEntries(t, s.getRoot(t), []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero"})

	// test listing dir1/
	in, err := s.LookUpInode(t, "dir1")
	t.Assert(err, IsNil)
	s.assertEntries(t, in, []string{"file3"})

	// test listing dir2/
	in, err = s.LookUpInode(t, "dir2")
	t.Assert(err, IsNil)
	s.assertEntries(t, in, []string{"dir3"})

	// test listing dir2/dir3/
	in, err = s.LookUpInode(t, "dir2/dir3")
	t.Assert(err, IsNil)
	s.assertEntries(t, in, []string{"file4"})
}

// TestReadFiles opens every file in the root and checks its content
// matches its name (and "zero" is empty), per the setupBlobs convention.
func (s *GoofysTest) TestReadFiles(t *C) {
	parent := s.getRoot(t)
	dh := parent.OpenDir()
	defer dh.CloseDir()

	var entries []*DirHandleEntry

	dh.mu.Lock()
	for i := fuseops.DirOffset(0); ; i++ {
		en, err := dh.ReadDir(i)
		t.Assert(err, IsNil)

		if en == nil {
			break
		}

		entries = append(entries, en)
	}
	dh.mu.Unlock()

	for _, en := range entries {
		if en.Type == fuseutil.DT_File {
			in, err := parent.LookUp(en.Name)
			t.Assert(err, IsNil)

			fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
			t.Assert(err, IsNil)

			buf := make([]byte, 4096)

			nread, err := fh.ReadFile(0, buf)
			if en.Name == "zero" {
				t.Assert(nread, Equals, 0)
			} else {
				t.Assert(nread, Equals, len(en.Name))
				buf = buf[0:nread]
				t.Assert(string(buf), Equals, en.Name)
			}
		} else {
			// directories are skipped here; covered by TestReadDir
		}
	}
}
t.Assert(err, IsNil) 937 t.Assert(nread, Equals, len(f)-1) 938 t.Assert(string(buf[0:nread]), DeepEquals, f[1:]) 939 940 r := rand.New(rand.NewSource(time.Now().UnixNano())) 941 942 for i := 0; i < 3; i++ { 943 off := r.Int31n(int32(len(f))) 944 nread, err = fh.ReadFile(int64(off), buf) 945 t.Assert(err, IsNil) 946 t.Assert(nread, Equals, len(f)-int(off)) 947 t.Assert(string(buf[0:nread]), DeepEquals, f[off:]) 948 } 949 } 950 951 func (s *GoofysTest) TestCreateFiles(t *C) { 952 fileName := "testCreateFile" 953 954 _, fh := s.getRoot(t).Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())}) 955 956 err := fh.FlushFile() 957 t.Assert(err, IsNil) 958 959 resp, err := s.cloud.GetBlob(&GetBlobInput{Key: fileName}) 960 t.Assert(err, IsNil) 961 t.Assert(resp.HeadBlobOutput.Size, DeepEquals, uint64(0)) 962 defer resp.Body.Close() 963 964 _, err = s.getRoot(t).LookUp(fileName) 965 t.Assert(err, IsNil) 966 967 fileName = "testCreateFile2" 968 s.testWriteFile(t, fileName, 1, 128*1024) 969 970 inode, err := s.getRoot(t).LookUp(fileName) 971 t.Assert(err, IsNil) 972 973 fh, err = inode.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 974 t.Assert(err, IsNil) 975 976 err = fh.FlushFile() 977 t.Assert(err, IsNil) 978 979 resp, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName}) 980 t.Assert(err, IsNil) 981 // ADLv1 doesn't return size when we do a GET 982 if _, adlv1 := s.cloud.(*ADLv1); !adlv1 { 983 t.Assert(resp.HeadBlobOutput.Size, Equals, uint64(1)) 984 } 985 defer resp.Body.Close() 986 } 987 988 func (s *GoofysTest) TestUnlink(t *C) { 989 fileName := "file1" 990 991 err := s.getRoot(t).Unlink(fileName) 992 t.Assert(err, IsNil) 993 994 // make sure that it's gone from s3 995 _, err = s.cloud.GetBlob(&GetBlobInput{Key: fileName}) 996 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 997 } 998 999 type FileHandleReader struct { 1000 fs *Goofys 1001 fh *FileHandle 1002 offset int64 1003 } 1004 1005 func (r *FileHandleReader) Read(p []byte) (nread int, err error) { 1006 nread, 
err = r.fh.ReadFile(r.offset, p) 1007 r.offset += int64(nread) 1008 return 1009 } 1010 1011 func (r *FileHandleReader) Seek(offset int64, whence int) (int64, error) { 1012 switch whence { 1013 case 0: 1014 r.offset = offset 1015 case 1: 1016 r.offset += offset 1017 default: 1018 panic(fmt.Sprintf("unsupported whence: %v", whence)) 1019 } 1020 1021 return r.offset, nil 1022 } 1023 1024 func (s *GoofysTest) testWriteFile(t *C, fileName string, size int64, write_size int) { 1025 s.testWriteFileAt(t, fileName, int64(0), size, write_size) 1026 } 1027 1028 func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, write_size int) { 1029 var fh *FileHandle 1030 root := s.getRoot(t) 1031 1032 lookup := fuseops.LookUpInodeOp{ 1033 Parent: root.Id, 1034 Name: fileName, 1035 } 1036 err := s.fs.LookUpInode(nil, &lookup) 1037 if err != nil { 1038 if err == fuse.ENOENT { 1039 create := fuseops.CreateFileOp{ 1040 Parent: root.Id, 1041 Name: fileName, 1042 } 1043 err = s.fs.CreateFile(nil, &create) 1044 t.Assert(err, IsNil) 1045 1046 fh = s.fs.fileHandles[create.Handle] 1047 } else { 1048 t.Assert(err, IsNil) 1049 } 1050 } else { 1051 in := s.fs.inodes[lookup.Entry.Child] 1052 fh, err = in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 1053 t.Assert(err, IsNil) 1054 } 1055 1056 buf := make([]byte, write_size) 1057 nwritten := offset 1058 1059 src := io.LimitReader(&SeqReader{}, size) 1060 1061 for { 1062 nread, err := src.Read(buf) 1063 if err == io.EOF { 1064 t.Assert(nwritten, Equals, size) 1065 break 1066 } 1067 t.Assert(err, IsNil) 1068 1069 err = fh.WriteFile(nwritten, buf[:nread]) 1070 t.Assert(err, IsNil) 1071 nwritten += int64(nread) 1072 } 1073 1074 err = fh.FlushFile() 1075 t.Assert(err, IsNil) 1076 1077 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: fileName}) 1078 t.Assert(err, IsNil) 1079 t.Assert(resp.Size, Equals, uint64(size+offset)) 1080 1081 fr := &FileHandleReader{s.fs, fh, offset} 1082 diff, err := CompareReader(fr, 
// testWriteFileAt creates (or opens) fileName, writes size bytes of
// deterministic SeqReader data starting at offset in chunks of
// write_size, flushes, and then verifies the stored size and content
// by reading it back twice (once with exact 4KB reads to exercise the
// aligned-read path).
func (s *GoofysTest) testWriteFileAt(t *C, fileName string, offset int64, size int64, write_size int) {
	var fh *FileHandle
	root := s.getRoot(t)

	lookup := fuseops.LookUpInodeOp{
		Parent: root.Id,
		Name:   fileName,
	}
	err := s.fs.LookUpInode(nil, &lookup)
	if err != nil {
		if err == fuse.ENOENT {
			// file doesn't exist yet: create it through the FUSE layer
			create := fuseops.CreateFileOp{
				Parent: root.Id,
				Name:   fileName,
			}
			err = s.fs.CreateFile(nil, &create)
			t.Assert(err, IsNil)

			fh = s.fs.fileHandles[create.Handle]
		} else {
			t.Assert(err, IsNil)
		}
	} else {
		in := s.fs.inodes[lookup.Entry.Child]
		fh, err = in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
		t.Assert(err, IsNil)
	}

	buf := make([]byte, write_size)
	nwritten := offset

	// SeqReader produces a deterministic byte sequence we can re-derive
	// later for verification
	src := io.LimitReader(&SeqReader{}, size)

	for {
		nread, err := src.Read(buf)
		if err == io.EOF {
			t.Assert(nwritten, Equals, size)
			break
		}
		t.Assert(err, IsNil)

		err = fh.WriteFile(nwritten, buf[:nread])
		t.Assert(err, IsNil)
		nwritten += int64(nread)
	}

	err = fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: fileName})
	t.Assert(err, IsNil)
	t.Assert(resp.Size, Equals, uint64(size+offset))

	// verify content; diff == -1 means the readers matched
	fr := &FileHandleReader{s.fs, fh, offset}
	diff, err := CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 0)
	t.Assert(err, IsNil)
	t.Assert(diff, Equals, -1)
	t.Assert(fr.offset, Equals, size)

	err = fh.FlushFile()
	t.Assert(err, IsNil)

	// read again with exact 4KB to catch aligned read case
	fr = &FileHandleReader{s.fs, fh, offset}
	diff, err = CompareReader(fr, io.LimitReader(&SeqReader{offset}, size), 4096)
	t.Assert(err, IsNil)
	t.Assert(diff, Equals, -1)
	t.Assert(fr.offset, Equals, size)

	fh.Release()
}

// TestWriteLargeFile covers sizes just around READAHEAD_CHUNK to
// exercise readahead boundary conditions.
func (s *GoofysTest) TestWriteLargeFile(t *C) {
	s.testWriteFile(t, "testLargeFile", int64(READAHEAD_CHUNK)+1024*1024, 128*1024)
	s.testWriteFile(t, "testLargeFile2", int64(READAHEAD_CHUNK), 128*1024)
	s.testWriteFile(t, "testLargeFile3", int64(READAHEAD_CHUNK)+1, 128*1024)
}

// TestWriteReallyLargeFile writes just over 512MB to exercise
// multipart upload paths.
func (s *GoofysTest) TestWriteReallyLargeFile(t *C) {
	s.testWriteFile(t, "testLargeFile", 512*1024*1024+1, 128*1024)
}

// TestWriteReplicatorThrottle writes a 21MB file with only one
// replicator ticket to exercise upload throttling.
func (s *GoofysTest) TestWriteReplicatorThrottle(t *C) {
	s.fs.replicators = Ticket{Total: 1}.Init()
	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
}

// TestReadWriteMinimumMemory constrains the buffer pool to the minimum
// (ADLv1 needs 4 buffers, others 2) and checks a 21MB round trip still
// works.
func (s *GoofysTest) TestReadWriteMinimumMemory(t *C) {
	if _, ok := s.cloud.(*ADLv1); ok {
		s.fs.bufferPool.maxBuffers = 4
	} else {
		s.fs.bufferPool.maxBuffers = 2
	}
	s.fs.bufferPool.computedMaxbuffers = s.fs.bufferPool.maxBuffers
	s.testWriteFile(t, "testLargeFile", 21*1024*1024, 128*1024)
}

// TestWriteManyFilesFile writes 21 small files concurrently.
func (s *GoofysTest) TestWriteManyFilesFile(t *C) {
	var files sync.WaitGroup

	for i := 0; i < 21; i++ {
		files.Add(1)
		fileName := "testSmallFile" + strconv.Itoa(i)
		go func() {
			defer files.Done()
			s.testWriteFile(t, fileName, 1, 128*1024)
		}()
	}

	files.Wait()
}

// testWriteFileNonAlign writes with a chunk size that is not a power
// of two to exercise unaligned write paths.
func (s *GoofysTest) testWriteFileNonAlign(t *C) {
	s.testWriteFile(t, "testWriteFileNonAlign", 6*1024*1024, 128*1024+1)
}

// TestReadRandom writes a 21MB file and reads 5MB+1 spans from random
// offsets, comparing against the deterministic SeqReader content.
func (s *GoofysTest) TestReadRandom(t *C) {
	size := int64(21 * 1024 * 1024)

	s.testWriteFile(t, "testLargeFile", size, 128*1024)
	in, err := s.LookUpInode(t, "testLargeFile")
	t.Assert(err, IsNil)

	fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())})
	t.Assert(err, IsNil)
	fr := &FileHandleReader{s.fs, fh, 0}

	src := rand.NewSource(time.Now().UnixNano())
	truth := &SeqReader{}

	for i := 0; i < 10; i++ {
		offset := src.Int63() % (size / 2)

		fr.Seek(offset, 0)
		truth.Seek(offset, 0)

		// read 5MB+1 from that offset
		// NOTE(review): CompareReader's result is discarded here, so a
		// content mismatch would go unnoticed — confirm whether the
		// diff/err should be asserted like in testWriteFileAt.
		nread := int64(5*1024*1024 + 1)
		CompareReader(io.LimitReader(fr, nread), io.LimitReader(truth, nread), 0)
	}
}

// TestMkDir creates a directory and a file inside it, verifying both
// become visible via lookup.
func (s *GoofysTest) TestMkDir(t *C) {
	_, err := s.LookUpInode(t, "new_dir/file")
	t.Assert(err, Equals, fuse.ENOENT)

	dirName := "new_dir"
	inode, err := s.getRoot(t).MkDir(dirName)
	t.Assert(err, IsNil)
	t.Assert(*inode.FullName(), Equals, dirName)

	_, err = s.LookUpInode(t, dirName)
	t.Assert(err, IsNil)

	fileName := "file"
	_, fh := inode.Create(fileName, fuseops.OpMetadata{uint32(os.Getpid())})

	err = fh.FlushFile()
	t.Assert(err, IsNil)

	_, err = s.LookUpInode(t, dirName+"/"+fileName)
	t.Assert(err, IsNil)
}

// TestRmDir verifies non-empty directories refuse removal (ENOTEMPTY)
// while an empty directory is removed successfully.
func (s *GoofysTest) TestRmDir(t *C) {
	root := s.getRoot(t)

	err := root.RmDir("dir1")
	t.Assert(err, Equals, fuse.ENOTEMPTY)

	err = root.RmDir("dir2")
	t.Assert(err, Equals, fuse.ENOTEMPTY)

	err = root.RmDir("empty_dir")
	t.Assert(err, IsNil)

}
s.cloud.CopyBlob(&CopyBlobInput{ 1218 Source: from, 1219 Destination: from, 1220 Metadata: metadata, 1221 }) 1222 t.Assert(err, IsNil) 1223 1224 err = root.Rename(from, root, to) 1225 t.Assert(err, IsNil) 1226 1227 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: to}) 1228 t.Assert(err, IsNil) 1229 t.Assert(resp.Metadata["foo"], NotNil) 1230 t.Assert(*resp.Metadata["foo"], Equals, "bar") 1231 } 1232 1233 func (s *GoofysTest) TestRenameLarge(t *C) { 1234 fileSize := int64(2 * 1024 * 1024 * 1024) 1235 // AWS S3 can timeout when renaming large file 1236 if _, ok := s.cloud.(*S3Backend); ok && s.emulator { 1237 // S3proxy runs out of memory on truly large files. We 1238 // want to use a large file to test timeout issues 1239 // which wouldn't happen on s3proxy anyway 1240 fileSize = 21 * 1024 * 1024 1241 } 1242 1243 s.testWriteFile(t, "large_file", fileSize, 128*1024) 1244 1245 root := s.getRoot(t) 1246 1247 from, to := "large_file", "large_file2" 1248 err := root.Rename(from, root, to) 1249 t.Assert(err, IsNil) 1250 } 1251 1252 func (s *GoofysTest) TestRenameToExisting(t *C) { 1253 root := s.getRoot(t) 1254 1255 // cache these 2 files first 1256 _, err := s.LookUpInode(t, "file1") 1257 t.Assert(err, IsNil) 1258 1259 _, err = s.LookUpInode(t, "file2") 1260 t.Assert(err, IsNil) 1261 1262 err = s.fs.Rename(nil, &fuseops.RenameOp{ 1263 OldParent: root.Id, 1264 NewParent: root.Id, 1265 OldName: "file1", 1266 NewName: "file2", 1267 }) 1268 t.Assert(err, IsNil) 1269 1270 file1 := root.findChild("file1") 1271 t.Assert(file1, IsNil) 1272 1273 file2 := root.findChild("file2") 1274 t.Assert(file2, NotNil) 1275 t.Assert(*file2.Name, Equals, "file2") 1276 } 1277 1278 func (s *GoofysTest) TestBackendListPagination(t *C) { 1279 if _, ok := s.cloud.(*ADLv1); ok { 1280 t.Skip("ADLv1 doesn't have pagination") 1281 } 1282 if s.azurite { 1283 // https://github.com/Azure/Azurite/issues/262 1284 t.Skip("Azurite doesn't support pagination") 1285 } 1286 1287 var itemsPerPage int 1288 switch 
s.cloud.Delegate().(type) { 1289 case *S3Backend, *GCS3: 1290 itemsPerPage = 1000 1291 case *AZBlob, *ADLv2: 1292 itemsPerPage = 5000 1293 default: 1294 t.Fatalf("unknown backend: %T", s.cloud) 1295 } 1296 1297 root := s.getRoot(t) 1298 root.dir.mountPrefix = "this_test/" 1299 1300 blobs := make(map[string]*string) 1301 expect := make([]string, 0) 1302 for i := 0; i < itemsPerPage+1; i++ { 1303 b := fmt.Sprintf("%08v", i) 1304 blobs["this_test/"+b] = nil 1305 expect = append(expect, b) 1306 } 1307 1308 switch s.cloud.(type) { 1309 case *ADLv1, *ADLv2: 1310 // these backends don't support parallel delete so I 1311 // am doing this here 1312 defer func() { 1313 var wg sync.WaitGroup 1314 1315 for b, _ := range blobs { 1316 SmallActionsGate.Take(1, true) 1317 wg.Add(1) 1318 1319 go func(key string) { 1320 // ignore the error here, 1321 // anything we didn't cleanup 1322 // will be handled by teardown 1323 _, _ = s.cloud.DeleteBlob(&DeleteBlobInput{key}) 1324 SmallActionsGate.Return(1) 1325 wg.Done() 1326 }(b) 1327 } 1328 1329 wg.Wait() 1330 }() 1331 } 1332 1333 s.setupBlobs(s.cloud, t, blobs) 1334 1335 dh := root.OpenDir() 1336 defer dh.CloseDir() 1337 1338 children := namesOf(s.readDirFully(t, dh)) 1339 t.Assert(children, DeepEquals, expect) 1340 } 1341 1342 func (s *GoofysTest) TestBackendListPrefix(t *C) { 1343 res, err := s.cloud.ListBlobs(&ListBlobsInput{ 1344 Prefix: PString("random"), 1345 Delimiter: PString("/"), 1346 }) 1347 t.Assert(err, IsNil) 1348 t.Assert(len(res.Prefixes), Equals, 0) 1349 t.Assert(len(res.Items), Equals, 0) 1350 1351 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1352 Prefix: PString("empty_dir"), 1353 Delimiter: PString("/"), 1354 }) 1355 t.Assert(err, IsNil) 1356 t.Assert(len(res.Prefixes), Not(Equals), 0) 1357 t.Assert(*res.Prefixes[0].Prefix, Equals, "empty_dir/") 1358 t.Assert(len(res.Items), Equals, 0) 1359 1360 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1361 Prefix: PString("empty_dir/"), 1362 Delimiter: PString("/"), 1363 }) 
1364 t.Assert(err, IsNil) 1365 t.Assert(len(res.Prefixes), Equals, 0) 1366 t.Assert(len(res.Items), Equals, 1) 1367 t.Assert(*res.Items[0].Key, Equals, "empty_dir/") 1368 1369 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1370 Prefix: PString("file1"), 1371 Delimiter: PString("/"), 1372 }) 1373 t.Assert(err, IsNil) 1374 t.Assert(len(res.Prefixes), Equals, 0) 1375 t.Assert(len(res.Items), Equals, 1) 1376 t.Assert(*res.Items[0].Key, Equals, "file1") 1377 1378 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1379 Prefix: PString("file1/"), 1380 Delimiter: PString("/"), 1381 }) 1382 t.Assert(err, IsNil) 1383 t.Assert(len(res.Prefixes), Equals, 0) 1384 t.Assert(len(res.Items), Equals, 0) 1385 1386 // ListBlobs: 1387 // - Case1: If the prefix foo/ is not added explicitly, then ListBlobs foo/ might or might not return foo/. 1388 // In the test setup dir2 is not expliticly created. 1389 // - Case2: Else, ListBlobs foo/ must return foo/ 1390 // In the test setup dir2/dir3 is expliticly created. 1391 1392 // ListBlobs:Case1 1393 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1394 Prefix: PString("dir2/"), 1395 Delimiter: PString("/"), 1396 }) 1397 t.Assert(err, IsNil) 1398 t.Assert(len(res.Prefixes), Equals, 1) 1399 t.Assert(*res.Prefixes[0].Prefix, Equals, "dir2/dir3/") 1400 if len(res.Items) == 1 { 1401 // azblob(with hierarchial ns on), adlv1, adlv2. 
1402 t.Assert(*res.Items[0].Key, Equals, "dir2/") 1403 } else { 1404 // s3, azblob(with hierarchial ns off) 1405 t.Assert(len(res.Items), Equals, 0) 1406 } 1407 1408 // ListBlobs:Case2 1409 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1410 Prefix: PString("dir2/dir3/"), 1411 Delimiter: PString("/"), 1412 }) 1413 t.Assert(err, IsNil) 1414 t.Assert(len(res.Prefixes), Equals, 0) 1415 t.Assert(len(res.Items), Equals, 2) 1416 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/") 1417 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4") 1418 1419 // ListBlobs:Case1 1420 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1421 Prefix: PString("dir2/"), 1422 }) 1423 t.Assert(err, IsNil) 1424 t.Assert(len(res.Prefixes), Equals, 0) 1425 if len(res.Items) == 3 { 1426 // azblob(with hierarchial ns on), adlv1, adlv2. 1427 t.Assert(*res.Items[0].Key, Equals, "dir2/") 1428 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/") 1429 t.Assert(*res.Items[2].Key, Equals, "dir2/dir3/file4") 1430 } else { 1431 // s3, azblob(with hierarchial ns off) 1432 t.Assert(len(res.Items), Equals, 2) 1433 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/") 1434 t.Assert(*res.Items[1].Key, Equals, "dir2/dir3/file4") 1435 } 1436 1437 res, err = s.cloud.ListBlobs(&ListBlobsInput{ 1438 Prefix: PString("dir2/dir3/file4"), 1439 }) 1440 t.Assert(err, IsNil) 1441 t.Assert(len(res.Prefixes), Equals, 0) 1442 t.Assert(len(res.Items), Equals, 1) 1443 t.Assert(*res.Items[0].Key, Equals, "dir2/dir3/file4") 1444 } 1445 1446 func (s *GoofysTest) TestRenameDir(t *C) { 1447 s.fs.flags.StatCacheTTL = 0 1448 1449 root := s.getRoot(t) 1450 1451 err := root.Rename("empty_dir", root, "dir1") 1452 t.Assert(err, Equals, fuse.ENOTEMPTY) 1453 1454 err = root.Rename("empty_dir", root, "new_dir") 1455 t.Assert(err, IsNil) 1456 1457 dir2, err := s.LookUpInode(t, "dir2") 1458 t.Assert(err, IsNil) 1459 t.Assert(dir2, NotNil) 1460 1461 _, err = s.LookUpInode(t, "new_dir2") 1462 t.Assert(err, Equals, fuse.ENOENT) 1463 1464 err = 
s.fs.Rename(nil, &fuseops.RenameOp{ 1465 OldParent: root.Id, 1466 NewParent: root.Id, 1467 OldName: "dir2", 1468 NewName: "new_dir2", 1469 }) 1470 t.Assert(err, IsNil) 1471 1472 _, err = s.LookUpInode(t, "dir2/dir3") 1473 t.Assert(err, Equals, fuse.ENOENT) 1474 1475 _, err = s.LookUpInode(t, "dir2/dir3/file4") 1476 t.Assert(err, Equals, fuse.ENOENT) 1477 1478 new_dir2, err := s.LookUpInode(t, "new_dir2") 1479 t.Assert(err, IsNil) 1480 t.Assert(new_dir2, NotNil) 1481 t.Assert(dir2.Id, Equals, new_dir2.Id) 1482 1483 old, err := s.LookUpInode(t, "new_dir2/dir3/file4") 1484 t.Assert(err, IsNil) 1485 t.Assert(old, NotNil) 1486 1487 err = s.fs.Rename(nil, &fuseops.RenameOp{ 1488 OldParent: root.Id, 1489 NewParent: root.Id, 1490 OldName: "new_dir2", 1491 NewName: "new_dir3", 1492 }) 1493 t.Assert(err, IsNil) 1494 1495 new, err := s.LookUpInode(t, "new_dir3/dir3/file4") 1496 t.Assert(err, IsNil) 1497 t.Assert(new, NotNil) 1498 t.Assert(old.Id, Equals, new.Id) 1499 1500 _, err = s.LookUpInode(t, "new_dir2/dir3") 1501 t.Assert(err, Equals, fuse.ENOENT) 1502 1503 _, err = s.LookUpInode(t, "new_dir2/dir3/file4") 1504 t.Assert(err, Equals, fuse.ENOENT) 1505 } 1506 1507 func (s *GoofysTest) TestRename(t *C) { 1508 root := s.getRoot(t) 1509 1510 from, to := "empty_dir", "file1" 1511 err := root.Rename(from, root, to) 1512 t.Assert(err, Equals, fuse.ENOTDIR) 1513 1514 from, to = "file1", "empty_dir" 1515 err = root.Rename(from, root, to) 1516 t.Assert(err, Equals, syscall.EISDIR) 1517 1518 from, to = "file1", "new_file" 1519 err = root.Rename(from, root, to) 1520 t.Assert(err, IsNil) 1521 1522 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: to}) 1523 t.Assert(err, IsNil) 1524 1525 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from}) 1526 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 1527 1528 from, to = "file3", "new_file2" 1529 dir, _ := s.LookUpInode(t, "dir1") 1530 err = dir.Rename(from, root, to) 1531 t.Assert(err, IsNil) 1532 1533 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: 
to}) 1534 t.Assert(err, IsNil) 1535 1536 _, err = s.cloud.HeadBlob(&HeadBlobInput{Key: from}) 1537 t.Assert(mapAwsError(err), Equals, fuse.ENOENT) 1538 1539 from, to = "no_such_file", "new_file" 1540 err = root.Rename(from, root, to) 1541 t.Assert(err, Equals, fuse.ENOENT) 1542 1543 if s3, ok := s.cloud.Delegate().(*S3Backend); ok { 1544 if !hasEnv("GCS") { 1545 // not really rename but can be used by rename 1546 from, to = s.fs.bucket+"/file2", "new_file" 1547 _, err = s3.copyObjectMultipart(int64(len("file2")), from, to, "", nil, nil, nil) 1548 t.Assert(err, IsNil) 1549 } 1550 } 1551 } 1552 1553 func (s *GoofysTest) TestConcurrentRefDeref(t *C) { 1554 root := s.getRoot(t) 1555 1556 lookupOp := fuseops.LookUpInodeOp{ 1557 Parent: root.Id, 1558 Name: "file1", 1559 } 1560 1561 for i := 0; i < 20; i++ { 1562 err := s.fs.LookUpInode(nil, &lookupOp) 1563 t.Assert(err, IsNil) 1564 1565 var wg sync.WaitGroup 1566 1567 wg.Add(2) 1568 go func() { 1569 // we want to yield to the forget goroutine so that it's run first 1570 // to trigger this bug 1571 if i%2 == 0 { 1572 runtime.Gosched() 1573 } 1574 s.fs.LookUpInode(nil, &lookupOp) 1575 wg.Done() 1576 }() 1577 go func() { 1578 s.fs.ForgetInode(nil, &fuseops.ForgetInodeOp{ 1579 Inode: lookupOp.Entry.Child, 1580 N: 1, 1581 }) 1582 wg.Done() 1583 }() 1584 1585 wg.Wait() 1586 } 1587 } 1588 1589 func hasEnv(env string) bool { 1590 v := os.Getenv(env) 1591 1592 return !(v == "" || v == "0" || v == "false") 1593 } 1594 1595 func isTravis() bool { 1596 return hasEnv("TRAVIS") 1597 } 1598 1599 func isCatfs() bool { 1600 return hasEnv("CATFS") 1601 } 1602 1603 func (s *GoofysTest) mount(t *C, mountPoint string) { 1604 err := os.MkdirAll(mountPoint, 0700) 1605 t.Assert(err, IsNil) 1606 1607 server := fuseutil.NewFileSystemServer(s.fs) 1608 1609 if isCatfs() { 1610 s.fs.flags.MountOptions = make(map[string]string) 1611 s.fs.flags.MountOptions["allow_other"] = "" 1612 } 1613 1614 // Mount the file system. 
// Mount configuration: name the FUSE volume after the bucket and route
// fuse's own error/debug output through our loggers.
mountCfg := &fuse.MountConfig{
	FSName:                  s.fs.bucket,
	Options:                 s.fs.flags.MountOptions,
	ErrorLogger:             GetStdLogger(NewLogger("fuse"), logrus.ErrorLevel),
	DisableWritebackCaching: true,
}
mountCfg.DebugLogger = GetStdLogger(fuseLog, logrus.DebugLevel)

_, err = fuse.Mount(mountPoint, server, mountCfg)
t.Assert(err, IsNil)

if isCatfs() {
	cacheDir := mountPoint + "-cache"
	err := os.MkdirAll(cacheDir, 0700)
	t.Assert(err, IsNil)

	// dry-run catfs first (--test) so a misconfiguration fails loudly
	// with catfs's stderr instead of a confusing later failure
	catfs := exec.Command("catfs", "--test", "-ononempty", "--", mountPoint, cacheDir, mountPoint)
	_, err = catfs.Output()
	if err != nil {
		if ee, ok := err.(*exec.ExitError); ok {
			panic(ee.Stderr)
		}
	}

	// real catfs run: layer the cache dir over the goofys mount point
	catfs = exec.Command("catfs", "-ononempty", "--", mountPoint, cacheDir, mountPoint)

	if isTravis() {
		logger := NewLogger("catfs")
		lvl := logrus.InfoLevel
		logger.Formatter.(*LogHandle).Lvl = &lvl
		w := logger.Writer()

		catfs.Stdout = w
		catfs.Stderr = w

		catfs.Env = append(catfs.Env, "RUST_LOG=debug")
	}

	err = catfs.Start()
	t.Assert(err, IsNil)

	// give catfs a moment to finish mounting over mountPoint
	time.Sleep(time.Second)
}
}

// umount unmounts mountPoint, retrying for up to ~1s since the kernel
// may still consider the mount busy right after a test.
func (s *GoofysTest) umount(t *C, mountPoint string) {
	var err error
	for i := 0; i < 10; i++ {
		err = fuse.Unmount(mountPoint)
		if err != nil {
			time.Sleep(100 * time.Millisecond)
		} else {
			break
		}
	}
	t.Assert(err, IsNil)

	// best-effort cleanup; removal errors are intentionally ignored
	os.Remove(mountPoint)
	if isCatfs() {
		cacheDir := mountPoint + "-cache"
		os.Remove(cacheDir)
	}
}

// runFuseTest mounts the filesystem and runs an external test command
// against it, optionally unmounting when the command finishes.
func (s *GoofysTest) runFuseTest(t *C, mountPoint string, umount bool, cmdArgs ...string) {
	s.mount(t, mountPoint)

	if umount {
		defer s.umount(t, mountPoint)
	}

	// if command starts with ./ or ../ then we are executing a
	// relative path and cannot do chdir
	chdir := cmdArgs[0][0] != '.'
1689 1690 cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) 1691 cmd.Env = append(cmd.Env, os.Environ()...) 1692 cmd.Env = append(cmd.Env, "FAST=true") 1693 cmd.Env = append(cmd.Env, "CLEANUP=false") 1694 1695 if isTravis() { 1696 logger := NewLogger("test") 1697 lvl := logrus.InfoLevel 1698 logger.Formatter.(*LogHandle).Lvl = &lvl 1699 w := logger.Writer() 1700 1701 cmd.Stdout = w 1702 cmd.Stderr = w 1703 } 1704 1705 if chdir { 1706 oldCwd, err := os.Getwd() 1707 t.Assert(err, IsNil) 1708 1709 err = os.Chdir(mountPoint) 1710 t.Assert(err, IsNil) 1711 1712 defer os.Chdir(oldCwd) 1713 } 1714 1715 err := cmd.Run() 1716 t.Assert(err, IsNil) 1717 } 1718 1719 func (s *GoofysTest) TestFuse(t *C) { 1720 mountPoint := "/tmp/mnt" + s.fs.bucket 1721 1722 s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint) 1723 } 1724 1725 func (s *GoofysTest) TestFuseWithTTL(t *C) { 1726 s.fs.flags.StatCacheTTL = 60 * 1000 * 1000 * 1000 1727 mountPoint := "/tmp/mnt" + s.fs.bucket 1728 1729 s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint) 1730 } 1731 1732 func (s *GoofysTest) TestCheap(t *C) { 1733 s.fs.flags.Cheap = true 1734 s.TestLookUpInode(t) 1735 s.TestWriteLargeFile(t) 1736 } 1737 1738 func (s *GoofysTest) TestExplicitDir(t *C) { 1739 s.fs.flags.ExplicitDir = true 1740 s.testExplicitDir(t) 1741 } 1742 1743 func (s *GoofysTest) TestExplicitDirAndCheap(t *C) { 1744 s.fs.flags.ExplicitDir = true 1745 s.fs.flags.Cheap = true 1746 s.testExplicitDir(t) 1747 } 1748 1749 func (s *GoofysTest) testExplicitDir(t *C) { 1750 if s.cloud.Capabilities().DirBlob { 1751 t.Skip("only for backends without dir blob") 1752 } 1753 1754 _, err := s.LookUpInode(t, "file1") 1755 t.Assert(err, IsNil) 1756 1757 _, err = s.LookUpInode(t, "fileNotFound") 1758 t.Assert(err, Equals, fuse.ENOENT) 1759 1760 // dir1/ doesn't exist so we shouldn't be able to see it 1761 _, err = s.LookUpInode(t, "dir1/file3") 1762 t.Assert(err, Equals, fuse.ENOENT) 1763 1764 _, err = 
s.LookUpInode(t, "dir4/file5") 1765 t.Assert(err, IsNil) 1766 1767 _, err = s.LookUpInode(t, "empty_dir") 1768 t.Assert(err, IsNil) 1769 } 1770 1771 func (s *GoofysTest) TestBenchLs(t *C) { 1772 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1773 s.fs.flags.StatCacheTTL = 1 * time.Minute 1774 mountPoint := "/tmp/mnt" + s.fs.bucket 1775 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "ls") 1776 } 1777 1778 func (s *GoofysTest) TestBenchCreate(t *C) { 1779 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1780 s.fs.flags.StatCacheTTL = 1 * time.Minute 1781 mountPoint := "/tmp/mnt" + s.fs.bucket 1782 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create") 1783 } 1784 1785 func (s *GoofysTest) TestBenchCreateParallel(t *C) { 1786 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1787 s.fs.flags.StatCacheTTL = 1 * time.Minute 1788 mountPoint := "/tmp/mnt" + s.fs.bucket 1789 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "create_parallel") 1790 } 1791 1792 func (s *GoofysTest) TestBenchIO(t *C) { 1793 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1794 s.fs.flags.StatCacheTTL = 1 * time.Minute 1795 mountPoint := "/tmp/mnt" + s.fs.bucket 1796 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "io") 1797 } 1798 1799 func (s *GoofysTest) TestBenchFindTree(t *C) { 1800 s.fs.flags.TypeCacheTTL = 1 * time.Minute 1801 s.fs.flags.StatCacheTTL = 1 * time.Minute 1802 mountPoint := "/tmp/mnt" + s.fs.bucket 1803 1804 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "find") 1805 } 1806 1807 func (s *GoofysTest) TestIssue231(t *C) { 1808 if isTravis() { 1809 t.Skip("disable in travis, not sure if it has enough memory") 1810 } 1811 mountPoint := "/tmp/mnt" + s.fs.bucket 1812 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue231") 1813 } 1814 1815 func (s *GoofysTest) TestChmod(t *C) { 1816 root := s.getRoot(t) 1817 1818 lookupOp := 
fuseops.LookUpInodeOp{ 1819 Parent: root.Id, 1820 Name: "file1", 1821 } 1822 1823 err := s.fs.LookUpInode(nil, &lookupOp) 1824 t.Assert(err, IsNil) 1825 1826 targetMode := os.FileMode(0777) 1827 setOp := fuseops.SetInodeAttributesOp{Inode: lookupOp.Entry.Child, Mode: &targetMode} 1828 1829 err = s.fs.SetInodeAttributes(s.ctx, &setOp) 1830 t.Assert(err, IsNil) 1831 t.Assert(setOp.Attributes, NotNil) 1832 } 1833 1834 func (s *GoofysTest) TestIssue64(t *C) { 1835 /* 1836 mountPoint := "/tmp/mnt" + s.fs.bucket 1837 log.Level = logrus.DebugLevel 1838 1839 err := os.MkdirAll(mountPoint, 0700) 1840 t.Assert(err, IsNil) 1841 1842 defer os.Remove(mountPoint) 1843 1844 s.runFuseTest(t, mountPoint, false, "../bench/bench.sh", "cat", mountPoint, "issue64") 1845 */ 1846 } 1847 1848 func (s *GoofysTest) TestIssue69Fuse(t *C) { 1849 s.fs.flags.StatCacheTTL = 0 1850 1851 mountPoint := "/tmp/mnt" + s.fs.bucket 1852 1853 s.mount(t, mountPoint) 1854 1855 defer func() { 1856 err := os.Chdir("/") 1857 t.Assert(err, IsNil) 1858 1859 s.umount(t, mountPoint) 1860 }() 1861 1862 err := os.Chdir(mountPoint) 1863 t.Assert(err, IsNil) 1864 1865 _, err = os.Stat("dir1") 1866 t.Assert(err, IsNil) 1867 1868 err = os.Remove("dir1/file3") 1869 t.Assert(err, IsNil) 1870 1871 // don't really care about error code, but it should be a PathError 1872 os.Stat("dir1") 1873 os.Stat("dir1") 1874 } 1875 1876 func (s *GoofysTest) TestGetMimeType(t *C) { 1877 // option to use mime type not turned on 1878 mime := s.fs.flags.GetMimeType("foo.css") 1879 t.Assert(mime, IsNil) 1880 1881 s.fs.flags.UseContentType = true 1882 1883 mime = s.fs.flags.GetMimeType("foo.css") 1884 t.Assert(mime, NotNil) 1885 t.Assert(*mime, Equals, "text/css") 1886 1887 mime = s.fs.flags.GetMimeType("foo") 1888 t.Assert(mime, IsNil) 1889 1890 mime = s.fs.flags.GetMimeType("foo.") 1891 t.Assert(mime, IsNil) 1892 1893 mime = s.fs.flags.GetMimeType("foo.unknownExtension") 1894 t.Assert(mime, IsNil) 1895 } 1896 1897 func (s *GoofysTest) 
TestPutMimeType(t *C) {
	if _, ok := s.cloud.(*ADLv1); ok {
		// ADLv1 doesn't support content-type
		t.Skip("ADLv1 doesn't support content-type")
	}

	s.fs.flags.UseContentType = true

	root := s.getRoot(t)
	jpg := "test.jpg"
	jpg2 := "test2.jpg"
	file := "test"

	s.testWriteFile(t, jpg, 10, 128)

	resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: jpg})
	t.Assert(err, IsNil)
	t.Assert(*resp.ContentType, Equals, "image/jpeg")

	// renaming to an extension-less name must preserve the original
	// content-type rather than re-deriving it from the new name
	err = root.Rename(jpg, root, file)
	t.Assert(err, IsNil)

	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: file})
	t.Assert(err, IsNil)
	t.Assert(*resp.ContentType, Equals, "image/jpeg")

	// and renaming back to a .jpg name keeps it as well
	err = root.Rename(file, root, jpg2)
	t.Assert(err, IsNil)

	resp, err = s.cloud.HeadBlob(&HeadBlobInput{Key: jpg2})
	t.Assert(err, IsNil)
	t.Assert(*resp.ContentType, Equals, "image/jpeg")
}

// TestBucketPrefixSlash verifies that a ":prefix" suffix on the bucket
// spec is normalized to exactly one trailing slash.
func (s *GoofysTest) TestBucketPrefixSlash(t *C) {
	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2", s.fs.flags)
	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")

	s.fs = NewGoofys(context.Background(), s.fs.bucket+":dir2///", s.fs.flags)
	t.Assert(s.getRoot(t).dir.mountPrefix, Equals, "dir2/")
}

// TestFuseWithPrefix runs the external fuse test script against a
// prefixed mount.
func (s *GoofysTest) TestFuseWithPrefix(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.fs = NewGoofys(context.Background(), s.fs.bucket+":testprefix", s.fs.flags)

	s.runFuseTest(t, mountPoint, true, "../test/fuse-test.sh", mountPoint)
}

// TestRenameCache checks that a rename updates the lookup cache: the old
// name must turn into ENOENT and the new name must resolve, without
// waiting for the TTL to expire.
func (s *GoofysTest) TestRenameCache(t *C) {
	root := s.getRoot(t)
	// 60s in nanoseconds, long enough that nothing expires mid-test
	s.fs.flags.StatCacheTTL = 60 * 1000 * 1000 * 1000

	lookupOp1 := fuseops.LookUpInodeOp{
		Parent: root.Id,
		Name:   "file1",
	}

	lookupOp2 := lookupOp1
	lookupOp2.Name = "newfile"

	err := s.fs.LookUpInode(nil, &lookupOp1)
	t.Assert(err, IsNil)

	err = s.fs.LookUpInode(nil, &lookupOp2)
t.Assert(err, Equals, fuse.ENOENT) 1964 1965 renameOp := fuseops.RenameOp{ 1966 OldParent: root.Id, 1967 NewParent: root.Id, 1968 OldName: "file1", 1969 NewName: "newfile", 1970 } 1971 1972 err = s.fs.Rename(nil, &renameOp) 1973 t.Assert(err, IsNil) 1974 1975 lookupOp1.Entry = fuseops.ChildInodeEntry{} 1976 lookupOp2.Entry = fuseops.ChildInodeEntry{} 1977 1978 err = s.fs.LookUpInode(nil, &lookupOp1) 1979 t.Assert(err, Equals, fuse.ENOENT) 1980 1981 err = s.fs.LookUpInode(nil, &lookupOp2) 1982 t.Assert(err, IsNil) 1983 } 1984 1985 func (s *GoofysTest) anonymous(t *C) { 1986 // On azure this fails because we re-create the bucket with 1987 // the same name right away. And well anonymous access is not 1988 // implemented yet in our azure backend anyway 1989 var s3 *S3Backend 1990 var ok bool 1991 if s3, ok = s.cloud.Delegate().(*S3Backend); !ok { 1992 t.Skip("only for S3") 1993 } 1994 1995 err := s.deleteBucket(s.cloud) 1996 t.Assert(err, IsNil) 1997 1998 // use a different bucket name to prevent 409 Conflict from 1999 // delete bucket above 2000 s.fs.bucket = "goofys-test-" + RandStringBytesMaskImprSrc(16) 2001 s3.bucket = s.fs.bucket 2002 s.setupDefaultEnv(t, true) 2003 2004 s.fs = NewGoofys(context.Background(), s.fs.bucket, s.fs.flags) 2005 t.Assert(s.fs, NotNil) 2006 2007 // should have auto-detected by S3 backend 2008 cloud := s.getRoot(t).dir.cloud 2009 t.Assert(cloud, NotNil) 2010 s3, ok = cloud.Delegate().(*S3Backend) 2011 t.Assert(ok, Equals, true) 2012 2013 s3.awsConfig.Credentials = credentials.AnonymousCredentials 2014 s3.newS3() 2015 } 2016 2017 func (s *GoofysTest) disableS3() { 2018 time.Sleep(1 * time.Second) // wait for any background goroutines to finish 2019 dir := s.fs.inodes[fuseops.RootInodeID].dir 2020 dir.cloud = StorageBackendInitError{ 2021 fmt.Errorf("cloud disabled"), 2022 *dir.cloud.Capabilities(), 2023 } 2024 } 2025 2026 func (s *GoofysTest) TestWriteAnonymous(t *C) { 2027 s.anonymous(t) 2028 s.fs.flags.StatCacheTTL = 1 * time.Minute 2029 
s.fs.flags.TypeCacheTTL = 1 * time.Minute 2030 2031 fileName := "test" 2032 2033 createOp := fuseops.CreateFileOp{ 2034 Parent: s.getRoot(t).Id, 2035 Name: fileName, 2036 } 2037 2038 err := s.fs.CreateFile(s.ctx, &createOp) 2039 t.Assert(err, IsNil) 2040 2041 err = s.fs.FlushFile(s.ctx, &fuseops.FlushFileOp{ 2042 Handle: createOp.Handle, 2043 Inode: createOp.Entry.Child, 2044 }) 2045 t.Assert(err, Equals, syscall.EACCES) 2046 2047 err = s.fs.ReleaseFileHandle(s.ctx, &fuseops.ReleaseFileHandleOp{Handle: createOp.Handle}) 2048 t.Assert(err, IsNil) 2049 2050 err = s.fs.LookUpInode(s.ctx, &fuseops.LookUpInodeOp{ 2051 Parent: s.getRoot(t).Id, 2052 Name: fileName, 2053 }) 2054 t.Assert(err, Equals, fuse.ENOENT) 2055 // BUG! the file shouldn't exist, see test below for comment, 2056 // this behaves as expected only because we are bypassing 2057 // linux vfs in this test 2058 } 2059 2060 func (s *GoofysTest) TestWriteAnonymousFuse(t *C) { 2061 s.anonymous(t) 2062 s.fs.flags.StatCacheTTL = 1 * time.Minute 2063 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2064 2065 mountPoint := "/tmp/mnt" + s.fs.bucket 2066 2067 s.mount(t, mountPoint) 2068 defer s.umount(t, mountPoint) 2069 2070 err := ioutil.WriteFile(mountPoint+"/test", []byte(""), 0600) 2071 t.Assert(err, NotNil) 2072 pathErr, ok := err.(*os.PathError) 2073 t.Assert(ok, Equals, true) 2074 t.Assert(pathErr.Err, Equals, syscall.EACCES) 2075 2076 _, err = os.Stat(mountPoint + "/test") 2077 t.Assert(err, IsNil) 2078 // BUG! 
the file shouldn't exist, the condition below should hold instead 2079 // see comment in Goofys.FlushFile 2080 // pathErr, ok = err.(*os.PathError) 2081 // t.Assert(ok, Equals, true) 2082 // t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2083 2084 _, err = ioutil.ReadFile(mountPoint + "/test") 2085 t.Assert(err, NotNil) 2086 pathErr, ok = err.(*os.PathError) 2087 t.Assert(ok, Equals, true) 2088 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2089 2090 // reading the file and getting ENOENT causes the kernel to 2091 // invalidate the entry, failing at open is not sufficient, we 2092 // have to fail at read (which means that if the application 2093 // uses splice(2) it won't get to us, so this wouldn't work 2094 _, err = os.Stat(mountPoint + "/test") 2095 t.Assert(err, NotNil) 2096 pathErr, ok = err.(*os.PathError) 2097 t.Assert(ok, Equals, true) 2098 t.Assert(pathErr.Err, Equals, fuse.ENOENT) 2099 } 2100 2101 func (s *GoofysTest) TestWriteSyncWriteFuse(t *C) { 2102 mountPoint := "/tmp/mnt" + s.fs.bucket 2103 2104 s.mount(t, mountPoint) 2105 defer s.umount(t, mountPoint) 2106 2107 var f *os.File 2108 var n int 2109 var err error 2110 2111 defer func() { 2112 if err != nil { 2113 f.Close() 2114 } 2115 }() 2116 2117 f, err = os.Create(mountPoint + "/TestWriteSyncWrite") 2118 t.Assert(err, IsNil) 2119 2120 n, err = f.Write([]byte("hello\n")) 2121 t.Assert(err, IsNil) 2122 t.Assert(n, Equals, 6) 2123 2124 err = f.Sync() 2125 t.Assert(err, IsNil) 2126 2127 n, err = f.Write([]byte("world\n")) 2128 t.Assert(err, IsNil) 2129 t.Assert(n, Equals, 6) 2130 2131 err = f.Close() 2132 t.Assert(err, IsNil) 2133 } 2134 2135 func (s *GoofysTest) TestIssue156(t *C) { 2136 _, err := s.LookUpInode(t, "\xae\x8a-") 2137 // S3Proxy and aws s3 return different errors 2138 // https://github.com/andrewgaul/s3proxy/issues/201 2139 t.Assert(err, NotNil) 2140 } 2141 2142 func (s *GoofysTest) TestIssue162(t *C) { 2143 if s.azurite { 2144 t.Skip("https://github.com/Azure/Azurite/issues/221") 2145 } 2146 
2147 params := &PutBlobInput{ 2148 Key: "dir1/lör 006.jpg", 2149 Body: bytes.NewReader([]byte("foo")), 2150 Size: PUInt64(3), 2151 } 2152 _, err := s.cloud.PutBlob(params) 2153 t.Assert(err, IsNil) 2154 2155 dir, err := s.LookUpInode(t, "dir1") 2156 t.Assert(err, IsNil) 2157 2158 err = dir.Rename("lör 006.jpg", dir, "myfile.jpg") 2159 t.Assert(err, IsNil) 2160 2161 resp, err := s.cloud.HeadBlob(&HeadBlobInput{Key: "dir1/myfile.jpg"}) 2162 t.Assert(resp.Size, Equals, uint64(3)) 2163 } 2164 2165 func (s *GoofysTest) TestXAttrGet(t *C) { 2166 if _, ok := s.cloud.(*ADLv1); ok { 2167 t.Skip("ADLv1 doesn't support metadata") 2168 } 2169 2170 _, checkETag := s.cloud.Delegate().(*S3Backend) 2171 xattrPrefix := s.cloud.Capabilities().Name + "." 2172 2173 file1, err := s.LookUpInode(t, "file1") 2174 t.Assert(err, IsNil) 2175 2176 names, err := file1.ListXattr() 2177 t.Assert(err, IsNil) 2178 expectedXattrs := []string{ 2179 xattrPrefix + "etag", 2180 xattrPrefix + "storage-class", 2181 "user.name", 2182 } 2183 sort.Strings(expectedXattrs) 2184 t.Assert(names, DeepEquals, expectedXattrs) 2185 2186 _, err = file1.GetXattr("user.foobar") 2187 t.Assert(err, Equals, unix.ENODATA) 2188 2189 if checkETag { 2190 value, err := file1.GetXattr("s3.etag") 2191 t.Assert(err, IsNil) 2192 // md5sum of "file1" 2193 t.Assert(string(value), Equals, "\"826e8142e6baabe8af779f5f490cf5f5\"") 2194 } 2195 2196 value, err := file1.GetXattr("user.name") 2197 t.Assert(err, IsNil) 2198 t.Assert(string(value), Equals, "file1+/#\x00") 2199 2200 dir1, err := s.LookUpInode(t, "dir1") 2201 t.Assert(err, IsNil) 2202 2203 if !s.cloud.Capabilities().DirBlob { 2204 // implicit dir blobs don't have s3.etag at all 2205 names, err = dir1.ListXattr() 2206 t.Assert(err, IsNil) 2207 t.Assert(len(names), Equals, 0, Commentf("names: %v", names)) 2208 2209 value, err = dir1.GetXattr(xattrPrefix + "etag") 2210 t.Assert(err, Equals, syscall.ENODATA) 2211 } 2212 2213 // list dir1 to populate file3 in cache, then get 
file3's xattr 2214 lookup := fuseops.LookUpInodeOp{ 2215 Parent: fuseops.RootInodeID, 2216 Name: "dir1", 2217 } 2218 err = s.fs.LookUpInode(nil, &lookup) 2219 t.Assert(err, IsNil) 2220 2221 s.readDirIntoCache(t, lookup.Entry.Child) 2222 2223 dir1 = s.fs.inodes[lookup.Entry.Child] 2224 file3 := dir1.findChild("file3") 2225 t.Assert(file3, NotNil) 2226 t.Assert(file3.userMetadata, IsNil) 2227 2228 if checkETag { 2229 value, err = file3.GetXattr("s3.etag") 2230 t.Assert(err, IsNil) 2231 // md5sum of "dir1/file3" 2232 t.Assert(string(value), Equals, "\"5cd67e0e59fb85be91a515afe0f4bb24\"") 2233 } 2234 2235 // ensure that we get the dir blob instead of list 2236 s.fs.flags.Cheap = true 2237 2238 emptyDir2, err := s.LookUpInode(t, "empty_dir2") 2239 t.Assert(err, IsNil) 2240 2241 names, err = emptyDir2.ListXattr() 2242 t.Assert(err, IsNil) 2243 sort.Strings(names) 2244 t.Assert(names, DeepEquals, expectedXattrs) 2245 2246 emptyDir, err := s.LookUpInode(t, "empty_dir") 2247 t.Assert(err, IsNil) 2248 2249 if checkETag { 2250 value, err = emptyDir.GetXattr("s3.etag") 2251 t.Assert(err, IsNil) 2252 // dir blobs are empty 2253 t.Assert(string(value), Equals, "\"d41d8cd98f00b204e9800998ecf8427e\"") 2254 } 2255 2256 // s3proxy doesn't support storage class yet 2257 if hasEnv("AWS") { 2258 cloud := s.getRoot(t).dir.cloud 2259 s3, ok := cloud.Delegate().(*S3Backend) 2260 t.Assert(ok, Equals, true) 2261 s3.config.StorageClass = "STANDARD_IA" 2262 2263 s.testWriteFile(t, "ia", 1, 128*1024) 2264 2265 ia, err := s.LookUpInode(t, "ia") 2266 t.Assert(err, IsNil) 2267 2268 names, err = ia.ListXattr() 2269 t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"}) 2270 2271 value, err = ia.GetXattr("s3.storage-class") 2272 t.Assert(err, IsNil) 2273 // smaller than 128KB falls back to standard 2274 t.Assert(string(value), Equals, "STANDARD") 2275 2276 s.testWriteFile(t, "ia", 128*1024, 128*1024) 2277 time.Sleep(100 * time.Millisecond) 2278 2279 names, err = ia.ListXattr() 2280 
t.Assert(names, DeepEquals, []string{"s3.etag", "s3.storage-class"})

		value, err = ia.GetXattr("s3.storage-class")
		t.Assert(err, IsNil)
		t.Assert(string(value), Equals, "STANDARD_IA")
	}
}

// TestClientForkExec verifies that goofys file handles survive a
// fork+exec from a client process: the child inherits copies of the
// open descriptors and "closes" them on exit, which must not break
// subsequent operations on the still-open handle in the parent.
func (s *GoofysTest) TestClientForkExec(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)
	file := mountPoint + "/TestClientForkExec"

	// Create new file.
	fh, err := os.OpenFile(file, os.O_CREATE|os.O_RDWR, 0600)
	t.Assert(err, IsNil)
	defer func() { // Defer close file if it's not already closed.
		if fh != nil {
			fh.Close()
		}
	}()
	// Write to file.
	_, err = fh.WriteString("1.1;")
	t.Assert(err, IsNil)
	// The `Command` is run via fork+exec.
	// So all the file descriptors are copied over to the child process.
	// The child process 'closes' the files before exiting. This should
	// not result in goofys failing file operations invoked from the test.
	someCmd := exec.Command("echo", "hello")
	err = someCmd.Run()
	t.Assert(err, IsNil)
	// One more write.
	_, err = fh.WriteString("1.2;")
	t.Assert(err, IsNil)
	// Close file.
	err = fh.Close()
	t.Assert(err, IsNil)
	fh = nil
	// Check file content.
	content, err := ioutil.ReadFile(file)
	t.Assert(err, IsNil)
	t.Assert(string(content), Equals, "1.1;1.2;")

	// Repeat the same exercise, but now with an existing file.
	fh, err = os.OpenFile(file, os.O_RDWR, 0600)
	// fix: the reopen previously ignored its error; a failed open would
	// have nil-dereferenced fh below instead of failing the test
	t.Assert(err, IsNil)
	// Write to file.
	_, err = fh.WriteString("2.1;")
	// fix: this write's error was previously ignored
	t.Assert(err, IsNil)
	// fork+exec.
	someCmd = exec.Command("echo", "hello")
	err = someCmd.Run()
	t.Assert(err, IsNil)
	// One more write.
	_, err = fh.WriteString("2.2;")
	t.Assert(err, IsNil)
	// Close file.
	err = fh.Close()
	t.Assert(err, IsNil)
	fh = nil
	// Verify that the file is updated as per the new write.
2340 content, err = ioutil.ReadFile(file) 2341 t.Assert(err, IsNil) 2342 t.Assert(string(content), Equals, "2.1;2.2;") 2343 } 2344 2345 func (s *GoofysTest) TestXAttrGetCached(t *C) { 2346 if _, ok := s.cloud.(*ADLv1); ok { 2347 t.Skip("ADLv1 doesn't support metadata") 2348 } 2349 2350 xattrPrefix := s.cloud.Capabilities().Name + "." 2351 2352 s.fs.flags.StatCacheTTL = 1 * time.Minute 2353 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2354 s.readDirIntoCache(t, fuseops.RootInodeID) 2355 s.disableS3() 2356 2357 in, err := s.LookUpInode(t, "file1") 2358 t.Assert(err, IsNil) 2359 t.Assert(in.userMetadata, IsNil) 2360 2361 _, err = in.GetXattr(xattrPrefix + "etag") 2362 t.Assert(err, IsNil) 2363 } 2364 2365 func (s *GoofysTest) TestXAttrCopied(t *C) { 2366 if _, ok := s.cloud.(*ADLv1); ok { 2367 t.Skip("ADLv1 doesn't support metadata") 2368 } 2369 2370 root := s.getRoot(t) 2371 2372 err := root.Rename("file1", root, "file0") 2373 t.Assert(err, IsNil) 2374 2375 in, err := s.LookUpInode(t, "file0") 2376 t.Assert(err, IsNil) 2377 2378 _, err = in.GetXattr("user.name") 2379 t.Assert(err, IsNil) 2380 } 2381 2382 func (s *GoofysTest) TestXAttrRemove(t *C) { 2383 if _, ok := s.cloud.(*ADLv1); ok { 2384 t.Skip("ADLv1 doesn't support metadata") 2385 } 2386 2387 in, err := s.LookUpInode(t, "file1") 2388 t.Assert(err, IsNil) 2389 2390 _, err = in.GetXattr("user.name") 2391 t.Assert(err, IsNil) 2392 2393 err = in.RemoveXattr("user.name") 2394 t.Assert(err, IsNil) 2395 2396 _, err = in.GetXattr("user.name") 2397 t.Assert(err, Equals, syscall.ENODATA) 2398 } 2399 2400 func (s *GoofysTest) TestXAttrFuse(t *C) { 2401 if _, ok := s.cloud.(*ADLv1); ok { 2402 t.Skip("ADLv1 doesn't support metadata") 2403 } 2404 2405 _, checkETag := s.cloud.Delegate().(*S3Backend) 2406 xattrPrefix := s.cloud.Capabilities().Name + "." 

	//fuseLog.Level = logrus.DebugLevel
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	// xattrs we expect on file1: the backend-specific etag and
	// storage-class plus the user metadata set by the fixture.
	expectedXattrs := []string{
		xattrPrefix + "etag",
		xattrPrefix + "storage-class",
		"user.name",
	}
	sort.Strings(expectedXattrs)

	// listxattr returns the names NUL-separated
	var expectedXattrsStr string
	for _, x := range expectedXattrs {
		expectedXattrsStr += x + "\x00"
	}
	var buf [1024]byte

	// error if size is too small (but not zero)
	_, err := unix.Listxattr(mountPoint+"/file1", buf[:1])
	t.Assert(err, Equals, unix.ERANGE)

	// 0 len buffer means interrogate the size of buffer
	nbytes, err := unix.Listxattr(mountPoint+"/file1", nil)
	t.Assert(err, Equals, nil)
	t.Assert(nbytes, Equals, len(expectedXattrsStr))

	nbytes, err = unix.Listxattr(mountPoint+"/file1", buf[:nbytes])
	t.Assert(err, IsNil)
	t.Assert(nbytes, Equals, len(expectedXattrsStr))
	t.Assert(string(buf[:nbytes]), Equals, expectedXattrsStr)

	// same ERANGE / size-probe / read protocol for getxattr
	_, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:1])
	t.Assert(err, Equals, unix.ERANGE)

	nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", nil)
	t.Assert(err, IsNil)
	t.Assert(nbytes, Equals, 9)

	nbytes, err = unix.Getxattr(mountPoint+"/file1", "user.name", buf[:nbytes])
	t.Assert(err, IsNil)
	t.Assert(nbytes, Equals, 9)
	t.Assert(string(buf[:nbytes]), Equals, "file1+/#\x00")

	if !s.cloud.Capabilities().DirBlob {
		// dir1 has no xattrs
		nbytes, err = unix.Listxattr(mountPoint+"/dir1", nil)
		t.Assert(err, IsNil)
		t.Assert(nbytes, Equals, 0)

		nbytes, err = unix.Listxattr(mountPoint+"/dir1", buf[:1])
		t.Assert(err, IsNil)
		t.Assert(nbytes, Equals, 0)
	}

	if checkETag {
		_, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:1])
		t.Assert(err, Equals, unix.ERANGE)

		nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", nil)
		t.Assert(err, IsNil)
		// 32 bytes md5 plus quotes
		t.Assert(nbytes, Equals, 34)

		nbytes, err = unix.Getxattr(mountPoint+"/file1", "s3.etag", buf[:nbytes])
		t.Assert(err, IsNil)
		t.Assert(nbytes, Equals, 34)
		t.Assert(string(buf[:nbytes]), Equals,
			"\"826e8142e6baabe8af779f5f490cf5f5\"")
	}
}

// TestXAttrSet exercises SetXattr flag semantics: XATTR_REPLACE on a
// missing attribute is ENODATA, XATTR_CREATE on an existing one is EEXIST,
// flag 0 always succeeds, and the synthetic "s3." namespace is read-only
// (EPERM).
func (s *GoofysTest) TestXAttrSet(t *C) {
	if _, ok := s.cloud.(*ADLv1); ok {
		t.Skip("ADLv1 doesn't support metadata")
	}

	in, err := s.LookUpInode(t, "file1")
	t.Assert(err, IsNil)

	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_REPLACE)
	t.Assert(err, Equals, syscall.ENODATA)

	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE)
	t.Assert(err, IsNil)

	err = in.SetXattr("user.bar", []byte("hello"), unix.XATTR_CREATE)
	t.Assert(err, Equals, syscall.EEXIST)

	// re-lookup to make sure the value persisted past this inode
	in, err = s.LookUpInode(t, "file1")
	t.Assert(err, IsNil)

	value, err := in.GetXattr("user.bar")
	t.Assert(err, IsNil)
	t.Assert(string(value), Equals, "hello")

	// value with characters that need escaping plus an embedded NUL
	value = []byte("file1+%/#\x00")

	err = in.SetXattr("user.bar", value, unix.XATTR_REPLACE)
	t.Assert(err, IsNil)

	in, err = s.LookUpInode(t, "file1")
	t.Assert(err, IsNil)

	value2, err := in.GetXattr("user.bar")
	t.Assert(err, IsNil)
	t.Assert(value2, DeepEquals, value)

	// setting with flag = 0 always works
	err = in.SetXattr("user.bar", []byte("world"), 0)
	t.Assert(err, IsNil)

	err = in.SetXattr("user.baz", []byte("world"), 0)
	t.Assert(err, IsNil)

	value, err = in.GetXattr("user.bar")
	t.Assert(err, IsNil)

	value2, err = in.GetXattr("user.baz")
	t.Assert(err, IsNil)

	t.Assert(value2, DeepEquals, value)
	t.Assert(string(value2), DeepEquals, "world")

	err = in.SetXattr("s3.bar", []byte("hello"), unix.XATTR_CREATE)
	t.Assert(err, Equals, syscall.EPERM)
}

// TestPythonCopyTree runs python shutil.copytree against the mount as an
// end-to-end recursive-copy smoke test.
func (s *GoofysTest) TestPythonCopyTree(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.runFuseTest(t, mountPoint, true, "python", "-c",
		"import shutil; shutil.copytree('dir2', 'dir5')",
		mountPoint)
}

// TestCreateRenameBeforeCloseFuse renames a newly created file while its
// handle is still open, then closes; the data must land at the new name
// and the old name must be gone.
func (s *GoofysTest) TestCreateRenameBeforeCloseFuse(t *C) {
	if s.azurite {
		// Azurite returns 400 when copy source doesn't exist
		// https://github.com/Azure/Azurite/issues/219
		// so our code to ignore ENOENT fails
		t.Skip("https://github.com/Azure/Azurite/issues/219")
	}

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	from := mountPoint + "/newfile"
	to := mountPoint + "/newfile2"

	fh, err := os.Create(from)
	t.Assert(err, IsNil)
	defer func() {
		// close the file if the test failed so we can unmount
		if fh != nil {
			fh.Close()
		}
	}()

	_, err = fh.WriteString("hello world")
	t.Assert(err, IsNil)

	err = os.Rename(from, to)
	t.Assert(err, IsNil)

	err = fh.Close()
	t.Assert(err, IsNil)
	fh = nil

	_, err = os.Stat(from)
	t.Assert(err, NotNil)
	pathErr, ok := err.(*os.PathError)
	t.Assert(ok, Equals, true)
	t.Assert(pathErr.Err, Equals, fuse.ENOENT)

	content, err := ioutil.ReadFile(to)
	t.Assert(err, IsNil)
	t.Assert(string(content), Equals, "hello world")
}

// TestRenameBeforeCloseFuse is the same as the test above but for a file
// that already existed (created via WriteFile) before being reopened,
// written, and renamed while open.
func (s *GoofysTest) TestRenameBeforeCloseFuse(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	from := mountPoint + "/newfile"
	to := mountPoint + "/newfile2"

	err := ioutil.WriteFile(from, []byte(""), 0600)
	t.Assert(err, IsNil)

	fh, err := os.OpenFile(from, os.O_WRONLY, 0600)
	t.Assert(err, IsNil)
	defer func() {
		// close the file if the test failed so we can unmount
		if fh != nil {
			fh.Close()
		}
	}()

	_, err = fh.WriteString("hello world")
	t.Assert(err, IsNil)

	err = os.Rename(from, to)
	t.Assert(err, IsNil)

	err = fh.Close()
	t.Assert(err, IsNil)
	fh = nil

	_, err = os.Stat(from)
	t.Assert(err, NotNil)
	pathErr, ok := err.(*os.PathError)
	t.Assert(ok, Equals, true)
	t.Assert(pathErr.Err, Equals, fuse.ENOENT)

	content, err := ioutil.ReadFile(to)
	t.Assert(err, IsNil)
	t.Assert(string(content), Equals, "hello world")
}

// TestInodeInsert checks that insertChild keeps dir.Children sorted by
// name, that findChild locates exactly the inserted names, and that
// removeChild shrinks the slice. Indexing starts at 2 — presumably
// slots 0/1 hold pre-existing entries; confirm against DirInodeData.
func (s *GoofysTest) TestInodeInsert(t *C) {
	root := s.getRoot(t)

	in := NewInode(s.fs, root, aws.String("2"))
	in.Attributes = InodeAttributes{}
	root.insertChild(in)
	t.Assert(*root.dir.Children[2].Name, Equals, "2")

	in = NewInode(s.fs, root, aws.String("1"))
	in.Attributes = InodeAttributes{}
	root.insertChild(in)
	t.Assert(*root.dir.Children[2].Name, Equals, "1")
	t.Assert(*root.dir.Children[3].Name, Equals, "2")

	in = NewInode(s.fs, root, aws.String("4"))
	in.Attributes = InodeAttributes{}
	root.insertChild(in)
	t.Assert(*root.dir.Children[2].Name, Equals, "1")
	t.Assert(*root.dir.Children[3].Name, Equals, "2")
	t.Assert(*root.dir.Children[4].Name, Equals, "4")

	inode := root.findChild("1")
	t.Assert(inode, NotNil)
	t.Assert(*inode.Name, Equals, "1")

	inode = root.findChild("2")
	t.Assert(inode, NotNil)
	t.Assert(*inode.Name, Equals, "2")

	inode = root.findChild("4")
	t.Assert(inode, NotNil)
	t.Assert(*inode.Name, Equals, "4")

	inode = root.findChild("0")
	t.Assert(inode, IsNil)

	inode = root.findChild("3")
	t.Assert(inode, IsNil)

	root.removeChild(root.dir.Children[3])
	root.removeChild(root.dir.Children[2])
	root.removeChild(root.dir.Children[2])
	t.Assert(len(root.dir.Children), Equals, 2)
}

// TestReadDirSlurpHeuristic verifies that sequentially opening sibling
// directories bumps the parent's seqOpenDirScore (the heuristic that
// triggers subtree slurping). S3-only.
func (s *GoofysTest) TestReadDirSlurpHeuristic(t *C) {
	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
		t.Skip("only for S3")
	}
	s.fs.flags.TypeCacheTTL = 1 * time.Minute

	s.setupBlobs(s.cloud, t, map[string]*string{"dir2isafile": nil})

	root := s.getRoot(t).dir
	t.Assert(root.seqOpenDirScore, Equals, uint8(0))
	s.assertEntries(t, s.getRoot(t), []string{
		"dir1", "dir2", "dir2isafile", "dir4", "empty_dir",
		"empty_dir2", "file1", "file2", "zero"})

	dir1, err := s.LookUpInode(t, "dir1")
	t.Assert(err, IsNil)
	dh1 := dir1.OpenDir()
	defer dh1.CloseDir()
	score := root.seqOpenDirScore

	// each additional sequential OpenDir on a sibling increments the score
	dir2, err := s.LookUpInode(t, "dir2")
	t.Assert(err, IsNil)
	dh2 := dir2.OpenDir()
	defer dh2.CloseDir()
	t.Assert(root.seqOpenDirScore, Equals, score+1)

	dir3, err := s.LookUpInode(t, "dir4")
	t.Assert(err, IsNil)
	dh3 := dir3.OpenDir()
	defer dh3.CloseDir()
	t.Assert(root.seqOpenDirScore, Equals, score+2)
}

// TestReadDirSlurpSubtree verifies that with a high seqOpenDirScore,
// reading dir2 slurps dir2/dir3's listing into the cache so it can be
// served with the backend disabled. S3-only.
func (s *GoofysTest) TestReadDirSlurpSubtree(t *C) {
	if _, ok := s.cloud.Delegate().(*S3Backend); !ok {
		t.Skip("only for S3")
	}
	s.fs.flags.TypeCacheTTL = 1 * time.Minute
	s.fs.flags.StatCacheTTL = 1 * time.Minute

	s.getRoot(t).dir.seqOpenDirScore = 2
	in, err := s.LookUpInode(t, "dir2")
	t.Assert(err, IsNil)
	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(2))

	s.readDirIntoCache(t, in.Id)
	// should have incremented the score
	t.Assert(s.getRoot(t).dir.seqOpenDirScore, Equals, uint8(3))

	// reading dir2 should cause dir2/dir3 to have cached readdir
	s.disableS3()

	in, err = s.LookUpInode(t, "dir2/dir3")
	t.Assert(err, IsNil)

	s.assertEntries(t, in, []string{"file4"})
}

// TestReadDirCached verifies that a cached readdir returns directories
// first, then files, each group in order.
func (s *GoofysTest) TestReadDirCached(t *C) {
	s.fs.flags.StatCacheTTL = 1 * time.Minute
	s.fs.flags.TypeCacheTTL = 1 * time.Minute

	s.getRoot(t).dir.seqOpenDirScore = 2
	s.readDirIntoCache(t, fuseops.RootInodeID)
	s.disableS3()

	dh := s.getRoot(t).OpenDir()

	entries := s.readDirFully(t, dh)
	dirs := make([]string, 0)
	files := make([]string, 0)
	noMoreDir := false

	for _, en := range entries {
		if en.Type == fuseutil.DT_Directory {
			// no directory may appear after the first file
			t.Assert(noMoreDir, Equals, false)
			dirs = append(dirs, en.Name)
		} else {
			files = append(files, en.Name)
			noMoreDir = true
		}
	}

	t.Assert(dirs, DeepEquals, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2"})
	t.Assert(files, DeepEquals, []string{"file1", "file2", "zero"})
}

// TestReadDirLookUp runs concurrent readdir and lookup on the same
// parent to shake out races between the two code paths (run with -race).
func (s *GoofysTest) TestReadDirLookUp(t *C) {
	s.getRoot(t).dir.seqOpenDirScore = 2

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(2)
		go func() {
			defer wg.Done()
			s.readDirIntoCache(t, fuseops.RootInodeID)
		}()
		go func() {
			defer wg.Done()

			lookup := fuseops.LookUpInodeOp{
				Parent: fuseops.RootInodeID,
				Name:   "file1",
			}
			err := s.fs.LookUpInode(nil, &lookup)
			t.Assert(err, IsNil)
		}()
	}
	wg.Wait()
}

// writeSeekWriteFuse writes first, seeks forward over a hole the size of
// second, writes third, seeks back, fills in second, then verifies the
// final content is first+second+third and the file mode is unchanged.
func (s *GoofysTest) writeSeekWriteFuse(t *C, file string, fh *os.File, first string, second string, third string) {
	fi, err := os.Stat(file)
	t.Assert(err, IsNil)

	defer func() {
		// close the file if the test failed so we can unmount
		if fh != nil {
			fh.Close()
		}
	}()

	_, err = fh.WriteString(first)
	t.Assert(err, IsNil)

	// seek forward relative to current position (whence=1)
	off, err := fh.Seek(int64(len(second)), 1)
	t.Assert(err, IsNil)
	t.Assert(off, Equals, int64(len(first)+len(second)))

	_, err = fh.WriteString(third)
	t.Assert(err, IsNil)

	// seek back to the start of the gap (whence=0, absolute)
	off, err = fh.Seek(int64(len(first)), 0)
	t.Assert(err, IsNil)
	t.Assert(off, Equals, int64(len(first)))

	_, err = fh.WriteString(second)
	t.Assert(err, IsNil)

	err = fh.Close()
	t.Assert(err, IsNil)
	fh = nil

	content, err := ioutil.ReadFile(file)
	t.Assert(err, IsNil)
	t.Assert(string(content), Equals, first+second+third)

	fi2, err := os.Stat(file)
	t.Assert(err, IsNil)
	t.Assert(fi.Mode(), Equals, fi2.Mode())
}

// TestWriteSeekWriteFuse drives writeSeekWriteFuse on a new and then an
// existing file; out-of-order writes need the catfs cache layer.
func (s *GoofysTest) TestWriteSeekWriteFuse(t *C) {
	if !isCatfs() {
		t.Skip("only works with CATFS=true")
	}

	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	file := mountPoint + "/newfile"

	fh, err := os.Create(file)
	t.Assert(err, IsNil)

	s.writeSeekWriteFuse(t, file, fh, "hello", " ", "world")

	fh, err = os.OpenFile(file, os.O_WRONLY, 0600)
	t.Assert(err, IsNil)

	s.writeSeekWriteFuse(t, file, fh, "", "never", "minding")
}

// TestDirMtimeCreate verifies that creating a child bumps the parent
// directory's mtime.
func (s *GoofysTest) TestDirMtimeCreate(t *C) {
	root := s.getRoot(t)

	attr, _ := root.GetAttributes()
	m1 := attr.Mtime
	time.Sleep(time.Second)

	_, _ = root.Create("foo", fuseops.OpMetadata{uint32(os.Getpid())})
	attr2, _ := root.GetAttributes()
	m2 := attr2.Mtime

	t.Assert(m1.Before(m2), Equals, true)
}

// TestDirMtimeLs verifies that a blob added behind our back is reflected
// in the directory mtime after a readdir refreshes the cache.
func (s *GoofysTest) TestDirMtimeLs(t *C) {
	root := s.getRoot(t)

	attr, _ := root.GetAttributes()
	m1 := attr.Mtime
	time.Sleep(3 * time.Second)

	params := &PutBlobInput{
		Key:  "newfile",
		Body: bytes.NewReader([]byte("foo")),
		Size: PUInt64(3),
	}
	_, err := s.cloud.PutBlob(params)
	t.Assert(err, IsNil)

	s.readDirIntoCache(t, fuseops.RootInodeID)

	attr2, _ := root.GetAttributes()
	m2 := attr2.Mtime

	t.Assert(m1.Before(m2), Equals, true)
}

// TestRenameOverwrite verifies that renaming onto an existing file
// (file1) succeeds through the mount.
func (s *GoofysTest) TestRenameOverwrite(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket
	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	file := mountPoint +
"/newfile" 2894 rename := mountPoint + "/file1" 2895 2896 fh, err := os.Create(file) 2897 t.Assert(err, IsNil) 2898 2899 err = fh.Close() 2900 t.Assert(err, IsNil) 2901 2902 err = os.Rename(file, rename) 2903 t.Assert(err, IsNil) 2904 } 2905 2906 func (s *GoofysTest) TestRead403(t *C) { 2907 // anonymous only works in S3 for now 2908 cloud := s.getRoot(t).dir.cloud 2909 s3, ok := cloud.Delegate().(*S3Backend) 2910 if !ok { 2911 t.Skip("only for S3") 2912 } 2913 2914 s.fs.flags.StatCacheTTL = 1 * time.Minute 2915 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2916 2917 // cache the inode first so we don't get 403 when we lookup 2918 in, err := s.LookUpInode(t, "file1") 2919 t.Assert(err, IsNil) 2920 2921 fh, err := in.OpenFile(fuseops.OpMetadata{uint32(os.Getpid())}) 2922 t.Assert(err, IsNil) 2923 2924 s3.awsConfig.Credentials = credentials.AnonymousCredentials 2925 s3.newS3() 2926 2927 // fake enable read-ahead 2928 fh.seqReadAmount = uint64(READAHEAD_CHUNK) 2929 2930 buf := make([]byte, 5) 2931 2932 _, err = fh.ReadFile(0, buf) 2933 t.Assert(err, Equals, syscall.EACCES) 2934 2935 // now that the S3 GET has failed, try again, see 2936 // https://github.com/kahing/goofys/pull/243 2937 _, err = fh.ReadFile(0, buf) 2938 t.Assert(err, Equals, syscall.EACCES) 2939 } 2940 2941 func (s *GoofysTest) TestRmdirWithDiropen(t *C) { 2942 mountPoint := "/tmp/mnt" + s.fs.bucket 2943 s.fs.flags.StatCacheTTL = 1 * time.Minute 2944 s.fs.flags.TypeCacheTTL = 1 * time.Minute 2945 2946 s.mount(t, mountPoint) 2947 defer s.umount(t, mountPoint) 2948 2949 err := os.MkdirAll(mountPoint+"/dir2/dir4", 0700) 2950 t.Assert(err, IsNil) 2951 err = os.MkdirAll(mountPoint+"/dir2/dir5", 0700) 2952 t.Assert(err, IsNil) 2953 2954 //1, open dir5 2955 dir := mountPoint + "/dir2/dir5" 2956 fh, err := os.Open(dir) 2957 t.Assert(err, IsNil) 2958 defer fh.Close() 2959 2960 cmd1 := exec.Command("ls", mountPoint+"/dir2") 2961 //out, err := cmd.Output() 2962 out1, err1 := cmd1.Output() 2963 if err1 != nil { 2964 
if ee, ok := err.(*exec.ExitError); ok { 2965 panic(ee.Stderr) 2966 } 2967 } 2968 t.Assert(string(out1), DeepEquals, ""+"dir3\n"+"dir4\n"+"dir5\n") 2969 2970 //2, rm -rf dir5 2971 cmd := exec.Command("rm", "-rf", dir) 2972 _, err = cmd.Output() 2973 if err != nil { 2974 if ee, ok := err.(*exec.ExitError); ok { 2975 panic(ee.Stderr) 2976 } 2977 } 2978 2979 //3, readdir dir2 2980 fh1, err := os.Open(mountPoint + "/dir2") 2981 t.Assert(err, IsNil) 2982 defer func() { 2983 // close the file if the test failed so we can unmount 2984 if fh1 != nil { 2985 fh1.Close() 2986 } 2987 }() 2988 2989 names, err := fh1.Readdirnames(0) 2990 t.Assert(err, IsNil) 2991 t.Assert(names, DeepEquals, []string{"dir3", "dir4"}) 2992 2993 cmd = exec.Command("ls", mountPoint+"/dir2") 2994 out, err := cmd.Output() 2995 if err != nil { 2996 if ee, ok := err.(*exec.ExitError); ok { 2997 panic(ee.Stderr) 2998 } 2999 } 3000 3001 t.Assert(string(out), DeepEquals, ""+"dir3\n"+"dir4\n") 3002 3003 err = fh1.Close() 3004 t.Assert(err, IsNil) 3005 3006 // 4,reset env 3007 err = fh.Close() 3008 t.Assert(err, IsNil) 3009 3010 err = os.RemoveAll(mountPoint + "/dir2/dir4") 3011 t.Assert(err, IsNil) 3012 3013 } 3014 3015 func (s *GoofysTest) TestDirMTime(t *C) { 3016 s.fs.flags.StatCacheTTL = 1 * time.Minute 3017 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3018 // enable cheap to ensure GET dir/ will come back before LIST dir/ 3019 s.fs.flags.Cheap = true 3020 3021 root := s.getRoot(t) 3022 t.Assert(time.Time{}.Before(root.Attributes.Mtime), Equals, true) 3023 3024 file1, err := s.LookUpInode(t, "dir1") 3025 t.Assert(err, IsNil) 3026 3027 // take mtime from a blob as init time because when we test against 3028 // real cloud, server time can be way off from local time 3029 initTime := file1.Attributes.Mtime 3030 3031 dir1, err := s.LookUpInode(t, "dir1") 3032 t.Assert(err, IsNil) 3033 3034 attr1, _ := dir1.GetAttributes() 3035 m1 := attr1.Mtime 3036 if !s.cloud.Capabilities().DirBlob { 3037 // dir1 doesn't have 
a dir blob, so should take root's mtime 3038 t.Assert(m1, Equals, root.Attributes.Mtime) 3039 } 3040 3041 time.Sleep(2 * time.Second) 3042 3043 dir2, err := dir1.MkDir("dir2") 3044 t.Assert(err, IsNil) 3045 3046 attr2, _ := dir2.GetAttributes() 3047 m2 := attr2.Mtime 3048 t.Assert(m1.Add(2*time.Second).Before(m2), Equals, true) 3049 3050 // dir1 didn't have an explicit mtime, so it should update now 3051 // that we did a mkdir inside it 3052 attr1, _ = dir1.GetAttributes() 3053 m1 = attr1.Mtime 3054 t.Assert(m1, Equals, m2) 3055 3056 // we never added the inode so this will do the lookup again 3057 dir2, err = dir1.LookUp("dir2") 3058 t.Assert(err, IsNil) 3059 3060 // the new time comes from S3 which only has seconds 3061 // granularity 3062 attr2, _ = dir2.GetAttributes() 3063 t.Assert(m2, Not(Equals), attr2.Mtime) 3064 t.Assert(initTime.Add(time.Second).Before(attr2.Mtime), Equals, true) 3065 3066 // different dir2 3067 dir2, err = s.LookUpInode(t, "dir2") 3068 t.Assert(err, IsNil) 3069 3070 attr2, _ = dir2.GetAttributes() 3071 m2 = attr2.Mtime 3072 3073 // this fails because we are listing dir/, which means we 3074 // don't actually see the dir blob dir2/dir3/ (it's returned 3075 // as common prefix), so we can't get dir3's mtime 3076 if false { 3077 // dir2/dir3/ exists and has mtime 3078 s.readDirIntoCache(t, dir2.Id) 3079 dir3, err := s.LookUpInode(t, "dir2/dir3") 3080 t.Assert(err, IsNil) 3081 3082 attr3, _ := dir3.GetAttributes() 3083 // setupDefaultEnv is before mounting 3084 t.Assert(attr3.Mtime.Before(m2), Equals, true) 3085 } 3086 3087 time.Sleep(time.Second) 3088 3089 params := &PutBlobInput{ 3090 Key: "dir2/newfile", 3091 Body: bytes.NewReader([]byte("foo")), 3092 Size: PUInt64(3), 3093 } 3094 _, err = s.cloud.PutBlob(params) 3095 t.Assert(err, IsNil) 3096 3097 s.readDirIntoCache(t, dir2.Id) 3098 3099 newfile, err := dir2.LookUp("newfile") 3100 t.Assert(err, IsNil) 3101 3102 attr2New, _ := dir2.GetAttributes() 3103 // mtime should reflect that of the 
latest object 3104 // GCS can return nano second resolution so truncate to second for compare 3105 t.Assert(attr2New.Mtime.Unix(), Equals, newfile.Attributes.Mtime.Unix()) 3106 t.Assert(m2.Before(attr2New.Mtime), Equals, true) 3107 } 3108 3109 func (s *GoofysTest) TestDirMTimeNoTTL(t *C) { 3110 if s.cloud.Capabilities().DirBlob { 3111 t.Skip("Tests for behavior without dir blob") 3112 } 3113 // enable cheap to ensure GET dir/ will come back before LIST dir/ 3114 s.fs.flags.Cheap = true 3115 3116 dir2, err := s.LookUpInode(t, "dir2") 3117 t.Assert(err, IsNil) 3118 3119 attr2, _ := dir2.GetAttributes() 3120 m2 := attr2.Mtime 3121 3122 // dir2/dir3/ exists and has mtime 3123 s.readDirIntoCache(t, dir2.Id) 3124 dir3, err := s.LookUpInode(t, "dir2/dir3") 3125 t.Assert(err, IsNil) 3126 3127 attr3, _ := dir3.GetAttributes() 3128 // setupDefaultEnv is before mounting but we can't really 3129 // compare the time here since dir3 is s3 server time and dir2 3130 // is local time 3131 t.Assert(attr3.Mtime, Not(Equals), m2) 3132 } 3133 3134 func (s *GoofysTest) TestIssue326(t *C) { 3135 root := s.getRoot(t) 3136 _, err := root.MkDir("folder@name.something") 3137 t.Assert(err, IsNil) 3138 _, err = root.MkDir("folder#1#") 3139 t.Assert(err, IsNil) 3140 3141 s.readDirIntoCache(t, root.Id) 3142 s.assertEntries(t, root, []string{"dir1", "dir2", "dir4", "empty_dir", "empty_dir2", 3143 "file1", "file2", "folder#1#", "folder@name.something", "zero"}) 3144 } 3145 3146 func (s *GoofysTest) TestSlurpFileAndDir(t *C) { 3147 if _, ok := s.cloud.Delegate().(*S3Backend); !ok { 3148 t.Skip("only for S3") 3149 } 3150 prefix := "TestSlurpFileAndDir/" 3151 // fileAndDir is both a file and a directory, and we are 3152 // slurping them together as part of our listing optimization 3153 blobs := []string{ 3154 prefix + "fileAndDir", 3155 prefix + "fileAndDir/a", 3156 } 3157 3158 for _, b := range blobs { 3159 params := &PutBlobInput{ 3160 Key: b, 3161 Body: bytes.NewReader([]byte("foo")), 3162 Size: 
PUInt64(3), 3163 } 3164 _, err := s.cloud.PutBlob(params) 3165 t.Assert(err, IsNil) 3166 } 3167 3168 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3169 s.fs.flags.StatCacheTTL = 1 * time.Minute 3170 3171 in, err := s.LookUpInode(t, prefix[0:len(prefix)-1]) 3172 t.Assert(err, IsNil) 3173 t.Assert(in.dir, NotNil) 3174 3175 s.getRoot(t).dir.seqOpenDirScore = 2 3176 s.readDirIntoCache(t, in.Id) 3177 3178 // should have slurped these 3179 in = in.findChild("fileAndDir") 3180 t.Assert(in, NotNil) 3181 t.Assert(in.dir, NotNil) 3182 3183 in = in.findChild("a") 3184 t.Assert(in, NotNil) 3185 3186 // because of slurping we've decided that this is a directory, 3187 // lookup must _not_ talk to S3 again because otherwise we may 3188 // decide it's a file again because of S3 race 3189 s.disableS3() 3190 in, err = s.LookUpInode(t, prefix+"fileAndDir") 3191 t.Assert(err, IsNil) 3192 3193 s.assertEntries(t, in, []string{"a"}) 3194 } 3195 3196 func (s *GoofysTest) TestAzureDirBlob(t *C) { 3197 if _, ok := s.cloud.(*AZBlob); !ok { 3198 t.Skip("only for Azure blob") 3199 } 3200 3201 fakedir := []string{"dir2", "dir3"} 3202 3203 for _, d := range fakedir { 3204 params := &PutBlobInput{ 3205 Key: "azuredir/" + d, 3206 Body: bytes.NewReader([]byte("")), 3207 Metadata: map[string]*string{ 3208 AzureDirBlobMetadataKey: PString("true"), 3209 }, 3210 Size: PUInt64(0), 3211 } 3212 _, err := s.cloud.PutBlob(params) 3213 t.Assert(err, IsNil) 3214 } 3215 3216 defer func() { 3217 // because our listing changes dir3 to dir3/, test 3218 // cleanup could not delete the blob so we wneed to 3219 // clean up 3220 for _, d := range fakedir { 3221 _, err := s.cloud.DeleteBlob(&DeleteBlobInput{Key: "azuredir/" + d}) 3222 t.Assert(err, IsNil) 3223 } 3224 }() 3225 3226 s.setupBlobs(s.cloud, t, map[string]*string{ 3227 // "azuredir/dir" would have gone here 3228 "azuredir/dir3,/": nil, 3229 "azuredir/dir3/file1": nil, 3230 "azuredir/dir345_is_a_file": nil, 3231 }) 3232 3233 head, err := 
s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir3"}) 3234 t.Assert(err, IsNil) 3235 t.Assert(head.IsDirBlob, Equals, true) 3236 3237 head, err = s.cloud.HeadBlob(&HeadBlobInput{Key: "azuredir/dir345_is_a_file"}) 3238 t.Assert(err, IsNil) 3239 t.Assert(head.IsDirBlob, Equals, false) 3240 3241 list, err := s.cloud.ListBlobs(&ListBlobsInput{Prefix: PString("azuredir/")}) 3242 t.Assert(err, IsNil) 3243 3244 // for flat listing, we rename `dir3` to `dir3/` and add it to Items, 3245 // `dir3` normally sorts before `dir3./`, but after the rename `dir3/` should 3246 // sort after `dir3./` 3247 t.Assert(len(list.Items), Equals, 5) 3248 t.Assert(*list.Items[0].Key, Equals, "azuredir/dir2/") 3249 t.Assert(*list.Items[1].Key, Equals, "azuredir/dir3,/") 3250 t.Assert(*list.Items[2].Key, Equals, "azuredir/dir3/") 3251 t.Assert(*list.Items[3].Key, Equals, "azuredir/dir3/file1") 3252 t.Assert(*list.Items[4].Key, Equals, "azuredir/dir345_is_a_file") 3253 t.Assert(sort.IsSorted(sortBlobItemOutput(list.Items)), Equals, true) 3254 3255 list, err = s.cloud.ListBlobs(&ListBlobsInput{ 3256 Prefix: PString("azuredir/"), 3257 Delimiter: PString("/"), 3258 }) 3259 t.Assert(err, IsNil) 3260 3261 // for delimited listing, we remove `dir3` from items and add `dir3/` to prefixes, 3262 // which should already be there 3263 t.Assert(len(list.Items), Equals, 1) 3264 t.Assert(*list.Items[0].Key, Equals, "azuredir/dir345_is_a_file") 3265 3266 t.Assert(len(list.Prefixes), Equals, 3) 3267 t.Assert(*list.Prefixes[0].Prefix, Equals, "azuredir/dir2/") 3268 t.Assert(*list.Prefixes[1].Prefix, Equals, "azuredir/dir3,/") 3269 t.Assert(*list.Prefixes[2].Prefix, Equals, "azuredir/dir3/") 3270 3271 // finally check that we are reading them in correctly 3272 in, err := s.LookUpInode(t, "azuredir") 3273 t.Assert(err, IsNil) 3274 3275 s.assertEntries(t, in, []string{"dir2", "dir3", "dir3,", "dir345_is_a_file"}) 3276 } 3277 3278 func (s *GoofysTest) TestReadDirLarge(t *C) { 3279 root := s.getRoot(t) 3280 
root.dir.mountPrefix = "empty_dir" 3281 3282 blobs := make(map[string]*string) 3283 expect := make([]string, 0) 3284 for i := 0; i < 998; i++ { 3285 blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil 3286 expect = append(expect, fmt.Sprintf("%04vd", i)) 3287 } 3288 blobs["empty_dir/0998f"] = nil 3289 blobs["empty_dir/0999f"] = nil 3290 blobs["empty_dir/1000f"] = nil 3291 expect = append(expect, "0998f") 3292 expect = append(expect, "0999f") 3293 expect = append(expect, "1000f") 3294 3295 for i := 1001; i < 1003; i++ { 3296 blobs[fmt.Sprintf("empty_dir/%04vd/%v", i, i)] = nil 3297 expect = append(expect, fmt.Sprintf("%04vd", i)) 3298 } 3299 3300 s.setupBlobs(s.cloud, t, blobs) 3301 3302 dh := root.OpenDir() 3303 defer dh.CloseDir() 3304 3305 children := namesOf(s.readDirFully(t, dh)) 3306 sort.Strings(children) 3307 3308 t.Assert(children, DeepEquals, expect) 3309 } 3310 3311 func (s *GoofysTest) newBackend(t *C, bucket string, createBucket bool) (cloud StorageBackend) { 3312 var err error 3313 switch s.cloud.Delegate().(type) { 3314 case *S3Backend: 3315 config, _ := s.fs.flags.Backend.(*S3Config) 3316 s3, err := NewS3(bucket, s.fs.flags, config) 3317 t.Assert(err, IsNil) 3318 3319 s3.aws = hasEnv("AWS") 3320 3321 if s.emulator { 3322 s3.Handlers.Sign.Clear() 3323 s3.Handlers.Sign.PushBack(SignV2) 3324 s3.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) 3325 } 3326 3327 if s3.aws { 3328 cloud = NewS3BucketEventualConsistency(s3) 3329 } else { 3330 cloud = s3 3331 } 3332 case *GCS3: 3333 config, _ := s.fs.flags.Backend.(*S3Config) 3334 cloud, err = NewGCS3(bucket, s.fs.flags, config) 3335 t.Assert(err, IsNil) 3336 case *AZBlob: 3337 config, _ := s.fs.flags.Backend.(*AZBlobConfig) 3338 cloud, err = NewAZBlob(bucket, config) 3339 t.Assert(err, IsNil) 3340 case *ADLv1: 3341 config, _ := s.fs.flags.Backend.(*ADLv1Config) 3342 cloud, err = NewADLv1(bucket, s.fs.flags, config) 3343 t.Assert(err, IsNil) 3344 case *ADLv2: 3345 config, _ := 
s.fs.flags.Backend.(*ADLv2Config) 3346 cloud, err = NewADLv2(bucket, s.fs.flags, config) 3347 t.Assert(err, IsNil) 3348 default: 3349 t.Fatal("unknown backend") 3350 } 3351 3352 if createBucket { 3353 _, err = cloud.MakeBucket(&MakeBucketInput{}) 3354 t.Assert(err, IsNil) 3355 3356 s.removeBucket = append(s.removeBucket, cloud) 3357 } 3358 3359 return 3360 } 3361 3362 func (s *GoofysTest) TestVFS(t *C) { 3363 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3364 cloud2 := s.newBackend(t, bucket, true) 3365 3366 // "mount" this 2nd cloud 3367 in, err := s.LookUpInode(t, "dir4") 3368 t.Assert(in, NotNil) 3369 t.Assert(err, IsNil) 3370 3371 in.dir.cloud = cloud2 3372 in.dir.mountPrefix = "cloud2Prefix/" 3373 3374 rootCloud, rootPath := in.cloud() 3375 t.Assert(rootCloud, NotNil) 3376 t.Assert(rootCloud == cloud2, Equals, true) 3377 t.Assert(rootPath, Equals, "cloud2Prefix") 3378 3379 // the mount would shadow dir4/file5 3380 _, err = in.LookUp("file5") 3381 t.Assert(err, Equals, fuse.ENOENT) 3382 3383 _, fh := in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())}) 3384 err = fh.FlushFile() 3385 t.Assert(err, IsNil) 3386 3387 resp, err := cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile"}) 3388 t.Assert(err, IsNil) 3389 defer resp.Body.Close() 3390 3391 err = s.getRoot(t).Rename("file1", in, "file2") 3392 t.Assert(err, Equals, syscall.EINVAL) 3393 3394 _, err = in.MkDir("subdir") 3395 t.Assert(err, IsNil) 3396 3397 subdirKey := "cloud2Prefix/subdir" 3398 if !cloud2.Capabilities().DirBlob { 3399 subdirKey += "/" 3400 } 3401 3402 _, err = cloud2.HeadBlob(&HeadBlobInput{Key: subdirKey}) 3403 t.Assert(err, IsNil) 3404 3405 subdir, err := s.LookUpInode(t, "dir4/subdir") 3406 t.Assert(err, IsNil) 3407 t.Assert(subdir, NotNil) 3408 t.Assert(subdir.dir, NotNil) 3409 t.Assert(subdir.dir.cloud, IsNil) 3410 3411 subdirCloud, subdirPath := subdir.cloud() 3412 t.Assert(subdirCloud, NotNil) 3413 t.Assert(subdirCloud == cloud2, Equals, true) 3414 
t.Assert(subdirPath, Equals, "cloud2Prefix/subdir") 3415 3416 // create another file inside subdir to make sure that our 3417 // mount check is correct for dir inside the root 3418 _, fh = subdir.Create("testfile2", fuseops.OpMetadata{uint32(os.Getpid())}) 3419 err = fh.FlushFile() 3420 t.Assert(err, IsNil) 3421 3422 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3423 t.Assert(err, IsNil) 3424 defer resp.Body.Close() 3425 3426 err = subdir.Rename("testfile2", in, "testfile2") 3427 t.Assert(err, IsNil) 3428 3429 _, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3430 t.Assert(err, Equals, fuse.ENOENT) 3431 3432 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"}) 3433 t.Assert(err, IsNil) 3434 defer resp.Body.Close() 3435 3436 err = in.Rename("testfile2", subdir, "testfile2") 3437 t.Assert(err, IsNil) 3438 3439 _, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/testfile2"}) 3440 t.Assert(err, Equals, fuse.ENOENT) 3441 3442 resp, err = cloud2.GetBlob(&GetBlobInput{Key: "cloud2Prefix/subdir/testfile2"}) 3443 t.Assert(err, IsNil) 3444 defer resp.Body.Close() 3445 } 3446 3447 func (s *GoofysTest) TestMountsList(t *C) { 3448 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3449 s.fs.flags.StatCacheTTL = 1 * time.Minute 3450 3451 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3452 cloud := s.newBackend(t, bucket, true) 3453 3454 root := s.getRoot(t) 3455 rootCloud := root.dir.cloud 3456 3457 s.fs.MountAll([]*Mount{ 3458 &Mount{"dir4/cloud1", cloud, "", false}, 3459 }) 3460 3461 in, err := s.LookUpInode(t, "dir4") 3462 t.Assert(in, NotNil) 3463 t.Assert(err, IsNil) 3464 t.Assert(int(in.Id), Equals, 2) 3465 3466 s.readDirIntoCache(t, in.Id) 3467 // ensure that listing is listing mounts and root bucket in one go 3468 root.dir.cloud = nil 3469 3470 s.assertEntries(t, in, []string{"cloud1", "file5"}) 3471 3472 c1, err := s.LookUpInode(t, "dir4/cloud1") 3473 t.Assert(err, IsNil) 3474 
// TestMountsNewDir verifies that mounting under a path that does not exist
// yet ("dir5") implicitly creates the intermediate directory inodes.
func (s *GoofysTest) TestMountsNewDir(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cloud := s.newBackend(t, bucket, true)

	// dir5 must not exist before the mount
	_, err := s.LookUpInode(t, "dir5")
	t.Assert(err, NotNil)
	t.Assert(err, Equals, fuse.ENOENT)

	s.fs.MountAll([]*Mount{
		&Mount{"dir5/cloud1", cloud, "", false},
	})

	in, err := s.LookUpInode(t, "dir5")
	t.Assert(err, IsNil)
	t.Assert(in.isDir(), Equals, true)

	c1, err := s.LookUpInode(t, "dir5/cloud1")
	t.Assert(err, IsNil)
	t.Assert(c1.isDir(), Equals, true)
	t.Assert(c1.dir.cloud, Equals, cloud)
}

// TestMountsNewMounts verifies that calling MountAll again with an extended
// mount list adds the new mounts without disturbing ones already in place.
func (s *GoofysTest) TestMountsNewMounts(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cloud := s.newBackend(t, bucket, true)

	// "mount" this 2nd cloud
	in, err := s.LookUpInode(t, "dir4")
	t.Assert(in, NotNil)
	t.Assert(err, IsNil)

	s.fs.MountAll([]*Mount{
		&Mount{"dir4/cloud1", cloud, "", false},
	})

	s.readDirIntoCache(t, in.Id)

	c1, err := s.LookUpInode(t, "dir4/cloud1")
	t.Assert(err, IsNil)
	t.Assert(*c1.Name, Equals, "cloud1")
	t.Assert(c1.dir.cloud == cloud, Equals, true)

	// cloud2 is not mounted yet
	_, err = s.LookUpInode(t, "dir4/cloud2")
	t.Assert(err, Equals, fuse.ENOENT)

	s.fs.MountAll([]*Mount{
		&Mount{"dir4/cloud1", cloud, "", false},
		&Mount{"dir4/cloud2", cloud, "cloudprefix", false},
	})

	c2, err := s.LookUpInode(t, "dir4/cloud2")
	t.Assert(err, IsNil)
	t.Assert(*c2.Name, Equals, "cloud2")
	t.Assert(c2.dir.cloud == cloud, Equals, true)
	t.Assert(c2.dir.mountPrefix, Equals, "cloudprefix")
}
// TestMountsError verifies that a mount whose backend fails to initialize
// exposes only a synthetic INIT_ERR_BLOB file describing the failure, for
// both an immediate init error and a wrapped backend with a failing init key.
func (s *GoofysTest) TestMountsError(t *C) {
	bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	var cloud StorageBackend
	if s3, ok := s.cloud.Delegate().(*S3Backend); ok {
		// S3Backend can't detect bucket doesn't exist because
		// HEAD an object always return 404 NotFound (instead
		// of NoSuchBucket)
		flags := *s3.flags
		config := *s3.config
		// point at an unreachable endpoint to force a failure
		flags.Endpoint = "0.0.0.0:0"
		var err error
		cloud, err = NewS3(bucket, &flags, &config)
		t.Assert(err, IsNil)
	} else if _, ok := s.cloud.(*ADLv1); ok {
		config, _ := s.fs.flags.Backend.(*ADLv1Config)
		// break authentication so requests fail
		config.Authorizer = nil

		var err error
		cloud, err = NewADLv1(bucket, s.fs.flags, config)
		t.Assert(err, IsNil)
	} else if _, ok := s.cloud.(*ADLv2); ok {
		// ADLv2 currently doesn't detect bucket doesn't exist
		cloud = s.newBackend(t, bucket, false)
		adlCloud, _ := cloud.(*ADLv2)
		auth := adlCloud.client.BaseClient.Authorizer
		adlCloud.client.BaseClient.Authorizer = nil
		defer func() {
			// restore the authorizer so later tests are unaffected
			adlCloud.client.BaseClient.Authorizer = auth
		}()
	} else {
		cloud = s.newBackend(t, bucket, false)
	}

	s.fs.MountAll([]*Mount{
		&Mount{"dir4/newerror", StorageBackendInitError{
			fmt.Errorf("foo"),
			Capabilities{},
		}, "errprefix1", false},
		&Mount{"dir4/initerror", &StorageBackendInitWrapper{
			StorageBackend: cloud,
			initKey:        "foobar",
		}, "errprefix2", false},
	})

	errfile, err := s.LookUpInode(t, "dir4/newerror/"+INIT_ERR_BLOB)
	t.Assert(err, IsNil)
	t.Assert(errfile.isDir(), Equals, false)

	// only the error blob is visible inside a failed mount
	_, err = s.LookUpInode(t, "dir4/newerror/not_there")
	t.Assert(err, Equals, fuse.ENOENT)

	errfile, err = s.LookUpInode(t, "dir4/initerror/"+INIT_ERR_BLOB)
	t.Assert(err, IsNil)
	t.Assert(errfile.isDir(), Equals, false)

	_, err = s.LookUpInode(t, "dir4/initerror/not_there")
	t.Assert(err, Equals, fuse.ENOENT)

	in, err := s.LookUpInode(t, "dir4/initerror")
	t.Assert(err, IsNil)
	t.Assert(in, NotNil)

	t.Assert(in.dir.cloud.Capabilities().Name, Equals, cloud.Capabilities().Name)
}
t.Assert(errfile.isDir(), Equals, false) 3599 3600 _, err = s.LookUpInode(t, "dir4/newerror/not_there") 3601 t.Assert(err, Equals, fuse.ENOENT) 3602 3603 errfile, err = s.LookUpInode(t, "dir4/initerror/"+INIT_ERR_BLOB) 3604 t.Assert(err, IsNil) 3605 t.Assert(errfile.isDir(), Equals, false) 3606 3607 _, err = s.LookUpInode(t, "dir4/initerror/not_there") 3608 t.Assert(err, Equals, fuse.ENOENT) 3609 3610 in, err := s.LookUpInode(t, "dir4/initerror") 3611 t.Assert(err, IsNil) 3612 t.Assert(in, NotNil) 3613 3614 t.Assert(in.dir.cloud.Capabilities().Name, Equals, cloud.Capabilities().Name) 3615 } 3616 3617 func (s *GoofysTest) TestMountsMultiLevel(t *C) { 3618 s.fs.flags.TypeCacheTTL = 1 * time.Minute 3619 3620 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3621 cloud := s.newBackend(t, bucket, true) 3622 3623 s.fs.MountAll([]*Mount{ 3624 &Mount{"dir4/sub/dir", cloud, "", false}, 3625 }) 3626 3627 sub, err := s.LookUpInode(t, "dir4/sub") 3628 t.Assert(err, IsNil) 3629 t.Assert(sub.isDir(), Equals, true) 3630 3631 s.assertEntries(t, sub, []string{"dir"}) 3632 } 3633 3634 func (s *GoofysTest) TestMountsNested(t *C) { 3635 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3636 cloud := s.newBackend(t, bucket, true) 3637 s.testMountsNested(t, cloud, []*Mount{ 3638 &Mount{"dir5/in/a/dir", cloud, "a/dir/", false}, 3639 &Mount{"dir5/in/", cloud, "b/", false}, 3640 }) 3641 } 3642 3643 // test that mount order doesn't matter for nested mounts 3644 func (s *GoofysTest) TestMountsNestedReversed(t *C) { 3645 bucket := "goofys-test-" + RandStringBytesMaskImprSrc(16) 3646 cloud := s.newBackend(t, bucket, true) 3647 s.testMountsNested(t, cloud, []*Mount{ 3648 &Mount{"dir5/in/", cloud, "b/", false}, 3649 &Mount{"dir5/in/a/dir", cloud, "a/dir/", false}, 3650 }) 3651 } 3652 3653 func (s *GoofysTest) testMountsNested(t *C, cloud StorageBackend, 3654 mounts []*Mount) { 3655 3656 _, err := s.LookUpInode(t, "dir5") 3657 t.Assert(err, NotNil) 3658 t.Assert(err, Equals, 
// testMountsNested is the shared body of TestMountsNested and
// TestMountsNestedReversed. It registers the given nested mounts and checks
// that intermediate directories resolve, that the deepest mount wins for its
// subtree, and that files created in each subtree land under the correct
// backend key prefix.
func (s *GoofysTest) testMountsNested(t *C, cloud StorageBackend,
	mounts []*Mount) {

	// dir5 must not exist before mounting
	_, err := s.LookUpInode(t, "dir5")
	t.Assert(err, NotNil)
	t.Assert(err, Equals, fuse.ENOENT)

	s.fs.MountAll(mounts)

	in, err := s.LookUpInode(t, "dir5")
	t.Assert(err, IsNil)

	s.readDirIntoCache(t, in.Id)

	// make sure all the intermediate dirs never expire
	time.Sleep(time.Second)
	dir_in, err := s.LookUpInode(t, "dir5/in")
	t.Assert(err, IsNil)
	t.Assert(*dir_in.Name, Equals, "in")

	s.readDirIntoCache(t, dir_in.Id)

	dir_a, err := s.LookUpInode(t, "dir5/in/a")
	t.Assert(err, IsNil)
	t.Assert(*dir_a.Name, Equals, "a")

	s.assertEntries(t, dir_a, []string{"dir"})

	dir_dir, err := s.LookUpInode(t, "dir5/in/a/dir")
	t.Assert(err, IsNil)
	t.Assert(*dir_dir.Name, Equals, "dir")
	t.Assert(dir_dir.dir.cloud == cloud, Equals, true)

	// a file created under the outer mount goes to the "b/" prefix
	_, fh := dir_in.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
	err = fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err := cloud.GetBlob(&GetBlobInput{Key: "b/testfile"})
	t.Assert(err, IsNil)
	defer resp.Body.Close()

	// a file created under the inner mount goes to the "a/dir/" prefix
	_, fh = dir_dir.Create("testfile", fuseops.OpMetadata{uint32(os.Getpid())})
	err = fh.FlushFile()
	t.Assert(err, IsNil)

	resp, err = cloud.GetBlob(&GetBlobInput{Key: "a/dir/testfile"})
	t.Assert(err, IsNil)
	defer resp.Body.Close()

	s.assertEntries(t, in, []string{"in"})
}

// verifyFileData asserts on the contents of path under mountPoint: with a
// non-nil content the file must exist and match it (modulo surrounding
// whitespace); with nil content the file must not exist at all.
func verifyFileData(t *C, mountPoint string, path string, content *string) {
	if !strings.HasSuffix(mountPoint, "/") {
		mountPoint = mountPoint + "/"
	}
	path = mountPoint + path
	data, err := ioutil.ReadFile(path)
	comment := Commentf("failed while verifying %v", path)
	if content != nil {
		t.Assert(err, IsNil, comment)
		t.Assert(strings.TrimSpace(string(data)), Equals, *content, comment)
	} else {
		t.Assert(err, Not(IsNil), comment)
		t.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true, comment)
	}
}
// TestNestedMountUnmountSimple mounts a child bucket at "childmnt" inside a
// fuse-mounted goofys root, and verifies that files resolve from the child
// while mounted and fall back to the parent bucket after unmounting.
func (s *GoofysTest) TestNestedMountUnmountSimple(t *C) {
	childBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	childCloud := s.newBackend(t, childBucket, true)

	parFileContent := "parent"
	childFileContent := "child"
	parEnv := map[string]*string{
		"childmnt/x/in_child_and_par": &parFileContent,
		"childmnt/x/in_par_only":      &parFileContent,
		"nonchildmnt/something":       &parFileContent,
	}
	childEnv := map[string]*string{
		"x/in_child_only":    &childFileContent,
		"x/in_child_and_par": &childFileContent,
	}
	s.setupBlobs(s.cloud, t, parEnv)
	s.setupBlobs(childCloud, t, childEnv)

	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
	s.mount(t, rootMountPath)
	defer s.umount(t, rootMountPath)
	// Files under /tmp/fusetesting/ should all be from goofys root.
	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)

	childMount := &Mount{"childmnt", childCloud, "", false}
	s.fs.Mount(childMount)
	// Now files under /tmp/fusetesting/childmnt should be from childBucket
	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", nil)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &childFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", &childFileContent)
	// /tmp/fusetesting/nonchildmnt should be from parent bucket.
	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)

	s.fs.Unmount(childMount.name)
	// Child is unmounted. So files under /tmp/fusetesting/ should all be from goofys root.
	verifyFileData(t, rootMountPath, "childmnt/x/in_par_only", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_and_par", &parFileContent)
	verifyFileData(t, rootMountPath, "nonchildmnt/something", &parFileContent)
	verifyFileData(t, rootMountPath, "childmnt/x/in_child_only", nil)
}
// TestUnmountBucketWithChild stacks two mounts ("c" and "c/c") over the root
// bucket and verifies that unmounting the outer "c" mount leaves the inner
// "c/c" mount intact.
func (s *GoofysTest) TestUnmountBucketWithChild(t *C) {
	// This bucket will be mounted at ${goofysroot}/c
	cBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	cCloud := s.newBackend(t, cBucket, true)

	// This bucket will be mounted at ${goofysroot}/c/c
	ccBucket := "goofys-test-" + RandStringBytesMaskImprSrc(16)
	ccCloud := s.newBackend(t, ccBucket, true)

	pFileContent := "parent"
	cFileContent := "child"
	ccFileContent := "childchild"
	pEnv := map[string]*string{
		"c/c/x/foo": &pFileContent,
	}
	cEnv := map[string]*string{
		"c/x/foo": &cFileContent,
	}
	ccEnv := map[string]*string{
		"x/foo": &ccFileContent,
	}

	s.setupBlobs(s.cloud, t, pEnv)
	s.setupBlobs(cCloud, t, cEnv)
	s.setupBlobs(ccCloud, t, ccEnv)

	rootMountPath := "/tmp/fusetesting/" + RandStringBytesMaskImprSrc(16)
	s.mount(t, rootMountPath)
	defer s.umount(t, rootMountPath)
	// c/c/foo should come from root mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &pFileContent)

	cMount := &Mount{"c", cCloud, "", false}
	s.fs.Mount(cMount)
	// c/c/foo should come from "c" mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &cFileContent)

	ccMount := &Mount{"c/c", ccCloud, "", false}
	s.fs.Mount(ccMount)
	// c/c/foo should come from "c/c" mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)

	s.fs.Unmount(cMount.name)
	// c/c/foo should still come from "c/c" mount.
	verifyFileData(t, rootMountPath, "c/c/x/foo", &ccFileContent)
}
// TestRmImplicitDir removes a directory that exists only implicitly (via
// object key prefixes) while the process cwd is inside it, then checks that
// the root listing no longer shows it.
func (s *GoofysTest) TestRmImplicitDir(t *C) {
	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	// restore cwd so the umount is not blocked by a busy mount point
	defer os.Chdir("/")

	dir, err := os.Open(mountPoint + "/dir2")
	t.Assert(err, IsNil)
	defer dir.Close()

	err = dir.Chdir()
	t.Assert(err, IsNil)

	err = os.RemoveAll(mountPoint + "/dir2")
	t.Assert(err, IsNil)

	root, err := os.Open(mountPoint)
	t.Assert(err, IsNil)
	defer root.Close()

	files, err := root.Readdirnames(0)
	t.Assert(err, IsNil)
	// dir2 must be gone; everything else stays
	t.Assert(files, DeepEquals, []string{
		"dir1", "dir4", "empty_dir", "empty_dir2", "file1", "file2", "zero",
	})
}

// TestMount mounts the test bucket and then blocks until interrupted; it is
// meant for interactive debugging and is skipped when MOUNT=false.
func (s *GoofysTest) TestMount(t *C) {
	if os.Getenv("MOUNT") == "false" {
		t.Skip("Not mounting")
	}

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	log.Printf("Mounted at %v", mountPoint)

	// wait for SIGINT/SIGTERM before tearing the mount down
	c := make(chan os.Signal, 2)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	<-c
}

// Checks if 2 sorted lists are equal. Returns a helpful error if they differ.
func checkSortedListsAreEqual(l1, l2 []string) error {
	// single merge-style pass collecting the elements unique to each side,
	// each tagged with its index as "i:value"
	var extra1, extra2 []string
	j, k := 0, 0
	for j < len(l1) && k < len(l2) {
		switch {
		case l1[j] == l2[k]:
			j++
			k++
		case l1[j] < l2[k]:
			extra1 = append(extra1, fmt.Sprintf("%d:%v", j, l1[j]))
			j++
		default:
			extra2 = append(extra2, fmt.Sprintf("%d:%v", k, l2[k]))
			k++
		}
	}
	for ; j < len(l1); j++ {
		extra1 = append(extra1, fmt.Sprintf("%d:%v", j, l1[j]))
	}
	for ; k < len(l2); k++ {
		extra2 = append(extra2, fmt.Sprintf("%d:%v", k, l2[k]))
	}

	if len(extra1) == 0 && len(extra2) == 0 {
		return nil
	}

	// The lists can contain a lot of elements. Show at most ten of each
	// and summarize the rest as "and x more".
	summarize := func(l []string) string {
		shown := l
		if len(l) > 10 {
			shown = append([]string{}, l[:10]...)
			shown = append(shown, fmt.Sprintf("and %d more", len(l)-10))
		}
		return strings.Join(shown, ", ")
	}
	return fmt.Errorf("only l1: %+v, only l2: %+v",
		summarize(extra1), summarize(extra2))
}
// TestReadDirDash verifies paginated listings around the '-' character
// (which sorts before '/'): results must be correct both before and after
// all "2019-*" keys have been consumed. See dir.go::listBlobsSafe.
func (s *GoofysTest) TestReadDirDash(t *C) {
	if s.azurite {
		t.Skip("ADLv1 doesn't have pagination")
	}
	root := s.getRoot(t)
	root.dir.mountPrefix = "prefix"

	// SETUP
	// Add the following blobs
	// - prefix/2019/1
	// - prefix/2019-0000 to prefix/2019-4999
	// - prefix/20190000 to prefix/20194999
	// Fetching this result will need 3 pages in azure (pagesize 5k) and 11 pages
	// in amazon (pagesize 1k)
	// This setup will verify that we paginate and return results correctly before and after
	// seeing all contents that have a '-' ('-' < '/'). For more context read the comments in
	// dir.go::listBlobsSafe.
	blobs := make(map[string]*string)
	expect := []string{"2019"}
	blobs["prefix/2019/1"] = nil
	for i := 0; i < 5000; i++ {
		name := fmt.Sprintf("2019-%04d", i)
		expect = append(expect, name)
		blobs["prefix/"+name] = nil
	}
	for i := 0; i < 5000; i++ {
		name := fmt.Sprintf("2019%04d", i)
		expect = append(expect, name)
		blobs["prefix/"+name] = nil
	}
	s.setupBlobs(s.cloud, t, blobs)

	// Read the directory and verify its contents.
	dh := root.OpenDir()
	defer dh.CloseDir()

	children := namesOf(s.readDirFully(t, dh))
	t.Assert(checkSortedListsAreEqual(children, expect), IsNil)
}

// TestWriteListFlush checks that a created-but-unflushed file still appears
// in its parent's listing, and that the listing does not invalidate the
// in-flight inode before or after the flush.
func (s *GoofysTest) TestWriteListFlush(t *C) {
	root := s.getRoot(t)
	root.dir.mountPrefix = "this_test/"

	dir, err := root.MkDir("dir")
	t.Assert(err, IsNil)
	s.fs.insertInode(root, dir)

	in, fh := dir.Create("file1", fuseops.OpMetadata{})
	t.Assert(in, NotNil)
	t.Assert(fh, NotNil)
	s.fs.insertInode(dir, in)

	s.assertEntries(t, dir, []string{"file1"})

	// in should still be valid
	t.Assert(in.Parent, NotNil)
	t.Assert(in.Parent, Equals, dir)
	fh.FlushFile()

	s.assertEntries(t, dir, []string{"file1"})
}

// includes is a gocheck checker asserting that a container includes a value.
type includes struct{}

// Info describes the checker to the gocheck framework.
func (c includes) Info() *CheckerInfo {
	return &CheckerInfo{Name: "includes", Params: []string{"obtained", "expected"}}
}

// Check reports whether params[0] (an array, slice, or string) contains an
// element that is deep-equal to params[1].
func (c includes) Check(params []interface{}, names []string) (res bool, error string) {
	arr := reflect.ValueOf(params[0])
	switch arr.Kind() {
	case reflect.Array, reflect.Slice, reflect.String:
	default:
		panic(fmt.Sprintf("%v is not an array", names[0]))
	}

	for i := 0; i < arr.Len(); i++ {
		v := arr.Index(i).Interface()
		res, error = DeepEquals.Check([]interface{}{v, params[1]}, names)
		if res {
			return
		} else {
			// clear the per-element mismatch message before trying the next
			error = ""
		}

		res = false
	}

	return
}
// TestWriteUnlinkFlush checks that flushing a file which was unlinked before
// its flush neither fails nor resurrects the entry in the directory listing.
func (s *GoofysTest) TestWriteUnlinkFlush(t *C) {
	root := s.getRoot(t)

	dir, err := root.MkDir("dir")
	t.Assert(err, IsNil)
	s.fs.insertInode(root, dir)

	in, fh := dir.Create("deleted", fuseops.OpMetadata{})
	t.Assert(in, NotNil)
	t.Assert(fh, NotNil)
	s.fs.insertInode(dir, in)

	err = dir.Unlink("deleted")
	t.Assert(err, IsNil)

	// the flush of the unlinked file must succeed without touching the cloud
	s.disableS3()
	err = fh.FlushFile()
	t.Assert(err, IsNil)

	dh := dir.OpenDir()
	defer dh.CloseDir()
	t.Assert(namesOf(s.readDirFully(t, dh)), Not(includes{}), "deleted")
}

// TestIssue474 is a regression test: re-listing an expired directory whose
// contents were slurped in as a side effect of listing a sibling must not
// drop its children.
func (s *GoofysTest) TestIssue474(t *C) {
	s.fs.flags.TypeCacheTTL = 1 * time.Second
	s.fs.flags.Cheap = true

	p := "this_test/"
	root := s.getRoot(t)
	root.dir.mountPrefix = "this_test/"
	// force the sequential-listing (slurp) heuristic to kick in
	root.dir.seqOpenDirScore = 2

	blobs := make(map[string]*string)

	in := []string{
		"1/a/b",
		"2/c/d",
	}

	for _, s := range in {
		blobs[p+s] = nil
	}

	s.setupBlobs(s.cloud, t, blobs)

	dir1, err := s.LookUpInode(t, "1")
	t.Assert(err, IsNil)
	// this would list 1/ and slurp in 2/c/d at the same time
	s.assertEntries(t, dir1, []string{"a"})

	// 2/ will expire and require re-listing. ensure that we don't
	// remove any children as stale as we update
	time.Sleep(time.Second)

	dir2, err := s.LookUpInode(t, "2")
	t.Assert(err, IsNil)
	s.assertEntries(t, dir2, []string{"c"})
}
// TestReadExternalChangesFuse verifies that an out-of-band update to a blob
// becomes visible through the fuse mount after StatCacheTTL, and that the
// following read is served without talking to the backend.
func (s *GoofysTest) TestReadExternalChangesFuse(t *C) {
	s.fs.flags.StatCacheTTL = 1 * time.Second

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	file := "file1"
	filePath := mountPoint + "/file1"

	buf, err := ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, file)

	// update the blob directly in the backend, bypassing the mount
	update := "file2"
	_, err = s.cloud.PutBlob(&PutBlobInput{
		Key:  file,
		Body: bytes.NewReader([]byte(update)),
		Size: PUInt64(uint64(len(update))),
	})
	t.Assert(err, IsNil)

	// wait for the stat cache to expire
	time.Sleep(1 * time.Second)

	buf, err = ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, update)

	// the next read shouldn't talk to cloud
	root := s.getRoot(t)
	root.dir.cloud = &StorageBackendInitError{
		syscall.EINVAL, *root.dir.cloud.Capabilities(),
	}

	buf, err = ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, update)
}

// TestReadMyOwnWriteFuse runs the read-own-write scenario without an
// external update.
func (s *GoofysTest) TestReadMyOwnWriteFuse(t *C) {
	s.testReadMyOwnWriteFuse(t, false)
}

// TestReadMyOwnWriteExternalChangesFuse runs the read-own-write scenario
// with a concurrent external update to the same blob.
func (s *GoofysTest) TestReadMyOwnWriteExternalChangesFuse(t *C) {
	s.testReadMyOwnWriteFuse(t, true)
}
// testReadMyOwnWriteFuse writes to a file through the fuse mount and checks
// which copy (kernel page cache vs. cloud) subsequent reads observe, both
// with and without an external update to the same blob.
func (s *GoofysTest) testReadMyOwnWriteFuse(t *C, externalUpdate bool) {
	s.fs.flags.StatCacheTTL = 1 * time.Second

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	file := "file1"
	filePath := mountPoint + "/file1"

	buf, err := ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, file)

	if externalUpdate {
		// update the blob behind the mount's back
		update := "file2"
		_, err = s.cloud.PutBlob(&PutBlobInput{
			Key:  file,
			Body: bytes.NewReader([]byte(update)),
			Size: PUInt64(uint64(len(update))),
		})
		t.Assert(err, IsNil)

		// let the stat cache expire so the mount notices the update
		time.Sleep(s.fs.flags.StatCacheTTL)
	}

	fh, err := os.Create(filePath)
	t.Assert(err, IsNil)

	_, err = fh.WriteString("file3")
	t.Assert(err, IsNil)
	// we can't flush yet because if we did, we would be reading
	// the new copy from cloud and that's not the point of this
	// test
	defer func() {
		// want fh to be late-binding because we re-use the variable
		fh.Close()
	}()

	buf, err = ioutil.ReadFile(filePath)
	t.Assert(err, IsNil)
	if externalUpdate {
		// if there was an external update, we had set
		// KeepPageCache to false on os.Create above, which
		// causes our write to not be in cache, and read here
		// will go to cloud
		t.Assert(string(buf), Equals, "file2")
	} else {
		t.Assert(string(buf), Equals, "file3")
	}

	err = fh.Close()
	t.Assert(err, IsNil)

	time.Sleep(s.fs.flags.StatCacheTTL)

	// wrap the backend so we can inject errors on further cloud reads
	root := s.getRoot(t)
	cloud := &TestBackend{root.dir.cloud, nil}
	root.dir.cloud = cloud

	fh, err = os.Open(filePath)
	t.Assert(err, IsNil)

	if !externalUpdate {
		// we flushed and ttl expired, next lookup should
		// realize nothing is changed and NOT invalidate the
		// cache. Except ADLv1 because PUT there doesn't
		// return the mtime, so the open above will think the
		// file is updated and not re-use cache
		if _, adlv1 := s.cloud.(*ADLv1); !adlv1 {
			cloud.err = fuse.EINVAL
		}
	} else {
		// if there was externalUpdate, we wrote our own
		// update with KeepPageCache=false, so we should read
		// from the cloud here
	}

	buf, err = ioutil.ReadAll(fh)
	t.Assert(err, IsNil)
	t.Assert(string(buf), Equals, "file3")
}
// TestReadMyOwnNewFileFuse creates a brand-new file through the mount and
// writes to it without flushing; reading back our own unflushed write is
// currently not supported, so the read-back assertions are disabled.
func (s *GoofysTest) TestReadMyOwnNewFileFuse(t *C) {
	s.fs.flags.StatCacheTTL = 1 * time.Second
	s.fs.flags.TypeCacheTTL = 1 * time.Second

	mountPoint := "/tmp/mnt" + s.fs.bucket

	s.mount(t, mountPoint)
	defer s.umount(t, mountPoint)

	filePath := mountPoint + "/filex"

	// jacobsa/fuse doesn't support setting OpenKeepCache on
	// CreateFile but even after manually setting it in
	// fuse/conversions.go, we still receive read ops instead of
	// being handled by kernel
	fh, err := os.Create(filePath)
	t.Assert(err, IsNil)

	_, err = fh.WriteString("filex")
	t.Assert(err, IsNil)
	// we can't flush yet because if we did, we would be reading
	// the new copy from cloud and that's not the point of this
	// test
	defer fh.Close()

	// disabled: we can't actually read back our own update
	//buf, err := ioutil.ReadFile(filePath)
	//t.Assert(err, IsNil)
	//t.Assert(string(buf), Equals, "filex")
}