github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/simplefs/simplefs_test.go (about) 1 // Copyright 2016-2017 Keybase Inc. All rights reserved. 2 // Use of this source code is governed by a BSD 3 // license that can be found in the LICENSE file. 4 5 package simplefs 6 7 import ( 8 "archive/zip" 9 "context" 10 "encoding/json" 11 "fmt" 12 13 "os" 14 "path" 15 "path/filepath" 16 "sort" 17 "strings" 18 "sync" 19 "testing" 20 "time" 21 22 "github.com/keybase/client/go/kbfs/data" 23 "github.com/keybase/client/go/kbfs/env" 24 "github.com/keybase/client/go/kbfs/libfs" 25 "github.com/keybase/client/go/kbfs/libkbfs" 26 "github.com/keybase/client/go/kbfs/test/clocktest" 27 "github.com/keybase/client/go/kbfs/tlf" 28 "github.com/keybase/client/go/kbfs/tlfhandle" 29 "github.com/keybase/client/go/libkb" 30 "github.com/keybase/client/go/protocol/keybase1" 31 "github.com/stretchr/testify/require" 32 billy "gopkg.in/src-d/go-billy.v4" 33 ) 34 35 const TempDirBase = "." 36 37 func syncFS(ctx context.Context, t *testing.T, fs *SimpleFS, tlf string) { 38 ctx, err := fs.startOpWrapContext(ctx) 39 require.NoError(t, err) 40 remoteFS, _, err := fs.getFS(ctx, keybase1.NewPathWithKbfsPath(tlf)) 41 require.NoError(t, err) 42 if fs, ok := remoteFS.(*libfs.FS); ok { 43 err = fs.SyncAll() 44 } else if fs, ok := remoteFS.(*fsBlocker); ok { 45 err = fs.SyncAll() 46 } 47 require.NoError(t, err) 48 } 49 50 func closeSimpleFS(ctx context.Context, t *testing.T, fs *SimpleFS) { 51 // Sync in-memory data to disk before shutting down and flushing 52 // the journal. 53 syncFS(ctx, t, fs, "/private/jdoe") 54 err := fs.Shutdown(ctx) 55 require.NoError(t, err) 56 err = fs.config.Shutdown(ctx) 57 require.NoError(t, err) 58 } 59 60 func deleteTempLocalPath(path keybase1.Path) { 61 os.RemoveAll(path.Local()) 62 } 63 64 // "pending" tells whether we expect the operation to still be 65 // there, because there is no "none" in AsyncOps 66 func checkPendingOp(ctx context.Context, 67 t *testing.T, 68 sfs *SimpleFS, 69 opid keybase1.OpID, 70 expectedOp keybase1.AsyncOps, 71 src keybase1.Path, 72 dest keybase1.Path, 73 pending bool) { 74 75 // TODO: what do we expect the progress to be? 76 _, err := sfs.SimpleFSCheck(ctx, opid) 77 if pending { 78 require.NoError(t, err) 79 } else { 80 require.Error(t, err) 81 } 82 83 ops, err := sfs.SimpleFSGetOps(ctx) 84 require.NoError(t, err) 85 86 if !pending { 87 require.Len(t, ops, 0, "Expected zero pending operations") 88 return 89 } 90 91 require.True(t, len(ops) > 0, "Expected at least one pending operation") 92 93 o := ops[0] 94 op, err := o.AsyncOp() 95 require.NoError(t, err) 96 require.Equal(t, expectedOp, op, "Expected at least one pending operation") 97 98 // TODO: verify read/write arguments 99 switch op { 100 case keybase1.AsyncOps_LIST: 101 list := o.List() 102 require.Equal(t, list.Path, src, "Expected matching path in operation") 103 case keybase1.AsyncOps_LIST_RECURSIVE: 104 list := o.ListRecursive() 105 require.Equal(t, list.Path, src, "Expected matching path in operation") 106 case keybase1.AsyncOps_LIST_RECURSIVE_TO_DEPTH: 107 list := o.ListRecursiveToDepth() 108 require.Equal(t, list.Path, src, "Expected matching path in operation") 109 // TODO: read is not async 110 case keybase1.AsyncOps_READ: 111 read := o.Read() 112 require.Equal(t, read.Path, src, "Expected matching path in operation") 113 // TODO: write is not asynce 114 case keybase1.AsyncOps_WRITE: 115 write := o.Write() 116 require.Equal(t, write.Path, src, "Expected matching path in operation") 117 case keybase1.AsyncOps_COPY: 118 copy := o.Copy() 119 require.Equal(t, copy.Src, src, "Expected matching path in operation") 120 require.Equal(t, copy.Dest, dest, "Expected matching path in operation") 121 case keybase1.AsyncOps_MOVE: 122 move := o.Move() 123 require.Equal(t, move.Src, src, "Expected matching path in operation") 124 require.Equal(t, move.Dest, dest, "Expected matching path in operation") 125 case keybase1.AsyncOps_REMOVE: 126 remove := o.Remove() 127 require.Equal(t, remove.Path, src, "Expected matching path in operation") 128 } 129 } 130 131 func testListWithFilterAndUsername( 132 ctx context.Context, t *testing.T, sfs *SimpleFS, path keybase1.Path, 133 filter keybase1.ListFilter, username string, expectedEntries ...string) { 134 opid, err := sfs.SimpleFSMakeOpid(ctx) 135 require.NoError(t, err) 136 err = sfs.SimpleFSList(ctx, keybase1.SimpleFSListArg{ 137 OpID: opid, 138 Path: path, 139 Filter: filter, 140 }) 141 require.NoError(t, err) 142 checkPendingOp( 143 ctx, t, sfs, opid, keybase1.AsyncOps_LIST, path, keybase1.Path{}, true) 144 err = sfs.SimpleFSWait(ctx, opid) 145 require.NoError(t, err) 146 listResult, err := sfs.SimpleFSReadList(ctx, opid) 147 require.NoError(t, err) 148 require.Len(t, listResult.Entries, len(expectedEntries)) 149 sort.Slice(listResult.Entries, func(i, j int) bool { 150 return strings.Compare(listResult.Entries[i].Name, 151 listResult.Entries[j].Name) < 0 152 }) 153 sort.Strings(expectedEntries) 154 for i, entry := range listResult.Entries { 155 require.Equal(t, expectedEntries[i], entry.Name) 156 require.Equal(t, username, entry.LastWriterUnverified.Username) 157 } 158 159 // Assume we've exhausted the list now, so expect error 160 _, err = sfs.SimpleFSReadList(ctx, opid) 161 require.Error(t, err) 162 163 // Verify error on double wait 164 err = sfs.SimpleFSWait(ctx, opid) 165 require.Error(t, err) 166 } 167 168 func testList( 169 ctx context.Context, t *testing.T, sfs *SimpleFS, path keybase1.Path, 170 expectedEntries ...string) { 171 testListWithFilterAndUsername( 172 ctx, t, sfs, path, keybase1.ListFilter_NO_FILTER, "jdoe", 173 expectedEntries...) 174 } 175 176 func TestStatNonExistent(t *testing.T) { 177 ctx := context.Background() 178 config := libkbfs.MakeTestConfigOrBust(t, "dog", "cat") 179 defer func() { 180 err := config.Shutdown(ctx) 181 require.NoError(t, err) 182 }() 183 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 184 185 t.Logf("/private/dog,cat should be writable for dog") 186 p := keybase1.NewPathWithKbfsPath("/private/dog,cat") 187 de, err := sfs.SimpleFSStat(ctx, keybase1.SimpleFSStatArg{ 188 Path: p, 189 }) 190 require.NoError(t, err) 191 require.True(t, de.Writable) 192 193 t.Logf("/private/cat#dog should not be writable for dog") 194 p = keybase1.NewPathWithKbfsPath("/private/cat#dog") 195 de, err = sfs.SimpleFSStat(ctx, keybase1.SimpleFSStatArg{ 196 Path: p, 197 }) 198 require.NoError(t, err) 199 require.False(t, de.Writable) 200 } 201 202 func TestSymlink(t *testing.T) { 203 ctx := context.Background() 204 config := libkbfs.MakeTestConfigOrBust(t, "jdoe") 205 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 206 defer closeSimpleFS(ctx, t, sfs) 207 208 t.Logf("Make a file and then symlink it") 209 p := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 210 target := pathAppend(p, `test1.txt`) 211 writeRemoteFile(ctx, t, sfs, target, []byte(`foo`)) 212 linkName := "link" 213 link := pathAppend(p, linkName) 214 targetName := path.Base(target.String()) 215 err := sfs.SimpleFSSymlink(ctx, keybase1.SimpleFSSymlinkArg{ 216 Target: targetName, 217 Link: link, 218 }) 219 require.NoError(t, err) 220 require.Equal(t, "foo", 221 string(readRemoteFile(ctx, t, sfs, link))) 222 223 t.Log("Make sure the symlink ls entry has a target") 224 opid, err := sfs.SimpleFSMakeOpid(ctx) 225 require.NoError(t, err) 226 err = sfs.SimpleFSList(ctx, keybase1.SimpleFSListArg{ 227 OpID: opid, 228 Path: p, 229 Filter: keybase1.ListFilter_NO_FILTER, 230 }) 231 require.NoError(t, err) 232 checkPendingOp( 233 ctx, t, sfs, opid, keybase1.AsyncOps_LIST, p, keybase1.Path{}, true) 234 err = sfs.SimpleFSWait(ctx, opid) 235 require.NoError(t, err) 236 listResult, err := sfs.SimpleFSReadList(ctx, opid) 237 require.NoError(t, err) 238 require.Len(t, listResult.Entries, 2) 239 found := false 240 for _, e := range listResult.Entries { 241 if e.Name != linkName { 242 continue 243 } 244 require.Equal(t, targetName, e.SymlinkTarget) 245 found = true 246 } 247 require.True(t, found) 248 249 // Regression for HOTPOT-1276 250 t.Log("Make sure a link called . will fail") 251 badLink := pathAppend(p, `.`) 252 err = sfs.SimpleFSSymlink(ctx, keybase1.SimpleFSSymlinkArg{ 253 Target: path.Base(target.String()), 254 Link: badLink, 255 }) 256 require.Error(t, err) 257 258 // Regression test for HOTPOT-2007. 259 t.Log("Make sure a symlink can be deleted") 260 opid, err = sfs.SimpleFSMakeOpid(ctx) 261 require.NoError(t, err) 262 err = sfs.SimpleFSRemove(ctx, keybase1.SimpleFSRemoveArg{ 263 OpID: opid, 264 Path: link, 265 Recursive: true, 266 }) 267 require.NoError(t, err) 268 checkPendingOp( 269 ctx, t, sfs, opid, keybase1.AsyncOps_REMOVE, link, keybase1.Path{}, 270 true) 271 err = sfs.SimpleFSWait(ctx, opid) 272 require.NoError(t, err) 273 testList(ctx, t, sfs, p, "test1.txt") 274 } 275 276 func TestList(t *testing.T) { 277 ctx := context.Background() 278 config := libkbfs.MakeTestConfigOrBust(t, "jdoe") 279 clock := &clocktest.TestClock{} 280 clock.Set(time.Now()) 281 config.SetClock(clock) 282 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 283 defer closeSimpleFS(ctx, t, sfs) 284 285 pathRoot := keybase1.NewPathWithKbfsPath(`/`) 286 testListWithFilterAndUsername( 287 ctx, t, sfs, pathRoot, keybase1.ListFilter_NO_FILTER, "", 288 "private", "public", "team") 289 290 pathPrivate := keybase1.NewPathWithKbfsPath(`/private`) 291 testListWithFilterAndUsername( 292 ctx, t, sfs, pathPrivate, keybase1.ListFilter_NO_FILTER, "", 293 "jdoe") 294 295 t.Log("List directory before it's created") 296 path1 := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 297 testList(ctx, t, sfs, path1) 298 299 t.Log("Shouldn't have created the TLF") 300 h, err := tlfhandle.ParseHandle( 301 ctx, config.KBPKI(), config.MDOps(), config, "jdoe", tlf.Private) 302 require.NoError(t, err) 303 rootNode, _, err := config.KBFSOps().GetRootNode( 304 ctx, h, data.MasterBranch) 305 require.NoError(t, err) 306 require.Nil(t, rootNode) 307 308 clock.Add(1 * time.Minute) 309 syncFS(ctx, t, sfs, "/private/jdoe") 310 311 rev1Time := clock.Now() 312 clock.Add(1 * time.Minute) 313 314 // make a temp remote directory + files we will clean up later 315 writeRemoteFile(ctx, t, sfs, pathAppend(path1, `test1.txt`), []byte(`foo`)) 316 syncFS(ctx, t, sfs, "/private/jdoe") // Make a revision. 317 writeRemoteFile(ctx, t, sfs, pathAppend(path1, `test2.txt`), []byte(`foo`)) 318 writeRemoteFile(ctx, t, sfs, pathAppend(path1, `.testfile`), []byte(`foo`)) 319 320 testListWithFilterAndUsername( 321 ctx, t, sfs, path1, keybase1.ListFilter_FILTER_ALL_HIDDEN, "jdoe", 322 "test1.txt", "test2.txt") 323 324 testList(ctx, t, sfs, pathAppend(path1, `test1.txt`), "test1.txt") 325 326 // Check for hidden files too. 327 testList( 328 ctx, t, sfs, path1, "test1.txt", "test2.txt", ".testfile") 329 330 // A single, requested hidden file shows up even if the filter is on. 331 testList(ctx, t, sfs, pathAppend(path1, `.testfile`), ".testfile") 332 333 // Test that the first archived revision shows no directory entries. 334 pathArchivedRev1 := keybase1.NewPathWithKbfsArchived( 335 keybase1.KBFSArchivedPath{ 336 Path: `/private/jdoe`, 337 ArchivedParam: keybase1.NewKBFSArchivedParamWithRevision(1), 338 }) 339 testList(ctx, t, sfs, pathArchivedRev1) 340 341 pathArchivedRev2 := keybase1.NewPathWithKbfsArchived( 342 keybase1.KBFSArchivedPath{ 343 Path: `/private/jdoe`, 344 ArchivedParam: keybase1.NewKBFSArchivedParamWithRevision(2), 345 }) 346 testList(ctx, t, sfs, pathArchivedRev2, "test1.txt") 347 348 // Same test, with by-time archived paths. 349 pathArchivedTime := keybase1.NewPathWithKbfsArchived( 350 keybase1.KBFSArchivedPath{ 351 Path: `/private/jdoe`, 352 ArchivedParam: keybase1.NewKBFSArchivedParamWithTime( 353 keybase1.ToTime(rev1Time)), 354 }) 355 testList(ctx, t, sfs, pathArchivedTime) 356 357 pathArchivedTimeString := keybase1.NewPathWithKbfsArchived( 358 keybase1.KBFSArchivedPath{ 359 Path: `/private/jdoe`, 360 ArchivedParam: keybase1.NewKBFSArchivedParamWithTimeString( 361 rev1Time.String()), 362 }) 363 testList(ctx, t, sfs, pathArchivedTimeString) 364 365 pathArchivedRelTimeString := keybase1.NewPathWithKbfsArchived( 366 keybase1.KBFSArchivedPath{ 367 Path: `/private/jdoe`, 368 ArchivedParam: keybase1.NewKBFSArchivedParamWithRelTimeString( 369 "45s"), 370 }) 371 testList(ctx, t, sfs, pathArchivedRelTimeString) 372 373 clock.Add(1 * time.Minute) 374 testList(ctx, t, sfs, pathArchivedRelTimeString, "test1.txt") 375 } 376 377 func TestListRecursive(t *testing.T) { 378 ctx := context.Background() 379 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 380 defer closeSimpleFS(ctx, t, sfs) 381 382 t.Log("List directory before it's created") 383 pathJDoe := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 384 opid, err := sfs.SimpleFSMakeOpid(ctx) 385 require.NoError(t, err) 386 err = sfs.SimpleFSListRecursive(ctx, keybase1.SimpleFSListRecursiveArg{ 387 OpID: opid, 388 Path: pathJDoe, 389 }) 390 require.NoError(t, err) 391 checkPendingOp( 392 ctx, t, sfs, opid, keybase1.AsyncOps_LIST_RECURSIVE, pathJDoe, 393 keybase1.Path{}, true) 394 err = sfs.SimpleFSWait(ctx, opid) 395 require.NoError(t, err) 396 listResult, err := sfs.SimpleFSReadList(ctx, opid) 397 require.NoError(t, err) 398 require.Len(t, listResult.Entries, 0, 399 "Expected 0 directory entries in listing") 400 401 // make a temp remote directory + files we will clean up later 402 writeRemoteDir(ctx, t, sfs, pathAppend(pathJDoe, `a`)) 403 patha := keybase1.NewPathWithKbfsPath(`/private/jdoe/a`) 404 writeRemoteDir(ctx, t, sfs, pathAppend(patha, `aa`)) 405 pathaa := keybase1.NewPathWithKbfsPath(`/private/jdoe/a/aa`) 406 writeRemoteDir(ctx, t, sfs, pathAppend(patha, `ab`)) 407 pathab := keybase1.NewPathWithKbfsPath(`/private/jdoe/a/ab`) 408 writeRemoteDir(ctx, t, sfs, pathAppend(pathaa, `aaa`)) 409 pathaaa := keybase1.NewPathWithKbfsPath(`/private/jdoe/a/aa/aaa`) 410 writeRemoteFile(ctx, t, sfs, pathAppend(pathaaa, `test1.txt`), []byte(`foo`)) 411 writeRemoteFile(ctx, t, sfs, pathAppend(pathab, `test2.txt`), []byte(`foo`)) 412 writeRemoteFile(ctx, t, sfs, pathAppend(patha, `.testfile`), []byte(`foo`)) 413 414 opid, err = sfs.SimpleFSMakeOpid(ctx) 415 require.NoError(t, err) 416 err = sfs.SimpleFSListRecursive(ctx, keybase1.SimpleFSListRecursiveArg{ 417 OpID: opid, 418 Path: pathJDoe, 419 }) 420 require.NoError(t, err) 421 checkPendingOp(ctx, t, sfs, opid, keybase1.AsyncOps_LIST_RECURSIVE, pathJDoe, keybase1.Path{}, true) 422 err = sfs.SimpleFSWait(ctx, opid) 423 require.NoError(t, err) 424 listResult, err = sfs.SimpleFSReadList(ctx, opid) 425 require.NoError(t, err) 426 expected := []string{ 427 "a", 428 "a/.testfile", 429 "a/aa", 430 "a/aa/aaa", 431 "a/aa/aaa/test1.txt", 432 "a/ab", 433 "a/ab/test2.txt", 434 } 435 require.Len(t, listResult.Entries, len(expected)) 436 sort.Slice(listResult.Entries, func(i, j int) bool { 437 return strings.Compare(listResult.Entries[i].Name, 438 listResult.Entries[j].Name) < 0 439 }) 440 for i, e := range expected { 441 require.Equal(t, e, listResult.Entries[i].Name) 442 } 443 444 opid, err = sfs.SimpleFSMakeOpid(ctx) 445 require.NoError(t, err) 446 err = sfs.SimpleFSListRecursiveToDepth(ctx, keybase1.SimpleFSListRecursiveToDepthArg{ 447 OpID: opid, 448 Path: patha, 449 Depth: 1, 450 }) 451 require.NoError(t, err) 452 checkPendingOp(ctx, t, sfs, opid, keybase1.AsyncOps_LIST_RECURSIVE_TO_DEPTH, patha, keybase1.Path{}, true) 453 err = sfs.SimpleFSWait(ctx, opid) 454 require.NoError(t, err) 455 listResult, err = sfs.SimpleFSReadList(ctx, opid) 456 require.NoError(t, err) 457 expected = []string{ 458 ".testfile", 459 "aa", 460 "aa/aaa", 461 "ab", 462 "ab/test2.txt", 463 } 464 require.Len(t, listResult.Entries, len(expected)) 465 sort.Slice(listResult.Entries, func(i, j int) bool { 466 return strings.Compare(listResult.Entries[i].Name, 467 listResult.Entries[j].Name) < 0 468 }) 469 for i, e := range expected { 470 require.Equal(t, e, listResult.Entries[i].Name) 471 } 472 } 473 474 func TestCopyToLocal(t *testing.T) { 475 ctx := context.Background() 476 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 477 defer closeSimpleFS(ctx, t, sfs) 478 479 // make a temp remote directory + file(s) we will clean up later 480 path1 := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 481 writeRemoteFile(ctx, t, sfs, pathAppend(path1, "test1.txt"), []byte("foo")) 482 483 // make a temp local dest directory + files we will clean up later 484 tempdir2, err := os.MkdirTemp(TempDirBase, "simpleFstest") 485 defer os.RemoveAll(tempdir2) 486 require.NoError(t, err) 487 path2 := keybase1.NewPathWithLocal(tempdir2) 488 489 opid, err := sfs.SimpleFSMakeOpid(ctx) 490 require.NoError(t, err) 491 492 srcPath := pathAppend(path1, "test1.txt") 493 destPath := pathAppend(path2, "test1.txt") 494 495 err = sfs.SimpleFSCopy(ctx, keybase1.SimpleFSCopyArg{ 496 OpID: opid, 497 Src: srcPath, 498 Dest: destPath, 499 }) 500 require.NoError(t, err) 501 502 checkPendingOp(ctx, t, sfs, opid, keybase1.AsyncOps_COPY, srcPath, destPath, true) 503 err = sfs.SimpleFSWait(ctx, opid) 504 require.NoError(t, err) 505 506 checkPendingOp(ctx, t, sfs, opid, keybase1.AsyncOps_COPY, srcPath, destPath, false) 507 // Verify error on double wait 508 err = sfs.SimpleFSWait(ctx, opid) 509 require.Error(t, err) 510 511 exists, err := libkb.FileExists(filepath.Join(tempdir2, "test1.txt")) 512 require.NoError(t, err) 513 require.True(t, exists, "File copy destination must exist") 514 } 515 516 func TestCopyRecursive(t *testing.T) { 517 ctx := context.Background() 518 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 519 defer closeSimpleFS(ctx, t, sfs) 520 521 // make a temp local dest directory + files we will clean up later 522 tempdir, err := os.MkdirTemp(TempDirBase, "simpleFstest") 523 require.NoError(t, err) 524 defer os.RemoveAll(tempdir) 525 526 // First try copying from a TLF that doesn't exist yet, which 527 // shouldn't do anything. 528 testdir := filepath.Join(tempdir, "testdir") 529 pathLocal := keybase1.NewPathWithLocal(filepath.ToSlash(testdir)) 530 pathKbfsEmpty := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 531 opid, err := sfs.SimpleFSMakeOpid(ctx) 532 require.NoError(t, err) 533 err = sfs.SimpleFSCopyRecursive(ctx, keybase1.SimpleFSCopyRecursiveArg{ 534 OpID: opid, 535 Src: pathKbfsEmpty, 536 Dest: pathLocal, 537 }) 538 require.NoError(t, err) 539 checkPendingOp( 540 ctx, t, sfs, opid, keybase1.AsyncOps_COPY, pathKbfsEmpty, pathLocal, 541 true) 542 err = sfs.SimpleFSWait(ctx, opid) 543 require.NoError(t, err) 544 d, err := os.Open(testdir) 545 require.NoError(t, err) 546 fis, err := d.Readdir(0) 547 require.NoError(t, err) 548 require.Len(t, fis, 0) 549 550 // Populate local starting directory. 551 err = os.WriteFile( 552 filepath.Join(tempdir, "testdir", "test1.txt"), []byte("foo"), 0600) 553 require.NoError(t, err) 554 err = os.WriteFile( 555 filepath.Join(tempdir, "testdir", "test2.txt"), []byte("bar"), 0600) 556 require.NoError(t, err) 557 558 opid, err = sfs.SimpleFSMakeOpid(ctx) 559 require.NoError(t, err) 560 561 // Copy it into KBFS. 562 pathKbfs := keybase1.NewPathWithKbfsPath(`/private/jdoe/testdir`) 563 err = sfs.SimpleFSCopyRecursive(ctx, keybase1.SimpleFSCopyRecursiveArg{ 564 OpID: opid, 565 Src: pathLocal, 566 Dest: pathKbfs, 567 }) 568 require.NoError(t, err) 569 checkPendingOp( 570 ctx, t, sfs, opid, keybase1.AsyncOps_COPY, pathLocal, pathKbfs, true) 571 err = sfs.SimpleFSWait(ctx, opid) 572 require.NoError(t, err) 573 574 require.Equal(t, "foo", 575 string(readRemoteFile(ctx, t, sfs, pathAppend(pathKbfs, "test1.txt")))) 576 require.Equal(t, "bar", 577 string(readRemoteFile(ctx, t, sfs, pathAppend(pathKbfs, "test2.txt")))) 578 579 // Copy it back. 580 tempdir2, err := os.MkdirTemp(TempDirBase, "simpleFstest") 581 require.NoError(t, err) 582 defer os.RemoveAll(tempdir2) 583 path3 := keybase1.NewPathWithLocal( 584 filepath.ToSlash(filepath.Join(tempdir2, "testdir"))) 585 opid2, err := sfs.SimpleFSMakeOpid(ctx) 586 require.NoError(t, err) 587 err = sfs.SimpleFSCopyRecursive(ctx, keybase1.SimpleFSCopyRecursiveArg{ 588 OpID: opid2, 589 Src: pathKbfs, 590 Dest: path3, 591 }) 592 require.NoError(t, err) 593 checkPendingOp( 594 ctx, t, sfs, opid2, keybase1.AsyncOps_COPY, pathKbfs, path3, true) 595 err = sfs.SimpleFSWait(ctx, opid2) 596 require.NoError(t, err) 597 dataFoo, err := os.ReadFile( 598 filepath.Join(tempdir2, "testdir", "test1.txt")) 599 require.NoError(t, err) 600 require.Equal(t, "foo", string(dataFoo)) 601 dataBar, err := os.ReadFile( 602 filepath.Join(tempdir2, "testdir", "test2.txt")) 603 require.NoError(t, err) 604 require.Equal(t, "bar", string(dataBar)) 605 606 // Get current revision number for the KBFS files. 607 syncFS(ctx, t, sfs, "/private/jdoe") 608 fb, _, err := sfs.getFolderBranchFromPath(ctx, pathKbfs) 609 require.NoError(t, err) 610 status, _, err := sfs.config.KBFSOps().FolderStatus(ctx, fb) 611 require.NoError(t, err) 612 rev := status.Revision 613 pathKbfsArchived := keybase1.NewPathWithKbfsArchived( 614 keybase1.KBFSArchivedPath{ 615 Path: `/private/jdoe/testdir`, 616 ArchivedParam: keybase1.NewKBFSArchivedParamWithRevision( 617 keybase1.KBFSRevision(rev)), 618 }) 619 620 // Overwrite the files in KBFS. 621 writeRemoteFile( 622 ctx, t, sfs, pathAppend(pathKbfs, `test1.txt`), []byte(`foo2`)) 623 writeRemoteFile( 624 ctx, t, sfs, pathAppend(pathKbfs, `test2.txt`), []byte(`bar2`)) 625 syncFS(ctx, t, sfs, "/private/jdoe") 626 require.Equal(t, "foo2", 627 string(readRemoteFile(ctx, t, sfs, pathAppend(pathKbfs, "test1.txt")))) 628 require.Equal(t, "bar2", 629 string(readRemoteFile(ctx, t, sfs, pathAppend(pathKbfs, "test2.txt")))) 630 631 // Read old data from archived path. 632 require.Equal(t, "foo", 633 string(readRemoteFile( 634 ctx, t, sfs, pathAppend(pathKbfsArchived, "test1.txt")))) 635 require.Equal(t, "bar", 636 string(readRemoteFile( 637 ctx, t, sfs, pathAppend(pathKbfsArchived, "test2.txt")))) 638 } 639 640 func TestCopyToRemote(t *testing.T) { 641 ctx := context.Background() 642 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 643 defer closeSimpleFS(ctx, t, sfs) 644 645 // make a temp remote directory + file(s) we will clean up later 646 path2 := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 647 648 // make a temp local dest directory + files we will clean up later 649 tempdir, err := os.MkdirTemp(TempDirBase, "simpleFstest") 650 defer os.RemoveAll(tempdir) 651 require.NoError(t, err) 652 path1 := keybase1.NewPathWithLocal(tempdir) 653 defer deleteTempLocalPath(path1) 654 err = os.WriteFile(filepath.Join(path1.Local(), "test1.txt"), []byte("foo"), 0644) 655 require.NoError(t, err) 656 657 opid, err := sfs.SimpleFSMakeOpid(ctx) 658 require.NoError(t, err) 659 660 srcPath := keybase1.NewPathWithLocal( 661 filepath.ToSlash(filepath.Join(path1.Local(), "test1.txt"))) 662 destPath := pathAppend(path2, "test1.txt") 663 err = sfs.SimpleFSCopy(ctx, keybase1.SimpleFSCopyArg{ 664 OpID: opid, 665 Src: srcPath, 666 Dest: destPath, 667 }) 668 require.NoError(t, err) 669 670 checkPendingOp(ctx, t, sfs, opid, keybase1.AsyncOps_COPY, srcPath, destPath, true) 671 672 err = sfs.SimpleFSWait(ctx, opid) 673 require.NoError(t, err) 674 675 checkPendingOp(ctx, t, sfs, opid, keybase1.AsyncOps_COPY, srcPath, destPath, false) 676 677 // Verify error on double wait 678 err = sfs.SimpleFSWait(ctx, opid) 679 require.Error(t, err) 680 681 require.Equal(t, `foo`, 682 string(readRemoteFile(ctx, t, sfs, pathAppend(path2, "test1.txt")))) 683 } 684 685 func writeRemoteFile(ctx context.Context, t *testing.T, sfs *SimpleFS, path keybase1.Path, data []byte) { 686 opid, err := sfs.SimpleFSMakeOpid(ctx) 687 require.NoError(t, err) 688 689 err = sfs.SimpleFSOpen(ctx, keybase1.SimpleFSOpenArg{ 690 OpID: opid, 691 Dest: path, 692 Flags: keybase1.OpenFlags_REPLACE | keybase1.OpenFlags_WRITE, 693 }) 694 defer sfs.SimpleFSClose(ctx, opid) 695 require.NoError(t, err) 696 697 err = sfs.SimpleFSWrite(ctx, keybase1.SimpleFSWriteArg{ 698 OpID: opid, 699 Offset: 0, 700 Content: data, 701 }) 702 703 require.NoError(t, err) 704 } 705 706 func writeRemoteDir(ctx context.Context, t *testing.T, sfs *SimpleFS, path keybase1.Path) { 707 opid, err := sfs.SimpleFSMakeOpid(ctx) 708 require.NoError(t, err) 709 710 err = sfs.SimpleFSOpen(ctx, keybase1.SimpleFSOpenArg{ 711 OpID: opid, 712 Dest: path, 713 Flags: keybase1.OpenFlags_REPLACE | keybase1.OpenFlags_WRITE | keybase1.OpenFlags_DIRECTORY, 714 }) 715 defer sfs.SimpleFSClose(ctx, opid) 716 require.NoError(t, err) 717 } 718 719 func readRemoteFile(ctx context.Context, t *testing.T, sfs *SimpleFS, path keybase1.Path) []byte { 720 opid, err := sfs.SimpleFSMakeOpid(ctx) 721 require.NoError(t, err) 722 723 de, err := sfs.SimpleFSStat(ctx, keybase1.SimpleFSStatArg{Path: path}) 724 require.NoError(t, err) 725 t.Logf("Stat remote %q %d bytes", path, de.Size) 726 727 err = sfs.SimpleFSOpen(ctx, keybase1.SimpleFSOpenArg{ 728 OpID: opid, 729 Dest: path, 730 Flags: keybase1.OpenFlags_READ | keybase1.OpenFlags_EXISTING, 731 }) 732 defer sfs.SimpleFSClose(ctx, opid) 733 require.NoError(t, err) 734 735 data, err := sfs.SimpleFSRead(ctx, keybase1.SimpleFSReadArg{ 736 OpID: opid, 737 Offset: 0, 738 Size: de.Size * 2, // Check that reading past the end works. 739 }) 740 require.NoError(t, err) 741 742 // Profile sizes are dynamic. For now just read something from 743 // it, to test that it's not empty. 744 isProfile := strings.Contains(path.String(), libfs.ProfileListDirName) 745 if de.DirentType != keybase1.DirentType_SYM && !isProfile { 746 require.Len(t, data.Data, de.Size) 747 } 748 749 // Starting the read past the end shouldn't matter either. 750 dataPastEnd, err := sfs.SimpleFSRead(ctx, keybase1.SimpleFSReadArg{ 751 OpID: opid, 752 Offset: int64(de.Size), 753 Size: de.Size, 754 }) 755 require.NoError(t, err) 756 if !isProfile { 757 require.Len(t, dataPastEnd.Data, 0) 758 } 759 760 return data.Data 761 } 762 763 type fsBlocker struct { 764 *libfs.FS 765 signalCh chan<- struct{} 766 unblockCh <-chan struct{} 767 } 768 769 var _ billy.Filesystem = (*fsBlocker)(nil) 770 771 func (fs *fsBlocker) OpenFile(filename string, flag int, perm os.FileMode) ( 772 f billy.File, err error) { 773 fs.signalCh <- struct{}{} 774 <-fs.unblockCh 775 return fs.FS.OpenFile(filename, flag, perm) 776 } 777 778 func (fs *fsBlocker) Create(filename string) (billy.File, error) { 779 fs.signalCh <- struct{}{} 780 <-fs.unblockCh 781 return fs.FS.Create(filename) 782 } 783 784 func (fs *fsBlocker) Open(filename string) (billy.File, error) { 785 fs.signalCh <- struct{}{} 786 <-fs.unblockCh 787 return fs.FS.Open(filename) 788 } 789 790 func (fs *fsBlocker) MkdirAll(filename string, perm os.FileMode) (err error) { 791 fs.signalCh <- struct{}{} 792 <-fs.unblockCh 793 return fs.FS.MkdirAll(filename, perm) 794 } 795 796 func (fs *fsBlocker) ReadDir(p string) (fis []os.FileInfo, err error) { 797 fs.signalCh <- struct{}{} 798 <-fs.unblockCh 799 return fs.FS.ReadDir(p) 800 } 801 802 func (fs *fsBlocker) Chroot(p string) (newFS billy.Filesystem, err error) { 803 chrootFS, err := fs.FS.ChrootAsLibFS(p) 804 if err != nil { 805 return nil, err 806 } 807 return &fsBlocker{chrootFS, fs.signalCh, fs.unblockCh}, nil 808 } 809 810 type fsBlockerMaker struct { 811 signalCh chan<- struct{} 812 unblockCh <-chan struct{} 813 } 814 815 func (maker fsBlockerMaker) makeNewBlocker( 816 ctx context.Context, config libkbfs.Config, 817 tlfHandle *tlfhandle.Handle, branch data.BranchName, subdir string, 818 create bool) (billy.Filesystem, error) { 819 fsMaker := libfs.NewFS 820 if !create { 821 fsMaker = libfs.NewFSIfExists 822 } 823 fs, err := fsMaker( 824 ctx, config, tlfHandle, branch, subdir, "", keybase1.MDPriorityNormal) 825 if err != nil { 826 return nil, err 827 } 828 return &fsBlocker{fs, maker.signalCh, maker.unblockCh}, nil 829 } 830 831 func TestCopyProgress(t *testing.T) { 832 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 833 defer cancel() 834 835 config := libkbfs.MakeTestConfigOrBust(t, "jdoe") 836 clock := &clocktest.TestClock{} 837 start := time.Now() 838 clock.Set(start) 839 config.SetClock(clock) 840 841 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 842 defer closeSimpleFS(ctx, t, sfs) 843 844 waitCh := make(chan struct{}) 845 unblockCh := make(chan struct{}) 846 maker := fsBlockerMaker{waitCh, unblockCh} 847 sfs.newFS = maker.makeNewBlocker 848 849 // make a temp local dest directory + files we will clean up later 850 tempdir, err := os.MkdirTemp(TempDirBase, "simpleFstest") 851 require.NoError(t, err) 852 defer os.RemoveAll(tempdir) 853 854 // Make local starting directory. 855 err = os.Mkdir(filepath.Join(tempdir, "testdir"), 0700) 856 require.NoError(t, err) 857 err = os.WriteFile( 858 filepath.Join(tempdir, "testdir", "test1.txt"), []byte("foo"), 0600) 859 require.NoError(t, err) 860 err = os.WriteFile( 861 filepath.Join(tempdir, "testdir", "test2.txt"), []byte("bar"), 0600) 862 require.NoError(t, err) 863 path1 := keybase1.NewPathWithLocal( 864 filepath.ToSlash(filepath.Join(tempdir, "testdir"))) 865 path2 := keybase1.NewPathWithKbfsPath(`/private/jdoe/testdir`) 866 867 opid, err := sfs.SimpleFSMakeOpid(ctx) 868 require.NoError(t, err) 869 870 // Copy it into KBFS. 871 err = sfs.SimpleFSCopyRecursive(ctx, keybase1.SimpleFSCopyRecursiveArg{ 872 OpID: opid, 873 Src: path1, 874 Dest: path2, 875 }) 876 require.NoError(t, err) 877 checkPendingOp( 878 ctx, t, sfs, opid, keybase1.AsyncOps_COPY, path1, path2, true) 879 880 t.Log("Wait for the first mkdir") 881 waitFn := func() { 882 select { 883 case <-waitCh: 884 case <-ctx.Done(): 885 t.Fatal(ctx.Err()) 886 } 887 } 888 waitFn() 889 890 // Check the progress -- there shouldn't be any yet. 891 progress, err := sfs.SimpleFSCheck(ctx, opid) 892 require.NoError(t, err) 893 expectedProgress := keybase1.OpProgress{ 894 Start: keybase1.ToTime(start), 895 OpType: keybase1.AsyncOps_COPY, 896 BytesTotal: 6, 897 FilesTotal: 3, 898 } 899 require.Equal(t, expectedProgress, progress) 900 901 t.Log("Unblock the mkdir") 902 unblockCh <- struct{}{} 903 904 t.Log("Wait for the first file") 905 waitFn() 906 907 clock.Add(1 * time.Minute) 908 expectedProgress.FilesRead = 1 909 expectedProgress.FilesWritten = 1 910 // We read one directory but 0 bytes, so we still have no expected 911 // end time. 912 progress, err = sfs.SimpleFSCheck(ctx, opid) 913 require.NoError(t, err) 914 require.Equal(t, expectedProgress, progress) 915 916 t.Log("Unblock the first file") 917 unblockCh <- struct{}{} 918 919 t.Log("Wait for the second file") 920 waitFn() 921 922 clock.Add(1 * time.Minute) 923 expectedProgress.FilesRead = 2 924 expectedProgress.FilesWritten = 2 925 expectedProgress.BytesRead = 3 926 expectedProgress.BytesWritten = 3 927 progress, err = sfs.SimpleFSCheck(ctx, opid) 928 require.NoError(t, err) 929 930 // We read one file and two minutes have passed, so the estimated 931 // time should be two more minutes from now. But use the float 932 // calculation adds some uncertainty, so check it within a small 933 // error range, and then set it to the received value for the 934 // exact check. 935 endEstimate := keybase1.ToTime(start.Add(4 * time.Minute)) 936 require.InEpsilon( 937 t, float64(endEstimate), float64(progress.EndEstimate), 938 float64(5*time.Nanosecond)) 939 expectedProgress.EndEstimate = progress.EndEstimate 940 941 require.Equal(t, expectedProgress, progress) 942 943 t.Log("Unblock the second file") 944 unblockCh <- struct{}{} 945 946 err = sfs.SimpleFSWait(ctx, opid) 947 require.NoError(t, err) 948 } 949 950 func TestRemove(t *testing.T) { 951 ctx := context.Background() 952 sfs := newSimpleFS( 953 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 954 defer closeSimpleFS(ctx, t, sfs) 955 956 t.Log("Make a file to remove") 957 pathKbfs := keybase1.NewPathWithKbfsPath("/private/jdoe") 958 writeRemoteFile( 959 ctx, t, sfs, pathAppend(pathKbfs, "test.txt"), []byte("foo")) 960 syncFS(ctx, t, sfs, "/private/jdoe") 961 962 t.Log("Make sure the file is there") 963 testList(ctx, t, sfs, pathKbfs, "test.txt") 964 965 t.Log("Remove the file") 966 pathFile := keybase1.NewPathWithKbfsPath("/private/jdoe/test.txt") 967 opid, err := sfs.SimpleFSMakeOpid(ctx) 968 require.NoError(t, err) 969 err = sfs.SimpleFSRemove(ctx, keybase1.SimpleFSRemoveArg{ 970 OpID: opid, 971 Path: pathFile, 972 }) 973 require.NoError(t, err) 974 checkPendingOp( 975 ctx, t, sfs, opid, keybase1.AsyncOps_REMOVE, pathFile, keybase1.Path{}, 976 true) 977 err = sfs.SimpleFSWait(ctx, opid) 978 require.NoError(t, err) 979 980 t.Log("Make sure it's gone") 981 testList(ctx, t, sfs, pathKbfs) 982 } 983 984 func TestRemoveRecursive(t *testing.T) { 985 ctx := context.Background() 986 sfs := newSimpleFS( 987 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 988 defer closeSimpleFS(ctx, t, sfs) 989 990 t.Log("Make a directory to remove") 991 pathKbfs := keybase1.NewPathWithKbfsPath("/private/jdoe") 992 pathDir := pathAppend(pathKbfs, "a") 993 writeRemoteDir(ctx, t, sfs, pathDir) 994 writeRemoteFile(ctx, t, sfs, pathAppend(pathDir, "test1.txt"), []byte("1")) 995 writeRemoteFile(ctx, t, sfs, pathAppend(pathDir, "test2.txt"), []byte("2")) 996 pathDir2 := pathAppend(pathDir, "b") 997 writeRemoteDir(ctx, t, sfs, pathDir2) 998 writeRemoteFile(ctx, t, sfs, pathAppend(pathDir2, "test3.txt"), []byte("3")) 999 syncFS(ctx, t, sfs, "/private/jdoe") 1000 1001 t.Log("Make sure the files are there") 1002 testList(ctx, t, sfs, pathDir, "test1.txt", "test2.txt", "b") 1003 1004 t.Log("Remove dir without recursion, expect error") 1005 opid, err := sfs.SimpleFSMakeOpid(ctx) 1006 require.NoError(t, err) 1007 err = sfs.SimpleFSRemove(ctx, keybase1.SimpleFSRemoveArg{ 1008 OpID: opid, 1009 Path: pathDir, 1010 }) 1011 require.NoError(t, err) 1012 checkPendingOp( 1013 ctx, t, sfs, opid, keybase1.AsyncOps_REMOVE, pathDir, keybase1.Path{}, 1014 true) 1015 err = sfs.SimpleFSWait(ctx, opid) 1016 require.Error(t, err) 1017 1018 t.Log("Remove the dir recursively") 1019 opid, err = sfs.SimpleFSMakeOpid(ctx) 1020 require.NoError(t, err) 1021 err = sfs.SimpleFSRemove(ctx, keybase1.SimpleFSRemoveArg{ 1022 OpID: opid, 1023 Path: pathDir, 1024 Recursive: true, 1025 }) 1026 require.NoError(t, err) 1027 checkPendingOp( 1028 ctx, t, sfs, opid, keybase1.AsyncOps_REMOVE, pathDir, keybase1.Path{}, 1029 true) 1030 err = sfs.SimpleFSWait(ctx, opid) 1031 require.NoError(t, err) 1032 1033 t.Log("Make sure it's gone") 1034 testList(ctx, t, sfs, pathKbfs) 1035 } 1036 1037 func TestMoveWithinTlf(t *testing.T) { 1038 ctx := context.Background() 1039 sfs := newSimpleFS( 1040 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1041 defer closeSimpleFS(ctx, t, sfs) 1042 1043 t.Log("Make a file to move") 1044 pathKbfs := keybase1.NewPathWithKbfsPath("/private/jdoe") 1045 writeRemoteFile( 1046 ctx, t, sfs, pathAppend(pathKbfs, "test1.txt"), []byte("foo")) 1047 syncFS(ctx, t, sfs, "/private/jdoe") 1048 1049 t.Log("Make sure the file is there") 1050 testList(ctx, t, sfs, pathKbfs, "test1.txt") 1051 1052 t.Log("Move the file") 1053 pathFileOld := pathAppend(pathKbfs, "test1.txt") 1054 pathFileNew := pathAppend(pathKbfs, "test2.txt") 1055 opid, err := sfs.SimpleFSMakeOpid(ctx) 1056 require.NoError(t, err) 1057 err = sfs.SimpleFSMove(ctx, keybase1.SimpleFSMoveArg{ 1058 OpID: opid, 1059 Src: pathFileOld, 1060 Dest: pathFileNew, 1061 }) 1062 require.NoError(t, err) 1063 checkPendingOp( 1064 ctx, t, sfs, opid, keybase1.AsyncOps_MOVE, pathFileOld, pathFileNew, 1065 true) 1066 err = sfs.SimpleFSWait(ctx, opid) 1067 require.NoError(t, err) 1068 1069 t.Log("Make sure it's moved") 1070 testList(ctx, t, sfs, pathKbfs, "test2.txt") 1071 1072 t.Log("Move into subdir") 1073 pathDir := pathAppend(pathKbfs, "a") 1074 writeRemoteDir(ctx, t, sfs, pathDir) 1075 pathFileOld = pathFileNew 1076 pathFileNew = pathAppend(pathDir, "test3.txt") 1077 opid, err = sfs.SimpleFSMakeOpid(ctx) 1078 require.NoError(t, err) 1079 err = sfs.SimpleFSMove(ctx, keybase1.SimpleFSMoveArg{ 1080 OpID: opid, 1081 Src: pathFileOld, 1082 Dest: pathFileNew, 1083 }) 1084 require.NoError(t, err) 1085 checkPendingOp( 1086 ctx, t, sfs, opid, keybase1.AsyncOps_MOVE, pathFileOld, pathFileNew, 1087 true) 1088 err = sfs.SimpleFSWait(ctx, opid) 1089 require.NoError(t, err) 1090 1091 t.Log("Make sure it's moved") 1092 testList(ctx, t, sfs, pathKbfs, "a") 1093 testList(ctx, t, sfs, pathDir, "test3.txt") 1094 1095 t.Log("Move into different, parallel subdir") 1096 pathDirB := pathAppend(pathKbfs, "b") 1097 writeRemoteDir(ctx, t, sfs, pathDirB) 1098 pathDirC := pathAppend(pathDirB, "c") 1099 writeRemoteDir(ctx, t, sfs, pathDirC) 1100 pathFileOld = pathFileNew 1101 pathFileNew = pathAppend(pathDirC, "test3.txt") 1102 opid, err = sfs.SimpleFSMakeOpid(ctx) 1103 require.NoError(t, err) 1104 err = sfs.SimpleFSMove(ctx, keybase1.SimpleFSMoveArg{ 1105 OpID: opid, 1106 Src: pathFileOld, 1107 Dest: pathFileNew, 1108 }) 1109 require.NoError(t, err) 1110 checkPendingOp( 1111 ctx, t, sfs, opid, keybase1.AsyncOps_MOVE, pathFileOld, pathFileNew, 1112 true) 1113 err = sfs.SimpleFSWait(ctx, opid) 1114 require.NoError(t, err) 1115 1116 t.Log("Make sure it's moved") 1117 testList(ctx, t, sfs, pathDir) 1118 testList(ctx, t, sfs, pathDirC, "test3.txt") 1119 } 1120 1121 func TestMoveBetweenTlfs(t *testing.T) { 1122 ctx := context.Background() 1123 sfs := newSimpleFS( 1124 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1125 defer closeSimpleFS(ctx, t, sfs) 1126 1127 t.Log("Make a file to move") 1128 pathPrivate := keybase1.NewPathWithKbfsPath("/private/jdoe") 1129 writeRemoteFile( 1130 ctx, t, sfs, pathAppend(pathPrivate, "test1.txt"), []byte("foo")) 1131 syncFS(ctx, t, sfs, "/private/jdoe") 1132 1133 t.Log("Make sure the file is there") 1134 testList(ctx, t, sfs, pathPrivate, "test1.txt") 1135 1136 t.Log("Move the file") 1137 pathFileOld := pathAppend(pathPrivate, "test1.txt") 1138 pathPublic := keybase1.NewPathWithKbfsPath("/public/jdoe") 1139 pathFileNew := pathAppend(pathPublic, "test2.txt") 1140 opid, err := sfs.SimpleFSMakeOpid(ctx) 1141 require.NoError(t, err) 1142 err = sfs.SimpleFSMove(ctx, keybase1.SimpleFSMoveArg{ 1143 OpID: opid, 1144 Src: pathFileOld, 1145 Dest: pathFileNew, 1146 }) 1147 require.NoError(t, err) 1148 checkPendingOp( 1149 ctx, t, sfs, opid, keybase1.AsyncOps_MOVE, pathFileOld, pathFileNew, 1150 true) 1151 err = sfs.SimpleFSWait(ctx, opid) 1152 require.NoError(t, err) 1153 syncFS(ctx, t, sfs, "/public/jdoe") 1154 1155 t.Log("Make sure it's moved") 1156 testList(ctx, t, sfs, pathPrivate) 1157 testList(ctx, t, sfs, pathPublic, "test2.txt") 1158 1159 t.Log("Now move a whole populated directory") 1160 pathDir := pathAppend(pathPrivate, "a") 1161 writeRemoteDir(ctx, t, sfs, pathDir) 1162 writeRemoteFile(ctx, t, sfs, pathAppend(pathDir, "test1.txt"), []byte("1")) 1163 writeRemoteFile(ctx, t, sfs, pathAppend(pathDir, "test2.txt"), []byte("2")) 1164 pathDir2 := pathAppend(pathDir, "b") 1165 writeRemoteDir(ctx, t, sfs, pathDir2) 1166 writeRemoteFile(ctx, t, sfs, pathAppend(pathDir2, "test3.txt"), []byte("3")) 1167 syncFS(ctx, t, sfs, "/private/jdoe") 1168 1169 opid, err = sfs.SimpleFSMakeOpid(ctx) 1170 require.NoError(t, err) 1171 err = sfs.SimpleFSMove(ctx, keybase1.SimpleFSMoveArg{ 1172 OpID: opid, 1173 Src: pathDir, 1174 Dest: pathPublic, 1175 OverwriteExistingFiles: true, 1176 }) 1177 require.NoError(t, err) 1178 checkPendingOp( 1179 ctx, t, sfs, opid, keybase1.AsyncOps_MOVE, pathDir, pathPublic, true) 1180 err = sfs.SimpleFSWait(ctx, opid) 1181 require.NoError(t, err) 1182 syncFS(ctx, t, sfs, "/public/jdoe") 1183 1184 t.Log("Make sure it's moved (one file was overwritten)") 1185 testList(ctx, t, sfs, pathPrivate) 1186 testList(ctx, t, sfs, pathPublic, "test1.txt", "test2.txt", "b") 1187 testList(ctx, t, sfs, pathAppend(pathPublic, "b"), "test3.txt") 1188 require.Equal(t, "2", 1189 string(readRemoteFile( 1190 ctx, t, sfs, pathAppend(pathPublic, "test2.txt")))) 1191 } 1192 1193 func TestTlfEditHistory(t *testing.T) { 1194 ctx := context.Background() 1195 sfs := newSimpleFS( 1196 env.EmptyAppStateUpdater{}, 1197 libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1198 defer closeSimpleFS(ctx, t, sfs) 1199 1200 path := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 1201 writeRemoteFile(ctx, t, sfs, pathAppend(path, `test1.txt`), []byte(`foo`)) 1202 writeRemoteFile(ctx, t, sfs, pathAppend(path, `test2.txt`), []byte(`foo`)) 1203 syncFS(ctx, t, sfs, "/private/jdoe") 1204 1205 history, err := sfs.SimpleFSFolderEditHistory(ctx, path) 1206 require.NoError(t, err) 1207 require.Len(t, history.History, 1) 1208 require.Equal(t, "jdoe", history.History[0].WriterName) 1209 require.Len(t, history.History[0].Edits, 2) 1210 } 1211 1212 type subscriptionReporter struct { 1213 libkbfs.Reporter 1214 lastPathNotify chan struct{} 1215 1216 lastPathMtx sync.RWMutex 1217 lastPath string 1218 } 1219 1220 func (sr *subscriptionReporter) NotifyPathUpdated( 1221 _ context.Context, path string) { 1222 sr.lastPathMtx.Lock() 1223 defer sr.lastPathMtx.Unlock() 1224 sr.lastPath = path 1225 sr.lastPathNotify <- struct{}{} 1226 } 1227 1228 func (sr *subscriptionReporter) LastPath() string { 1229 sr.lastPathMtx.RLock() 1230 defer sr.lastPathMtx.RUnlock() 1231 return sr.lastPath 1232 } 1233 1234 func (sr *subscriptionReporter) waitForNotification(t *testing.T) { 1235 t.Helper() 1236 select { 1237 case <-sr.lastPathNotify: 1238 case <-time.After(10 * time.Millisecond): 1239 t.Fatal("Timed out while waiting for notification") 1240 } 1241 } 1242 1243 func (sr *subscriptionReporter) requireNoNotification(t *testing.T) { 1244 t.Helper() 1245 select { 1246 case <-sr.lastPathNotify: 1247 t.Fatalf("Got notification but expected none: %q", sr.lastPath) 1248 case <-time.After(10 * time.Millisecond): 1249 } 1250 } 1251 1252 func (sr *subscriptionReporter) depleteExistingNotifications(t *testing.T) { 1253 t.Helper() 1254 for { 1255 select { 1256 case <-sr.lastPathNotify: 1257 case <-time.After(10 * time.Millisecond): 1258 return 1259 } 1260 } 1261 } 1262 1263 func TestRefreshSubscription(t *testing.T) { 1264 ctx := context.Background() 1265 config := libkbfs.MakeTestConfigOrBust(t, "jdoe", "alice") 1266 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 1267 defer closeSimpleFS(ctx, t, sfs) 1268 sr := &subscriptionReporter{Reporter: config.Reporter(), lastPathNotify: make(chan struct{}, 1<<30)} 1269 config.SetReporter(sr) 1270 1271 // Use a non-canonical (possibly preferred) path to make sure notification 1272 // comes back with same path. 1273 path1 := keybase1.NewPathWithKbfsPath(`/private/jdoe,alice`) 1274 1275 t.Log("Writing a file with no subscription") 1276 writeRemoteFile(ctx, t, sfs, pathAppend(path1, `test1.txt`), []byte(`foo`)) 1277 syncFS(ctx, t, sfs, "/private/jdoe,alice") 1278 sr.requireNoNotification(t) 1279 require.Equal(t, "", sr.LastPath()) 1280 1281 t.Log("Subscribe, and make sure we get a notification") 1282 opid, err := sfs.SimpleFSMakeOpid(ctx) 1283 require.NoError(t, err) 1284 err = sfs.SimpleFSList(ctx, keybase1.SimpleFSListArg{ 1285 OpID: opid, 1286 Path: path1, 1287 RefreshSubscription: true, 1288 }) 1289 require.NoError(t, err) 1290 err = sfs.SimpleFSWait(ctx, opid) 1291 require.NoError(t, err) 1292 1293 writeRemoteFile(ctx, t, sfs, pathAppend(path1, `test2.txt`), []byte(`foo`)) 1294 syncFS(ctx, t, sfs, "/private/jdoe,alice") 1295 sr.waitForNotification(t) 1296 require.Equal(t, "/keybase"+path1.Kbfs().Path, sr.LastPath()) 1297 1298 t.Log("Make a public TLF") 1299 path2 := keybase1.NewPathWithKbfsPath(`/public/jdoe`) 1300 // Now subscribe to a different one, before the TLF even exists, 1301 // and make sure the old subscription goes away. 1302 opid2, err := sfs.SimpleFSMakeOpid(ctx) 1303 require.NoError(t, err) 1304 err = sfs.SimpleFSList(ctx, keybase1.SimpleFSListArg{ 1305 OpID: opid2, 1306 Path: path2, 1307 RefreshSubscription: true, 1308 }) 1309 require.NoError(t, err) 1310 err = sfs.SimpleFSWait(ctx, opid2) 1311 require.NoError(t, err) 1312 1313 writeRemoteFile(ctx, t, sfs, pathAppend(path2, `test.txt`), []byte(`foo`)) 1314 syncFS(ctx, t, sfs, "/public/jdoe") 1315 sr.waitForNotification(t) 1316 require.Equal(t, "/keybase"+path2.Kbfs().Path, sr.LastPath()) 1317 1318 // Make sure notification works with file content change. 1319 writeRemoteFile(ctx, t, sfs, pathAppend(path2, `test.txt`), []byte(`poo`)) 1320 syncFS(ctx, t, sfs, "/public/jdoe") 1321 sr.waitForNotification(t) 1322 require.Equal(t, "/keybase"+path2.Kbfs().Path, sr.LastPath()) 1323 1324 // We might have more than one notifications in channel here, so deplete 1325 // them before attempting more. 1326 sr.depleteExistingNotifications(t) 1327 1328 writeRemoteFile(ctx, t, sfs, pathAppend(path1, `test3.txt`), []byte(`foo`)) 1329 syncFS(ctx, t, sfs, "/private/jdoe,alice") 1330 sr.requireNoNotification(t) 1331 require.Equal(t, "/keybase"+path2.Kbfs().Path, sr.LastPath()) 1332 1333 // Now subscribe to the first one again, but using SimpleFSStat. 1334 path3 := keybase1.NewPathWithKbfsPath(`/private/jdoe,alice/test3.txt`) 1335 _, err = sfs.SimpleFSStat(ctx, keybase1.SimpleFSStatArg{ 1336 Path: path3, 1337 RefreshSubscription: true, 1338 }) 1339 require.NoError(t, err) 1340 1341 writeRemoteFile(ctx, t, sfs, pathAppend(path1, `test3.txt`), []byte(`foo`)) 1342 syncFS(ctx, t, sfs, "/private/jdoe,alice") 1343 sr.waitForNotification(t) 1344 require.Equal(t, "/keybase/private/jdoe,alice", sr.LastPath()) 1345 } 1346 1347 func TestGetRevisions(t *testing.T) { 1348 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 1349 defer cancel() 1350 1351 config := libkbfs.MakeTestConfigOrBust(t, "jdoe") 1352 clock := &clocktest.TestClock{} 1353 start := time.Now() 1354 clock.Set(start) 1355 config.SetClock(clock) 1356 1357 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 1358 defer closeSimpleFS(ctx, t, sfs) 1359 1360 path := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 1361 filePath := pathAppend(path, `test1.txt`) 1362 1363 getRevisions := func( 1364 spanType keybase1.RevisionSpanType) keybase1.GetRevisionsResult { 1365 opid, err := sfs.SimpleFSMakeOpid(ctx) 1366 require.NoError(t, err) 1367 err = sfs.SimpleFSGetRevisions(ctx, keybase1.SimpleFSGetRevisionsArg{ 1368 OpID: opid, 1369 Path: filePath, 1370 SpanType: spanType, 1371 }) 1372 require.NoError(t, err) 1373 err = sfs.SimpleFSWait(ctx, opid) 1374 require.NoError(t, err) 1375 res, err := sfs.SimpleFSReadRevisions(ctx, opid) 1376 require.NoError(t, err) 1377 err = sfs.SimpleFSClose(ctx, opid) 1378 require.NoError(t, err) 1379 return res 1380 } 1381 1382 gcJump := config.Mode().QuotaReclamationMinUnrefAge() + 1*time.Second 1383 checkRevisions := func( 1384 numExpected, newestRev int, spanType keybase1.RevisionSpanType) { 1385 res := getRevisions(spanType) 1386 require.Len(t, res.Revisions, numExpected) 1387 1388 // Default should get the most recent one, and then the 4 1389 // earliest ones, while LAST_FIVE should get the last five. 1390 expectedTime := clock.Now() 1391 expectedRev := keybase1.KBFSRevision(newestRev) 1392 for i, r := range res.Revisions { 1393 require.Equal(t, keybase1.ToTime(expectedTime), r.Entry.Time, fmt.Sprintf("%d %d", i, r.Revision)) 1394 require.Equal(t, expectedRev, r.Revision) 1395 expectedTime = expectedTime.Add(-1 * time.Minute) 1396 expectedRev-- 1397 // Adjust for the skip-list when the list is full. 1398 if newestRev == 7 && i == 3 && 1399 spanType == keybase1.RevisionSpanType_DEFAULT { 1400 expectedTime = expectedTime.Add(-1 * time.Minute) 1401 expectedRev-- 1402 } else if newestRev == 9 && i == 0 { 1403 expectedTime = expectedTime.Add(-gcJump) 1404 } 1405 } 1406 } 1407 1408 t.Log("Write 6 revisions of a single file, spaced out a minute each") 1409 for i := 0; i < 6; i++ { 1410 clock.Add(1 * time.Minute) 1411 writeRemoteFile(ctx, t, sfs, filePath, []byte{byte(i)}) 1412 syncFS(ctx, t, sfs, "/private/jdoe") 1413 numExpected := i + 1 1414 if numExpected > 5 { 1415 numExpected = 5 1416 } 1417 checkRevisions(numExpected, i+2, keybase1.RevisionSpanType_DEFAULT) 1418 checkRevisions(numExpected, i+2, keybase1.RevisionSpanType_LAST_FIVE) 1419 } 1420 1421 t.Log("Jump the clock forward and force quota reclamation") 1422 clock.Add(gcJump) 1423 fb, _, err := sfs.getFolderBranchFromPath(ctx, path) 1424 require.NoError(t, err) 1425 err = libkbfs.ForceQuotaReclamationForTesting(config, fb) 1426 require.NoError(t, err) 1427 err = config.KBFSOps().SyncFromServer(ctx, fb, nil) 1428 require.NoError(t, err) 1429 syncFS(ctx, t, sfs, "/private/jdoe") 1430 1431 t.Log("Make a new revision after QR") 1432 clock.Add(1 * time.Minute) 1433 writeRemoteFile(ctx, t, sfs, filePath, []byte{6}) 1434 syncFS(ctx, t, sfs, "/private/jdoe") 1435 1436 // Now we should be able to see two revisions, since the previous 1437 // version was live at the time of QR. 1438 newestRev := 9 /* Last file revision was at 7, plus one for GC */ 1439 checkRevisions(2, newestRev, keybase1.RevisionSpanType_DEFAULT) 1440 checkRevisions(2, newestRev, keybase1.RevisionSpanType_LAST_FIVE) 1441 } 1442 1443 func TestOverallStatusFile(t *testing.T) { 1444 ctx := context.Background() 1445 sfs := newSimpleFS( 1446 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1447 defer closeSimpleFS(ctx, t, sfs) 1448 1449 path := keybase1.NewPathWithKbfsPath("/" + libfs.StatusFileName) 1450 buf := readRemoteFile(ctx, t, sfs, path) 1451 var status libkbfs.KBFSStatus 1452 err := json.Unmarshal(buf, &status) 1453 require.NoError(t, err) 1454 require.Equal(t, "jdoe", status.CurrentUser) 1455 } 1456 1457 func TestFavoriteConflicts(t *testing.T) { 1458 ctx := context.Background() 1459 tempdir, err := os.MkdirTemp(TempDirBase, "journal_for_simplefs_cr") 1460 defer os.RemoveAll(tempdir) 1461 require.NoError(t, err) 1462 sfs := newSimpleFS( 1463 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1464 defer closeSimpleFS(ctx, t, sfs) 1465 config := sfs.config.(*libkbfs.ConfigLocal) 1466 1467 t.Log("Enable journaling") 1468 err = config.EnableDiskLimiter(tempdir) 1469 require.NoError(t, err) 1470 err = config.EnableJournaling( 1471 ctx, tempdir, libkbfs.TLFJournalBackgroundWorkEnabled) 1472 require.NoError(t, err) 1473 jManager, err := libkbfs.GetJournalManager(config) 1474 require.NoError(t, err) 1475 err = jManager.EnableAuto(ctx) 1476 require.NoError(t, err) 1477 1478 pathPriv := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 1479 pathPub := keybase1.NewPathWithKbfsPath(`/public/jdoe`) 1480 1481 t.Log("Add one file in each directory") 1482 writeRemoteFile( 1483 ctx, t, sfs, pathAppend(pathPriv, `test.txt`), []byte(`foo`)) 1484 syncFS(ctx, t, sfs, "/private/jdoe") 1485 writeRemoteFile( 1486 ctx, t, sfs, pathAppend(pathPub, `test.txt`), []byte(`foo`)) 1487 syncFS(ctx, t, sfs, "/public/jdoe") 1488 1489 t.Log("Make sure we see two favorites with no conflicts") 1490 favs, err := sfs.SimpleFSListFavorites(ctx) 1491 require.NoError(t, err) 1492 require.Len(t, favs.FavoriteFolders, 2) 1493 for _, f := range favs.FavoriteFolders { 1494 require.Nil(t, f.ConflictState) 1495 } 1496 1497 t.Log("Force a stuck conflict and make sure it's captured correctly") 1498 err = sfs.SimpleFSForceStuckConflict(ctx, pathPub) 1499 require.NoError(t, err) 1500 favs, err = sfs.SimpleFSListFavorites(ctx) 1501 require.NoError(t, err) 1502 require.Len(t, favs.FavoriteFolders, 2) 1503 stuck, notStuck := 0, 0 1504 for _, f := range favs.FavoriteFolders { 1505 if f.FolderType == keybase1.FolderType_PUBLIC { 1506 require.NotNil(t, f.ConflictState) 1507 conflictStateType, err := f.ConflictState.ConflictStateType() 1508 require.NoError(t, err) 1509 require.Equal(t, keybase1.ConflictStateType_NormalView, 1510 conflictStateType) 1511 require.True(t, f.ConflictState.Normalview().ResolvingConflict) 1512 require.True(t, f.ConflictState.Normalview().StuckInConflict) 1513 stuck++ 1514 } else { 1515 require.Nil(t, f.ConflictState) 1516 notStuck++ 1517 } 1518 } 1519 require.Equal(t, 1, stuck) 1520 require.Equal(t, 1, notStuck) 1521 1522 t.Log("Check for stuck badge state") 1523 badge, err := sfs.SimpleFSGetFilesTabBadge(ctx) 1524 require.NoError(t, err) 1525 require.Equal(t, keybase1.FilesTabBadge_UPLOADING_STUCK, badge) 1526 1527 t.Log("Resolve the conflict") 1528 err = sfs.SimpleFSClearConflictState(ctx, pathPub) 1529 require.NoError(t, err) 1530 favs, err = sfs.SimpleFSListFavorites(ctx) 1531 require.NoError(t, err) 1532 require.Len(t, favs.FavoriteFolders, 3) 1533 var pathConflict keybase1.Path 1534 var pathLocalView keybase1.Path 1535 for _, f := range favs.FavoriteFolders { 1536 switch { 1537 case tlf.ContainsLocalConflictExtensionPrefix(f.Name): 1538 require.NotNil(t, f.ConflictState) 1539 ct, err := f.ConflictState.ConflictStateType() 1540 require.NoError(t, err) 1541 require.Equal( 1542 t, keybase1.ConflictStateType_ManualResolvingLocalView, ct) 1543 mrlv := f.ConflictState.Manualresolvinglocalview() 1544 require.Equal(t, pathPub.String(), mrlv.NormalView.String()) 1545 pathConflict = keybase1.NewPathWithKbfsPath("/public/" + f.Name) 1546 case f.Name == "jdoe" && f.FolderType == keybase1.FolderType_PUBLIC: 1547 require.NotNil(t, f.ConflictState) 1548 ct, err := f.ConflictState.ConflictStateType() 1549 require.NoError(t, err) 1550 require.Equal( 1551 t, keybase1.ConflictStateType_NormalView, ct) 1552 sv := f.ConflictState.Normalview() 1553 require.False(t, sv.ResolvingConflict) 1554 require.False(t, sv.StuckInConflict) 1555 require.Len(t, sv.LocalViews, 1) 1556 pathLocalView = sv.LocalViews[0] 1557 default: 1558 require.Nil(t, f.ConflictState) 1559 } 1560 } 1561 require.NotEqual(t, "", pathConflict.String()) 1562 require.Equal(t, pathLocalView.String(), pathConflict.String()) 1563 1564 t.Log("Make sure we see all the conflict files in the local branch") 1565 opid, err := sfs.SimpleFSMakeOpid(ctx) 1566 require.NoError(t, err) 1567 err = sfs.SimpleFSList(ctx, keybase1.SimpleFSListArg{ 1568 OpID: opid, 1569 Path: pathConflict, 1570 }) 1571 require.NoError(t, err) 1572 err = sfs.SimpleFSWait(ctx, opid) 1573 require.NoError(t, err) 1574 listResult, err := sfs.SimpleFSReadList(ctx, opid) 1575 require.NoError(t, err) 1576 require.Len(t, listResult.Entries, 12) 1577 1578 t.Log("Finish resolving the conflict") 1579 err = sfs.SimpleFSFinishResolvingConflict(ctx, pathLocalView) 1580 require.NoError(t, err) 1581 favs, err = sfs.SimpleFSListFavorites(ctx) 1582 require.NoError(t, err) 1583 require.Len(t, favs.FavoriteFolders, 2) 1584 for _, f := range favs.FavoriteFolders { 1585 require.Nil(t, f.ConflictState) 1586 } 1587 1588 t.Log("Try stat'ing the old local view, should get an error") 1589 _, err = sfs.SimpleFSStat(ctx, keybase1.SimpleFSStatArg{ 1590 Path: pathLocalView, 1591 }) 1592 require.Error(t, err) 1593 } 1594 1595 func TestSyncConfigFavorites(t *testing.T) { 1596 ctx := context.Background() 1597 config := libkbfs.MakeTestConfigOrBust(t, "jdoe") 1598 tempdir, err := os.MkdirTemp(TempDirBase, "journal_for_simplefs_favs") 1599 require.NoError(t, err) 1600 defer os.RemoveAll(tempdir) 1601 err = config.EnableDiskLimiter(tempdir) 1602 require.NoError(t, err) 1603 config.SetDiskCacheMode(libkbfs.DiskCacheModeLocal) 1604 err = config.MakeDiskBlockCacheIfNotExists() 1605 require.NoError(t, err) 1606 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 1607 defer closeSimpleFS(ctx, t, sfs) 1608 1609 pathPriv := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 1610 pathPub := keybase1.NewPathWithKbfsPath(`/public/jdoe`) 1611 1612 t.Log("Add one file in each directory") 1613 writeRemoteFile( 1614 ctx, t, sfs, pathAppend(pathPriv, `test.txt`), []byte(`foo`)) 1615 syncFS(ctx, t, sfs, "/private/jdoe") 1616 writeRemoteFile( 1617 ctx, t, sfs, pathAppend(pathPub, `test.txt`), []byte(`foo`)) 1618 syncFS(ctx, t, sfs, "/public/jdoe") 1619 1620 t.Log("Make sure none are marked for syncing") 1621 favs, err := sfs.SimpleFSListFavorites(ctx) 1622 require.NoError(t, err) 1623 require.Len(t, favs.FavoriteFolders, 2) 1624 for _, f := range favs.FavoriteFolders { 1625 require.Equal(t, keybase1.FolderSyncMode_DISABLED, f.SyncConfig.Mode) 1626 } 1627 1628 t.Log("Start syncing the public folder") 1629 setArg := keybase1.SimpleFSSetFolderSyncConfigArg{ 1630 Path: pathPub, 1631 Config: keybase1.FolderSyncConfig{ 1632 Mode: keybase1.FolderSyncMode_ENABLED, 1633 }, 1634 } 1635 err = sfs.SimpleFSSetFolderSyncConfig(ctx, setArg) 1636 require.NoError(t, err) 1637 favs, err = sfs.SimpleFSListFavorites(ctx) 1638 require.NoError(t, err) 1639 require.Len(t, favs.FavoriteFolders, 2) 1640 numSyncing := 0 1641 for _, f := range favs.FavoriteFolders { 1642 if f.FolderType == keybase1.FolderType_PUBLIC { 1643 numSyncing++ 1644 require.Equal( 1645 t, keybase1.FolderSyncMode_ENABLED, f.SyncConfig.Mode) 1646 } else { 1647 require.Equal( 1648 t, keybase1.FolderSyncMode_DISABLED, f.SyncConfig.Mode) 1649 } 1650 } 1651 require.Equal(t, 1, numSyncing) 1652 } 1653 1654 func TestRemoveFavorite(t *testing.T) { 1655 ctx := context.Background() 1656 config := libkbfs.MakeTestConfigOrBust(t, "jdoe", "alice") 1657 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, config) 1658 defer closeSimpleFS(ctx, t, sfs) 1659 1660 t.Log("Write a file in the shared directory") 1661 pathPriv := keybase1.NewPathWithKbfsPath(`/private/alice,jdoe`) 1662 writeRemoteFile( 1663 ctx, t, sfs, pathAppend(pathPriv, `test.txt`), []byte(`foo`)) 1664 syncFS(ctx, t, sfs, "/private/alice,jdoe") 1665 1666 t.Log("Make sure it's in the favorites list") 1667 favs, err := sfs.SimpleFSListFavorites(ctx) 1668 require.NoError(t, err) 1669 require.Len(t, favs.FavoriteFolders, 3) 1670 find := func() bool { 1671 for _, f := range favs.FavoriteFolders { 1672 t.Logf("NAME=%s", f.Name) 1673 if f.FolderType == keybase1.FolderType_PRIVATE && 1674 f.Name == "alice,jdoe" { 1675 return true 1676 } 1677 } 1678 return false 1679 } 1680 found := find() 1681 require.True(t, found) 1682 1683 t.Log("Remove the favorite") 1684 opid, err := sfs.SimpleFSMakeOpid(ctx) 1685 require.NoError(t, err) 1686 err = sfs.SimpleFSRemove(ctx, keybase1.SimpleFSRemoveArg{ 1687 OpID: opid, 1688 Path: pathPriv, 1689 }) 1690 require.NoError(t, err) 1691 checkPendingOp( 1692 ctx, t, sfs, opid, keybase1.AsyncOps_REMOVE, pathPriv, keybase1.Path{}, 1693 true) 1694 err = sfs.SimpleFSWait(ctx, opid) 1695 require.NoError(t, err) 1696 1697 t.Log("Check that it's gone") 1698 favs, err = sfs.SimpleFSListFavorites(ctx) 1699 require.NoError(t, err) 1700 require.Len(t, favs.FavoriteFolders, 2) 1701 found = find() 1702 require.False(t, found) 1703 } 1704 1705 func TestBadgeState(t *testing.T) { 1706 ctx := context.Background() 1707 tempdir, err := os.MkdirTemp(TempDirBase, "journal_for_simplefs_badge") 1708 defer os.RemoveAll(tempdir) 1709 require.NoError(t, err) 1710 sfs := newSimpleFS( 1711 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1712 defer closeSimpleFS(ctx, t, sfs) 1713 config := sfs.config.(*libkbfs.ConfigLocal) 1714 1715 t.Log("Enable journaling") 1716 err = config.EnableDiskLimiter(tempdir) 1717 require.NoError(t, err) 1718 err = config.EnableJournaling( 1719 ctx, tempdir, libkbfs.TLFJournalBackgroundWorkEnabled) 1720 require.NoError(t, err) 1721 jManager, err := libkbfs.GetJournalManager(config) 1722 require.NoError(t, err) 1723 err = jManager.EnableAuto(ctx) 1724 require.NoError(t, err) 1725 1726 t.Log("No badge yet") 1727 badge, err := sfs.SimpleFSGetFilesTabBadge(ctx) 1728 require.NoError(t, err) 1729 require.Equal(t, keybase1.FilesTabBadge_NONE, badge) 1730 1731 pathPriv := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 1732 pathPub := keybase1.NewPathWithKbfsPath(`/public/jdoe`) 1733 1734 t.Log("Add one private file.") 1735 writeRemoteFile( 1736 ctx, t, sfs, pathAppend(pathPriv, `test.txt`), []byte(`foo`)) 1737 syncFS(ctx, t, sfs, "/private/jdoe") 1738 _, tlfIDs := jManager.Status(ctx) 1739 require.Len(t, tlfIDs, 1) 1740 tlfID := tlfIDs[0] 1741 err = jManager.Wait(ctx, tlfID) 1742 require.NoError(t, err) 1743 1744 t.Log("Still no badge yet") 1745 badge, err = sfs.SimpleFSGetFilesTabBadge(ctx) 1746 require.NoError(t, err) 1747 require.Equal(t, keybase1.FilesTabBadge_NONE, badge) 1748 1749 t.Log("Pause the journal and add another file") 1750 jManager.PauseBackgroundWork(ctx, tlfID) 1751 writeRemoteFile( 1752 ctx, t, sfs, pathAppend(pathPriv, `test2.txt`), []byte(`foo2`)) 1753 syncFS(ctx, t, sfs, "/private/jdoe") 1754 // Wait shouldn't do anything unless there's a bug with pausing, 1755 // so do it just in case. 1756 err = jManager.Wait(ctx, tlfID) 1757 require.NoError(t, err) 1758 badge, err = sfs.SimpleFSGetFilesTabBadge(ctx) 1759 require.NoError(t, err) 1760 require.Equal(t, keybase1.FilesTabBadge_UPLOADING, badge) 1761 1762 t.Log("Get a different TLF stuck, badge state should update") 1763 writeRemoteFile( 1764 ctx, t, sfs, pathAppend(pathPub, `test3.txt`), []byte(`foo3`)) 1765 syncFS(ctx, t, sfs, "/public/jdoe") 1766 err = sfs.SimpleFSForceStuckConflict(ctx, pathPub) 1767 require.NoError(t, err) 1768 badge, err = sfs.SimpleFSGetFilesTabBadge(ctx) 1769 require.NoError(t, err) 1770 require.Equal(t, keybase1.FilesTabBadge_UPLOADING_STUCK, badge) 1771 1772 jManager.ResumeBackgroundWork(ctx, tlfID) 1773 } 1774 1775 func TestProfiles(t *testing.T) { 1776 ctx := context.Background() 1777 sfs := newSimpleFS( 1778 env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1779 defer closeSimpleFS(ctx, t, sfs) 1780 libfs.AddRootWrapper(sfs.config) 1781 1782 t.Log("Root-level profiles") 1783 path := keybase1.NewPathWithKbfsPath("/" + libfs.ProfileListDirName) 1784 testListWithFilterAndUsername( 1785 ctx, t, sfs, path, keybase1.ListFilter_NO_FILTER, "", 1786 libfs.ListProfileNames()...) 1787 buf := readRemoteFile(ctx, t, sfs, pathAppend(path, "goroutine")) 1788 require.NotEmpty(t, buf) 1789 1790 t.Log("In-TLF profiles") 1791 // Create TLF first. 1792 pathPriv := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 1793 writeRemoteFile( 1794 ctx, t, sfs, pathAppend(pathPriv, `test.txt`), []byte(`foo`)) 1795 syncFS(ctx, t, sfs, "/private/jdoe") 1796 path = pathAppend(pathPriv, libfs.ProfileListDirName) 1797 testListWithFilterAndUsername( 1798 ctx, t, sfs, path, keybase1.ListFilter_NO_FILTER, "", 1799 libfs.ListProfileNames()...) 1800 buf = readRemoteFile(ctx, t, sfs, pathAppend(path, "goroutine")) 1801 require.NotEmpty(t, buf) 1802 } 1803 1804 func TestArchiveSymlink(t *testing.T) { 1805 ctx, cancel := context.WithTimeout(context.Background(), time.Second*20) 1806 defer cancel() 1807 1808 // make a temp local dest directory + files we will clean up later 1809 tempdir, err := os.MkdirTemp(TempDirBase, "simpleFStest") 1810 defer os.RemoveAll(tempdir) 1811 require.NoError(t, err) 1812 t.Logf("temp dir: %s", tempdir) 1813 1814 setCacheDirForTest(tempdir) 1815 defer unsetCacheDirForTest() 1816 1817 sfs := newSimpleFS(env.EmptyAppStateUpdater{}, libkbfs.MakeTestConfigOrBust(t, "jdoe")) 1818 defer closeSimpleFS(ctx, t, sfs) 1819 1820 path1 := keybase1.NewPathWithKbfsPath(`/private/jdoe`) 1821 writeRemoteFile(ctx, t, sfs, pathAppend(path1, "test1.txt"), []byte("foo")) 1822 1823 // make some symlinks 1824 { 1825 { 1826 linkName := "link1" 1827 link := pathAppend(path1, linkName) 1828 err := sfs.SimpleFSSymlink(ctx, keybase1.SimpleFSSymlinkArg{ 1829 Target: "test1.txt", 1830 Link: link, 1831 }) 1832 require.NoError(t, err) 1833 } 1834 { 1835 linkName := "link-escaping" 1836 link := pathAppend(path1, linkName) 1837 err := sfs.SimpleFSSymlink(ctx, keybase1.SimpleFSSymlinkArg{ 1838 Target: "../test1.txt", 1839 Link: link, 1840 }) 1841 require.NoError(t, err) 1842 } 1843 } 1844 syncFS(ctx, t, sfs, "/private/jdoe") 1845 1846 desc, err := sfs.SimpleFSArchiveStart(ctx, keybase1.SimpleFSArchiveStartArg{ 1847 KbfsPath: path1.Kbfs(), 1848 OutputPath: filepath.Join(tempdir, "archive"), 1849 }) 1850 require.NoError(t, err) 1851 require.Equal(t, filepath.Join(tempdir, "archive.zip"), desc.ZipFilePath) 1852 1853 ticker := time.NewTicker(time.Millisecond * 100) 1854 loopWait: 1855 for { 1856 select { 1857 case <-ctx.Done(): 1858 require.NoError(t, ctx.Err()) 1859 case <-ticker.C: 1860 } 1861 status, err := sfs.SimpleFSGetArchiveStatus(ctx) 1862 require.NoError(t, err) 1863 require.Equal(t, 1, len(status.Jobs)) 1864 job := status.Jobs[desc.JobID] 1865 t.Logf("got job status %#+v", job) 1866 require.Nil(t, job.Error) 1867 if job.Phase == keybase1.SimpleFSArchiveJobPhase_Done { 1868 break loopWait 1869 } 1870 } 1871 1872 reader, err := zip.OpenReader(filepath.Join(tempdir, "archive.zip")) 1873 defer func() { _ = reader.Close() }() 1874 require.NoError(t, err) 1875 require.Equal(t, 2, len(reader.File)) // file and one symlink 1876 }