// github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/libkbfs/kbfs_ops_concur_test.go

// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"bytes"
	"errors"
	"fmt"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	kbfsdata "github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/kbfsblock"
	"github.com/keybase/client/go/kbfs/kbfscrypto"
	"github.com/keybase/client/go/kbfs/kbfssync"
	"github.com/keybase/client/go/kbfs/libcontext"
	"github.com/keybase/client/go/kbfs/libkey"
	"github.com/keybase/client/go/kbfs/tlf"
	"github.com/keybase/client/go/kbfs/tlfhandle"
	kbname "github.com/keybase/client/go/kbun"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/keybase/go-framed-msgpack-rpc/rpc"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"
)

// CounterLock keeps track of the number of lock attempts
type CounterLock struct {
	countLock sync.Mutex
	realLock  sync.Mutex
	count     int
}

func (cl *CounterLock) Lock() {
	cl.countLock.Lock()
	cl.count++
	cl.countLock.Unlock()
	cl.realLock.Lock()
}

func (cl *CounterLock) Unlock() {
	cl.realLock.Unlock()
}

func (cl *CounterLock) GetCount() int {
	cl.countLock.Lock()
	defer cl.countLock.Unlock()
	return cl.count
}

func kbfsOpsConcurInit(t *testing.T, users ...kbname.NormalizedUsername) (
	*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
	return kbfsOpsInitNoMocks(t, users...)
}

func kbfsConcurTestShutdown(
	ctx context.Context, t *testing.T,
	config *ConfigLocal, cancel context.CancelFunc) {
	kbfsTestShutdownNoMocks(ctx, t, config, cancel)
}

// TODO: Get rid of all users of this.
func kbfsConcurTestShutdownNoCheck(
	ctx context.Context, t *testing.T,
	config *ConfigLocal, cancel context.CancelFunc) {
	kbfsTestShutdownNoMocksNoCheck(ctx, t, config, cancel)
}

// Test that only one of two concurrent GetRootMD requests can end up
// fetching the MD from the server. The second one should wait, and
// then get it from the MD cache.
func TestKBFSOpsConcurDoubleMDGet(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onGetStalledCh, getUnstallCh, ctxStallGetForTLF :=
		StallMDOp(ctx, config, StallableMDGetForTLF, 1)

	// Initialize the MD using a different config
	c2 := ConfigAsUser(config, "test_user")
	defer CheckConfigAndShutdown(ctx, t, c2)
	rootNode := GetRootNodeOrBust(ctx, t, c2, "test_user", tlf.Private)

	n := 10
	c := make(chan error, n)
	cl := &CounterLock{}
	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
	ops.mdWriterLock = kbfssync.MakeLeveledMutex(
		kbfssync.MutexLevel(fboMDWriter), cl)
	for i := 0; i < n; i++ {
		go func() {
			_, _, _, err := ops.getRootNode(ctxStallGetForTLF)
			c <- err
		}()
	}

	// wait until the first one starts the get
	<-onGetStalledCh
	// make sure that a second goroutine has also attempted the write
	// lock, and thus must be queued behind the first one (since we
	// are guaranteed the first one is currently running, and they
	// both need the same lock).
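	// Spin (yielding the processor) until CounterLock has seen a
	// second Lock attempt; CounterLock increments its count before
	// blocking on the real mutex, so a count of 2 means another
	// goroutine is already queued behind the stalled one.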
	for cl.GetCount() < 2 {
		runtime.Gosched()
	}
	// Now let the first one complete. The second one should find the
	// MD in the cache, and thus never call MDOps.Get().
	close(getUnstallCh)
	for i := 0; i < n; i++ {
		err := <-c
		require.NoError(t, err, "Got an error doing concurrent MD gets: err=(%s)", err)
	}
}

// Test that a read can happen concurrently with a sync
func TestKBFSOpsConcurReadDuringSync(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDAfterPut, 1)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	data := []byte{1}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// start the sync
	errChan := make(chan error)
	go func() {
		errChan <- kbfsOps.SyncAll(putCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	<-onPutStalledCh

	// now make sure we can read the file and see the byte we wrote
	buf := make([]byte, 1)
	nr, err := kbfsOps.Read(ctx, fileNode, buf, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != 1 || !bytes.Equal(data, buf) {
		t.Errorf("Got wrong data %v; expected %v", buf, data)
	}

	// now unblock Sync and make sure there was no error
	close(putUnstallCh)
	err = <-errChan
	require.NoError(t, err, "Sync got an error: %v", err)
}

func testCalcNumFileBlocks(
	dataLen int, bsplitter *kbfsdata.BlockSplitterSimple) int {
	nChildBlocks := 1 + dataLen/int(bsplitter.MaxSize())
	nFileBlocks := nChildBlocks
	for nChildBlocks > 1 {
		parentBlocks := 0
		// Add parent blocks for each level of the tree.
		for i := 0; i < nChildBlocks; i += bsplitter.MaxPtrsPerBlock() {
			parentBlocks++
		}
		nFileBlocks += parentBlocks
		nChildBlocks = parentBlocks
	}
	return nFileBlocks
}

// Test that writes can happen concurrently with a sync
func testKBFSOpsConcurWritesDuringSync(t *testing.T,
	initialWriteBytes int, nOneByteWrites int, nFiles int) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDAfterPut, 1)

	// Use the smallest possible block size.
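	// (NewBlockSplitterSimple takes the desired max block size, a cap
	// on embedded block-change data, and the codec; a 20-byte limit
	// forces even small writes to span multiple blocks.)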
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	fileNodes := make([]Node, nFiles)
	kbfsOps := config.KBFSOps()
	for i := 0; i < nFiles; i++ {
		name := fmt.Sprintf("file%d", i)
		fileNode, _, err := kbfsOps.CreateFile(
			ctx, rootNode, testPPS(name), false, NoExcl)
		require.NoError(t, err, "Couldn't create file %s: %v", name, err)
		fileNodes[i] = fileNode
	}

	expectedData := make([][]byte, len(fileNodes))
	for i, fileNode := range fileNodes {
		data := make([]byte, initialWriteBytes)
		for j := 0; j < initialWriteBytes; j++ {
			data[j] = byte(initialWriteBytes * (i + 1))
		}
		err = kbfsOps.Write(ctx, fileNode, data, 0)
		require.NoError(t, err, "Couldn't write file: %v", err)
		expectedData[i] = make([]byte, len(data))
		copy(expectedData[i], data)
	}

	// start the sync
	errChan := make(chan error)
	go func() {
		errChan <- kbfsOps.SyncAll(putCtx, fileNodes[0].GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	select {
	case <-onPutStalledCh:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for stall")
	}

	for i, fileNode := range fileNodes {
		for j := 0; j < nOneByteWrites; j++ {
			// now make sure we can write the file and see the new
			// byte we wrote
			newData := []byte{byte(nOneByteWrites * (j + 2))}
			err = kbfsOps.Write(ctx, fileNode, newData,
				int64(j+initialWriteBytes))
			require.NoError(t, err, "Couldn't write data: %v", err)

			// read the data back
			buf := make([]byte, j+1+initialWriteBytes)
			nr, err := kbfsOps.Read(ctx, fileNode, buf, 0)
			require.NoError(t, err, "Couldn't read data: %v", err)
			expectedData[i] = append(expectedData[i], newData...)
			if nr != int64(j+1+initialWriteBytes) ||
				!bytes.Equal(expectedData[i], buf) {
				t.Errorf("Got wrong data %v; expected %v", buf, expectedData[i])
			}
		}
	}

	// now unblock Sync and make sure there was no error
	close(putUnstallCh)
	err = <-errChan
	require.NoError(t, err, "Sync got an error: %v", err)

	// finally, make sure we can still read it after the sync too
	// (even though the second write hasn't been sync'd yet)
	totalSize := nOneByteWrites + initialWriteBytes
	for i, fileNode := range fileNodes {
		buf2 := make([]byte, totalSize)
		nr, err := kbfsOps.Read(ctx, fileNode, buf2, 0)
		require.NoError(t, err, "Couldn't read data: %v", err)
		if nr != int64(totalSize) ||
			!bytes.Equal(expectedData[i], buf2) {
			t.Errorf("2nd read: Got wrong data %v; expected %v",
				buf2, expectedData[i])
		}
	}

	// there should be 4+n clean blocks at this point: the original
	// root block + 2 modifications (create + write), the empty file
	// block, the n initial modification blocks plus top block (if
	// applicable).
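	// For example, with a single 1-byte initial write per file and a
	// 20-byte block size, testCalcNumFileBlocks returns 1 (one child
	// block, no indirect levels), so the expectation is 4+1 = 5.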
	bcs := config.BlockCache().(*kbfsdata.BlockCacheStandard)
	numCleanBlocks := bcs.NumCleanTransientBlocks()
	nFileBlocks := testCalcNumFileBlocks(initialWriteBytes, bsplitter)
	if g, e := numCleanBlocks, 4+nFileBlocks; g != e {
		t.Logf("Unexpected number of cached clean blocks: %d vs %d (%d vs %d)", g, e, totalSize, bsplitter.MaxSize())
	}

	err = kbfsOps.SyncAll(ctx, fileNodes[0].GetFolderBranch())
	require.NoError(t, err, "Final sync failed: %v", err)

	for _, fileNode := range fileNodes {
		if ei, err := kbfsOps.Stat(ctx, fileNode); err != nil {
			t.Fatalf("Couldn't stat: %v", err)
		} else if g, e := ei.Size, uint64(totalSize); g != e {
			t.Fatalf("Unexpected size: %d vs %d", g, e)
		}
	}

	// Make sure there are no dirty blocks left at the end of the test.
	dbcs := config.DirtyBlockCache().(*kbfsdata.DirtyBlockCacheStandard)
	numDirtyBlocks := dbcs.Size()
	if numDirtyBlocks != 0 {
		t.Errorf("%d dirty blocks left after final sync", numDirtyBlocks)
	}
}

// Test that a write can happen concurrently with a sync
func TestKBFSOpsConcurWriteDuringSync(t *testing.T) {
	testKBFSOpsConcurWritesDuringSync(t, 1, 1, 1)
}

// Test that multiple writes can happen concurrently with a sync
// (regression for KBFS-616)
func TestKBFSOpsConcurMultipleWritesDuringSync(t *testing.T) {
	testKBFSOpsConcurWritesDuringSync(t, 1, 10, 1)
}

// Test that multiple indirect writes can happen concurrently with a
// sync (regression for KBFS-661)
func TestKBFSOpsConcurMultipleIndirectWritesDuringSync(t *testing.T) {
	testKBFSOpsConcurWritesDuringSync(t, 25, 50, 1)
}

// Test that a write can happen concurrently with a sync all of two files.
func TestKBFSOpsConcurWriteDuringSyncAllTwoFiles(t *testing.T) {
	testKBFSOpsConcurWritesDuringSync(t, 1, 1, 2)
}

// Test that a write can happen concurrently with a sync all of ten files.
func TestKBFSOpsConcurWriteDuringSyncAllTenFiles(t *testing.T) {
	testKBFSOpsConcurWritesDuringSync(t, 1, 1, 10)
}

// Test that writes that happen concurrently with a sync, which write
// to the same block, work correctly.
func TestKBFSOpsConcurDeferredDoubleWritesDuringSync(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDAfterPut, 1)

	// Use the smallest possible block size.
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	var data []byte
	// Write 2 blocks worth of data
	for i := 0; i < 30; i++ {
		data = append(data, byte(i))
	}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// Sync the initial two data blocks
	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
	require.NoError(t, err, "Initial sync failed: %v", err)

	// Now dirty the first block.
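	// (With a 20-byte block size, the 30 bytes above split into a
	// 20-byte block and a 10-byte block, so a 10-byte write at
	// offset 0 touches only the first block.)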
	newData1 := make([]byte, 10)
	copy(newData1, data[20:])
	err = kbfsOps.Write(ctx, fileNode, newData1, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// start the sync
	errChan := make(chan error)
	go func() {
		errChan <- kbfsOps.SyncAll(putCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	<-onPutStalledCh

	// Now dirty the second block, twice.
	newData2 := make([]byte, 10)
	copy(newData2, data[:10])
	err = kbfsOps.Write(ctx, fileNode, newData2, 20)
	require.NoError(t, err, "Couldn't write file: %v", err)
	err = kbfsOps.Write(ctx, fileNode, newData2, 30)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// now unblock Sync and make sure there was no error
	close(putUnstallCh)
	err = <-errChan
	require.NoError(t, err, "Sync got an error: %v", err)

	expectedData := make([]byte, 40)
	copy(expectedData[:10], newData1)
	copy(expectedData[10:20], data[10:20])
	copy(expectedData[20:30], newData2)
	copy(expectedData[30:40], newData2)

	gotData := make([]byte, 40)
	nr, err := kbfsOps.Read(ctx, fileNode, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	if !bytes.Equal(expectedData, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", expectedData, gotData)
	}

	// Final sync
	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
	require.NoError(t, err, "Final sync failed: %v", err)

	gotData = make([]byte, 40)
	nr, err = kbfsOps.Read(ctx, fileNode, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	if !bytes.Equal(expectedData, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", expectedData, gotData)
	}

	// Make sure there are no dirty blocks left at the end of the test.
	dbcs := config.DirtyBlockCache().(*kbfsdata.DirtyBlockCacheStandard)
	numDirtyBlocks := dbcs.Size()
	if numDirtyBlocks != 0 {
		t.Errorf("%d dirty blocks left after final sync", numDirtyBlocks)
	}
}

// Test that a block write can happen concurrently with a block
// read. This is a regression test for KBFS-536.
func TestKBFSOpsConcurBlockReadWrite(t *testing.T) {
	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown block ops.")
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	// TODO: Use kbfsConcurTestShutdown.
	defer kbfsConcurTestShutdownNoCheck(ctx, t, config, cancel)

	// Turn off transient block caching.
	config.SetBlockCache(kbfsdata.NewBlockCacheStandard(0, 1<<30))

	// Create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	onReadStalledCh, readUnstallCh, ctxStallRead :=
		StallBlockOp(ctx, config, StallableBlockGet, 1)
	onWriteStalledCh, writeUnstallCh, ctxStallWrite :=
		StallBlockOp(ctx, config, StallableBlockGet, 1)

	var wg sync.WaitGroup

	// Start the read and wait for it to stall.
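	// Note that the read and the write stall on separate StallBlockOp
	// instances, each with its own context, so they can be unstalled
	// independently below.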
	wg.Add(1)
	var readErr error
	go func() {
		defer wg.Done()

		_, readErr = kbfsOps.GetDirChildren(ctxStallRead, rootNode)
	}()
	<-onReadStalledCh

	// Start the write and wait for it to stall.
	wg.Add(1)
	var writeErr error
	go func() {
		defer wg.Done()

		data := []byte{1}
		writeErr = kbfsOps.Write(ctxStallWrite, fileNode, data, 0)
	}()
	<-onWriteStalledCh

	// Unstall the read, which shouldn't blow up.
	close(readUnstallCh)

	// Finally, unstall the write.
	close(writeUnstallCh)

	wg.Wait()

	// Do these in the main goroutine since t isn't goroutine
	// safe, and do these after wg.Wait() since we only know
	// they're set after the goroutines exit.
	if readErr != nil {
		t.Errorf("Couldn't get children: %v", readErr)
	}
	if writeErr != nil {
		t.Errorf("Couldn't write file: %v", writeErr)
	}
}

// mdRecordingKeyManager records the last KeyMetadata argument seen
// in its KeyManager methods.
type mdRecordingKeyManager struct {
	lastKMDMu sync.RWMutex
	lastKMD   libkey.KeyMetadata
	delegate  KeyManager
}

func (km *mdRecordingKeyManager) getLastKMD() libkey.KeyMetadata {
	km.lastKMDMu.RLock()
	defer km.lastKMDMu.RUnlock()
	return km.lastKMD
}

func (km *mdRecordingKeyManager) setLastKMD(kmd libkey.KeyMetadata) {
	km.lastKMDMu.Lock()
	defer km.lastKMDMu.Unlock()
	km.lastKMD = kmd
}

func (km *mdRecordingKeyManager) GetTLFCryptKeyForEncryption(
	ctx context.Context, kmd libkey.KeyMetadata) (kbfscrypto.TLFCryptKey, error) {
	km.setLastKMD(kmd)
	return km.delegate.GetTLFCryptKeyForEncryption(ctx, kmd)
}

func (km *mdRecordingKeyManager) GetTLFCryptKeyForMDDecryption(
	ctx context.Context, kmdToDecrypt, kmdWithKeys libkey.KeyMetadata) (
	kbfscrypto.TLFCryptKey, error) {
	km.setLastKMD(kmdToDecrypt)
	return km.delegate.GetTLFCryptKeyForMDDecryption(ctx,
		kmdToDecrypt, kmdWithKeys)
}

func (km *mdRecordingKeyManager) GetTLFCryptKeyForBlockDecryption(
	ctx context.Context, kmd libkey.KeyMetadata, blockPtr kbfsdata.BlockPointer) (
	kbfscrypto.TLFCryptKey, error) {
	km.setLastKMD(kmd)
	return km.delegate.GetTLFCryptKeyForBlockDecryption(ctx, kmd, blockPtr)
}

func (km *mdRecordingKeyManager) GetFirstTLFCryptKey(
	ctx context.Context, kmd libkey.KeyMetadata) (
	kbfscrypto.TLFCryptKey, error) {
	km.setLastKMD(kmd)
	return km.delegate.GetFirstTLFCryptKey(ctx, kmd)
}

func (km *mdRecordingKeyManager) GetTLFCryptKeyOfAllGenerations(
	ctx context.Context, kmd libkey.KeyMetadata) (
	keys []kbfscrypto.TLFCryptKey, err error) {
	km.setLastKMD(kmd)
	return km.delegate.GetTLFCryptKeyOfAllGenerations(ctx, kmd)
}

func (km *mdRecordingKeyManager) Rekey(
	ctx context.Context, md *RootMetadata, promptPaper bool) (
	bool, *kbfscrypto.TLFCryptKey, error) {
	km.setLastKMD(md)
	return km.delegate.Rekey(ctx, md, promptPaper)
}

// Test that a sync can happen concurrently with a write. This is a
// regression test for KBFS-558.
func TestKBFSOpsConcurBlockSyncWrite(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	// TODO: Use kbfsConcurTestShutdown.
	defer kbfsConcurTestShutdownNoCheck(ctx, t, config, cancel)

	<-config.BlockOps().TogglePrefetcher(false)

	km := &mdRecordingKeyManager{delegate: config.KeyManager()}

	config.SetKeyManager(km)

	// Turn off block caching.
	config.SetBlockCache(kbfsdata.NewBlockCacheStandard(0, 1<<30))

	// Create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	// Write to file to mark it dirty.
	data := []byte{1}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write to file: %v", err)

	lState := makeFBOLockState()

	fbo := kbfsOps.(*KBFSOpsStandard).getOpsNoAdd(
		ctx, rootNode.GetFolderBranch())
	if fbo.blocks.GetState(lState) != dirtyState {
		t.Fatal("Unexpectedly not in dirty state")
	}

	onSyncStalledCh, syncUnstallCh, ctxStallSync :=
		StallBlockOp(ctx, config, StallableBlockPut, 1)

	var wg sync.WaitGroup

	// Start the sync and wait for it to stall (on getting the dir
	// block).
	wg.Add(1)
	var syncErr error
	go func() {
		defer wg.Done()

		syncErr = kbfsOps.SyncAll(ctxStallSync, fileNode.GetFolderBranch())
	}()
	<-onSyncStalledCh

	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	deferredWriteCount := fbo.blocks.getDeferredWriteCountForTest(lState)
	if deferredWriteCount != 1 {
		t.Errorf("Unexpected deferred write count %d",
			deferredWriteCount)
	}

	// Unstall the sync.
	close(syncUnstallCh)

	wg.Wait()

	// Do this in the main goroutine since t isn't goroutine safe,
	// and do this after wg.Wait() since we only know it's set
	// after the goroutine exits.
	if syncErr != nil {
		t.Errorf("Couldn't sync: %v", syncErr)
	}

	md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
	require.NoError(t, err, "Couldn't get MD: %v", err)

	lastKMD := km.getLastKMD()

	if md.ReadOnlyRootMetadata != lastKMD {
		t.Error("Last MD seen by key manager != head")
	}
}

// Test that a sync can happen concurrently with a truncate. This is a
// regression test for KBFS-558.
func TestKBFSOpsConcurBlockSyncTruncate(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	<-config.BlockOps().TogglePrefetcher(false)

	km := &mdRecordingKeyManager{delegate: config.KeyManager()}

	config.SetKeyManager(km)

	// Turn off block caching.
	config.SetBlockCache(kbfsdata.NewBlockCacheStandard(0, 1<<30))

	// Create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	// Write to file to mark it dirty.
	data := []byte{1}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write to file: %v", err)

	lState := makeFBOLockState()

	fbo := kbfsOps.(*KBFSOpsStandard).getOpsNoAdd(
		ctx, rootNode.GetFolderBranch())
	if fbo.blocks.GetState(lState) != dirtyState {
		t.Fatal("Unexpectedly not in dirty state")
	}

	onSyncStalledCh, syncUnstallCh, ctxStallSync :=
		StallBlockOp(ctx, config, StallableBlockPut, 1)

	// Start the sync and wait for it to stall (on getting the dir
	// block).
	syncErrCh := make(chan error, 1)
	go func() {
		syncErrCh <- kbfsOps.SyncAll(ctxStallSync, fileNode.GetFolderBranch())
	}()
	select {
	case <-onSyncStalledCh:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for sync to stall: %v", ctx.Err())
	}

	err = kbfsOps.Truncate(ctx, fileNode, 0)
	require.NoError(t, err, "Couldn't truncate file: %v", err)

	deferredWriteCount := fbo.blocks.getDeferredWriteCountForTest(lState)
	if deferredWriteCount != 1 {
		t.Errorf("Unexpected deferred write count %d",
			deferredWriteCount)
	}

	// Unstall the sync.
	close(syncUnstallCh)

	// Check the sync result in the main goroutine, since t isn't
	// goroutine safe; the channel receive tells us when the sync
	// goroutine is done.
	select {
	case syncErr := <-syncErrCh:
		if syncErr != nil {
			t.Errorf("Couldn't sync: %v", syncErr)
		}
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for sync: %v", ctx.Err())
	}

	md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
	require.NoError(t, err, "Couldn't get MD: %v", err)

	lastKMD := km.getLastKMD()
	lastRMD, ok := lastKMD.(ReadOnlyRootMetadata)
	require.True(t, ok)

	if md.ReadOnlyRootMetadata != lastRMD {
		t.Error("Last MD seen by key manager != head")
	}
}

// Tests that a file that has been truncate-extended and overwritten
// several times can sync, and then take several deferred overwrites,
// plus one write that blocks until the dirty bcache has room. This
// is a repro for KBFS-1846.
func TestKBFSOpsTruncateAndOverwriteDeferredWithArchivedBlock(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)

	bsplitter, err := kbfsdata.NewBlockSplitterSimple(
		kbfsdata.MaxBlockSizeBytesDefault, 8*1024, config.Codec())
	if err != nil {
		t.Fatal(err)
	}
	config.SetBlockSplitter(bsplitter)

	// create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %+v", err)

	err = kbfsOps.Truncate(ctx, fileNode, 131072)
	require.NoError(t, err, "Couldn't truncate file: %+v", err)

	// Write a few blocks
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	err = kbfsOps.Write(ctx, fileNode, data[0:3], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	err = kbfsOps.Write(ctx, fileNode, data[3:6], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	err = kbfsOps.Write(ctx, fileNode, data[6:9], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)

	// Now overwrite those blocks to archive them
	newData := []byte{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
	err = kbfsOps.Write(ctx, fileNode, newData, 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)

	// Wait for the archiving to finish
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	if err != nil {
		t.Fatalf("Couldn't sync from server: %v", err)
	}

	fileNode2, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("b"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %+v", err)

	err = kbfsOps.Truncate(ctx, fileNode2, 131072)
	require.NoError(t, err, "Couldn't truncate file: %+v", err)

	// Now write the original first block, which has been archived,
	// and make sure it works.
	err = kbfsOps.Write(ctx, fileNode2, data[0:3], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	err = kbfsOps.Write(ctx, fileNode2, data[3:6], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	err = kbfsOps.Write(ctx, fileNode2, data[6:9], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	oldBServer := config.BlockServer()
	defer config.SetBlockServer(oldBServer)
	onSyncStalledCh, syncUnstallCh, ctxStallSync :=
		StallBlockOp(ctx, config, StallableBlockPut, 1)

	// Start the sync and wait for it to stall (on getting the dir
	// block).
	syncErrCh := make(chan error, 1)
	go func() {
		syncErrCh <- kbfsOps.SyncAll(ctxStallSync, fileNode2.GetFolderBranch())
	}()
	select {
	case <-onSyncStalledCh:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for sync to stall: %v", ctx.Err())
	}

	err = kbfsOps.Write(ctx, fileNode2, data[1:4], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	err = kbfsOps.Write(ctx, fileNode2, data[4:7], 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	// The last write blocks because the dirty buffer is now full.
	writeErrCh := make(chan error, 1)
	go func() {
		writeErrCh <- kbfsOps.Write(ctx, fileNode2, data[7:10], 0)
	}()

	// Unstall the sync.
	close(syncUnstallCh)

	// Check the results in the main goroutine, since t isn't
	// goroutine safe; the channel receives tell us when the
	// goroutines are done.
	select {
	case syncErr := <-syncErrCh:
		if syncErr != nil {
			t.Errorf("Couldn't sync: %v", syncErr)
		}
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for sync: %v", ctx.Err())
	}

	select {
	case writeErr := <-writeErrCh:
		if writeErr != nil {
			t.Errorf("Couldn't write file: %v", writeErr)
		}
	case <-ctx.Done():
		t.Fatalf("Timeout waiting for write: %v", ctx.Err())
	}

	err = kbfsOps.SyncAll(ctx, fileNode2.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)
}

// Test that a sync can happen concurrently with a read for a file
// large enough to have indirect blocks without messing anything
// up. This should pass with -race. This is a regression test for
// KBFS-537.
func TestKBFSOpsConcurBlockSyncReadIndirect(t *testing.T) {
	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown block ops.")
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// Turn off block caching.
	config.SetBlockCache(kbfsdata.NewBlockCacheStandard(0, 1<<30))

	// Use the smallest block size possible.
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	// Create a file.
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	// Write to file to make an indirect block.
	data := make([]byte, bsplitter.MaxSize()+1)
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write to file: %v", err)

	// Decouple the read context from the sync context.
	readCtx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Read in a loop in a separate goroutine until we encounter
	// an error or the test ends.
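	// (The loop checks readCtx.Done() before inspecting err, so a
	// read failure caused by the cancellation below isn't reported
	// as a test error.)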
	c := make(chan struct{})
	go func() {
		defer close(c)
	outer:
		for {
			_, err := kbfsOps.Read(readCtx, fileNode, data, 0)
			select {
			case <-readCtx.Done():
				break outer
			default:
			}
			if err != nil {
				t.Errorf("Couldn't read file: %v", err)
				break
			}
		}
	}()

	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)
	cancel()
	// Wait for the read loop to finish
	<-c
}

// Test that a write can survive a folder BlockPointer update
func TestKBFSOpsConcurWriteDuringFolderUpdate(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	data := []byte{1}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// Now update the folder pointer in some other way
	_, _, err = kbfsOps.CreateFile(ctx, rootNode, testPPS("b"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	// Now sync the original file and make sure the write survived
	if err := kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch()); err != nil {
		t.Fatalf("Couldn't sync: %v", err)
	}

	de, err := kbfsOps.Stat(ctx, fileNode)
	require.NoError(t, err, "Couldn't stat file: %v", err)
	if g, e := de.Size, len(data); g != uint64(e) {
		t.Errorf("Got wrong size %d; expected %d", g, e)
	}
}

// Test that a write can happen concurrently with a sync when there
// are multiple blocks in the file.
func TestKBFSOpsConcurWriteDuringSyncMultiBlocks(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDAfterPut, 1)

	// Make the blocks small, with multiple levels of indirection, but
	// make the unembedded size large, so we don't create thousands of
	// unembedded block change blocks.
	blockSize := int64(5)
	bsplit, err := kbfsdata.NewBlockSplitterSimpleExact(blockSize, 2, 100*1024)
	require.NoError(t, err)
	config.SetBlockSplitter(bsplit)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	// 2 blocks worth of data
	data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// sync these initial blocks
	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't do the first sync: %v", err)

	// there should be 7 blocks at this point: the original root block
	// + 2 modifications (create + write), the top indirect file block
	// and a modification (write), and its two children blocks.
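	// (10 bytes at a 5-byte block size means exactly two child blocks
	// under one indirect top block: 3 + 2 + 2 = 7.)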
	numCleanBlocks := config.BlockCache().(*kbfsdata.BlockCacheStandard).
		NumCleanTransientBlocks()
	if numCleanBlocks != 7 {
		t.Errorf("Unexpected number of cached clean blocks: %d\n",
			numCleanBlocks)
	}

	// write to the first block
	b1data := []byte{11, 12}
	err = kbfsOps.Write(ctx, fileNode, b1data, 0)
	require.NoError(t, err, "Couldn't write 1st block of file: %v", err)

	// start the sync
	errChan := make(chan error)
	go func() {
		errChan <- kbfsOps.SyncAll(putCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	<-onPutStalledCh

	// now make sure we can write the second block of the file and see
	// the new bytes we wrote
	newData := []byte{20}
	err = kbfsOps.Write(ctx, fileNode, newData, 9)
	require.NoError(t, err, "Couldn't write data: %v", err)

	// read the data back
	buf := make([]byte, 10)
	nr, err := kbfsOps.Read(ctx, fileNode, buf, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	expectedData := []byte{11, 12, 3, 4, 5, 6, 7, 8, 9, 20}
	if nr != 10 || !bytes.Equal(expectedData, buf) {
		t.Errorf("Got wrong data %v; expected %v", buf, expectedData)
	}

	// now unstall Sync and make sure there was no error
	close(putUnstallCh)
	err = <-errChan
	require.NoError(t, err, "Sync got an error: %v", err)

	// finally, make sure we can still read it after the sync too
	// (even though the second write hasn't been sync'd yet)
	buf2 := make([]byte, 10)
	nr, err = kbfsOps.Read(ctx, fileNode, buf2, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != 10 || !bytes.Equal(expectedData, buf2) {
		t.Errorf("2nd read: Got wrong data %v; expected %v", buf2, expectedData)
	}

	// Final sync to clean up
	if err := kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch()); err != nil {
		t.Errorf("Couldn't sync the final write: %v", err)
	}
}

type stallingBServer struct {
	*BlockServerMemory

	readyChan  chan<- struct{}
	goChan     <-chan struct{}
	finishChan chan<- struct{}
}

func newStallingBServer(log logger.Logger) *stallingBServer {
	return &stallingBServer{BlockServerMemory: NewBlockServerMemory(log)}
}

func (fc *stallingBServer) maybeWaitOnChannel(ctx context.Context) error {
	if fc.readyChan == nil {
		return nil
	}

	// say we're ready, and wait for a signal to proceed or a
	// cancellation.
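	// The handshake is: send on readyChan to announce this call has
	// arrived, then block on goChan until the test releases it;
	// maybeFinishOnChannel below reports completion on finishChan.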
	select {
	case fc.readyChan <- struct{}{}:
	case <-ctx.Done():
		return ctx.Err()
	}
	select {
	case <-fc.goChan:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (fc *stallingBServer) maybeFinishOnChannel(ctx context.Context) error {
	if fc.finishChan != nil {
		select {
		case fc.finishChan <- struct{}{}:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}

func (fc *stallingBServer) Get(
	ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
	context kbfsblock.Context, cacheType DiskBlockCacheType) (
	buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,
	err error) {
	err = fc.maybeWaitOnChannel(ctx)
	if err != nil {
		return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
	}
	defer func() {
		finishErr := fc.maybeFinishOnChannel(ctx)
		if err == nil {
			err = finishErr
		}
	}()

	return fc.BlockServerMemory.Get(ctx, tlfID, id, context, cacheType)
}

func (fc *stallingBServer) Put(
	ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
	context kbfsblock.Context,
	buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,
	cacheType DiskBlockCacheType) (err error) {
	err = fc.maybeWaitOnChannel(ctx)
	if err != nil {
		return err
	}
	defer func() {
		finishErr := fc.maybeFinishOnChannel(ctx)
		if err == nil {
			err = finishErr
		}
	}()

	return fc.BlockServerMemory.Put(
		ctx, tlfID, id, context, buf, serverHalf, cacheType)
}

// Test that a write consisting of multiple blocks can be canceled
// before all blocks have been written.
func TestKBFSOpsConcurWriteParallelBlocksCanceled(t *testing.T) {
	if maxParallelBlockPuts <= 1 {
		t.Skip("Skipping because we are not putting blocks in parallel.")
	}
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// give it a remote block server with a fake client
	log := config.MakeLogger("")
	config.BlockServer().Shutdown(ctx)
	b := newStallingBServer(log)
	config.SetBlockServer(b)

	// Make the blocks small, with multiple levels of indirection, but
	// make the unembedded size large, so we don't create thousands of
	// unembedded block change blocks.
	blockSize := int64(5)
	bsplit, err := kbfsdata.NewBlockSplitterSimpleExact(blockSize, 2, 100*1024)
	require.NoError(t, err)
	config.SetBlockSplitter(bsplit)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	// Two initial blocks, then maxParallelBlockPuts blocks that
	// will be processed but discarded, then three extra blocks
	// that will be ignored.
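	// With the 5-byte exact splitter above, the write below produces
	// exactly totalFileBlocks leaf blocks (blockSize bytes each).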
	initialBlocks := 2
	extraBlocks := 3
	totalFileBlocks := initialBlocks + maxParallelBlockPuts + extraBlocks
	var data []byte
	for i := int64(0); i < blockSize*int64(totalFileBlocks); i++ {
		data = append(data, byte(i))
	}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// now set a control channel, let a couple blocks go in, and then
	// cancel the context
	readyChan := make(chan struct{})
	goChan := make(chan struct{})
	finishChan := make(chan struct{})
	b.readyChan = readyChan
	b.goChan = goChan
	b.finishChan = finishChan

	prevNBlocks := b.numBlocks()
	nowNBlocks := 0
	ctx2, cancel2 := context.WithCancel(ctx)
	go func() {
		// let the first initialBlocks blocks through.
		for i := 0; i < initialBlocks; i++ {
			select {
			case <-readyChan:
			case <-ctx.Done():
				t.Error(ctx.Err())
			}
		}

		for i := 0; i < initialBlocks; i++ {
			select {
			case goChan <- struct{}{}:
			case <-ctx.Done():
				t.Error(ctx.Err())
			}
		}

		for i := 0; i < initialBlocks; i++ {
			select {
			case <-finishChan:
			case <-ctx.Done():
				t.Error(ctx.Err())
			}
		}

		// Get the number of blocks now, before canceling the context,
		// because after canceling the context we would be racing with
		// cleanup code that could delete the put blocks.
		nowNBlocks = b.numBlocks()

		// Let each parallel block worker block on readyChan.
		for i := 0; i < maxParallelBlockPuts; i++ {
			select {
			case <-readyChan:
			case <-ctx.Done():
				t.Error(ctx.Err())
			}
		}

		// Make sure all the workers are busy.
		select {
		case <-readyChan:
			t.Error("Worker unexpectedly ready")
		case <-ctx.Done():
			t.Error(ctx.Err())
		default:
		}

		// Let all the workers go through.
		cancel2()
	}()

	err = kbfsOps.SyncAll(ctx2, fileNode.GetFolderBranch())
	if err != context.Canceled {
		t.Errorf("Sync did not get canceled error: %v", err)
	}
	if nowNBlocks != prevNBlocks+2 {
		t.Errorf("Unexpected number of blocks; prev = %d, now = %d",
			prevNBlocks, nowNBlocks)
	}

	// Make sure there are no more workers, i.e. the extra blocks
	// aren't sent to the server.
	select {
	case <-readyChan:
		t.Error("Worker unexpectedly ready")
	default:
	}

	// As a regression test for KBFS-635, check that a second sync
	// succeeds, and that future operations also succeed.
	//
	// Create new objects to avoid racing with goroutines from the
	// first sync.
	config.BlockServer().Shutdown(ctx)
	b = newStallingBServer(log)
	config.SetBlockServer(b)
	if err := kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch()); err != nil {
		t.Fatalf("Second sync failed: %v", err)
	}

	if _, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("b"), false, NoExcl); err != nil {
		t.Fatalf("Couldn't create file after sync: %v", err)
	}

	// Avoid checking state when using a fake block server.
	config.MDServer().Shutdown()
}

// Test that, when writing multiple blocks in parallel, one error will
1275 func TestKBFSOpsConcurWriteParallelBlocksError(t *testing.T) { 1276 config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user") 1277 defer kbfsConcurTestShutdown(ctx, t, config, cancel) 1278 1279 // give it a mock'd block server 1280 ctr := NewSafeTestReporter(t) 1281 mockCtrl := gomock.NewController(ctr) 1282 defer mockCtrl.Finish() 1283 defer ctr.CheckForFailures() 1284 b := NewMockBlockServer(mockCtrl) 1285 config.BlockServer().Shutdown(ctx) 1286 config.SetBlockServer(b) 1287 1288 // from the folder creation, then 2 for file creation 1289 c := b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), 1290 gomock.Any(), gomock.Any(), gomock.Any()).Times(3).Return(nil) 1291 b.EXPECT().ArchiveBlockReferences(gomock.Any(), gomock.Any(), 1292 gomock.Any()).AnyTimes().Return(nil) 1293 1294 // Make the blocks small, with multiple levels of indirection, but 1295 // make the unembedded size large, so we don't create thousands of 1296 // unembedded block change blocks. 1297 blockSize := int64(5) 1298 bsplit, err := kbfsdata.NewBlockSplitterSimpleExact(blockSize, 2, 100*1024) 1299 require.NoError(t, err) 1300 config.SetBlockSplitter(bsplit) 1301 1302 // create and write to a file 1303 rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private) 1304 1305 kbfsOps := config.KBFSOps() 1306 fileNode, _, err := kbfsOps.CreateFile( 1307 ctx, rootNode, testPPS("a"), false, NoExcl) 1308 require.NoError(t, err, "Couldn't create file: %v", err) 1309 // 15 blocks 1310 var data []byte 1311 fileBlocks := int64(15) 1312 for i := int64(0); i < blockSize*fileBlocks; i++ { 1313 data = append(data, byte(i)) 1314 } 1315 err = kbfsOps.Write(ctx, fileNode, data, 0) 1316 require.NoError(t, err, "Couldn't write file: %v", err) 1317 1318 // let two blocks through and fail the third: 1319 c = b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), 1320 gomock.Any(), gomock.Any(), gomock.Any()).Times(2).After(c).Return(nil) 1321 putErr := errors.New("This is a forced error on put") 1322 errPtrChan := make(chan kbfsdata.BlockPointer) 1323 c = b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), 1324 gomock.Any(), gomock.Any(), gomock.Any()). 1325 Do(func(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, 1326 context kbfsblock.Context, buf []byte, 1327 serverHalf kbfscrypto.BlockCryptKeyServerHalf, 1328 _ DiskBlockCacheType) { 1329 errPtrChan <- kbfsdata.BlockPointer{ 1330 ID: id, 1331 Context: context, 1332 } 1333 }).After(c).Return(putErr) 1334 // let the rest through 1335 proceedChan := make(chan struct{}) 1336 b.EXPECT().Put(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), 1337 gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes(). 1338 Do(func(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, 1339 context kbfsblock.Context, buf []byte, 1340 serverHalf kbfscrypto.BlockCryptKeyServerHalf, 1341 _ DiskBlockCacheType) { 1342 <-proceedChan 1343 }).After(c).Return(nil) 1344 b.EXPECT().RemoveBlockReferences(gomock.Any(), gomock.Any(), gomock.Any()). 
		AnyTimes().Return(nil, nil)
	b.EXPECT().Shutdown(gomock.Any()).AnyTimes()

	var errPtr kbfsdata.BlockPointer
	go func() {
		errPtr = <-errPtrChan
		close(proceedChan)
	}()

	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
	if err != putErr {
		t.Errorf("Sync did not get the expected error: %v", err)
	}

	// wait for proceedChan to close, so we know the errPtr has been set
	<-proceedChan

	// Make sure the error'd file didn't make it to the actual cache
	// -- it's still in the permanent cache because the file might
	// still be read or sync'd later.
	err = config.BlockCache().DeletePermanent(errPtr.ID)
	require.NoError(t, err)
	if _, err := config.BlockCache().Get(errPtr); err == nil {
		t.Errorf("Failed block put for %v left block in cache", errPtr)
	}

	// State checking won't happen on the mock block server since we
	// leave ourselves in a dirty state.
}

func testKBFSOpsMultiBlockWriteDuringRetriedSync(t *testing.T, nFiles int) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// Use the smallest possible block size.
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	oldBServer := config.BlockServer()
	defer config.SetBlockServer(oldBServer)
	onSyncStalledCh, syncUnstallCh, ctxStallSync :=
		StallBlockOp(ctx, config, StallableBlockPut, 1)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNodes := make([]Node, nFiles)

	fileNodes[0], _, err = kbfsOps.CreateFile(
		ctx, rootNode, testPPS("file0"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	firstData := make([]byte, 30)
	// Write 2 blocks worth of data
	for i := 0; i < 30; i++ {
		firstData[i] = byte(i)
	}

	err = kbfsOps.Write(ctx, fileNodes[0], firstData, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	err = kbfsOps.SyncAll(ctx, fileNodes[0].GetFolderBranch())
	require.NoError(t, err, "First sync failed: %v", err)

	// Remove the first file, and wait for the archiving to complete.
	err = kbfsOps.RemoveEntry(ctx, rootNode, testPPS("file0"))
	require.NoError(t, err, "Couldn't remove file: %v", err)

	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %v", err)

	fileNode2, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("file0"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	// Now write the identical first block and sync it.
	err = kbfsOps.Write(ctx, fileNode2, firstData[:20], 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// Write all the rest of the files to sync concurrently, if any.
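	// (Index 0 is skipped: "file0" was already recreated above as
	// fileNode2.)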
	for i := 1; i < nFiles; i++ {
		name := fmt.Sprintf("file%d", i)
		fileNode, _, err := kbfsOps.CreateFile(
			ctx, rootNode, testPPS(name), false, NoExcl)
		require.NoError(t, err, "Couldn't create file: %v", err)
		data := make([]byte, 30)
		// Write 2 blocks worth of data
		for j := 0; j < 30; j++ {
			data[j] = byte(j + 30*i)
		}
		err = kbfsOps.Write(ctx, fileNode, data, 0)
		require.NoError(t, err, "Couldn't write file: %v", err)
		fileNodes[i] = fileNode
	}

	// Sync the initial two data blocks
	errChan := make(chan error)
	// start the sync
	go func() {
		errChan <- kbfsOps.SyncAll(ctxStallSync, fileNode2.GetFolderBranch())
	}()
	select {
	case <-onSyncStalledCh:
	case <-ctx.Done():
		t.Fatalf("Timeout waiting to stall")
	}

	// Now write the second block.
	err = kbfsOps.Write(ctx, fileNode2, firstData[20:], 20)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// Unstall the sync.
	close(syncUnstallCh)
	err = <-errChan
	require.NoError(t, err, "Sync got an error: %v", err)

	// Final sync
	err = kbfsOps.SyncAll(ctx, fileNode2.GetFolderBranch())
	require.NoError(t, err, "Final sync failed: %v", err)

	gotData := make([]byte, 30)
	nr, err := kbfsOps.Read(ctx, fileNode2, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	if !bytes.Equal(firstData, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", firstData, gotData)
	}

	// Make sure there are no dirty blocks left at the end of the test.
	dbcs := config.DirtyBlockCache().(*kbfsdata.DirtyBlockCacheStandard)
	numDirtyBlocks := dbcs.Size()
	if numDirtyBlocks != 0 {
		t.Errorf("%d dirty blocks left after final sync", numDirtyBlocks)
	}
}

// When writes happen on a multi-block file concurrently with a sync,
// and the sync has to retry due to an archived block, test that
// everything works correctly. Regression test for KBFS-700.
func TestKBFSOpsMultiBlockWriteDuringRetriedSync(t *testing.T) {
	testKBFSOpsMultiBlockWriteDuringRetriedSync(t, 1)
}

// When writes happen on a multi-block file concurrently with a
// 2-file sync, and the sync has to retry due to an archived
// block, test that everything works correctly.
func TestKBFSOpsMultiBlockWriteDuringRetriedSyncAllTwoFiles(t *testing.T) {
	testKBFSOpsMultiBlockWriteDuringRetriedSync(t, 2)
}

// Test that a sync of a multi-block file that hits both a retriable
// error and an unretriable error leaves the system in a clean state.
// Regression test for KBFS-1508.
func testKBFSOpsMultiBlockWriteWithRetryAndError(t *testing.T, nFiles int) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// Use the smallest possible block size.
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	nFileBlocks := testCalcNumFileBlocks(40, bsplitter) * nFiles
	t.Logf("nFileBlocks=%d", nFileBlocks)

	oldBServer := config.BlockServer()
	defer config.SetBlockServer(oldBServer)
	onSyncStalledCh, syncUnstallCh, ctxStallSync :=
		StallBlockOp(ctx, config, StallableBlockPut, nFileBlocks)
	ctxStallSync, cancel2 := context.WithCancel(ctxStallSync)

	t.Log("Create and write to a file: file0")
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNodes := make([]Node, nFiles)
	fileNodes[0], _, err = kbfsOps.CreateFile(
		ctx, rootNode, testPPS("file0"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	var data []byte

	t.Log("Write 2 blocks worth of data")
	for i := 0; i < 30; i++ {
		data = append(data, byte(i))
	}
	err = kbfsOps.Write(ctx, fileNodes[0], data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	t.Log("Sync those blocks of data")
	err = kbfsOps.SyncAll(ctx, fileNodes[0].GetFolderBranch())
	require.NoError(t, err, "First sync failed: %v", err)

	t.Log("Retrieve the metadata for the blocks so far")
	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
	lState := makeFBOLockState()
	head, _ := ops.getHead(ctx, lState, mdNoCommit)
	filePath := ops.nodeCache.PathFromNode(fileNodes[0])
	pointerMap, err := ops.blocks.GetIndirectFileBlockInfos(ctx, lState, head, filePath)
	require.NoError(t, err, "Couldn't get the pointer map for file0: %+v", err)

	t.Log("Remove that file")
	err = kbfsOps.RemoveEntry(ctx, rootNode, testPPS("file0"))
	require.NoError(t, err, "Couldn't remove file: %v", err)

	t.Log("Sync from server, waiting for the archiving to complete")
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %v", err)

	t.Log("Ensure that the block references have been removed rather than just archived")
	bOps := config.BlockOps()
	h, err := tlfhandle.ParseHandle(
		ctx, config.KBPKI(), config.MDOps(), nil, "test_user", tlf.Private)
	require.NoError(t, err)
	ptrs := make([]kbfsdata.BlockPointer, 0, len(pointerMap))
	for _, ptr := range pointerMap {
		ptrs = append(ptrs, ptr.BlockPointer)
	}
	_, err = bOps.Delete(ctx, h.TlfID(), ptrs)
	require.NoError(t, err)

	t.Log("Create file0 again")
	fileNode2, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("file0"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	t.Log("Now write the identical first block, plus a new block and sync it.")
	err = kbfsOps.Write(ctx, fileNode2, data[:20], 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	err = kbfsOps.Write(ctx, fileNode2, data[10:30], 20)
	require.NoError(t, err, "Couldn't write file: %v", err)

	t.Log("Write all the rest of the files to sync concurrently, if any.")
	for i := 1; i < nFiles; i++ {
		name := fmt.Sprintf("Create file%d", i)
		fileNode, _, err := kbfsOps.CreateFile(
			ctx, rootNode, testPPS(name), false, NoExcl)
		require.NoError(t, err, "Couldn't create file: %v", err)
		data := make([]byte, 30)
		// Write 2 blocks worth of data
		for j := 0; j < 30; j++ {
			data[j] = byte(j + 30*i)
		}
		err = kbfsOps.Write(ctx, fileNode, data, 0)
		require.NoError(t, err, "Couldn't write file: %v", err)
		fileNodes[i] = fileNode
	}

	t.Log("Sync the initial three data blocks")
	errChan := make(chan error, 1)

	t.Log("Start the sync in a goroutine")
	go func() {
		errChan <- kbfsOps.SyncAll(ctxStallSync, fileNode2.GetFolderBranch())
	}()

	t.Log("Wait for the first block to finish (before the retry)")
	select {
	case <-onSyncStalledCh:
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}

	t.Log("Dirty the last block and extend it, so the one that was sent as " +
		"part of the first sync is no longer part of the file.")
	err = kbfsOps.Write(ctx, fileNode2, data[10:20], 40)
	require.NoError(t, err, "Couldn't write file: %v", err)
	select {
	case syncUnstallCh <- struct{}{}:
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}

	t.Log("Wait for the rest of the first set of blocks to finish " +
		"(before the retry)")
	for i := 0; i < nFileBlocks-1; i++ {
		t.Logf("Waiting for sync %d", i)
		select {
		case <-onSyncStalledCh:
		case <-ctx.Done():
			t.Fatal(ctx.Err())
		}
		select {
		case syncUnstallCh <- struct{}{}:
		case <-ctx.Done():
			t.Fatal(ctx.Err())
		}
	}

	t.Log("Once the first block of the retry comes in, cancel everything.")
	select {
	case <-onSyncStalledCh:
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}
	cancel2()

	t.Log("Unstall the sync.")
	close(syncUnstallCh)
	err = <-errChan
	if err != context.Canceled {
		t.Errorf("Sync got an unexpected error: %v", err)
	}

	t.Log("Finish the sync.")
	err = kbfsOps.SyncAll(ctx, fileNode2.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file after error: %v", err)

	gotData := make([]byte, 50)
	nr, err := kbfsOps.Read(ctx, fileNode2, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	expectedData := make([]byte, 0, 50)
	expectedData = append(expectedData, data[0:20]...)
	expectedData = append(expectedData, data[10:30]...)
	expectedData = append(expectedData, data[10:20]...)
	if !bytes.Equal(expectedData, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", expectedData, gotData)
	}

	t.Log("Make sure there are no dirty blocks left at the end of the test.")
	dbcs := config.DirtyBlockCache().(*kbfsdata.DirtyBlockCacheStandard)
	numDirtyBlocks := dbcs.Size()
	if numDirtyBlocks != 0 {
		t.Errorf("%d dirty blocks left after final sync", numDirtyBlocks)
	}
	// Shutdown the MDServer to disable state checking at the end of the test,
	// since we hacked stuff a bit by deleting blocks manually rather than
	// allowing them to be garbage collected.
	config.MDServer().Shutdown()
}

// Test that a sync of a multi-block file that hits both a retriable
// error and an unretriable error leaves the system in a clean state.
// Regression test for KBFS-1508.

// Test that a sync of a multi-block file that hits both a retriable
// error and an unretriable error leaves the system in a clean state.
// Regression test for KBFS-1508.
func TestKBFSOpsMultiBlockWriteWithRetryAndError(t *testing.T) {
	testKBFSOpsMultiBlockWriteWithRetryAndError(t, 1)
}

// Test that a multi-file sync that includes a multi-block file that
// hits both a retriable error and an unretriable error leaves the
// system in a clean state.
func TestKBFSOpsMultiBlockWriteWithRetryAndErrorTwoFiles(t *testing.T) {
	testKBFSOpsMultiBlockWriteWithRetryAndError(t, 2)
}

// This tests the situation where cancellation happens when the MD write has
// already started, and cancellation is delayed. Since no extra delay greater
// than the grace period in MD writes is introduced, Create should succeed.
func TestKBFSOpsCanceledCreateNoError(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(context.Background(), config, StallableMDPut, 1)

	putCtx, cancel2 := context.WithCancel(putCtx)

	putCtx, err := libcontext.NewContextWithCancellationDelayer(putCtx)
	if err != nil {
		t.Fatal(err)
	}

	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	errChan := make(chan error, 1)
	go func() {
		_, _, err := kbfsOps.CreateFile(
			putCtx, rootNode, testPPS("a"), false, WithExcl)
		errChan <- err
	}()

	// Wait until Create gets stuck at MDOps.Put(). At this point, the delayed
	// cancellation should have been enabled.
	select {
	case <-onPutStalledCh:
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}
	cancel2()
	close(putUnstallCh)

	// We expect no canceled error.
	select {
	case err = <-errChan:
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}
	require.NoError(t, err, "Create returned error: %v", err)
	ctx2 := libcontext.BackgroundContextWithCancellationDelayer()
	defer func() {
		err := libcontext.CleanupCancellationDelayer(ctx2)
		require.NoError(t, err)
	}()
	if _, _, err = kbfsOps.Lookup(
		ctx2, rootNode, testPPS("a")); err != nil {
		t.Fatalf("Lookup returned error: %v", err)
	}
}
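
// The test above relies on libcontext's cancellation delayer, which keeps a
// context alive for a grace period after its parent is canceled so that an
// in-flight MD write can finish. As a rough illustration of the idea only (a
// simplified, hypothetical sketch, not the real libcontext implementation),
// a delayer can be built from the context package alone:
func exampleDelayedCancelContext(
	parent context.Context, grace time.Duration) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-parent.Done()
		// The parent is canceled; give the operation a grace period
		// before propagating the cancellation.
		time.Sleep(grace)
		cancel()
	}()
	// A real implementation would also propagate the parent's values
	// and deadline, and avoid leaking this goroutine if the parent is
	// never canceled.
	return ctx
}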

// This tests the situation where cancellation happens when the MD write has
// already started, and cancellation is delayed. A delay larger than the
// grace period is introduced to the MD write, so Create should fail. This is
// to ensure Ctrl-C is able to interrupt the process eventually after the
// grace period.
func TestKBFSOpsCanceledCreateDelayTimeoutErrors(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// This essentially fast-forwards the grace period timer, making
	// cancellation happen much faster. This way we can avoid time.Sleep.
	config.SetDelayedCancellationGracePeriod(0)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(context.Background(), config, StallableMDPut, 1)

	putCtx, cancel2 := context.WithCancel(putCtx)

	putCtx, err := libcontext.NewContextWithCancellationDelayer(putCtx)
	if err != nil {
		t.Fatal(err)
	}

	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	errChan := make(chan error, 1)
	go func() {
		_, _, err := kbfsOps.CreateFile(
			putCtx, rootNode, testPPS("a"), false, WithExcl)
		errChan <- err
	}()

	// Wait until Create gets stuck at MDOps.Put(). At this point, the delayed
	// cancellation should have been enabled.
	select {
	case <-onPutStalledCh:
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}
	cancel2()

	select {
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	case <-putCtx.Done():
		// The cancellation delayer makes cancellation become async. This
		// makes sure putCtx is actually canceled before unstalling.
	case <-time.After(time.Second):
		// The grace period is 0s, so a full second is far too long;
		// something must have gone wrong!
		t.Fatalf("it took too long for cancellation to happen")
	}

	close(putUnstallCh)

	// We expect a canceled error.
	select {
	case err = <-errChan:
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	}
	if err != context.Canceled {
		t.Fatalf("Create didn't fail after grace period after cancellation."+
			" Got %v; expecting context.Canceled", err)
	}

	ctx2 := libcontext.BackgroundContextWithCancellationDelayer()
	defer func() {
		err := libcontext.CleanupCancellationDelayer(ctx2)
		require.NoError(t, err)
	}()
	// Do another op, which generates a new revision, to make sure
	// CheckConfigAndShutdown doesn't get stuck.
	if _, _, err = kbfsOps.CreateFile(ctx2,
		rootNode, testPPS("b"), false, NoExcl); err != nil {
		t.Fatalf("throwaway op failed: %v", err)
	}
}
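
// Several of these tests hand-roll the same three-way select: wait for a
// signal channel, but bail out if the test context dies or too much time
// passes. A hedged sketch of that pattern as a shared helper (hypothetical;
// the tests above inline it instead):
func exampleWaitOrFatal(
	ctx context.Context, t *testing.T, ch <-chan struct{},
	limit time.Duration) {
	select {
	case <-ch:
		// Got the signal; proceed.
	case <-ctx.Done():
		t.Fatal(ctx.Err())
	case <-time.After(limit):
		t.Fatalf("timed out after %s", limit)
	}
}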

// Test that a Sync that is canceled during a successful MD put works.
func TestKBFSOpsConcurCanceledSyncSucceeds(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDAfterPut, 1)

	// Use the smallest possible block size.
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	data := make([]byte, 30)
	for i := 0; i < 30; i++ {
		data[i] = 1
	}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
	unpauseDeleting := make(chan struct{})
	ops.fbm.blocksToDeletePauseChan <- unpauseDeleting

	// start the sync
	errChan := make(chan error)
	cancelCtx, cancel := context.WithCancel(putCtx)
	go func() {
		errChan <- kbfsOps.SyncAll(cancelCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	<-onPutStalledCh
	cancel()
	close(putUnstallCh)

	// We expect a canceled error
	err = <-errChan
	if err != context.Canceled {
		t.Fatalf("Didn't get the expected canceled error: %v", err)
	}

	// Flush the file. This will result in conflict resolution, and
	// an extra copy of the file, but that's ok for now.
	if err := kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch()); err != nil {
		t.Fatalf("Couldn't sync: %v", err)
	}
	if len(ops.fbm.blocksToDeleteChan) == 0 {
		t.Fatalf("No blocks to delete after error")
	}

	unpauseDeleting <- struct{}{}

	err = ops.fbm.waitForDeletingBlocks(ctx)
	require.NoError(t, err)
	if len(ops.fbm.blocksToDeleteChan) > 0 {
		t.Fatalf("Blocks left to delete after sync")
	}

	// The first put actually succeeded, so call SyncFromServer and
	// make sure it worked.
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %v", err)

	gotData := make([]byte, 30)
	nr, err := kbfsOps.Read(ctx, fileNode, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	if !bytes.Equal(data, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", data, gotData)
	}
}
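
// The test above pauses the fbm's block-deletion worker by sending it a
// channel on blocksToDeletePauseChan; the worker then blocks until the test
// signals that same channel. A minimal sketch of this pause protocol (a
// hypothetical worker loop, not the fbm's actual one):
func examplePausableWorker(ctx context.Context,
	pauseChan <-chan chan struct{}, workCh <-chan func()) {
	for {
		select {
		case unpause := <-pauseChan:
			// Paused: block until the controller signals the very
			// channel it handed us, or the context dies.
			select {
			case <-unpause:
			case <-ctx.Done():
				return
			}
		case work := <-workCh:
			work()
		case <-ctx.Done():
			return
		}
	}
}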

// Test that when a Sync is canceled during a successful MD put, and
// then another Sync hits a conflict but is also canceled, and finally
// a Sync succeeds (as a conflict), the TLF is left in a reasonable
// state where CR can succeed. Regression test for KBFS-1569.
func TestKBFSOpsConcurCanceledSyncFailsAfterCanceledSyncSucceeds(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDAfterPut, 1)

	// Use the smallest possible block size.
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	data := make([]byte, 30)
	for i := 0; i < 30; i++ {
		data[i] = 1
	}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// start the sync
	errChan := make(chan error)
	cancelCtx, cancel := context.WithCancel(putCtx)
	go func() {
		errChan <- kbfsOps.SyncAll(cancelCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	<-onPutStalledCh
	cancel()
	close(putUnstallCh)

	// We expect a canceled error
	err = <-errChan
	if err != context.Canceled {
		t.Fatalf("Didn't get the expected canceled error: %v", err)
	}

	// Cancel this one after it succeeds.
	onUnmergedPutStalledCh, unmergedPutUnstallCh, putUnmergedCtx :=
		StallMDOp(ctx, config, StallableMDAfterPutUnmerged, 1)

	// Flush the file again, which will result in an unmerged put,
	// which we will also cancel.
	cancelCtx, cancel = context.WithCancel(putUnmergedCtx)
	go func() {
		errChan <- kbfsOps.SyncAll(cancelCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.PutUnmerged()
	<-onUnmergedPutStalledCh
	cancel()
	close(unmergedPutUnstallCh)

	// We expect a canceled error, or possibly a nil error since we
	// ignore the PutUnmerged error internally.
	err = <-errChan
	if err != context.Canceled && err != nil {
		t.Fatalf("Didn't get the expected canceled error: %v", err)
	}

	// Now finally flush the file again, which will result in a
	// conflict file.
	if err := kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch()); err != nil {
		t.Fatalf("Couldn't sync: %v", err)
	}

	// Wait for all the deletes to go through.
	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
	err = ops.fbm.waitForDeletingBlocks(ctx)
	require.NoError(t, err)
	if len(ops.fbm.blocksToDeleteChan) > 0 {
		t.Fatalf("Blocks left to delete after sync")
	}

	// Wait for CR to finish
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %v", err)
}
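
// Both canceled-sync tests above follow the same recovery recipe: a SyncAll
// that dies with context.Canceled leaves dirty state behind, and a later
// SyncAll on a fresh context flushes it. A hedged sketch of that recipe as a
// reusable loop (hypothetical; the tests inline the two steps, and each
// attempt is expected to supply its own context):
func exampleSyncWithRetry(attempts int, sync func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = sync(); err != context.Canceled {
			// Either success or a non-retriable error.
			return err
		}
	}
	return err
}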

// Test that truncating a block to a zero-contents block, for which a
// duplicate has previously been archived, works correctly after a
// cancel. Regression test for KBFS-727.
func TestKBFSOpsTruncateWithDupBlockCanceled(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	_, _, err := kbfsOps.CreateFile(ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	// Remove that file, and wait for the archiving to complete
	err = kbfsOps.RemoveEntry(ctx, rootNode, testPPS("a"))
	require.NoError(t, err, "Couldn't remove file: %v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %v", err)

	fileNode2, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	var data []byte
	// Write some data
	for i := 0; i < 30; i++ {
		data = append(data, byte(i))
	}
	err = kbfsOps.Write(ctx, fileNode2, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	err = kbfsOps.SyncAll(ctx, fileNode2.GetFolderBranch())
	require.NoError(t, err, "First sync failed: %v", err)

	// Now truncate and sync, canceling during the block puts
	err = kbfsOps.Truncate(ctx, fileNode2, 0)
	require.NoError(t, err, "Couldn't truncate file: %v", err)

	// Sync the initial two data blocks
	errChan := make(chan error)
	// start the sync
	cancelCtx, cancel := context.WithCancel(ctx)

	oldBServer := config.BlockServer()
	defer config.SetBlockServer(oldBServer)
	onSyncStalledCh, syncUnstallCh, ctxStallSync :=
		StallBlockOp(cancelCtx, config, StallableBlockPut, 1)

	go func() {
		errChan <- kbfsOps.SyncAll(ctxStallSync, fileNode2.GetFolderBranch())
	}()
	<-onSyncStalledCh

	cancel()
	// Unstall the sync.
	close(syncUnstallCh)
	err = <-errChan
	if err != context.Canceled {
		t.Errorf("Sync got wrong error: %v", err)
	}

	// Final sync
	err = kbfsOps.SyncAll(ctx, fileNode2.GetFolderBranch())
	require.NoError(t, err, "Final sync failed: %v", err)
}

type blockOpsOverQuota struct {
	BlockOps
}

func (booq *blockOpsOverQuota) Put(ctx context.Context, tlfID tlf.ID,
	blockPtr kbfsdata.BlockPointer, readyBlockData kbfsdata.ReadyBlockData) error {
	return kbfsblock.ServerErrorOverQuota{
		Throttled: true,
	}
}
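
// blockOpsOverQuota shows this file's standard fault-injection trick: embed
// the real interface so every method is inherited, then override just the
// one under test. The same shape works for observing calls instead of
// failing them; a sketch under that assumption (hypothetical type, not used
// by these tests):
type blockOpsCountingPuts struct {
	BlockOps

	putLock sync.Mutex
	puts    int
}

func (bocp *blockOpsCountingPuts) Put(ctx context.Context, tlfID tlf.ID,
	blockPtr kbfsdata.BlockPointer, readyBlockData kbfsdata.ReadyBlockData) error {
	bocp.putLock.Lock()
	bocp.puts++
	bocp.putLock.Unlock()
	// Delegate to the real implementation, unlike blockOpsOverQuota,
	// which replaces it with an error.
	return bocp.BlockOps.Put(ctx, tlfID, blockPtr, readyBlockData)
}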

// Test that a quota error causes deferred writes to error.
// Regression test for KBFS-751.
func TestKBFSOpsErrorOnBlockedWriteDuringSync(t *testing.T) {
	t.Skip("Broken pending KBFS-1261")

	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	// Write over the dirty amount of data. TODO: make this
	// configurable for a speedier test.
	const minSyncBufCap = int64(kbfsdata.MaxBlockSizeBytesDefault)
	data := make([]byte, minSyncBufCap+1)
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	realBlockOps := config.BlockOps()

	config.SetBlockOps(&blockOpsOverQuota{BlockOps: config.BlockOps()})

	onSyncStalledCh, syncUnstallCh, ctxStallSync :=
		StallBlockOp(ctx, config, StallableBlockPut, 1)

	// Block the Sync on the initial data blocks.
	syncErrCh := make(chan error)
	go func() {
		syncErrCh <- kbfsOps.SyncAll(ctxStallSync, fileNode.GetFolderBranch())
	}()
	<-onSyncStalledCh

	// Write more data, which should get accepted but deferred.
	moreData := make([]byte, minSyncBufCap*2+1)
	err = kbfsOps.Write(ctx, fileNode, moreData, int64(len(data)))
	require.NoError(t, err, "Couldn't write file: %v", err)

	// Now write more data, which should get blocked.
	newData := make([]byte, 1)
	writeErrCh := make(chan error)
	go func() {
		writeErrCh <- kbfsOps.Write(ctx, fileNode, newData,
			int64(len(data)+len(moreData)))
	}()

	// Wait until the second write is blocked.
	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
	func() {
		lState := makeFBOLockState()
		filePath := ops.nodeCache.PathFromNode(fileNode)
		ops.blocks.blockLock.Lock(lState)
		defer ops.blocks.blockLock.Unlock(lState)
		df := ops.blocks.getOrCreateDirtyFileLocked(lState, filePath)
		// TODO: locking
		for df.NumErrListeners() != 3 {
			ops.blocks.blockLock.Unlock(lState)
			runtime.Gosched()
			ops.blocks.blockLock.Lock(lState)
		}
	}()

	// Unblock the sync.
	close(syncUnstallCh)

	// Both errors should be an OverQuota error.
	syncErr := <-syncErrCh
	writeErr := <-writeErrCh
	if _, ok := syncErr.(kbfsblock.ServerErrorOverQuota); !ok {
		t.Fatalf("Unexpected sync err: %v", syncErr)
	}
	if writeErr != syncErr {
		t.Fatalf("Unexpected write err: %v", writeErr)
	}

	// Finish the sync to clear out the byte counts.
	config.SetBlockOps(realBlockOps)
	if err := kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch()); err != nil {
		t.Fatalf("Couldn't finish sync: %v", err)
	}
}

func TestKBFSOpsCancelGetFavorites(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	serverConn, conn := rpc.MakeConnectionForTest(t)
	daemon := newKeybaseDaemonRPCWithClient(
		nil,
		conn.GetClient(),
		config.MakeLogger(""))
	config.SetKeybaseService(daemon)

	f := func(ctx context.Context) error {
		_, err := config.KBFSOps().GetFavorites(ctx)
		return err
	}
	testRPCWithCanceledContext(t, serverConn, f)
}
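
// TestKBFSOpsCancelGetFavorites drives an RPC-backed call with a context
// that gets canceled mid-flight and expects the call to unwind promptly.
// Stripped of the RPC plumbing, the assertion pattern looks roughly like
// this (a hypothetical sketch; testRPCWithCanceledContext handles the real
// server-side coordination that guarantees the call is actually in flight
// before the cancel fires):
func exampleCancelMidCall(f func(context.Context) error) error {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error, 1)
	go func() { done <- f(ctx) }()
	// Cancel while the call is (presumably) in flight, then wait for
	// the function to observe the cancellation and return; callers
	// would typically require the result to be context.Canceled.
	cancel()
	return <-done
}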

type stallingNodeCache struct {
	NodeCache

	doStallUpdate     <-chan struct{}
	unstallUpdate     <-chan struct{}
	beforePathsCalled chan<- struct{}
	afterPathCalled   chan<- struct{}
}

func (snc *stallingNodeCache) UpdatePointer(
	oldRef kbfsdata.BlockRef, newPtr kbfsdata.BlockPointer) NodeID {
	select {
	case <-snc.doStallUpdate:
		<-snc.unstallUpdate
	default:
	}
	return snc.NodeCache.UpdatePointer(oldRef, newPtr)
}

func (snc *stallingNodeCache) PathFromNode(node Node) kbfsdata.Path {
	snc.beforePathsCalled <- struct{}{}
	p := snc.NodeCache.PathFromNode(node)
	snc.afterPathCalled <- struct{}{}
	return p
}
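
// Note the shape of UpdatePointer's stall above: a select with a default
// branch makes the stall opt-in, so the method blocks only when the test
// has armed doStallUpdate (a buffered channel of size 1) and sails through
// otherwise. The same idiom in isolation (hypothetical function):
func exampleMaybeStall(doStall <-chan struct{}, unstall <-chan struct{}) {
	select {
	case <-doStall:
		// The test armed the stall; block until it releases us.
		<-unstall
	default:
		// Not armed: proceed without blocking.
	}
}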

// Test that a lookup that straddles a sync of the same file doesn't
// have any races. Regression test for KBFS-1717.
func TestKBFSOpsLookupSyncRace(t *testing.T) {
	var userName1, userName2 kbname.NormalizedUsername = "u1", "u2"
	config1, _, ctx, cancel := kbfsOpsConcurInit(t, userName1, userName2)
	defer kbfsConcurTestShutdown(ctx, t, config1, cancel)

	config2 := ConfigAsUser(config1, userName2)
	defer CheckConfigAndShutdown(ctx, t, config2)

	name := userName1.String() + "," + userName2.String()

	rootNode2 := GetRootNodeOrBust(ctx, t, config2, name, tlf.Private)
	kbfsOps2 := config2.KBFSOps()
	ops2 := getOps(config2, rootNode2.GetFolderBranch().Tlf)
	doStallUpdate := make(chan struct{}, 1)
	unstallUpdate := make(chan struct{})
	beforePathsCalled := make(chan struct{})
	afterPathCalled := make(chan struct{})
	snc := &stallingNodeCache{
		NodeCache:         ops2.nodeCache,
		doStallUpdate:     doStallUpdate,
		unstallUpdate:     unstallUpdate,
		beforePathsCalled: beforePathsCalled,
		afterPathCalled:   afterPathCalled,
	}
	ops2.nodeCache = snc
	ops2.blocks.nodeCache = snc
	defer func() {
		ops2.nodeCache = snc.NodeCache
		ops2.blocks.nodeCache = snc.NodeCache
	}()

	// u1 creates a file.
	rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, tlf.Private)
	kbfsOps1 := config1.KBFSOps()
	fileNodeA1, _, err := kbfsOps1.CreateFile(
		ctx, rootNode1, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)
	err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	// u2 syncs and then disables updates.
	if err := kbfsOps2.SyncFromServer(
		ctx, rootNode2.GetFolderBranch(), nil); err != nil {
		t.Fatal("Couldn't sync user 2 from server")
	}
	_, err = DisableUpdatesForTesting(config2, rootNode2.GetFolderBranch())
	require.NoError(t, err, "Couldn't disable updates: %v", err)

	// u1 writes to the file.
	data := []byte{1, 2, 3}
	err = kbfsOps1.Write(ctx, fileNodeA1, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)
	if err := kbfsOps1.SyncAll(ctx, fileNodeA1.GetFolderBranch()); err != nil {
		t.Fatalf("Couldn't finish sync: %v", err)
	}

	// u2 tries to lookup the file, which will block until we drain
	// the afterPathCalled channel.
	var wg sync.WaitGroup
	wg.Add(1)
	var fileNodeA2 Node
	go func() {
		defer wg.Done()
		var err error
		fileNodeA2, _, err = kbfsOps2.Lookup(ctx, rootNode2, testPPS("a"))
		require.NoError(t, err, "Couldn't lookup a: %v", err)
	}()
	// Wait for the lookup to block.
	select {
	case <-beforePathsCalled:
	case <-ctx.Done():
		t.Fatal("Timeout while waiting for lookup to block")
	}

	// u2 starts to sync but the sync is stalled while holding the
	// block lock.
	doStallUpdate <- struct{}{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := kbfsOps2.SyncFromServer(
			ctx, rootNode2.GetFolderBranch(), nil); err != nil {
			t.Errorf("Couldn't sync user 2 from server: %v", err)
		}
	}()

	// Unblock the lookup.
	select {
	case <-afterPathCalled:
	case <-ctx.Done():
		t.Fatal("Timeout while waiting for afterPathCalled")
	}

	// Wait for the sync to block and let the sync succeed (which will
	// let the lookup succeed). NOTE: To repro KBFS-1717, this call
	// needs to go before we unblock the paths lookup. However, with
	// the fix for KBFS-1717, the test will hang if we do that since
	// the Lookup holds blockLock while it gets the path. So as is,
	// this isn't a direct repro, but it's still a test worth having
	// around.
	select {
	case unstallUpdate <- struct{}{}:
	case <-ctx.Done():
		t.Fatal("Timeout while waiting for sync to block")
	}
	wg.Wait()

	// Now u2 reads using the node it just looked up, and should see
	// the right data.
	gotData := make([]byte, len(data))
	// Read needs a path lookup too, so revert the node cache.
	ops2.nodeCache = snc.NodeCache
	ops2.blocks.nodeCache = snc.NodeCache
	nr, err := kbfsOps2.Read(ctx, fileNodeA2, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	if !bytes.Equal(data, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", data, gotData)
	}
}
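
// The race above is reproduced by bracketing PathFromNode with unbuffered
// channel sends, which lets the test freeze u2's Lookup both before and
// after the path computation. A distilled sketch of bracketing a critical
// section this way (hypothetical function):
func exampleBracketed(before, after chan<- struct{}, critical func()) {
	before <- struct{}{} // the test now knows we're about to enter
	critical()
	after <- struct{}{} // the test controls when we may finish
}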

// Test that a Sync of a multi-block file that fails twice, and is
// then retried later, succeeds. Regression test for KBFS-2157.
func TestKBFSOpsConcurMultiblockOverwriteWithCanceledSync(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)

	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDPut, 1)

	// Use the smallest possible block size.
	bsplitter, err := kbfsdata.NewBlockSplitterSimple(20, 8*1024, config.Codec())
	require.NoError(t, err, "Couldn't create block splitter: %v", err)
	config.SetBlockSplitter(bsplitter)

	// create and write to a file
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)

	kbfsOps := config.KBFSOps()
	fileNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %v", err)

	data := make([]byte, 30)
	for i := 0; i < 30; i++ {
		data[i] = 1
	}
	err = kbfsOps.Write(ctx, fileNode, data, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %v", err)

	// Overwrite the data to cause the leaf blocks to be unreferenced.
	data2 := make([]byte, 30)
	for i := 0; i < 30; i++ {
		data2[i] = byte(i + 30)
	}
	err = kbfsOps.Write(ctx, fileNode, data2, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// start the sync
	errChan := make(chan error)
	cancelCtx, cancel := context.WithCancel(putCtx)
	go func() {
		errChan <- kbfsOps.SyncAll(cancelCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	<-onPutStalledCh
	cancel()
	close(putUnstallCh)

	// We expect a canceled error
	err = <-errChan
	if err != context.Canceled {
		t.Fatalf("Didn't get the expected canceled error: %v", err)
	}

	data3 := make([]byte, 30)
	for i := 0; i < 30; i++ {
		data3[i] = byte(i + 60)
	}
	err = kbfsOps.Write(ctx, fileNode, data3, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	onPutStalledCh, putUnstallCh, putCtx =
		StallMDOp(ctx, config, StallableMDPut, 1)

	// Cancel it again.
	cancelCtx, cancel = context.WithCancel(putCtx)
	go func() {
		errChan <- kbfsOps.SyncAll(cancelCtx, fileNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	<-onPutStalledCh
	cancel()
	close(putUnstallCh)

	// We expect a canceled error
	err = <-errChan
	if err != context.Canceled {
		t.Fatalf("Didn't get the expected canceled error: %v", err)
	}

	data4 := make([]byte, 30)
	for i := 0; i < 30; i++ {
		data4[i] = byte(i + 90)
	}
	err = kbfsOps.Write(ctx, fileNode, data4, 0)
	require.NoError(t, err, "Couldn't write file: %v", err)

	// Flush the file again.
	if err := kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch()); err != nil {
		t.Fatalf("Couldn't sync: %v", err)
	}

	gotData := make([]byte, 30)
	nr, err := kbfsOps.Read(ctx, fileNode, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	if !bytes.Equal(data4, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", data4, gotData)
	}
}
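
// Several tests above end with the same read-back check: read the whole
// file and compare against the expected bytes. A sketch of that final
// verification as a helper (hypothetical; the tests inline it instead):
func exampleRequireFileContents(
	ctx context.Context, t *testing.T, kbfsOps KBFSOps, file Node,
	expected []byte) {
	gotData := make([]byte, len(expected))
	nr, err := kbfsOps.Read(ctx, file, gotData, 0)
	require.NoError(t, err, "Couldn't read data: %v", err)
	if nr != int64(len(gotData)) {
		t.Errorf("Only read %d bytes", nr)
	}
	if !bytes.Equal(expected, gotData) {
		t.Errorf("Read wrong data. Expected %v, got %v", expected, gotData)
	}
}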

// Test that during a sync of a directory, a non-syncing file can be
// updated without losing its file size after the sync completes.
// Regression test for KBFS-4165.
func TestKBFSOpsConcurWriteOfNonsyncedFileDuringSync(t *testing.T) {
	config, _, ctx, cancel := kbfsOpsConcurInit(t, "test_user")
	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
	kbfsOps := config.KBFSOps()

	t.Log("Create and sync a 0-byte file")
	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
	fileA := "a"
	fileANode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS(fileA), false, NoExcl)
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)

	t.Log("Create a second file, but stall the SyncAll")
	onPutStalledCh, putUnstallCh, putCtx :=
		StallMDOp(ctx, config, StallableMDAfterPut, 1)

	fileB := "b"
	fileBNode, _, err := kbfsOps.CreateFile(
		ctx, rootNode, testPPS(fileB), false, NoExcl)
	require.NoError(t, err)
	dataB := []byte{1, 2, 3}
	err = kbfsOps.Write(ctx, fileBNode, dataB, 0)
	require.NoError(t, err)

	// start the sync
	errChan := make(chan error)
	go func() {
		errChan <- kbfsOps.SyncAll(putCtx, rootNode.GetFolderBranch())
	}()

	// wait until Sync gets stuck at MDOps.Put()
	select {
	case <-onPutStalledCh:
	case <-ctx.Done():
		require.NoError(t, ctx.Err())
	}

	t.Log("Write some data into the first file")
	dataA := []byte{3, 2, 1}
	err = kbfsOps.Write(ctx, fileANode, dataA, 0)
	require.NoError(t, err)
	ei, err := kbfsOps.Stat(ctx, fileANode)
	require.NoError(t, err)
	require.Equal(t, uint64(len(dataA)), ei.Size)

	t.Log("Finish the sync, and make sure the first file's data " +
		"is still available")
	close(putUnstallCh)
	err = <-errChan
	require.NoError(t, err)

	ei, err = kbfsOps.Stat(ctx, fileANode)
	require.NoError(t, err)
	require.Equal(t, uint64(len(dataA)), ei.Size)

	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)
}