github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/tlf_journal.go (about) 1 // Copyright 2016 Keybase Inc. All rights reserved. 2 // Use of this source code is governed by a BSD 3 // license that can be found in the LICENSE file. 4 5 package libkbfs 6 7 import ( 8 "fmt" 9 "os" 10 "path/filepath" 11 "regexp" 12 "strings" 13 "sync" 14 "time" 15 16 "github.com/keybase/backoff" 17 "github.com/keybase/client/go/kbfs/data" 18 "github.com/keybase/client/go/kbfs/idutil" 19 "github.com/keybase/client/go/kbfs/ioutil" 20 "github.com/keybase/client/go/kbfs/kbfsblock" 21 "github.com/keybase/client/go/kbfs/kbfscodec" 22 "github.com/keybase/client/go/kbfs/kbfscrypto" 23 "github.com/keybase/client/go/kbfs/kbfsmd" 24 "github.com/keybase/client/go/kbfs/kbfssync" 25 "github.com/keybase/client/go/kbfs/tlf" 26 "github.com/keybase/client/go/kbfs/tlfhandle" 27 "github.com/keybase/client/go/libkb" 28 "github.com/keybase/client/go/protocol/keybase1" 29 "github.com/pkg/errors" 30 "github.com/vividcortex/ewma" 31 "golang.org/x/net/context" 32 "golang.org/x/sync/errgroup" 33 ) 34 35 // tlfJournalConfig is the subset of the Config interface needed by 36 // tlfJournal (for ease of testing). 37 type tlfJournalConfig interface { 38 BlockSplitter() data.BlockSplitter 39 Clock() Clock 40 Codec() kbfscodec.Codec 41 Crypto() Crypto 42 BlockCache() data.BlockCache 43 BlockOps() BlockOps 44 MDCache() MDCache 45 MetadataVersion() kbfsmd.MetadataVer 46 Reporter() Reporter 47 encryptionKeyGetter() encryptionKeyGetter 48 mdDecryptionKeyGetter() mdDecryptionKeyGetter 49 MDServer() MDServer 50 usernameGetter() idutil.NormalizedUsernameGetter 51 resolver() idutil.Resolver 52 logMaker 53 diskLimitTimeout() time.Duration 54 teamMembershipChecker() kbfsmd.TeamMembershipChecker 55 BGFlushDirOpBatchSize() int 56 syncedTlfGetterSetter 57 SubscriptionManagerPublisher() SubscriptionManagerPublisher 58 } 59 60 // tlfJournalConfigWrapper is an adapter for Config objects to the 61 // tlfJournalConfig interface. 62 type tlfJournalConfigAdapter struct { 63 Config 64 } 65 66 func (ca tlfJournalConfigAdapter) encryptionKeyGetter() encryptionKeyGetter { 67 return ca.Config.KeyManager() 68 } 69 70 func (ca tlfJournalConfigAdapter) mdDecryptionKeyGetter() mdDecryptionKeyGetter { 71 return ca.Config.KeyManager() 72 } 73 74 func (ca tlfJournalConfigAdapter) usernameGetter() idutil.NormalizedUsernameGetter { 75 return ca.Config.KBPKI() 76 } 77 78 func (ca tlfJournalConfigAdapter) resolver() idutil.Resolver { 79 return ca.Config.KBPKI() 80 } 81 82 func (ca tlfJournalConfigAdapter) teamMembershipChecker() kbfsmd.TeamMembershipChecker { 83 return ca.Config.KBPKI() 84 } 85 86 func (ca tlfJournalConfigAdapter) diskLimitTimeout() time.Duration { 87 // Set this to slightly larger than the max delay, so that we 88 // don't start failing writes when we hit the max delay. 89 return defaultDiskLimitMaxDelay + time.Second 90 } 91 92 const ( 93 // Maximum number of blocks that can be flushed in a single batch 94 // by the journal. TODO: make this configurable, so that users 95 // can choose how much bandwidth is used by the journal. 96 maxJournalBlockFlushBatchSize = 25 97 // This will be the final entry for unflushed paths if there are 98 // too many revisions to process at once. 99 incompleteUnflushedPathsMarker = "..." 100 // ForcedBranchSquashRevThreshold is the minimum number of MD 101 // revisions in the journal that will trigger an automatic branch 102 // conversion (and subsequent resolution). 
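// Note that, within this file, this revision-count trigger is only
// consulted when BGFlushDirOpBatchSize() == 1 (see
// convertMDsToBranchIfOverThreshold); with larger batch sizes,
// squashing is expected to have already been handled by
// folderBranchOps.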
103 ForcedBranchSquashRevThreshold = 20 104 // ForcedBranchSquashBytesThresholdDefault is the minimum number of 105 // unsquashed MD bytes in the journal that will trigger an 106 // automatic branch conversion (and subsequent resolution). 107 ForcedBranchSquashBytesThresholdDefault = uint64(25 << 20) // 25 MB 108 // How often to check the server for conflicts while flushing. 109 tlfJournalServerMDCheckInterval = 1 * time.Minute 110 111 tlfJournalBakFmt = "%s-%d.bak" 112 ) 113 114 var tlfJournalBakRegexp = regexp.MustCompile( 115 `[/\\]([[:alnum:]]+)-([[:digit:]]+)\.bak$`) 116 117 // TLFJournalStatus represents the status of a TLF's journal for 118 // display in diagnostics. It is suitable for encoding directly as 119 // JSON. 120 type TLFJournalStatus struct { 121 Dir string 122 RevisionStart kbfsmd.Revision 123 RevisionEnd kbfsmd.Revision 124 BranchID string 125 BlockOpCount uint64 126 // The byte counters below are signed because 127 // os.FileInfo.Size() is signed. The file counter is signed 128 // for consistency. 129 StoredBytes int64 130 StoredFiles int64 131 UnflushedBytes int64 132 UnflushedPaths []string 133 EndEstimate *time.Time 134 QuotaUsedBytes int64 135 QuotaLimitBytes int64 136 LastFlushErr string `json:",omitempty"` 137 } 138 139 // TLFJournalBackgroundWorkStatus indicates whether a journal should 140 // be doing background work or not. 141 type TLFJournalBackgroundWorkStatus int 142 143 const ( 144 // TLFJournalBackgroundWorkPaused indicates that the journal 145 // should not currently be doing background work. 146 TLFJournalBackgroundWorkPaused TLFJournalBackgroundWorkStatus = iota 147 // TLFJournalBackgroundWorkEnabled indicates that the journal 148 // should be doing background work. 149 TLFJournalBackgroundWorkEnabled 150 // TLFJournalSingleOpBackgroundWorkEnabled indicates that the 151 // journal should make all of its work visible to other readers as 152 // a single operation. That means blocks may be uploaded as they 153 // come in, but MDs must be squashed together and only one MD 154 // update total should be uploaded. The end of the operation will 155 // be indicated by an explicit call to 156 // `tlfJournal.finishSingleOp`. 157 // 158 // Note that this is an explicit per-TLF setting, rather than 159 // taken from `Config.Mode()`, in case we find it useful in the 160 // future to be able to turn this on for individual TLFs running 161 // in normal KBFS mode. 162 TLFJournalSingleOpBackgroundWorkEnabled 163 ) 164 165 type tlfJournalPauseType int 166 167 const ( 168 journalPauseConflict tlfJournalPauseType = 1 << iota 169 journalPauseCommand 170 ) 171 172 func (bws TLFJournalBackgroundWorkStatus) String() string { 173 switch bws { 174 case TLFJournalBackgroundWorkEnabled: 175 return "Background work enabled" 176 case TLFJournalBackgroundWorkPaused: 177 return "Background work paused" 178 case TLFJournalSingleOpBackgroundWorkEnabled: 179 return "Background work in single-op mode" 180 default: 181 return fmt.Sprintf("TLFJournalBackgroundWorkStatus(%d)", bws) 182 } 183 } 184 185 // bwState indicates the state of the background work goroutine. 
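// A rough sketch of the transitions, as driven by the signal channels
// used in doBackgroundWorkLoop below:
//
//	bwIdle    --hasWorkCh------> bwBusy
//	bwBusy    --worker returns-> bwIdle
//	bwIdle    --needPauseCh----> bwPaused
//	bwBusy    --needPauseCh----> bwPaused (the worker is also canceled)
//	bwPaused  --needResumeCh---> bwIdle
//	any state --needShutdownCh-> the loop exits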
186 type bwState int 187 188 const ( 189 bwBusy bwState = iota 190 bwIdle 191 bwPaused 192 ) 193 194 func (bws bwState) String() string { 195 switch bws { 196 case bwBusy: 197 return "bwBusy" 198 case bwIdle: 199 return "bwIdle" 200 case bwPaused: 201 return "bwPaused" 202 default: 203 return fmt.Sprintf("bwState(%d)", bws) 204 } 205 } 206 207 // tlfJournalBWDelegate is used by tests to know what the background 208 // goroutine is doing, and also to enforce a timeout (via the 209 // context). 210 type tlfJournalBWDelegate interface { 211 GetBackgroundContext() context.Context 212 OnNewState(ctx context.Context, bws bwState) 213 OnShutdown(ctx context.Context) 214 } 215 216 type singleOpMode int 217 218 const ( 219 singleOpDisabled singleOpMode = iota 220 singleOpRunning 221 singleOpFinished 222 ) 223 224 type flushContext struct { 225 lockContextForPut *keybase1.LockContext 226 priorityForPut keybase1.MDPriority 227 } 228 229 func defaultFlushContext() flushContext { 230 return flushContext{priorityForPut: keybase1.MDPriorityNormal} 231 } 232 233 // A tlfJournal contains all the journals for a (TLF, user, device) 234 // tuple and controls the synchronization between the objects that are 235 // adding to those journals (via journalBlockServer or journalMDOps) 236 // and a background goroutine that flushes journal entries to the 237 // servers. 238 // 239 // The maximum number of characters added to the root dir by a TLF 240 // journal is 51, which just the max of the block journal and MD 241 // journal numbers. 242 type tlfJournal struct { 243 uid keybase1.UID 244 key kbfscrypto.VerifyingKey 245 tlfID tlf.ID 246 chargedTo keybase1.UserOrTeamID 247 dir string 248 config tlfJournalConfig 249 delegateBlockServer BlockServer 250 log traceLogger 251 vlog *libkb.VDebugLog 252 deferLog traceLogger 253 onBranchChange branchChangeListener 254 onMDFlush mdFlushListener 255 forcedSquashByBytes uint64 256 257 // overrideTlfID is used for journals that are accessed by the 258 // upper layers through a TLF ID that's not the same one present 259 // in the on-disk MDs. MDs returned from this journal will have 260 // their TLF ID overridden by this ID. An example is a local 261 // conflict branch that's been moved to a new storage directory, 262 // and is presented under a new fake TLF ID. This should only be 263 // used for read-only journals; writes will fail. 264 overrideTlfID tlf.ID 265 266 // Invariant: this tlfJournal acquires exactly 267 // blockJournal.getStoredBytes() and 268 // blockJournal.getStoredFiles() until shutdown. 269 diskLimiter DiskLimiter 270 271 // All the channels below are used as simple on/off 272 // signals. They're buffered for one object, and all sends are 273 // asynchronous, so multiple sends get collapsed into one 274 // signal. 275 hasWorkCh chan struct{} 276 needPauseCh chan struct{} 277 needResumeCh chan struct{} 278 needShutdownCh chan struct{} 279 needBranchCheckCh chan struct{} 280 281 // Track the ways in which the journal is paused. We don't allow 282 // work to resume unless a resume has come in corresponding to 283 // each type of paused that's happened. 284 pauseLock sync.Mutex 285 pauseType tlfJournalPauseType 286 287 // This channel is closed when background work shuts down. 288 backgroundShutdownCh chan struct{} 289 290 // Serializes all flushes, and protects `lastServerMDCheck` and 291 // `singleOpMode`. 
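// As used by flush() and doOnMDFlushAndRemoveFlushedMDEntry below,
// flushLock is acquired before blockPutFlushLock and journalLock, so
// it sits at the top of this file's lock ordering.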
292 flushLock sync.Mutex 293 lastServerMDCheck time.Time 294 singleOpMode singleOpMode 295 finishSingleOpCh chan flushContext 296 singleOpFlushContext flushContext 297 298 // Tracks background work. 299 wg kbfssync.RepeatedWaitGroup 300 301 // Needs to be taken for reading when putting block data, and for 302 // writing when clearing block data, to ensure that we don't put a 303 // block (in parallel with other blocks), then clear out the whole 304 // block journal before appending the block's entry to the block 305 // journal. Should be taken before `journalLock`. 306 blockPutFlushLock sync.RWMutex 307 308 // Protects all operations on blockJournal and mdJournal, and all 309 // the fields until the next blank line. 310 // 311 // TODO: Consider using https://github.com/pkg/singlefile 312 // instead. 313 journalLock sync.RWMutex 314 // both of these are nil after shutdown() is called. 315 blockJournal *blockJournal 316 mdJournal *mdJournal 317 disabled bool 318 lastFlushErr error 319 unflushedPaths *unflushedPathCache 320 // An estimate of how many bytes have been written since the last 321 // squash. 322 unsquashedBytes uint64 323 flushingBlocks map[kbfsblock.ID]bool 324 // An exponential moving average of the perceived block upload 325 // bandwidth of this journal. Since we don't add values at 326 // regular time intervals, this ends up weighting the average by 327 // number of samples. 328 bytesPerSecEstimate ewma.MovingAverage 329 currBytesFlushing int64 330 currFlushStarted time.Time 331 needInfoFile bool 332 333 bwDelegate tlfJournalBWDelegate 334 } 335 336 func getTLFJournalInfoFilePath(dir string) string { 337 return filepath.Join(dir, "info.json") 338 } 339 340 // tlfJournalInfo is the structure stored in 341 // getTLFJournalInfoFilePath(dir). 
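// The serialized form is roughly the following (the values shown are
// placeholders; the exact encodings depend on each field type's JSON
// marshaling):
//
//	{
//	  "UID": "<user ID>",
//	  "VerifyingKey": "<device verifying key>",
//	  "TlfID": "<TLF ID>",
//	  "ChargedTo": "<user or team ID>"
//	}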
342 type tlfJournalInfo struct { 343 UID keybase1.UID 344 VerifyingKey kbfscrypto.VerifyingKey 345 TlfID tlf.ID 346 ChargedTo keybase1.UserOrTeamID 347 } 348 349 func readTLFJournalInfoFile(dir string) ( 350 keybase1.UID, kbfscrypto.VerifyingKey, tlf.ID, 351 keybase1.UserOrTeamID, error) { 352 var info tlfJournalInfo 353 err := ioutil.DeserializeFromJSONFile( 354 getTLFJournalInfoFilePath(dir), &info) 355 if err != nil { 356 return keybase1.UID(""), kbfscrypto.VerifyingKey{}, tlf.ID{}, 357 keybase1.UserOrTeamID(""), err 358 } 359 360 chargedTo := info.UID.AsUserOrTeam() 361 if info.ChargedTo.Exists() { 362 chargedTo = info.ChargedTo 363 } 364 365 return info.UID, info.VerifyingKey, info.TlfID, chargedTo, nil 366 } 367 368 func writeTLFJournalInfoFile(dir string, uid keybase1.UID, 369 key kbfscrypto.VerifyingKey, tlfID tlf.ID, 370 chargedTo keybase1.UserOrTeamID) error { 371 info := tlfJournalInfo{uid, key, tlfID, chargedTo} 372 return ioutil.SerializeToJSONFile(info, getTLFJournalInfoFilePath(dir)) 373 } 374 375 func makeTLFJournal( 376 ctx context.Context, uid keybase1.UID, key kbfscrypto.VerifyingKey, 377 dir string, tlfID tlf.ID, chargedTo keybase1.UserOrTeamID, 378 config tlfJournalConfig, delegateBlockServer BlockServer, 379 bws TLFJournalBackgroundWorkStatus, bwDelegate tlfJournalBWDelegate, 380 onBranchChange branchChangeListener, onMDFlush mdFlushListener, 381 diskLimiter DiskLimiter, overrideTlfID tlf.ID) (*tlfJournal, error) { 382 if uid == keybase1.UID("") { 383 return nil, errors.New("Empty user") 384 } 385 if key == (kbfscrypto.VerifyingKey{}) { 386 return nil, errors.New("Empty verifying key") 387 } 388 if tlfID == (tlf.ID{}) { 389 return nil, errors.New("Empty tlf.ID") 390 } 391 392 if tlfID.Type() == tlf.SingleTeam && chargedTo.IsUser() { 393 return nil, errors.New("Team ID required for single-team TLF") 394 } else if tlfID.Type() != tlf.SingleTeam && !chargedTo.IsUser() { 395 return nil, errors.New("User ID required for non-team TLF") 396 } 397 398 readUID, readKey, readTlfID, readChargedTo, err := 399 readTLFJournalInfoFile(dir) 400 switch { 401 case ioutil.IsNotExist(err): 402 // Info file doesn't exist, so write it. 403 err := writeTLFJournalInfoFile(dir, uid, key, tlfID, chargedTo) 404 if err != nil { 405 return nil, err 406 } 407 408 case err != nil: 409 return nil, err 410 411 default: 412 // Info file exists, so it should match passed-in 413 // parameters. 414 if uid != readUID { 415 return nil, errors.Errorf( 416 "Expected UID %s, got %s", uid, readUID) 417 } 418 419 if key != readKey { 420 return nil, errors.Errorf( 421 "Expected verifying key %s, got %s", 422 key, readKey) 423 } 424 425 if tlfID != readTlfID { 426 return nil, errors.Errorf( 427 "Expected TLF ID %s, got %s", tlfID, readTlfID) 428 } 429 430 if chargedTo != readChargedTo { 431 return nil, errors.Errorf( 432 "Expected chargedTo ID %s, got %s", chargedTo, readChargedTo) 433 } 434 } 435 436 log := config.MakeLogger("TLFJ") 437 vlog := config.MakeVLogger(log) 438 439 blockJournal, err := makeBlockJournal(ctx, config.Codec(), dir, log, vlog) 440 if err != nil { 441 return nil, err 442 } 443 444 mdJournal, err := makeMDJournal( 445 ctx, uid, key, config.Codec(), config.Crypto(), config.Clock(), 446 config.teamMembershipChecker(), config, tlfID, config.MetadataVersion(), 447 dir, log, overrideTlfID) 448 if err != nil { 449 return nil, err 450 } 451 452 // TODO(KBFS-2217): if this is a team TLF, transform the given 453 // disk limiter into one that checks the team's quota, not the 454 // user's. 
455 456 j := &tlfJournal{ 457 uid: uid, 458 key: key, 459 tlfID: tlfID, 460 chargedTo: chargedTo, 461 dir: dir, 462 config: config, 463 delegateBlockServer: delegateBlockServer, 464 log: traceLogger{log}, 465 deferLog: traceLogger{log.CloneWithAddedDepth(1)}, 466 vlog: vlog, 467 onBranchChange: onBranchChange, 468 onMDFlush: onMDFlush, 469 forcedSquashByBytes: ForcedBranchSquashBytesThresholdDefault, 470 overrideTlfID: overrideTlfID, 471 diskLimiter: diskLimiter, 472 hasWorkCh: make(chan struct{}, 1), 473 needPauseCh: make(chan struct{}, 1), 474 needResumeCh: make(chan struct{}, 1), 475 needShutdownCh: make(chan struct{}, 1), 476 needBranchCheckCh: make(chan struct{}, 1), 477 backgroundShutdownCh: make(chan struct{}), 478 finishSingleOpCh: make(chan flushContext, 1), 479 singleOpFlushContext: defaultFlushContext(), 480 blockJournal: blockJournal, 481 mdJournal: mdJournal, 482 unflushedPaths: &unflushedPathCache{}, 483 flushingBlocks: make(map[kbfsblock.ID]bool), 484 bytesPerSecEstimate: ewma.NewMovingAverage(), 485 bwDelegate: bwDelegate, 486 } 487 488 switch bws { 489 case TLFJournalSingleOpBackgroundWorkEnabled: 490 j.singleOpMode = singleOpRunning 491 j.log.CDebugf( 492 ctx, "Starting journal for %s in single op mode", tlfID.String()) 493 // Now that we've set `j.singleOpMode`, `bws` can be the 494 // normal background work mode again, just to keep the 495 // transition logic in `doBackgroundWorkLoop` simple (it 496 // doesn't depend on single-opness). 497 bws = TLFJournalBackgroundWorkEnabled 498 case TLFJournalBackgroundWorkPaused: 499 j.pauseType |= journalPauseCommand 500 } 501 502 isConflict, err := j.isOnConflictBranch() 503 if err != nil { 504 return nil, err 505 } 506 if isConflict { 507 // Conflict branches must start off paused until the first 508 // resolution. 509 j.log.CDebugf(ctx, "Journal for %s has a conflict, so starting off "+ 510 "paused (requested status %s)", tlfID, bws) 511 bws = TLFJournalBackgroundWorkPaused 512 j.pauseType |= journalPauseConflict 513 } 514 if bws == TLFJournalBackgroundWorkPaused { 515 j.wg.Pause() 516 } 517 518 // Do this only once we're sure we won't error. 519 storedBytes := j.blockJournal.getStoredBytes() 520 unflushedBytes := j.blockJournal.getUnflushedBytes() 521 storedFiles := j.blockJournal.getStoredFiles() 522 availableBytes, availableFiles := j.diskLimiter.onJournalEnable( 523 ctx, storedBytes, unflushedBytes, storedFiles, j.chargedTo) 524 525 retry := backoff.NewExponentialBackOff() 526 retry.MaxElapsedTime = 0 527 go j.doBackgroundWorkLoop(bws, retry) 528 529 // Signal work to pick up any existing journal entries. 530 j.signalWork() 531 532 j.log.CDebugf(ctx, 533 "Enabled journal for %s (stored bytes=%d/files=%d, available bytes=%d/files=%d) with path %s", 534 tlfID, storedBytes, storedFiles, availableBytes, availableFiles, dir) 535 return j, nil 536 } 537 538 func (j *tlfJournal) signalWork() { 539 j.wg.Add(1) 540 select { 541 case j.hasWorkCh <- struct{}{}: 542 default: 543 j.wg.Done() 544 } 545 } 546 547 // CtxJournalTagKey is the type used for unique context tags within 548 // background journal work. 549 type CtxJournalTagKey int 550 551 const ( 552 // CtxJournalIDKey is the type of the tag for unique operation IDs 553 // within background journal work. 554 CtxJournalIDKey CtxJournalTagKey = iota 555 ) 556 557 // CtxJournalOpID is the display name for the unique operation 558 // enqueued journal ID tag. 559 const CtxJournalOpID = "JID" 560 561 // doBackgroundWorkLoop is the main function for the background 562 // goroutine. 
It spawns off a worker goroutine to call 563 // doBackgroundWork whenever there is work, and can be paused and 564 // resumed. 565 func (j *tlfJournal) doBackgroundWorkLoop( 566 bws TLFJournalBackgroundWorkStatus, retry backoff.BackOff) { 567 ctx := context.Background() 568 if j.bwDelegate != nil { 569 ctx = j.bwDelegate.GetBackgroundContext() 570 } 571 572 // Non-nil when a retry has been scheduled for the future. 573 var retryTimer *time.Timer 574 defer func() { 575 close(j.backgroundShutdownCh) 576 if j.bwDelegate != nil { 577 j.bwDelegate.OnShutdown(ctx) 578 } 579 if retryTimer != nil { 580 retryTimer.Stop() 581 } 582 }() 583 584 // Below we have a state machine with three states: 585 // 586 // 1) Idle, where we wait for new work or to be paused; 587 // 2) Busy, where we wait for the worker goroutine to 588 // finish, or to be paused; 589 // 3) Paused, where we wait to be resumed. 590 // 591 // We run this state machine until we are shutdown. Also, if 592 // we exit the busy state for any reason other than the worker 593 // goroutine finished, we stop the worker goroutine (via 594 // bwCancel below). 595 596 // errCh and bwCancel are non-nil only when we're in the busy 597 // state. errCh is the channel on which we receive the error 598 // from the worker goroutine, and bwCancel is the CancelFunc 599 // corresponding to the context passed to the worker 600 // goroutine. 601 var errCh <-chan error 602 var bwCancel context.CancelFunc 603 // Handle the case where we panic while in the busy state. 604 defer func() { 605 if bwCancel != nil { 606 bwCancel() 607 } 608 }() 609 610 for { 611 ctx := CtxWithRandomIDReplayable(ctx, CtxJournalIDKey, CtxJournalOpID, 612 j.log) 613 switch { 614 case bws == TLFJournalBackgroundWorkEnabled && errCh == nil: 615 // 1) Idle. 616 if j.bwDelegate != nil { 617 j.bwDelegate.OnNewState(ctx, bwIdle) 618 } 619 j.vlog.CLogf( 620 ctx, libkb.VLog1, "Waiting for the work signal for %s", 621 j.tlfID) 622 select { 623 case <-j.hasWorkCh: 624 j.vlog.CLogf( 625 ctx, libkb.VLog1, "Got work signal for %s", j.tlfID) 626 if retryTimer != nil { 627 retryTimer.Stop() 628 retryTimer = nil 629 } 630 bwCtx, cancel := context.WithCancel(ctx) 631 errCh = j.doBackgroundWork(bwCtx) 632 bwCancel = cancel 633 634 case <-j.needPauseCh: 635 j.log.CDebugf(ctx, 636 "Got pause signal for %s", j.tlfID) 637 bws = TLFJournalBackgroundWorkPaused 638 639 case <-j.needShutdownCh: 640 j.log.CDebugf(ctx, 641 "Got shutdown signal for %s", j.tlfID) 642 return 643 } 644 645 case bws == TLFJournalBackgroundWorkEnabled && errCh != nil: 646 // 2) Busy. 
647 if j.bwDelegate != nil { 648 j.bwDelegate.OnNewState(ctx, bwBusy) 649 } 650 j.vlog.CLogf( 651 ctx, libkb.VLog1, 652 "Waiting for background work to be done for %s", j.tlfID) 653 needShutdown := false 654 select { 655 case err := <-errCh: 656 if retryTimer != nil { 657 panic("Retry timer should be nil after work is done") 658 } 659 660 if err != nil { 661 j.log.CWarningf(ctx, 662 "Background work error for %s: %+v", 663 j.tlfID, err) 664 665 bTime := retry.NextBackOff() 666 if bTime != backoff.Stop { 667 j.log.CWarningf(ctx, "Retrying in %s", bTime) 668 retryTimer = time.AfterFunc(bTime, j.signalWork) 669 } 670 } else { 671 retry.Reset() 672 } 673 674 case <-j.needPauseCh: 675 j.log.CDebugf(ctx, 676 "Got pause signal for %s", j.tlfID) 677 bws = TLFJournalBackgroundWorkPaused 678 679 case <-j.needShutdownCh: 680 j.log.CDebugf(ctx, 681 "Got shutdown signal for %s", j.tlfID) 682 needShutdown = true 683 } 684 685 // Cancel the worker goroutine as we exit this 686 // state. 687 bwCancel() 688 bwCancel = nil 689 690 // Ensure the worker finishes after being canceled, so it 691 // doesn't pick up any new work. For example, if the 692 // worker doesn't check for cancellations before checking 693 // the journal for new work, it might process some journal 694 // entries before returning an error. 695 <-errCh 696 errCh = nil 697 698 if needShutdown { 699 return 700 } 701 702 case bws == TLFJournalBackgroundWorkPaused: 703 // 3) Paused 704 if j.bwDelegate != nil { 705 j.bwDelegate.OnNewState(ctx, bwPaused) 706 } 707 j.log.CDebugf( 708 ctx, "Waiting to resume background work for %s", 709 j.tlfID) 710 select { 711 case <-j.needResumeCh: 712 j.log.CDebugf(ctx, 713 "Got resume signal for %s", j.tlfID) 714 bws = TLFJournalBackgroundWorkEnabled 715 716 case <-j.needShutdownCh: 717 j.log.CDebugf(ctx, 718 "Got shutdown signal for %s", j.tlfID) 719 return 720 } 721 722 default: 723 j.log.CErrorf( 724 ctx, "Unknown TLFJournalBackgroundStatus %s", 725 bws) 726 return 727 } 728 } 729 } 730 731 // doBackgroundWork currently only does auto-flushing. It assumes that 732 // ctx is canceled when the background processing should stop. 733 func (j *tlfJournal) doBackgroundWork(ctx context.Context) <-chan error { 734 errCh := make(chan error, 1) 735 // TODO: Handle panics. 736 go func() { 737 defer j.wg.Done() 738 errCh <- j.flush(ctx) 739 close(errCh) 740 }() 741 return errCh 742 } 743 744 // We don't guarantee that background pause/resume requests will be 745 // processed in strict FIFO order. In particular, multiple pause 746 // requests are collapsed into one (also multiple resume requests), so 747 // it's possible that a pause-resume-pause sequence will be processed 748 // as pause-resume. But that's okay, since these are just for 749 // infrequent ad-hoc testing. 750 751 func (j *tlfJournal) pause(pauseType tlfJournalPauseType) { 752 j.pauseLock.Lock() 753 defer j.pauseLock.Unlock() 754 oldPauseType := j.pauseType 755 j.pauseType |= pauseType 756 757 if oldPauseType > 0 { 758 // No signal is needed since someone already called pause. 
759 return 760 } 761 762 j.wg.Pause() 763 select { 764 case j.needPauseCh <- struct{}{}: 765 default: 766 } 767 } 768 769 func (j *tlfJournal) pauseBackgroundWork() { 770 j.pause(journalPauseCommand) 771 } 772 773 func (j *tlfJournal) resume(pauseType tlfJournalPauseType) { 774 j.pauseLock.Lock() 775 defer j.pauseLock.Unlock() 776 if j.pauseType == 0 { 777 // Nothing has paused us yet (possibly because we are still 778 // flushing blocks after we ran branch conversion -- see 779 // KBFS-2501), so no need to resume. 780 return 781 } 782 783 j.pauseType &= ^pauseType 784 785 if j.pauseType != 0 { 786 return 787 } 788 789 select { 790 case j.needResumeCh <- struct{}{}: 791 // Resume the wait group right away, so future callers will block 792 // even before the background goroutine picks up this signal. 793 j.wg.Resume() 794 default: 795 } 796 } 797 798 func (j *tlfJournal) resumeBackgroundWork() { 799 j.resume(journalPauseCommand) 800 } 801 802 func (j *tlfJournal) checkEnabledLocked() error { 803 if j.blockJournal == nil || j.mdJournal == nil { 804 return errors.WithStack(errTLFJournalShutdown{}) 805 } 806 if j.disabled { 807 return errors.WithStack(errTLFJournalDisabled{}) 808 } 809 return nil 810 } 811 812 func (j *tlfJournal) getJournalEnds(ctx context.Context) ( 813 blockEnd journalOrdinal, mdEnd kbfsmd.Revision, mdJournalID kbfsmd.ID, 814 err error) { 815 j.journalLock.RLock() 816 defer j.journalLock.RUnlock() 817 if err := j.checkEnabledLocked(); err != nil { 818 return 0, kbfsmd.RevisionUninitialized, kbfsmd.ID{}, err 819 } 820 821 blockEnd, err = j.blockJournal.end() 822 if err != nil { 823 return 0, 0, kbfsmd.ID{}, err 824 } 825 826 mdEnd, err = j.mdJournal.end() 827 if err != nil { 828 return 0, 0, kbfsmd.ID{}, err 829 } 830 mdJournalID, err = j.mdJournal.getOrCreateJournalID() 831 if err != nil { 832 return 0, 0, kbfsmd.ID{}, err 833 } 834 835 return blockEnd, mdEnd, mdJournalID, nil 836 } 837 838 func (j *tlfJournal) checkAndFinishSingleOpFlushLocked( 839 ctx context.Context) error { 840 switch j.singleOpMode { 841 case singleOpDisabled: 842 j.log.CDebugf(ctx, "Single op mode is disabled; cannot finish") 843 case singleOpFinished: 844 j.log.CDebugf(ctx, "Single op mode already finished") 845 case singleOpRunning: 846 j.log.CDebugf(ctx, "Marking single op as finished") 847 j.singleOpMode = singleOpFinished 848 default: 849 return errors.Errorf("Unrecognized single op mode: %d", j.singleOpMode) 850 } 851 return nil 852 } 853 854 var errReadOnlyJournal = errors.New("Can't modify a read-only journal") 855 856 func (j *tlfJournal) checkWriteable() error { 857 if j.overrideTlfID != tlf.NullID { 858 return errors.WithStack(errReadOnlyJournal) 859 } 860 return nil 861 } 862 863 func (j *tlfJournal) flush(ctx context.Context) (err error) { 864 if err := j.checkWriteable(); err != nil { 865 return err 866 } 867 868 j.flushLock.Lock() 869 defer j.flushLock.Unlock() 870 871 flushedBlockEntries := 0 872 flushedMDEntries := 0 873 defer func() { 874 if err != nil { 875 j.deferLog.CDebugf(ctx, 876 "Flushed %d block entries and %d MD entries "+ 877 "for %s, but got error %v", 878 flushedBlockEntries, flushedMDEntries, 879 j.tlfID, err) 880 } 881 j.journalLock.Lock() 882 j.lastFlushErr = err 883 j.journalLock.Unlock() 884 }() 885 886 for { 887 select { 888 case <-ctx.Done(): 889 j.log.CDebugf(ctx, "Flush canceled: %+v", ctx.Err()) 890 return nil 891 default: 892 } 893 894 isConflict, err := j.isOnConflictBranch() 895 if err != nil { 896 return err 897 } 898 if isConflict { 899 j.vlog.CLogf( 900 
ctx, libkb.VLog1, "Ignoring flush while on conflict branch") 901 // It's safe to send a pause signal here, because even if 902 // CR has already resolved the conflict and send the 903 // resume signal, we know the background work loop is 904 // still waiting for this flush() loop to finish before it 905 // processes either the pause or the resume channel. 906 j.pause(journalPauseConflict) 907 return nil 908 } 909 910 select { 911 case j.singleOpFlushContext = <-j.finishSingleOpCh: 912 err := j.checkAndFinishSingleOpFlushLocked(ctx) 913 if err != nil { 914 return err 915 } 916 default: 917 } 918 919 converted, err := j.convertMDsToBranchIfOverThreshold(ctx, true) 920 if err != nil { 921 return err 922 } 923 if converted { 924 return nil 925 } 926 927 blockEnd, mdEnd, mdJournalID, err := j.getJournalEnds(ctx) 928 if err != nil { 929 return err 930 } 931 932 if blockEnd == 0 && 933 (mdEnd == kbfsmd.RevisionUninitialized || 934 j.singleOpMode == singleOpRunning) { 935 j.vlog.CLogf(ctx, libkb.VLog1, "Nothing else to flush") 936 if j.singleOpMode == singleOpFinished { 937 j.log.CDebugf(ctx, "Resetting single op mode") 938 j.singleOpMode = singleOpRunning 939 j.singleOpFlushContext = defaultFlushContext() 940 } 941 break 942 } 943 944 j.log.CDebugf(ctx, "Flushing up to blockEnd=%d and mdEnd=%d", 945 blockEnd, mdEnd) 946 947 // Flush the block journal ops in parallel. 948 numFlushed, maxMDRevToFlush, converted, err := 949 j.flushBlockEntries(ctx, blockEnd, mdJournalID) 950 if err != nil { 951 return err 952 } 953 flushedBlockEntries += numFlushed 954 955 if numFlushed == 0 { 956 // If converted is true, the journal may have 957 // shrunk, and so mdEnd would be obsolete. But 958 // converted is always false when numFlushed 959 // is 0. 960 if converted { 961 panic("numFlushed == 0 and converted is true") 962 } 963 964 // There were no blocks to flush, so we can 965 // flush all of the remaining MDs. 966 maxMDRevToFlush = mdEnd 967 } 968 969 if j.singleOpMode == singleOpRunning { 970 j.vlog.CLogf( 971 ctx, libkb.VLog1, "Skipping MD flushes in single-op mode") 972 continue 973 } 974 975 // TODO: Flush MDs in batch. 
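// For now, MDs are flushed one at a time: keep calling flushOneMDOp
// until it reports nothing left at or below maxMDRevToFlush, and if
// nothing was flushed at all, fall back to an occasional server check
// for conflicts (checkServerForConflicts rate-limits itself via
// tlfJournalServerMDCheckInterval).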
976 977 flushedOneMD := false 978 for { 979 flushed, err := j.flushOneMDOp(ctx, 980 maxMDRevToFlush, j.singleOpFlushContext) 981 if err != nil { 982 return err 983 } 984 if !flushed { 985 break 986 } 987 flushedOneMD = true 988 j.lastServerMDCheck = j.config.Clock().Now() 989 flushedMDEntries++ 990 } 991 992 if !flushedOneMD { 993 err = j.checkServerForConflicts(ctx, nil) 994 if err != nil { 995 return err 996 } 997 } 998 } 999 1000 j.log.CDebugf(ctx, "Flushed %d block entries and %d MD entries for %s", 1001 flushedBlockEntries, flushedMDEntries, j.tlfID) 1002 return nil 1003 } 1004 1005 type errTLFJournalShutdown struct{} 1006 1007 func (e errTLFJournalShutdown) Error() string { 1008 return "tlfJournal is shutdown" 1009 } 1010 1011 type errTLFJournalDisabled struct{} 1012 1013 func (e errTLFJournalDisabled) Error() string { 1014 return "tlfJournal is disabled" 1015 } 1016 1017 type errTLFJournalNotEmpty struct{} 1018 1019 func (e errTLFJournalNotEmpty) Error() string { 1020 return "tlfJournal is not empty" 1021 } 1022 1023 func (j *tlfJournal) checkServerForConflicts(ctx context.Context, 1024 needLock *keybase1.LockID) error { 1025 durSinceCheck := j.config.Clock().Now().Sub(j.lastServerMDCheck) 1026 if durSinceCheck < tlfJournalServerMDCheckInterval { 1027 return nil 1028 } 1029 1030 isConflict, err := j.isOnConflictBranch() 1031 if err != nil { 1032 return err 1033 } 1034 if isConflict { 1035 return nil 1036 } 1037 1038 nextMDToFlush, err := func() (kbfsmd.Revision, error) { 1039 j.journalLock.RLock() 1040 defer j.journalLock.RUnlock() 1041 return j.mdJournal.readEarliestRevision() 1042 }() 1043 if err != nil { 1044 return err 1045 } 1046 if nextMDToFlush == kbfsmd.RevisionUninitialized { 1047 return nil 1048 } 1049 1050 j.vlog.CLogf( 1051 ctx, libkb.VLog1, "Checking the MD server for the latest revision; "+ 1052 "next MD revision in the journal is %d", nextMDToFlush) 1053 // TODO(KBFS-2186): implement a lighterweight server RPC that just 1054 // returns the latest revision number, so we don't have to fetch 1055 // the entire MD? 1056 currHead, err := j.config.MDServer().GetForTLF( 1057 ctx, j.tlfID, kbfsmd.NullBranchID, kbfsmd.Merged, needLock) 1058 if err != nil { 1059 return err 1060 } 1061 j.lastServerMDCheck = j.config.Clock().Now() 1062 if currHead == nil { 1063 return nil 1064 } 1065 if currHead.MD.RevisionNumber()+1 == nextMDToFlush { 1066 // We're still up-to-date with the server. Nothing left to do. 
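// (For example, if the server head is revision 10 and the earliest
// unflushed journal revision is 11, the journal is simply ahead of
// the server rather than in conflict with it.)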
1067 return nil 1068 } 1069 1070 j.log.CDebugf( 1071 ctx, "Server is ahead of local journal (rev=%d, nextMD=%d), "+ 1072 "indicating a conflict", 1073 currHead.MD.RevisionNumber(), nextMDToFlush) 1074 return j.convertMDsToBranch(ctx) 1075 } 1076 1077 func (j *tlfJournal) getNextBlockEntriesToFlush( 1078 ctx context.Context, end journalOrdinal, mdJournalID kbfsmd.ID) ( 1079 entries blockEntriesToFlush, bytesToFlush int64, 1080 maxMDRevToFlush kbfsmd.Revision, err error) { 1081 j.journalLock.RLock() 1082 defer j.journalLock.RUnlock() 1083 if err := j.checkEnabledLocked(); err != nil { 1084 return blockEntriesToFlush{}, 0, kbfsmd.RevisionUninitialized, err 1085 } 1086 1087 return j.blockJournal.getNextEntriesToFlush(ctx, end, 1088 maxJournalBlockFlushBatchSize, mdJournalID) 1089 } 1090 1091 func (j *tlfJournal) removeFlushedBlockEntries(ctx context.Context, 1092 entries blockEntriesToFlush, flushEnded time.Time) error { 1093 j.journalLock.Lock() 1094 defer j.journalLock.Unlock() 1095 if err := j.checkEnabledLocked(); err != nil { 1096 return err 1097 } 1098 1099 storedBytesBefore := j.blockJournal.getStoredBytes() 1100 1101 // TODO: Check storedFiles also. 1102 1103 j.config.SubscriptionManagerPublisher().PublishChange( 1104 keybase1.SubscriptionTopic_JOURNAL_STATUS) 1105 j.config.SubscriptionManagerPublisher().PublishChange( 1106 keybase1.SubscriptionTopic_FILES_TAB_BADGE) 1107 flushedBytes, err := j.blockJournal.removeFlushedEntries( 1108 ctx, entries, j.tlfID, j.config.Reporter()) 1109 if err != nil { 1110 return err 1111 } 1112 storedBytesAfter := j.blockJournal.getStoredBytes() 1113 1114 // storedBytes shouldn't change since removedBytes is 0. 1115 if storedBytesAfter != storedBytesBefore { 1116 panic(fmt.Sprintf( 1117 "storedBytes unexpectedly changed from %d to %d", 1118 storedBytesBefore, storedBytesAfter)) 1119 } 1120 1121 timeToFlush := flushEnded.Sub(j.currFlushStarted) 1122 j.currBytesFlushing = 0 1123 j.currFlushStarted = time.Time{} 1124 if flushedBytes > 0 { 1125 j.bytesPerSecEstimate.Add(float64(flushedBytes) / timeToFlush.Seconds()) 1126 } 1127 j.diskLimiter.onBlocksFlush(ctx, flushedBytes, j.chargedTo) 1128 j.vlog.CLogf( 1129 ctx, libkb.VLog1, "Flushed %d bytes in %s; new bandwidth estimate "+ 1130 "is %f bytes/sec", flushedBytes, timeToFlush, 1131 j.bytesPerSecEstimate.Value()) 1132 1133 return nil 1134 } 1135 1136 func (j *tlfJournal) startFlush(bytesToFlush int64) { 1137 j.journalLock.Lock() 1138 defer j.journalLock.Unlock() 1139 j.currBytesFlushing = bytesToFlush 1140 j.currFlushStarted = j.config.Clock().Now() 1141 } 1142 1143 func (j *tlfJournal) flushBlockEntries( 1144 ctx context.Context, end journalOrdinal, mdJournalID kbfsmd.ID) ( 1145 numFlushed int, maxMDRevToFlush kbfsmd.Revision, 1146 converted bool, err error) { 1147 entries, bytesToFlush, maxMDRevToFlush, err := j.getNextBlockEntriesToFlush( 1148 ctx, end, mdJournalID) 1149 if err != nil { 1150 return 0, kbfsmd.RevisionUninitialized, false, err 1151 } 1152 1153 if entries.length() == 0 { 1154 return 0, maxMDRevToFlush, false, nil 1155 } 1156 1157 j.vlog.CLogf( 1158 ctx, libkb.VLog1, "Flushing %d blocks, up to rev %d", 1159 entries.puts.numBlocks(), maxMDRevToFlush) 1160 1161 // Mark these blocks as flushing, and clear when done. 
1162 err = j.markFlushingBlockIDs(entries) 1163 if err != nil { 1164 return 0, kbfsmd.RevisionUninitialized, false, err 1165 } 1166 cleared := false 1167 defer func() { 1168 if !cleared { 1169 clearErr := j.clearFlushingBlockIDs(entries) 1170 if err == nil { 1171 err = clearErr 1172 } 1173 } 1174 }() 1175 1176 // TODO: fill this in for logging/error purposes. 1177 var tlfName tlf.CanonicalName 1178 1179 eg, groupCtx := errgroup.WithContext(ctx) 1180 convertCtx, convertCancel := context.WithCancel(groupCtx) 1181 1182 // Flush the blocks in a goroutine. Alongside make another 1183 // goroutine that listens for MD puts and checks the size of the 1184 // MD journal, and converts to a local squash branch if it gets 1185 // too large. While the 2nd goroutine is waiting, it should exit 1186 // immediately as soon as the 1st one finishes, but if it is 1187 // already working on a conversion it should finish that work. 1188 // 1189 // If the 2nd goroutine makes a branch, foreground writes could 1190 // trigger CR while blocks are still being flushed. This can't 1191 // usually happen, because flushing is paused while CR is 1192 // happening. flush() has to make sure to get the new MD journal 1193 // end, and we need to make sure `maxMDRevToFlush` is still valid. 1194 j.startFlush(bytesToFlush) 1195 eg.Go(func() error { 1196 defer convertCancel() 1197 cacheType := DiskBlockAnyCache 1198 if j.config.IsSyncedTlf(j.tlfID) { 1199 cacheType = DiskBlockSyncCache 1200 } 1201 return flushBlockEntries(groupCtx, j.log, j.deferLog, 1202 j.delegateBlockServer, j.config.BlockCache(), j.config.Reporter(), 1203 j.tlfID, tlfName, entries, cacheType) 1204 }) 1205 converted = false 1206 eg.Go(func() error { 1207 // We might need to run multiple conversions during a single 1208 // batch of block flushes, so loop until the batch finishes. 1209 for { 1210 select { 1211 case <-j.needBranchCheckCh: 1212 // Don't signal a pause when doing this conversion in 1213 // a separate goroutine, because it ends up canceling 1214 // the flush context, which means all the block puts 1215 // would get canceled and we don't want that. 1216 // Instead, let the current iteration of the flush 1217 // finish, and then signal at the top of the next 1218 // iteration. 1219 convertedNow, err := 1220 j.convertMDsToBranchIfOverThreshold(groupCtx, false) 1221 if err != nil { 1222 return err 1223 } 1224 converted = converted || convertedNow 1225 case <-convertCtx.Done(): 1226 return nil // Canceled because the block puts finished 1227 } 1228 } 1229 }) 1230 1231 err = eg.Wait() 1232 if err != nil { 1233 return 0, kbfsmd.RevisionUninitialized, false, err 1234 } 1235 endFlush := j.config.Clock().Now() 1236 1237 err = j.clearFlushingBlockIDs(entries) 1238 cleared = true 1239 if err != nil { 1240 return 0, kbfsmd.RevisionUninitialized, false, err 1241 } 1242 1243 err = j.removeFlushedBlockEntries(ctx, entries, endFlush) 1244 if err != nil { 1245 return 0, kbfsmd.RevisionUninitialized, false, err 1246 } 1247 1248 // If a conversion happened, the original `maxMDRevToFlush` only 1249 // applies for sure if its mdRevMarker entry was already for a 1250 // local squash. TODO: conversion might not have actually 1251 // happened yet, in which case it's still ok to flush 1252 // maxMDRevToFlush. 
1253 if converted && maxMDRevToFlush != kbfsmd.RevisionUninitialized && 1254 !entries.revIsLocalSquash(maxMDRevToFlush, mdJournalID) { 1255 maxMDRevToFlush = kbfsmd.RevisionUninitialized 1256 } 1257 1258 return entries.length(), maxMDRevToFlush, converted, nil 1259 } 1260 1261 func (j *tlfJournal) getNextMDEntryToFlush(ctx context.Context, 1262 end kbfsmd.Revision) (kbfsmd.ID, *RootMetadataSigned, kbfsmd.ExtraMetadata, error) { 1263 j.journalLock.RLock() 1264 defer j.journalLock.RUnlock() 1265 if err := j.checkEnabledLocked(); err != nil { 1266 return kbfsmd.ID{}, nil, nil, err 1267 } 1268 1269 return j.mdJournal.getNextEntryToFlush(ctx, end, j.config.Crypto()) 1270 } 1271 1272 func (j *tlfJournal) convertMDsToBranchLocked( 1273 ctx context.Context, bid kbfsmd.BranchID, doSignal bool) error { 1274 err := j.mdJournal.convertToBranch( 1275 ctx, bid, j.config.Crypto(), j.config.Codec(), j.tlfID, 1276 j.config.MDCache()) 1277 if err != nil { 1278 return err 1279 } 1280 j.unsquashedBytes = 0 1281 1282 if j.onBranchChange != nil { 1283 j.onBranchChange.onTLFBranchChange(j.tlfID, bid) 1284 } 1285 1286 // Pause while on a conflict branch. 1287 if doSignal { 1288 j.pause(journalPauseConflict) 1289 } 1290 1291 return nil 1292 } 1293 1294 func (j *tlfJournal) convertMDsToBranch(ctx context.Context) error { 1295 bid, err := j.config.Crypto().MakeRandomBranchID() 1296 if err != nil { 1297 return err 1298 } 1299 1300 j.journalLock.Lock() 1301 defer j.journalLock.Unlock() 1302 if err := j.checkEnabledLocked(); err != nil { 1303 return err 1304 } 1305 1306 return j.convertMDsToBranchLocked(ctx, bid, true) 1307 } 1308 1309 func (j *tlfJournal) convertMDsToBranchIfOverThreshold(ctx context.Context, 1310 doSignal bool) (bool, error) { 1311 if j.singleOpMode == singleOpRunning { 1312 // Don't squash until the single operation is complete. 1313 return false, nil 1314 } 1315 1316 j.journalLock.Lock() 1317 defer j.journalLock.Unlock() 1318 if err := j.checkEnabledLocked(); err != nil { 1319 return false, err 1320 } 1321 1322 if j.mdJournal.getBranchID() != kbfsmd.NullBranchID { 1323 // Already on a conflict branch, so nothing to do. 1324 return false, nil 1325 } 1326 1327 atLeastOneRev, err := j.mdJournal.atLeastNNonLocalSquashes(1) 1328 if err != nil { 1329 return false, err 1330 } 1331 if !atLeastOneRev { 1332 // If there isn't at least one non-local-squash revision, we can 1333 // bail early since there's definitely nothing to do. 1334 return false, nil 1335 } 1336 1337 squashByRev := false 1338 switch { 1339 case j.singleOpMode == singleOpFinished: 1340 j.vlog.CLogf(ctx, libkb.VLog1, "Squashing due to single op completion") 1341 // Always squash if we've finished the single op and have more 1342 // than one revision pending. 1343 squashByRev = true 1344 j.unsquashedBytes = 0 1345 case j.config.BGFlushDirOpBatchSize() == 1: 1346 squashByRev, err = 1347 j.mdJournal.atLeastNNonLocalSquashes(ForcedBranchSquashRevThreshold) 1348 if err != nil { 1349 return false, err 1350 } 1351 default: 1352 // Squashing is already done in folderBranchOps, so just mark 1353 // this revision as squashed, so simply turn it off here. 1354 j.unsquashedBytes = 0 1355 } 1356 1357 // Note that j.unsquashedBytes is just an estimate -- it doesn't 1358 // account for blocks that will be eliminated as part of the 1359 // squash, and it doesn't count unsquashed bytes that were written 1360 // to disk before this tlfJournal instance started. But it should 1361 // be close enough to work for the purposes of this optimization. 
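// (With the default forcedSquashByBytes of
// ForcedBranchSquashBytesThresholdDefault, this works out to
// 25 << 20 = 26,214,400 unsquashed bytes before a conversion is
// forced.)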
1362 squashByBytes := j.unsquashedBytes >= j.forcedSquashByBytes 1363 if !squashByRev && !squashByBytes { 1364 // Not over either threshold yet. 1365 return false, nil 1366 } 1367 1368 j.vlog.CLogf( 1369 ctx, libkb.VLog1, "Converting journal with %d unsquashed bytes "+ 1370 "to a branch", j.unsquashedBytes) 1371 1372 mdJournalID, err := j.mdJournal.getOrCreateJournalID() 1373 if err != nil { 1374 return false, err 1375 } 1376 1377 // If we're squashing by bytes, and there's exactly one 1378 // non-local-squash revision, just directly mark it as squashed to 1379 // avoid the CR overhead. 1380 if !squashByRev { 1381 moreThanOneRev, err := j.mdJournal.atLeastNNonLocalSquashes(2) 1382 if err != nil { 1383 return false, err 1384 } 1385 1386 if !moreThanOneRev { 1387 j.vlog.CLogf( 1388 ctx, libkb.VLog1, "Avoiding CR when there is only one "+ 1389 "revision that needs squashing; marking as local squash") 1390 err = j.mdJournal.markLatestAsLocalSquash(ctx) 1391 if err != nil { 1392 return false, err 1393 } 1394 1395 err = j.blockJournal.markLatestRevMarkerAsLocalSquash(mdJournalID) 1396 if err != nil { 1397 return false, err 1398 } 1399 1400 j.unsquashedBytes = 0 1401 return true, nil 1402 } 1403 } 1404 1405 err = j.convertMDsToBranchLocked(ctx, kbfsmd.PendingLocalSquashBranchID, doSignal) 1406 if err != nil { 1407 return false, err 1408 } 1409 return true, nil 1410 } 1411 1412 // getBlockDeferredGCRange wraps blockJournal.getDeferredGCRange. The 1413 // returned blockJournal should be used instead of j.blockJournal, as 1414 // we want to call blockJournal.doGC outside of journalLock. 1415 func (j *tlfJournal) getBlockDeferredGCRange() ( 1416 blockJournal *blockJournal, length int, 1417 earliest, latest journalOrdinal, err error) { 1418 j.journalLock.Lock() 1419 defer j.journalLock.Unlock() 1420 if err := j.checkEnabledLocked(); err != nil { 1421 return nil, 0, 0, 0, err 1422 } 1423 length, earliest, latest, err = j.blockJournal.getDeferredGCRange() 1424 if err != nil { 1425 return nil, 0, 0, 0, err 1426 } 1427 return j.blockJournal, length, earliest, latest, nil 1428 } 1429 1430 func (j *tlfJournal) doOnMDFlushAndRemoveFlushedMDEntry(ctx context.Context, 1431 mdID kbfsmd.ID, rmds *RootMetadataSigned) error { 1432 if j.onMDFlush != nil { 1433 j.onMDFlush.onMDFlush(rmds.MD.TlfID(), rmds.MD.BID(), 1434 rmds.MD.RevisionNumber()) 1435 } 1436 1437 blockJournal, length, earliest, latest, err := 1438 j.getBlockDeferredGCRange() 1439 if err != nil { 1440 return err 1441 } 1442 1443 var removedBytes, removedFiles int64 1444 if length != 0 { 1445 // doGC() only needs to be called under the flushLock, not the 1446 // journalLock, as it doesn't touch the actual journal, only 1447 // the deferred GC journal. 
1448 var err error 1449 removedBytes, removedFiles, err = blockJournal.doGC( 1450 ctx, earliest, latest) 1451 if err != nil { 1452 return err 1453 } 1454 1455 j.diskLimiter.release(ctx, journalLimitTrackerType, removedBytes, 1456 removedFiles) 1457 } 1458 1459 j.blockPutFlushLock.Lock() 1460 defer j.blockPutFlushLock.Unlock() 1461 1462 j.journalLock.Lock() 1463 defer j.journalLock.Unlock() 1464 if err := j.checkEnabledLocked(); err != nil { 1465 return err 1466 } 1467 1468 if length != 0 { 1469 clearedBlockJournal, aggregateInfo, err := 1470 j.blockJournal.clearDeferredGCRange( 1471 ctx, removedBytes, removedFiles, earliest, latest) 1472 if err != nil { 1473 return err 1474 } 1475 1476 if clearedBlockJournal { 1477 equal, err := kbfscodec.Equal( 1478 j.config.Codec(), aggregateInfo, blockAggregateInfo{}) 1479 if err != nil { 1480 return err 1481 } 1482 if !equal { 1483 j.log.CWarningf(ctx, 1484 "Cleared block journal for %s, but still has aggregate info %+v", 1485 j.tlfID, aggregateInfo) 1486 // TODO: Consider trying to adjust the disk 1487 // limiter state to compensate for the 1488 // leftover bytes/files here. Ideally, the 1489 // disk limiter would keep track of per-TLF 1490 // state, so we could just call 1491 // j.diskLimiter.onJournalClear(tlfID) to have 1492 // it clear its state for this TLF. 1493 } 1494 } 1495 } 1496 1497 clearedMDJournal, err := j.removeFlushedMDEntryLocked(ctx, mdID, rmds) 1498 if err != nil { 1499 return err 1500 } 1501 1502 // If we check just clearedBlockJournal here, we'll miss the 1503 // chance to clear the TLF journal if the block journal 1504 // empties out before the MD journal does. 1505 if j.blockJournal.empty() && clearedMDJournal { 1506 j.log.CDebugf(ctx, 1507 "TLF journal is now empty; removing all files in %s", j.dir) 1508 1509 // If we ever need to upgrade the journal version, 1510 // this would be the place to do it. 1511 1512 // Reset to initial state. 1513 j.unflushedPaths = &unflushedPathCache{} 1514 j.unsquashedBytes = 0 1515 j.flushingBlocks = make(map[kbfsblock.ID]bool) 1516 1517 err := ioutil.RemoveAll(j.dir) 1518 if err != nil { 1519 return err 1520 } 1521 // Remember to make the info file again if more data comes 1522 // into this journal. 1523 j.needInfoFile = true 1524 } 1525 1526 return nil 1527 } 1528 1529 func (j *tlfJournal) removeFlushedMDEntryLocked(ctx context.Context, 1530 mdID kbfsmd.ID, rmds *RootMetadataSigned) (clearedMDJournal bool, err error) { 1531 clearedMDJournal, err = j.mdJournal.removeFlushedEntry(ctx, mdID, rmds) 1532 if err != nil { 1533 return false, err 1534 } 1535 1536 j.unflushedPaths.removeFromCache(rmds.MD.RevisionNumber()) 1537 return clearedMDJournal, nil 1538 } 1539 1540 func (j *tlfJournal) flushOneMDOp(ctx context.Context, 1541 maxMDRevToFlush kbfsmd.Revision, flushCtx flushContext) ( 1542 flushed bool, err error) { 1543 if maxMDRevToFlush == kbfsmd.RevisionUninitialized { 1544 // Avoid a call to `getNextMDEntryToFlush`, which 1545 // would otherwise unnecessarily read an MD from disk. 
1546 return false, nil 1547 } 1548 1549 j.vlog.CLogf(ctx, libkb.VLog1, "Flushing one MD to server") 1550 defer func() { 1551 if err != nil { 1552 j.deferLog.CDebugf(ctx, "Flush failed with %v", err) 1553 } 1554 }() 1555 1556 mdServer := j.config.MDServer() 1557 1558 mdID, rmds, extra, err := j.getNextMDEntryToFlush(ctx, maxMDRevToFlush+1) 1559 if err != nil { 1560 return false, err 1561 } 1562 if mdID == (kbfsmd.ID{}) { 1563 return false, nil 1564 } 1565 1566 // If this is the first revision, and there are more MD entries 1567 // still to be flushed, don't pass in lc yet, wait for the next 1568 // flush. Normally the lock context is used in single-op mode, 1569 // and there will always only be one revision to flush. The 1570 // exception is when the first revision of the TLF hasn't been 1571 // pushed yet, since it can't be squashed. In that case, there 1572 // could be 2 revisions in the MD journal. Don't unlock on the 1573 // first flush, since we'll still need to hold the lock until the 1574 // second flush. 1575 if flushCtx.lockContextForPut != nil { 1576 _, mdCount, err := j.getJournalEntryCounts() 1577 if err != nil { 1578 return false, err 1579 } 1580 if mdCount > 1 { 1581 if rmds.MD.RevisionNumber() != kbfsmd.RevisionInitial { 1582 return false, errors.New("Unexpectedly flushing more " + 1583 "than one revision while unlocking, and the first one " + 1584 "isn't the initial revision") 1585 } 1586 1587 j.vlog.CLogf( 1588 ctx, libkb.VLog1, "Ignoring lock context for initial MD flush") 1589 flushCtx.lockContextForPut = nil 1590 } 1591 } 1592 1593 j.vlog.CLogf( 1594 ctx, libkb.VLog1, "Flushing MD for TLF=%s with id=%s, rev=%s, bid=%s", 1595 rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID()) 1596 pushErr := mdServer.Put(ctx, rmds, extra, 1597 flushCtx.lockContextForPut, flushCtx.priorityForPut) 1598 if isRevisionConflict(pushErr) { 1599 headMdID, err := getMdID(ctx, mdServer, j.config.Codec(), 1600 rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.MergedStatus(), 1601 rmds.MD.RevisionNumber(), nil) 1602 switch { 1603 case err != nil: 1604 j.log.CWarningf(ctx, 1605 "getMdID failed for TLF %s, BID %s, and revision %d: %v", 1606 rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.RevisionNumber(), err) 1607 case headMdID == mdID: 1608 if headMdID == (kbfsmd.ID{}) { 1609 panic("nil earliestID and revision conflict error returned by pushEarliestToServer") 1610 } 1611 // We must have already flushed this MD, so continue. 1612 pushErr = nil 1613 case rmds.MD.MergedStatus() == kbfsmd.Merged: 1614 j.log.CDebugf(ctx, "Conflict detected %v", pushErr) 1615 // Convert MDs to a branch and return -- the journal 1616 // pauses until the resolution is complete. 
1617 err = j.convertMDsToBranch(ctx) 1618 if err != nil { 1619 return false, err 1620 } 1621 return false, nil 1622 } 1623 } 1624 if pushErr != nil { 1625 return false, pushErr 1626 } 1627 1628 j.config.MDCache().MarkPutToServer( 1629 rmds.MD.TlfID(), rmds.MD.RevisionNumber(), rmds.MD.BID()) 1630 1631 err = j.doOnMDFlushAndRemoveFlushedMDEntry(ctx, mdID, rmds) 1632 if err != nil { 1633 return false, err 1634 } 1635 1636 return true, nil 1637 } 1638 1639 func (j *tlfJournal) getJournalEntryCounts() ( 1640 blockEntryCount, mdEntryCount uint64, err error) { 1641 j.journalLock.RLock() 1642 defer j.journalLock.RUnlock() 1643 if err := j.checkEnabledLocked(); err != nil { 1644 return 0, 0, err 1645 } 1646 1647 blockEntryCount = j.blockJournal.length() 1648 mdEntryCount = j.mdJournal.length() 1649 return blockEntryCount, mdEntryCount, nil 1650 } 1651 1652 func (j *tlfJournal) isOnConflictBranch() (bool, error) { 1653 j.journalLock.RLock() 1654 defer j.journalLock.RUnlock() 1655 1656 if err := j.checkEnabledLocked(); err != nil { 1657 return false, err 1658 } 1659 1660 return j.mdJournal.getBranchID() != kbfsmd.NullBranchID, nil 1661 } 1662 1663 func (j *tlfJournal) getJournalStatusLocked() (TLFJournalStatus, error) { 1664 if err := j.checkEnabledLocked(); err != nil { 1665 return TLFJournalStatus{}, err 1666 } 1667 1668 earliestRevision, err := j.mdJournal.readEarliestRevision() 1669 if err != nil { 1670 return TLFJournalStatus{}, err 1671 } 1672 latestRevision, err := j.mdJournal.readLatestRevision() 1673 if err != nil { 1674 return TLFJournalStatus{}, err 1675 } 1676 blockEntryCount := j.blockJournal.length() 1677 lastFlushErr := "" 1678 if j.lastFlushErr != nil { 1679 lastFlushErr = j.lastFlushErr.Error() 1680 } 1681 storedBytes := j.blockJournal.getStoredBytes() 1682 storedFiles := j.blockJournal.getStoredFiles() 1683 unflushedBytes := j.blockJournal.getUnflushedBytes() 1684 quotaUsed, quotaLimit := j.diskLimiter.getQuotaInfo(j.chargedTo) 1685 var endEstimate *time.Time 1686 if unflushedBytes > 0 { 1687 now := j.config.Clock().Now() 1688 bwEstimate := j.bytesPerSecEstimate.Value() 1689 1690 // How long do we think is remaining in the current flush? 1691 timeLeftInCurrFlush := time.Duration(0) 1692 if j.currBytesFlushing > 0 { 1693 timeFlushingSoFar := now.Sub(j.currFlushStarted) 1694 totalExpectedTime := time.Duration(0) 1695 if bwEstimate > 0 { 1696 totalExpectedTime = time.Duration( 1697 float64(j.currBytesFlushing)/bwEstimate) * time.Second 1698 } 1699 1700 if totalExpectedTime > timeFlushingSoFar { 1701 timeLeftInCurrFlush = totalExpectedTime - timeFlushingSoFar 1702 } else { 1703 // Arbitrarily say that there's one second left, if 1704 // we've taken longer than expected to flush so far. 1705 timeLeftInCurrFlush = 1 * time.Second 1706 } 1707 } 1708 1709 // Add the estimate for the blocks that haven't started flushing yet. 1710 1711 // If we have no estimate for this TLF yet, pick 10 seconds 1712 // arbitrarily. 
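// (When an estimate is available, the arithmetic above works out as
// follows with made-up numbers: if bwEstimate is 1 MB/s, 4 MB are
// currently flushing and started 2s ago, and there are 10 MB of
// unflushed bytes in total, then timeLeftInCurrFlush is 4s - 2s = 2s,
// the remaining 6 MB contribute another 6s below, and the end
// estimate lands roughly 8s from now.)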
1713 restOfTimeLeftEstimate := 10 * time.Second 1714 if bwEstimate > 0 { 1715 bytesLeft := unflushedBytes - j.currBytesFlushing 1716 restOfTimeLeftEstimate = time.Duration( 1717 float64(bytesLeft)/bwEstimate) * time.Second 1718 } 1719 1720 t := now.Add(timeLeftInCurrFlush + restOfTimeLeftEstimate) 1721 endEstimate = &t 1722 } 1723 return TLFJournalStatus{ 1724 Dir: j.dir, 1725 BranchID: j.mdJournal.getBranchID().String(), 1726 RevisionStart: earliestRevision, 1727 RevisionEnd: latestRevision, 1728 BlockOpCount: blockEntryCount, 1729 StoredBytes: storedBytes, 1730 StoredFiles: storedFiles, 1731 QuotaUsedBytes: quotaUsed, 1732 QuotaLimitBytes: quotaLimit, 1733 UnflushedBytes: unflushedBytes, 1734 EndEstimate: endEstimate, 1735 LastFlushErr: lastFlushErr, 1736 }, nil 1737 } 1738 1739 func (j *tlfJournal) getJournalStatus() (TLFJournalStatus, error) { 1740 j.journalLock.RLock() 1741 defer j.journalLock.RUnlock() 1742 return j.getJournalStatusLocked() 1743 } 1744 1745 // getJournalStatusWithRange returns the journal status, and either a 1746 // non-nil unflushedPathsMap is returned, which can be used directly 1747 // to fill in UnflushedPaths, or a list of ImmutableBareRootMetadatas 1748 // is returned (along with a bool indicating whether that list is 1749 // complete), which can be used to build an unflushedPathsMap. If 1750 // complete is true, then the list may be empty; otherwise, it is 1751 // guaranteed to not be empty. 1752 func (j *tlfJournal) getJournalStatusWithRange(ctx context.Context) ( 1753 jStatus TLFJournalStatus, unflushedPaths unflushedPathsMap, 1754 ibrmds []ImmutableBareRootMetadata, complete bool, err error) { 1755 j.journalLock.RLock() 1756 defer j.journalLock.RUnlock() 1757 jStatus, err = j.getJournalStatusLocked() 1758 if err != nil { 1759 return TLFJournalStatus{}, nil, nil, false, err 1760 } 1761 1762 unflushedPaths = j.unflushedPaths.getUnflushedPaths() 1763 if unflushedPaths != nil { 1764 return jStatus, unflushedPaths, nil, true, nil 1765 } 1766 1767 if jStatus.RevisionEnd == kbfsmd.RevisionUninitialized { 1768 return jStatus, nil, nil, true, nil 1769 } 1770 1771 complete = true 1772 stop := jStatus.RevisionEnd 1773 if stop > jStatus.RevisionStart+1000 { 1774 stop = jStatus.RevisionStart + 1000 1775 complete = false 1776 } 1777 // It would be nice to avoid getting this range if we are not 1778 // the initializer, but at this point we don't know if we'll 1779 // need to initialize or not. 1780 ibrmds, err = j.mdJournal.getRange( 1781 ctx, j.mdJournal.branchID, jStatus.RevisionStart, stop) 1782 if err != nil { 1783 return TLFJournalStatus{}, nil, nil, false, err 1784 } 1785 return jStatus, nil, ibrmds, complete, nil 1786 } 1787 1788 // getUnflushedPathMDInfos converts the given list of bare root 1789 // metadatas into unflushedPathMDInfo objects. The caller must NOT 1790 // hold `j.journalLock`, because blocks from the journal may need to 1791 // be read as part of the decryption. 
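// Callers in this file first snapshot the ImmutableBareRootMetadatas
// under the lock via getJournalStatusWithRange, and only then call
// this function (see getJournalStatusWithPaths).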
1792 func (j *tlfJournal) getUnflushedPathMDInfos(ctx context.Context, 1793 ibrmds []ImmutableBareRootMetadata) ([]unflushedPathMDInfo, error) { 1794 if len(ibrmds) == 0 { 1795 return nil, nil 1796 } 1797 1798 ibrmdBareHandle, err := ibrmds[0].MakeBareTlfHandleWithExtra() 1799 if err != nil { 1800 return nil, err 1801 } 1802 1803 handle, err := tlfhandle.MakeHandleWithTlfID( 1804 ctx, ibrmdBareHandle, j.tlfID.Type(), j.config.resolver(), 1805 j.config.usernameGetter(), j.tlfID, 1806 j.config.OfflineAvailabilityForID(j.tlfID)) 1807 if err != nil { 1808 return nil, err 1809 } 1810 1811 mdInfos := make([]unflushedPathMDInfo, 0, len(ibrmds)) 1812 1813 for _, ibrmd := range ibrmds { 1814 // TODO: Avoid having to do this type assertion and 1815 // convert to RootMetadata. 1816 brmd, ok := ibrmd.RootMetadata.(kbfsmd.MutableRootMetadata) 1817 if !ok { 1818 return nil, kbfsmd.MutableRootMetadataNoImplError{} 1819 } 1820 rmd := makeRootMetadata(brmd, ibrmd.extra, handle) 1821 1822 // Assume, since journal is running, that we're in default mode. 1823 mode := NewInitModeFromType(InitDefault) 1824 pmd, err := decryptMDPrivateData( 1825 ctx, j.config.Codec(), j.config.Crypto(), 1826 j.config.BlockCache(), j.config.BlockOps(), 1827 j.config.mdDecryptionKeyGetter(), j.config.teamMembershipChecker(), 1828 j.config, mode, j.uid, rmd.GetSerializedPrivateMetadata(), rmd, 1829 rmd, j.log) 1830 if err != nil { 1831 return nil, err 1832 } 1833 rmd.data = pmd 1834 1835 mdInfo := unflushedPathMDInfo{ 1836 revision: ibrmd.RevisionNumber(), 1837 kmd: rmd, 1838 pmd: pmd, 1839 localTimestamp: ibrmd.localTimestamp, 1840 } 1841 mdInfos = append(mdInfos, mdInfo) 1842 } 1843 return mdInfos, nil 1844 } 1845 1846 func (j *tlfJournal) getJournalStatusWithPaths(ctx context.Context, 1847 cpp chainsPathPopulator) (jStatus TLFJournalStatus, err error) { 1848 // This loop is limited only by the lifetime of `ctx`. 1849 var unflushedPaths unflushedPathsMap 1850 var complete bool 1851 for { 1852 var ibrmds []ImmutableBareRootMetadata 1853 jStatus, unflushedPaths, ibrmds, complete, err = 1854 j.getJournalStatusWithRange(ctx) 1855 if err != nil { 1856 return TLFJournalStatus{}, err 1857 } 1858 1859 if unflushedPaths != nil { 1860 break 1861 } 1862 1863 // We need to make or initialize the unflushed paths. 1864 if !complete { 1865 // Figure out the paths for the truncated MD range, 1866 // but don't cache it. 1867 unflushedPaths = make(unflushedPathsMap) 1868 j.log.CDebugf(ctx, "Making incomplete unflushed path cache") 1869 mdInfos, err := j.getUnflushedPathMDInfos(ctx, ibrmds) 1870 if err != nil { 1871 return TLFJournalStatus{}, err 1872 } 1873 err = addUnflushedPaths(ctx, j.uid, j.key, 1874 j.config.Codec(), j.log, j.config, mdInfos, cpp, 1875 unflushedPaths) 1876 if err != nil { 1877 return TLFJournalStatus{}, err 1878 } 1879 break 1880 } 1881 1882 // We need to init it ourselves, or wait for someone else to 1883 // do it. Save the cache in a local variable in case it gets 1884 // cleared when the journal is flushed while it's 1885 // initializing. 
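// (Editor's note: the cache's init protocol, as used below, is
// roughly:
//
//	doInit, err := upCache.startInitializeOrWait(ctx)
//	if err != nil {
//		return TLFJournalStatus{}, err
//	}
//	if doInit {
//		// This caller populates the cache; on any failure it must
//		// call upCache.abortInitialization() so waiters are released.
//	} else {
//		// Another caller initialized (or is initializing) it; loop
//		// around and re-read the status.
//	}
//
// which is why the deferred abortInitialization below is gated on
// initSuccess.)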
1886 upCache := j.unflushedPaths 1887 doInit, err := upCache.startInitializeOrWait(ctx) 1888 if err != nil { 1889 return TLFJournalStatus{}, err 1890 } 1891 if doInit { 1892 initSuccess := false 1893 defer func() { 1894 if !initSuccess || err != nil { 1895 upCache.abortInitialization() 1896 } 1897 }() 1898 mdInfos, err := j.getUnflushedPathMDInfos(ctx, ibrmds) 1899 if err != nil { 1900 return TLFJournalStatus{}, err 1901 } 1902 unflushedPaths, initSuccess, err = upCache.initialize( 1903 ctx, j.uid, j.key, j.config.Codec(), j.log, j.config, cpp, 1904 mdInfos) 1905 if err != nil { 1906 return TLFJournalStatus{}, err 1907 } 1908 // All done! 1909 break 1910 } 1911 1912 j.log.CDebugf(ctx, "Waited for unflushed paths initialization, "+ 1913 "trying again to get the status") 1914 } 1915 1916 pathsSeen := make(map[string]bool) 1917 for _, revPaths := range unflushedPaths { 1918 for path := range revPaths { 1919 if !pathsSeen[path] { 1920 jStatus.UnflushedPaths = append(jStatus.UnflushedPaths, path) 1921 pathsSeen[path] = true 1922 } 1923 } 1924 } 1925 1926 if !complete { 1927 jStatus.UnflushedPaths = 1928 append(jStatus.UnflushedPaths, incompleteUnflushedPathsMarker) 1929 } 1930 return jStatus, nil 1931 } 1932 1933 func (j *tlfJournal) getByteCounts() ( 1934 storedBytes, storedFiles, unflushedBytes int64, err error) { 1935 j.journalLock.RLock() 1936 defer j.journalLock.RUnlock() 1937 if err := j.checkEnabledLocked(); err != nil { 1938 return 0, 0, 0, err 1939 } 1940 1941 return j.blockJournal.getStoredBytes(), j.blockJournal.getStoredFiles(), 1942 j.blockJournal.getUnflushedBytes(), nil 1943 } 1944 1945 func (j *tlfJournal) shutdown(ctx context.Context) { 1946 select { 1947 case j.needShutdownCh <- struct{}{}: 1948 default: 1949 } 1950 1951 <-j.backgroundShutdownCh 1952 1953 j.journalLock.Lock() 1954 defer j.journalLock.Unlock() 1955 if err := j.checkEnabledLocked(); err != nil { 1956 // Already shutdown. 1957 return 1958 } 1959 1960 // Even if we shut down the journal, its blocks still take up 1961 // space, but we don't want to double-count them if we start 1962 // up this journal again, so we need to adjust them here. 1963 // 1964 // TODO: If we ever expect to shut down non-empty journals any 1965 // time other than during shutdown, we should still count 1966 // shut-down journals against the disk limit. 1967 storedBytes := j.blockJournal.getStoredBytes() 1968 unflushedBytes := j.blockJournal.getUnflushedBytes() 1969 storedFiles := j.blockJournal.getStoredFiles() 1970 j.diskLimiter.onJournalDisable( 1971 ctx, storedBytes, unflushedBytes, storedFiles, j.chargedTo) 1972 1973 // Make further accesses error out. 1974 j.blockJournal = nil 1975 j.mdJournal = nil 1976 } 1977 1978 // disable prevents new operations from hitting the journal. Will 1979 // fail unless the journal is completely empty. 1980 func (j *tlfJournal) disable() (wasEnabled bool, err error) { 1981 j.journalLock.Lock() 1982 defer j.journalLock.Unlock() 1983 err = j.checkEnabledLocked() 1984 switch errors.Cause(err).(type) { 1985 case nil: 1986 // Continue. 1987 break 1988 case errTLFJournalDisabled: 1989 // Already disabled. 1990 return false, nil 1991 default: 1992 return false, err 1993 } 1994 1995 blockEntryCount := j.blockJournal.length() 1996 mdEntryCount := j.mdJournal.length() 1997 1998 // You can only disable an empty journal. 
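// (Editor's note: a hypothetical caller racing with in-flight writes
// should therefore expect errTLFJournalNotEmpty and flush before
// retrying, e.g.:
//
//	if _, err := j.disable(); err != nil {
//		if _, notEmpty := errors.Cause(err).(errTLFJournalNotEmpty); notEmpty {
//			// flush the journal, then retry disable
//		}
//	}
//
// mirroring the errors.Cause switch used above.)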
1999 if blockEntryCount > 0 || mdEntryCount > 0 { 2000 return false, errors.WithStack(errTLFJournalNotEmpty{}) 2001 } 2002 2003 j.disabled = true 2004 return true, nil 2005 } 2006 2007 func (j *tlfJournal) enable() error { 2008 j.journalLock.Lock() 2009 defer j.journalLock.Unlock() 2010 err := j.checkEnabledLocked() 2011 switch errors.Cause(err).(type) { 2012 case nil: 2013 // Already enabled. 2014 return nil 2015 case errTLFJournalDisabled: 2016 // Continue. 2017 break 2018 default: 2019 return err 2020 } 2021 2022 j.disabled = false 2023 return nil 2024 } 2025 2026 // All the functions below just do the equivalent blockJournal or 2027 // mdJournal function under j.journalLock. 2028 2029 // getBlockData doesn't take a block context param, unlike the remote 2030 // block server, since we still want to serve blocks even if all local 2031 // references have been deleted (for example, a block that's been 2032 // flushed but is still being stored and served on disk until the next 2033 // successful MD flush). This is safe because the journal doesn't 2034 // support removing references for anything other than a flush (see 2035 // the comment in tlfJournal.removeBlockReferences). 2036 func (j *tlfJournal) getBlockData(ctx context.Context, id kbfsblock.ID) ( 2037 []byte, kbfscrypto.BlockCryptKeyServerHalf, error) { 2038 j.journalLock.RLock() 2039 defer j.journalLock.RUnlock() 2040 if err := j.checkEnabledLocked(); err != nil { 2041 return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err 2042 } 2043 2044 return j.blockJournal.getData(ctx, id) 2045 } 2046 2047 func (j *tlfJournal) getBlockSize( 2048 ctx context.Context, id kbfsblock.ID) (uint32, error) { 2049 j.journalLock.RLock() 2050 defer j.journalLock.RUnlock() 2051 if err := j.checkEnabledLocked(); err != nil { 2052 return 0, err 2053 } 2054 2055 size, err := j.blockJournal.getDataSize(ctx, id) 2056 if err != nil { 2057 return 0, err 2058 } 2059 // Block sizes are restricted, but `size` is an int64 because 2060 // that's what the OS gives us. Convert it to a uint32. TODO: 2061 // check this is safe? 2062 return uint32(size), nil 2063 } 2064 2065 // ErrDiskLimitTimeout is returned when putBlockData exceeds 2066 // diskLimitTimeout while trying to acquire bytes to put. 2067 type ErrDiskLimitTimeout struct { 2068 timeout time.Duration 2069 requestedBytes int64 2070 requestedFiles int64 2071 availableBytes int64 2072 availableFiles int64 2073 usageBytes int64 2074 usageFiles int64 2075 limitBytes float64 2076 limitFiles float64 2077 err error 2078 reportable bool 2079 } 2080 2081 // Error implements the error interface for ErrDiskLimitTimeout. It 2082 // has a pointer receiver because `block_util.go` needs to 2083 // modify it in some cases while preserving any stacks attached to it 2084 // via the `errors` package.
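// (Editor's note: concretely, the error is wrapped with
// errors.WithStack when it is returned from putBlockData below, so a
// caller that wants to tweak it, for example to mark it reportable,
// would do so through the shared pointer; sketch:
//
//	if cause, ok := errors.Cause(err).(*ErrDiskLimitTimeout); ok {
//		cause.reportable = true // the stack-wrapped error sees the same value
//	}
//
// without losing the attached stack.)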
2085 func (e *ErrDiskLimitTimeout) Error() string { 2086 return fmt.Sprintf("Disk limit timeout of %s reached; requested %d bytes and %d files, %d bytes and %d files available: %+v", 2087 e.timeout, e.requestedBytes, e.requestedFiles, 2088 e.availableBytes, e.availableFiles, e.err) 2089 } 2090 2091 func (j *tlfJournal) checkInfoFileLocked() error { 2092 if !j.needInfoFile { 2093 return nil 2094 } 2095 2096 err := writeTLFJournalInfoFile( 2097 j.dir, j.uid, j.key, j.tlfID, j.chargedTo) 2098 if err != nil { 2099 return err 2100 } 2101 j.needInfoFile = false 2102 return nil 2103 } 2104 2105 func (j *tlfJournal) putBlockData( 2106 ctx context.Context, id kbfsblock.ID, blockCtx kbfsblock.Context, buf []byte, 2107 serverHalf kbfscrypto.BlockCryptKeyServerHalf) (err error) { 2108 if err := j.checkWriteable(); err != nil { 2109 return err 2110 } 2111 2112 // Since beforeBlockPut can block, it should happen outside of 2113 // the journal lock. 2114 2115 timeout := j.config.diskLimitTimeout() 2116 // If there's a deadline set in the context, use 60% of that (if 2117 // that ends up being longer than the default timeout). 2118 // Otherwise, use the default disk limit timeout. 2119 deadline, ok := ctx.Deadline() 2120 if ok { 2121 ctxScaledTimeout := time.Duration(.6 * float64(time.Until(deadline))) 2122 if ctxScaledTimeout > timeout { 2123 timeout = ctxScaledTimeout 2124 } 2125 } 2126 2127 acquireCtx, cancel := context.WithTimeout(ctx, timeout) 2128 defer cancel() 2129 2130 bufLen := int64(len(buf)) 2131 availableBytes, availableFiles, err := 2132 j.diskLimiter.reserveWithBackpressure( 2133 acquireCtx, journalLimitTrackerType, bufLen, filesPerBlockMax, 2134 j.chargedTo) 2135 switch errors.Cause(err) { 2136 case nil: 2137 // Continue. 2138 case context.DeadlineExceeded: 2139 // NOTE: there is a slight race here, where if a flush 2140 // finishes between the `beforeBlockPut` call and here, we 2141 // could put out-of-date limit info in the error, and it might 2142 // look like there is available space. It doesn't seem worth 2143 // changing the interface of `beforeBlockPut` to fix, though. 2144 usageBytes, limitBytes, usageFiles, limitFiles := 2145 j.diskLimiter.getDiskLimitInfo() 2146 return errors.WithStack(&ErrDiskLimitTimeout{ 2147 timeout, bufLen, filesPerBlockMax, 2148 availableBytes, availableFiles, usageBytes, usageFiles, 2149 limitBytes, limitFiles, err, false, 2150 }) 2151 default: 2152 return err 2153 } 2154 2155 var putData bool 2156 defer func() { 2157 j.diskLimiter.commitOrRollback(ctx, journalLimitTrackerType, bufLen, 2158 filesPerBlockMax, putData, j.chargedTo) 2159 }() 2160 2161 j.blockPutFlushLock.RLock() 2162 defer j.blockPutFlushLock.RUnlock() 2163 2164 // Put the block data before taking the lock, so block puts can 2165 // run in parallel. 
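// (Editor's note: the ordering here is blockPutFlushLock (read) ->
// disk write -> journalLock, so many putBlockData calls can write to
// disk concurrently, and journalLock is taken only afterwards for the
// metadata append. Also, on the timeout chosen above: with, say, 50s
// left on the caller's context, the scaled value is
//
//	time.Duration(.6 * float64(50*time.Second)) // 30s
//
// and it is used only when it exceeds j.config.diskLimitTimeout().)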
2166 putData, err = j.blockJournal.putBlockData( 2167 ctx, id, blockCtx, buf, serverHalf) 2168 if err != nil { 2169 return err 2170 } 2171 2172 j.journalLock.Lock() 2173 defer j.journalLock.Unlock() 2174 if err := j.checkEnabledLocked(); err != nil { 2175 return err 2176 } 2177 2178 if err := j.checkInfoFileLocked(); err != nil { 2179 return err 2180 } 2181 2182 storedBytesBefore := j.blockJournal.getStoredBytes() 2183 2184 bufLenToAdd := bufLen 2185 if !putData { 2186 bufLenToAdd = 0 2187 } 2188 2189 err = j.blockJournal.appendBlock(ctx, id, blockCtx, bufLenToAdd) 2190 if err != nil { 2191 return err 2192 } 2193 2194 storedBytesAfter := j.blockJournal.getStoredBytes() 2195 2196 if putData && storedBytesAfter != (storedBytesBefore+bufLen) { 2197 panic(fmt.Sprintf( 2198 "storedBytes changed from %d to %d, but %d bytes of data was put", 2199 storedBytesBefore, storedBytesAfter, bufLen)) 2200 } else if !putData && storedBytesBefore != storedBytesAfter { 2201 panic(fmt.Sprintf( 2202 "storedBytes changed from %d to %d, but data was not put", 2203 storedBytesBefore, storedBytesAfter)) 2204 } 2205 2206 if putData && j.mdJournal.branchID == kbfsmd.NullBranchID { 2207 j.unsquashedBytes += uint64(bufLen) 2208 } 2209 2210 j.config.SubscriptionManagerPublisher().PublishChange( 2211 keybase1.SubscriptionTopic_JOURNAL_STATUS) 2212 j.config.SubscriptionManagerPublisher().PublishChange( 2213 keybase1.SubscriptionTopic_FILES_TAB_BADGE) 2214 j.config.Reporter().NotifySyncStatus(ctx, &keybase1.FSPathSyncStatus{ 2215 FolderType: j.tlfID.Type().FolderType(), 2216 // Path: TODO, 2217 // TODO: should this be the complete total for the file/directory, 2218 // rather than the diff? 2219 SyncingBytes: bufLen, 2220 // SyncingOps: TODO, 2221 }) 2222 2223 j.signalWork() 2224 2225 return nil 2226 } 2227 2228 func (j *tlfJournal) getQuotaInfo() (usedQuotaBytes, quotaBytes int64) { 2229 return j.diskLimiter.getQuotaInfo(j.chargedTo) 2230 } 2231 2232 func (j *tlfJournal) addBlockReference( 2233 ctx context.Context, id kbfsblock.ID, context kbfsblock.Context) error { 2234 j.journalLock.Lock() 2235 defer j.journalLock.Unlock() 2236 if err := j.checkEnabledLocked(); err != nil { 2237 return err 2238 } 2239 2240 if err := j.checkInfoFileLocked(); err != nil { 2241 return err 2242 } 2243 2244 err := j.blockJournal.addReference(ctx, id, context) 2245 if err != nil { 2246 return err 2247 } 2248 2249 j.signalWork() 2250 2251 return nil 2252 } 2253 2254 func (j *tlfJournal) isBlockUnflushed( 2255 ctx context.Context, id kbfsblock.ID) (bool, error) { 2256 j.journalLock.RLock() 2257 defer j.journalLock.RUnlock() 2258 if err := j.checkEnabledLocked(); err != nil { 2259 return false, err 2260 } 2261 2262 // Conservatively assume that a block that's on its way to the 2263 // server _has_ been flushed, so that the caller will try to clean 2264 // it up if it's not needed anymore. 
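// (Editor's note: j.flushingBlocks is the set populated by
// markFlushingBlockIDs and drained by clearFlushingBlockIDs below,
// presumably bracketing each batch the flusher sends; while an ID is
// in that set, the block may already be on the server even though the
// block journal still counts it as unflushed, hence the conservative
// early return here.)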
2265 if j.flushingBlocks[id] { 2266 return true, nil 2267 } 2268 2269 return j.blockJournal.isUnflushed(ctx, id) 2270 } 2271 2272 func (j *tlfJournal) markFlushingBlockIDs(entries blockEntriesToFlush) error { 2273 j.journalLock.Lock() 2274 defer j.journalLock.Unlock() 2275 if err := j.checkEnabledLocked(); err != nil { 2276 return err 2277 } 2278 2279 entries.markFlushingBlockIDs(j.flushingBlocks) 2280 return nil 2281 } 2282 2283 func (j *tlfJournal) clearFlushingBlockIDs(entries blockEntriesToFlush) error { 2284 j.journalLock.Lock() 2285 defer j.journalLock.Unlock() 2286 if err := j.checkEnabledLocked(); err != nil { 2287 return err 2288 } 2289 2290 entries.clearFlushingBlockIDs(j.flushingBlocks) 2291 return nil 2292 } 2293 2294 func (j *tlfJournal) getBranchID() (kbfsmd.BranchID, error) { 2295 j.journalLock.RLock() 2296 defer j.journalLock.RUnlock() 2297 if err := j.checkEnabledLocked(); err != nil { 2298 return kbfsmd.NullBranchID, err 2299 } 2300 2301 return j.mdJournal.branchID, nil 2302 } 2303 2304 func (j *tlfJournal) getMDHead( 2305 ctx context.Context, bid kbfsmd.BranchID) (ImmutableBareRootMetadata, error) { 2306 j.journalLock.RLock() 2307 defer j.journalLock.RUnlock() 2308 if err := j.checkEnabledLocked(); err != nil { 2309 return ImmutableBareRootMetadata{}, err 2310 } 2311 2312 return j.mdJournal.getHead(ctx, bid) 2313 } 2314 2315 func (j *tlfJournal) getMDRange( 2316 ctx context.Context, bid kbfsmd.BranchID, start, stop kbfsmd.Revision) ( 2317 []ImmutableBareRootMetadata, error) { 2318 j.journalLock.RLock() 2319 defer j.journalLock.RUnlock() 2320 if err := j.checkEnabledLocked(); err != nil { 2321 return nil, err 2322 } 2323 2324 return j.mdJournal.getRange(ctx, bid, start, stop) 2325 } 2326 2327 func (j *tlfJournal) doPutMD( 2328 ctx context.Context, rmd *RootMetadata, 2329 mdInfo unflushedPathMDInfo, perRevMap unflushedPathsPerRevMap, 2330 verifyingKey kbfscrypto.VerifyingKey, bps data.BlockPutState) ( 2331 irmd ImmutableRootMetadata, retryPut bool, err error) { 2332 // Now take the lock and put the MD, merging in the unflushed 2333 // paths while under the lock. 2334 j.journalLock.Lock() 2335 defer j.journalLock.Unlock() 2336 if err := j.checkEnabledLocked(); err != nil { 2337 return ImmutableRootMetadata{}, false, err 2338 } 2339 2340 if err := j.checkInfoFileLocked(); err != nil { 2341 return ImmutableRootMetadata{}, false, err 2342 } 2343 2344 if !j.unflushedPaths.appendToCache(mdInfo, perRevMap) { 2345 return ImmutableRootMetadata{}, true, nil 2346 } 2347 2348 // Treat the first revision as a squash, so that it doesn't end up 2349 // as the earliest revision in a range during a CR squash. 2350 isFirstRev := rmd.Revision() == kbfsmd.RevisionInitial 2351 2352 // TODO: remove the revision from the cache on any errors below? 2353 // Tricky when the append is only queued. 2354 2355 mdID, journalID, err := j.mdJournal.put(ctx, j.config.Crypto(), 2356 j.config.encryptionKeyGetter(), j.config.BlockSplitter(), 2357 rmd, isFirstRev) 2358 if err != nil { 2359 return ImmutableRootMetadata{}, false, err 2360 } 2361 2362 err = j.blockJournal.markMDRevision( 2363 ctx, rmd.Revision(), journalID, isFirstRev) 2364 if err != nil { 2365 return ImmutableRootMetadata{}, false, err 2366 } 2367 2368 // Put the MD into the cache under the same lock as it is put in 2369 // the journal, to guarantee it will be replaced if the journal is 2370 // converted into a branch before any of the upper layer have a 2371 // chance to cache it. 
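// (Editor's note: this pairing mirrors the flush path above, which
// calls MDCache().MarkPutToServer once a revision reaches the server;
// here the freshly journaled revision is force-cached with Replace,
// so a later branch conversion can update the cached copy rather than
// racing with a stale Put from a layer above.)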
2372 rmd, err = rmd.loadCachedBlockChanges( 2373 ctx, bps, j.log, j.vlog, j.config.Codec()) 2374 if err != nil { 2375 return ImmutableRootMetadata{}, false, err 2376 } 2377 irmd = MakeImmutableRootMetadata( 2378 rmd, verifyingKey, mdID, j.config.Clock().Now(), false) 2379 2380 // Revisions created locally should always override anything else 2381 // in the cache, so use `Replace` rather than `Put`. 2382 err = j.config.MDCache().Replace(irmd, irmd.BID()) 2383 if err != nil { 2384 return ImmutableRootMetadata{}, false, err 2385 } 2386 j.log.CDebugf(ctx, "Put update rev=%d id=%s", rmd.Revision(), mdID) 2387 2388 j.signalWork() 2389 2390 select { 2391 case j.needBranchCheckCh <- struct{}{}: 2392 default: 2393 } 2394 2395 return irmd, false, nil 2396 } 2397 2398 // prepAndAddRMDWithRetry prepares the paths without holding the lock, 2399 // as `f` might need to take the lock. The prep step is a no-op if the 2400 // unflushed path cache is uninitialized. TODO: avoid doing this if 2401 // we can somehow be sure the cache won't be initialized by the time 2402 // we finish this operation. 2403 func (j *tlfJournal) prepAndAddRMDWithRetry(ctx context.Context, 2404 rmd *RootMetadata, 2405 f func(unflushedPathMDInfo, unflushedPathsPerRevMap) (bool, error)) error { 2406 mdInfo := unflushedPathMDInfo{ 2407 revision: rmd.Revision(), 2408 kmd: rmd, 2409 pmd: *rmd.Data(), 2410 // TODO: Plumb through clock? Though the timestamp doesn't 2411 // matter for the unflushed path cache. 2412 localTimestamp: time.Now(), 2413 } 2414 perRevMap, err := j.unflushedPaths.prepUnflushedPaths( 2415 ctx, j.uid, j.key, j.config.Codec(), j.log, j.config, mdInfo) 2416 if err != nil { 2417 return err 2418 } 2419 2420 retry, err := f(mdInfo, perRevMap) 2421 if err != nil { 2422 return err 2423 } 2424 2425 if retry { 2426 // The cache was initialized after the last time we tried to 2427 // prepare the unflushed paths.
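// (Editor's note: at most one retry is expected: `retry` is only
// reported when appendToCache or reinitializeWithResolution finds
// that the cache became initialized between the prep above and the
// call to `f`, and once initialized the cache stays usable, so a
// second retry signal is treated as a bug below.)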
2428 perRevMap, err = j.unflushedPaths.prepUnflushedPaths( 2429 ctx, j.uid, j.key, j.config.Codec(), j.log, j.config, mdInfo) 2430 if err != nil { 2431 return err 2432 } 2433 2434 retry, err := f(mdInfo, perRevMap) 2435 if err != nil { 2436 return err 2437 } 2438 2439 if retry { 2440 return errors.New("Unexpectedly asked to retry " + 2441 "MD put more than once") 2442 } 2443 } 2444 return nil 2445 } 2446 2447 func (j *tlfJournal) putMD( 2448 ctx context.Context, rmd *RootMetadata, 2449 verifyingKey kbfscrypto.VerifyingKey, bps data.BlockPutState) ( 2450 irmd ImmutableRootMetadata, err error) { 2451 if err := j.checkWriteable(); err != nil { 2452 return ImmutableRootMetadata{}, err 2453 } 2454 2455 err = j.prepAndAddRMDWithRetry(ctx, rmd, 2456 func(mdInfo unflushedPathMDInfo, perRevMap unflushedPathsPerRevMap) ( 2457 retry bool, err error) { 2458 irmd, retry, err = j.doPutMD( 2459 ctx, rmd, mdInfo, perRevMap, verifyingKey, bps) 2460 return retry, err 2461 }) 2462 if err != nil { 2463 return ImmutableRootMetadata{}, err 2464 } 2465 return irmd, nil 2466 } 2467 2468 func (j *tlfJournal) clearMDs(ctx context.Context, bid kbfsmd.BranchID) error { 2469 if err := j.checkWriteable(); err != nil { 2470 return err 2471 } 2472 2473 if j.onBranchChange != nil { 2474 j.onBranchChange.onTLFBranchChange(j.tlfID, kbfsmd.NullBranchID) 2475 } 2476 2477 j.journalLock.Lock() 2478 defer j.journalLock.Unlock() 2479 if err := j.checkEnabledLocked(); err != nil { 2480 return err 2481 } 2482 2483 if err := j.checkInfoFileLocked(); err != nil { 2484 return err 2485 } 2486 2487 err := j.mdJournal.clear(ctx, bid) 2488 if err != nil { 2489 return err 2490 } 2491 2492 j.resume(journalPauseConflict) 2493 return nil 2494 } 2495 2496 func (j *tlfJournal) doResolveBranch( 2497 ctx context.Context, bid kbfsmd.BranchID, blocksToDelete []kbfsblock.ID, 2498 rmd *RootMetadata, mdInfo unflushedPathMDInfo, 2499 perRevMap unflushedPathsPerRevMap, verifyingKey kbfscrypto.VerifyingKey, 2500 bps data.BlockPutState) ( 2501 irmd ImmutableRootMetadata, retry bool, err error) { 2502 j.journalLock.Lock() 2503 defer j.journalLock.Unlock() 2504 if err := j.checkEnabledLocked(); err != nil { 2505 return ImmutableRootMetadata{}, false, err 2506 } 2507 2508 if err := j.checkInfoFileLocked(); err != nil { 2509 return ImmutableRootMetadata{}, false, err 2510 } 2511 2512 // The set of unflushed paths could change as part of the 2513 // resolution, and the revision numbers definitely change. 2514 isPendingLocalSquash := bid == kbfsmd.PendingLocalSquashBranchID 2515 if !j.unflushedPaths.reinitializeWithResolution( 2516 mdInfo, perRevMap, isPendingLocalSquash) { 2517 return ImmutableRootMetadata{}, true, nil 2518 } 2519 2520 // First write the resolution to a new branch, and swap it with 2521 // the existing branch, then clear the existing branch. 2522 mdID, journalID, err := j.mdJournal.resolveAndClear( 2523 ctx, j.config.Crypto(), j.config.encryptionKeyGetter(), 2524 j.config.BlockSplitter(), j.config.MDCache(), bid, rmd) 2525 if err != nil { 2526 return ImmutableRootMetadata{}, false, err 2527 } 2528 2529 // Then go through and mark blocks and md rev markers for ignoring. 2530 totalIgnoredBytes, err := j.blockJournal.ignoreBlocksAndMDRevMarkers( 2531 ctx, blocksToDelete, rmd.Revision()) 2532 if err != nil { 2533 return ImmutableRootMetadata{}, false, err 2534 } 2535 2536 // Treat ignored blocks as flushed for the purposes of 2537 // accounting. 
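// (Editor's note: onBlocksFlush presumably credits the disk limiter
// for totalIgnoredBytes right away, the same way a real flush would,
// so backpressure on new writes reflects only bytes that will still
// be uploaded after the resolution; made-up example: ignoring 8<<20
// bytes of a discarded branch frees 8 MB of journal byte quota
// immediately rather than after the next flush.)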
2538 j.diskLimiter.onBlocksFlush(ctx, totalIgnoredBytes, j.chargedTo) 2539 2540 // Finally, append a new, non-ignored md rev marker for the new revision. 2541 err = j.blockJournal.markMDRevision( 2542 ctx, rmd.Revision(), journalID, isPendingLocalSquash) 2543 if err != nil { 2544 return ImmutableRootMetadata{}, false, err 2545 } 2546 2547 // Put the MD into the cache under the same lock as it is put in 2548 // the journal, to guarantee it will be replaced if the journal is 2549 // converted into a branch before any of the upper layer have a 2550 // chance to cache it. Revisions created locally should always 2551 // override anything else in the cache, so use `Replace` rather 2552 // than `Put`. 2553 rmd, err = rmd.loadCachedBlockChanges( 2554 ctx, bps, j.log, j.vlog, j.config.Codec()) 2555 if err != nil { 2556 return ImmutableRootMetadata{}, false, err 2557 } 2558 irmd = MakeImmutableRootMetadata( 2559 rmd, verifyingKey, mdID, j.config.Clock().Now(), false) 2560 err = j.config.MDCache().Replace(irmd, irmd.BID()) 2561 if err != nil { 2562 return ImmutableRootMetadata{}, false, err 2563 } 2564 j.log.CDebugf(ctx, "Put resolution rev=%d id=%s", rmd.Revision(), mdID) 2565 2566 j.resume(journalPauseConflict) 2567 j.signalWork() 2568 2569 // TODO: kick off a background goroutine that deletes ignored 2570 // block data files before the flush gets to them. 2571 2572 return irmd, false, nil 2573 } 2574 2575 func (j *tlfJournal) moveAway(ctx context.Context) (string, error) { 2576 if err := j.checkWriteable(); err != nil { 2577 return "", err 2578 } 2579 2580 j.journalLock.Lock() 2581 defer j.journalLock.Unlock() 2582 // j.dir is X/y-z. Current timestamp is T. Move directory X/y-z to X/z-T.bak 2583 // and then rebuild it. 2584 restDir, lastDir := filepath.Split(j.dir) 2585 idParts := strings.Split(lastDir, "-") 2586 newDirName := fmt.Sprintf(tlfJournalBakFmt, idParts[len(idParts)-1], 2587 j.config.Clock().Now().UnixNano()) 2588 fullDirName := filepath.Join(restDir, newDirName) 2589 2590 err := os.Rename(j.dir, fullDirName) 2591 if err != nil { 2592 return "", err 2593 } 2594 2595 err = os.MkdirAll(j.dir, 0700) 2596 if err != nil { 2597 return "", err 2598 } 2599 2600 // Copy over the info.json file 2601 infoData, err := ioutil.ReadFile(getTLFJournalInfoFilePath(fullDirName)) 2602 if err != nil { 2603 return "", err 2604 } 2605 err = ioutil.WriteFile(getTLFJournalInfoFilePath(j.dir), infoData, 0600) 2606 if err != nil { 2607 return "", err 2608 } 2609 2610 // Make new block and MD journals 2611 blockJournal, err := makeBlockJournal( 2612 ctx, j.config.Codec(), j.dir, j.log, j.vlog) 2613 if err != nil { 2614 return "", err 2615 } 2616 2617 mdJournal, err := makeMDJournal(ctx, j.uid, j.key, j.config.Codec(), 2618 j.config.Crypto(), j.config.Clock(), j.config.teamMembershipChecker(), 2619 j.config, j.tlfID, j.config.MetadataVersion(), j.dir, j.log, 2620 j.overrideTlfID) 2621 if err != nil { 2622 return "", err 2623 } 2624 2625 j.blockJournal = blockJournal 2626 j.mdJournal = mdJournal 2627 2628 j.resume(journalPauseConflict) 2629 j.signalWork() 2630 return fullDirName, nil 2631 } 2632 2633 func (j *tlfJournal) resolveBranch(ctx context.Context, 2634 bid kbfsmd.BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata, 2635 verifyingKey kbfscrypto.VerifyingKey, bps data.BlockPutState) ( 2636 irmd ImmutableRootMetadata, err error) { 2637 if err := j.checkWriteable(); err != nil { 2638 return ImmutableRootMetadata{}, err 2639 } 2640 2641 err = j.prepAndAddRMDWithRetry(ctx, rmd, 2642 func(mdInfo 
unflushedPathMDInfo, perRevMap unflushedPathsPerRevMap) ( 2643 retry bool, err error) { 2644 irmd, retry, err = j.doResolveBranch( 2645 ctx, bid, blocksToDelete, rmd, mdInfo, perRevMap, verifyingKey, 2646 bps) 2647 return retry, err 2648 }) 2649 if err != nil { 2650 return ImmutableRootMetadata{}, err 2651 } 2652 return irmd, nil 2653 } 2654 2655 func (j *tlfJournal) wait(ctx context.Context) error { 2656 workLeft, err := j.wg.WaitUnlessPaused(ctx) 2657 if err != nil { 2658 return err 2659 } 2660 if workLeft { 2661 j.log.CDebugf(ctx, "Wait completed with work left, "+ 2662 "due to paused journal") 2663 } 2664 return nil 2665 } 2666 2667 func (j *tlfJournal) waitForCompleteFlush(ctx context.Context) error { 2668 // Now we wait for the journal to completely empty. Waiting on 2669 // the wg isn't enough, because conflicts/squashes can cause the 2670 // journal to pause and we'll be called too early. 2671 for { 2672 blockEntryCount, mdEntryCount, err := j.getJournalEntryCounts() 2673 if err != nil { 2674 return err 2675 } 2676 if blockEntryCount == 0 && mdEntryCount == 0 { 2677 j.log.CDebugf(ctx, "Single op completely flushed") 2678 return nil 2679 } 2680 2681 // Let the background flusher know it should try to flush 2682 // everything again, once any conflicts have been resolved. 2683 j.signalWork() 2684 2685 err = j.wg.Wait(ctx) 2686 if err != nil { 2687 return err 2688 } 2689 _, noLock := j.lastFlushErr.(kbfsmd.ServerErrorLockConflict) 2690 if noLock { 2691 return j.lastFlushErr 2692 } 2693 } 2694 } 2695 2696 func (j *tlfJournal) finishSingleOp(ctx context.Context, 2697 lc *keybase1.LockContext, priority keybase1.MDPriority) error { 2698 j.log.CDebugf(ctx, "Finishing single op") 2699 2700 flushCtx := flushContext{ 2701 lockContextForPut: lc, 2702 priorityForPut: priority, 2703 } 2704 2705 // Let the background flusher know it should change the single op 2706 // mode to finished, so we can have it set ASAP without waiting to 2707 // take `flushLock` here. 2708 select { 2709 case j.finishSingleOpCh <- flushCtx: 2710 default: 2711 } 2712 2713 return j.waitForCompleteFlush(ctx) 2714 }
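// (Editor's sketch: a hypothetical, non-authoritative example of how a
// caller might turn the status returned by getJournalStatus into a
// one-line progress message, using only exported fields of
// TLFJournalStatus; formatJournalProgress is not part of this file.)
//
//	func formatJournalProgress(s TLFJournalStatus, now time.Time) string {
//		if s.UnflushedBytes == 0 {
//			return "journal fully flushed"
//		}
//		msg := fmt.Sprintf("%d bytes unflushed", s.UnflushedBytes)
//		if s.EndEstimate != nil {
//			msg += fmt.Sprintf(", expected to finish in ~%s",
//				s.EndEstimate.Sub(now).Round(time.Second))
//		}
//		if s.LastFlushErr != "" {
//			msg += ", last flush error: " + s.LastFlushErr
//		}
//		return msg
//	}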