github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/libkbfs/interfaces.go

// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"context"
	"os"
	"time"

	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/favorites"
	"github.com/keybase/client/go/kbfs/idutil"
	"github.com/keybase/client/go/kbfs/kbfsblock"
	"github.com/keybase/client/go/kbfs/kbfscodec"
	"github.com/keybase/client/go/kbfs/kbfscrypto"
	"github.com/keybase/client/go/kbfs/kbfsedits"
	"github.com/keybase/client/go/kbfs/kbfsmd"
	"github.com/keybase/client/go/kbfs/ldbutils"
	"github.com/keybase/client/go/kbfs/libkey"
	"github.com/keybase/client/go/kbfs/tlf"
	"github.com/keybase/client/go/kbfs/tlfhandle"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/chat1"
	"github.com/keybase/client/go/protocol/keybase1"
	metrics "github.com/rcrowley/go-metrics"
	billy "gopkg.in/src-d/go-billy.v4"
)

type logMaker interface {
	MakeLogger(module string) logger.Logger
	MakeVLogger(logger.Logger) *libkb.VDebugLog
	GetPerfLog() logger.Logger
}

type blockCacher interface {
	BlockCache() data.BlockCache
}

type keyGetterGetter interface {
	keyGetter() blockKeyGetter
}

type codecGetter interface {
	Codec() kbfscodec.Codec
}

type blockOpsGetter interface {
	BlockOps() BlockOps
}

type blockServerGetter interface {
	BlockServer() BlockServer
}

type cryptoPureGetter interface {
	cryptoPure() cryptoPure
}

type cryptoGetter interface {
	Crypto() Crypto
}

type chatGetter interface {
	Chat() Chat
}

type currentSessionGetterGetter interface {
	CurrentSessionGetter() idutil.CurrentSessionGetter
}

type signerGetter interface {
	Signer() kbfscrypto.Signer
}

type diskBlockCacheGetter interface {
	DiskBlockCache() DiskBlockCache
}

type diskBlockCacheSetter interface {
	MakeDiskBlockCacheIfNotExists() error
}

type diskBlockCacheFractionSetter interface {
	SetDiskBlockCacheFraction(float64)
}

type syncBlockCacheFractionSetter interface {
	SetSyncBlockCacheFraction(float64)
}

type diskMDCacheGetter interface {
	DiskMDCache() DiskMDCache
}

type diskMDCacheSetter interface {
	MakeDiskMDCacheIfNotExists() error
}

type diskQuotaCacheGetter interface {
	DiskQuotaCache() DiskQuotaCache
}

type diskQuotaCacheSetter interface {
	MakeDiskQuotaCacheIfNotExists() error
}
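// exampleConfigConsumer is a hypothetical sketch, not part of this file's
// real API, showing how these narrow getter interfaces are meant to be
// consumed: a helper asks only for the capabilities it needs instead of
// depending on a full Config, which keeps it easy to test with small fakes.
func exampleConfigConsumer(c interface {
	logMaker
	codecGetter
}) (logger.Logger, kbfscodec.Codec) {
	// Only MakeLogger and Codec are reachable through c here.
	return c.MakeLogger("example"), c.Codec()
}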
type blockMetadataStoreGetSeter interface {
	MakeBlockMetadataStoreIfNotExists() error
	XattrStore() XattrStore
	// Other metadata store types go here.
}

type clockGetter interface {
	Clock() Clock
}

type reporterGetter interface {
	Reporter() Reporter
}

type diskLimiterGetter interface {
	DiskLimiter() DiskLimiter
}

type syncedTlfGetterSetter interface {
	IsSyncedTlf(tlfID tlf.ID) bool
	IsSyncedTlfPath(tlfPath string) bool
	GetTlfSyncState(tlfID tlf.ID) FolderSyncConfig
	SetTlfSyncState(
		ctx context.Context, tlfID tlf.ID, config FolderSyncConfig) (
		<-chan error, error)
	GetAllSyncedTlfs() []tlf.ID

	idutil.OfflineStatusGetter
}

type blockRetrieverGetter interface {
	BlockRetriever() BlockRetriever
}

type settingsDBGetter interface {
	GetSettingsDB() *SettingsDB
}

// SubscriptionManagerClientID identifies a subscriptionManager client.
type SubscriptionManagerClientID string

type subscriptionManagerGetter interface {
	// SubscriptionManager returns a subscription manager that can be used to
	// subscribe to events.
	//
	// clientID identifies a subscriptionManager client. Each user of the
	// subscription manager should specify a unique clientID. When a
	// notification happens, the client ID is provided.
	//
	// This is helpful for the caller to filter out notifications that other
	// clients have subscribed to.
	//
	// If purgeable is true, the client is marked as purgeable. We keep a
	// maximum of 3 purgeable clients (FIFO). This is useful as a way to purge
	// old, likely dead, clients, which happens a lot with electron refreshes.
	//
	// notifier specifies how a notification should be delivered when things
	// change. If different notifiers are used across multiple calls to get the
	// subscription manager for the same clientID, only the first one is
	// effective.
	SubscriptionManager(clientID SubscriptionManagerClientID, purgeable bool,
		notifier SubscriptionNotifier) SubscriptionManager
}

type subscriptionManagerPublisherGetter interface {
	SubscriptionManagerPublisher() SubscriptionManagerPublisher
}

// NodeID is a unique but transient ID for a Node. That is, two Node
// objects in memory at the same time represent the same file or
// directory if and only if their NodeIDs are equal (by pointer).
type NodeID interface {
	// ParentID returns the NodeID of the directory containing the
	// pointed-to file or directory, or nil if none exists.
	ParentID() NodeID
}

// NodeFSReadOnly is the subset of billy.Filesystem that is actually
// used by libkbfs. The method comments are copied from go-billy.
type NodeFSReadOnly interface {
	// ReadDir reads the directory named by dirname and returns a list of
	// directory entries sorted by filename.
	ReadDir(path string) ([]os.FileInfo, error)
	// Lstat returns a FileInfo describing the named file. If the file is a
	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
	// makes no attempt to follow the link.
	Lstat(filename string) (os.FileInfo, error)
	// Readlink returns the target path of link.
	Readlink(link string) (string, error)
	// Open opens the named file for reading. If successful, methods on the
	// returned file can be used for reading; the associated file descriptor
	// has mode O_RDONLY.
	Open(filename string) (billy.File, error)
	// OpenFile is the generalized open call; most users will use Open or
	// Create instead.
	// It opens the named file with the specified flag (O_RDONLY etc.) and
	// perm (0666 etc.), if applicable. If successful, methods on the returned
	// File can be used for I/O.
	OpenFile(filename string, flags int, mode os.FileMode) (billy.File, error)
}

// Node represents a direct pointer to a file or directory in KBFS.
// It is somewhat like an inode in a regular file system. Users of
// KBFS can use Node as a handle when accessing files or directories
// they have previously looked up.
type Node interface {
	// GetID returns the ID of this Node. This should be used as a
	// map key instead of the Node itself.
	GetID() NodeID
	// GetFolderBranch returns the folder ID and branch for this Node.
	GetFolderBranch() data.FolderBranch
	// GetBasename returns the current basename of the node, or ""
	// if the node has been unlinked.
	GetBasename() data.PathPartString
	// GetPathPlaintextSansTlf returns the cleaned path of the node in
	// plaintext.
	GetPathPlaintextSansTlf() (string, bool)
	// Readonly returns true if KBFS should outright reject any write
	// attempts on data or directory structures of this node. Note
	// that even if it returns false, KBFS can still reject writes to
	// the node for other reasons, such as TLF permissions. An
	// implementation that wraps another `Node` (`inner`) must return
	// `inner.Readonly()` if it decides not to return `true` on its
	// own.
	Readonly(ctx context.Context) bool
	// ShouldCreateMissedLookup is called for Nodes representing
	// directories, whenever `name` is looked up but is not found in
	// the directory. If the Node decides a new entry should be
	// created matching this lookup, it should return `true` as well
	// as a context to use for the creation, the type of the new entry
	// and the symbolic link contents if the entry is a Sym; the
	// caller should then create this entry. Otherwise it should
	// return false. It may return the types `FakeDir` or `FakeFile`
	// to indicate that the caller should pretend the entry exists,
	// even if it really does not. In the case of fake files, a
	// non-nil `fi` can be returned and used by the caller to
	// construct the dir entry for the file. It can also return the
	// type `RealDir`, along with a non-zero `ptr`, to indicate a real
	// directory corresponding to that pointer should be used. An
	// implementation that wraps another `Node` (`inner`) must return
	// `inner.ShouldCreateMissedLookup()` if it decides not to return
	// `true` on its own.
	ShouldCreateMissedLookup(ctx context.Context, name data.PathPartString) (
		shouldCreate bool, newCtx context.Context, et data.EntryType,
		fi os.FileInfo, sympath data.PathPartString, ptr data.BlockPointer)
	// ShouldRetryOnDirRead is called for Nodes representing
	// directories, whenever a `Lookup` or `GetDirChildren` is done on
	// them. It should return true to instruct the caller that it
	// should re-sync its view of the directory and retry the
	// operation.
	ShouldRetryOnDirRead(ctx context.Context) bool
	// RemoveDir is called on a `Node` before going through the normal
	// `RemoveDir` flow, to give the Node a chance to handle it in a
	// custom way. If the `Node` handles it internally, it should
	// return `true`.
	RemoveDir(ctx context.Context, dirName data.PathPartString) (
		removeHandled bool, err error)
	// WrapChild returns a wrapped version of child, if desired, to
	// add custom behavior to the child node. An implementation that
	// wraps another `Node` (`inner`) must first call
	// `inner.WrapChild(child)` before performing its own wrapping
	// operation, to ensure that all wrapping is preserved and that it
	// happens in the correct order.
	WrapChild(child Node) Node
	// Unwrap returns the initial, unwrapped Node that was used to
	// create this Node.
	Unwrap() Node
	// GetFS returns a file system interface that, if non-nil, should
	// be used to satisfy any directory-related calls on this Node,
	// instead of the standard, block-based method of accessing data.
	// The provided context will be used, if possible, for any
	// subsequent calls on the file system.
	GetFS(ctx context.Context) NodeFSReadOnly
	// GetFile returns a file interface that, if non-nil, should be
	// used to satisfy any file-related calls on this Node, instead of
	// the standard, block-based method of accessing data. The
	// provided context will be used, if possible, for any subsequent
	// calls on the file.
	GetFile(ctx context.Context) billy.File
	// EntryType is the type of the entry represented by this node.
	EntryType() data.EntryType
	// GetBlockID returns the block ID of the node.
	GetBlockID() kbfsblock.ID
	// FillCacheDuration sets `d` to the suggested cache time for this
	// node, if desired.
	FillCacheDuration(d *time.Duration)
	// Obfuscator returns something that can obfuscate the child
	// entries of this Node in the case of directories; for other
	// types, it returns nil.
	Obfuscator() data.Obfuscator
	// ChildName returns an obfuscatable version of the given name of
	// a child entry of this node.
	ChildName(name string) data.PathPartString
}

// SyncedTlfMD contains the node metadata and handle for a given synced TLF.
type SyncedTlfMD struct {
	MD     NodeMetadata
	Handle *tlfhandle.Handle
}
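// exampleWrapperNode is a hypothetical sketch, not part of this file's real
// API, illustrating the delegation contract documented on Node above: a
// wrapper embeds the inner Node, returns `inner.Readonly()` when it has no
// opinion of its own, and calls `inner.WrapChild()` before adding its own
// wrapping so that all wrapping is preserved in order.
type exampleWrapperNode struct {
	Node // the wrapped inner Node; all other methods are promoted from it
}

func (n exampleWrapperNode) Readonly(ctx context.Context) bool {
	// This wrapper never decides `true` on its own, so it must defer to the
	// inner Node.
	return n.Node.Readonly(ctx)
}

func (n exampleWrapperNode) WrapChild(child Node) Node {
	// Let the inner Node wrap the child first, then apply our own wrapping.
	return exampleWrapperNode{Node: n.Node.WrapChild(child)}
}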
// KBFSOps handles all file system operations. Expands all indirect
// pointers. Operations that modify the server data change all the
// block IDs along the path, and so must return a path with the new
// BlockIds so the caller can update their references.
//
// KBFSOps implementations must guarantee goroutine-safety of calls on
// a per-top-level-folder basis.
//
// There are two types of operations that could block:
//   - remote-sync operations, that need to synchronously update the
//     MD for the corresponding top-level folder. When these
//     operations return successfully, they will have guaranteed to
//     have successfully written the modification to the KBFS servers.
//   - remote-access operations, that don't sync any modifications to KBFS
//     servers, but may block on reading data from the servers.
//
// KBFSOps implementations are supposed to give git-like consistency
// semantics for modification operations; they will be visible to
// other clients immediately after the remote-sync operations succeed,
// if and only if there was no other intervening modification to the
// same folder. If not, the change will be sync'd to the server in a
// special per-device "unmerged" area before the operation succeeds.
// In this case, the modification will not be visible to other clients
// until the KBFS code on this device performs automatic conflict
// resolution in the background.
//
// All methods take a Context (see https://blog.golang.org/context),
// and if that context is cancelled during the operation, KBFSOps will
// abort any blocking calls and return ctx.Err(). Any notifications
// resulting from an operation will also include this ctx (or a
// Context derived from it), allowing the caller to determine whether
// the notification is a result of their own action or an external
// action.
//
// Each directory and file name is specified with a
// `data.PathPartString`, to protect against accidentally logging
// plaintext filenames. These can be easily created from the parent
// node's `Node` object with the `ChildName` function.
type KBFSOps interface {
	// GetFavorites returns the logged-in user's list of favorite
	// top-level folders. This is a remote-access operation when the cache
	// is empty or expired.
	GetFavorites(ctx context.Context) ([]favorites.Folder, error)
	// GetFolderWithFavFlags returns a keybase1.FolderWithFavFlags for the
	// given handle.
	GetFolderWithFavFlags(ctx context.Context,
		handle *tlfhandle.Handle) (keybase1.FolderWithFavFlags, error)
	// GetFavoritesAll returns the logged-in user's lists of favorite, ignored,
	// and new top-level folders. This is a remote-access operation when the
	// cache is empty or expired.
	GetFavoritesAll(ctx context.Context) (keybase1.FavoritesResult, error)
	// GetBadge returns the overall KBFS badge state for this device.
	// It's cheaper than the other favorites methods.
	GetBadge(ctx context.Context) (keybase1.FilesTabBadge, error)
	// RefreshCachedFavorites tells the instances to forget any cached
	// favorites list and fetch a new list from the server. The
	// effects are asynchronous; if there's an error refreshing the
	// favorites, the cached favorites will become empty.
	RefreshCachedFavorites(ctx context.Context, mode FavoritesRefreshMode)
	// ClearCachedFavorites tells the instances to forget any cached
	// favorites list, e.g. when a user logs out.
	ClearCachedFavorites(ctx context.Context)
	// AddFavorite adds the favorite to both the server and
	// the local cache.
	AddFavorite(ctx context.Context, fav favorites.Folder, data favorites.Data) error
	// DeleteFavorite deletes the favorite from both the server and
	// the local cache. Idempotent, so it succeeds even if the folder
	// isn't favorited.
	DeleteFavorite(ctx context.Context, fav favorites.Folder) error
	// SetFavoritesHomeTLFInfo sets the home TLF TeamIDs to initialize the
	// favorites cache on login.
	SetFavoritesHomeTLFInfo(ctx context.Context, info homeTLFInfo)
	// RefreshEditHistory asks the FBO for the given favorite to reload its
	// edit history.
	RefreshEditHistory(fav favorites.Folder)

	// GetTLFCryptKeys gets the crypt keys of all generations, as well as the
	// TLF ID, for tlfHandle. The returned keys (the keys slice) are ordered by
	// generation, starting with the key for FirstValidKeyGen.
	GetTLFCryptKeys(ctx context.Context, tlfHandle *tlfhandle.Handle) (
		keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error)

	// GetTLFID gets the TLF ID for tlfHandle.
	GetTLFID(ctx context.Context, tlfHandle *tlfhandle.Handle) (tlf.ID, error)

	// GetTLFHandle returns the TLF handle for a given node.
	GetTLFHandle(ctx context.Context, node Node) (*tlfhandle.Handle, error)

	// GetOrCreateRootNode returns the root node and root entry
	// info associated with the given TLF handle and branch, if
	// the logged-in user has read permissions to the top-level
	// folder. It creates the folder if one doesn't exist yet (and
	// branch == MasterBranch), and the logged-in user has write
	// permissions to the top-level folder. This is a
	// remote-access operation.
	GetOrCreateRootNode(
		ctx context.Context, h *tlfhandle.Handle, branch data.BranchName) (
		node Node, ei data.EntryInfo, err error)
	// GetRootNode is like GetOrCreateRootNode but if the root node
	// does not exist it will return a nil Node and not create it.
	GetRootNode(
		ctx context.Context, h *tlfhandle.Handle, branch data.BranchName) (
		node Node, ei data.EntryInfo, err error)
	// GetDirChildren returns a map of children in the directory,
	// mapped to their EntryInfo, if the logged-in user has read
	// permission for the top-level folder. This is a remote-access
	// operation.
	GetDirChildren(ctx context.Context, dir Node) (
		map[data.PathPartString]data.EntryInfo, error)
	// Lookup returns the Node and entry info associated with a
	// given name in a directory, if the logged-in user has read
	// permissions to the top-level folder. The returned Node is nil
	// if the name is a symlink. This is a remote-access operation.
	Lookup(ctx context.Context, dir Node, name data.PathPartString) (
		Node, data.EntryInfo, error)
	// Stat returns the entry info associated with a
	// given Node, if the logged-in user has read permissions to the
	// top-level folder. This is a remote-access operation.
	Stat(ctx context.Context, node Node) (data.EntryInfo, error)
	// CreateDir creates a new subdirectory under the given node, if
	// the logged-in user has write permission to the top-level
	// folder. Returns the new Node for the created subdirectory, and
	// its new entry info. This is a remote-sync operation.
	CreateDir(ctx context.Context, dir Node, name data.PathPartString) (
		Node, data.EntryInfo, error)
	// CreateFile creates a new file under the given node, if the
	// logged-in user has write permission to the top-level folder.
	// Returns the new Node for the created file, and its new
	// entry info. excl (when implemented) specifies whether this is an
	// exclusive create. Semantically, setting excl to WithExcl is like
	// O_CREAT|O_EXCL in a Unix open() call.
	//
	// This is a remote-sync operation.
	CreateFile(
		ctx context.Context, dir Node, name data.PathPartString, isExec bool,
		excl Excl) (Node, data.EntryInfo, error)
	// CreateLink creates a new symlink under the given node, if the
	// logged-in user has write permission to the top-level folder.
	// Returns the new entry info for the created symlink. The
	// symlink is represented as a single `data.PathPartString`
	// (generally obfuscated by `dir`'s Obfuscator) to avoid
	// accidental logging, even though it could point outside of the
	// directory. The deobfuscate command will inspect symlinks when
	// deobfuscating to make this easier to debug. This is a
	// remote-sync operation.
	CreateLink(
		ctx context.Context, dir Node, fromName, toPath data.PathPartString) (
		data.EntryInfo, error)
	// RemoveDir removes the subdirectory represented by the given
	// node, if the logged-in user has write permission to the
	// top-level folder. Will return an error if the subdirectory is
	// not empty. This is a remote-sync operation.
	RemoveDir(ctx context.Context, dir Node, dirName data.PathPartString) error
	// RemoveEntry removes the directory entry represented by the
	// given node, if the logged-in user has write permission to the
	// top-level folder. This is a remote-sync operation.
	RemoveEntry(ctx context.Context, dir Node, name data.PathPartString) error
	// Rename performs an atomic rename operation with a given
	// top-level folder if the logged-in user has write permission to
	// that folder, and will return an error if nodes from different
	// folders are passed in. Also returns an error if the new name
	// already has an entry corresponding to an existing directory
	// (only non-dir types may be renamed over). This is a
	// remote-sync operation.
	Rename(
		ctx context.Context, oldParent Node, oldName data.PathPartString,
		newParent Node, newName data.PathPartString) error
	// Read fills in the given buffer with data from the file at the
	// given node starting at the given offset, if the logged-in user
	// has read permission to the top-level folder. The read data
	// reflects any outstanding writes and truncates to that file that
	// have been written through this KBFSOps object, even if those
	// writes have not yet been sync'd. There is no guarantee that
	// Read returns all of the requested data; it will return the
	// number of bytes that it wrote to the dest buffer. Reads on an
	// unlinked file may or may not succeed, depending on whether or
	// not the data has been cached locally. If (0, nil) is returned,
	// that means EOF has been reached. This is a remote-access
	// operation.
	Read(ctx context.Context, file Node, dest []byte, off int64) (int64, error)
	// Write modifies the file at the given node, by writing the given
	// buffer at the given offset within the file, if the logged-in
	// user has write permission to the top-level folder. It
	// overwrites any data already there, and extends the file size as
	// necessary to accommodate the new data. It guarantees to write
	// the entire buffer in one operation. Writes on an unlinked file
	// may or may not succeed as no-ops, depending on whether or not
	// the necessary blocks have been locally cached. This is a
	// remote-access operation.
	Write(ctx context.Context, file Node, data []byte, off int64) error
	// Truncate modifies the file at the given node, by either
	// shrinking or extending its size to match the given size, if the
	// logged-in user has write permission to the top-level folder.
	// If extending the file, it pads the new data with 0s. Truncates
	// on an unlinked file may or may not succeed as no-ops, depending
	// on whether or not the necessary blocks have been locally
	// cached. This is a remote-access operation.
	Truncate(ctx context.Context, file Node, size uint64) error
	// SetEx turns on or off the executable bit on the file
	// represented by a given node, if the logged-in user has write
	// permissions to the top-level folder. This is a remote-sync
	// operation.
	SetEx(ctx context.Context, file Node, ex bool) error
	// SetMtime sets the modification time on the file represented by
	// a given node, if the logged-in user has write permissions to
	// the top-level folder. If mtime is nil, it is a no-op. This is
	// a remote-sync operation.
	SetMtime(ctx context.Context, file Node, mtime *time.Time) error
	// SyncAll flushes all outstanding writes and truncates for any
	// dirty files to the KBFS servers within the given folder, if the
	// logged-in user has write permissions to the top-level folder.
	// If done through a file system interface, this may include
	// modifications done via multiple file handles. This is a
	// remote-sync operation.
	SyncAll(ctx context.Context, folderBranch data.FolderBranch) error
	// FolderStatus returns the status of a particular folder/branch, along
	// with a channel that will be closed when the status has been
	// updated (to eliminate the need for polling this method).
	FolderStatus(ctx context.Context, folderBranch data.FolderBranch) (
		FolderBranchStatus, <-chan StatusUpdate, error)
	// FolderConflictStatus is a lightweight method to return the
	// conflict status of a particular folder/branch. (The conflict
	// status is also available in `FolderBranchStatus`.)
	FolderConflictStatus(ctx context.Context, folderBranch data.FolderBranch) (
		keybase1.FolderConflictType, error)
	// Status returns the status of KBFS, along with a channel that will be
	// closed when the status has been updated (to eliminate the need for
	// polling this method). Note that this channel only applies to
	// connection status changes.
	//
	// KBFSStatus can be non-empty even if there is an error.
	Status(ctx context.Context) (
		KBFSStatus, <-chan StatusUpdate, error)
	// UnstageForTesting clears out this device's staged state, if
	// any, and fast-forwards to the current head of this
	// folder-branch.
	UnstageForTesting(ctx context.Context, folderBranch data.FolderBranch) error
	// RequestRekey requests to rekey this folder. Note that this asynchronously
	// requests a rekey, so canceling ctx doesn't cancel the rekey.
	RequestRekey(ctx context.Context, id tlf.ID)
	// SyncFromServer blocks until the local client has contacted the
	// server and guaranteed that all known updates for the given
	// top-level folder have been applied locally (and notifications
	// sent out to any observers). It returns an error if this
	// folder-branch is currently unmerged or dirty locally. If
	// lockBeforeGet is non-nil, it blocks on idempotently taking the
	// lock from server at the time it gets any metadata.
	SyncFromServer(ctx context.Context,
		folderBranch data.FolderBranch, lockBeforeGet *keybase1.LockID) error
	// GetUpdateHistory returns a complete history of all the merged
	// updates of the given folder, in a data structure that's
	// suitable for encoding directly into JSON. This is an expensive
	// operation, and should only be used for occasional debugging.
	// Note that the history does not include any unmerged changes or
	// outstanding writes from the local device. To get all the
	// revisions after `start`, use `kbfsmd.RevisionUninitialized` for
	// the `end` parameter.
	GetUpdateHistory(
		ctx context.Context, folderBranch data.FolderBranch,
		start, end kbfsmd.Revision) (history TLFUpdateHistory, err error)
	// GetEditHistory returns the edit history of the TLF, clustered
	// by writer.
	GetEditHistory(ctx context.Context, folderBranch data.FolderBranch) (
		tlfHistory keybase1.FSFolderEditHistory, err error)

	// GetNodeMetadata gets metadata associated with a Node.
	GetNodeMetadata(ctx context.Context, node Node) (NodeMetadata, error)
	// GetRootNodeMetadata gets metadata associated with the root node
	// of a FolderBranch, and for convenience the TLF handle as well.
	GetRootNodeMetadata(ctx context.Context, folderBranch data.FolderBranch) (
		NodeMetadata, *tlfhandle.Handle, error)
	// Shutdown is called to clean up any resources associated with
	// this KBFSOps instance.
	Shutdown(ctx context.Context) error
	// PushConnectionStatusChange updates the status of a service for
	// human-readable connection status tracking.
	PushConnectionStatusChange(service string, newStatus error)
	// PushStatusChange causes Status listeners to be notified via closing
	// the status channel.
	PushStatusChange()
	// ClearPrivateFolderMD clears any cached private folder metadata,
	// e.g. on a logout.
	ClearPrivateFolderMD(ctx context.Context)
	// ForceFastForward forwards the nodes of all folders that have
	// been previously cleared with `ClearPrivateFolderMD` to their
	// newest version. It works asynchronously, so no error is
	// returned.
	ForceFastForward(ctx context.Context)
	// InvalidateNodeAndChildren sends invalidation messages for the
	// given node and all of its children that are currently in the
	// NodeCache. It's useful if the caller has outside knowledge of
	// data changes to that node or its children that didn't come
	// through the usual MD update channels (e.g., autogit nodes need
	// invalidation when the corresponding git repo is updated).
	InvalidateNodeAndChildren(ctx context.Context, node Node) error
	// TeamNameChanged indicates that a team has changed its name, and
	// we should clean up any outstanding handle info associated with
	// the team ID.
	TeamNameChanged(ctx context.Context, tid keybase1.TeamID)
	// TeamAbandoned indicates that a team has been abandoned, and
	// shouldn't be referred to by its previous name anymore.
	TeamAbandoned(ctx context.Context, tid keybase1.TeamID)
	// CheckMigrationPerms returns an error if this device cannot
	// perform implicit team migration for the given TLF.
	CheckMigrationPerms(ctx context.Context, id tlf.ID) (err error)
	// MigrateToImplicitTeam migrates the given folder from a private-
	// or public-keyed folder, to a team-keyed folder. If it's
	// already a private/public team-keyed folder, nil is returned.
	MigrateToImplicitTeam(ctx context.Context, id tlf.ID) error
	// KickoffAllOutstandingRekeys kicks off all outstanding rekeys. It does
	// nothing to folders that have not scheduled a rekey. This should be
	// called when we receive a "paper key cached" event from the service.
	KickoffAllOutstandingRekeys() error
	// NewNotificationChannel is called to notify any existing TLF
	// matching `handle` that a new kbfs-edits channel is available.
	NewNotificationChannel(
		ctx context.Context, handle *tlfhandle.Handle,
		convID chat1.ConversationID, channelName string)
	// ClearConflictView moves the conflict view of the given TLF out of the
	// way and resets the state of the TLF.
	ClearConflictView(ctx context.Context, tlfID tlf.ID) error
	// FinishResolvingConflict removes the local view of a
	// previously-cleared conflict.
	FinishResolvingConflict(ctx context.Context, fb data.FolderBranch) error
	// ForceStuckConflictForTesting forces the local view of the given
	// TLF into a stuck conflict view, in order to test the above
	// `ClearConflictView` method and related state changes.
	ForceStuckConflictForTesting(ctx context.Context, tlfID tlf.ID) error
	// CancelUploads stops journal uploads for the given TLF, reverts
	// the local view of the TLF to the server's view, and clears the
	// journal from the disk. Note that this could result in
	// partially-uploaded changes, and may leak blocks on the bserver.
	CancelUploads(ctx context.Context, fb data.FolderBranch) error
	// Reset completely resets the given folder. Should only be
	// called after explicit user confirmation. After the call,
	// `handle` has the new TLF ID. If `*newTlfID` is non-nil, that
	// will be the new TLF ID of the reset TLF, if it already points
	// to a MD object that matches the same handle as the original TLF
	// (see HOTPOT-685 for an example of how this can happen -- it
	// should be very rare).
	Reset(ctx context.Context, handle *tlfhandle.Handle, newTlfID *tlf.ID) error

	// GetSyncConfig returns the sync state configuration for the
	// given TLF.
	GetSyncConfig(ctx context.Context, tlfID tlf.ID) (
		keybase1.FolderSyncConfig, error)
	// SetSyncConfig sets the sync state configuration for the given
	// TLF to either fully enabled, fully disabled, or partially
	// syncing selected paths. If syncing is disabled, it returns a
	// channel that is closed when all of the TLF's blocks have been
	// removed from the sync cache. For a partially-synced folder,
	// the config must contain no absolute paths, no duplicate paths,
	// and no relative paths that go out of the TLF.
	SetSyncConfig(
		ctx context.Context, tlfID tlf.ID, config keybase1.FolderSyncConfig) (
		<-chan error, error)
	// GetAllSyncedTlfMDs returns the synced TLF metadata (and
	// handle), only for those synced TLFs to which the current
	// logged-in user has access.
	GetAllSyncedTlfMDs(ctx context.Context) map[tlf.ID]SyncedTlfMD

	// AddRootNodeWrapper adds a new root node wrapper for every
	// existing TLF. Any Nodes that have already been returned by
	// `KBFSOps` won't use these wrappers.
	AddRootNodeWrapper(func(Node) Node)

	// StatusOfServices returns the current status of various connected
	// services.
	StatusOfServices() (map[string]error, chan StatusUpdate)
}
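// readFullExample is a hypothetical sketch, not part of this file's real API,
// of how a caller might drain a file through KBFSOps.Read given the semantics
// documented above: Read can return fewer bytes than requested, and (0, nil)
// signals EOF.
func readFullExample(ctx context.Context, ops KBFSOps, file Node) (
	[]byte, error) {
	var out []byte
	buf := make([]byte, 64*1024)
	var off int64
	for {
		n, err := ops.Read(ctx, file, buf, off)
		if err != nil {
			return nil, err
		}
		if n == 0 {
			// EOF reached.
			return out, nil
		}
		out = append(out, buf[:n]...)
		off += n
	}
}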
type gitMetadataPutter interface {
	PutGitMetadata(ctx context.Context, folder keybase1.FolderHandle,
		repoID keybase1.RepoID, metadata keybase1.GitLocalMetadata) error
}

// KeybaseService is an interface for communicating with the keybase
// service.
type KeybaseService interface {
	idutil.KeybaseService
	gitMetadataPutter
	SubscriptionNotifier

	// FavoriteAdd adds the given folder to the list of favorites.
	FavoriteAdd(ctx context.Context, folder keybase1.FolderHandle) error

	// FavoriteDelete removes the given folder from the list of
	// favorites.
	FavoriteDelete(ctx context.Context, folder keybase1.FolderHandle) error

	// FavoriteList returns the current list of favorites.
	FavoriteList(ctx context.Context, sessionID int) (keybase1.FavoritesResult,
		error)

	// EncryptFavorites encrypts cached favorites to store on disk.
	EncryptFavorites(ctx context.Context, dataToEncrypt []byte) ([]byte, error)

	// DecryptFavorites decrypts cached favorites stored on disk.
	DecryptFavorites(ctx context.Context, dataToDecrypt []byte) ([]byte, error)

	// NotifyOnlineStatusChanged notifies about online/offline status
	// changes.
	NotifyOnlineStatusChanged(ctx context.Context, online bool) error
	// Notify sends a filesystem notification.
	Notify(ctx context.Context, notification *keybase1.FSNotification) error

	// NotifyPathUpdated sends a path updated notification.
	NotifyPathUpdated(ctx context.Context, path string) error

	// NotifySyncStatus sends a sync status notification.
	NotifySyncStatus(ctx context.Context,
		status *keybase1.FSPathSyncStatus) error

	// NotifyOverallSyncStatus sends an overall sync status
	// notification.
	NotifyOverallSyncStatus(
		ctx context.Context, status keybase1.FolderSyncStatus) error

	// NotifyFavoritesChanged sends a notification that favorites have
	// changed.
	NotifyFavoritesChanged(ctx context.Context) error

	// FlushUserFromLocalCache instructs this layer to clear any
	// KBFS-side, locally-cached information about the given user.
	// This does NOT involve communication with the daemon; it just
	// forces future calls that load this user to fall through to the
	// daemon itself, rather than being served from the cache.
	FlushUserFromLocalCache(ctx context.Context, uid keybase1.UID)

	// ClearCaches flushes all user and team info from KBFS-side
	// caches.
	ClearCaches(ctx context.Context)

	// TODO: Add CryptoClient methods, too.

	// EstablishMountDir asks the service for the current mount path
	// and sets it if not established.
	EstablishMountDir(ctx context.Context) (string, error)

	// GetKVStoreClient returns a client for accessing the KVStore service.
	GetKVStoreClient() keybase1.KvstoreInterface

	// Shutdown frees any resources associated with this
	// instance. No other methods may be called after this is
	// called.
	Shutdown()
}

// KeybaseServiceCn defines methods needed to construct KeybaseService
// and Crypto implementations.
type KeybaseServiceCn interface {
	NewKeybaseService(
		config Config, params InitParams, ctx Context, log logger.Logger) (
		KeybaseService, error)
	NewCrypto(
		config Config, params InitParams, ctx Context, log logger.Logger) (
		Crypto, error)
	NewChat(
		config Config, params InitParams, ctx Context, log logger.Logger) (
		Chat, error)
}

// teamMembershipChecker is a copy of kbfsmd.TeamMembershipChecker for
// embedding in KBPKI. Unfortunately, this is necessary since mockgen
// can't handle embedded interfaces living in other packages.
type teamMembershipChecker interface {
	// IsTeamWriter is a copy of
	// kbfsmd.TeamMembershipChecker.IsTeamWriter.
	//
	// If the caller knows that the writership needs to be checked
	// while offline, they should pass in
	// `keybase1.OfflineAvailability_BEST_EFFORT` as the `offline`
	// parameter. Otherwise `IsTeamWriter` might block on a network
	// call.
	IsTeamWriter(
		ctx context.Context, tid keybase1.TeamID, uid keybase1.UID,
		verifyingKey kbfscrypto.VerifyingKey,
		offline keybase1.OfflineAvailability) (bool, error)
	// NoLongerTeamWriter returns the global Merkle root of the
	// most-recent time the given user (with the given device key,
	// which implies an eldest seqno) transitioned from being a writer
	// to not being a writer on the given team. If the user was never
	// a writer of the team, it returns an error.
	//
	// If the caller knows that the writership needs to be checked
	// while offline, they should pass in
	// `keybase1.OfflineAvailability_BEST_EFFORT` as the `offline`
	// parameter. Otherwise `NoLongerTeamWriter` might block on a
	// network call.
	NoLongerTeamWriter(
		ctx context.Context, tid keybase1.TeamID, tlfType tlf.Type,
		uid keybase1.UID, verifyingKey kbfscrypto.VerifyingKey,
		offline keybase1.OfflineAvailability) (keybase1.MerkleRootV2, error)
	// IsTeamReader is a copy of
	// kbfsmd.TeamMembershipChecker.IsTeamReader.
	//
	// If the caller knows that the readership needs to be checked
	// while offline, they should pass in
	// `keybase1.OfflineAvailability_BEST_EFFORT` as the `offline`
	// parameter. Otherwise `IsTeamReader` might block on a
	// network call.
	IsTeamReader(
		ctx context.Context, tid keybase1.TeamID, uid keybase1.UID,
		offline keybase1.OfflineAvailability) (bool, error)
}

type teamKeysGetter interface {
	// GetTeamTLFCryptKeys gets all of a team's secret crypt keys, by
	// generation, as well as the latest key generation number for the
	// team. The caller can specify `desiredKeyGen` to force a server
	// check if that particular key gen isn't yet known; it may be set
	// to UnspecifiedKeyGen if no server check is required.
	//
	// If the caller knows that the keys need to be retrieved while
	// offline, they should pass in
	// `keybase1.OfflineAvailability_BEST_EFFORT` as the `offline`
	// parameter. Otherwise `GetTeamTLFCryptKeys` might block on a
	// network call.
	GetTeamTLFCryptKeys(ctx context.Context, tid keybase1.TeamID,
		desiredKeyGen kbfsmd.KeyGen, offline keybase1.OfflineAvailability) (
		map[kbfsmd.KeyGen]kbfscrypto.TLFCryptKey, kbfsmd.KeyGen, error)
}

type teamRootIDGetter interface {
	// GetTeamRootID returns the root team ID for the given (sub)team
	// ID.
	//
	// If the caller knows that the root needs to be retrieved while
	// offline, they should pass in
	// `keybase1.OfflineAvailability_BEST_EFFORT` as the `offline`
	// parameter. Otherwise `GetTeamRootID` might block on a network
	// call.
	GetTeamRootID(
		ctx context.Context, tid keybase1.TeamID,
		offline keybase1.OfflineAvailability) (keybase1.TeamID, error)
}
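// offlineTeamCheckExample is a hypothetical sketch, not part of this file's
// real API, of the offline-availability convention documented on the
// interfaces above: callers that must not block on the network pass
// keybase1.OfflineAvailability_BEST_EFFORT explicitly.
func offlineTeamCheckExample(
	ctx context.Context, checker teamMembershipChecker, tid keybase1.TeamID,
	uid keybase1.UID, key kbfscrypto.VerifyingKey) (bool, error) {
	// A best-effort check may be answered from local caches while offline.
	return checker.IsTeamWriter(
		ctx, tid, uid, key, keybase1.OfflineAvailability_BEST_EFFORT)
}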
// KBPKI interacts with the Keybase daemon to fetch user info.
type KBPKI interface {
	idutil.KBPKI
	idutil.MerkleRootGetter
	teamMembershipChecker
	teamKeysGetter
	teamRootIDGetter
	gitMetadataPutter

	// HasVerifyingKey returns nil if the given user has the given
	// VerifyingKey, and an error otherwise. If the revoked key was
	// valid according to the untrusted server timestamps, a special
	// error type `RevokedDeviceVerificationError` is returned, which
	// includes information the caller can use to verify the key using
	// the merkle tree.
	//
	// If the caller knows that the key needs to be verified while
	// offline, they should pass in
	// `keybase1.OfflineAvailability_BEST_EFFORT` as the `offline`
	// parameter. Otherwise `HasVerifyingKey` might block on a
	// network call.
	HasVerifyingKey(ctx context.Context, uid keybase1.UID,
		verifyingKey kbfscrypto.VerifyingKey,
		atServerTime time.Time, offline keybase1.OfflineAvailability) error

	// GetCryptPublicKeys gets all of a user's crypt public keys (including
	// paper keys).
	//
	// If the caller knows that the keys need to be retrieved while
	// offline, they should pass in
	// `keybase1.OfflineAvailability_BEST_EFFORT` as the `offline`
	// parameter. Otherwise `GetCryptPublicKeys` might block on a
	// network call.
	GetCryptPublicKeys(
		ctx context.Context, uid keybase1.UID,
		offline keybase1.OfflineAvailability) (
		[]kbfscrypto.CryptPublicKey, error)

	// TODO: Split the methods below off into a separate
	// FavoriteOps interface.

	// FavoriteAdd adds folder to the list of the logged-in user's
	// favorite folders. It is idempotent.
	FavoriteAdd(ctx context.Context, folder keybase1.FolderHandle) error

	// FavoriteDelete deletes folder from the list of the logged-in user's
	// favorite folders. It is idempotent.
	FavoriteDelete(ctx context.Context, folder keybase1.FolderHandle) error

	// FavoriteList returns the list of all favorite folders for
	// the logged-in user.
	FavoriteList(ctx context.Context) (keybase1.FavoritesResult, error)

	// CreateTeamTLF associates the given TLF ID with the team ID in
	// the team's sigchain. If the team already has a TLF ID
	// associated with it, this overwrites it.
	CreateTeamTLF(
		ctx context.Context, teamID keybase1.TeamID, tlfID tlf.ID) error

	// Notify sends a filesystem notification.
	Notify(ctx context.Context, notification *keybase1.FSNotification) error

	// NotifyPathUpdated sends a path updated notification.
	NotifyPathUpdated(ctx context.Context, path string) error

	// InvalidateTeamCacheForID instructs KBPKI to discard any cached
	// information about the given team ID.
	InvalidateTeamCacheForID(tid keybase1.TeamID)
}

// KeyMetadataWithRootDirEntry is like KeyMetadata, but can also
// return the root dir entry for the associated MD update.
type KeyMetadataWithRootDirEntry interface {
	libkey.KeyMetadata

	// GetRootDirEntry returns the root directory entry for the
	// associated MD.
	GetRootDirEntry() data.DirEntry
}

type encryptionKeyGetter interface {
	// GetTLFCryptKeyForEncryption gets the crypt key to use for
	// encryption (i.e., with the latest key generation) for the
	// TLF with the given metadata.
	GetTLFCryptKeyForEncryption(ctx context.Context, kmd libkey.KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)
}

type mdDecryptionKeyGetter interface {
	// GetTLFCryptKeyForMDDecryption gets the crypt key to use for the
	// TLF with the given metadata to decrypt the private portion of
	// the metadata.
	// It finds the appropriate key from kmdWithKeys
	// (which in most cases is the same as kmdToDecrypt) if it's not
	// already cached.
	GetTLFCryptKeyForMDDecryption(ctx context.Context,
		kmdToDecrypt, kmdWithKeys libkey.KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)
	// GetFirstTLFCryptKey gets the first valid crypt key for the
	// TLF with the given metadata.
	GetFirstTLFCryptKey(ctx context.Context, kmd libkey.KeyMetadata) (
		kbfscrypto.TLFCryptKey, error)
}

type blockDecryptionKeyGetter interface {
	// GetTLFCryptKeyForBlockDecryption gets the crypt key to use
	// for the TLF with the given metadata to decrypt the block
	// pointed to by the given pointer.
	GetTLFCryptKeyForBlockDecryption(ctx context.Context, kmd libkey.KeyMetadata,
		blockPtr data.BlockPointer) (kbfscrypto.TLFCryptKey, error)
}

type blockKeyGetter interface {
	encryptionKeyGetter
	blockDecryptionKeyGetter
}

// KeyManager fetches and constructs the keys needed for KBFS file
// operations.
type KeyManager interface {
	blockKeyGetter
	mdDecryptionKeyGetter

	// GetTLFCryptKeyOfAllGenerations gets the crypt keys of all generations
	// for current devices. keys contains crypt keys from all generations, in
	// order, starting from FirstValidKeyGen.
	GetTLFCryptKeyOfAllGenerations(ctx context.Context, kmd libkey.KeyMetadata) (
		keys []kbfscrypto.TLFCryptKey, err error)

	// Rekey checks the given MD object, if it is a private TLF,
	// against the current set of device keys for all valid
	// readers and writers. If there are any new devices, it
	// updates all existing key generations to include the new
	// devices. If there are devices that have been removed, it
	// creates a new epoch of keys for the TLF. If there was an
	// error, or the RootMetadata wasn't changed, it returns false.
	// Otherwise, it returns true. If a new key generation is
	// added, the second return value points to this new key. This
	// is to allow for caching of the TLF crypt key only after a
	// successful merged write of the metadata. Otherwise we could
	// prematurely pollute the key cache.
	//
	// If the given MD object is a public TLF, it simply updates
	// the TLF's handle with any newly-resolved writers.
	//
	// If promptPaper is set, prompts for any unlocked paper keys.
	// promptPaper shouldn't be set if md is for a public TLF.
	Rekey(ctx context.Context, md *RootMetadata, promptPaper bool) (
		bool, *kbfscrypto.TLFCryptKey, error)
}
// Reporter exports events (asynchronously) to any number of sinks.
type Reporter interface {
	// ReportErr records that a given error happened.
	ReportErr(ctx context.Context, tlfName tlf.CanonicalName, t tlf.Type,
		mode ErrorModeType, err error)
	// AllKnownErrors returns all errors known to this Reporter.
	AllKnownErrors() []ReportedError
	// OnlineStatusChanged sends the given online/offline status to any sink.
	OnlineStatusChanged(ctx context.Context, online bool)
	// Notify sends the given notification to any sink.
	Notify(ctx context.Context, notification *keybase1.FSNotification)
	// NotifyPathUpdated sends the given notification to any sink.
	NotifyPathUpdated(ctx context.Context, path string)
	// NotifySyncStatus sends the given path sync status to any sink.
	NotifySyncStatus(ctx context.Context, status *keybase1.FSPathSyncStatus)
	// NotifyOverallSyncStatus sends the given path overall sync
	// status to any sink.
	NotifyOverallSyncStatus(
		ctx context.Context, status keybase1.FolderSyncStatus)
	// NotifyFavoritesChanged sends a favorites invalidation to any sink.
	NotifyFavoritesChanged(ctx context.Context)
	// Shutdown frees any resources allocated by a Reporter.
	Shutdown()
}

// MDCache gets and puts plaintext top-level metadata into the cache.
type MDCache interface {
	// Get gets the metadata object associated with the given TLF ID,
	// revision number, and branch ID (kbfsmd.NullBranchID for merged MD).
	Get(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID) (ImmutableRootMetadata, error)
	// Put stores the metadata object, only if an MD matching that TLF
	// ID, revision number, and branch ID isn't already cached. If
	// there is already a matching item in the cache, we require that
	// the caller manage the cache explicitly by deleting or replacing
	// it. This should be used when putting existing MDs being fetched
	// from the server.
	Put(md ImmutableRootMetadata) error
	// Delete removes the given metadata object from the cache if it exists.
	Delete(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID)
	// Replace replaces the entry matching the md under the old branch
	// ID with the new one. If the old entry doesn't exist, this is
	// equivalent to a Put, except that it overrides anything else
	// that's already in the cache. This should be used when putting
	// new MDs created locally.
	Replace(newRmd ImmutableRootMetadata, oldBID kbfsmd.BranchID) error
	// MarkPutToServer sets `PutToServer` to true for the specified
	// MD, if it already exists in the cache.
	MarkPutToServer(tlf tlf.ID, rev kbfsmd.Revision, bid kbfsmd.BranchID)
	// GetIDForHandle retrieves a cached, trusted TLF ID for the given
	// handle, if one exists.
	GetIDForHandle(handle *tlfhandle.Handle) (tlf.ID, error)
	// PutIDForHandle caches a trusted TLF ID for the given handle.
	PutIDForHandle(handle *tlfhandle.Handle, id tlf.ID) error
	// ChangeHandleForID moves an ID to be under a new handle, if the
	// ID is cached already.
	ChangeHandleForID(oldHandle *tlfhandle.Handle, newHandle *tlfhandle.Handle)
	// GetNextMD returns a cached view of the next MD following the
	// given global Merkle root.
	GetNextMD(tlfID tlf.ID, rootSeqno keybase1.Seqno) (
		nextKbfsRoot *kbfsmd.MerkleRoot, nextMerkleNodes [][]byte,
		nextRootSeqno keybase1.Seqno, err error)
	// PutNextMD caches a view of the next MD following the given
	// global Merkle root.
	PutNextMD(tlfID tlf.ID, rootSeqno keybase1.Seqno,
		nextKbfsRoot *kbfsmd.MerkleRoot, nextMerkleNodes [][]byte,
		nextRootSeqno keybase1.Seqno) error
}

// KeyCache handles caching for both TLFCryptKeys and BlockCryptKeys.
type KeyCache interface {
	// GetTLFCryptKey gets the crypt key for the given TLF.
	GetTLFCryptKey(tlf.ID, kbfsmd.KeyGen) (kbfscrypto.TLFCryptKey, error)
	// PutTLFCryptKey stores the crypt key for the given TLF.
	PutTLFCryptKey(tlf.ID, kbfsmd.KeyGen, kbfscrypto.TLFCryptKey) error
}
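// cachedTLFKeyExample is a hypothetical sketch, not part of this file's real
// API, showing the intended interplay between KeyCache and a key source:
// check the cache first, fall back to the caller-supplied fetch function, and
// then populate the cache. The fetch closure is an assumption for the sake of
// the example.
func cachedTLFKeyExample(
	cache KeyCache, tlfID tlf.ID, keyGen kbfsmd.KeyGen,
	fetch func() (kbfscrypto.TLFCryptKey, error)) (
	kbfscrypto.TLFCryptKey, error) {
	if key, err := cache.GetTLFCryptKey(tlfID, keyGen); err == nil {
		return key, nil
	}
	key, err := fetch()
	if err != nil {
		return kbfscrypto.TLFCryptKey{}, err
	}
	// Populating the cache is best-effort in this sketch; a Put error
	// shouldn't fail the lookup itself.
	_ = cache.PutTLFCryptKey(tlfID, keyGen, key)
	return key, nil
}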
// DiskBlockCacheType specifies a type of an on-disk block cache.
type DiskBlockCacheType int

const (
	// DiskBlockAnyCache indicates that any disk block cache is fine.
	DiskBlockAnyCache DiskBlockCacheType = iota
	// DiskBlockWorkingSetCache indicates that the working set cache
	// should be used.
	DiskBlockWorkingSetCache
	// DiskBlockSyncCache indicates that the sync cache should be
	// used.
	DiskBlockSyncCache
)

func (dbct DiskBlockCacheType) String() string {
	switch dbct {
	case DiskBlockSyncCache:
		return "DiskBlockSyncCache"
	case DiskBlockWorkingSetCache:
		return "DiskBlockWorkingSetCache"
	case DiskBlockAnyCache:
		return "DiskBlockAnyCache"
	default:
		return "unknown DiskBlockCacheType"
	}
}

// DiskBlockCache caches blocks to the disk.
type DiskBlockCache interface {
	// Get gets a block from the disk cache. If a specific preferred
	// cache type is given, the block and its metadata are moved to
	// that cache if they're not yet in it.
	Get(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID,
		preferredCacheType DiskBlockCacheType) (
		buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf,
		prefetchStatus PrefetchStatus, err error)
	// GetPrefetchStatus returns just the prefetchStatus for the
	// block. If a specific preferred cache type is given, the block
	// and its metadata are moved to that cache if they're not yet in
	// it.
	GetPrefetchStatus(
		ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID,
		cacheType DiskBlockCacheType) (PrefetchStatus, error)
	// Put puts a block to the disk cache. Returns after it has
	// updated the metadata but before it has finished writing the
	// block. If cacheType is specified, the block is put into that
	// cache; by default, blocks are put into the working set cache.
	Put(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID, buf []byte,
		serverHalf kbfscrypto.BlockCryptKeyServerHalf,
		cacheType DiskBlockCacheType) error
	// Delete deletes some blocks from the disk cache.
	Delete(
		ctx context.Context, blockIDs []kbfsblock.ID,
		cacheType DiskBlockCacheType) (
		numRemoved int, sizeRemoved int64, err error)
	// UpdateMetadata updates metadata for a given block in the disk
	// cache. If a specific preferred cache type is given, the block
	// and its metadata are moved to that cache if they're not yet in
	// it.
	UpdateMetadata(ctx context.Context, tlfID tlf.ID, blockID kbfsblock.ID,
		prefetchStatus PrefetchStatus, cacheType DiskBlockCacheType) error
	// ClearAllTlfBlocks deletes all the synced blocks corresponding
	// to the given TLF ID from the cache. It doesn't affect
	// transient blocks for unsynced TLFs.
	ClearAllTlfBlocks(
		ctx context.Context, tlfID tlf.ID, cacheType DiskBlockCacheType) error
	// GetLastUnrefRev returns the last revision that has been marked
	// unref'd for the given TLF.
	GetLastUnrefRev(
		ctx context.Context, tlfID tlf.ID, cacheType DiskBlockCacheType) (
		kbfsmd.Revision, error)
	// PutLastUnrefRev saves the given revision as the last unref'd
	// revision for the given TLF.
	PutLastUnrefRev(
		ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision,
		cacheType DiskBlockCacheType) error
	// Status returns the current status of the disk cache.
	Status(ctx context.Context) map[string]DiskBlockCacheStatus
	// DoesCacheHaveSpace returns whether the given cache has
	// space.
	DoesCacheHaveSpace(ctx context.Context,
		cacheType DiskBlockCacheType) (bool, int64, error)
	// Mark tags a given block in the disk cache with the given tag.
	Mark(
		ctx context.Context, blockID kbfsblock.ID, tag string,
		cacheType DiskBlockCacheType) error
	// DeleteUnmarked deletes all the given TLF's blocks in the disk
	// cache without the given tag.
	DeleteUnmarked(
		ctx context.Context, tlfID tlf.ID, tag string,
		cacheType DiskBlockCacheType) error
	// AddHomeTLF marks a TLF as a "home" TLF, so that its blocks are
	// less likely to be evicted; the public home TLF's blocks are
	// still more likely to be evicted than the private one's.
	AddHomeTLF(ctx context.Context, tlfID tlf.ID) error
	// ClearHomeTLFs should be called on logout so that the old user's TLFs
	// are not still marked as home.
	ClearHomeTLFs(ctx context.Context) error
	// GetTlfSize returns the number of bytes stored for the given TLF
	// in the cache of the given type. If `DiskBlockAnyCache` is
	// specified, it returns the total sum of bytes across all caches.
	GetTlfSize(
		ctx context.Context, tlfID tlf.ID, cacheType DiskBlockCacheType) (
		uint64, error)
	// GetTlfIDs returns the TLF IDs with blocks in the cache. If
	// `DiskBlockAnyCache` is specified, it returns the set of
	// TLF IDs across all caches.
	GetTlfIDs(
		ctx context.Context, cacheType DiskBlockCacheType) ([]tlf.ID, error)
	// WaitUntilStarted waits until the block cache of the given type
	// has finished starting. If `DiskBlockAnyCache` is specified, it
	// waits for all caches to start.
	WaitUntilStarted(cacheType DiskBlockCacheType) error
	// Shutdown cleanly shuts down the disk block cache.
	Shutdown(ctx context.Context) <-chan struct{}
}
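// markAndSweepExample is a hypothetical sketch, not part of this file's real
// API, of the Mark/DeleteUnmarked pattern on DiskBlockCache: tag every block
// that is still referenced for the given TLF, then evict everything in that
// TLF without the tag. Using the sync cache here is an assumption for the
// sake of the example.
func markAndSweepExample(
	ctx context.Context, cache DiskBlockCache, tlfID tlf.ID,
	liveBlocks []kbfsblock.ID, tag string) error {
	for _, id := range liveBlocks {
		if err := cache.Mark(ctx, id, tag, DiskBlockSyncCache); err != nil {
			return err
		}
	}
	// Anything in this TLF that wasn't marked above is now fair game.
	return cache.DeleteUnmarked(ctx, tlfID, tag, DiskBlockSyncCache)
}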
1213 Unstage(ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) error
1214 // Status returns the current status of the disk cache.
1215 Status(ctx context.Context) DiskMDCacheStatus
1216 // Shutdown cleanly shuts down the disk MD cache.
1217 Shutdown(ctx context.Context)
1218 }
1219
1220 // DiskQuotaCache caches encrypted per-ID quotas to the disk.
1221 type DiskQuotaCache interface {
1222 // Get gets the latest cached quota for the given ID from the disk
1223 // cache.
1224 Get(ctx context.Context, id keybase1.UserOrTeamID) (
1225 info kbfsblock.QuotaInfo, err error)
1226 // Put stores the latest cached quota for the given ID to the disk
1227 // cache.
1228 Put(ctx context.Context, id keybase1.UserOrTeamID,
1229 info kbfsblock.QuotaInfo) (err error)
1230 // Status returns the current status of the disk cache.
1231 Status(ctx context.Context) DiskQuotaCacheStatus
1232 // Shutdown cleanly shuts down the disk quota cache.
1233 Shutdown(ctx context.Context)
1234 }
1235
1236 // BlockMetadataStore defines a type that stores block metadata locally on
1237 // the device.
1238 type BlockMetadataStore interface {
1239 // GetMetadata looks for and returns the block metadata for blockID if it's
1240 // found, and an error whose Cause is ldberrors.ErrNotFound if it's not
1241 // found.
1242 GetMetadata(ctx context.Context, blockID kbfsblock.ID) (BlockMetadataValue, error)
1243 // UpdateMetadata updates the block metadata for blockID using updater.
1244 // Specifically, it looks for existing block metadata for blockID. If it's
1245 // found, it's passed into updater. Otherwise, a zero value of
1246 // BlockMetadataValue is passed into the updater. Then, if updater returns
1247 // nil, the updated metadata is stored.
1248 UpdateMetadata(ctx context.Context, blockID kbfsblock.ID, updater BlockMetadataUpdater) error
1249 // Shutdown cleanly shuts down the disk block metadata cache.
1250 Shutdown()
1251 }
1252
1253 // XattrStore defines a type that handles locally stored xattr
1254 // values by interacting with a BlockMetadataStore.
1255 type XattrStore interface {
1256 // GetXattr looks for and returns the Xattr value of xattrType for blockID
1257 // if it's found, and an error whose Cause is ldberrors.ErrNotFound if it's
1258 // not found.
1259 GetXattr(ctx context.Context,
1260 blockID kbfsblock.ID, xattrType XattrType) ([]byte, error)
1261 // SetXattr sets xattrType Xattr to xattrValue for blockID.
1262 SetXattr(ctx context.Context,
1263 blockID kbfsblock.ID, xattrType XattrType, xattrValue []byte) error
1264 }
1265
1266 // cryptoPure contains all methods of Crypto that don't depend on
1267 // implicit state, i.e. they're pure functions of the input.
1268 type cryptoPure interface {
1269 // MakeRandomTlfID generates a dir ID using a CSPRNG.
1270 MakeRandomTlfID(t tlf.Type) (tlf.ID, error)
1271
1272 // MakeRandomBranchID generates a per-device branch ID using a
1273 // CSPRNG. It will not return LocalSquashBranchID or
1274 // kbfsmd.NullBranchID.
1275 MakeRandomBranchID() (kbfsmd.BranchID, error)
1276
1277 // MakeTemporaryBlockID generates a temporary block ID using a
1278 // CSPRNG. This is used for indirect blocks before they're
1279 // committed to the server.
1280 MakeTemporaryBlockID() (kbfsblock.ID, error)
1281
1282 // MakeBlockRefNonce generates a block reference nonce using a
1283 // CSPRNG. This is used for distinguishing different references to
1284 // the same BlockID.
1285 MakeBlockRefNonce() (kbfsblock.RefNonce, error) 1286 1287 // MakeRandomTLFEphemeralKeys generates ephemeral keys using a 1288 // CSPRNG for a TLF. These keys can then be used to key/rekey 1289 // the TLF. 1290 MakeRandomTLFEphemeralKeys() (kbfscrypto.TLFEphemeralPublicKey, 1291 kbfscrypto.TLFEphemeralPrivateKey, error) 1292 1293 // MakeRandomTLFKeys generates keys using a CSPRNG for a 1294 // single key generation of a TLF. 1295 MakeRandomTLFKeys() (kbfscrypto.TLFPublicKey, 1296 kbfscrypto.TLFPrivateKey, kbfscrypto.TLFCryptKey, error) 1297 1298 // MakeRandomBlockCryptKeyServerHalf generates the server-side of 1299 // a block crypt key. 1300 MakeRandomBlockCryptKeyServerHalf() ( 1301 kbfscrypto.BlockCryptKeyServerHalf, error) 1302 1303 // EncryptPrivateMetadata encrypts a PrivateMetadata object. 1304 EncryptPrivateMetadata( 1305 pmd PrivateMetadata, key kbfscrypto.TLFCryptKey) ( 1306 kbfscrypto.EncryptedPrivateMetadata, error) 1307 // DecryptPrivateMetadata decrypts a PrivateMetadata object. 1308 DecryptPrivateMetadata( 1309 encryptedPMD kbfscrypto.EncryptedPrivateMetadata, 1310 key kbfscrypto.TLFCryptKey) (PrivateMetadata, error) 1311 1312 // EncryptBlocks encrypts a block. plainSize is the size of the encoded 1313 // block; EncryptBlock() must guarantee that plainSize <= 1314 // len(encryptedBlock). 1315 EncryptBlock( 1316 block data.Block, tlfCryptKey kbfscrypto.TLFCryptKey, 1317 blockServerHalf kbfscrypto.BlockCryptKeyServerHalf) ( 1318 plainSize int, encryptedBlock kbfscrypto.EncryptedBlock, err error) 1319 1320 // DecryptBlock decrypts a block. Similar to EncryptBlock(), 1321 // DecryptBlock() must guarantee that (size of the decrypted 1322 // block) <= len(encryptedBlock). 1323 DecryptBlock( 1324 encryptedBlock kbfscrypto.EncryptedBlock, 1325 tlfCryptKey kbfscrypto.TLFCryptKey, 1326 blockServerHalf kbfscrypto.BlockCryptKeyServerHalf, block data.Block) error 1327 } 1328 1329 // Crypto signs, verifies, encrypts, and decrypts stuff. 1330 type Crypto interface { 1331 cryptoPure 1332 1333 // Duplicate kbfscrypto.Signer here to work around gomock's 1334 // limitations. 1335 Sign(context.Context, []byte) (kbfscrypto.SignatureInfo, error) 1336 SignForKBFS(context.Context, []byte) (kbfscrypto.SignatureInfo, error) 1337 SignToString(context.Context, []byte) (string, error) 1338 1339 // DecryptTLFCryptKeyClientHalf decrypts a 1340 // kbfscrypto.TLFCryptKeyClientHalf using the current device's 1341 // private key and the TLF's ephemeral public key. 1342 DecryptTLFCryptKeyClientHalf(ctx context.Context, 1343 publicKey kbfscrypto.TLFEphemeralPublicKey, 1344 encryptedClientHalf kbfscrypto.EncryptedTLFCryptKeyClientHalf) ( 1345 kbfscrypto.TLFCryptKeyClientHalf, error) 1346 1347 // DecryptTLFCryptKeyClientHalfAny decrypts one of the 1348 // kbfscrypto.TLFCryptKeyClientHalf using the available 1349 // private keys and the ephemeral public key. If promptPaper 1350 // is true, the service will prompt the user for any unlocked 1351 // paper keys. 1352 DecryptTLFCryptKeyClientHalfAny(ctx context.Context, 1353 keys []EncryptedTLFCryptKeyClientAndEphemeral, 1354 promptPaper bool) ( 1355 kbfscrypto.TLFCryptKeyClientHalf, int, error) 1356 1357 // DecryptTeamMerkleLeaf decrypts a team-encrypted Merkle leaf 1358 // using some team key generation greater than `minKeyGen`, and 1359 // the provided ephemeral public key. 
1360 DecryptTeamMerkleLeaf(ctx context.Context, teamID keybase1.TeamID,
1361 publicKey kbfscrypto.TLFEphemeralPublicKey,
1362 encryptedMerkleLeaf kbfscrypto.EncryptedMerkleLeaf,
1363 minKeyGen keybase1.PerTeamKeyGeneration) ([]byte, error)
1364
1365 // Shutdown frees any resources associated with this instance.
1366 Shutdown()
1367 }
1368
1369 // MDOps gets and puts root metadata to an MDServer. On a get, it
1370 // verifies the metadata is signed by the metadata's signing key.
1371 type MDOps interface {
1372 tlfhandle.IDGetter
1373
1374 // GetForTLF returns the current metadata object
1375 // corresponding to the given top-level folder, if the logged-in
1376 // user has read permission on the folder.
1377 //
1378 // If lockBeforeGet is not nil, it causes mdserver to take the lock on the
1379 // lock ID before the get.
1380 GetForTLF(ctx context.Context, id tlf.ID, lockBeforeGet *keybase1.LockID) (
1381 ImmutableRootMetadata, error)
1382
1383 // GetForTLFByTime returns the newest merged MD update with a
1384 // server timestamp less than or equal to `serverTime`.
1385 GetForTLFByTime(ctx context.Context, id tlf.ID, serverTime time.Time) (
1386 ImmutableRootMetadata, error)
1387
1388 // GetUnmergedForTLF is the same as the above but for unmerged
1389 // metadata.
1390 GetUnmergedForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) (
1391 ImmutableRootMetadata, error)
1392
1393 // GetRange returns a range of metadata objects corresponding to
1394 // the passed revision numbers (inclusive).
1395 //
1396 // If lockBeforeGet is not nil, it causes mdserver to take the lock on the
1397 // lock ID before the get.
1398 GetRange(ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision,
1399 lockID *keybase1.LockID) ([]ImmutableRootMetadata, error)
1400
1401 // GetUnmergedRange is the same as the above but for unmerged
1402 // metadata history (inclusive).
1403 GetUnmergedRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID,
1404 start, stop kbfsmd.Revision) ([]ImmutableRootMetadata, error)
1405
1406 // Put stores the metadata object for the given top-level folder.
1407 // This also adds the resulting ImmutableRootMetadata object to
1408 // the mdcache, if the Put is successful. Note that constructing
1409 // the ImmutableRootMetadata requires knowing the verifying key,
1410 // which might not be the same as the local user's verifying key
1411 // if the MD has been copied from a previous update.
1412 //
1413 // If lockContext is not nil, it causes the mdserver to check a lockID at
1414 // the time of the put, and optionally (if specified in lockContext)
1415 // releases the lock on the lock ID if the put is successful. Releasing the
1416 // lock in mdserver is idempotent. Note that journalMDOps doesn't support
1417 // lockContext for now. If journaling is enabled, use FinishSingleOp to
1418 // require locks.
1419 //
1420 // The priority parameter specifies the priority of this particular MD put
1421 // operation. When a conflict happens, mdserver tries to prioritize writes
1422 // with higher priorities. Callers should use pre-defined (or define new)
1423 // constants in the keybase1 package, such as keybase1.MDPriorityNormal. Note
1424 // that journalMDOps doesn't support any priority other than
1425 // MDPriorityNormal for now. If journaling is enabled, use FinishSingleOp
1426 // to override priority.
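// A minimal usage sketch, assuming an MDOps `mdOps`, a prepared `rmd`, the
// writer's `verifyingKey`, and a block-put state `bps` (all illustrative),
// with no lock context and the normal priority:
//
//    irmd, err := mdOps.Put(
//        ctx, rmd, verifyingKey, nil, keybase1.MDPriorityNormal, bps)
//    if err != nil {
//        return err
//    }
//    _ = irmd // the immutable form that was also added to the mdcache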
1427 Put( 1428 ctx context.Context, rmd *RootMetadata, 1429 verifyingKey kbfscrypto.VerifyingKey, lockContext *keybase1.LockContext, 1430 priority keybase1.MDPriority, bps data.BlockPutState) ( 1431 ImmutableRootMetadata, error) 1432 1433 // PutUnmerged is the same as the above but for unmerged metadata 1434 // history. This also adds the resulting ImmutableRootMetadata 1435 // object to the mdcache, if the PutUnmerged is successful. Note 1436 // that constructing the ImmutableRootMetadata requires knowing 1437 // the verifying key, which might not be the same as the local 1438 // user's verifying key if the MD has been copied from a previous 1439 // update. 1440 PutUnmerged( 1441 ctx context.Context, rmd *RootMetadata, 1442 verifyingKey kbfscrypto.VerifyingKey, bps data.BlockPutState) ( 1443 ImmutableRootMetadata, error) 1444 1445 // PruneBranch prunes all unmerged history for the given TLF 1446 // branch. 1447 PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error 1448 1449 // ResolveBranch prunes all unmerged history for the given TLF 1450 // branch, and also deletes any blocks in `blocksToDelete` that 1451 // are still in the local journal. In addition, it appends the 1452 // given MD to the journal. This also adds the resulting 1453 // ImmutableRootMetadata object to the mdcache, if the 1454 // ResolveBranch is successful. Note that constructing the 1455 // ImmutableRootMetadata requires knowing the verifying key, which 1456 // might not be the same as the local user's verifying key if the 1457 // MD has been copied from a previous update. 1458 ResolveBranch( 1459 ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, 1460 blocksToDelete []kbfsblock.ID, rmd *RootMetadata, 1461 verifyingKey kbfscrypto.VerifyingKey, bps data.BlockPutState) ( 1462 ImmutableRootMetadata, error) 1463 1464 // GetLatestHandleForTLF returns the server's idea of the latest 1465 // handle for the TLF, which may not yet be reflected in the MD if 1466 // the TLF hasn't been rekeyed since it entered into a conflicting 1467 // state. 1468 GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error) 1469 } 1470 1471 // Prefetcher is an interface to a block prefetcher. 1472 type Prefetcher interface { 1473 // ProcessBlockForPrefetch potentially triggers and monitors a prefetch. 1474 ProcessBlockForPrefetch(ctx context.Context, ptr data.BlockPointer, block data.Block, 1475 kmd libkey.KeyMetadata, priority int, lifetime data.BlockCacheLifetime, 1476 prefetchStatus PrefetchStatus, action BlockRequestAction) 1477 // WaitChannelForBlockPrefetch returns a channel that can be used 1478 // to wait for a block to finish prefetching or be canceled. If 1479 // the block isn't currently being prefetched, it will return an 1480 // already-closed channel. When the channel is closed, the caller 1481 // should still verify that the prefetch status of the block is 1482 // what they expect it to be, in case there was an error. 1483 WaitChannelForBlockPrefetch(ctx context.Context, ptr data.BlockPointer) ( 1484 <-chan struct{}, error) 1485 // Status returns the current status of the prefetch for the block 1486 // tree rooted at the given pointer. 1487 Status(ctx context.Context, ptr data.BlockPointer) (PrefetchProgress, error) 1488 // OverallSyncStatus returns the current status of all sync 1489 // prefetches. 1490 OverallSyncStatus() PrefetchProgress 1491 // CancelPrefetch notifies the prefetcher that a prefetch should be 1492 // canceled. 
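// A sketch of the wait-then-verify pattern described above for
// WaitChannelForBlockPrefetch, assuming a Prefetcher `p` and a pointer
// `ptr` (both illustrative):
//
//    waitCh, err := p.WaitChannelForBlockPrefetch(ctx, ptr)
//    if err != nil {
//        return err
//    }
//    select {
//    case <-waitCh:
//    case <-ctx.Done():
//        return ctx.Err()
//    }
//    // The close doesn't imply success; re-check the prefetch progress.
//    if _, err := p.Status(ctx, ptr); err != nil {
//        return err
//    }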
1493 CancelPrefetch(data.BlockPointer) 1494 // CancelTlfPrefetches notifies the prefetcher that all prefetches 1495 // for a given TLF should be canceled. 1496 CancelTlfPrefetches(context.Context, tlf.ID) error 1497 // Shutdown shuts down the prefetcher idempotently. Future calls to 1498 // the various Prefetch* methods will return io.EOF. The returned channel 1499 // allows upstream components to block until all pending prefetches are 1500 // complete. This feature is mainly used for testing, but also to toggle 1501 // the prefetcher on and off. 1502 Shutdown() <-chan struct{} 1503 } 1504 1505 // BlockOps gets and puts data blocks to a BlockServer. It performs 1506 // the necessary crypto operations on each block. 1507 type BlockOps interface { 1508 blockRetrieverGetter 1509 data.ReadyProvider 1510 1511 // Get gets the block associated with the given block pointer 1512 // (which belongs to the TLF with the given key metadata), 1513 // decrypts it if necessary, and fills in the provided block 1514 // object with its contents, if the logged-in user has read 1515 // permission for that block. cacheLifetime controls the behavior of the 1516 // write-through cache once a Get completes. 1517 // 1518 // TODO: Make a `BlockRequestParameters` object to encapsulate the 1519 // cache lifetime and branch name, to avoid future plumbing. Or 1520 // maybe just get rid of the `Get()` method entirely and have 1521 // everyone use the block retrieval queue directly. 1522 Get(ctx context.Context, kmd libkey.KeyMetadata, blockPtr data.BlockPointer, 1523 block data.Block, cacheLifetime data.BlockCacheLifetime, 1524 branch data.BranchName) error 1525 1526 // GetEncodedSizes gets the encoded sizes and statuses of the 1527 // block associated with the given block pointers (which belongs 1528 // to the TLF with the given key metadata). If a block is not 1529 // found, it gets a size of 0 and an UNKNOWN status. 1530 GetEncodedSizes(ctx context.Context, kmd libkey.KeyMetadata, 1531 blockPtrs []data.BlockPointer) ([]uint32, []keybase1.BlockStatus, error) 1532 1533 // Delete instructs the server to delete the given block references. 1534 // It returns the number of not-yet deleted references to 1535 // each block reference 1536 Delete(ctx context.Context, tlfID tlf.ID, ptrs []data.BlockPointer) ( 1537 liveCounts map[kbfsblock.ID]int, err error) 1538 1539 // Archive instructs the server to mark the given block references 1540 // as "archived"; that is, they are not being used in the current 1541 // view of the folder, and shouldn't be served to anyone other 1542 // than folder writers. 1543 Archive(ctx context.Context, tlfID tlf.ID, ptrs []data.BlockPointer) error 1544 1545 // GetLiveCount returns the number of "live" 1546 // (non-archived, non-deleted) references for each given block. 1547 GetLiveCount( 1548 ctx context.Context, tlfID tlf.ID, ptrs []data.BlockPointer) ( 1549 liveCounts map[kbfsblock.ID]int, err error) 1550 1551 // TogglePrefetcher activates or deactivates the prefetcher. 1552 TogglePrefetcher(enable bool) <-chan struct{} 1553 1554 // Prefetcher retrieves this BlockOps' Prefetcher. 1555 Prefetcher() Prefetcher 1556 1557 // Shutdown shuts down all the workers performing Get operations 1558 Shutdown(ctx context.Context) error 1559 } 1560 1561 // Duplicate kbfscrypto.AuthTokenRefreshHandler here to work around 1562 // gomock's limitations. 1563 type authTokenRefreshHandler interface { 1564 RefreshAuthToken(context.Context) 1565 } 1566 1567 // MDServer gets and puts metadata for each top-level directory. 
The 1568 // instantiation should be able to fetch session/user details via KBPKI. On a 1569 // put, the server is responsible for 1) ensuring the user has appropriate 1570 // permissions for whatever modifications were made; 2) ensuring that 1571 // LastModifyingWriter and LastModifyingUser are updated appropriately; and 3) 1572 // detecting conflicting writes based on the previous root block ID (i.e., when 1573 // it supports strict consistency). On a get, it verifies the logged-in user 1574 // has read permissions. 1575 // 1576 // TODO: Add interface for searching by time 1577 type MDServer interface { 1578 authTokenRefreshHandler 1579 1580 // GetForHandle returns the current (signed/encrypted) metadata 1581 // object corresponding to the given top-level folder's handle, if 1582 // the logged-in user has read permission on the folder. It 1583 // creates the folder if one doesn't exist yet, and the logged-in 1584 // user has permission to do so. 1585 // 1586 // If lockBeforeGet is not nil, it takes a lock on the lock ID before 1587 // trying to get anything. If taking the lock fails, an error is returned. 1588 // Note that taking a lock from the mdserver is idempotent. 1589 // 1590 // If there is no returned error, then the returned ID must 1591 // always be non-null. A nil *RootMetadataSigned may be 1592 // returned, but if it is non-nil, then its ID must match the 1593 // returned ID. 1594 GetForHandle(ctx context.Context, handle tlf.Handle, 1595 mStatus kbfsmd.MergeStatus, lockBeforeGet *keybase1.LockID) ( 1596 tlf.ID, *RootMetadataSigned, error) 1597 1598 // GetForTLF returns the current (signed/encrypted) metadata object 1599 // corresponding to the given top-level folder, if the logged-in 1600 // user has read permission on the folder. 1601 // 1602 // If lockBeforeGet is not nil, it takes a lock on the lock ID before 1603 // trying to get anything. If taking the lock fails, an error is returned. 1604 // Note that taking a lock from the mdserver is idempotent. 1605 GetForTLF(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus, 1606 lockBeforeGet *keybase1.LockID) (*RootMetadataSigned, error) 1607 1608 // GetForTLFByTime returns the earliest merged MD update with a 1609 // server timestamp equal or greater to `serverTime`. 1610 GetForTLFByTime(ctx context.Context, id tlf.ID, serverTime time.Time) ( 1611 *RootMetadataSigned, error) 1612 1613 // GetRange returns a range of (signed/encrypted) metadata objects 1614 // corresponding to the passed revision numbers (inclusive). 1615 // 1616 // If lockBeforeGet is not nil, it takes a lock on the lock ID before 1617 // trying to get anything. If taking the lock fails, an error is returned. 1618 // Note that taking a lock from the mdserver is idempotent. 1619 GetRange(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID, mStatus kbfsmd.MergeStatus, 1620 start, stop kbfsmd.Revision, lockBeforeGet *keybase1.LockID) ( 1621 []*RootMetadataSigned, error) 1622 1623 // Put stores the (signed/encrypted) metadata object for the given 1624 // top-level folder. Note: If the unmerged bit is set in the metadata 1625 // block's flags bitmask it will be appended to the unmerged per-device 1626 // history. 1627 // 1628 // If lockContext is not nil, it causes the mdserver to check a lockID at 1629 // the time of the put, and optionally (if specified in lockContext) 1630 // releases the lock on the lock ID if the put is successful. Releasing the 1631 // lock in mdserver is idempotent. 
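// A sketch of a locked put, making the write contingent on a lock taken
// via Lock (below); the `mdServer`, `tlfID`, `lockID`, `rmds`, `extra`,
// and `lockContext` values are illustrative, with `lockContext` carrying
// `lockID` and requesting release on success:
//
//    if err := mdServer.Lock(ctx, tlfID, lockID); err != nil {
//        return err
//    }
//    return mdServer.Put(
//        ctx, rmds, extra, lockContext, keybase1.MDPriorityNormal)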
1632 Put(ctx context.Context, rmds *RootMetadataSigned, extra kbfsmd.ExtraMetadata,
1633 lockContext *keybase1.LockContext, priority keybase1.MDPriority) error
1634
1635 // Lock ensures lockID for tlfID is taken by this session, i.e., it
1636 // idempotently takes the lock. If the lock is already taken by *another*
1637 // session, mdserver returns a throttle error, causing the RPC layer at the
1638 // client to retry. So callers of this method should observe behavior
1639 // similar to a blocking call which, upon successful return, ensures the
1640 // lock is taken on the server. Note that the lock expires after a certain
1641 // time, so it's important to make writes contingent on the lock by
1642 // requiring the lockID in Put.
1643 Lock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error
1644
1645 // ReleaseLock ensures lockID for tlfID is not taken by this session, i.e.,
1646 // it idempotently releases the lock. If the lock is already released or
1647 // expired, this is a no-op.
1648 ReleaseLock(ctx context.Context, tlfID tlf.ID, lockID keybase1.LockID) error
1649
1650 // StartImplicitTeamMigration tells mdserver to put an implicit team
1651 // migration lock on id, which prevents any rekey MD writes from going
1652 // in. Normal classic MD updates can still happen after implicit team
1653 // migration has started, until an iTeam-style MD is written.
1654 StartImplicitTeamMigration(ctx context.Context, id tlf.ID) (err error)
1655
1656 // PruneBranch prunes all unmerged history for the given TLF branch.
1657 PruneBranch(ctx context.Context, id tlf.ID, bid kbfsmd.BranchID) error
1658
1659 // RegisterForUpdate tells the MD server to inform the caller when
1660 // there is a merged update with a revision number greater than
1661 // currHead, which did NOT originate from this same MD server
1662 // session. This method returns a chan which can receive only a
1663 // single error before it's closed. If the received err is nil,
1664 // then there is updated MD ready to fetch which didn't originate
1665 // locally; if it is non-nil, then the previous registration
1666 // cannot send the next notification (e.g., the connection to the
1667 // MD server may have failed). In either case, the caller must
1668 // re-register to get a new chan that can receive future update
1669 // notifications.
1670 RegisterForUpdate(ctx context.Context, id tlf.ID,
1671 currHead kbfsmd.Revision) (<-chan error, error)
1672
1673 // CancelRegistration lets the local MDServer instance know that
1674 // we are no longer interested in updates for the specified
1675 // folder. It does not necessarily forward this cancellation to
1676 // remote servers.
1677 CancelRegistration(ctx context.Context, id tlf.ID)
1678
1679 // CheckForRekeys initiates the rekey checking process on the
1680 // server. The server is allowed to delay this request, and so it
1681 // returns a channel for returning the error. Actual rekey
1682 // requests are expected to come in asynchronously.
1683 CheckForRekeys(ctx context.Context) <-chan error
1684
1685 // TruncateLock attempts to take the history truncation lock for
1686 // this folder, for a TTL defined by the server. Returns true if
1687 // the lock was successfully taken.
1688 TruncateLock(ctx context.Context, id tlf.ID) (bool, error)
1689 // TruncateUnlock attempts to release the history truncation lock
1690 // for this folder. Returns true if the lock was successfully
1691 // released.
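// A sketch of holding the truncation lock around a truncation, assuming an
// `mdServer` and `tlfID` (both illustrative):
//
//    locked, err := mdServer.TruncateLock(ctx, tlfID)
//    if err != nil || !locked {
//        return err
//    }
//    defer func() {
//        _, _ = mdServer.TruncateUnlock(ctx, tlfID)
//    }()
//    // ... truncate history while the lock is held ...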
1692 TruncateUnlock(ctx context.Context, id tlf.ID) (bool, error) 1693 1694 // DisableRekeyUpdatesForTesting disables processing rekey updates 1695 // received from the mdserver while testing. 1696 DisableRekeyUpdatesForTesting() 1697 1698 // Shutdown is called to shutdown an MDServer connection. 1699 Shutdown() 1700 1701 // IsConnected returns whether the MDServer is connected. 1702 IsConnected() bool 1703 1704 // GetLatestHandleForTLF returns the server's idea of the latest handle for the TLF, 1705 // which may not yet be reflected in the MD if the TLF hasn't been rekeyed since it 1706 // entered into a conflicting state. For the highest level of confidence, the caller 1707 // should verify the mapping with a Merkle tree lookup. 1708 GetLatestHandleForTLF(ctx context.Context, id tlf.ID) (tlf.Handle, error) 1709 1710 // OffsetFromServerTime is the current estimate for how off our 1711 // local clock is from the mdserver clock. Add this to any 1712 // mdserver-provided timestamps to get the "local" time of the 1713 // corresponding event. If the returned bool is false, then we 1714 // don't have a current estimate for the offset. 1715 OffsetFromServerTime() (time.Duration, bool) 1716 1717 // GetKeyBundles looks up the key bundles for the given key 1718 // bundle IDs. tlfID must be non-zero but either or both wkbID 1719 // and rkbID can be zero, in which case nil will be returned 1720 // for the respective bundle. If a bundle cannot be found, an 1721 // error is returned and nils are returned for both bundles. 1722 GetKeyBundles(ctx context.Context, tlfID tlf.ID, 1723 wkbID kbfsmd.TLFWriterKeyBundleID, rkbID kbfsmd.TLFReaderKeyBundleID) ( 1724 *kbfsmd.TLFWriterKeyBundleV3, *kbfsmd.TLFReaderKeyBundleV3, error) 1725 1726 // CheckReachability is called when the Keybase service sends a notification 1727 // that network connectivity has changed. 1728 CheckReachability(ctx context.Context) 1729 1730 // FastForwardBackoff fast forwards any existing backoff timer for 1731 // connecting to the mdserver. If mdserver is connected at the time this 1732 // is called, it's essentially a no-op. 1733 FastForwardBackoff() 1734 1735 // FindNextMD finds the serialized (and possibly encrypted) root 1736 // metadata object from the leaf node of the second KBFS merkle 1737 // tree to be produced after a given Keybase global merkle tree 1738 // sequence number `rootSeqno` (and all merkle nodes between it 1739 // and the root, and the root itself). It also returns the global 1740 // merkle tree sequence number of the root that first included the 1741 // returned metadata object. 1742 FindNextMD(ctx context.Context, tlfID tlf.ID, rootSeqno keybase1.Seqno) ( 1743 nextKbfsRoot *kbfsmd.MerkleRoot, nextMerkleNodes [][]byte, 1744 nextRootSeqno keybase1.Seqno, err error) 1745 1746 // GetMerkleRootLatest returns the latest KBFS merkle root for the 1747 // given tree ID. 1748 GetMerkleRootLatest(ctx context.Context, treeID keybase1.MerkleTreeID) ( 1749 root *kbfsmd.MerkleRoot, err error) 1750 } 1751 1752 type mdServerLocal interface { 1753 MDServer 1754 addNewAssertionForTest( 1755 uid keybase1.UID, newAssertion keybase1.SocialAssertion) error 1756 getCurrentMergedHeadRevision(ctx context.Context, id tlf.ID) ( 1757 rev kbfsmd.Revision, err error) 1758 isShutdown() bool 1759 copy(config mdServerLocalConfig) mdServerLocal 1760 enableImplicitTeams() 1761 setKbfsMerkleRoot(treeID keybase1.MerkleTreeID, root *kbfsmd.MerkleRoot) 1762 } 1763 1764 // BlockServer gets and puts opaque data blocks. 
The instantiation
1765 // should be able to fetch session/user details via KBPKI. On a
1766 // put/delete, the server is responsible for: 1) checking that the ID
1767 // matches the hash of the buffer; and 2) enforcing writer quotas.
1768 type BlockServer interface {
1769 authTokenRefreshHandler
1770
1771 // FastForwardBackoff fast forwards any existing backoff timer for
1772 // connecting to bserver. If bserver is connected at the time this is
1773 // called, it's essentially a no-op.
1774 FastForwardBackoff()
1775
1776 // Get gets the (encrypted) block data associated with the given
1777 // block ID and context, uses the provided block key to decrypt
1778 // the block, and fills in the provided block object with its
1779 // contents, if the logged-in user has read permission for that
1780 // block.
1781 Get(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
1782 context kbfsblock.Context, cacheType DiskBlockCacheType) (
1783 []byte, kbfscrypto.BlockCryptKeyServerHalf, error)
1784
1785 // GetEncodedSizes gets the encoded sizes and statuses of the
1786 // blocks associated with the given block IDs (which belong to the given
1787 // TLF). If a block is not found, it
1788 // gets a size of 0 and an UNKNOWN status.
1789 GetEncodedSizes(
1790 ctx context.Context, tlfID tlf.ID, ids []kbfsblock.ID,
1791 contexts []kbfsblock.Context) ([]uint32, []keybase1.BlockStatus, error)
1792
1793 // Put stores the (encrypted) block data under the given ID
1794 // and context on the server, along with the server half of
1795 // the block key. context should contain a kbfsblock.RefNonce
1796 // of zero. There will be an initial reference for this block
1797 // for the given context.
1798 //
1799 // Put should be idempotent, although it should also return an
1800 // error if, for a given ID, any of the other arguments differ
1801 // from previous Put calls with the same ID.
1802 //
1803 // If this returns a kbfsblock.ServerErrorOverQuota, with
1804 // Throttled=false, the caller can treat it as informational
1805 // and otherwise ignore the error.
1806 Put(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
1807 context kbfsblock.Context, buf []byte,
1808 serverHalf kbfscrypto.BlockCryptKeyServerHalf,
1809 cacheType DiskBlockCacheType) error
1810
1811 // PutAgain re-stores a previously deleted block under the same ID
1812 // with the same data.
1813 PutAgain(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
1814 context kbfsblock.Context, buf []byte,
1815 serverHalf kbfscrypto.BlockCryptKeyServerHalf,
1816 cacheType DiskBlockCacheType) error
1817
1818 // AddBlockReference adds a new reference to the given block,
1819 // defined by the given context (which should contain a
1820 // non-zero kbfsblock.RefNonce). (Contexts with a
1821 // kbfsblock.RefNonce of zero should be used when putting the
1822 // block for the first time via Put().) Returns a
1823 // kbfsblock.ServerErrorBlockNonExistent if id is unknown within this
1824 // folder.
1825 //
1826 // AddBlockReference should be idempotent, although it should
1827 // also return an error if, for a given ID and refnonce, any
1828 // of the other fields of context differ from previous
1829 // AddBlockReference calls with the same ID and refnonce.
1830 //
1831 // If this returns a kbfsblock.ServerErrorOverQuota, with
1832 // Throttled=false, the caller can treat it as informational
1833 // and otherwise ignore the error.
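// A sketch of the reference flow described above, where `firstContext`
// carries a zero kbfsblock.RefNonce and `refContext` a non-zero one; the
// `bserver`, `tlfID`, `id`, `buf`, and `serverHalf` values are likewise
// illustrative:
//
//    err := bserver.Put(
//        ctx, tlfID, id, firstContext, buf, serverHalf, DiskBlockAnyCache)
//    if err != nil {
//        return err
//    }
//    // Later, attach an additional reference to the same block.
//    return bserver.AddBlockReference(ctx, tlfID, id, refContext)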
1834 AddBlockReference(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID, 1835 context kbfsblock.Context) error 1836 // RemoveBlockReferences removes the references to the given block 1837 // ID defined by the given contexts. If no references to the block 1838 // remain after this call, the server is allowed to delete the 1839 // corresponding block permanently. If the reference defined by 1840 // the count has already been removed, the call is a no-op. 1841 // It returns the number of remaining not-yet-deleted references after this 1842 // reference has been removed 1843 RemoveBlockReferences(ctx context.Context, tlfID tlf.ID, 1844 contexts kbfsblock.ContextMap) (liveCounts map[kbfsblock.ID]int, err error) 1845 1846 // ArchiveBlockReferences marks the given block references as 1847 // "archived"; that is, they are not being used in the current 1848 // view of the folder, and shouldn't be served to anyone other 1849 // than folder writers. 1850 // 1851 // For a given ID/refnonce pair, ArchiveBlockReferences should 1852 // be idempotent, although it should also return an error if 1853 // any of the other fields of the context differ from previous 1854 // calls with the same ID/refnonce pair. 1855 ArchiveBlockReferences(ctx context.Context, tlfID tlf.ID, 1856 contexts kbfsblock.ContextMap) error 1857 1858 // GetLiveBlockReferences returns the number of "live" 1859 // (non-archived, non-deleted) references for each given block. 1860 GetLiveBlockReferences(ctx context.Context, tlfID tlf.ID, 1861 contexts kbfsblock.ContextMap) ( 1862 liveCounts map[kbfsblock.ID]int, err error) 1863 1864 // IsUnflushed returns whether a given block is being queued 1865 // locally for later flushing to another block server. If the 1866 // block is currently being flushed to the server, this should 1867 // return `true`, so that the caller will try to clean it up from 1868 // the server if it's no longer needed. 1869 IsUnflushed(ctx context.Context, tlfID tlf.ID, id kbfsblock.ID) ( 1870 bool, error) 1871 1872 // Shutdown is called to shutdown a BlockServer connection. 1873 Shutdown(ctx context.Context) 1874 1875 // GetUserQuotaInfo returns the quota for the logged-in user. 1876 GetUserQuotaInfo(ctx context.Context) (info *kbfsblock.QuotaInfo, err error) 1877 1878 // GetTeamQuotaInfo returns the quota for a team. 1879 GetTeamQuotaInfo(ctx context.Context, tid keybase1.TeamID) ( 1880 info *kbfsblock.QuotaInfo, err error) 1881 } 1882 1883 // blockServerLocal is the interface for BlockServer implementations 1884 // that store data locally. 1885 type blockServerLocal interface { 1886 BlockServer 1887 // getAllRefsForTest returns all the known block references 1888 // for the given TLF, and should only be used during testing. 1889 getAllRefsForTest(ctx context.Context, tlfID tlf.ID) ( 1890 map[kbfsblock.ID]blockRefMap, error) 1891 } 1892 1893 // NodeChange represents a change made to a node as part of an atomic 1894 // file system operation. 1895 type NodeChange struct { 1896 Node Node 1897 // Basenames of entries added/removed. 1898 DirUpdated []data.PathPartString 1899 FileUpdated []WriteRange 1900 } 1901 1902 // Observer can be notified that there is an available update for a 1903 // given directory. The notification callbacks should not block, or 1904 // make any calls to the Notifier interface. Nodes passed to the 1905 // observer should not be held past the end of the notification 1906 // callback. 
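// As a rough sketch, an implementation might be registered with a Notifier
// (defined below), where `myObserver`, `notifier`, and `fb` are
// illustrative names:
//
//    var obs Observer = &myObserver{}
//    if err := notifier.RegisterForChanges(
//        []data.FolderBranch{fb}, obs); err != nil {
//        return err
//    }
//    defer func() {
//        _ = notifier.UnregisterFromChanges([]data.FolderBranch{fb}, obs)
//    }()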
1907 type Observer interface {
1908 // LocalChange announces that the file at this Node has been
1909 // updated locally, but not yet saved at the server.
1910 LocalChange(ctx context.Context, node Node, write WriteRange)
1911 // BatchChanges announces that the nodes have all been updated
1912 // together atomically. Each NodeChange in `changes` affects the
1913 // same top-level folder and branch. `allAffectedNodeIDs` is a
1914 // list of all the nodes that had their underlying data changed,
1915 // even if it wasn't a user-visible change (e.g., if a
1916 // subdirectory was updated, the directory block for the TLF root
1917 // is updated but that wouldn't be visible to a user).
1918 BatchChanges(ctx context.Context, changes []NodeChange,
1919 allAffectedNodeIDs []NodeID)
1920 // TlfHandleChange announces that the handle of the corresponding
1921 // folder branch has changed, likely due to previously-unresolved
1922 // assertions becoming resolved. This indicates that the listener
1923 // should switch over any cached paths for this folder-branch to
1924 // the new name. Nodes that were acquired under the old name will
1925 // still continue to work, but new lookups on the old name may
1926 // either encounter alias errors or entirely new TLFs (in the case
1927 // of conflicts).
1928 TlfHandleChange(ctx context.Context, newHandle *tlfhandle.Handle)
1929 }
1930
1931 // SyncedTlfObserver can be notified when a sync has started for a
1932 // synced TLF, or when a TLF becomes unsynced. The notification
1933 // callbacks should not block, or make any calls to the Notifier
1934 // interface.
1935 type SyncedTlfObserver interface {
1936 // FullSyncStarted announces that a new full sync has begun for
1937 // the given tlf ID. The sync will be complete (or canceled) once
1938 // the provided `waitCh` is closed.
1939 FullSyncStarted(
1940 ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision,
1941 waitCh <-chan struct{})
1942 // SyncModeChanged announces that the sync mode has changed for
1943 // the given tlf ID.
1944 SyncModeChanged(
1945 ctx context.Context, tlfID tlf.ID, newMode keybase1.FolderSyncMode)
1946 }
1947
1948 // Notifier notifies registrants of directory changes.
1949 type Notifier interface {
1950 // RegisterForChanges declares that the given Observer wants to
1951 // subscribe to updates for the given top-level folders.
1952 RegisterForChanges(folderBranches []data.FolderBranch, obs Observer) error
1953 // UnregisterFromChanges declares that the given Observer no
1954 // longer wants to subscribe to updates for the given top-level
1955 // folders.
1956 UnregisterFromChanges(folderBranches []data.FolderBranch, obs Observer) error
1957 // RegisterForSyncedTlfs declares that the given
1958 // `SyncedTlfObserver` wants to subscribe to updates about synced
1959 // TLFs.
1960 RegisterForSyncedTlfs(obs SyncedTlfObserver) error
1961 // UnregisterFromSyncedTlfs declares that the given
1962 // `SyncedTlfObserver` no longer wants to subscribe to updates
1963 // about synced TLFs.
1964 UnregisterFromSyncedTlfs(obs SyncedTlfObserver) error
1965 }
1966
1967 // Clock is an interface for getting the current time.
1968 type Clock interface {
1969 // Now returns the current time.
1970 Now() time.Time
1971 }
1972
1973 // ConflictRenamer deals with names for conflicting directory entries.
1974 type ConflictRenamer interface {
1975 // ConflictRename returns the appropriately modified filename.
1976 ConflictRename(
1977 ctx context.Context, op op, original string) (string, error)
1978 }
1979
1980 // Tracer maybe adds traces to contexts.
1981 type Tracer interface {
1982 // MaybeStartTrace, if tracing is on, returns a new context
1983 // based on the given one with an attached trace made with the
1984 // given family and title. Otherwise, it returns the given
1985 // context unchanged.
1986 MaybeStartTrace(ctx context.Context, family, title string) context.Context
1987 // MaybeFinishTrace finishes the trace attached to the given
1988 // context, if any.
1989 MaybeFinishTrace(ctx context.Context, err error)
1990 }
1991
1992 // InitMode encapsulates mode differences.
1993 type InitMode interface {
1994 // Type returns the InitModeType of this mode.
1995 Type() InitModeType
1996 // IsTestMode returns whether we are running a test.
1997 IsTestMode() bool
1998 // IsSingleOp returns whether this is a single-op mode (only one
1999 // write is expected at a time).
2000 IsSingleOp() bool
2001 // BlockWorkers returns the number of block workers to run.
2002 BlockWorkers() int
2003 // PrefetchWorkers returns the number of prefetch workers to run.
2004 PrefetchWorkers() int
2005 // ThrottledPrefetchPeriod returns the period for each prefetch
2006 // worker to start a throttled prefetch request.
2007 ThrottledPrefetchPeriod() time.Duration
2008 // DefaultBlockRequestAction returns the action to be used by
2009 // default whenever fetching a block.
2010 DefaultBlockRequestAction() BlockRequestAction
2011 // RekeyWorkers returns the number of rekey workers to run.
2012 RekeyWorkers() int
2013 // RekeyQueueSize returns the size of the rekey queue.
2014 RekeyQueueSize() int
2015 // DirtyBlockCacheEnabled indicates if we should run a dirty block
2016 // cache.
2017 DirtyBlockCacheEnabled() bool
2018 // BackgroundFlushesEnabled indicates if we should periodically be
2019 // flushing unsynced dirty writes to the server or journal.
2020 BackgroundFlushesEnabled() bool
2021 // MetricsEnabled indicates if we should be collecting metrics.
2022 MetricsEnabled() bool
2023 // ConflictResolutionEnabled indicates if we should be running
2024 // the conflict resolution background process.
2025 ConflictResolutionEnabled() bool
2026 // BlockManagementEnabled indicates whether we should be running
2027 // the block archive/delete background process, and whether we
2028 // should be re-embedding block change blocks in MDs.
2029 BlockManagementEnabled() bool
2030 // MaxBlockPtrsToManageAtOnce indicates how many block pointers
2031 // the block manager should try to hold in memory at once. -1
2032 // indicates that there is no limit.
2033 MaxBlockPtrsToManageAtOnce() int
2034 // QuotaReclamationEnabled indicates whether we should be running
2035 // the quota reclamation background process.
2036 QuotaReclamationEnabled() bool
2037 // QuotaReclamationPeriod indicates how often each TLF
2038 // should check for quota to reclaim. If the Duration.Seconds()
2039 // == 0, quota reclamation should not run automatically.
2040 QuotaReclamationPeriod() time.Duration
2041 // QuotaReclamationMinUnrefAge indicates the minimum time a block
2042 // must have been unreferenced before it can be reclaimed.
2043 QuotaReclamationMinUnrefAge() time.Duration
2044 // QuotaReclamationMinHeadAge indicates the minimum age of the
2045 // most recently merged MD update before we can run reclamation,
2046 // to avoid conflicting with a currently active writer.
2047 QuotaReclamationMinHeadAge() time.Duration
2048 // NodeCacheEnabled indicates whether we should be caching data nodes.
2049 NodeCacheEnabled() bool
2050 // TLFUpdatesEnabled indicates whether we should be registering
2051 // ourselves with the mdserver for TLF updates.
2052 TLFUpdatesEnabled() bool
2053 // KBFSServiceEnabled indicates whether we should launch a local
2054 // service for answering incoming KBFS-related RPCs.
2055 KBFSServiceEnabled() bool
2056 // JournalEnabled indicates whether this mode supports a journal.
2057 JournalEnabled() bool
2058 // UnmergedTLFsEnabled indicates whether it's possible for a
2059 // device in this mode to have unmerged TLFs.
2060 UnmergedTLFsEnabled() bool
2061 // ServiceKeepaliveEnabled indicates whether we need to send
2062 // keepalive probes to the Keybase service daemon.
2063 ServiceKeepaliveEnabled() bool
2064 // TLFEditHistoryEnabled indicates whether we should be running
2065 // the background TLF edit history process.
2066 TLFEditHistoryEnabled() bool
2067 // SendEditNotificationsEnabled indicates whether we should send
2068 // edit notifications on FS writes.
2069 SendEditNotificationsEnabled() bool
2070 // ClientType indicates the type we should advertise to the
2071 // Keybase service.
2072 ClientType() keybase1.ClientType
2073 // LocalHTTPServerEnabled represents whether we should launch an HTTP
2074 // server.
2075 LocalHTTPServerEnabled() bool
2076 // MaxCleanBlockCacheCapacity is the maximum number of bytes to be taken up
2077 // by the clean block cache.
2078 MaxCleanBlockCacheCapacity() uint64
2079 // OldStorageRootCleaningEnabled indicates whether we should clean
2080 // old temporary storage root directories.
2081 OldStorageRootCleaningEnabled() bool
2082 // DoRefreshFavoritesOnInit indicates whether we should refresh
2083 // our cached versions of the favorites immediately upon a login.
2084 DoRefreshFavoritesOnInit() bool
2085 // DoLogObfuscation indicates whether sensitive data like filenames
2086 // should be obfuscated in log messages.
2087 DoLogObfuscation() bool
2088 // BlockTLFEditHistoryIntialization indicates whether we should
2089 // delay initializing the edit histories of the most recent TLFs
2090 // until the first request that uses them is made.
2091 BlockTLFEditHistoryIntialization() bool
2092 // InitialDelayForBackgroundWork indicates how long non-critical
2093 // work that happens in the background on startup should wait
2094 // before it begins.
2095 InitialDelayForBackgroundWork() time.Duration
2096 // BackgroundWorkPeriod indicates how long to wait between
2097 // non-critical background work tasks.
2098 BackgroundWorkPeriod() time.Duration
2099 // IndexingEnabled indicates whether or not synced TLFs are
2100 // indexed and searchable.
2101 IndexingEnabled() bool
2102 // DelayInitialConnect indicates whether the initial connection to KBFS
2103 // servers should be delayed.
2104 DelayInitialConnect() bool
2105 // DiskCacheCompactionEnabled indicates whether the local disk
2106 // block cache should trigger compaction automatically.
2107 DiskCacheCompactionEnabled() bool
2108 // EditHistoryPrefetchingEnabled indicates whether we should
2109 // auto-prefetch the most recently-edited files.
2110 EditHistoryPrefetchingEnabled() bool
2111
2112 ldbutils.DbWriteBufferSizeGetter
2113 }
2114
2115 type initModeGetter interface {
2116 // Mode indicates how KBFS is configured to run.
2117 Mode() InitMode
2118
2119 // IsTestMode() indicates whether KBFS is running in a test.
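// As a sketch, a component might consult the mode before starting
// non-critical background work; the `config` and `startBackgroundWork`
// names are illustrative:
//
//    if config.Mode().BackgroundFlushesEnabled() && !config.IsTestMode() {
//        time.AfterFunc(
//            config.Mode().InitialDelayForBackgroundWork(),
//            startBackgroundWork)
//    }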
2120 IsTestMode() bool
2121 }
2122
2123 type blockCryptVersioner interface {
2124 // BlockCryptVersion returns the block encryption version to be used for
2125 // new blocks.
2126 BlockCryptVersion() kbfscrypto.EncryptionVer
2127 }
2128
2129 // SubscriptionID identifies a subscription.
2130 type SubscriptionID string
2131
2132 // SubscriptionNotifier defines a group of methods for notifying about changes
2133 // on subscribed topics.
2134 type SubscriptionNotifier interface {
2135 // OnPathChange notifies about a change that's related to a specific path.
2136 // Multiple subscriptionIDs may be sent because a client can subscribe on
2137 // the same path multiple times. In the future, topics will become a single
2138 // topic, but we don't differentiate between the two topics for now, so they
2139 // are just sent together if both topics are subscribed.
2140 OnPathChange(
2141 clientID SubscriptionManagerClientID, subscriptionIDs []SubscriptionID,
2142 path string, topics []keybase1.PathSubscriptionTopic)
2143 // OnNonPathChange notifies about a change that's not related to a specific
2144 // path.
2145 OnNonPathChange(
2146 clientID SubscriptionManagerClientID, subscriptionIDs []SubscriptionID,
2147 topic keybase1.SubscriptionTopic)
2148 }
2149
2150 // OnlineStatusTracker tracks the online status for the GUI.
2151 type OnlineStatusTracker interface {
2152 GetOnlineStatus() keybase1.KbfsOnlineStatus
2153 UserIn(ctx context.Context, clientKey string)
2154 UserOut(ctx context.Context, clientKey string)
2155 }
2156
2157 // SubscriptionManager manages subscriptions associated with one clientID.
2158 // Multiple subscribers can be used with the same SubscriptionManager.
2159 // If multiple subscriptions exist on the same topic (and for the same path, if
2160 // applicable), notifications are deduplicated.
2161 //
2162 // The two Subscribe methods are for path and non-path subscriptions
2163 // respectively. Notes on some common arguments:
2164 // 1. subscriptionID needs to be unique among all subscriptions that happen
2165 // within this process. A UUID or even just a timestamp might work. If
2166 // duplicate subscriptionIDs are used, an error is returned.
2167 // 2. Optionally a deduplicateInterval can be used. When this arg is set, we
2168 // debounce the events so they aren't sent more frequently than the interval.
2169 // If deduplicateInterval is not set, i.e. nil, no deduplication is done and
2170 // all events will be delivered.
2171 type SubscriptionManager interface {
2172 // SubscribePath subscribes to changes about path, when topic happens.
2173 SubscribePath(
2174 ctx context.Context, subscriptionID SubscriptionID,
2175 path string, topic keybase1.PathSubscriptionTopic,
2176 deduplicateInterval *time.Duration) error
2177 // SubscribeNonPath subscribes to changes when topic happens.
2178 SubscribeNonPath(ctx context.Context, subscriptionID SubscriptionID,
2179 topic keybase1.SubscriptionTopic,
2180 deduplicateInterval *time.Duration) error
2181 // Unsubscribe unsubscribes a previous subscription. The subscriptionID
2182 // should be the same as when the caller subscribed. Otherwise, it's a no-op.
2183 Unsubscribe(context.Context, SubscriptionID)
2184 // OnlineStatusTracker returns the OnlineStatusTracker for getting the
2185 // current online status for the GUI.
2186 OnlineStatusTracker() OnlineStatusTracker
2187 // Shutdown shuts the subscription manager down.
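// A sketch of the subscription flow described above, assuming a manager
// `sm`, a unique `subID`, a `path`, and a `topic` (all illustrative), with
// a one-second deduplicate interval:
//
//    interval := time.Second
//    err := sm.SubscribePath(ctx, subID, path, topic, &interval)
//    if err != nil {
//        return err
//    }
//    defer sm.Unsubscribe(ctx, subID)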
2188 Shutdown(ctx context.Context) 2189 } 2190 2191 // SubscriptionManagerPublisher associates with one SubscriptionManager, and is 2192 // used to publish changes to subscribers mangaged by it. 2193 type SubscriptionManagerPublisher interface { 2194 PublishChange(topic keybase1.SubscriptionTopic) 2195 } 2196 2197 type kbContextGetter interface { 2198 // KbContext returns the Keybase Context. 2199 KbContext() Context 2200 } 2201 2202 // Config collects all the singleton instance instantiations needed to 2203 // run KBFS in one place. The methods below are self-explanatory and 2204 // do not require comments. 2205 type Config interface { 2206 data.Versioner 2207 blockCryptVersioner 2208 logMaker 2209 blockCacher 2210 blockServerGetter 2211 blockOpsGetter 2212 codecGetter 2213 cryptoPureGetter 2214 keyGetterGetter 2215 cryptoGetter 2216 chatGetter 2217 signerGetter 2218 currentSessionGetterGetter 2219 diskBlockCacheGetter 2220 diskBlockCacheSetter 2221 diskBlockCacheFractionSetter 2222 syncBlockCacheFractionSetter 2223 diskMDCacheGetter 2224 diskMDCacheSetter 2225 diskQuotaCacheGetter 2226 diskQuotaCacheSetter 2227 blockMetadataStoreGetSeter 2228 clockGetter 2229 diskLimiterGetter 2230 syncedTlfGetterSetter 2231 initModeGetter 2232 settingsDBGetter 2233 SetMode(mode InitMode) 2234 Tracer 2235 KBFSOps() KBFSOps 2236 SetKBFSOps(KBFSOps) 2237 KBPKI() KBPKI 2238 SetKBPKI(KBPKI) 2239 KeyManager() KeyManager 2240 SetKeyManager(KeyManager) 2241 SetReporter(Reporter) 2242 reporterGetter 2243 MDCache() MDCache 2244 SetMDCache(MDCache) 2245 KeyCache() KeyCache 2246 SetKeyBundleCache(kbfsmd.KeyBundleCache) 2247 KeyBundleCache() kbfsmd.KeyBundleCache 2248 SetKeyCache(KeyCache) 2249 SetBlockCache(data.BlockCache) 2250 DirtyBlockCache() data.DirtyBlockCache 2251 SetDirtyBlockCache(data.DirtyBlockCache) 2252 SetCrypto(Crypto) 2253 SetChat(Chat) 2254 SetCodec(kbfscodec.Codec) 2255 MDOps() MDOps 2256 SetMDOps(MDOps) 2257 KeyOps() libkey.KeyOps 2258 SetKeyOps(libkey.KeyOps) 2259 SetBlockOps(BlockOps) 2260 MDServer() MDServer 2261 SetMDServer(MDServer) 2262 SetBlockServer(BlockServer) 2263 KeyServer() libkey.KeyServer 2264 SetKeyServer(libkey.KeyServer) 2265 KeybaseService() KeybaseService 2266 SetKeybaseService(KeybaseService) 2267 BlockSplitter() data.BlockSplitter 2268 SetBlockSplitter(data.BlockSplitter) 2269 Notifier() Notifier 2270 SetNotifier(Notifier) 2271 SetClock(Clock) 2272 ConflictRenamer() ConflictRenamer 2273 SetConflictRenamer(ConflictRenamer) 2274 UserHistory() *kbfsedits.UserHistory 2275 SetUserHistory(*kbfsedits.UserHistory) 2276 MetadataVersion() kbfsmd.MetadataVer 2277 SetMetadataVersion(kbfsmd.MetadataVer) 2278 SetBlockCryptVersion(kbfscrypto.EncryptionVer) 2279 DefaultBlockType() keybase1.BlockType 2280 SetDefaultBlockType(blockType keybase1.BlockType) 2281 // GetConflictResolutionDB gets the levelDB in which conflict resolution 2282 // status is stored. 2283 GetConflictResolutionDB() (db *ldbutils.LevelDb) 2284 RekeyQueue() RekeyQueue 2285 SetRekeyQueue(RekeyQueue) 2286 // ReqsBufSize indicates the number of read or write operations 2287 // that can be buffered per folder 2288 ReqsBufSize() int 2289 // MaxNameBytes indicates the maximum supported size of a 2290 // directory entry name in bytes. 2291 MaxNameBytes() uint32 2292 // DoBackgroundFlushes says whether we should periodically try to 2293 // flush dirty files, even without a sync from the user. Should 2294 // be true except for during some testing. 
2295 DoBackgroundFlushes() bool
2296 SetDoBackgroundFlushes(bool)
2297 // RekeyWithPromptWaitTime indicates how long to wait, after
2298 // setting the rekey bit, before prompting for a paper key.
2299 RekeyWithPromptWaitTime() time.Duration
2300 SetRekeyWithPromptWaitTime(time.Duration)
2301 // PrefetchStatus returns the prefetch status of a block.
2302 PrefetchStatus(context.Context, tlf.ID, data.BlockPointer) PrefetchStatus
2303 GetQuotaUsage(keybase1.UserOrTeamID) *EventuallyConsistentQuotaUsage
2304
2305 // DelayedCancellationGracePeriod specifies a grace period for which a
2306 // delayed cancellation waits before actually canceling the context. This
2307 // is useful for giving the critical portion of a slow remote operation
2308 // some extra time to finish, in an effort to avoid conflicts. An example
2309 // is an O_EXCL Create call interrupted by an ALRM signal that actually
2310 // makes it to the server, while the application assumes it didn't since
2311 // EINTR is returned. A delayed cancellation allows us to distinguish
2312 // between a successful cancel (where the remote operation didn't make it
2313 // to the server) and a failed cancel (where the remote operation did make
2314 // it to the server). However, the optimal value depends on the network
2315 // conditions; a long grace period on a really good connection just slows down Ctrl-C unnecessarily.
2316 //
2317 // TODO: make this adaptive and self-change over time based on network
2318 // conditions.
2319 DelayedCancellationGracePeriod() time.Duration
2320 SetDelayedCancellationGracePeriod(time.Duration)
2321
2322 // ResetCaches clears and re-initializes all data and key caches.
2323 ResetCaches()
2324
2325 // StorageRoot returns the path to the storage root for this config.
2326 StorageRoot() string
2327
2328 // MetricsRegistry may be nil, which should be interpreted as
2329 // not using metrics at all. (i.e., as if UseNilMetrics were
2330 // set). This differs from how go-metrics treats nil Registry
2331 // objects, which is to use the default registry.
2332 MetricsRegistry() metrics.Registry
2333 SetMetricsRegistry(metrics.Registry)
2334
2335 // SetTraceOptions sets the options for tracing (via x/net/trace).
2336 SetTraceOptions(enabled bool)
2337
2338 // TLFValidDuration is the time TLFs are valid before identification needs to be redone.
2339 TLFValidDuration() time.Duration
2340 // SetTLFValidDuration sets TLFValidDuration.
2341 SetTLFValidDuration(time.Duration)
2342
2343 // BGFlushDirOpBatchSize returns the directory op batch size for
2344 // background flushes.
2345 BGFlushDirOpBatchSize() int
2346 // SetBGFlushDirOpBatchSize sets the directory op batch size for
2347 // background flushes.
2348 SetBGFlushDirOpBatchSize(s int)
2349
2350 // BGFlushPeriod returns how long to wait for a batch to fill up
2351 // before syncing a set of changes to the servers.
2352 BGFlushPeriod() time.Duration
2353 // SetBGFlushPeriod sets how long to wait for a batch to fill up
2354 // before syncing a set of changes to the servers.
2355 SetBGFlushPeriod(p time.Duration)
2356
2357 // Shutdown is called to free config resources.
2358 Shutdown(context.Context) error
2359 // CheckStateOnShutdown tells the caller whether or not it is safe
2360 // to check the state of the system on shutdown.
2361 CheckStateOnShutdown() bool
2362
2363 // GetRekeyFSMLimiter returns the global rekey FSM limiter.
2364 GetRekeyFSMLimiter() *OngoingWorkLimiter
2365
2366 // RootNodeWrappers returns the set of root node wrapper functions
2367 // that will be applied to each newly-created root node.
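// For example, a caller might install a wrapper via AddRootNodeWrapper
// (below) that decorates every new root node; `loggingNode` is an
// illustrative wrapper type, not part of this package:
//
//    config.AddRootNodeWrapper(func(n Node) Node {
//        return &loggingNode{Node: n}
//    })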
2368 RootNodeWrappers() []func(Node) Node
2369 // AddRootNodeWrapper adds a new wrapper function that will be
2370 // applied whenever a root Node is created. This will only apply
2371 // to TLFs that are first accessed after `AddRootNodeWrapper` is
2372 // called.
2373 AddRootNodeWrapper(func(Node) Node)
2374
2375 // SetVLogLevel sets the vdebug level for all logs. The possible
2376 // strings are hard-coded in go/libkb/vdebug.go, but include
2377 // "mobile", "vlog1", "vlog2", etc.
2378 SetVLogLevel(levelString string)
2379
2380 // VLogLevel gets the vdebug level for this config. The possible
2381 // strings are hard-coded in go/libkb/vdebug.go, but include
2382 // "mobile", "vlog1", "vlog2", etc.
2383 VLogLevel() string
2384
2385 subscriptionManagerGetter
2386
2387 // SubscriptionManagerPublisher returns a publisher that can be used to
2388 // publish events to the subscription manager.
2389 SubscriptionManagerPublisher() SubscriptionManagerPublisher
2390 // KbEnv returns the *libkb.Env.
2391 KbEnv() *libkb.Env
2392
2393 kbContextGetter
2394 }
2395
2396 // NodeCache holds Nodes, and allows libkbfs to update them when
2397 // things change about the underlying KBFS blocks. It is probably
2398 // most useful to instantiate this on a per-folder-branch basis, so
2399 // that it can create a Path with the correct DirId and Branch name.
2400 type NodeCache interface {
2401 // GetOrCreate either makes a new Node for the given
2402 // BlockPointer, or returns an existing one. TODO: If we ever
2403 // support hard links, we will have to revisit the "name" and
2404 // "parent" parameters here. name must not be empty. Returns
2405 // an error if parent cannot be found.
2406 GetOrCreate(
2407 ptr data.BlockPointer, name data.PathPartString, parent Node,
2408 et data.EntryType) (Node, error)
2409 // Get returns the Node associated with the given ptr if one
2410 // already exists. Otherwise, it returns nil.
2411 Get(ref data.BlockRef) Node
2412 // UpdatePointer updates the BlockPointer for the corresponding
2413 // Node. NodeCache ignores this call when oldRef is not cached in
2414 // any Node. Returns the ID of the node that was updated,
2415 // or `nil` if nothing was updated.
2416 UpdatePointer(oldRef data.BlockRef, newPtr data.BlockPointer) NodeID
2417 // Move swaps the parent node for the corresponding Node, and
2418 // updates the node's name. NodeCache ignores the call when ptr
2419 // is not cached. If newParent is nil, it treats the ptr's
2420 // corresponding node as being unlinked from the old parent
2421 // completely. If successful, it returns a function that can be
2422 // called to undo the effect of the move (or `nil` if nothing
2423 // needs to be done); if newParent cannot be found, it returns an
2424 // error and a `nil` undo function.
2425 Move(ref data.BlockRef, newParent Node, newName data.PathPartString) (
2426 undoFn func(), err error)
2427 // Unlink sets the corresponding node's parent to nil and caches
2428 // the provided path in case the node is still open. NodeCache
2429 // ignores the call when ptr is not cached. The path is required
2430 // because the caller may have made changes to the parent nodes
2431 // already that shouldn't be reflected in the cached path. It
2432 // returns a function that can be called to undo the effect of the
2433 // unlink (or `nil` if nothing needs to be done).
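// A sketch of the undo pattern shared by Move and Unlink, assuming a
// NodeCache `nodeCache`, a `ref` with its `oldPath`/`oldDe` pair, and an
// `applyRm` operation (all illustrative):
//
//    undo := nodeCache.Unlink(ref, oldPath, oldDe)
//    if err := applyRm(); err != nil {
//        if undo != nil {
//            undo() // restore the node's parent on failure
//        }
//        return err
//    }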

// fileBlockDeepCopier fetches a file block, makes a deep copy of it
// (duplicating pointers for any indirect blocks) and generates a new
// random temporary block ID for it.  It returns the new BlockPointer,
// and internally saves the block for future uses.
type fileBlockDeepCopier func(
	context.Context, data.PathPartString, data.BlockPointer) (
	data.BlockPointer, error)

// crAction represents a specific action to take as part of the
// conflict resolution process.
type crAction interface {
	// swapUnmergedBlock should be called before do(), and if it
	// returns true, the caller must use the merged block
	// corresponding to the returned BlockPointer instead of
	// unmergedBlock when calling do().  If the returned BlockPointer
	// is the zero value (and true is returned), just swap in the
	// regular mergedBlock.
	swapUnmergedBlock(
		ctx context.Context, unmergedChains, mergedChains *crChains,
		unmergedDir *data.DirData) (bool, data.BlockPointer, error)
	// do modifies the given merged `dirData` in place to resolve the
	// conflict, and potentially uses the provided
	// `fileBlockDeepCopier`s to obtain copies of other blocks (along
	// with new BlockPointers) when a block copy is required.  It
	// returns a set of block infos that need to be unreferenced as
	// part of this conflict resolution.
	do(
		ctx context.Context, unmergedCopier, mergedCopier fileBlockDeepCopier,
		unmergedDir, mergedDir *data.DirData) (unrefs []data.BlockInfo, err error)
	// updateOps potentially modifies, in place, the slices of
	// unmerged and merged operations stored in the corresponding
	// crChains for the given unmerged and merged most recent
	// pointers.  Eventually, the "unmerged" ops will be pushed as
	// part of an MD update, and so should contain any operations
	// necessary to fully merge the unmerged data, including any
	// conflict resolution.  The "merged" ops will be played through
	// locally, to notify any caches about the newly-obtained merged
	// data (and any changes to local data that were required as part
	// of conflict resolution, such as renames).  A few things to note:
	//  * A particular action's updateOps method may be called more than
	//    once for different sets of chains, however it should only add
	//    new directory operations (like create/rm/rename) into directory
	//    chains.
	//  * updateOps doesn't necessarily result in correct BlockPointers within
	//    each of those ops; that must happen in a later phase.
	//  * mergedDir can be nil if the chain is for a file.
	updateOps(
		ctx context.Context, unmergedMostRecent, mergedMostRecent data.BlockPointer,
		unmergedDir, mergedDir *data.DirData,
		unmergedChains, mergedChains *crChains) error
	// String returns a string representation for this crAction, used
	// for debugging.
	String() string
}
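
// Illustrative sketch (not part of the original file): a do-nothing
// crAction that shows the shape of an implementation and the order in
// which callers are expected to invoke it (swapUnmergedBlock, then do,
// then updateOps).  `noopCRAction` is hypothetical; the package's real
// actions are defined elsewhere.
type noopCRAction struct{}

func (noopCRAction) swapUnmergedBlock(
	ctx context.Context, unmergedChains, mergedChains *crChains,
	unmergedDir *data.DirData) (bool, data.BlockPointer, error) {
	// No swap needed; the caller keeps using its unmerged block.
	return false, data.BlockPointer{}, nil
}

func (noopCRAction) do(
	ctx context.Context, unmergedCopier, mergedCopier fileBlockDeepCopier,
	unmergedDir, mergedDir *data.DirData) ([]data.BlockInfo, error) {
	// Nothing to resolve, so no blocks need to be unreferenced.
	return nil, nil
}

func (noopCRAction) updateOps(
	ctx context.Context, unmergedMostRecent, mergedMostRecent data.BlockPointer,
	unmergedDir, mergedDir *data.DirData,
	unmergedChains, mergedChains *crChains) error {
	// No ops to add to either chain.
	return nil
}

func (noopCRAction) String() string { return "noopCRAction{}" }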

// RekeyQueue is a managed queue of folders needing some rekey action taken
// upon them by the current client.
type RekeyQueue interface {
	// Enqueue enqueues a folder for rekey action.  If the TLF is already in
	// the rekey queue, the error channel of the existing one is returned.
	Enqueue(tlf.ID)
	// IsRekeyPending returns true if the given folder is in the rekey queue.
	// Note that an ongoing rekey doesn't count as "pending".
	IsRekeyPending(tlf.ID) bool
	// Shutdown cancels all pending rekey actions and clears the queue.  It
	// doesn't cancel ongoing rekeys.  After Shutdown() is called, the same
	// RekeyQueue shouldn't be used anymore.
	Shutdown()
}

// RekeyFSM is a Finite State Machine (FSM) for housekeeping rekey states for a
// FolderBranch.  Each FolderBranch has its own FSM for rekeys.
//
// See rekey_fsm.go for implementation details.
//
// TODO: report FSM status in FolderBranchStatus?
type RekeyFSM interface {
	// Event sends an event to the FSM.
	Event(event RekeyEvent)
	// Shutdown shuts down the FSM.  No new event should be sent into the FSM
	// after this method is called.
	Shutdown()

	// listenOnEvent adds a listener (callback) to the FSM so that when the
	// given event happens, the callback is called with the received event.
	// If repeatedly is set to false, the callback is called only once.
	// Otherwise it's called every time the event happens.
	//
	// Currently this is only used in tests and for RekeyFile.  See the
	// comment for RequestRekeyAndWaitForOneFinishEvent for more details.
	listenOnEvent(
		event rekeyEventType, callback func(RekeyEvent), repeatedly bool)
}

// BlockRetriever specifies how to retrieve blocks.
type BlockRetriever interface {
	// Request retrieves blocks asynchronously.  `action` determines
	// what happens after the block is fetched successfully.
	Request(ctx context.Context, priority int, kmd libkey.KeyMetadata,
		ptr data.BlockPointer, block data.Block, lifetime data.BlockCacheLifetime,
		action BlockRequestAction) <-chan error
	// PutInCaches puts the block into the in-memory cache, and ensures that
	// the disk cache metadata is updated.
	PutInCaches(ctx context.Context, ptr data.BlockPointer, tlfID tlf.ID,
		block data.Block, lifetime data.BlockCacheLifetime,
		prefetchStatus PrefetchStatus, cacheType DiskBlockCacheType) error
	// TogglePrefetcher creates a new prefetcher.
	TogglePrefetcher(enable bool, syncCh <-chan struct{}, doneCh chan<- struct{}) <-chan struct{}
}
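
// Illustrative sketch (not part of the original interface): a caller-side
// helper that turns BlockRetriever's asynchronous Request into a blocking
// call by waiting on the returned error channel.  `requestBlockSync` and
// its flat parameter list are hypothetical; concrete priority, lifetime,
// and action values are implementation details of the real callers.
func requestBlockSync(
	ctx context.Context, br BlockRetriever, priority int,
	kmd libkey.KeyMetadata, ptr data.BlockPointer, block data.Block,
	lifetime data.BlockCacheLifetime, action BlockRequestAction) error {
	errCh := br.Request(ctx, priority, kmd, ptr, block, lifetime, action)
	select {
	case err := <-errCh:
		// On a nil error, `block` has been filled in place.
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}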

// ChatChannelNewMessageCB is a callback function that can be called
// when there's a new message on a given conversation.
type ChatChannelNewMessageCB func(convID chat1.ConversationID, body string)

// Chat specifies a minimal interface for Keybase chatting.
type Chat interface {
	// GetConversationID returns the chat conversation ID associated
	// with the given TLF name, type, chat type and channel name.
	GetConversationID(
		ctx context.Context, tlfName tlf.CanonicalName, tlfType tlf.Type,
		channelName string, chatType chat1.TopicType) (
		chat1.ConversationID, error)

	// SendTextMessage (asynchronously) sends a text chat message to
	// the given conversation and channel.
	SendTextMessage(
		ctx context.Context, tlfName tlf.CanonicalName, tlfType tlf.Type,
		convID chat1.ConversationID, body string) error

	// GetGroupedInbox returns the TLFs with the most-recent chat
	// messages of the given type, up to `maxChats` of them.
	GetGroupedInbox(
		ctx context.Context, chatType chat1.TopicType, maxChats int) (
		[]*tlfhandle.Handle, error)

	// GetChannels returns a list of all the channels for a given
	// chat.  The entries in `convIDs` and `channelNames` have a 1-to-1
	// correspondence.
	GetChannels(
		ctx context.Context, tlfName tlf.CanonicalName, tlfType tlf.Type,
		chatType chat1.TopicType) (
		convIDs []chat1.ConversationID, channelNames []string, err error)

	// ReadChannel returns a set of text messages from a channel, and
	// a `nextPage` pointer to the following set of messages.  If the
	// given `startPage` is non-nil, it's used to specify the starting
	// point for the set of messages returned.
	ReadChannel(
		ctx context.Context, convID chat1.ConversationID, startPage []byte) (
		messages []string, nextPage []byte, err error)

	// RegisterForMessages registers a callback that will be called
	// for each new message that reaches convID.
	RegisterForMessages(convID chat1.ConversationID, cb ChatChannelNewMessageCB)

	// ClearCache is called to force this instance to forget
	// everything it might have cached, e.g. when a user logs out.
	ClearCache()
}
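
// Illustrative sketch (not part of the original interface): resolving a
// conversation ID and then sending a message through the Chat interface.
// `announceToChannel` and its parameters are hypothetical.
func announceToChannel(
	ctx context.Context, chat Chat, tlfName tlf.CanonicalName,
	tlfType tlf.Type, channelName string, chatType chat1.TopicType,
	body string) error {
	convID, err := chat.GetConversationID(
		ctx, tlfName, tlfType, channelName, chatType)
	if err != nil {
		return err
	}
	// SendTextMessage delivers asynchronously; a nil error only means
	// the message was accepted for sending.
	return chat.SendTextMessage(ctx, tlfName, tlfType, convID, body)
}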

// blockPutState is an interface for keeping track of readied blocks
// before putting them to the bserver.
type blockPutState interface {
	data.BlockPutState
	oldPtr(ctx context.Context, blockPtr data.BlockPointer) (data.BlockPointer, error)
	getReadyBlockData(
		ctx context.Context, blockPtr data.BlockPointer) (data.ReadyBlockData, error)
	synced(blockPtr data.BlockPointer) error
	numBlocks() int
}

// blockPutStateCopiable is a more manipulatable interface around
// `blockPutState`, allowing copying as well as merging/unmerging.
type blockPutStateCopiable interface {
	blockPutState

	mergeOtherBps(ctx context.Context, other blockPutStateCopiable) error
	removeOtherBps(ctx context.Context, other blockPutStateCopiable) error
	deepCopy(ctx context.Context) (blockPutStateCopiable, error)
	deepCopyWithBlacklist(
		ctx context.Context, blacklist map[data.BlockPointer]bool) (
		blockPutStateCopiable, error)
}

type fileBlockMap interface {
	putTopBlock(
		ctx context.Context, parentPtr data.BlockPointer,
		childName data.PathPartString, topBlock *data.FileBlock) error
	GetTopBlock(
		ctx context.Context, parentPtr data.BlockPointer,
		childName data.PathPartString) (*data.FileBlock, error)
	getFilenames(
		ctx context.Context, parentPtr data.BlockPointer) (
		[]data.PathPartString, error)
}

type dirBlockMap interface {
	putBlock(
		ctx context.Context, ptr data.BlockPointer, block *data.DirBlock) error
	getBlock(
		ctx context.Context, ptr data.BlockPointer) (*data.DirBlock, error)
	hasBlock(ctx context.Context, ptr data.BlockPointer) (bool, error)
	deleteBlock(ctx context.Context, ptr data.BlockPointer) error
	numBlocks() int
}
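
// Illustrative sketch (not part of the original file): a minimal in-memory
// dirBlockMap backed by a plain Go map, ignoring any need to spill blocks
// to disk.  `memDirBlockMap` and `noSuchDirBlockError` are hypothetical
// names, not the package's real implementations.
type noSuchDirBlockError struct{}

func (noSuchDirBlockError) Error() string { return "dir block not found" }

type memDirBlockMap struct {
	blocks map[data.BlockPointer]*data.DirBlock
}

func newMemDirBlockMap() *memDirBlockMap {
	return &memDirBlockMap{
		blocks: make(map[data.BlockPointer]*data.DirBlock),
	}
}

func (m *memDirBlockMap) putBlock(
	_ context.Context, ptr data.BlockPointer, block *data.DirBlock) error {
	m.blocks[ptr] = block
	return nil
}

func (m *memDirBlockMap) getBlock(
	_ context.Context, ptr data.BlockPointer) (*data.DirBlock, error) {
	if block, ok := m.blocks[ptr]; ok {
		return block, nil
	}
	return nil, noSuchDirBlockError{}
}

func (m *memDirBlockMap) hasBlock(
	_ context.Context, ptr data.BlockPointer) (bool, error) {
	_, ok := m.blocks[ptr]
	return ok, nil
}

func (m *memDirBlockMap) deleteBlock(
	_ context.Context, ptr data.BlockPointer) error {
	delete(m.blocks, ptr)
	return nil
}

func (m *memDirBlockMap) numBlocks() int {
	return len(m.blocks)
}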