github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/libkbfs/subscription_manager.go (about) 1 // Copyright 2019 Keybase Inc. All rights reserved. 2 // Use of this source code is governed by a BSD 3 // license that can be found in the LICENSE file. 4 5 package libkbfs 6 7 import ( 8 "path" 9 "strings" 10 "sync" 11 "time" 12 13 "github.com/keybase/client/go/kbfs/data" 14 "github.com/keybase/client/go/kbfs/tlfhandle" 15 "github.com/keybase/client/go/logger" 16 "github.com/keybase/client/go/protocol/keybase1" 17 "github.com/pkg/errors" 18 "golang.org/x/net/context" 19 "golang.org/x/time/rate" 20 ) 21 22 const ( 23 folderBranchPollingInterval = time.Second 24 maxPurgeableSubscriptionManagerClient = 3 25 ) 26 27 // userPath is always the full path including the /keybase prefix, but may 28 // not be canonical or cleaned. The goal is to track whatever the user of this 29 // type is dealing with without needing them to know if a path is canonicalized 30 // at any time. 31 // Examples: 32 // 33 // "/keybase/public/karlthefog@twitter/dir 34 // "/keybase/team/keybase/dir/../file" 35 type userPath string 36 37 // cleanInTlfPath is clean path rooted at a TLF, and it's what we get 38 // from Node.GetPathPlaintextSansTlf(). 39 // Examples, considering TLF /keybase/private/user1,user2: 40 // 41 // "/foo/bar" (representing /keybase/private/user1,user2/foo/bar) 42 // "/" (representing /keybase/private/user1,user2) 43 type cleanInTlfPath string 44 45 func getCleanInTlfPath(p *parsedPath) cleanInTlfPath { 46 return cleanInTlfPath(path.Clean(p.rawInTlfPath)) 47 } 48 49 func getParentPath(p cleanInTlfPath) (parent cleanInTlfPath, ok bool) { 50 lastSlashIndex := strings.LastIndex(string(p), "/") 51 if lastSlashIndex <= 0 { 52 return "", false 53 } 54 return p[:lastSlashIndex], true 55 } 56 57 type debouncedNotify struct { 58 notify func() 59 shutdown func() 60 } 61 62 func getChSender(ch chan<- struct{}, blocking bool) func() { 63 if blocking { 64 return func() { 65 ch <- struct{}{} 66 } 67 } 68 return func() { 69 select { 70 case ch <- struct{}{}: 71 default: 72 } 73 } 74 } 75 76 func debounce(do func(), limit rate.Limit) *debouncedNotify { 77 ctx, shutdown := context.WithCancel(context.Background()) 78 ch := make(chan struct{}, 1) 79 limiter := rate.NewLimiter(limit, 1) 80 go func() { 81 for { 82 err := limiter.Wait(ctx) 83 if err != nil { 84 return 85 } 86 select { 87 case <-ch: 88 go do() 89 case <-ctx.Done(): 90 return 91 } 92 } 93 }() 94 return &debouncedNotify{ 95 notify: getChSender(ch, limit == rate.Inf), 96 shutdown: shutdown, 97 } 98 } 99 100 type pathSubscriptionRef struct { 101 folderBranch data.FolderBranch 102 path cleanInTlfPath 103 } 104 105 type pathSubscription struct { 106 subscriptionIDs map[SubscriptionID]keybase1.PathSubscriptionTopic 107 // Keep track of different paths from input since GUI doesn't have a 108 // concept of "cleaned path" yet and when we notify about changes we need 109 // to use the original path that came in with the SubscribePath calls. 110 pathsToNotify map[string]struct{} 111 limit rate.Limit 112 debouncedNotify *debouncedNotify 113 } 114 115 type nonPathSubscription struct { 116 subscriptionIDs map[SubscriptionID]bool 117 limit rate.Limit 118 debouncedNotify *debouncedNotify 119 } 120 121 // subscriptionManager manages subscriptions. There are two types of 122 // subscriptions: path and non-path. Path subscriptions are for changes related 123 // to a specific path, such as file content change, dir children change, and 124 // timestamp change. Non-path subscriptions are for general changes that are 125 // not specific to a path, such as journal flushing, online status change, etc. 126 // We store a debouncedNotify struct for each subscription, which includes a 127 // notify function that might be debounced if caller asked so. 128 // 129 // This is per client. For example, if we have multiple GUI instances, each of 130 // them get their own client ID and their subscriptions won't affect each 131 // other. 132 type subscriptionManager struct { 133 clientID SubscriptionManagerClientID 134 config Config 135 log logger.Logger 136 notifier SubscriptionNotifier 137 138 onlineStatusTracker *onlineStatusTracker 139 lock sync.RWMutex 140 // TODO HOTPOT-416: add another layer here to reference by topics, and 141 // actually check topics in LocalChange and BatchChanges. 142 pathSubscriptions map[pathSubscriptionRef]*pathSubscription 143 pathSubscriptionIDToRef map[SubscriptionID]pathSubscriptionRef 144 nonPathSubscriptions map[keybase1.SubscriptionTopic]*nonPathSubscription 145 nonPathSubscriptionIDToTopic map[SubscriptionID]keybase1.SubscriptionTopic 146 subscriptionIDs map[SubscriptionID]bool 147 subscriptionCountByFolderBranch map[data.FolderBranch]int 148 folderBranchPollerCancelers map[SubscriptionID]context.CancelFunc 149 } 150 151 func (sm *subscriptionManager) notifyOnlineStatus() { 152 sm.lock.RLock() 153 defer sm.lock.RUnlock() 154 if sm.nonPathSubscriptions[keybase1.SubscriptionTopic_ONLINE_STATUS] == nil { 155 return 156 } 157 if nps, ok := sm.nonPathSubscriptions[keybase1.SubscriptionTopic_ONLINE_STATUS]; ok { 158 nps.debouncedNotify.notify() 159 } 160 } 161 162 func newSubscriptionManager(clientID SubscriptionManagerClientID, config Config, notifier SubscriptionNotifier) *subscriptionManager { 163 sm := &subscriptionManager{ 164 pathSubscriptions: make(map[pathSubscriptionRef]*pathSubscription), 165 pathSubscriptionIDToRef: make(map[SubscriptionID]pathSubscriptionRef), 166 nonPathSubscriptions: make(map[keybase1.SubscriptionTopic]*nonPathSubscription), 167 nonPathSubscriptionIDToTopic: make(map[SubscriptionID]keybase1.SubscriptionTopic), 168 clientID: clientID, 169 config: config, 170 log: config.MakeLogger("SubMan"), 171 notifier: notifier, 172 subscriptionIDs: make(map[SubscriptionID]bool), 173 subscriptionCountByFolderBranch: make(map[data.FolderBranch]int), 174 folderBranchPollerCancelers: make(map[SubscriptionID]context.CancelFunc), 175 } 176 sm.onlineStatusTracker = newOnlineStatusTracker(config, sm.notifyOnlineStatus) 177 return sm 178 } 179 180 func (sm *subscriptionManager) Shutdown(ctx context.Context) { 181 sm.onlineStatusTracker.shutdown() 182 sm.lock.Lock() 183 defer sm.lock.Unlock() 184 pathSids := make([]SubscriptionID, 0, len(sm.pathSubscriptionIDToRef)) 185 nonPathSids := make([]SubscriptionID, 0, len(sm.nonPathSubscriptionIDToTopic)) 186 for sid := range sm.pathSubscriptionIDToRef { 187 pathSids = append(pathSids, sid) 188 } 189 for sid := range sm.nonPathSubscriptionIDToTopic { 190 nonPathSids = append(nonPathSids, sid) 191 } 192 for _, sid := range pathSids { 193 sm.unsubscribePathLocked(ctx, sid) 194 } 195 for _, sid := range nonPathSids { 196 sm.unsubscribeNonPathLocked(ctx, sid) 197 } 198 } 199 200 func (sm *subscriptionManager) OnlineStatusTracker() OnlineStatusTracker { 201 return sm.onlineStatusTracker 202 } 203 204 func (sm *subscriptionManager) checkSubscriptionIDLocked(sid SubscriptionID) (setter func(), err error) { 205 if sm.subscriptionIDs[sid] { 206 return nil, errors.Errorf("duplicate subscription ID %q", sid) 207 } 208 return func() { 209 sm.subscriptionIDs[sid] = true 210 }, nil 211 } 212 213 func (sm *subscriptionManager) registerForChangesLocked(fb data.FolderBranch) { 214 if sm.subscriptionCountByFolderBranch[fb] == 0 { 215 _ = sm.config.Notifier().RegisterForChanges( 216 []data.FolderBranch{fb}, sm) 217 } 218 sm.subscriptionCountByFolderBranch[fb]++ 219 } 220 221 func (sm *subscriptionManager) unregisterForChangesLocked(fb data.FolderBranch) { 222 if sm.subscriptionCountByFolderBranch[fb] == 1 { 223 _ = sm.config.Notifier().UnregisterFromChanges( 224 []data.FolderBranch{fb}, sm) 225 delete(sm.subscriptionCountByFolderBranch, fb) 226 return 227 } 228 sm.subscriptionCountByFolderBranch[fb]-- 229 } 230 231 func (sm *subscriptionManager) preparePathNotification( 232 ref pathSubscriptionRef) (sids []SubscriptionID, 233 paths []string, topics []keybase1.PathSubscriptionTopic) { 234 sm.lock.RLock() 235 defer sm.lock.RUnlock() 236 237 ps, ok := sm.pathSubscriptions[ref] 238 if !ok { 239 return 240 } 241 sids = make([]SubscriptionID, 0, len(ps.subscriptionIDs)) 242 topicsMap := make(map[keybase1.PathSubscriptionTopic]struct{}) 243 for sid, topic := range ps.subscriptionIDs { 244 sids = append(sids, sid) 245 topicsMap[topic] = struct{}{} 246 } 247 topics = make([]keybase1.PathSubscriptionTopic, 0, len(topicsMap)) 248 for topic := range topicsMap { 249 topics = append(topics, topic) 250 } 251 paths = make([]string, 0, len(ps.pathsToNotify)) 252 for path := range ps.pathsToNotify { 253 paths = append(paths, path) 254 } 255 return sids, paths, topics 256 } 257 258 func (sm *subscriptionManager) makePathSubscriptionDebouncedNotify( 259 ref pathSubscriptionRef, limit rate.Limit) *debouncedNotify { 260 return debounce(func() { 261 sids, paths, topics := sm.preparePathNotification(ref) 262 263 for _, path := range paths { 264 sm.notifier.OnPathChange(sm.clientID, sids, path, topics) 265 } 266 }, limit) 267 } 268 269 func (sm *subscriptionManager) prepareNonPathNotification( 270 topic keybase1.SubscriptionTopic) (sids []SubscriptionID) { 271 sm.lock.RLock() 272 defer sm.lock.RUnlock() 273 nps, ok := sm.nonPathSubscriptions[topic] 274 if !ok { 275 return 276 } 277 sids = make([]SubscriptionID, 0, len(nps.subscriptionIDs)) 278 for sid := range nps.subscriptionIDs { 279 sids = append(sids, sid) 280 } 281 return sids 282 } 283 284 func (sm *subscriptionManager) makeNonPathSubscriptionDebouncedNotify( 285 topic keybase1.SubscriptionTopic, limit rate.Limit) *debouncedNotify { 286 return debounce(func() { 287 sids := sm.prepareNonPathNotification(topic) 288 sm.notifier.OnNonPathChange(sm.clientID, sids, topic) 289 }, limit) 290 } 291 292 type subscribePathRequest struct { 293 sid SubscriptionID 294 path string // original, uncleaned path from GUI 295 topic keybase1.PathSubscriptionTopic 296 deduplicateInterval *time.Duration 297 } 298 299 func (sm *subscriptionManager) subscribePathWithFolderBranchLocked( 300 req subscribePathRequest, 301 parsedPath *parsedPath, fb data.FolderBranch) error { 302 nitp := getCleanInTlfPath(parsedPath) 303 ref := pathSubscriptionRef{ 304 folderBranch: fb, 305 path: nitp, 306 } 307 308 subscriptionIDSetter, err := sm.checkSubscriptionIDLocked(req.sid) 309 if err != nil { 310 return err 311 } 312 sm.registerForChangesLocked(ref.folderBranch) 313 314 limit := rate.Inf 315 if req.deduplicateInterval != nil { 316 limit = rate.Every(*req.deduplicateInterval) 317 } 318 ps, ok := sm.pathSubscriptions[ref] 319 if !ok { 320 ps = &pathSubscription{ 321 subscriptionIDs: make(map[SubscriptionID]keybase1.PathSubscriptionTopic), 322 limit: limit, 323 debouncedNotify: sm.makePathSubscriptionDebouncedNotify(ref, limit), 324 pathsToNotify: make(map[string]struct{}), 325 } 326 sm.pathSubscriptions[ref] = ps 327 } else if ps.limit < limit { 328 // New limit is higher than what we have. Update it to match. 329 ps.limit = limit 330 ps.debouncedNotify.shutdown() 331 ps.debouncedNotify = sm.makePathSubscriptionDebouncedNotify(ref, limit) 332 } 333 ps.subscriptionIDs[req.sid] = req.topic 334 ps.pathsToNotify[req.path] = struct{}{} 335 336 sm.pathSubscriptionIDToRef[req.sid] = ref 337 subscriptionIDSetter() 338 return nil 339 } 340 341 func (sm *subscriptionManager) cancelAndDeleteFolderBranchPollerLocked( 342 sid SubscriptionID) (deleted bool) { 343 if cancel, ok := sm.folderBranchPollerCancelers[sid]; ok { 344 cancel() 345 delete(sm.folderBranchPollerCancelers, sid) 346 return true 347 } 348 return false 349 } 350 351 func (sm *subscriptionManager) cancelAndDeleteFolderBranchPoller( 352 sid SubscriptionID) (deleted bool) { 353 sm.lock.Lock() 354 defer sm.lock.Unlock() 355 return sm.cancelAndDeleteFolderBranchPollerLocked(sid) 356 } 357 358 func (sm *subscriptionManager) pollOnFolderBranchForSubscribePathRequest( 359 ctx context.Context, req subscribePathRequest, parsedPath *parsedPath) { 360 ticker := time.NewTicker(folderBranchPollingInterval) 361 for { 362 select { 363 case <-ticker.C: 364 fb, err := parsedPath.getFolderBranch(ctx, sm.config) 365 if err != nil { 366 _ = sm.cancelAndDeleteFolderBranchPoller(req.sid) 367 return 368 } 369 370 if fb == (data.FolderBranch{}) { 371 continue 372 } 373 374 // We have a folderBranch now! Go ahead and complete the 375 // subscription, and send a notification too. 376 377 sm.lock.Lock() 378 defer sm.lock.Unlock() 379 // Check if we're done while holding the lock to protect 380 // against racing against unsubscribe. 381 select { 382 case <-ctx.Done(): 383 // No need to call cancelAndDeleteFolderBranchPollerLocked here 384 // since we always cancel and delete at the same tiem and if 385 // it's canceled it must have been deleted too. 386 return 387 default: 388 } 389 390 err = sm.subscribePathWithFolderBranchLocked(req, parsedPath, fb) 391 if err != nil { 392 sm.log.CErrorf(ctx, 393 "subscribePathWithFolderBranchLocked sid=%s err=%v", req.sid, err) 394 } 395 396 sm.notifier.OnPathChange( 397 sm.clientID, []SubscriptionID{req.sid}, 398 req.path, []keybase1.PathSubscriptionTopic{req.topic}) 399 400 _ = sm.cancelAndDeleteFolderBranchPollerLocked(req.sid) 401 return 402 case <-ctx.Done(): 403 _ = sm.cancelAndDeleteFolderBranchPoller(req.sid) 404 return 405 } 406 } 407 } 408 409 func (sm *subscriptionManager) subscribePathWithoutFolderBranchLocked( 410 req subscribePathRequest, parsedPath *parsedPath) { 411 ctx, cancel := context.WithCancel(context.Background()) 412 sm.folderBranchPollerCancelers[req.sid] = cancel 413 go sm.pollOnFolderBranchForSubscribePathRequest(ctx, req, parsedPath) 414 } 415 416 // SubscribePath implements the SubscriptionManager interface. 417 func (sm *subscriptionManager) SubscribePath(ctx context.Context, 418 sid SubscriptionID, path string, topic keybase1.PathSubscriptionTopic, 419 deduplicateInterval *time.Duration) error { 420 parsedPath, err := parsePath(userPath(path)) 421 if err != nil { 422 return err 423 } 424 425 // Lock here to protect against racing with unsubscribe. Specifically, we 426 // don't want to launch the poller if an unsubscribe call for this sid 427 // comes in before we get fb from parsedPath.getFolderBranch(). 428 // 429 // We could still end up with a lingering subscription if unsubscribe 430 // happens too fast and RPC somehow gives use the unsubscribe call before 431 // the subscribe call, but that's probably rare enough to ignore here. 432 // 433 // In the future if this end up contributing a deadlock because 434 // folderBranch starts using the subscription manager somehow, we can add a 435 // "recently unsubscribed" cache to the subscription manager and move this 436 // lock further down. This cache should also mitigate the issue where the 437 // unsubscribe call gets deliverd before subscribe. 438 sm.lock.Lock() 439 defer sm.lock.Unlock() 440 441 fb, err := parsedPath.getFolderBranch(ctx, sm.config) 442 if err != nil { 443 return err 444 } 445 req := subscribePathRequest{ 446 sid: sid, 447 path: path, 448 topic: topic, 449 deduplicateInterval: deduplicateInterval, 450 } 451 if fb != (data.FolderBranch{}) { 452 return sm.subscribePathWithFolderBranchLocked(req, parsedPath, fb) 453 } 454 sm.subscribePathWithoutFolderBranchLocked(req, parsedPath) 455 return nil 456 } 457 458 // SubscribeNonPath implements the SubscriptionManager interface. 459 func (sm *subscriptionManager) SubscribeNonPath( 460 ctx context.Context, sid SubscriptionID, topic keybase1.SubscriptionTopic, 461 deduplicateInterval *time.Duration) error { 462 sm.lock.Lock() 463 defer sm.lock.Unlock() 464 subscriptionIDSetter, err := sm.checkSubscriptionIDLocked(sid) 465 if err != nil { 466 return err 467 } 468 469 limit := rate.Inf 470 if deduplicateInterval != nil { 471 limit = rate.Every(*deduplicateInterval) 472 } 473 nps, ok := sm.nonPathSubscriptions[topic] 474 if !ok { 475 nps = &nonPathSubscription{ 476 subscriptionIDs: make(map[SubscriptionID]bool), 477 limit: limit, 478 debouncedNotify: sm.makeNonPathSubscriptionDebouncedNotify(topic, limit), 479 } 480 sm.nonPathSubscriptions[topic] = nps 481 } else if nps.limit < limit { 482 // New limit is higher than what we have. Update it to match. 483 nps.limit = limit 484 nps.debouncedNotify.shutdown() 485 nps.debouncedNotify = sm.makeNonPathSubscriptionDebouncedNotify(topic, limit) 486 } 487 nps.subscriptionIDs[sid] = true 488 489 sm.nonPathSubscriptionIDToTopic[sid] = topic 490 subscriptionIDSetter() 491 return nil 492 } 493 494 func (sm *subscriptionManager) unsubscribePathLocked( 495 ctx context.Context, subscriptionID SubscriptionID) { 496 // First check if this is a subscription we don't yet have a folderBranch 497 // for. 498 if sm.cancelAndDeleteFolderBranchPollerLocked(subscriptionID) { 499 return 500 } 501 502 ref, ok := sm.pathSubscriptionIDToRef[subscriptionID] 503 if !ok { 504 return 505 } 506 delete(sm.pathSubscriptionIDToRef, subscriptionID) 507 508 ps, ok := sm.pathSubscriptions[ref] 509 if !ok { 510 return 511 } 512 delete(ps.subscriptionIDs, subscriptionID) 513 if len(ps.subscriptionIDs) == 0 { 514 ps.debouncedNotify.shutdown() 515 sm.unregisterForChangesLocked(ref.folderBranch) 516 delete(sm.pathSubscriptions, ref) 517 } 518 519 delete(sm.subscriptionIDs, subscriptionID) 520 } 521 522 func (sm *subscriptionManager) unsubscribeNonPathLocked( 523 ctx context.Context, subscriptionID SubscriptionID) { 524 topic, ok := sm.nonPathSubscriptionIDToTopic[subscriptionID] 525 if !ok { 526 return 527 } 528 delete(sm.nonPathSubscriptionIDToTopic, subscriptionID) 529 530 nps, ok := sm.nonPathSubscriptions[topic] 531 if !ok { 532 return 533 } 534 delete(nps.subscriptionIDs, subscriptionID) 535 if len(nps.subscriptionIDs) == 0 { 536 nps.debouncedNotify.shutdown() 537 delete(sm.nonPathSubscriptions, topic) 538 } 539 540 delete(sm.subscriptionIDs, subscriptionID) 541 } 542 543 // Unsubscribe implements the SubscriptionManager interface. 544 func (sm *subscriptionManager) Unsubscribe(ctx context.Context, sid SubscriptionID) { 545 sm.lock.Lock() 546 defer sm.lock.Unlock() 547 sm.unsubscribePathLocked(ctx, sid) 548 sm.unsubscribeNonPathLocked(ctx, sid) 549 } 550 551 func (sm *subscriptionManager) notifyRefLocked(ref pathSubscriptionRef) { 552 ps, ok := sm.pathSubscriptions[ref] 553 if !ok { 554 return 555 } 556 // We are notify()-ing while holding a lock, but it's fine since the 557 // other side of the channel consumes it pretty fast, either by 558 // dropping deduplicated ones, or by doing the actual send in a 559 // separate goroutine. 560 // 561 // We are not differentiating topics here yet. TODO: do it. 562 ps.debouncedNotify.notify() 563 } 564 565 func (sm *subscriptionManager) nodeChangeLocked(node Node) { 566 path, ok := node.GetPathPlaintextSansTlf() 567 if !ok { 568 return 569 } 570 cleanPath := cleanInTlfPath(path) 571 572 sm.notifyRefLocked(pathSubscriptionRef{ 573 folderBranch: node.GetFolderBranch(), 574 path: cleanPath, 575 }) 576 577 // Do this for parent as well, so if "children" is subscribed on parent 578 // path, we'd trigger a notification too. 579 if parent, ok := getParentPath(cleanPath); ok { 580 sm.notifyRefLocked(pathSubscriptionRef{ 581 folderBranch: node.GetFolderBranch(), 582 path: parent, 583 }) 584 } 585 } 586 587 var _ SubscriptionManagerPublisher = (*subscriptionManager)(nil) 588 589 // PublishChange implements the SubscriptionManagerPublisher interface. 590 func (sm *subscriptionManager) PublishChange(topic keybase1.SubscriptionTopic) { 591 sm.lock.RLock() 592 defer sm.lock.RUnlock() 593 594 // When sync status changes, trigger notification for all paths so they 595 // reload to get new prefetch status. This is unfortunate but it's 596 // non-trivial to actually build notification around individuall path's 597 // prefetch status. Since GUI doesnt' have that many path notifications, 598 // this should be fine. 599 // 600 // TODO: Build it. 601 if topic == keybase1.SubscriptionTopic_OVERALL_SYNC_STATUS { 602 for _, ps := range sm.pathSubscriptions { 603 ps.debouncedNotify.notify() 604 } 605 } 606 607 if nps, ok := sm.nonPathSubscriptions[topic]; ok { 608 nps.debouncedNotify.notify() 609 } 610 } 611 612 var _ Observer = (*subscriptionManager)(nil) 613 614 // LocalChange implements the Observer interface. 615 func (sm *subscriptionManager) LocalChange(ctx context.Context, 616 node Node, write WriteRange) { 617 sm.lock.RLock() 618 defer sm.lock.RUnlock() 619 // TODO HOTPOT-416: check topics 620 sm.nodeChangeLocked(node) 621 } 622 623 // BatchChanges implements the Observer interface. 624 func (sm *subscriptionManager) BatchChanges(ctx context.Context, 625 changes []NodeChange, allAffectedNodeIDs []NodeID) { 626 sm.lock.RLock() 627 defer sm.lock.RUnlock() 628 // TODO HOTPOT-416: check topics 629 for _, change := range changes { 630 sm.nodeChangeLocked(change.Node) 631 } 632 } 633 634 // TlfHandleChange implements the Observer interface. 635 func (sm *subscriptionManager) TlfHandleChange(ctx context.Context, 636 newHandle *tlfhandle.Handle) { 637 } 638 639 type subscriptionManagerManager struct { 640 lock sync.RWMutex 641 config Config 642 subscriptionManagers map[SubscriptionManagerClientID]*subscriptionManager 643 purgeableClientIDsFIFO []SubscriptionManagerClientID 644 } 645 646 func newSubscriptionManagerManager(config Config) *subscriptionManagerManager { 647 return &subscriptionManagerManager{ 648 config: config, 649 subscriptionManagers: make(map[SubscriptionManagerClientID]*subscriptionManager), 650 purgeableClientIDsFIFO: nil, 651 } 652 } 653 654 func (smm *subscriptionManagerManager) Shutdown(ctx context.Context) { 655 smm.lock.Lock() 656 defer smm.lock.Unlock() 657 658 for _, sm := range smm.subscriptionManagers { 659 sm.Shutdown(ctx) 660 } 661 smm.subscriptionManagers = make(map[SubscriptionManagerClientID]*subscriptionManager) 662 smm.purgeableClientIDsFIFO = nil 663 } 664 665 func (smm *subscriptionManagerManager) get( 666 clientID SubscriptionManagerClientID, purgeable bool, 667 notifier SubscriptionNotifier) *subscriptionManager { 668 smm.lock.RLock() 669 sm, ok := smm.subscriptionManagers[clientID] 670 smm.lock.RUnlock() 671 672 if ok { 673 return sm 674 } 675 676 smm.lock.Lock() 677 defer smm.lock.Unlock() 678 679 // Check again under the lock in case we've already created one. This is 680 // important since if we create it twice we'd end up with having the same 681 // clientID appearing twice in purgeableClientIDsFIFO and when we purge the 682 // second one we'd have a panic. 683 sm, ok = smm.subscriptionManagers[clientID] 684 if ok { 685 return sm 686 } 687 688 if purgeable { 689 if len(smm.purgeableClientIDsFIFO) == maxPurgeableSubscriptionManagerClient { 690 toPurge := smm.purgeableClientIDsFIFO[0] 691 smm.subscriptionManagers[toPurge].Shutdown(context.Background()) 692 delete(smm.subscriptionManagers, toPurge) 693 smm.purgeableClientIDsFIFO = smm.purgeableClientIDsFIFO[1:] 694 } 695 smm.purgeableClientIDsFIFO = append(smm.purgeableClientIDsFIFO, clientID) 696 } 697 698 sm = newSubscriptionManager(clientID, smm.config, notifier) 699 smm.subscriptionManagers[clientID] = sm 700 701 return sm 702 } 703 704 // PublishChange implements the SubscriptionManagerPublisher interface. 705 func (smm *subscriptionManagerManager) PublishChange(topic keybase1.SubscriptionTopic) { 706 smm.lock.RLock() 707 defer smm.lock.RUnlock() 708 for _, sm := range smm.subscriptionManagers { 709 sm.PublishChange(topic) 710 } 711 }