// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.

package eventqueue

import (
	"sync/atomic"

	"github.com/juju/errors"
	"github.com/juju/worker/v3/catacomb"
	"gopkg.in/tomb.v2"

	"github.com/juju/juju/core/changestream"
)

// Logger represents the logging methods called.
type Logger interface {
	Infof(message string, args ...interface{})
	Tracef(message string, args ...interface{})
	IsTraceEnabled() bool
}

// Stream represents a way to get change events.
type Stream interface {
	// Changes returns a channel for a given namespace (database).
	Changes() <-chan changestream.ChangeEvent
}

// subscription represents a subscriber in the event queue. It holds a tomb, so
// that we can tie the lifecycle of a subscription to the event queue.
type subscription struct {
	tomb tomb.Tomb
	id   uint64

	// topics records the namespaces this subscription registered filters
	// against, so the queue can remove those filters on unsubscribe.
	topics map[string]struct{}

	// changes carries dispatched events to the consumer. active is closed
	// when the subscription is terminated, releasing any blocked dispatch.
	changes chan changestream.ChangeEvent
	active  chan struct{}

	// unsubscribeFn asks the owning queue to remove this subscription.
	unsubscribeFn func()
}

// newSubscription creates a subscription with the given unique id. The
// supplied unsubscribeFn is invoked when the consumer calls Unsubscribe,
// letting the owning queue serialize the removal in its main loop.
func newSubscription(id uint64, unsubscribeFn func()) *subscription {
	sub := &subscription{
		id:            id,
		changes:       make(chan changestream.ChangeEvent),
		topics:        make(map[string]struct{}),
		active:        make(chan struct{}),
		unsubscribeFn: unsubscribeFn,
	}

	// The tomb runs loop so the subscription can be registered as a worker
	// (see the catacomb.Add call in EventQueue.Subscribe).
	sub.tomb.Go(sub.loop)

	return sub
}

// Unsubscribe removes the subscription from the event queue asynchronously.
// This ensures that all unsubscriptions can be serialized. An unsubscribe
// will never happen inside a dispatch call. If you attempt to unsubscribe
// whilst a dispatch is signalling, the unsubscribe will happen after all
// dispatches have been called.
func (s *subscription) Unsubscribe() {
	s.unsubscribeFn()
}

// Changes returns the channel that the subscription will receive events on.
func (s *subscription) Changes() <-chan changestream.ChangeEvent {
	return s.changes
}

// Done provides a way to know from the consumer side if the underlying
// subscription has been terminated. This is useful to know if the event queue
// has been closed.
func (s *subscription) Done() <-chan struct{} {
	return s.active
}

// Kill implements worker.Worker.
func (s *subscription) Kill() {
	s.tomb.Kill(nil)
}

// Wait implements worker.Worker.
func (s *subscription) Wait() error {
	return s.tomb.Wait()
}

// loop blocks until the subscription is either killed via its tomb or
// terminated by the queue (active closed).
func (s *subscription) loop() error {
	select {
	case <-s.tomb.Dying():
		return tomb.ErrDying
	case <-s.active:
		return nil
	}
}

// signal will dispatch a change event to the subscription. If the subscription
// is not active, the change will be dropped.
func (s *subscription) signal(change changestream.ChangeEvent) {
	// NOTE(review): changes is unbuffered, so this send blocks until the
	// consumer receives unless the subscription is dying or closed — a slow
	// consumer therefore stalls the queue's dispatch loop.
	select {
	case <-s.tomb.Dying():
		return
	case <-s.active:
		return
	case s.changes <- change:
	}
}

// close closes the active channel, which will signal to the consumer that the
// subscription is no longer active. Only the queue's main loop calls this,
// and only once per subscription.
func (s *subscription) close() {
	close(s.active)
}

// subscriptionOpts pairs a subscription with the options it was created with,
// for transport over the queue's subscription channel.
type subscriptionOpts struct {
	*subscription
	opts []changestream.SubscriptionOption
}

// eventFilter routes changes in one namespace to a subscription when the
// change type matches changeMask and the predicate accepts the event.
type eventFilter struct {
	subscriptionID uint64
	changeMask     changestream.ChangeType
	filter         func(changestream.ChangeEvent) bool
}

// EventQueue defines an event listener and dispatcher for db changes that can
// be multiplexed to subscriptions. The event queue allows consumers to
// subscribe via callbacks to the event queue. This is a lockless
// implementation, all subscriptions and changes are serialized in the main
// loop. Dispatching is randomized to ensure that subscriptions don't depend on
// ordering.
// The subscriptions can be associated with different subscription
// options, which provide filtering when dispatching. Unsubscribing is provided
// per subscription, which is done asynchronously.
type EventQueue struct {
	catacomb catacomb.Catacomb
	stream   Stream
	logger   Logger

	// subscriptions indexes every live subscription by id;
	// subscriptionsByNS holds the per-namespace filters, and
	// subscriptionsAll the ids that receive every event (no options).
	// subscriptionsCount is only ever touched atomically (see Subscribe).
	subscriptions      map[uint64]*subscription
	subscriptionsByNS  map[string][]*eventFilter
	subscriptionsAll   map[uint64]struct{}
	subscriptionsCount uint64

	// (un)subscription related channels to serialize adding and removing
	// subscriptions. This allows the queue to be lockless.
	subscriptionCh   chan subscriptionOpts
	unsubscriptionCh chan uint64
}

// New creates a new EventQueue that will use the Stream for events.
func New(stream Stream, logger Logger) (*EventQueue, error) {
	queue := &EventQueue{
		stream:             stream,
		logger:             logger,
		subscriptions:      make(map[uint64]*subscription),
		subscriptionsByNS:  make(map[string][]*eventFilter),
		subscriptionsAll:   make(map[uint64]struct{}),
		subscriptionsCount: 0,

		subscriptionCh:   make(chan subscriptionOpts),
		unsubscriptionCh: make(chan uint64),
	}

	// The catacomb owns the main loop; subscriptions are later added to it
	// so their lifecycles are tied to the queue's.
	if err := catacomb.Invoke(catacomb.Plan{
		Site: &queue.catacomb,
		Work: queue.loop,
	}); err != nil {
		return nil, errors.Trace(err)
	}

	return queue, nil
}

// Subscribe creates a new subscription to the event queue. Options can be
// provided to allow filter during the dispatching phase.
func (q *EventQueue) Subscribe(opts ...changestream.SubscriptionOption) (changestream.Subscription, error) {
	// Get a new subscription count without using any mutexes.
178 subID := atomic.AddUint64(&q.subscriptionsCount, 1) 179 180 sub := newSubscription(subID, func() { q.unsubscribe(subID) }) 181 if err := q.catacomb.Add(sub); err != nil { 182 return nil, errors.Trace(err) 183 } 184 185 select { 186 case <-q.catacomb.Dying(): 187 return nil, q.catacomb.ErrDying() 188 case q.subscriptionCh <- subscriptionOpts{ 189 subscription: sub, 190 opts: opts, 191 }: 192 } 193 194 return sub, nil 195 } 196 197 // Kill stops the event queue. 198 func (q *EventQueue) Kill() { 199 q.catacomb.Kill(nil) 200 } 201 202 // Wait waits for the event queue to stop. 203 func (q *EventQueue) Wait() error { 204 return q.catacomb.Wait() 205 } 206 207 func (q *EventQueue) unsubscribe(subscriptionID uint64) { 208 select { 209 case <-q.catacomb.Dying(): 210 return 211 case q.unsubscriptionCh <- subscriptionID: 212 } 213 } 214 215 func (q *EventQueue) loop() error { 216 defer func() { 217 for _, sub := range q.subscriptions { 218 sub.close() 219 } 220 q.subscriptions = nil 221 q.subscriptionsByNS = nil 222 223 close(q.subscriptionCh) 224 close(q.unsubscriptionCh) 225 }() 226 227 for { 228 select { 229 case <-q.catacomb.Dying(): 230 return q.catacomb.ErrDying() 231 232 case event, ok := <-q.stream.Changes(): 233 // If the stream is closed, we expect that a new worker will come 234 // again using the change stream worker infrastructure. In this case 235 // just ignore and close out. 236 if !ok { 237 q.logger.Infof("change stream change channel is closed") 238 return nil 239 } 240 241 subs := q.gatherSubscriptions(event) 242 for _, sub := range subs { 243 sub.signal(event) 244 } 245 246 case subOpt := <-q.subscriptionCh: 247 sub := subOpt.subscription 248 249 // Create a new subscription and assign a unique ID to it. 250 q.subscriptions[sub.id] = sub 251 252 // No options were supplied, just add it to the all bucket, so 253 // they'll be included in every dispatch. 
			if len(subOpt.opts) == 0 {
				q.subscriptionsAll[sub.id] = struct{}{}
				continue
			}

			// Register filters to route changes matching the subscription criteria to
			// the newly created subscription.
			for _, opt := range subOpt.opts {
				namespace := opt.Namespace()
				q.subscriptionsByNS[namespace] = append(q.subscriptionsByNS[namespace], &eventFilter{
					subscriptionID: sub.id,
					changeMask:     opt.ChangeMask(),
					filter:         opt.Filter(),
				})
				sub.topics[namespace] = struct{}{}
			}

		case subscriptionID := <-q.unsubscriptionCh:
			sub, found := q.subscriptions[subscriptionID]
			if !found {
				// Already removed (or never registered); nothing to do.
				continue
			}

			// Drop every filter this subscription registered, from each
			// namespace it was subscribed to.
			for topic := range sub.topics {
				var updatedFilters []*eventFilter
				for _, filter := range q.subscriptionsByNS[topic] {
					if filter.subscriptionID == subscriptionID {
						continue
					}
					updatedFilters = append(updatedFilters, filter)
				}
				q.subscriptionsByNS[topic] = updatedFilters
			}

			delete(q.subscriptions, subscriptionID)
			delete(q.subscriptionsAll, subscriptionID)

			sub.close()
		}
	}
}

// gatherSubscriptions returns the set of subscriptions that should receive
// the given change event: every unfiltered ("all") subscription, plus any
// subscription with a filter in the event's namespace whose change mask
// matches the event type and whose predicate accepts it. Each subscription
// appears at most once in the result.
func (q *EventQueue) gatherSubscriptions(ch changestream.ChangeEvent) []*subscription {
	subs := make(map[uint64]*subscription)

	for id := range q.subscriptionsAll {
		subs[id] = q.subscriptions[id]
	}

	for _, subOpt := range q.subscriptionsByNS[ch.Namespace()] {
		// Skip subscriptions already selected via the all bucket or an
		// earlier filter.
		if _, ok := subs[subOpt.subscriptionID]; ok {
			continue
		}

		// The subscription only wants change types present in its mask.
		if (ch.Type() & subOpt.changeMask) == 0 {
			continue
		}

		if !subOpt.filter(ch) {
			if q.logger.IsTraceEnabled() {
				q.logger.Tracef("filtering out change: %v", ch)
			}
			continue
		}

		if q.logger.IsTraceEnabled() {
			q.logger.Tracef("dispatching change: %v", ch)
		}

		subs[subOpt.subscriptionID] = q.subscriptions[subOpt.subscriptionID]
	}

	// By collecting the subs within a map to ensure that a sub can be only
	// called once, we actually gain random ordering. This prevents subscribers
	// from depending on the order of dispatches.
	results := make([]*subscription, 0, len(subs))
	for _, sub := range subs {
		results = append(results, sub)
	}
	return results
}