go.ligato.io/vpp-agent/v3@v3.5.0/plugins/orchestrator/watcher/aggregator.go

// Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package watcher

import (
	"context"
	"fmt"
	"strings"

	"go.ligato.io/cn-infra/v2/config"
	"go.ligato.io/cn-infra/v2/datasync"
	"go.ligato.io/cn-infra/v2/datasync/kvdbsync"
	"go.ligato.io/cn-infra/v2/datasync/kvdbsync/local"
	"go.ligato.io/cn-infra/v2/datasync/resync"
	"go.ligato.io/cn-infra/v2/datasync/syncbase"
	"go.ligato.io/cn-infra/v2/infra"
	"go.ligato.io/cn-infra/v2/logging"
	"go.ligato.io/cn-infra/v2/utils/safeclose"
	"go.ligato.io/vpp-agent/v3/plugins/orchestrator/contextdecorator"
	"go.ligato.io/vpp-agent/v3/plugins/orchestrator/localregistry"
)

// Option is a function that acts on a Plugin to inject dependencies or configuration.
type Option func(*Aggregator)

// UseWatchers returns an option that sets the watchers.
func UseWatchers(watchers ...datasync.KeyValProtoWatcher) Option {
	return func(p *Aggregator) {
		p.Watchers = watchers
	}
}

// Aggregator is an adapter that allows multiple
// watchers (KeyValProtoWatcher) to be aggregated into one.
// A Watch request is delegated to all of them.
type Aggregator struct {
	infra.PluginDeps

	keyPrefixes []string
	localKVs    map[string]datasync.KeyVal
	config      *Config

	Resync   *resync.Plugin
	Local    *syncbase.Registry
	Watchers []datasync.KeyValProtoWatcher
}

// Config holds the Aggregator configuration.
type Config struct {
	// ResyncDataSourceOverride overrides the default data source (empty in the aggregator and set
	// to "datasync" later elsewhere). It supports use cases where the data sources must match,
	// otherwise a resync does not affect the same set of data (e.g. using only the initfile watcher
	// to fill the initial data and agentctl resync to clean it up: agentctl resync works only on
	// 'grpc'-sourced data, so the default 'datasync'-sourced initfile data could not be handled).
	// This is not a full solution covering all combinations of watchers and agentctl resync, but
	// rather a possible fix for some use cases. A full solution should handle multiple resyncs
	// (one per data source) and all their corner cases.
	ResyncDataSourceOverride string `json:"resync-data-source-override"`
}
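// A minimal sketch of what an aggregator.conf could look like, assuming the plain
// YAML key/value format used by other cn-infra plugin config files (the key follows
// the json tag above; the "grpc" value is only an illustrative assumption):
//
//	resync-data-source-override: grpc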
// NewPlugin creates a new Plugin with the provided Options.
func NewPlugin(opts ...Option) *Aggregator {
	p := &Aggregator{}

	p.PluginName = "aggregator"
	p.Local = local.DefaultRegistry
	p.Resync = &resync.DefaultPlugin

	for _, o := range opts {
		o(p)
	}
	if p.Cfg == nil {
		p.Cfg = config.ForPlugin(p.String(),
			config.WithCustomizedFlag(config.FlagName(p.String()), "aggregator.conf"),
		)
	}
	p.PluginDeps.SetupLog()

	return p
}

// Init prepares the internal key-value cache and loads the plugin configuration file.
func (p *Aggregator) Init() error {
	p.localKVs = map[string]datasync.KeyVal{}

	// parse configuration file
	var err error
	p.config, err = p.retrieveConfig()
	if err != nil {
		return err
	}

	return nil
}

// Watch subscribes to every transport available within the transport aggregator
// and also subscribes to the localclient (local.Registry).
// The function implements KeyValProtoWatcher.Watch().
func (p *Aggregator) Watch(
	resyncName string,
	changeChan chan datasync.ChangeEvent,
	resyncChan chan datasync.ResyncEvent,
	keyPrefixes ...string,
) (datasync.WatchRegistration, error) {

	p.keyPrefixes = keyPrefixes

	// prepare the list of watchers
	var watchers []datasync.KeyValProtoWatcher
	for _, w := range p.Watchers {
		if l, ok := w.(*syncbase.Registry); ok && p.Local != nil && l == p.Local {
			p.Log.Warn("found local registry (localclient) in watchers, ignoring it..")
			continue
		}
		// ignore watchers whose data sources will never be used and that therefore
		// never send configuration data to this aggregator
		if syncer, ok := w.(*kvdbsync.Plugin); ok {
			if syncer.KvPlugin != nil && syncer.KvPlugin.Disabled() {
				continue
			}
		}
		// TODO Handle kvdbsync.Plugin watchers that are not disabled, but won't transmit any resync data.
		// This aggregator collects resyncs from all watchers, but if the resync from one or more watchers
		// doesn't happen, the aggregated resync won't be triggered. This can happen when one of the watchers
		// is not registered with the resync plugin, usually because it never connected to its data source
		// (the registration with the resync plugin happens in the OnConnect callback function).
		// To handle the situation properly, the OnConnect callback would have to be used to distinguish
		// which watchers are reached by the resync trigger. That might delay the readiness of all watchers,
		// and hence the trigger for the initial call of DoResync() doing the initial Agent resync would have
		// to be delayed as well SOMEHOW (it can't be done in Init or AfterInit of plugins).

		// localregistry.InitFileRegistry can also be a watcher that never sends anything (e.g. if it is
		// misconfigured, no default init file is present, loading of the data fails, ...) -> check for Empty()
		if initRegistry, ok := w.(*localregistry.InitFileRegistry); ok && initRegistry.Empty() {
			continue
		}

		watchers = append(watchers, w)
	}
	p.Watchers = watchers

	// start the watch for all watchers
	p.Log.Infof("Watch for %v with %d prefixes", resyncName, len(keyPrefixes))

	aggrResync := make(chan datasync.ResyncEvent, len(watchers))

	go p.watchAggrResync(aggrResync, resyncChan)

	var registrations []datasync.WatchRegistration
	for i, adapter := range watchers {
		partChange := make(chan datasync.ChangeEvent)
		partResync := make(chan datasync.ResyncEvent)

		name := fmt.Sprint(adapter) + "/" + resyncName
		watcherReg, err := adapter.Watch(name, partChange, partResync, keyPrefixes...)
		if err != nil {
			return nil, err
		}

		// forward partial change and resync events from this watcher to the aggregated channels
		go func(i int, chanChange chan datasync.ChangeEvent, chanResync chan datasync.ResyncEvent) {
			for {
				select {
				case e := <-chanChange:
					p.Log.Debugf("watcher %d got CHANGE PART, sending to aggregated", i)
					changeChan <- e

				case e := <-chanResync:
					p.Log.Debugf("watcher %d got RESYNC PART, sending to aggregated", i)
					aggrResync <- e
				}
			}
		}(i+1, partChange, partResync)

		if watcherReg != nil {
			registrations = append(registrations, watcherReg)
		}
	}

	// register and watch for the localclient
	partResync := make(chan datasync.ResyncEvent)
	partChange := make(chan datasync.ChangeEvent)

	go p.watchLocalEvents(partChange, changeChan, partResync)

	name := "LOCAL" + "/" + resyncName
	localReg, err := p.Local.Watch(name, partChange, partResync, keyPrefixes...)
	if err != nil {
		return nil, err
	}

	p.Log.Debug("added localclient as aggregated watcher")

	registrations = append(registrations, localReg)

	return &WatchRegistration{
		Registrations: registrations,
	}, nil
}
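// A hedged usage sketch of wiring the Aggregator and starting a watch, roughly how a
// consumer such as the orchestrator might do it. The etcd/kvdbsync watcher, the init-file
// registry, the channel sizes and the "orchestrator" resync name are illustrative
// assumptions, not taken from this file:
//
//	agg := NewPlugin(UseWatchers(
//		etcdDataSync,     // assumed: a *kvdbsync.Plugin wrapping an etcd connection
//		initFileRegistry, // assumed: a *localregistry.InitFileRegistry
//	))
//	changeCh := make(chan datasync.ChangeEvent, 100)
//	resyncCh := make(chan datasync.ResyncEvent, 100)
//	reg, err := agg.Watch("orchestrator", changeCh, resyncCh, keyPrefixes...)
//	if err != nil {
//		// handle the error
//	}
//	defer reg.Close()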
func (p *Aggregator) watchAggrResync(aggrResync, resyncCh chan datasync.ResyncEvent) {
	aggregatedResync := func(allResyncs []datasync.ResyncEvent) {
		var prefixKeyVals = map[string]map[string]datasync.KeyVal{}

		kvToKeyVals := func(prefix string, kv datasync.KeyVal) {
			keyVals, ok := prefixKeyVals[prefix]
			if !ok {
				p.Log.Debugf(" - keyval prefix: %v", prefix)
				keyVals = map[string]datasync.KeyVal{}
				prefixKeyVals[prefix] = keyVals
			}
			key := kv.GetKey()
			if _, ok := keyVals[key]; ok {
				p.Log.Warnf("resync from watcher overwrites key: %v", key)
			}
			keyVals[key] = kv
		}

		// process resync events from all watchers
		p.Log.Debugf("preparing keyvals for aggregated resync from %d cached resyncs", len(allResyncs))
		for _, ev := range allResyncs {
			for prefix, iterator := range ev.GetValues() {
				for {
					kv, allReceived := iterator.GetNext()
					if allReceived {
						break
					}

					kvToKeyVals(prefix, kv)
				}
			}
		}

		// process keyvals from the localclient
		p.Log.Debugf("preparing localclient keyvals for aggregated resync with %d keyvals", len(p.localKVs))
		for key, kv := range p.localKVs {
			var kvprefix string
			for _, prefix := range p.keyPrefixes {
				if strings.HasPrefix(key, prefix) {
					kvprefix = prefix
					break
				}
			}
			if kvprefix == "" {
				p.Log.Warnf("no registered prefix found for keyval from localclient with key: %v", key)
			}
			kvToKeyVals(kvprefix, kv)
		}

		// prepare the aggregated resync
		var vals = map[string]datasync.KeyValIterator{}
		for prefix, keyVals := range prefixKeyVals {
			var data []datasync.KeyVal
			for _, kv := range keyVals {
				data = append(data, kv)
			}
			vals[prefix] = syncbase.NewKVIterator(data)
		}

		ctx := context.Background()
		if p.config.ResyncDataSourceOverride != "" {
			ctx = contextdecorator.DataSrcContext(ctx, p.config.ResyncDataSourceOverride)
		}
		resEv := syncbase.NewResyncEventDB(ctx, vals)

		p.Log.Debugf("sending aggregated resync event (%d prefixes) to original resync channel", len(vals))
		resyncCh <- resEv
		p.Log.Debugf("aggregated resync was accepted, waiting for done chan")
		resErr := <-resEv.DoneChan
		p.Log.Debugf("aggregated resync done (err=%v)", resErr)
	}

	var cachedResyncs []datasync.ResyncEvent

	// process resync events from watchers
	for e := range aggrResync {
		cachedResyncs = append(cachedResyncs, e)
		p.Log.Debugf("watchers received resync event (%d/%d watchers done)", len(cachedResyncs), len(p.Watchers))

		e.Done(nil)

		if len(cachedResyncs) == len(p.Watchers) {
			p.Log.Debug("resyncs from all watchers received, calling aggregated resync")
			aggregatedResync(cachedResyncs)
			// clear the cached resyncs
			cachedResyncs = nil
		}
	}
	p.Log.Debugf("aggrResync channel was closed")
}
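// Note that aggregatedResync above blocks on resEv.DoneChan, so whoever reads from the
// original resync channel must eventually call Done on the event. A minimal sketch of
// such a consumer (hypothetical, not part of this package):
//
//	go func() {
//		for ev := range resyncCh {
//			err := applyResync(ev) // hypothetical handler applying the resync data
//			ev.Done(err)           // unblocks the aggregator's wait on DoneChan
//		}
//	}()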
func (p *Aggregator) watchLocalEvents(partChange, changeChan chan datasync.ChangeEvent, partResync chan datasync.ResyncEvent) {
	for {
		select {
		case e := <-partChange:
			p.Log.Debugf("LOCAL got CHANGE part, %d changes, sending to aggregated", len(e.GetChanges()))

			for _, change := range e.GetChanges() {
				key := change.GetKey()
				switch change.GetChangeType() {
				case datasync.Delete:
					p.Log.Debugf(" - DEL %s", key)
					delete(p.localKVs, key)
				case datasync.Put:
					p.Log.Debugf(" - PUT %s", key)
					p.localKVs[key] = change
				}
			}
			changeChan <- e

		case e := <-partResync:
			p.Log.Debugf("LOCAL watcher got RESYNC part, sending to aggregated")

			p.localKVs = map[string]datasync.KeyVal{}
			for _, iterator := range e.GetValues() {
				for {
					kv, allReceived := iterator.GetNext()
					if allReceived {
						break
					}

					key := kv.GetKey()
					p.localKVs[key] = kv
				}
			}
			p.Log.Debugf("LOCAL watcher resynced %d keyvals", len(p.localKVs))
			e.Done(nil)

			p.Log.Debug("LOCAL watcher calling RESYNC")
			p.Resync.DoResync() // execution continues in the p.watchAggrResync goroutine, where p.localKVs will be handled
		}
	}
}

// retrieveConfig loads the Aggregator plugin configuration file.
func (p *Aggregator) retrieveConfig() (*Config, error) {
	config := &Config{
		// default configuration
		ResyncDataSourceOverride: "", // don't override
	}
	found, err := p.Cfg.LoadValue(config)
	if !found {
		if err == nil {
			p.Log.Debug("Aggregator plugin config not found")
		} else {
			p.Log.Debugf("Aggregator plugin config can't be loaded due to: %v", err)
		}
		return config, err
	}
	if err != nil {
		return nil, err
	}
	return config, err
}

// WatchRegistration is an adapter that allows multiple
// registrations (WatchRegistration) to be aggregated into one.
// The Close operation is applied collectively to all included registrations.
type WatchRegistration struct {
	Registrations []datasync.WatchRegistration
}
// Register registers a new key prefix under all aggregated registrations. Call Register(keyPrefix)
// on a specific registration to add the key to that registration only.
func (wa *WatchRegistration) Register(resyncName, keyPrefix string) error {
	for _, registration := range wa.Registrations {
		if err := registration.Register(resyncName, keyPrefix); err != nil {
			logging.DefaultLogger.Warnf("aggregated register failed: %v", err)
		}
	}

	return nil
}

// Unregister closes the registration of the given key under all aggregated registrations.
// Call Unregister(keyPrefix) on a specific registration to remove the key from that registration only.
func (wa *WatchRegistration) Unregister(keyPrefix string) error {
	for _, registration := range wa.Registrations {
		if err := registration.Unregister(keyPrefix); err != nil {
			logging.DefaultLogger.Warnf("aggregated unregister failed: %v", err)
		}
	}

	return nil
}

// Close closes every registration under the aggregator.
// This function implements WatchRegistration.Close().
func (wa *WatchRegistration) Close() error {
	return safeclose.Close(wa.Registrations)
}
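// A small sketch of adding and removing a key prefix at runtime through the aggregated
// registration returned by Aggregator.Watch (the prefix and resync name below are
// illustrative assumptions):
//
//	reg, _ := agg.Watch("orchestrator", changeCh, resyncCh)
//	_ = reg.Register("orchestrator", "config/vpp/v2/interfaces/") // added to every aggregated registration
//	_ = reg.Unregister("config/vpp/v2/interfaces/")               // removed from all of them again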