github.com/cilium/cilium@v1.16.2/pkg/policy/k8s/service.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package k8s

import (
	"context"
	"errors"
	"sync"

	"github.com/cilium/stream"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/labels"

	"github.com/cilium/cilium/pkg/k8s"
	"github.com/cilium/cilium/pkg/k8s/resource"
	"github.com/cilium/cilium/pkg/k8s/types"
	"github.com/cilium/cilium/pkg/lock"
	"github.com/cilium/cilium/pkg/logging/logfields"
	"github.com/cilium/cilium/pkg/policy/api"
	"github.com/cilium/cilium/pkg/time"
)

// isSelectableService returns true if the service svc can be selected by a ToServices rule.
// Normally, only services without a label selector (i.e. empty services)
// are allowed as targets of a toServices rule.
// This is to minimize the chances of a pod IP being selected by this rule, which might
// cause conflicting entries in the ipcache.
//
// This requirement, however, is dropped for HighScale IPCache mode, because pod IPs are
// normally excluded from the ipcache regardless. Therefore, in HighScale IPCache mode,
// all services can be selected by ToServices.
func (p *policyWatcher) isSelectableService(svc *k8s.Service) bool {
	if svc == nil {
		return false
	}
	return p.config.EnableHighScaleIPcache || svc.IsExternal()
}

// onServiceEvent processes a ServiceNotification and (if necessary)
// recalculates all policies affected by this change.
func (p *policyWatcher) onServiceEvent(event k8s.ServiceNotification) {
	err := p.updateToServicesPolicies(event.ID, event.Service, event.OldService)
	if err != nil {
		p.log.WithError(err).WithFields(logrus.Fields{
			logfields.Event:     event.Action,
			logfields.ServiceID: event.ID,
		}).Warning("Failed to recalculate CiliumNetworkPolicy rules after service event")
	}
}
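// The following is an editorial sketch, not part of the original file: it
// illustrates the selectability rule above by calling isSelectableService.
// With high-scale ipcache disabled, only selector-less ("external") services
// qualify as ToServices targets; EnableHighScaleIPcache lifts that restriction.
func exampleSelectableService(p *policyWatcher, svc *k8s.Service) string {
	if p.isSelectableService(svc) {
		return "selectable: selector-less service, or high-scale ipcache mode"
	}
	return "not selectable: nil service, or service backed by a pod selector"
}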
// updateToServicesPolicies is to be invoked when a service has changed (i.e. it was
// added, removed, its endpoints have changed, or its labels have changed).
// This function then checks if any of the known CNP/CCNPs are affected by this
// change, and recomputes them by calling resolveCiliumNetworkPolicyRefs.
func (p *policyWatcher) updateToServicesPolicies(svcID k8s.ServiceID, newSVC, oldSVC *k8s.Service) error {
	var errs []error

	// Bail out early if the updated service is not selectable
	if !(p.isSelectableService(newSVC) || p.isSelectableService(oldSVC)) {
		return nil
	}

	// newService is true if this is the first time we observe this service
	newService := oldSVC == nil
	// changedService is true if the service label or selector has changed
	changedService := !newSVC.DeepEqual(oldSVC)

	// candidatePolicyKeys contains the set of policy names we need to process
	// for this service update. By default, we consider all policies with
	// a ToServices selector as candidates.
	candidatePolicyKeys := p.toServicesPolicies
	if !(newService || changedService) {
		// If the service definition itself has not changed, and it's not the
		// first time we process this service, we only need to check the
		// policies which are known to select the old version of the service
		candidatePolicyKeys = p.cnpByServiceID[svcID]
	}

	// Iterate over all policies potentially affected by this service update,
	// and re-resolve the policy refs for each.
	for key := range candidatePolicyKeys {
		cnp, ok := p.cnpCache[key]
		if !ok {
			p.log.WithFields(logrus.Fields{
				logfields.Key:       key,
				logfields.ServiceID: svcID,
			}).Error("BUG: Candidate policy for service update not found. Please report this bug to Cilium developers.")
			continue
		}

		// Skip policies which are not affected by this service update
		if !(p.cnpMatchesService(cnp, svcID, newSVC) ||
			(!newService && changedService && p.cnpMatchesService(cnp, svcID, oldSVC))) {
			continue
		}

		if p.config.Debug {
			p.log.WithFields(logrus.Fields{
				logfields.CiliumNetworkPolicyName: cnp.Name,
				logfields.K8sAPIVersion:           cnp.APIVersion,
				logfields.K8sNamespace:            cnp.Namespace,
				logfields.ServiceID:               svcID,
			}).Debug("Service updated or deleted, recalculating CiliumNetworkPolicy rules")
		}
		initialRecvTime := time.Now()

		resourceID := resourceIDForCiliumNetworkPolicy(key, cnp)

		errs = append(errs, p.resolveCiliumNetworkPolicyRefs(cnp, key, initialRecvTime, resourceID))
	}
	return errors.Join(errs...)
}

// resolveToServices translates all ToServices rules found in the provided CNP
// into corresponding ToCIDRSet rules. It mutates the passed-in cnp in place.
func (p *policyWatcher) resolveToServices(key resource.Key, cnp *types.SlimCNP) {
	// We consult the service cache to obtain the service endpoints
	// which are selected by the ToServices selectors found in the CNP.
	p.svcCache.ForEachService(func(svcID k8s.ServiceID, svc *k8s.Service, eps *k8s.Endpoints) bool {
		if !p.isSelectableService(svc) {
			return true // continue
		}

		// svcEndpoints caches the selected endpoints in case they are
		// referenced more than once by this CNP
		svcEndpoints := newServiceEndpoints(svcID, svc, eps)

		// This extracts the selected service endpoints from the rule
		// and translates them to ToCIDRSet entries
		numMatches := svcEndpoints.processRule(cnp.Spec)
		for _, spec := range cnp.Specs {
			numMatches += svcEndpoints.processRule(spec)
		}

		// Mark the policy as selecting the service svcID. This allows us to
		// reduce the number of policy candidates in updateToServicesPolicies
		if numMatches > 0 {
			p.markCNPForService(key, svcID)
		} else {
			p.clearCNPForService(key, svcID)
		}

		return true
	})
}
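// Editorial sketch, not part of the original file: the reverse index
// maintained by markCNPForService/clearCNPForService (below) is what lets
// updateToServicesPolicies shrink its candidate set to just the policies
// already known to select an unchanged service.
func exampleServiceIndex(p *policyWatcher, key resource.Key, svcID k8s.ServiceID) bool {
	p.markCNPForService(key, svcID) // policy `key` now selects svcID
	_, selected := p.cnpByServiceID[svcID][key]
	p.clearCNPForService(key, svcID) // entry (and empty inner map) removed again
	return selected // true: the reverse-index entry existed while marked
}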
// cnpMatchesService returns true if the cnp contains a ToServices rule which
// matches the provided service svcID/svc
func (p *policyWatcher) cnpMatchesService(cnp *types.SlimCNP, svcID k8s.ServiceID, svc *k8s.Service) bool {
	if !p.isSelectableService(svc) {
		return false
	}

	if hasMatchingToServices(cnp.Spec, svcID, svc) {
		return true
	}

	for _, spec := range cnp.Specs {
		if hasMatchingToServices(spec, svcID, svc) {
			return true
		}
	}

	return false
}

// markCNPForService marks that a policy (referred to by 'key') contains a
// ToServices selector selecting the service svcID
func (p *policyWatcher) markCNPForService(key resource.Key, svcID k8s.ServiceID) {
	svcMap, ok := p.cnpByServiceID[svcID]
	if !ok {
		svcMap = make(map[resource.Key]struct{}, 1)
		p.cnpByServiceID[svcID] = svcMap
	}

	svcMap[key] = struct{}{}
}

// clearCNPForService indicates that a policy (referred to by 'key') no longer
// selects the service svcID via a ToServices rule
func (p *policyWatcher) clearCNPForService(key resource.Key, svcID k8s.ServiceID) {
	delete(p.cnpByServiceID[svcID], key)
	if len(p.cnpByServiceID[svcID]) == 0 {
		delete(p.cnpByServiceID, svcID)
	}
}

// hasMatchingToServices returns true if the rule contains a ToServices rule which
// matches the provided service svcID/svc
func hasMatchingToServices(spec *api.Rule, svcID k8s.ServiceID, svc *k8s.Service) bool {
	if spec == nil {
		return false
	}
	for _, egress := range spec.Egress {
		for _, toService := range egress.ToServices {
			if sel := toService.K8sServiceSelector; sel != nil {
				if serviceSelectorMatches(sel, svcID, svc) {
					return true
				}
			} else if ref := toService.K8sService; ref != nil {
				if serviceRefMatches(ref, svcID) {
					return true
				}
			}
		}
	}

	return false
}

// hasToServices returns true if the CNP contains a ToServices rule
func hasToServices(cnp *types.SlimCNP) bool {
	if specHasToServices(cnp.Spec) {
		return true
	}
	for _, spec := range cnp.Specs {
		if specHasToServices(spec) {
			return true
		}
	}
	return false
}

// specHasToServices returns true if the rule contains a ToServices rule
func specHasToServices(spec *api.Rule) bool {
	if spec == nil {
		return false
	}
	for _, egress := range spec.Egress {
		if len(egress.ToServices) > 0 {
			return true
		}
	}

	return false
}

// serviceSelectorMatches returns true if the ToServices k8sServiceSelector
// matches the labels of the provided service svc
func serviceSelectorMatches(sel *api.K8sServiceSelectorNamespace, svcID k8s.ServiceID, svc *k8s.Service) bool {
	if !(sel.Namespace == svcID.Namespace || sel.Namespace == "") {
		return false
	}

	es := api.EndpointSelector(sel.Selector)
	es.SyncRequirementsWithLabelSelector()
	return es.Matches(labels.Set(svc.Labels))
}

// serviceRefMatches returns true if the ToServices k8sService reference
// matches the name/namespace of the provided service svcID
func serviceRefMatches(ref *api.K8sServiceNamespace, svcID k8s.ServiceID) bool {
	return (ref.Namespace == svcID.Namespace || ref.Namespace == "") &&
		ref.ServiceName == svcID.Name
}
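// Editorial sketch, not part of the original file: both matchers above treat
// an empty Namespace in the ToServices clause as a wildcard, so a reference
// that omits the namespace matches a same-named service in any namespace.
func exampleRefMatch() bool {
	svcID := k8s.ServiceID{Name: "kafka", Namespace: "prod"}
	anyNS := &api.K8sServiceNamespace{ServiceName: "kafka"} // Namespace "": wildcard
	otherNS := &api.K8sServiceNamespace{ServiceName: "kafka", Namespace: "dev"}
	return serviceRefMatches(anyNS, svcID) && !serviceRefMatches(otherNS, svcID) // true
}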
// serviceEndpoints stores the endpoints associated with a service
type serviceEndpoints struct {
	svcID k8s.ServiceID
	svc   *k8s.Service
	eps   *k8s.Endpoints

	valid  bool
	cached []api.CIDR
}

// newServiceEndpoints returns an initialized serviceEndpoints struct
func newServiceEndpoints(svcID k8s.ServiceID, svc *k8s.Service, eps *k8s.Endpoints) *serviceEndpoints {
	return &serviceEndpoints{
		svcID: svcID,
		svc:   svc,
		eps:   eps,
	}
}

// endpoints returns the service's endpoints as an []api.CIDR slice.
// It caches the result such that repeat invocations do not allocate.
func (s *serviceEndpoints) endpoints() []api.CIDR {
	if s.valid {
		return s.cached
	}

	prefixes := s.eps.Prefixes()
	s.cached = make([]api.CIDR, 0, len(prefixes))
	for _, prefix := range prefixes {
		s.cached = append(s.cached, api.CIDR(prefix.String()))
	}

	s.valid = true
	return s.cached
}

// appendEndpoints appends all the endpoints as generated CIDRRules into the toCIDRSet
func appendEndpoints(toCIDRSet *api.CIDRRuleSlice, endpoints []api.CIDR) {
	for _, cidr := range endpoints {
		*toCIDRSet = append(*toCIDRSet, api.CIDRRule{
			Cidr:      cidr,
			Generated: true,
		})
	}
}

// processRule parses the ToServices selectors in the provided rule and translates
// them to ToCIDRSet entries
func (s *serviceEndpoints) processRule(rule *api.Rule) (numMatches int) {
	if rule == nil {
		return
	}
	for i, egress := range rule.Egress {
		for _, toService := range egress.ToServices {
			if sel := toService.K8sServiceSelector; sel != nil {
				if serviceSelectorMatches(sel, s.svcID, s.svc) {
					appendEndpoints(&rule.Egress[i].ToCIDRSet, s.endpoints())
					numMatches++
				}
			} else if ref := toService.K8sService; ref != nil {
				if serviceRefMatches(ref, s.svcID) {
					appendEndpoints(&rule.Egress[i].ToCIDRSet, s.endpoints())
					numMatches++
				}
			}
		}
	}
	return numMatches
}

type serviceQueue struct {
	mu    *lock.Mutex
	cond  *sync.Cond
	queue []k8s.ServiceNotification
}

func newServiceQueue() *serviceQueue {
	mu := new(lock.Mutex)
	return &serviceQueue{
		mu:    mu,
		cond:  sync.NewCond(mu),
		queue: []k8s.ServiceNotification{},
	}
}

// enqueue appends an item and wakes up a waiting dequeue (if any).
func (q *serviceQueue) enqueue(item k8s.ServiceNotification) {
	q.mu.Lock()
	q.queue = append(q.queue, item)
	q.cond.Signal()
	q.mu.Unlock()
}

// signal wakes up a waiting dequeue without adding an item, allowing it to
// re-check for context cancellation.
func (q *serviceQueue) signal() {
	q.mu.Lock()
	q.cond.Signal()
	q.mu.Unlock()
}

func (q *serviceQueue) dequeue(ctx context.Context) (item k8s.ServiceNotification, ok bool) {
	q.mu.Lock()
	defer q.mu.Unlock()

	for len(q.queue) == 0 && ctx.Err() == nil {
		q.cond.Wait()
	}

	// If ctx is cancelled, we return immediately
	if ctx.Err() != nil {
		return item, false
	}

	item = q.queue[0]
	q.queue = q.queue[1:]

	return item, true
}
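// Editorial sketch, not part of the original file: a minimal consumer loop.
// dequeue blocks until an item arrives or ctx is cancelled; cancellation only
// unblocks the cond.Wait if something calls signal, which is what
// serviceNotificationsQueue (below) arranges via context.AfterFunc.
func exampleDrainQueue(ctx context.Context, q *serviceQueue) (drained []k8s.ServiceNotification) {
	for {
		item, ok := q.dequeue(ctx)
		if !ok { // ctx was cancelled
			return drained
		}
		drained = append(drained, item)
	}
}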
// serviceNotificationsQueue converts the observable src into a channel.
// When the provided context is cancelled, the underlying subscription is
// cancelled and the channel is closed.
// In contrast to stream.ToChannel, this function has an unbounded buffer,
// meaning the consumer must always consume the channel (or cancel ctx).
func serviceNotificationsQueue(ctx context.Context, src stream.Observable[k8s.ServiceNotification]) <-chan k8s.ServiceNotification {
	ctx, cancel := context.WithCancel(ctx)
	ch := make(chan k8s.ServiceNotification)
	q := newServiceQueue()

	// This goroutine is woken up whenever a new item has been added to the
	// queue and forwards it to ch. It exits when the context ctx is cancelled.
	go func() {
		// Close downstream channel on exit
		defer close(ch)

		// Exit the for-loop below if the context is cancelled.
		// See https://pkg.go.dev/context#AfterFunc for a more detailed
		// explanation of this pattern
		cleanupCancellation := context.AfterFunc(ctx, q.signal)
		defer cleanupCancellation()

		for {
			item, ok := q.dequeue(ctx)
			if !ok {
				return
			}

			select {
			case ch <- item:
				continue
			case <-ctx.Done():
				return
			}
		}
	}()

	src.Observe(ctx,
		q.enqueue,
		func(err error) {
			cancel() // stops the goroutine above
		},
	)

	return ch
}
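// Editorial sketch, not part of the original file: because the queue between
// the subscription and the returned channel is unbounded, the consumer must
// keep receiving (or cancel ctx) to bound memory usage. A typical consumer
// might look like this:
func exampleConsumeNotifications(ctx context.Context, p *policyWatcher, src stream.Observable[k8s.ServiceNotification]) {
	for ev := range serviceNotificationsQueue(ctx, src) {
		p.onServiceEvent(ev)
	}
	// The channel is closed once ctx is cancelled and the forwarding
	// goroutine has exited.
}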