github.com/datadog/cilium@v1.6.12/daemon/k8s_watcher.go (about)

     1  // Copyright 2016-2019 Authors of Cilium
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package main
    16  
    17  import (
    18  	"context"
    19  	"errors"
    20  	"fmt"
    21  	"net"
    22  	"net/url"
    23  	"os"
    24  	"strconv"
    25  	"sync"
    26  	"time"
    27  
    28  	"github.com/cilium/cilium/pkg/comparator"
    29  	"github.com/cilium/cilium/pkg/controller"
    30  	"github.com/cilium/cilium/pkg/endpointmanager"
    31  	"github.com/cilium/cilium/pkg/identity"
    32  	"github.com/cilium/cilium/pkg/ipcache"
    33  	"github.com/cilium/cilium/pkg/k8s"
    34  	ciliumio "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
    35  	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
    36  	clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
    37  	"github.com/cilium/cilium/pkg/k8s/informer"
    38  	k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics"
    39  	"github.com/cilium/cilium/pkg/k8s/types"
    40  	k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
    41  	k8sversion "github.com/cilium/cilium/pkg/k8s/version"
    42  	"github.com/cilium/cilium/pkg/kvstore"
    43  	"github.com/cilium/cilium/pkg/labels"
    44  	"github.com/cilium/cilium/pkg/loadbalancer"
    45  	"github.com/cilium/cilium/pkg/lock"
    46  	"github.com/cilium/cilium/pkg/logging/logfields"
    47  	bpfIPCache "github.com/cilium/cilium/pkg/maps/ipcache"
    48  	"github.com/cilium/cilium/pkg/metrics"
    49  	"github.com/cilium/cilium/pkg/node"
    50  	"github.com/cilium/cilium/pkg/option"
    51  	"github.com/cilium/cilium/pkg/policy"
    52  	"github.com/cilium/cilium/pkg/serializer"
    53  	"github.com/cilium/cilium/pkg/service"
    54  	"github.com/cilium/cilium/pkg/source"
    55  	"github.com/cilium/cilium/pkg/spanstat"
    56  
    57  	"github.com/sirupsen/logrus"
    58  	v1 "k8s.io/api/core/v1"
    59  	"k8s.io/api/extensions/v1beta1"
    60  	networkingv1 "k8s.io/api/networking/v1"
    61  	"k8s.io/apimachinery/pkg/fields"
    62  	"k8s.io/apimachinery/pkg/util/runtime"
    63  	"k8s.io/apimachinery/pkg/util/wait"
    64  	"k8s.io/client-go/tools/cache"
    65  	k8s_metrics "k8s.io/client-go/tools/metrics"
    66  )
    67  
    68  const (
    69  	k8sAPIGroupCRD                   = "CustomResourceDefinition"
    70  	k8sAPIGroupNodeV1Core            = "core/v1::Node"
    71  	k8sAPIGroupNamespaceV1Core       = "core/v1::Namespace"
    72  	k8sAPIGroupServiceV1Core         = "core/v1::Service"
    73  	k8sAPIGroupEndpointV1Core        = "core/v1::Endpoint"
    74  	k8sAPIGroupPodV1Core             = "core/v1::Pods"
    75  	k8sAPIGroupNetworkingV1Core      = "networking.k8s.io/v1::NetworkPolicy"
    76  	k8sAPIGroupIngressV1Beta1        = "extensions/v1beta1::Ingress"
    77  	k8sAPIGroupCiliumNetworkPolicyV2 = "cilium/v2::CiliumNetworkPolicy"
    78  	k8sAPIGroupCiliumNodeV2          = "cilium/v2::CiliumNode"
    79  	k8sAPIGroupCiliumEndpointV2      = "cilium/v2::CiliumEndpoint"
    80  
    81  	metricCNP            = "CiliumNetworkPolicy"
    82  	metricEndpoint       = "Endpoint"
    83  	metricIngress        = "Ingress"
    84  	metricKNP            = "NetworkPolicy"
    85  	metricNS             = "Namespace"
    86  	metricCiliumNode     = "CiliumNode"
    87  	metricCiliumEndpoint = "CiliumEndpoint"
    88  	metricPod            = "Pod"
    89  	metricService        = "Service"
    90  	metricCreate         = "create"
    91  	metricDelete         = "delete"
    92  	metricUpdate         = "update"
    93  )
    94  
    95  var (
    96  	k8sCM = controller.NewManager()
    97  
    98  	importMetadataCache = ruleImportMetadataCache{
    99  		ruleImportMetadataMap: make(map[string]policyImportMetadata),
   100  	}
   101  
   102  	errIPCacheOwnedByNonK8s = fmt.Errorf("ipcache entry owned by kvstore or agent")
   103  )
   104  
    105  // ruleImportMetadataCache maps the unique identifier of a CiliumNetworkPolicy
    106  // (namespace and name) to metadata about how the rule was imported into the
    107  // agent's policy repository: the repository revision at import time and any
    108  // error that occurred while importing.
   109  type ruleImportMetadataCache struct {
   110  	mutex                 lock.RWMutex
   111  	ruleImportMetadataMap map[string]policyImportMetadata
   112  }
   113  
   114  type policyImportMetadata struct {
   115  	revision          uint64
   116  	policyImportError error
   117  }
   118  
   119  func (r *ruleImportMetadataCache) upsert(cnp *types.SlimCNP, revision uint64, importErr error) {
   120  	if cnp == nil {
   121  		return
   122  	}
   123  
   124  	meta := policyImportMetadata{
   125  		revision:          revision,
   126  		policyImportError: importErr,
   127  	}
   128  	podNSName := k8sUtils.GetObjNamespaceName(&cnp.ObjectMeta)
   129  
   130  	r.mutex.Lock()
   131  	r.ruleImportMetadataMap[podNSName] = meta
   132  	r.mutex.Unlock()
   133  }
   134  
   135  func (r *ruleImportMetadataCache) delete(cnp *types.SlimCNP) {
   136  	if cnp == nil {
   137  		return
   138  	}
   139  	podNSName := k8sUtils.GetObjNamespaceName(&cnp.ObjectMeta)
   140  
   141  	r.mutex.Lock()
   142  	delete(r.ruleImportMetadataMap, podNSName)
   143  	r.mutex.Unlock()
   144  }
   145  
   146  func (r *ruleImportMetadataCache) get(cnp *types.SlimCNP) (policyImportMetadata, bool) {
   147  	if cnp == nil {
   148  		return policyImportMetadata{}, false
   149  	}
   150  	podNSName := k8sUtils.GetObjNamespaceName(&cnp.ObjectMeta)
   151  	r.mutex.RLock()
   152  	policyImportMeta, ok := r.ruleImportMetadataMap[podNSName]
   153  	r.mutex.RUnlock()
   154  	return policyImportMeta, ok
   155  }
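
// The sketch below is not part of the original file; it is a minimal,
// hypothetical illustration of how the package-level importMetadataCache is
// meant to be driven around a policy import: upsert after importing a CNP,
// get when reporting its status, and delete when the CNP is removed.
func exampleRuleImportMetadataUsage(cnp *types.SlimCNP, revision uint64, importErr error) {
	// Record the repository revision and any error observed during import.
	importMetadataCache.upsert(cnp, revision, importErr)

	// Later, e.g. while updating the CNP status, read the metadata back.
	if meta, ok := importMetadataCache.get(cnp); ok {
		log.Debugf("CNP imported at revision %d, import error: %v",
			meta.revision, meta.policyImportError)
	}

	// Once the CNP is deleted from Kubernetes, drop its metadata as well.
	importMetadataCache.delete(cnp)
}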
   156  
    157  // k8sAPIGroupsUsed is a lockable map that records which k8s API groups are
    158  // enabled/in use.
    159  // Note: it could be replaced with sync.Map (Go 1.9) once that version is required.
   160  type k8sAPIGroupsUsed struct {
   161  	lock.RWMutex
   162  	apis map[string]bool
   163  }
   164  
   165  func (m *k8sAPIGroupsUsed) addAPI(api string) {
   166  	m.Lock()
   167  	defer m.Unlock()
   168  	if m.apis == nil {
   169  		m.apis = make(map[string]bool)
   170  	}
   171  	m.apis[api] = true
   172  }
   173  
   174  func (m *k8sAPIGroupsUsed) removeAPI(api string) {
   175  	m.Lock()
   176  	defer m.Unlock()
   177  	delete(m.apis, api)
   178  }
   179  
   180  func (m *k8sAPIGroupsUsed) getGroups() []string {
   181  	m.RLock()
   182  	defer m.RUnlock()
   183  	groups := make([]string, 0, len(m.apis))
   184  	for k := range m.apis {
   185  		groups = append(groups, k)
   186  	}
   187  	return groups
   188  }
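
// Hypothetical sketch, not part of the original file: the typical lifecycle of
// an entry in k8sAPIGroupsUsed while a watcher is running, and how the set of
// currently watched API groups can be reported.
func exampleAPIGroupTracking(m *k8sAPIGroupsUsed) {
	m.addAPI(k8sAPIGroupServiceV1Core)
	defer m.removeAPI(k8sAPIGroupServiceV1Core)

	// getGroups returns a snapshot of all currently registered API groups.
	log.Debugf("watching k8s API groups: %v", m.getGroups())
}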
   189  
    190  // k8sMetrics implements the LatencyMetric and ResultMetric interfaces from the
    191  // k8s client-go package
   192  type k8sMetrics struct{}
   193  
   194  func (*k8sMetrics) Observe(verb string, u url.URL, latency time.Duration) {
   195  	metrics.KubernetesAPIInteractions.WithLabelValues(u.Path, verb).Observe(latency.Seconds())
   196  }
   197  
   198  func (*k8sMetrics) Increment(code string, method string, host string) {
   199  	metrics.KubernetesAPICalls.WithLabelValues(host, method, code).Inc()
   200  	k8smetrics.LastInteraction.Reset()
   201  }
   202  
   203  func init() {
   204  	// Replace error handler with our own
   205  	runtime.ErrorHandlers = []func(error){
   206  		k8s.K8sErrorHandler,
   207  	}
   208  
   209  	k8sMetric := &k8sMetrics{}
   210  	k8s_metrics.Register(k8sMetric, k8sMetric)
   211  }
   212  
    213  // blockWaitGroupToSyncResources registers the given informer under resourceName
    214  // so that anything waiting on that resource via waitForCacheSync blocks until all
    215  // objects of the specified resource stored in Kubernetes are received by the
    216  // informer and processed by the controller. It fatally exits if syncing these
    217  // initial objects fails; if the given stop channel is closed, it does not fatally exit.
   218  func (d *Daemon) blockWaitGroupToSyncResources(stop <-chan struct{}, informer cache.Controller, resourceName string) {
   219  	ch := make(chan struct{})
   220  	d.k8sResourceSyncedMu.Lock()
   221  	d.k8sResourceSynced[resourceName] = ch
   222  	d.k8sResourceSyncedMu.Unlock()
   223  	go func() {
   224  		scopedLog := log.WithField("kubernetesResource", resourceName)
   225  		scopedLog.Debug("waiting for cache to synchronize")
   226  		if ok := cache.WaitForCacheSync(stop, informer.HasSynced); !ok {
   227  			select {
   228  			case <-stop:
   229  				scopedLog.Debug("canceled cache synchronization")
    230  				// do not fatally exit if the stop channel was closed
   231  			default:
    232  				// Fatally exit if the resource fails to sync
   233  				scopedLog.Fatalf("failed to wait for cache to sync")
   234  			}
   235  		} else {
   236  			scopedLog.Debug("cache synced")
   237  		}
   238  		close(ch)
   239  	}()
   240  }
   241  
    242  // waitForCacheSync waits for k8s caches to be synchronized for the given
    243  // resources. It returns once all resourceNames are synchronized with cilium-agent.
   244  func (d *Daemon) waitForCacheSync(resourceNames ...string) {
   245  	for _, resourceName := range resourceNames {
   246  		d.k8sResourceSyncedMu.RLock()
   247  		c, ok := d.k8sResourceSynced[resourceName]
   248  		d.k8sResourceSyncedMu.RUnlock()
   249  		if !ok {
   250  			continue
   251  		}
   252  		<-c
   253  	}
   254  }
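
// A minimal sketch (not in the original file) of how blockWaitGroupToSyncResources
// and waitForCacheSync pair up: the former registers a channel that is closed once
// the informer has synced, the latter blocks on that channel. The ctrl argument and
// the "example/v1::Foo" resource name are hypothetical.
func (d *Daemon) exampleWaitForInformer(ctrl cache.Controller) {
	const resourceName = "example/v1::Foo"

	// Register the informer so waitForCacheSync has a channel to block on.
	d.blockWaitGroupToSyncResources(wait.NeverStop, ctrl, resourceName)
	go ctrl.Run(wait.NeverStop)

	// Blocks until the informer reports HasSynced; returns immediately for
	// resource names that were never registered.
	d.waitForCacheSync(resourceName)
}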
   255  
    256  // initK8sSubsystem returns a channel that is closed once all caches essential
    257  // to the daemon have been synchronized.
   258  func (d *Daemon) initK8sSubsystem() <-chan struct{} {
   259  	if err := d.EnableK8sWatcher(option.Config.K8sWatcherQueueSize); err != nil {
   260  		log.WithError(err).Fatal("Unable to establish connection to Kubernetes apiserver")
   261  	}
   262  
   263  	cachesSynced := make(chan struct{})
   264  
   265  	go func() {
   266  		log.Info("Waiting until all pre-existing resources related to policy have been received")
    267  		// Wait only for certain caches, but not all!
    268  		// We do not wait for node or ingress synchronization.
   269  		d.waitForCacheSync(
   270  			// To perform the service translation and have the BPF LB datapath
   271  			// with the right service -> backend (k8s endpoints) translation.
   272  			k8sAPIGroupServiceV1Core,
   273  			// To perform the service translation and have the BPF LB datapath
   274  			// with the right service -> backend (k8s endpoints) translation.
   275  			k8sAPIGroupEndpointV1Core,
   276  			// We need all network policies in place before restoring to make sure
   277  			// we are enforcing the correct policies for each endpoint before
   278  			// restarting.
   279  			k8sAPIGroupCiliumNetworkPolicyV2,
    280  			// We need to know about all other nodes.
   281  			k8sAPIGroupCiliumNodeV2,
   282  			// We need all network policies in place before restoring to make sure
   283  			// we are enforcing the correct policies for each endpoint before
   284  			// restarting.
   285  			k8sAPIGroupNetworkingV1Core,
   286  			// Namespaces can contain labels which are essential for endpoints
   287  			// being restored to have the right identity.
   288  			k8sAPIGroupNamespaceV1Core,
   289  			// Pods can contain labels which are essential for endpoints
   290  			// being restored to have the right identity.
   291  			k8sAPIGroupPodV1Core,
   292  		)
    293  		// CiliumEndpoints are used to synchronize the ipcache; wait for
    294  		// them unless the CiliumEndpoint CRD is disabled.
   295  		if !option.Config.DisableCiliumEndpointCRD {
   296  			d.waitForCacheSync(k8sAPIGroupCiliumEndpointV2)
   297  		}
   298  		close(cachesSynced)
   299  	}()
   300  
   301  	go func() {
   302  		select {
   303  		case <-cachesSynced:
   304  			log.Info("All pre-existing resources related to policy have been received; continuing")
   305  		case <-time.After(option.Config.K8sSyncTimeout):
   306  			log.Fatalf("Timed out waiting for pre-existing resources related to policy to be received; exiting")
   307  		}
   308  	}()
   309  
   310  	return cachesSynced
   311  }
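
// Hedged sketch, not part of the original file: how a caller is expected to
// consume the channel returned by initK8sSubsystem. Anything that depends on
// the policy-relevant caches (e.g. endpoint restoration) should block on it.
func exampleWaitForK8sCaches(d *Daemon) {
	cachesSynced := d.initK8sSubsystem()

	// Block until the essential caches listed in initK8sSubsystem are in sync
	// (or the agent fatals after option.Config.K8sSyncTimeout).
	<-cachesSynced
}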
   312  
   313  // K8sEventReceived does metric accounting for each received Kubernetes event
   314  func (d *Daemon) K8sEventReceived(scope string, action string, valid, equal bool) {
   315  	metrics.EventTSK8s.SetToCurrentTime()
   316  	k8smetrics.LastInteraction.Reset()
   317  
   318  	metrics.KubernetesEventReceived.WithLabelValues(scope, action, strconv.FormatBool(valid), strconv.FormatBool(equal)).Inc()
   319  }
   320  
    321  // EnableK8sWatcher watches for policy, service, and endpoint changes on the
    322  // Kubernetes API server defined in the receiving daemon's k8sClient.
    323  // queueSize specifies the queue length used to serialize k8s events.
   324  func (d *Daemon) EnableK8sWatcher(queueSize uint) error {
   325  	if !k8s.IsEnabled() {
   326  		log.Debug("Not enabling k8s event listener because k8s is not enabled")
   327  		return nil
   328  	}
   329  	log.Info("Enabling k8s event listener")
   330  
   331  	d.k8sAPIGroups.addAPI(k8sAPIGroupCRD)
   332  
   333  	ciliumNPClient := k8s.CiliumClient()
   334  
   335  	serKNPs := serializer.NewFunctionQueue(queueSize)
   336  	serSvcs := serializer.NewFunctionQueue(queueSize)
   337  	serEps := serializer.NewFunctionQueue(queueSize)
   338  	serIngresses := serializer.NewFunctionQueue(queueSize)
   339  	serCNPs := serializer.NewFunctionQueue(queueSize)
   340  	serPods := serializer.NewFunctionQueue(queueSize)
   341  	serNodes := serializer.NewFunctionQueue(queueSize)
   342  	serCiliumEndpoints := serializer.NewFunctionQueue(queueSize)
   343  	serNamespaces := serializer.NewFunctionQueue(queueSize)
   344  
   345  	_, policyController := informer.NewInformer(
   346  		cache.NewListWatchFromClient(k8s.Client().NetworkingV1().RESTClient(),
   347  			"networkpolicies", v1.NamespaceAll, fields.Everything()),
   348  		&networkingv1.NetworkPolicy{},
   349  		0,
   350  		cache.ResourceEventHandlerFuncs{
   351  			AddFunc: func(obj interface{}) {
   352  				var valid, equal bool
   353  				defer func() { d.K8sEventReceived(metricKNP, metricCreate, valid, equal) }()
   354  				if k8sNP := k8s.CopyObjToV1NetworkPolicy(obj); k8sNP != nil {
   355  					valid = true
   356  					serKNPs.Enqueue(func() error {
   357  						err := d.addK8sNetworkPolicyV1(k8sNP)
   358  						d.K8sEventProcessed(metricKNP, metricCreate, err == nil)
   359  						return nil
   360  					}, serializer.NoRetry)
   361  				}
   362  			},
   363  			UpdateFunc: func(oldObj, newObj interface{}) {
   364  				var valid, equal bool
   365  				defer func() { d.K8sEventReceived(metricKNP, metricUpdate, valid, equal) }()
   366  				if oldK8sNP := k8s.CopyObjToV1NetworkPolicy(oldObj); oldK8sNP != nil {
   367  					valid = true
   368  					if newK8sNP := k8s.CopyObjToV1NetworkPolicy(newObj); newK8sNP != nil {
   369  						if k8s.EqualV1NetworkPolicy(oldK8sNP, newK8sNP) {
   370  							equal = true
   371  							return
   372  						}
   373  
   374  						serKNPs.Enqueue(func() error {
   375  							err := d.updateK8sNetworkPolicyV1(oldK8sNP, newK8sNP)
   376  							d.K8sEventProcessed(metricKNP, metricUpdate, err == nil)
   377  							return nil
   378  						}, serializer.NoRetry)
   379  					}
   380  				}
   381  			},
   382  			DeleteFunc: func(obj interface{}) {
   383  				var valid, equal bool
   384  				defer func() { d.K8sEventReceived(metricKNP, metricDelete, valid, equal) }()
   385  				k8sNP := k8s.CopyObjToV1NetworkPolicy(obj)
   386  				if k8sNP == nil {
   387  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   388  					if !ok {
   389  						return
   390  					}
   391  					// Delete was not observed by the watcher but is
   392  					// removed from kube-apiserver. This is the last
   393  					// known state and the object no longer exists.
   394  					k8sNP = k8s.CopyObjToV1NetworkPolicy(deletedObj.Obj)
   395  					if k8sNP == nil {
   396  						return
   397  					}
   398  				}
   399  
   400  				valid = true
   401  				serKNPs.Enqueue(func() error {
   402  					err := d.deleteK8sNetworkPolicyV1(k8sNP)
   403  					d.K8sEventProcessed(metricKNP, metricDelete, err == nil)
   404  					return nil
   405  				}, serializer.NoRetry)
   406  			},
   407  		},
   408  		k8s.ConvertToNetworkPolicy,
   409  	)
   410  	d.blockWaitGroupToSyncResources(wait.NeverStop, policyController, k8sAPIGroupNetworkingV1Core)
   411  	go policyController.Run(wait.NeverStop)
   412  
   413  	d.k8sAPIGroups.addAPI(k8sAPIGroupNetworkingV1Core)
   414  
   415  	_, svcController := informer.NewInformer(
   416  		cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
   417  			"services", v1.NamespaceAll, fields.Everything()),
   418  		&v1.Service{},
   419  		0,
   420  		cache.ResourceEventHandlerFuncs{
   421  			AddFunc: func(obj interface{}) {
   422  				var valid, equal bool
   423  				defer func() { d.K8sEventReceived(metricService, metricCreate, valid, equal) }()
   424  				if k8sSvc := k8s.CopyObjToV1Services(obj); k8sSvc != nil {
   425  					valid = true
   426  					serSvcs.Enqueue(func() error {
   427  						err := d.addK8sServiceV1(k8sSvc)
   428  						d.K8sEventProcessed(metricService, metricCreate, err == nil)
   429  						return nil
   430  					}, serializer.NoRetry)
   431  				}
   432  			},
   433  			UpdateFunc: func(oldObj, newObj interface{}) {
   434  				var valid, equal bool
   435  				defer func() { d.K8sEventReceived(metricService, metricUpdate, valid, equal) }()
   436  				if oldk8sSvc := k8s.CopyObjToV1Services(oldObj); oldk8sSvc != nil {
   437  					valid = true
   438  					if newk8sSvc := k8s.CopyObjToV1Services(newObj); newk8sSvc != nil {
   439  						if k8s.EqualV1Services(oldk8sSvc, newk8sSvc) {
   440  							equal = true
   441  							return
   442  						}
   443  
   444  						serSvcs.Enqueue(func() error {
   445  							err := d.updateK8sServiceV1(oldk8sSvc, newk8sSvc)
   446  							d.K8sEventProcessed(metricService, metricUpdate, err == nil)
   447  							return nil
   448  						}, serializer.NoRetry)
   449  					}
   450  				}
   451  			},
   452  			DeleteFunc: func(obj interface{}) {
   453  				var valid, equal bool
   454  				defer func() { d.K8sEventReceived(metricService, metricDelete, valid, equal) }()
   455  				k8sSvc := k8s.CopyObjToV1Services(obj)
   456  				if k8sSvc == nil {
   457  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   458  					if !ok {
   459  						return
   460  					}
   461  					// Delete was not observed by the watcher but is
   462  					// removed from kube-apiserver. This is the last
   463  					// known state and the object no longer exists.
   464  					k8sSvc = k8s.CopyObjToV1Services(deletedObj.Obj)
   465  					if k8sSvc == nil {
   466  						return
   467  					}
   468  				}
   469  
   470  				valid = true
   471  				serSvcs.Enqueue(func() error {
   472  					err := d.deleteK8sServiceV1(k8sSvc)
   473  					d.K8sEventProcessed(metricService, metricDelete, err == nil)
   474  					return nil
   475  				}, serializer.NoRetry)
   476  			},
   477  		},
   478  		k8s.ConvertToK8sService,
   479  	)
   480  	d.blockWaitGroupToSyncResources(wait.NeverStop, svcController, k8sAPIGroupServiceV1Core)
   481  	go svcController.Run(wait.NeverStop)
   482  	d.k8sAPIGroups.addAPI(k8sAPIGroupServiceV1Core)
   483  
   484  	_, endpointController := informer.NewInformer(
   485  		cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
   486  			"endpoints", v1.NamespaceAll,
   487  			fields.ParseSelectorOrDie(option.Config.K8sWatcherEndpointSelector),
   488  		),
   489  		&v1.Endpoints{},
   490  		0,
   491  		cache.ResourceEventHandlerFuncs{
   492  			AddFunc: func(obj interface{}) {
   493  				var valid, equal bool
   494  				defer func() { d.K8sEventReceived(metricEndpoint, metricCreate, valid, equal) }()
   495  				if k8sEP := k8s.CopyObjToV1Endpoints(obj); k8sEP != nil {
   496  					valid = true
   497  					serEps.Enqueue(func() error {
   498  						err := d.addK8sEndpointV1(k8sEP)
   499  						d.K8sEventProcessed(metricEndpoint, metricCreate, err == nil)
   500  						return nil
   501  					}, serializer.NoRetry)
   502  				}
   503  			},
   504  			UpdateFunc: func(oldObj, newObj interface{}) {
   505  				var valid, equal bool
   506  				defer func() { d.K8sEventReceived(metricEndpoint, metricUpdate, valid, equal) }()
   507  				if oldk8sEP := k8s.CopyObjToV1Endpoints(oldObj); oldk8sEP != nil {
   508  					valid = true
   509  					if newk8sEP := k8s.CopyObjToV1Endpoints(newObj); newk8sEP != nil {
   510  						if k8s.EqualV1Endpoints(oldk8sEP, newk8sEP) {
   511  							equal = true
   512  							return
   513  						}
   514  
   515  						serEps.Enqueue(func() error {
   516  							err := d.updateK8sEndpointV1(oldk8sEP, newk8sEP)
   517  							d.K8sEventProcessed(metricEndpoint, metricUpdate, err == nil)
   518  							return nil
   519  						}, serializer.NoRetry)
   520  					}
   521  				}
   522  			},
   523  			DeleteFunc: func(obj interface{}) {
   524  				var valid, equal bool
   525  				defer func() { d.K8sEventReceived(metricEndpoint, metricDelete, valid, equal) }()
   526  				k8sEP := k8s.CopyObjToV1Endpoints(obj)
   527  				if k8sEP == nil {
   528  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   529  					if !ok {
   530  						return
   531  					}
   532  					// Delete was not observed by the watcher but is
   533  					// removed from kube-apiserver. This is the last
   534  					// known state and the object no longer exists.
   535  					k8sEP = k8s.CopyObjToV1Endpoints(deletedObj.Obj)
   536  					if k8sEP == nil {
   537  						return
   538  					}
   539  				}
   540  				valid = true
   541  				serEps.Enqueue(func() error {
   542  					err := d.deleteK8sEndpointV1(k8sEP)
   543  					d.K8sEventProcessed(metricEndpoint, metricDelete, err == nil)
   544  					return nil
   545  				}, serializer.NoRetry)
   546  			},
   547  		},
   548  		k8s.ConvertToK8sEndpoints,
   549  	)
   550  	d.blockWaitGroupToSyncResources(wait.NeverStop, endpointController, k8sAPIGroupEndpointV1Core)
   551  	go endpointController.Run(wait.NeverStop)
   552  	d.k8sAPIGroups.addAPI(k8sAPIGroupEndpointV1Core)
   553  
   554  	if option.Config.IsLBEnabled() {
   555  		_, ingressController := informer.NewInformer(
   556  			cache.NewListWatchFromClient(k8s.Client().ExtensionsV1beta1().RESTClient(),
   557  				"ingresses", v1.NamespaceAll, fields.Everything()),
   558  			&v1beta1.Ingress{},
   559  			0,
   560  			cache.ResourceEventHandlerFuncs{
   561  				AddFunc: func(obj interface{}) {
   562  					var valid, equal bool
    563  					defer func() { d.K8sEventReceived(metricIngress, metricCreate, valid, equal) }()
   564  					if k8sIngress := k8s.CopyObjToV1beta1Ingress(obj); k8sIngress != nil {
   565  						valid = true
   566  						serIngresses.Enqueue(func() error {
   567  							err := d.addIngressV1beta1(k8sIngress)
   568  							d.K8sEventProcessed(metricIngress, metricCreate, err == nil)
   569  							return nil
   570  						}, serializer.NoRetry)
   571  					}
   572  				},
   573  				UpdateFunc: func(oldObj, newObj interface{}) {
   574  					var valid, equal bool
    575  					defer func() { d.K8sEventReceived(metricIngress, metricUpdate, valid, equal) }()
   576  					if oldk8sIngress := k8s.CopyObjToV1beta1Ingress(oldObj); oldk8sIngress != nil {
   577  						valid = true
   578  						if newk8sIngress := k8s.CopyObjToV1beta1Ingress(newObj); newk8sIngress != nil {
   579  							if k8s.EqualV1beta1Ingress(oldk8sIngress, newk8sIngress) {
   580  								equal = true
   581  								return
   582  							}
   583  
   584  							serIngresses.Enqueue(func() error {
   585  								err := d.updateIngressV1beta1(oldk8sIngress, newk8sIngress)
   586  								d.K8sEventProcessed(metricIngress, metricUpdate, err == nil)
   587  								return nil
   588  							}, serializer.NoRetry)
   589  						}
   590  					}
   591  				},
   592  				DeleteFunc: func(obj interface{}) {
   593  					var valid, equal bool
    594  					defer func() { d.K8sEventReceived(metricIngress, metricDelete, valid, equal) }()
   595  					k8sIngress := k8s.CopyObjToV1beta1Ingress(obj)
   596  					if k8sIngress == nil {
   597  						deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   598  						if !ok {
   599  							return
   600  						}
   601  						// Delete was not observed by the watcher but is
   602  						// removed from kube-apiserver. This is the last
   603  						// known state and the object no longer exists.
   604  						k8sIngress = k8s.CopyObjToV1beta1Ingress(deletedObj.Obj)
   605  						if k8sIngress == nil {
   606  							return
   607  						}
   608  					}
   609  					valid = true
    610  					serIngresses.Enqueue(func() error {
   611  						err := d.deleteIngressV1beta1(k8sIngress)
   612  						d.K8sEventProcessed(metricIngress, metricDelete, err == nil)
   613  						return nil
   614  					}, serializer.NoRetry)
   615  				},
   616  			},
   617  			k8s.ConvertToIngress,
   618  		)
   619  		d.blockWaitGroupToSyncResources(wait.NeverStop, ingressController, k8sAPIGroupIngressV1Beta1)
   620  		go ingressController.Run(wait.NeverStop)
   621  		d.k8sAPIGroups.addAPI(k8sAPIGroupIngressV1Beta1)
   622  	}
   623  
   624  	var (
   625  		cnpEventStore    cache.Store
   626  		cnpConverterFunc informer.ConvertFunc
   627  	)
   628  	cnpStore := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
   629  	switch {
   630  	case k8sversion.Capabilities().Patch:
   631  		// k8s >= 1.13 does not require a store to update CNP status so
   632  		// we don't even need to keep the status of a CNP with us.
   633  		cnpConverterFunc = k8s.ConvertToCNP
   634  	default:
   635  		cnpEventStore = cnpStore
   636  		cnpConverterFunc = k8s.ConvertToCNPWithStatus
   637  	}
   638  
   639  	ciliumV2Controller := informer.NewInformerWithStore(
   640  		cache.NewListWatchFromClient(ciliumNPClient.CiliumV2().RESTClient(),
   641  			"ciliumnetworkpolicies", v1.NamespaceAll, fields.Everything()),
   642  		&cilium_v2.CiliumNetworkPolicy{},
   643  		0,
   644  		cache.ResourceEventHandlerFuncs{
   645  			AddFunc: func(obj interface{}) {
   646  				var valid, equal bool
   647  				defer func() { d.K8sEventReceived(metricCNP, metricCreate, valid, equal) }()
   648  				if cnp := k8s.CopyObjToV2CNP(obj); cnp != nil {
   649  					valid = true
   650  					serCNPs.Enqueue(func() error {
   651  						if cnp.RequiresDerivative() {
   652  							return nil
   653  						}
   654  						err := d.addCiliumNetworkPolicyV2(ciliumNPClient, cnpEventStore, cnp)
   655  						d.K8sEventProcessed(metricCNP, metricCreate, err == nil)
   656  						return nil
   657  					}, serializer.NoRetry)
   658  				}
   659  			},
   660  			UpdateFunc: func(oldObj, newObj interface{}) {
   661  				var valid, equal bool
   662  				defer func() { d.K8sEventReceived(metricCNP, metricUpdate, valid, equal) }()
   663  				if oldCNP := k8s.CopyObjToV2CNP(oldObj); oldCNP != nil {
   664  					valid = true
   665  					if newCNP := k8s.CopyObjToV2CNP(newObj); newCNP != nil {
   666  						if k8s.EqualV2CNP(oldCNP, newCNP) {
   667  							equal = true
   668  							return
   669  						}
   670  
   671  						serCNPs.Enqueue(func() error {
   672  							if newCNP.RequiresDerivative() {
   673  								return nil
   674  							}
   675  
   676  							err := d.updateCiliumNetworkPolicyV2(ciliumNPClient, cnpEventStore, oldCNP, newCNP)
   677  							d.K8sEventProcessed(metricCNP, metricUpdate, err == nil)
   678  							return nil
   679  						}, serializer.NoRetry)
   680  					}
   681  				}
   682  			},
   683  			DeleteFunc: func(obj interface{}) {
   684  				var valid, equal bool
   685  				defer func() { d.K8sEventReceived(metricCNP, metricDelete, valid, equal) }()
   686  				cnp := k8s.CopyObjToV2CNP(obj)
   687  				if cnp == nil {
   688  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   689  					if !ok {
   690  						return
   691  					}
   692  					// Delete was not observed by the watcher but is
   693  					// removed from kube-apiserver. This is the last
   694  					// known state and the object no longer exists.
   695  					cnp = k8s.CopyObjToV2CNP(deletedObj.Obj)
   696  					if cnp == nil {
   697  						return
   698  					}
   699  				}
   700  				valid = true
   701  				serCNPs.Enqueue(func() error {
   702  					err := d.deleteCiliumNetworkPolicyV2(cnp)
   703  					d.K8sEventProcessed(metricCNP, metricDelete, err == nil)
   704  					return nil
   705  				}, serializer.NoRetry)
   706  			},
   707  		},
   708  		cnpConverterFunc,
   709  		cnpStore,
   710  	)
   711  	d.blockWaitGroupToSyncResources(wait.NeverStop, ciliumV2Controller, k8sAPIGroupCiliumNetworkPolicyV2)
   712  	go ciliumV2Controller.Run(wait.NeverStop)
   713  	d.k8sAPIGroups.addAPI(k8sAPIGroupCiliumNetworkPolicyV2)
   714  
   715  	asyncControllers := sync.WaitGroup{}
   716  	asyncControllers.Add(1)
   717  
   718  	// CiliumNode objects are used for node discovery until the key-value
   719  	// store is connected
   720  	go func() {
   721  		var once sync.Once
   722  		for {
   723  			_, ciliumNodeInformer := informer.NewInformer(
   724  				cache.NewListWatchFromClient(ciliumNPClient.CiliumV2().RESTClient(),
   725  					"ciliumnodes", v1.NamespaceAll, fields.Everything()),
   726  				&cilium_v2.CiliumNode{},
   727  				0,
   728  				cache.ResourceEventHandlerFuncs{
   729  					AddFunc: func(obj interface{}) {
   730  						var valid, equal bool
   731  						defer func() { d.K8sEventReceived(metricCiliumNode, metricCreate, valid, equal) }()
   732  						if ciliumNode, ok := obj.(*cilium_v2.CiliumNode); ok {
   733  							valid = true
   734  							n := node.ParseCiliumNode(ciliumNode)
   735  							if n.IsLocal() {
   736  								return
   737  							}
   738  							serNodes.Enqueue(func() error {
   739  								d.nodeDiscovery.Manager.NodeUpdated(n)
   740  								d.K8sEventProcessed(metricCiliumNode, metricCreate, true)
   741  								return nil
   742  							}, serializer.NoRetry)
   743  						}
   744  					},
   745  					UpdateFunc: func(oldObj, newObj interface{}) {
   746  						var valid, equal bool
   747  						defer func() { d.K8sEventReceived(metricCiliumNode, metricUpdate, valid, equal) }()
   748  						if ciliumNode, ok := newObj.(*cilium_v2.CiliumNode); ok {
   749  							valid = true
   750  							n := node.ParseCiliumNode(ciliumNode)
   751  							if n.IsLocal() {
   752  								return
   753  							}
   754  							serNodes.Enqueue(func() error {
   755  								d.nodeDiscovery.Manager.NodeUpdated(n)
   756  								d.K8sEventProcessed(metricCiliumNode, metricUpdate, true)
   757  								return nil
   758  							}, serializer.NoRetry)
   759  						}
   760  					},
   761  					DeleteFunc: func(obj interface{}) {
   762  						var valid, equal bool
   763  						defer func() { d.K8sEventReceived(metricCiliumNode, metricDelete, valid, equal) }()
   764  						ciliumNode := k8s.CopyObjToCiliumNode(obj)
   765  						if ciliumNode == nil {
   766  							deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   767  							if !ok {
   768  								return
   769  							}
   770  							// Delete was not observed by the watcher but is
   771  							// removed from kube-apiserver. This is the last
   772  							// known state and the object no longer exists.
   773  							ciliumNode = k8s.CopyObjToCiliumNode(deletedObj.Obj)
   774  							if ciliumNode == nil {
   775  								return
   776  							}
   777  						}
   778  						valid = true
   779  						n := node.ParseCiliumNode(ciliumNode)
   780  						serNodes.Enqueue(func() error {
   781  							d.nodeDiscovery.Manager.NodeDeleted(n)
   782  							return nil
   783  						}, serializer.NoRetry)
   784  					},
   785  				},
   786  				k8s.ConvertToCiliumNode,
   787  			)
   788  			isConnected := make(chan struct{})
   789  			// once isConnected is closed, it will stop waiting on caches to be
   790  			// synchronized.
   791  			d.blockWaitGroupToSyncResources(isConnected, ciliumNodeInformer, k8sAPIGroupCiliumNodeV2)
   792  
   793  			once.Do(func() {
    794  				// Signal that the CiliumNode controller has been added to the
    795  				// wait group used to sync resources.
   796  				asyncControllers.Done()
   797  			})
   798  			d.k8sAPIGroups.addAPI(k8sAPIGroupCiliumNodeV2)
   799  			go ciliumNodeInformer.Run(isConnected)
   800  
   801  			<-kvstore.Client().Connected()
   802  			close(isConnected)
   803  
   804  			log.Info("Connected to key-value store, stopping CiliumNode watcher")
   805  
   806  			d.k8sAPIGroups.removeAPI(k8sAPIGroupCiliumNodeV2)
    807  			// Create a new CiliumNode controller when we are disconnected from
    808  			// the kvstore.
   809  			<-kvstore.Client().Disconnected()
   810  
   811  			log.Info("Disconnected from key-value store, restarting CiliumNode watcher")
   812  		}
   813  	}()
   814  
   815  	asyncControllers.Add(1)
   816  
   817  	// CiliumEndpoint objects are used for ipcache discovery until the
   818  	// key-value store is connected
   819  	go func() {
   820  		var once sync.Once
   821  		for {
   822  			_, ciliumEndpointInformer := informer.NewInformer(
   823  				cache.NewListWatchFromClient(ciliumNPClient.CiliumV2().RESTClient(),
   824  					"ciliumendpoints", v1.NamespaceAll, fields.Everything()),
   825  				&cilium_v2.CiliumEndpoint{},
   826  				0,
   827  				cache.ResourceEventHandlerFuncs{
   828  					AddFunc: func(obj interface{}) {
   829  						var valid, equal bool
   830  						defer func() { d.K8sEventReceived(metricCiliumEndpoint, metricCreate, valid, equal) }()
   831  						if ciliumEndpoint, ok := obj.(*types.CiliumEndpoint); ok {
   832  							valid = true
   833  							endpoint := ciliumEndpoint.DeepCopy()
   834  							serCiliumEndpoints.Enqueue(func() error {
   835  								endpointUpdated(endpoint)
   836  								d.K8sEventProcessed(metricCiliumEndpoint, metricCreate, true)
   837  								return nil
   838  							}, serializer.NoRetry)
   839  						}
   840  					},
   841  					UpdateFunc: func(oldObj, newObj interface{}) {
   842  						var valid, equal bool
   843  						defer func() { d.K8sEventReceived(metricCiliumEndpoint, metricUpdate, valid, equal) }()
   844  						if ciliumEndpoint, ok := newObj.(*types.CiliumEndpoint); ok {
   845  							valid = true
   846  							endpoint := ciliumEndpoint.DeepCopy()
   847  							serCiliumEndpoints.Enqueue(func() error {
   848  								endpointUpdated(endpoint)
   849  								d.K8sEventProcessed(metricCiliumEndpoint, metricUpdate, true)
   850  								return nil
   851  							}, serializer.NoRetry)
   852  						}
   853  					},
   854  					DeleteFunc: func(obj interface{}) {
   855  						var valid, equal bool
   856  						defer func() { d.K8sEventReceived(metricCiliumEndpoint, metricDelete, valid, equal) }()
   857  						ciliumEndpoint := k8s.CopyObjToCiliumEndpoint(obj)
   858  						if ciliumEndpoint == nil {
   859  							deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   860  							if !ok {
   861  								return
   862  							}
   863  							// Delete was not observed by the watcher but is
   864  							// removed from kube-apiserver. This is the last
   865  							// known state and the object no longer exists.
   866  							ciliumEndpoint = k8s.CopyObjToCiliumEndpoint(deletedObj.Obj)
   867  							if ciliumEndpoint == nil {
   868  								return
   869  							}
   870  						}
   871  						valid = true
   872  						serCiliumEndpoints.Enqueue(func() error {
   873  							endpointDeleted(ciliumEndpoint)
   874  							return nil
   875  						}, serializer.NoRetry)
   876  					},
   877  				},
   878  				k8s.ConvertToCiliumEndpoint,
   879  			)
   880  			isConnected := make(chan struct{})
   881  			// once isConnected is closed, it will stop waiting on caches to be
   882  			// synchronized.
   883  			d.blockWaitGroupToSyncResources(isConnected, ciliumEndpointInformer, k8sAPIGroupCiliumEndpointV2)
   884  
   885  			once.Do(func() {
    886  				// Signal that the CiliumEndpoint controller has been added to the
    887  				// wait group used to sync resources.
   888  				asyncControllers.Done()
   889  			})
   890  			d.k8sAPIGroups.addAPI(k8sAPIGroupCiliumEndpointV2)
   891  			go ciliumEndpointInformer.Run(isConnected)
   892  
   893  			<-kvstore.Client().Connected()
   894  			close(isConnected)
   895  
   896  			log.Info("Connected to key-value store, stopping CiliumEndpoint watcher")
   897  
   898  			d.k8sAPIGroups.removeAPI(k8sAPIGroupCiliumEndpointV2)
    899  			// Create a new CiliumEndpoint controller when we are disconnected
    900  			// from the kvstore.
   901  			<-kvstore.Client().Disconnected()
   902  
   903  			log.Info("Disconnected from key-value store, restarting CiliumEndpoint watcher")
   904  		}
   905  	}()
   906  
   907  	asyncControllers.Add(1)
   908  	go func() {
   909  		var once sync.Once
   910  		for {
   911  			createPodController := func(fieldSelector fields.Selector) cache.Controller {
   912  				_, podController := informer.NewInformer(
   913  					cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
   914  						"pods", v1.NamespaceAll, fieldSelector),
   915  					&v1.Pod{},
   916  					0,
   917  					cache.ResourceEventHandlerFuncs{
   918  						AddFunc: func(obj interface{}) {
   919  							var valid, equal bool
   920  							defer func() { d.K8sEventReceived(metricPod, metricCreate, valid, equal) }()
   921  							if pod := k8s.CopyObjToV1Pod(obj); pod != nil {
   922  								valid = true
   923  								serPods.Enqueue(func() error {
   924  									err := d.addK8sPodV1(pod)
   925  									d.K8sEventProcessed(metricPod, metricCreate, err == nil)
   926  									return nil
   927  								}, serializer.NoRetry)
   928  							}
   929  						},
   930  						UpdateFunc: func(oldObj, newObj interface{}) {
   931  							var valid, equal bool
   932  							defer func() { d.K8sEventReceived(metricPod, metricUpdate, valid, equal) }()
   933  							if oldPod := k8s.CopyObjToV1Pod(oldObj); oldPod != nil {
   934  								valid = true
   935  								if newPod := k8s.CopyObjToV1Pod(newObj); newPod != nil {
   936  									if k8s.EqualV1Pod(oldPod, newPod) {
   937  										equal = true
   938  										return
   939  									}
   940  
   941  									serPods.Enqueue(func() error {
   942  										err := d.updateK8sPodV1(oldPod, newPod)
   943  										d.K8sEventProcessed(metricPod, metricUpdate, err == nil)
   944  										return nil
   945  									}, serializer.NoRetry)
   946  								}
   947  							}
   948  						},
   949  						DeleteFunc: func(obj interface{}) {
   950  							var valid, equal bool
   951  							defer func() { d.K8sEventReceived(metricPod, metricDelete, valid, equal) }()
   952  							if pod := k8s.CopyObjToV1Pod(obj); pod != nil {
   953  								valid = true
   954  								serPods.Enqueue(func() error {
   955  									err := d.deleteK8sPodV1(pod)
   956  									d.K8sEventProcessed(metricPod, metricDelete, err == nil)
   957  									return nil
   958  								}, serializer.NoRetry)
   959  							}
   960  						},
   961  					},
   962  					k8s.ConvertToPod,
   963  				)
   964  				return podController
   965  			}
   966  			podController := createPodController(fields.Everything())
   967  
   968  			isConnected := make(chan struct{})
   969  			// once isConnected is closed, it will stop waiting on caches to be
   970  			// synchronized.
   971  			d.blockWaitGroupToSyncResources(isConnected, podController, k8sAPIGroupPodV1Core)
   972  			once.Do(func() {
   973  				asyncControllers.Done()
   974  				d.k8sAPIGroups.addAPI(k8sAPIGroupPodV1Core)
   975  			})
   976  			go podController.Run(isConnected)
   977  
   978  			if !option.Config.K8sEventHandover {
   979  				return
   980  			}
   981  
    982  			// Replace the pod controller with one that only receives events for
    983  			// our own node once we are connected to the kvstore.
   984  
   985  			<-kvstore.Client().Connected()
   986  			close(isConnected)
   987  
   988  			log.WithField(logfields.Node, node.GetName()).Info("Connected to KVStore, watching for pod events on node")
   989  			// Only watch for pod events for our node.
   990  			podController = createPodController(fields.ParseSelectorOrDie("spec.nodeName=" + node.GetName()))
   991  			isConnected = make(chan struct{})
   992  			go podController.Run(isConnected)
   993  
    994  			// Create a new pod controller when we are disconnected from the
    995  			// kvstore.
   996  			<-kvstore.Client().Disconnected()
   997  			close(isConnected)
    998  			log.Info("Disconnected from KVStore, watching for pod events on all nodes")
   999  		}
  1000  	}()
  1001  
  1002  	_, namespaceController := informer.NewInformer(
  1003  		cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
  1004  			"namespaces", v1.NamespaceAll, fields.Everything()),
  1005  		&v1.Namespace{},
  1006  		0,
  1007  		cache.ResourceEventHandlerFuncs{
   1008  			// AddFunc does not matter since the endpoint will fetch
   1009  			// namespace labels when the endpoint is created.
   1010  			// DeleteFunc does not matter since, when a namespace is deleted, all
   1011  			// pods belonging to that namespace are also deleted.
  1012  			UpdateFunc: func(oldObj, newObj interface{}) {
  1013  				var valid, equal bool
  1014  				defer func() { d.K8sEventReceived(metricNS, metricUpdate, valid, equal) }()
  1015  				if oldNS := k8s.CopyObjToV1Namespace(oldObj); oldNS != nil {
  1016  					valid = true
  1017  					if newNS := k8s.CopyObjToV1Namespace(newObj); newNS != nil {
  1018  						if k8s.EqualV1Namespace(oldNS, newNS) {
  1019  							equal = true
  1020  							return
  1021  						}
  1022  
  1023  						serNamespaces.Enqueue(func() error {
  1024  							err := d.updateK8sV1Namespace(oldNS, newNS)
  1025  							d.K8sEventProcessed(metricNS, metricUpdate, err == nil)
  1026  							return nil
  1027  						}, serializer.NoRetry)
  1028  					}
  1029  				}
  1030  			},
  1031  		},
  1032  		k8s.ConvertToNamespace,
  1033  	)
  1034  
  1035  	go namespaceController.Run(wait.NeverStop)
  1036  	d.k8sAPIGroups.addAPI(k8sAPIGroupNamespaceV1Core)
  1037  
  1038  	asyncControllers.Wait()
  1039  
  1040  	return nil
  1041  }
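
// The informer wiring in EnableK8sWatcher repeats one pattern per resource. The
// hypothetical sketch below (not part of the original file) condenses it for a
// single Service add event: convert the object, account for the event, and
// serialize the actual work through a per-resource function queue.
func exampleServiceAddHandler(d *Daemon) cache.ResourceEventHandlerFuncs {
	serSvcs := serializer.NewFunctionQueue(1024)
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			var valid, equal bool
			// Event metrics are recorded whether or not the object was usable.
			defer func() { d.K8sEventReceived(metricService, metricCreate, valid, equal) }()
			if svc := k8s.CopyObjToV1Services(obj); svc != nil {
				valid = true
				// The handler body runs on the serializer queue so events for
				// this resource are processed in order, without retries.
				serSvcs.Enqueue(func() error {
					err := d.addK8sServiceV1(svc)
					d.K8sEventProcessed(metricService, metricCreate, err == nil)
					return nil
				}, serializer.NoRetry)
			}
		},
	}
}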
  1042  
  1043  func (d *Daemon) addK8sNetworkPolicyV1(k8sNP *types.NetworkPolicy) error {
  1044  	scopedLog := log.WithField(logfields.K8sAPIVersion, k8sNP.TypeMeta.APIVersion)
  1045  	rules, err := k8s.ParseNetworkPolicy(k8sNP.NetworkPolicy)
  1046  	if err != nil {
  1047  		scopedLog.WithError(err).WithFields(logrus.Fields{
  1048  			logfields.CiliumNetworkPolicy: logfields.Repr(k8sNP),
   1049  		}).Error("Error while parsing k8s NetworkPolicy")
  1050  		return err
  1051  	}
  1052  	scopedLog = scopedLog.WithField(logfields.K8sNetworkPolicyName, k8sNP.ObjectMeta.Name)
  1053  
  1054  	opts := AddOptions{Replace: true, Source: metrics.LabelEventSourceK8s}
  1055  	if _, err := d.PolicyAdd(rules, &opts); err != nil {
  1056  		scopedLog.WithError(err).WithFields(logrus.Fields{
  1057  			logfields.CiliumNetworkPolicy: logfields.Repr(rules),
  1058  		}).Error("Unable to add NetworkPolicy rules to policy repository")
  1059  		return err
  1060  	}
  1061  
  1062  	scopedLog.Info("NetworkPolicy successfully added")
  1063  	return nil
  1064  }
  1065  
  1066  func (d *Daemon) updateK8sNetworkPolicyV1(oldk8sNP, newk8sNP *types.NetworkPolicy) error {
  1067  	log.WithFields(logrus.Fields{
  1068  		logfields.K8sAPIVersion:                 oldk8sNP.TypeMeta.APIVersion,
  1069  		logfields.K8sNetworkPolicyName + ".old": oldk8sNP.ObjectMeta.Name,
  1070  		logfields.K8sNamespace + ".old":         oldk8sNP.ObjectMeta.Namespace,
  1071  		logfields.K8sNetworkPolicyName:          newk8sNP.ObjectMeta.Name,
  1072  		logfields.K8sNamespace:                  newk8sNP.ObjectMeta.Namespace,
  1073  	}).Debug("Received policy update")
  1074  
  1075  	return d.addK8sNetworkPolicyV1(newk8sNP)
  1076  }
  1077  
  1078  func (d *Daemon) deleteK8sNetworkPolicyV1(k8sNP *types.NetworkPolicy) error {
  1079  	labels := k8s.GetPolicyLabelsv1(k8sNP.NetworkPolicy)
  1080  
  1081  	if labels == nil {
  1082  		log.Fatalf("provided v1 NetworkPolicy is nil, so cannot delete it")
  1083  	}
  1084  
  1085  	scopedLog := log.WithFields(logrus.Fields{
  1086  		logfields.K8sNetworkPolicyName: k8sNP.ObjectMeta.Name,
  1087  		logfields.K8sNamespace:         k8sNP.ObjectMeta.Namespace,
  1088  		logfields.K8sAPIVersion:        k8sNP.TypeMeta.APIVersion,
  1089  		logfields.Labels:               logfields.Repr(labels),
  1090  	})
  1091  	if _, err := d.PolicyDelete(labels); err != nil {
  1092  		scopedLog.WithError(err).Error("Error while deleting k8s NetworkPolicy")
  1093  		return err
  1094  	}
  1095  
  1096  	scopedLog.Info("NetworkPolicy successfully removed")
  1097  	return nil
  1098  }
  1099  
  1100  func (d *Daemon) k8sServiceHandler() {
  1101  	for {
  1102  		event, ok := <-d.k8sSvcCache.Events
  1103  		if !ok {
  1104  			return
  1105  		}
  1106  
  1107  		svc := event.Service
  1108  
  1109  		scopedLog := log.WithFields(logrus.Fields{
  1110  			logfields.K8sSvcName:   event.ID.Name,
  1111  			logfields.K8sNamespace: event.ID.Namespace,
  1112  		})
  1113  
  1114  		scopedLog.WithFields(logrus.Fields{
  1115  			"action":      event.Action.String(),
  1116  			"service":     event.Service.String(),
  1117  			"old-service": event.OldService.String(),
  1118  			"endpoints":   event.Endpoints.String(),
  1119  		}).Debug("Kubernetes service definition changed")
  1120  
  1121  		switch event.Action {
  1122  		case k8s.UpdateService, k8s.UpdateIngress:
  1123  			if err := d.addK8sSVCs(event.ID, event.OldService, svc, event.Endpoints); err != nil {
  1124  				scopedLog.WithError(err).Error("Unable to add/update service to implement k8s event")
  1125  			}
  1126  
  1127  			if !svc.IsExternal() {
  1128  				continue
  1129  			}
  1130  
  1131  			translator := k8s.NewK8sTranslator(event.ID, *event.Endpoints, false, svc.Labels, bpfIPCache.IPCache)
  1132  			result, err := d.policy.TranslateRules(translator)
  1133  			if err != nil {
  1134  				log.Errorf("Unable to repopulate egress policies from ToService rules: %v", err)
  1135  				break
  1136  			} else if result.NumToServicesRules > 0 {
  1137  				// Only trigger policy updates if ToServices rules are in effect
  1138  				d.TriggerPolicyUpdates(true, "Kubernetes service endpoint added")
  1139  			}
  1140  
  1141  		case k8s.DeleteService, k8s.DeleteIngress:
  1142  			if err := d.delK8sSVCs(event.ID, event.Service, event.Endpoints); err != nil {
  1143  				scopedLog.WithError(err).Error("Unable to delete service to implement k8s event")
  1144  			}
  1145  
  1146  			if !svc.IsExternal() {
  1147  				continue
  1148  			}
  1149  
  1150  			translator := k8s.NewK8sTranslator(event.ID, *event.Endpoints, true, svc.Labels, bpfIPCache.IPCache)
  1151  			result, err := d.policy.TranslateRules(translator)
  1152  			if err != nil {
  1153  				log.Errorf("Unable to depopulate egress policies from ToService rules: %v", err)
  1154  				break
  1155  			} else if result.NumToServicesRules > 0 {
  1156  				// Only trigger policy updates if ToServices rules are in effect
  1157  				d.TriggerPolicyUpdates(true, "Kubernetes service endpoint deleted")
  1158  			}
  1159  		}
  1160  	}
  1161  }
  1162  
  1163  func (d *Daemon) runK8sServiceHandler() {
  1164  	go d.k8sServiceHandler()
  1165  }
  1166  
  1167  func (d *Daemon) addK8sServiceV1(svc *types.Service) error {
  1168  	d.k8sSvcCache.UpdateService(svc)
  1169  	return nil
  1170  }
  1171  
  1172  func (d *Daemon) updateK8sServiceV1(oldSvc, newSvc *types.Service) error {
  1173  	return d.addK8sServiceV1(newSvc)
  1174  }
  1175  
  1176  func (d *Daemon) deleteK8sServiceV1(svc *types.Service) error {
  1177  	d.k8sSvcCache.DeleteService(svc)
  1178  	return nil
  1179  }
  1180  
  1181  func (d *Daemon) addK8sEndpointV1(ep *types.Endpoints) error {
  1182  	d.k8sSvcCache.UpdateEndpoints(ep)
  1183  	return nil
  1184  }
  1185  
  1186  func (d *Daemon) updateK8sEndpointV1(oldEP, newEP *types.Endpoints) error {
  1187  	d.k8sSvcCache.UpdateEndpoints(newEP)
  1188  	return nil
  1189  }
  1190  
  1191  func (d *Daemon) deleteK8sEndpointV1(ep *types.Endpoints) error {
  1192  	d.k8sSvcCache.DeleteEndpoints(ep)
  1193  	return nil
  1194  }
  1195  
  1196  func (d *Daemon) delK8sSVCs(svc k8s.ServiceID, svcInfo *k8s.Service, se *k8s.Endpoints) error {
   1197  	// If east-west load balancing is disabled, we should not sync (add or delete)
   1198  	// K8s services to Cilium services.
  1199  	if option.Config.DisableK8sServices {
  1200  		return nil
  1201  	}
  1202  
  1203  	// Headless services do not need any datapath implementation
  1204  	if svcInfo.IsHeadless {
  1205  		return nil
  1206  	}
  1207  
  1208  	scopedLog := log.WithFields(logrus.Fields{
  1209  		logfields.K8sSvcName:   svc.Name,
  1210  		logfields.K8sNamespace: svc.Namespace,
  1211  	})
  1212  
  1213  	repPorts := svcInfo.UniquePorts()
  1214  
  1215  	frontends := []*loadbalancer.L3n4AddrID{}
  1216  
  1217  	for portName, svcPort := range svcInfo.Ports {
  1218  		if !repPorts[svcPort.Port] {
  1219  			continue
  1220  		}
  1221  		repPorts[svcPort.Port] = false
  1222  
  1223  		fe := loadbalancer.NewL3n4AddrID(svcPort.Protocol, svcInfo.FrontendIP, svcPort.Port, loadbalancer.ID(svcPort.ID))
  1224  		frontends = append(frontends, fe)
  1225  
  1226  		for _, nodePortFE := range svcInfo.NodePorts[portName] {
  1227  			frontends = append(frontends, nodePortFE)
  1228  		}
  1229  	}
  1230  
  1231  	for _, fe := range frontends {
  1232  		if fe.ID != 0 {
  1233  			if err := service.DeleteID(uint32(fe.ID)); err != nil {
  1234  				scopedLog.WithError(err).Warn("Error while cleaning service ID")
  1235  			}
  1236  		}
  1237  
  1238  		if err := d.svcDeleteByFrontend(&fe.L3n4Addr); err != nil {
  1239  			l := scopedLog.WithError(err).WithField(logfields.Object, logfields.Repr(fe))
  1240  			msg := "Error deleting service by frontend"
  1241  			if _, ok := err.(*errSVCNotFound); ok {
  1242  				l.Info(msg)
  1243  			} else {
  1244  				l.Warn(msg)
  1245  			}
  1246  			continue
  1247  		} else {
  1248  			scopedLog.Debugf("# cilium lb delete-service %s %d 0", fe.IP, fe.Port)
  1249  		}
  1250  
  1251  		if err := d.RevNATDelete(loadbalancer.ServiceID(fe.ID)); err != nil {
  1252  			scopedLog.WithError(err).WithField(logfields.ServiceID, fe.ID).Warn("Error deleting reverse NAT")
  1253  		} else {
  1254  			scopedLog.Debugf("# cilium lb delete-rev-nat %d", fe.ID)
  1255  		}
  1256  	}
  1257  	return nil
  1258  }
  1259  
  1260  func genCartesianProduct(
  1261  	scopedLog *logrus.Entry,
  1262  	fe net.IP,
  1263  	isNodePort bool,
  1264  	ports map[loadbalancer.FEPortName]*loadbalancer.FEPort,
  1265  	bes *k8s.Endpoints,
  1266  ) []loadbalancer.LBSVC {
  1267  
  1268  	var svcs []loadbalancer.LBSVC
  1269  
  1270  	for fePortName, fePort := range ports {
  1271  
  1272  		if fePort.ID == 0 {
  1273  			feAddr := loadbalancer.NewL3n4Addr(fePort.Protocol, fe, fePort.Port)
  1274  			feAddrID, err := service.AcquireID(*feAddr, 0)
  1275  			if err != nil {
  1276  				scopedLog.WithError(err).WithFields(logrus.Fields{
  1277  					logfields.ServiceID: fePortName,
  1278  					logfields.IPAddr:    fe,
  1279  					logfields.Port:      fePort.Port,
  1280  					logfields.Protocol:  fePort.Protocol,
  1281  				}).Error("Error while getting a new service ID. Ignoring service...")
  1282  				continue
  1283  			}
  1284  			scopedLog.WithFields(logrus.Fields{
  1285  				logfields.ServiceName: fePortName,
  1286  				logfields.ServiceID:   feAddrID.ID,
  1287  				logfields.Object:      logfields.Repr(fe),
  1288  			}).Debug("Got feAddr ID for service")
  1289  			fePort.ID = loadbalancer.ServiceID(feAddrID.ID)
  1290  		}
  1291  
  1292  		var besValues []loadbalancer.LBBackEnd
  1293  		for ip, portConfiguration := range bes.Backends {
  1294  			if backendPort := portConfiguration[string(fePortName)]; backendPort != nil {
  1295  				besValues = append(besValues, loadbalancer.LBBackEnd{
  1296  					L3n4Addr: loadbalancer.L3n4Addr{
  1297  						IP:     net.ParseIP(ip),
  1298  						L4Addr: *backendPort,
  1299  					},
  1300  					Weight: 0,
  1301  				})
  1302  			}
  1303  		}
  1304  
  1305  		svcs = append(svcs,
  1306  			loadbalancer.LBSVC{
  1307  				FE: loadbalancer.L3n4AddrID{
  1308  					L3n4Addr: loadbalancer.L3n4Addr{
  1309  						IP: fe,
  1310  						L4Addr: loadbalancer.L4Addr{
  1311  							Protocol: fePort.Protocol,
  1312  							Port:     fePort.Port,
  1313  						},
  1314  					},
  1315  					ID: loadbalancer.ID(fePort.ID),
  1316  				},
  1317  				BES:      besValues,
  1318  				NodePort: isNodePort,
  1319  			})
  1320  	}
  1321  	return svcs
  1322  }
  1323  
  1324  // datapathSVCs returns all services that should be set in the datapath.
  1325  func datapathSVCs(
  1326  	scopedLog *logrus.Entry,
  1327  	svc *k8s.Service,
  1328  	endpoints *k8s.Endpoints) (svcs []loadbalancer.LBSVC) {
  1329  
  1330  	uniqPorts := svc.UniquePorts()
  1331  
  1332  	clusterIPPorts := map[loadbalancer.FEPortName]*loadbalancer.FEPort{}
  1333  	for fePortName, fePort := range svc.Ports {
  1334  		if !uniqPorts[fePort.Port] {
  1335  			continue
  1336  		}
  1337  		uniqPorts[fePort.Port] = false
  1338  		clusterIPPorts[fePortName] = fePort
  1339  	}
  1340  	if svc.FrontendIP != nil {
  1341  		dpSVC := genCartesianProduct(scopedLog, svc.FrontendIP, false, clusterIPPorts, endpoints)
  1342  		svcs = append(svcs, dpSVC...)
  1343  	}
  1344  
  1345  	for fePortName := range clusterIPPorts {
  1346  		for _, nodePortFE := range svc.NodePorts[fePortName] {
  1347  			nodePortPorts := map[loadbalancer.FEPortName]*loadbalancer.FEPort{
  1348  				fePortName: {
  1349  					L4Addr: &nodePortFE.L4Addr,
  1350  				},
  1351  			}
  1352  			dpSVC := genCartesianProduct(scopedLog, nodePortFE.IP, true, nodePortPorts, endpoints)
  1353  			svcs = append(svcs, dpSVC...)
  1354  		}
  1355  	}
  1356  	return svcs
  1357  }
  1358  
   1359  // hashSVCMap returns a map from each frontend's hash to its corresponding
   1360  // L3n4Addr value.
  1361  func hashSVCMap(svcs []loadbalancer.LBSVC) map[string]loadbalancer.L3n4Addr {
  1362  	m := map[string]loadbalancer.L3n4Addr{}
  1363  	for _, svc := range svcs {
  1364  		m[svc.FE.L3n4Addr.SHA256Sum()] = svc.FE.L3n4Addr
  1365  	}
  1366  	return m
  1367  }
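
// Hedged sketch, not part of the original file: how hashSVCMap supports the
// frontend diffing performed in addK8sSVCs below. Frontends present in the old
// service but absent from the new one are the ones to delete from the datapath.
func exampleStaleFrontends(oldSVCs, newSVCs []loadbalancer.LBSVC) []loadbalancer.L3n4Addr {
	oldMap, newMap := hashSVCMap(oldSVCs), hashSVCMap(newSVCs)

	var stale []loadbalancer.L3n4Addr
	for hash, fe := range oldMap {
		if _, ok := newMap[hash]; !ok {
			stale = append(stale, fe)
		}
	}
	return stale
}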
  1368  
  1369  func (d *Daemon) addK8sSVCs(svcID k8s.ServiceID, oldSvc, svc *k8s.Service, endpoints *k8s.Endpoints) error {
   1370  	// If east-west load balancing is disabled, we should not sync (add or delete)
   1371  	// K8s services to Cilium services.
  1372  	if option.Config.DisableK8sServices {
  1373  		return nil
  1374  	}
  1375  
  1376  	// Headless services do not need any datapath implementation
  1377  	if svc.IsHeadless {
  1378  		return nil
  1379  	}
  1380  
  1381  	scopedLog := log.WithFields(logrus.Fields{
  1382  		logfields.K8sSvcName:   svcID.Name,
  1383  		logfields.K8sNamespace: svcID.Namespace,
  1384  	})
  1385  
  1386  	svcs := datapathSVCs(scopedLog, svc, endpoints)
  1387  	svcMap := hashSVCMap(svcs)
  1388  
  1389  	if oldSvc != nil {
   1390  		// If we have an old service then we need to detect which frontends
   1391  		// are no longer in the updated service and delete them from the datapath.
  1392  
  1393  		oldSVCs := datapathSVCs(scopedLog, oldSvc, endpoints)
  1394  		oldSVCMap := hashSVCMap(oldSVCs)
  1395  
  1396  		for svcHash, oldSvc := range oldSVCMap {
  1397  			if _, ok := svcMap[svcHash]; !ok {
  1398  				if err := d.svcDeleteByFrontend(&oldSvc); err != nil {
  1399  					scopedLog.WithError(err).WithField(logfields.Object, logfields.Repr(oldSvc)).
  1400  						Warn("Error deleting service by frontend")
  1401  				} else {
  1402  					scopedLog.Debugf("# cilium lb delete-service %s %d 0", oldSvc.IP, oldSvc.Port)
  1403  				}
  1404  			}
  1405  		}
  1406  	}
  1407  
  1408  	for _, dpSvc := range svcs {
  1409  		if _, err := d.svcAdd(dpSvc.FE, dpSvc.BES, true, dpSvc.NodePort); err != nil {
  1410  			scopedLog.WithError(err).Error("Error while inserting service in LB map")
  1411  		}
  1412  	}
  1413  	return nil
  1414  }
  1415  
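        // addIngressV1beta1 handles the addition of a Kubernetes Ingress. It adds
        // the ingress to the service cache with the local host address as the
        // frontend and then publishes that address in the ingress status.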
  1416  func (d *Daemon) addIngressV1beta1(ingress *types.Ingress) error {
  1417  	scopedLog := log.WithFields(logrus.Fields{
  1418  		logfields.K8sIngressName: ingress.ObjectMeta.Name,
  1419  		logfields.K8sAPIVersion:  ingress.TypeMeta.APIVersion,
  1420  		logfields.K8sNamespace:   ingress.ObjectMeta.Namespace,
  1421  	})
  1422  	scopedLog.Info("Kubernetes ingress added")
  1423  
  1424  	var host net.IP
  1425  	switch {
  1426  	case option.Config.EnableIPv4:
  1427  		host = option.Config.HostV4Addr
  1428  	case option.Config.EnableIPv6:
  1429  		host = option.Config.HostV6Addr
  1430  	default:
  1431  		return fmt.Errorf("either IPv4 or IPv6 must be enabled")
  1432  	}
  1433  
  1434  	_, err := d.k8sSvcCache.UpdateIngress(ingress, host)
  1435  	if err != nil {
  1436  		return err
  1437  	}
  1438  
  1439  	hostname, _ := os.Hostname()
  1440  	dpyCopyIngress := ingress.DeepCopy()
  1441  	dpyCopyIngress.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
  1442  		{
  1443  			IP:       host.String(),
  1444  			Hostname: hostname,
  1445  		},
  1446  	}
  1447  
  1448  	_, err = k8s.Client().ExtensionsV1beta1().Ingresses(dpyCopyIngress.ObjectMeta.Namespace).UpdateStatus(dpyCopyIngress.Ingress)
  1449  	if err != nil {
  1450  		scopedLog.WithError(err).WithFields(logrus.Fields{
  1451  			logfields.K8sIngress: dpyCopyIngress,
  1452  		}).Error("Unable to update status of ingress")
  1453  	}
  1454  	return err
  1455  }
  1456  
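        // updateIngressV1beta1 handles updates to a Kubernetes Ingress. Only Single
        // Service Ingresses are supported. On non-LB nodes the RevNAT entries for
        // the addresses published in the ingress status are installed; otherwise
        // the ingress is re-added if its backend service or port changed.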
  1457  func (d *Daemon) updateIngressV1beta1(oldIngress, newIngress *types.Ingress) error {
  1458  	scopedLog := log.WithFields(logrus.Fields{
  1459  		logfields.K8sIngressName + ".old": oldIngress.ObjectMeta.Name,
  1460  		logfields.K8sAPIVersion + ".old":  oldIngress.TypeMeta.APIVersion,
  1461  		logfields.K8sNamespace + ".old":   oldIngress.ObjectMeta.Namespace,
  1462  		logfields.K8sIngressName:          newIngress.ObjectMeta.Name,
  1463  		logfields.K8sAPIVersion:           newIngress.TypeMeta.APIVersion,
  1464  		logfields.K8sNamespace:            newIngress.ObjectMeta.Namespace,
  1465  	})
  1466  
  1467  	if oldIngress.Spec.Backend == nil || newIngress.Spec.Backend == nil {
  1468  		// We only support Single Service Ingress for now
  1469  		scopedLog.Warn("Cilium only supports Single Service Ingress for now, ignoring ingress")
  1470  		return nil
  1471  	}
  1472  
  1473  	// Add RevNAT to the BPF map for non-LB nodes when an LB node updates
  1474  	// the ingress status with its address.
  1475  	if !option.Config.IsLBEnabled() {
  1476  		port := newIngress.Spec.Backend.ServicePort.IntValue()
  1477  		for _, lb := range newIngress.Status.LoadBalancer.Ingress {
  1478  			ingressIP := net.ParseIP(lb.IP)
  1479  			if ingressIP == nil {
  1480  				continue
  1481  			}
  1482  			feAddr := loadbalancer.NewL3n4Addr(loadbalancer.TCP, ingressIP, uint16(port))
  1483  			feAddrID, err := service.AcquireID(*feAddr, 0)
  1484  			if err != nil {
  1485  				scopedLog.WithError(err).Error("Error while getting a new service ID. Ignoring ingress...")
  1486  				continue
  1487  			}
  1488  			scopedLog.WithFields(logrus.Fields{
  1489  				logfields.ServiceID: feAddrID.ID,
  1490  			}).Debug("Got service ID for ingress")
  1491  
  1492  			if err := d.RevNATAdd(loadbalancer.ServiceID(feAddrID.ID),
  1493  				feAddrID.L3n4Addr); err != nil {
  1494  				scopedLog.WithError(err).WithFields(logrus.Fields{
  1495  					logfields.ServiceID: feAddrID.ID,
  1496  					logfields.IPAddr:    feAddrID.L3n4Addr.IP,
  1497  					logfields.Port:      feAddrID.L3n4Addr.Port,
  1498  					logfields.Protocol:  feAddrID.L3n4Addr.Protocol,
  1499  				}).Error("Unable to add reverse NAT ID for ingress")
  1500  			}
  1501  		}
  1502  		return nil
  1503  	}
  1504  
  1505  	if oldIngress.Spec.Backend.ServiceName == newIngress.Spec.Backend.ServiceName &&
  1506  		oldIngress.Spec.Backend.ServicePort == newIngress.Spec.Backend.ServicePort {
  1507  		return nil
  1508  	}
  1509  
  1510  	return d.addIngressV1beta1(newIngress)
  1511  }
  1512  
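        // deleteIngressV1beta1 handles the deletion of a Kubernetes Ingress. It
        // removes the ingress from the service cache and, on non-LB nodes, deletes
        // the corresponding RevNAT entries.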
  1513  func (d *Daemon) deleteIngressV1beta1(ingress *types.Ingress) error {
  1514  	scopedLog := log.WithFields(logrus.Fields{
  1515  		logfields.K8sIngressName: ingress.ObjectMeta.Name,
  1516  		logfields.K8sAPIVersion:  ingress.TypeMeta.APIVersion,
  1517  		logfields.K8sNamespace:   ingress.ObjectMeta.Namespace,
  1518  	})
  1519  
  1520  	if ingress.Spec.Backend == nil {
  1521  		// We only support Single Service Ingress for now
  1522  		scopedLog.Warn("Cilium only supports Single Service Ingress for now, ignoring ingress deletion")
  1523  		return nil
  1524  	}
  1525  
  1526  	d.k8sSvcCache.DeleteIngress(ingress)
  1527  
  1528  	// Remove RevNAT from the BPF Map for non-LB nodes.
  1529  	if !option.Config.IsLBEnabled() {
  1530  		port := ingress.Spec.Backend.ServicePort.IntValue()
  1531  		for _, lb := range ingress.Status.LoadBalancer.Ingress {
  1532  			ingressIP := net.ParseIP(lb.IP)
  1533  			if ingressIP == nil {
  1534  				continue
  1535  			}
  1536  			feAddr := loadbalancer.NewL3n4Addr(loadbalancer.TCP, ingressIP, uint16(port))
  1537  			// This is the only way that we can get the service's ID
  1538  			// without accessing the KVStore.
  1539  			svc := d.svcGetBySHA256Sum(feAddr.SHA256Sum())
  1540  			if svc != nil {
  1541  				if err := d.RevNATDelete(loadbalancer.ServiceID(svc.FE.ID)); err != nil {
  1542  					scopedLog.WithError(err).WithFields(logrus.Fields{
  1543  						logfields.ServiceID: svc.FE.ID,
  1544  					}).Error("Error while removing RevNAT for ingress")
  1545  				}
  1546  			}
  1547  		}
  1548  		return nil
  1549  	}
  1550  
  1551  	return nil
  1552  }
  1553  
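        // updateCiliumNetworkPolicyV2AnnotationsOnly refreshes the node status of a
        // CiliumNetworkPolicy whose annotations changed but whose spec did not,
        // re-using the import metadata recorded when the policy was added.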
  1554  func (d *Daemon) updateCiliumNetworkPolicyV2AnnotationsOnly(ciliumNPClient clientset.Interface, ciliumV2Store cache.Store, cnp *types.SlimCNP) {
  1555  
  1556  	scopedLog := log.WithFields(logrus.Fields{
  1557  		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
  1558  		logfields.K8sAPIVersion:           cnp.TypeMeta.APIVersion,
  1559  		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
  1560  	})
  1561  
  1562  	scopedLog.Info("Updating node status due to annotations-only change to CiliumNetworkPolicy")
  1563  
  1564  	ctrlName := cnp.GetControllerName()
  1565  
  1566  	// Revision will *always* be populated because importMetadataCache is guaranteed
  1567  	// to be updated by addCiliumNetworkPolicyV2 before calls to
  1568  	// updateCiliumNetworkPolicyV2 are invoked.
  1569  	meta, _ := importMetadataCache.get(cnp)
  1570  	updateContext := &k8s.CNPStatusUpdateContext{
  1571  		CiliumNPClient:              ciliumNPClient,
  1572  		CiliumV2Store:               ciliumV2Store,
  1573  		NodeName:                    node.GetName(),
  1574  		NodeManager:                 d.nodeDiscovery.Manager,
  1575  		UpdateDuration:              spanstat.Start(),
  1576  		WaitForEndpointsAtPolicyRev: endpointmanager.WaitForEndpointsAtPolicyRev,
  1577  	}
  1578  
  1579  	k8sCM.UpdateController(ctrlName,
  1580  		controller.ControllerParams{
  1581  			DoFunc: func(ctx context.Context) error {
  1582  				return updateContext.UpdateStatus(ctx, cnp, meta.revision, meta.policyImportError)
  1583  			},
  1584  		})
  1585  
  1586  }
  1587  
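        // addCiliumNetworkPolicyV2 parses the given CiliumNetworkPolicy, imports its
        // rules into the policy repository, records the import metadata and, unless
        // CNP status updates are disabled, runs a controller to update the policy's
        // node status.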
  1588  func (d *Daemon) addCiliumNetworkPolicyV2(ciliumNPClient clientset.Interface, ciliumV2Store cache.Store, cnp *types.SlimCNP) error {
  1589  	scopedLog := log.WithFields(logrus.Fields{
  1590  		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
  1591  		logfields.K8sAPIVersion:           cnp.TypeMeta.APIVersion,
  1592  		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
  1593  	})
  1594  
  1595  	scopedLog.Debug("Adding CiliumNetworkPolicy")
  1596  
  1597  	var rev uint64
  1598  
  1599  	rules, policyImportErr := cnp.Parse()
  1600  	if policyImportErr == nil {
  1601  		policyImportErr = k8s.PreprocessRules(rules, &d.k8sSvcCache)
  1602  		// Replace all rules that share the same name, namespace and
  1603  		// resourceTypeCiliumNetworkPolicy labels.
  1604  		rev, policyImportErr = d.PolicyAdd(rules, &AddOptions{
  1605  			ReplaceWithLabels: cnp.GetIdentityLabels(),
  1606  			Source:            metrics.LabelEventSourceK8s,
  1607  		})
  1608  	}
  1609  
  1610  	if policyImportErr != nil {
  1611  		scopedLog.WithError(policyImportErr).Warn("Unable to add CiliumNetworkPolicy")
  1612  	} else {
  1613  		scopedLog.Info("Imported CiliumNetworkPolicy")
  1614  	}
  1615  
  1616  	// Upsert into the rule revision cache outside of the controller, because the
  1617  	// upsert *must* be synchronous so that, if we get an update for the CNP, the
  1618  	// cache is populated by the time updateCiliumNetworkPolicyV2 is invoked.
  1619  	importMetadataCache.upsert(cnp, rev, policyImportErr)
  1620  
  1621  	if !option.Config.DisableCNPStatusUpdates {
  1622  		updateContext := &k8s.CNPStatusUpdateContext{
  1623  			CiliumNPClient:              ciliumNPClient,
  1624  			CiliumV2Store:               ciliumV2Store,
  1625  			NodeName:                    node.GetName(),
  1626  			NodeManager:                 d.nodeDiscovery.Manager,
  1627  			UpdateDuration:              spanstat.Start(),
  1628  			WaitForEndpointsAtPolicyRev: endpointmanager.WaitForEndpointsAtPolicyRev,
  1629  		}
  1630  
  1631  		ctrlName := cnp.GetControllerName()
  1632  		k8sCM.UpdateController(ctrlName,
  1633  			controller.ControllerParams{
  1634  				DoFunc: func(ctx context.Context) error {
  1635  					return updateContext.UpdateStatus(ctx, cnp, rev, policyImportErr)
  1636  				},
  1637  			},
  1638  		)
  1639  	}
  1640  
  1641  	return policyImportErr
  1642  }
  1643  
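        // deleteCiliumNetworkPolicyV2 removes the rules of the given
        // CiliumNetworkPolicy from the policy repository, drops its import metadata
        // and stops the controller responsible for its status updates.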
  1644  func (d *Daemon) deleteCiliumNetworkPolicyV2(cnp *types.SlimCNP) error {
  1645  	scopedLog := log.WithFields(logrus.Fields{
  1646  		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
  1647  		logfields.K8sAPIVersion:           cnp.TypeMeta.APIVersion,
  1648  		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
  1649  	})
  1650  
  1651  	scopedLog.Debug("Deleting CiliumNetworkPolicy")
  1652  
  1653  	importMetadataCache.delete(cnp)
  1654  	ctrlName := cnp.GetControllerName()
  1655  	err := k8sCM.RemoveControllerAndWait(ctrlName)
  1656  	if err != nil {
  1657  		log.Debugf("Unable to remove controller %s: %s", ctrlName, err)
  1658  	}
  1659  
  1660  	_, err = d.PolicyDelete(cnp.GetIdentityLabels())
  1661  	if err == nil {
  1662  		scopedLog.Info("Deleted CiliumNetworkPolicy")
  1663  	} else {
  1664  		scopedLog.WithError(err).Warn("Unable to delete CiliumNetworkPolicy")
  1665  	}
  1666  	return err
  1667  }
  1668  
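        // updateCiliumNetworkPolicyV2 handles updates to a CiliumNetworkPolicy. When
        // CNP status updates are enabled and the spec is unchanged, only an
        // annotation change triggers a node status refresh; otherwise the policy is
        // re-imported via addCiliumNetworkPolicyV2.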
  1669  func (d *Daemon) updateCiliumNetworkPolicyV2(ciliumNPClient clientset.Interface,
  1670  	ciliumV2Store cache.Store,
  1671  	oldRuleCpy, newRuleCpy *types.SlimCNP) error {
  1672  
  1673  	_, err := oldRuleCpy.Parse()
  1674  	if err != nil {
  1675  		log.WithError(err).WithField(logfields.Object, logfields.Repr(oldRuleCpy)).
  1676  			Warn("Error parsing old CiliumNetworkPolicy rule")
  1677  		return err
  1678  	}
  1679  	_, err = newRuleCpy.Parse()
  1680  	if err != nil {
  1681  		log.WithError(err).WithField(logfields.Object, logfields.Repr(newRuleCpy)).
  1682  			Warn("Error parsing new CiliumNetworkPolicy rule")
  1683  		return err
  1684  	}
  1685  
  1686  	log.WithFields(logrus.Fields{
  1687  		logfields.K8sAPIVersion:                    oldRuleCpy.TypeMeta.APIVersion,
  1688  		logfields.CiliumNetworkPolicyName + ".old": oldRuleCpy.ObjectMeta.Name,
  1689  		logfields.K8sNamespace + ".old":            oldRuleCpy.ObjectMeta.Namespace,
  1690  		logfields.CiliumNetworkPolicyName:          newRuleCpy.ObjectMeta.Name,
  1691  		logfields.K8sNamespace:                     newRuleCpy.ObjectMeta.Namespace,
  1692  		"annotations.old":                          oldRuleCpy.ObjectMeta.Annotations,
  1693  		"annotations":                              newRuleCpy.ObjectMeta.Annotations,
  1694  	}).Debug("Modified CiliumNetworkPolicy")
  1695  
  1696  	// Do not re-add the rule to the policy repository if the spec remains unchanged.
  1697  	if !option.Config.DisableCNPStatusUpdates {
  1698  		if oldRuleCpy.SpecEquals(newRuleCpy.CiliumNetworkPolicy) {
  1699  			if !oldRuleCpy.AnnotationsEquals(newRuleCpy.CiliumNetworkPolicy) {
  1700  
  1701  				// Update annotations within a controller so the status of the update
  1702  				// is trackable from the list of running controllers, and so we do
  1703  				// not block subsequent policy lifecycle operations from Kubernetes
  1704  				// until the update is complete.
  1705  				oldCtrlName := oldRuleCpy.GetControllerName()
  1706  				newCtrlName := newRuleCpy.GetControllerName()
  1707  
  1708  				// In case the controller name changes between copies of rules,
  1709  				// remove old controller so we do not leak goroutines.
  1710  				if oldCtrlName != newCtrlName {
  1711  					err := k8sCM.RemoveController(oldCtrlName)
  1712  					if err != nil {
  1713  						log.Debugf("Unable to remove controller %s: %s", oldCtrlName, err)
  1714  					}
  1715  				}
  1716  				d.updateCiliumNetworkPolicyV2AnnotationsOnly(ciliumNPClient, ciliumV2Store, newRuleCpy)
  1717  			}
  1718  			return nil
  1719  		}
  1720  	}
  1721  
  1722  	return d.addCiliumNetworkPolicyV2(ciliumNPClient, ciliumV2Store, newRuleCpy)
  1723  }
  1724  
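        // updatePodHostIP maps the pod IP to the IP of the node hosting the pod in
        // the ipcache, using the reserved unmanaged identity until the real identity
        // is known. The first return value is true if the update was skipped.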
  1725  func (d *Daemon) updatePodHostIP(pod *types.Pod) (bool, error) {
  1726  	if pod.SpecHostNetwork {
  1727  		return true, fmt.Errorf("pod is using host networking")
  1728  	}
  1729  
  1730  	hostIP := net.ParseIP(pod.StatusHostIP)
  1731  	if hostIP == nil {
  1732  		return true, fmt.Errorf("no/invalid HostIP: %s", pod.StatusHostIP)
  1733  	}
  1734  
  1735  	podIP := net.ParseIP(pod.StatusPodIP)
  1736  	if podIP == nil {
  1737  		return true, fmt.Errorf("no/invalid PodIP: %s", pod.StatusPodIP)
  1738  	}
  1739  
  1740  	hostKey := node.GetIPsecKeyIdentity()
  1741  
  1742  	// Initial mapping of podIP <-> hostIP <-> identity. The mapping is
  1743  	// later updated once the allocator has determined the real identity.
  1744  	// If the endpoint remains unmanaged, the identity remains untouched.
  1745  	selfOwned := ipcache.IPIdentityCache.Upsert(pod.StatusPodIP, hostIP, hostKey, ipcache.Identity{
  1746  		ID:     identity.ReservedIdentityUnmanaged,
  1747  		Source: source.Kubernetes,
  1748  	})
  1749  	if !selfOwned {
  1750  		return true, fmt.Errorf("ipcache entry owned by kvstore or agent")
  1751  	}
  1752  
  1753  	return false, nil
  1754  }
  1755  
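        // deletePodHostIP removes the pod IP from the ipcache, provided the entry is
        // owned by the Kubernetes source. The first return value is true if the
        // deletion was skipped.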
  1756  func (d *Daemon) deletePodHostIP(pod *types.Pod) (bool, error) {
  1757  	if pod.SpecHostNetwork {
  1758  		return true, fmt.Errorf("pod is using host networking")
  1759  	}
  1760  
  1761  	podIP := net.ParseIP(pod.StatusPodIP)
  1762  	if podIP == nil {
  1763  		return true, fmt.Errorf("no/invalid PodIP: %s", pod.StatusPodIP)
  1764  	}
  1765  
  1766  	// A small race condition exists here, as the deletion could occur in
  1767  	// parallel based on another event, but it does not matter because the
  1768  	// identity is going away anyway.
  1769  	id, exists := ipcache.IPIdentityCache.LookupByIP(pod.StatusPodIP)
  1770  	if !exists {
  1771  		return true, fmt.Errorf("identity for IP does not exist in cache")
  1772  	}
  1773  
  1774  	if id.Source != source.Kubernetes {
  1775  		return true, fmt.Errorf("ipcache entry not owned by kubernetes source")
  1776  	}
  1777  
  1778  	ipcache.IPIdentityCache.Delete(pod.StatusPodIP, source.Kubernetes)
  1779  
  1780  	return false, nil
  1781  }
  1782  
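        // addK8sPodV1 handles the addition of a Kubernetes pod by inserting its
        // pod IP to host IP mapping into the ipcache.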
  1783  func (d *Daemon) addK8sPodV1(pod *types.Pod) error {
  1784  	logger := log.WithFields(logrus.Fields{
  1785  		logfields.K8sPodName:   pod.ObjectMeta.Name,
  1786  		logfields.K8sNamespace: pod.ObjectMeta.Namespace,
  1787  		"podIP":                pod.StatusPodIP,
  1788  		"hostIP":               pod.StatusHostIP,
  1789  	})
  1790  
  1791  	skipped, err := d.updatePodHostIP(pod)
  1792  	switch {
  1793  	case skipped:
  1794  		logger.WithError(err).Debug("Skipped ipcache map update on pod add")
  1795  		return nil
  1796  	case err != nil:
  1797  		msg := "Unable to update ipcache map entry on pod add"
  1798  		if err == errIPCacheOwnedByNonK8s {
  1799  			logger.WithError(err).Debug(msg)
  1800  		} else {
  1801  			logger.WithError(err).Warning(msg)
  1802  		}
  1803  	default:
  1804  		logger.Debug("Updated ipcache map entry on pod add")
  1805  	}
  1806  	return err
  1807  }
  1808  
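        // updateK8sPodV1 handles updates to a Kubernetes pod. It refreshes the
        // ipcache mapping and, if the pod labels changed, updates the identity
        // labels of the corresponding endpoint.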
  1809  func (d *Daemon) updateK8sPodV1(oldK8sPod, newK8sPod *types.Pod) error {
  1810  	if oldK8sPod == nil || newK8sPod == nil {
  1811  		return nil
  1812  	}
  1813  
  1814  	// The pod IP can never change; it can only switch from unassigned to
  1815  	// assigned.
  1816  	d.addK8sPodV1(newK8sPod)
  1817  
  1818  	// We only care about label updates
  1819  	oldPodLabels := oldK8sPod.GetLabels()
  1820  	newPodLabels := newK8sPod.GetLabels()
  1821  	if comparator.MapStringEquals(oldPodLabels, newPodLabels) {
  1822  		return nil
  1823  	}
  1824  
  1825  	podNSName := k8sUtils.GetObjNamespaceName(&newK8sPod.ObjectMeta)
  1826  
  1827  	podEP := endpointmanager.LookupPodName(podNSName)
  1828  	if podEP == nil {
  1829  		log.WithField("pod", podNSName).Debug("No running endpoint found for the given pod")
  1830  		return nil
  1831  	}
  1832  
  1833  	newLabels := labels.Map2Labels(newPodLabels, labels.LabelSourceK8s)
  1834  	newIdtyLabels, _ := labels.FilterLabels(newLabels)
  1835  	oldLabels := labels.Map2Labels(oldPodLabels, labels.LabelSourceK8s)
  1836  	oldIdtyLabels, _ := labels.FilterLabels(oldLabels)
  1837  
  1838  	err := podEP.ModifyIdentityLabels(newIdtyLabels, oldIdtyLabels)
  1839  	if err != nil {
  1840  		log.WithError(err).Debug("Error while updating endpoint with new labels")
  1841  		return err
  1842  	}
  1843  
  1844  	log.WithFields(logrus.Fields{
  1845  		logfields.EndpointID: podEP.GetID(),
  1846  		logfields.Labels:     logfields.Repr(newIdtyLabels),
  1847  	}).Debug("Update endpoint with new labels")
  1848  	return nil
  1849  }
  1850  
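        // deleteK8sPodV1 handles the deletion of a Kubernetes pod by removing its
        // entry from the ipcache.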
  1851  func (d *Daemon) deleteK8sPodV1(pod *types.Pod) error {
  1852  	logger := log.WithFields(logrus.Fields{
  1853  		logfields.K8sPodName:   pod.ObjectMeta.Name,
  1854  		logfields.K8sNamespace: pod.ObjectMeta.Namespace,
  1855  		"podIP":                pod.StatusPodIP,
  1856  		"hostIP":               pod.StatusHostIP,
  1857  	})
  1858  
  1859  	skipped, err := d.deletePodHostIP(pod)
  1860  	switch {
  1861  	case skipped:
  1862  		logger.WithError(err).Debug("Skipped ipcache map delete on pod delete")
  1863  	case err != nil:
  1864  		logger.WithError(err).Warning("Unable to delete ipcache map entry on pod delete")
  1865  	default:
  1866  		logger.Debug("Deleted ipcache map entry on pod delete")
  1867  	}
  1868  	return err
  1869  }
  1870  
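        // updateK8sV1Namespace handles label changes of a Kubernetes namespace by
        // propagating the new namespace meta labels to the identity labels of all
        // endpoints in that namespace.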
  1871  func (d *Daemon) updateK8sV1Namespace(oldNS, newNS *types.Namespace) error {
  1872  	if oldNS == nil || newNS == nil {
  1873  		return nil
  1874  	}
  1875  
  1876  	// We only care about label updates
  1877  	if comparator.MapStringEquals(oldNS.GetLabels(), newNS.GetLabels()) {
  1878  		return nil
  1879  	}
  1880  
  1881  	oldNSLabels := map[string]string{}
  1882  	newNSLabels := map[string]string{}
  1883  
  1884  	for k, v := range oldNS.GetLabels() {
  1885  		oldNSLabels[policy.JoinPath(ciliumio.PodNamespaceMetaLabels, k)] = v
  1886  	}
  1887  	for k, v := range newNS.GetLabels() {
  1888  		newNSLabels[policy.JoinPath(ciliumio.PodNamespaceMetaLabels, k)] = v
  1889  	}
  1890  
  1891  	oldLabels := labels.Map2Labels(oldNSLabels, labels.LabelSourceK8s)
  1892  	newLabels := labels.Map2Labels(newNSLabels, labels.LabelSourceK8s)
  1893  
  1894  	oldIdtyLabels, _ := labels.FilterLabels(oldLabels)
  1895  	newIdtyLabels, _ := labels.FilterLabels(newLabels)
  1896  
  1897  	eps := endpointmanager.GetEndpoints()
  1898  	failed := false
  1899  	for _, ep := range eps {
  1900  		epNS := ep.GetK8sNamespace()
  1901  		if oldNS.Name == epNS {
  1902  			err := ep.ModifyIdentityLabels(newIdtyLabels, oldIdtyLabels)
  1903  			if err != nil {
  1904  				log.WithError(err).WithField(logfields.EndpointID, ep.ID).
  1905  					Warningf("unable to update endpoint with new namespace labels")
  1906  				failed = true
  1907  			}
  1908  		}
  1909  	}
  1910  	if failed {
  1911  		return errors.New("unable to update some endpoints with new namespace labels")
  1912  	}
  1913  	return nil
  1914  }
  1915  
  1916  // K8sEventProcessed is called to do metrics accounting for each processed
  1917  // Kubernetes event
  1918  func (d *Daemon) K8sEventProcessed(scope string, action string, status bool) {
  1919  	result := "success"
  1920  	if !status {
  1921  		result = "failed"
  1922  	}
  1923  
  1924  	metrics.KubernetesEventProcessed.WithLabelValues(scope, action, result).Inc()
  1925  }
  1926  
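        // endpointUpdated upserts the addresses of an updated CiliumEndpoint into
        // the ipcache together with its identity, node IP and encryption key.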
  1927  func endpointUpdated(endpoint *types.CiliumEndpoint) {
  1928  	// Default to the node's standard IPsec key identity.
  1929  	encryptionKey := node.GetIPsecKeyIdentity()
  1930  
  1931  	id := identity.ReservedIdentityUnmanaged
  1932  	if endpoint.Identity != nil {
  1933  		id = identity.NumericIdentity(endpoint.Identity.ID)
  1934  	}
  1935  
  1936  	if endpoint.Encryption != nil {
  1937  		encryptionKey = uint8(endpoint.Encryption.Key)
  1938  	}
  1939  
  1940  	if endpoint.Networking != nil {
  1941  		if endpoint.Networking.NodeIP == "" {
  1942  			// When upgrading from an older version, the nodeIP may
  1943  			// not be available yet in the CiliumEndpoint and we
  1944  			// have to wait for it to be propagated
  1945  			return
  1946  		}
  1947  
  1948  		nodeIP := net.ParseIP(endpoint.Networking.NodeIP)
  1949  		if nodeIP == nil {
  1950  			log.WithField("nodeIP", endpoint.Networking.NodeIP).Warning("Unable to parse node IP while processing CiliumEndpoint update")
  1951  			return
  1952  		}
  1953  
  1954  		for _, pair := range endpoint.Networking.Addressing {
  1955  			if pair.IPV4 != "" {
  1956  				ipcache.IPIdentityCache.Upsert(pair.IPV4, nodeIP, encryptionKey,
  1957  					ipcache.Identity{ID: id, Source: source.CustomResource})
  1958  			}
  1959  
  1960  			if pair.IPV6 != "" {
  1961  				ipcache.IPIdentityCache.Upsert(pair.IPV6, nodeIP, encryptionKey,
  1962  					ipcache.Identity{ID: id, Source: source.CustomResource})
  1963  			}
  1964  		}
  1965  	}
  1966  }
  1967  
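        // endpointDeleted removes the addresses of a deleted CiliumEndpoint from the
        // ipcache.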
  1968  func endpointDeleted(endpoint *types.CiliumEndpoint) {
  1969  	if endpoint.Networking != nil {
  1970  		for _, pair := range endpoint.Networking.Addressing {
  1971  			if pair.IPV4 != "" {
  1972  				ipcache.IPIdentityCache.Delete(pair.IPV4, source.CustomResource)
  1973  			}
  1974  
  1975  			if pair.IPV6 != "" {
  1976  				ipcache.IPIdentityCache.Delete(pair.IPV6, source.CustomResource)
  1977  			}
  1978  		}
  1979  	}
  1980  }