github.com/fafucoder/cilium@v1.6.11/daemon/k8s_watcher.go

     1  // Copyright 2016-2019 Authors of Cilium
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package main
    16  
    17  import (
    18  	"context"
    19  	"errors"
    20  	"fmt"
    21  	"net"
    22  	"net/url"
    23  	"os"
    24  	"strconv"
    25  	"sync"
    26  	"time"
    27  
    28  	"github.com/cilium/cilium/pkg/comparator"
    29  	"github.com/cilium/cilium/pkg/controller"
    30  	"github.com/cilium/cilium/pkg/endpointmanager"
    31  	"github.com/cilium/cilium/pkg/identity"
    32  	"github.com/cilium/cilium/pkg/ipcache"
    33  	"github.com/cilium/cilium/pkg/k8s"
    34  	ciliumio "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
    35  	cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
    36  	clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
    37  	"github.com/cilium/cilium/pkg/k8s/informer"
    38  	k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics"
    39  	"github.com/cilium/cilium/pkg/k8s/types"
    40  	k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
    41  	k8sversion "github.com/cilium/cilium/pkg/k8s/version"
    42  	"github.com/cilium/cilium/pkg/kvstore"
    43  	"github.com/cilium/cilium/pkg/labels"
    44  	"github.com/cilium/cilium/pkg/loadbalancer"
    45  	"github.com/cilium/cilium/pkg/lock"
    46  	"github.com/cilium/cilium/pkg/logging/logfields"
    47  	bpfIPCache "github.com/cilium/cilium/pkg/maps/ipcache"
    48  	"github.com/cilium/cilium/pkg/metrics"
    49  	"github.com/cilium/cilium/pkg/node"
    50  	"github.com/cilium/cilium/pkg/option"
    51  	"github.com/cilium/cilium/pkg/policy"
    52  	"github.com/cilium/cilium/pkg/serializer"
    53  	"github.com/cilium/cilium/pkg/service"
    54  	"github.com/cilium/cilium/pkg/source"
    55  	"github.com/cilium/cilium/pkg/spanstat"
    56  
    57  	"github.com/sirupsen/logrus"
    58  	v1 "k8s.io/api/core/v1"
    59  	"k8s.io/api/extensions/v1beta1"
    60  	networkingv1 "k8s.io/api/networking/v1"
    61  	"k8s.io/apimachinery/pkg/fields"
    62  	"k8s.io/apimachinery/pkg/util/runtime"
    63  	"k8s.io/apimachinery/pkg/util/wait"
    64  	"k8s.io/client-go/tools/cache"
    65  	k8s_metrics "k8s.io/client-go/tools/metrics"
    66  )
    67  
    68  const (
    69  	k8sAPIGroupCRD                   = "CustomResourceDefinition"
    70  	k8sAPIGroupNodeV1Core            = "core/v1::Node"
    71  	k8sAPIGroupNamespaceV1Core       = "core/v1::Namespace"
    72  	k8sAPIGroupServiceV1Core         = "core/v1::Service"
    73  	k8sAPIGroupEndpointV1Core        = "core/v1::Endpoint"
    74  	k8sAPIGroupPodV1Core             = "core/v1::Pods"
    75  	k8sAPIGroupNetworkingV1Core      = "networking.k8s.io/v1::NetworkPolicy"
    76  	k8sAPIGroupIngressV1Beta1        = "extensions/v1beta1::Ingress"
    77  	k8sAPIGroupCiliumNetworkPolicyV2 = "cilium/v2::CiliumNetworkPolicy"
    78  	k8sAPIGroupCiliumNodeV2          = "cilium/v2::CiliumNode"
    79  	k8sAPIGroupCiliumEndpointV2      = "cilium/v2::CiliumEndpoint"
    80  	cacheSyncTimeout                 = 3 * time.Minute
    81  
    82  	metricCNP            = "CiliumNetworkPolicy"
    83  	metricEndpoint       = "Endpoint"
    84  	metricIngress        = "Ingress"
    85  	metricKNP            = "NetworkPolicy"
    86  	metricNS             = "Namespace"
    87  	metricCiliumNode     = "CiliumNode"
    88  	metricCiliumEndpoint = "CiliumEndpoint"
    89  	metricPod            = "Pod"
    90  	metricService        = "Service"
    91  	metricCreate         = "create"
    92  	metricDelete         = "delete"
    93  	metricUpdate         = "update"
    94  )
    95  
    96  var (
    97  	k8sCM = controller.NewManager()
    98  
    99  	importMetadataCache = ruleImportMetadataCache{
   100  		ruleImportMetadataMap: make(map[string]policyImportMetadata),
   101  	}
   102  
   103  	errIPCacheOwnedByNonK8s = fmt.Errorf("ipcache entry owned by kvstore or agent")
   104  )
   105  
   106  // ruleImportMetadataCache maps the unique identifier of a CiliumNetworkPolicy
   107  // (namespace and name) to metadata about the importing of the rule into the
   108  // agent's policy repository at the time the rule was imported (the revision
   109  // number and any error that occurred during import).
   110  type ruleImportMetadataCache struct {
   111  	mutex                 lock.RWMutex
   112  	ruleImportMetadataMap map[string]policyImportMetadata
   113  }
   114  
   115  type policyImportMetadata struct {
   116  	revision          uint64
   117  	policyImportError error
   118  }
   119  
   120  func (r *ruleImportMetadataCache) upsert(cnp *types.SlimCNP, revision uint64, importErr error) {
   121  	if cnp == nil {
   122  		return
   123  	}
   124  
   125  	meta := policyImportMetadata{
   126  		revision:          revision,
   127  		policyImportError: importErr,
   128  	}
   129  	podNSName := k8sUtils.GetObjNamespaceName(&cnp.ObjectMeta)
   130  
   131  	r.mutex.Lock()
   132  	r.ruleImportMetadataMap[podNSName] = meta
   133  	r.mutex.Unlock()
   134  }
   135  
   136  func (r *ruleImportMetadataCache) delete(cnp *types.SlimCNP) {
   137  	if cnp == nil {
   138  		return
   139  	}
   140  	podNSName := k8sUtils.GetObjNamespaceName(&cnp.ObjectMeta)
   141  
   142  	r.mutex.Lock()
   143  	delete(r.ruleImportMetadataMap, podNSName)
   144  	r.mutex.Unlock()
   145  }
   146  
   147  func (r *ruleImportMetadataCache) get(cnp *types.SlimCNP) (policyImportMetadata, bool) {
   148  	if cnp == nil {
   149  		return policyImportMetadata{}, false
   150  	}
   151  	podNSName := k8sUtils.GetObjNamespaceName(&cnp.ObjectMeta)
   152  	r.mutex.RLock()
   153  	policyImportMeta, ok := r.ruleImportMetadataMap[podNSName]
   154  	r.mutex.RUnlock()
   155  	return policyImportMeta, ok
   156  }
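
        // A minimal usage sketch of ruleImportMetadataCache (the cnp, rev and
        // importErr values here are hypothetical, not taken from this file):
        //
        //	cache := ruleImportMetadataCache{ruleImportMetadataMap: map[string]policyImportMetadata{}}
        //	cache.upsert(cnp, rev, importErr) // remember the revision/outcome of the import
        //	if meta, ok := cache.get(cnp); ok {
        //		_ = meta.revision
        //	}
        //	cache.delete(cnp) // drop the entry once the CNP is removed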
   157  
   158  // k8sAPIGroupsUsed is a lockable map that tracks which k8s API groups are
   159  // enabled/in use.
   160  // Note: it could be replaced with a sync.Map (Go 1.9+) once that Go version is required.
   161  type k8sAPIGroupsUsed struct {
   162  	lock.RWMutex
   163  	apis map[string]bool
   164  }
   165  
   166  func (m *k8sAPIGroupsUsed) addAPI(api string) {
   167  	m.Lock()
   168  	defer m.Unlock()
   169  	if m.apis == nil {
   170  		m.apis = make(map[string]bool)
   171  	}
   172  	m.apis[api] = true
   173  }
   174  
   175  func (m *k8sAPIGroupsUsed) removeAPI(api string) {
   176  	m.Lock()
   177  	defer m.Unlock()
   178  	delete(m.apis, api)
   179  }
   180  
   181  func (m *k8sAPIGroupsUsed) getGroups() []string {
   182  	m.RLock()
   183  	defer m.RUnlock()
   184  	groups := make([]string, 0, len(m.apis))
   185  	for k := range m.apis {
   186  		groups = append(groups, k)
   187  	}
   188  	return groups
   189  }
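
        // Illustrative only: how this set is used elsewhere in this file (addAPI
        // when a watcher starts, removeAPI when it stops, getGroups to list the
        // active ones):
        //
        //	var groups k8sAPIGroupsUsed
        //	groups.addAPI(k8sAPIGroupServiceV1Core)
        //	_ = groups.getGroups() // e.g. []string{"core/v1::Service"}
        //	groups.removeAPI(k8sAPIGroupServiceV1Core)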
   190  
   191  // k8sMetrics implements the LatencyMetric and ResultMetric interfaces from
   192  // the k8s client-go package
   193  type k8sMetrics struct{}
   194  
   195  func (*k8sMetrics) Observe(verb string, u url.URL, latency time.Duration) {
   196  	metrics.KubernetesAPIInteractions.WithLabelValues(u.Path, verb).Observe(latency.Seconds())
   197  }
   198  
   199  func (*k8sMetrics) Increment(code string, method string, host string) {
   200  	metrics.KubernetesAPICalls.WithLabelValues(host, method, code).Inc()
   201  	k8smetrics.LastInteraction.Reset()
   202  }
   203  
   204  func init() {
   205  	// Replace error handler with our own
   206  	runtime.ErrorHandlers = []func(error){
   207  		k8s.K8sErrorHandler,
   208  	}
   209  
   210  	k8sMetric := &k8sMetrics{}
   211  	k8s_metrics.Register(k8sMetric, k8sMetric)
   212  }
   213  
   214  // blockWaitGroupToSyncResources ensures that anything which waits on waitGroup
   215  // waits until all objects of the specified resource stored in Kubernetes are
   216  // received by the informer and processed by the controller.
   217  // Fatally exits if syncing these initial objects fails.
   218  // If the given stop channel is closed, it does not exit fatally.
   219  func (d *Daemon) blockWaitGroupToSyncResources(stop <-chan struct{}, informer cache.Controller, resourceName string) {
   220  	ch := make(chan struct{})
   221  	d.k8sResourceSyncedMu.Lock()
   222  	d.k8sResourceSynced[resourceName] = ch
   223  	d.k8sResourceSyncedMu.Unlock()
   224  	go func() {
   225  		scopedLog := log.WithField("kubernetesResource", resourceName)
   226  		scopedLog.Debug("waiting for cache to synchronize")
   227  		if ok := cache.WaitForCacheSync(stop, informer.HasSynced); !ok {
   228  			select {
   229  			case <-stop:
   230  				scopedLog.Debug("canceled cache synchronization")
   231  				// do not exit fatally if the stop channel was closed
   232  			default:
   233  				// Fatally exit if the resource fails to sync
   234  				scopedLog.Fatalf("failed to wait for cache to sync")
   235  			}
   236  		} else {
   237  			scopedLog.Debug("cache synced")
   238  		}
   239  		close(ch)
   240  	}()
   241  }
   242  
   243  // waitForCacheSync waits for the k8s caches of the given resources to be
   244  // synchronized. It returns once all resourceNames are synchronized with cilium-agent.
   245  func (d *Daemon) waitForCacheSync(resourceNames ...string) {
   246  	for _, resourceName := range resourceNames {
   247  		d.k8sResourceSyncedMu.RLock()
   248  		c, ok := d.k8sResourceSynced[resourceName]
   249  		d.k8sResourceSyncedMu.RUnlock()
   250  		if !ok {
   251  			continue
   252  		}
   253  		<-c
   254  	}
   255  }
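
        // Sketch of how the two helpers above are paired for a watcher; the
        // controller value is hypothetical here, the concrete wiring follows in
        // EnableK8sWatcher below:
        //
        //	d.blockWaitGroupToSyncResources(wait.NeverStop, someController, k8sAPIGroupServiceV1Core)
        //	go someController.Run(wait.NeverStop)
        //	// ... later, block until the initial list of that resource was processed:
        //	d.waitForCacheSync(k8sAPIGroupServiceV1Core)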
   256  
   257  // initK8sSubsystem returns a channel that is closed once all caches essential
   258  // to the daemon are synchronized.
   259  func (d *Daemon) initK8sSubsystem() <-chan struct{} {
   260  	if err := d.EnableK8sWatcher(option.Config.K8sWatcherQueueSize); err != nil {
   261  		log.WithError(err).Fatal("Unable to establish connection to Kubernetes apiserver")
   262  	}
   263  
   264  	cachesSynced := make(chan struct{})
   265  
   266  	go func() {
   267  		log.Info("Waiting until all pre-existing resources related to policy have been received")
   268  		// Wait only for certain caches, but not all!
   269  		// We don't wait for node synchronization nor for ingresses.
   270  		d.waitForCacheSync(
   271  			// To perform the service translation and have the BPF LB datapath
   272  			// with the right service -> backend (k8s endpoints) translation.
   273  			k8sAPIGroupServiceV1Core,
   274  			// To perform the service translation and have the BPF LB datapath
   275  			// with the right service -> backend (k8s endpoints) translation.
   276  			k8sAPIGroupEndpointV1Core,
   277  			// We need all network policies in place before restoring to make sure
   278  			// we are enforcing the correct policies for each endpoint before
   279  			// restarting.
   280  			k8sAPIGroupCiliumNetworkPolicyV2,
   281  			// We need to know about all other nodes.
   282  			k8sAPIGroupCiliumNodeV2,
   283  			// We need all network policies in place before restoring to make sure
   284  			// we are enforcing the correct policies for each endpoint before
   285  			// restarting.
   286  			k8sAPIGroupNetworkingV1Core,
   287  			// Namespaces can contain labels which are essential for endpoints
   288  			// being restored to have the right identity.
   289  			k8sAPIGroupNamespaceV1Core,
   290  			// Pods can contain labels which are essential for endpoints
   291  			// being restored to have the right identity.
   292  			k8sAPIGroupPodV1Core,
   293  		)
   294  		// CiliumEndpoints are used to synchronize the ipcache; wait for
   295  		// them unless the CRD is disabled.
   296  		if !option.Config.DisableCiliumEndpointCRD {
   297  			d.waitForCacheSync(k8sAPIGroupCiliumEndpointV2)
   298  		}
   299  		close(cachesSynced)
   300  	}()
   301  
   302  	go func() {
   303  		select {
   304  		case <-cachesSynced:
   305  			log.Info("All pre-existing resources related to policy have been received; continuing")
   306  		case <-time.After(cacheSyncTimeout):
   307  			log.Fatalf("Timed out waiting for pre-existing resources related to policy to be received; exiting")
   308  		}
   309  	}()
   310  
   311  	return cachesSynced
   312  }
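
        // The returned channel is meant to be consumed by the daemon bootstrap
        // code, roughly as follows (illustrative; the actual call site is outside
        // this file):
        //
        //	cachesSynced := d.initK8sSubsystem()
        //	<-cachesSynced // blocks until the policy-relevant caches are in sync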
   313  
   314  // K8sEventReceived does metric accounting for each received Kubernetes event
   315  func (d *Daemon) K8sEventReceived(scope string, action string, valid, equal bool) {
   316  	metrics.EventTSK8s.SetToCurrentTime()
   317  	k8smetrics.LastInteraction.Reset()
   318  
   319  	metrics.KubernetesEventReceived.WithLabelValues(scope, action, strconv.FormatBool(valid), strconv.FormatBool(equal)).Inc()
   320  }
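
        // Every informer handler below follows the same accounting pattern: valid
        // is set once the object converts to the expected type, equal when an
        // update carries no semantic change, and K8sEventProcessed is called after
        // the serialized handler has run. For example:
        //
        //	var valid, equal bool
        //	defer func() { d.K8sEventReceived(metricService, metricCreate, valid, equal) }()
        //	if svc := k8s.CopyObjToV1Services(obj); svc != nil {
        //		valid = true
        //		// ... enqueue work, then call d.K8sEventProcessed(metricService, metricCreate, err == nil)
        //	}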
   321  
   322  // EnableK8sWatcher watches for policy, service and endpoint changes on the
   323  // Kubernetes apiserver defined in the receiver's daemon k8sClient.
   324  // queueSize specifies the queue length used to serialize k8s events.
   325  func (d *Daemon) EnableK8sWatcher(queueSize uint) error {
   326  	if !k8s.IsEnabled() {
   327  		log.Debug("Not enabling k8s event listener because k8s is not enabled")
   328  		return nil
   329  	}
   330  	log.Info("Enabling k8s event listener")
   331  
   332  	d.k8sAPIGroups.addAPI(k8sAPIGroupCRD)
   333  
   334  	ciliumNPClient := k8s.CiliumClient()
   335  
   336  	serKNPs := serializer.NewFunctionQueue(queueSize)
   337  	serSvcs := serializer.NewFunctionQueue(queueSize)
   338  	serEps := serializer.NewFunctionQueue(queueSize)
   339  	serIngresses := serializer.NewFunctionQueue(queueSize)
   340  	serCNPs := serializer.NewFunctionQueue(queueSize)
   341  	serPods := serializer.NewFunctionQueue(queueSize)
   342  	serNodes := serializer.NewFunctionQueue(queueSize)
   343  	serCiliumEndpoints := serializer.NewFunctionQueue(queueSize)
   344  	serNamespaces := serializer.NewFunctionQueue(queueSize)
   345  
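        	// Each watched resource below is wired up the same way: build an
        	// informer with a conversion function, register it so callers can wait
        	// for its initial sync, start it, and record the API group as in use.
        	// Schematically (listWatch and handlers are placeholders), using the
        	// service watcher as the example:
        	//
        	//	_, ctrl := informer.NewInformer(listWatch, &v1.Service{}, 0, handlers, k8s.ConvertToK8sService)
        	//	d.blockWaitGroupToSyncResources(wait.NeverStop, ctrl, k8sAPIGroupServiceV1Core)
        	//	go ctrl.Run(wait.NeverStop)
        	//	d.k8sAPIGroups.addAPI(k8sAPIGroupServiceV1Core)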
   346  	_, policyController := informer.NewInformer(
   347  		cache.NewListWatchFromClient(k8s.Client().NetworkingV1().RESTClient(),
   348  			"networkpolicies", v1.NamespaceAll, fields.Everything()),
   349  		&networkingv1.NetworkPolicy{},
   350  		0,
   351  		cache.ResourceEventHandlerFuncs{
   352  			AddFunc: func(obj interface{}) {
   353  				var valid, equal bool
   354  				defer func() { d.K8sEventReceived(metricKNP, metricCreate, valid, equal) }()
   355  				if k8sNP := k8s.CopyObjToV1NetworkPolicy(obj); k8sNP != nil {
   356  					valid = true
   357  					serKNPs.Enqueue(func() error {
   358  						err := d.addK8sNetworkPolicyV1(k8sNP)
   359  						d.K8sEventProcessed(metricKNP, metricCreate, err == nil)
   360  						return nil
   361  					}, serializer.NoRetry)
   362  				}
   363  			},
   364  			UpdateFunc: func(oldObj, newObj interface{}) {
   365  				var valid, equal bool
   366  				defer func() { d.K8sEventReceived(metricKNP, metricUpdate, valid, equal) }()
   367  				if oldK8sNP := k8s.CopyObjToV1NetworkPolicy(oldObj); oldK8sNP != nil {
   368  					valid = true
   369  					if newK8sNP := k8s.CopyObjToV1NetworkPolicy(newObj); newK8sNP != nil {
   370  						if k8s.EqualV1NetworkPolicy(oldK8sNP, newK8sNP) {
   371  							equal = true
   372  							return
   373  						}
   374  
   375  						serKNPs.Enqueue(func() error {
   376  							err := d.updateK8sNetworkPolicyV1(oldK8sNP, newK8sNP)
   377  							d.K8sEventProcessed(metricKNP, metricUpdate, err == nil)
   378  							return nil
   379  						}, serializer.NoRetry)
   380  					}
   381  				}
   382  			},
   383  			DeleteFunc: func(obj interface{}) {
   384  				var valid, equal bool
   385  				defer func() { d.K8sEventReceived(metricKNP, metricDelete, valid, equal) }()
   386  				k8sNP := k8s.CopyObjToV1NetworkPolicy(obj)
   387  				if k8sNP == nil {
   388  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   389  					if !ok {
   390  						return
   391  					}
   392  					// Delete was not observed by the watcher but is
   393  					// removed from kube-apiserver. This is the last
   394  					// known state and the object no longer exists.
   395  					k8sNP = k8s.CopyObjToV1NetworkPolicy(deletedObj.Obj)
   396  					if k8sNP == nil {
   397  						return
   398  					}
   399  				}
   400  
   401  				valid = true
   402  				serKNPs.Enqueue(func() error {
   403  					err := d.deleteK8sNetworkPolicyV1(k8sNP)
   404  					d.K8sEventProcessed(metricKNP, metricDelete, err == nil)
   405  					return nil
   406  				}, serializer.NoRetry)
   407  			},
   408  		},
   409  		k8s.ConvertToNetworkPolicy,
   410  	)
   411  	d.blockWaitGroupToSyncResources(wait.NeverStop, policyController, k8sAPIGroupNetworkingV1Core)
   412  	go policyController.Run(wait.NeverStop)
   413  
   414  	d.k8sAPIGroups.addAPI(k8sAPIGroupNetworkingV1Core)
   415  
   416  	_, svcController := informer.NewInformer(
   417  		cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
   418  			"services", v1.NamespaceAll, fields.Everything()),
   419  		&v1.Service{},
   420  		0,
   421  		cache.ResourceEventHandlerFuncs{
   422  			AddFunc: func(obj interface{}) {
   423  				var valid, equal bool
   424  				defer func() { d.K8sEventReceived(metricService, metricCreate, valid, equal) }()
   425  				if k8sSvc := k8s.CopyObjToV1Services(obj); k8sSvc != nil {
   426  					valid = true
   427  					serSvcs.Enqueue(func() error {
   428  						err := d.addK8sServiceV1(k8sSvc)
   429  						d.K8sEventProcessed(metricService, metricCreate, err == nil)
   430  						return nil
   431  					}, serializer.NoRetry)
   432  				}
   433  			},
   434  			UpdateFunc: func(oldObj, newObj interface{}) {
   435  				var valid, equal bool
   436  				defer func() { d.K8sEventReceived(metricService, metricUpdate, valid, equal) }()
   437  				if oldk8sSvc := k8s.CopyObjToV1Services(oldObj); oldk8sSvc != nil {
   438  					valid = true
   439  					if newk8sSvc := k8s.CopyObjToV1Services(newObj); newk8sSvc != nil {
   440  						if k8s.EqualV1Services(oldk8sSvc, newk8sSvc) {
   441  							equal = true
   442  							return
   443  						}
   444  
   445  						serSvcs.Enqueue(func() error {
   446  							err := d.updateK8sServiceV1(oldk8sSvc, newk8sSvc)
   447  							d.K8sEventProcessed(metricService, metricUpdate, err == nil)
   448  							return nil
   449  						}, serializer.NoRetry)
   450  					}
   451  				}
   452  			},
   453  			DeleteFunc: func(obj interface{}) {
   454  				var valid, equal bool
   455  				defer func() { d.K8sEventReceived(metricService, metricDelete, valid, equal) }()
   456  				k8sSvc := k8s.CopyObjToV1Services(obj)
   457  				if k8sSvc == nil {
   458  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   459  					if !ok {
   460  						return
   461  					}
   462  					// Delete was not observed by the watcher but is
   463  					// removed from kube-apiserver. This is the last
   464  					// known state and the object no longer exists.
   465  					k8sSvc = k8s.CopyObjToV1Services(deletedObj.Obj)
   466  					if k8sSvc == nil {
   467  						return
   468  					}
   469  				}
   470  
   471  				valid = true
   472  				serSvcs.Enqueue(func() error {
   473  					err := d.deleteK8sServiceV1(k8sSvc)
   474  					d.K8sEventProcessed(metricService, metricDelete, err == nil)
   475  					return nil
   476  				}, serializer.NoRetry)
   477  			},
   478  		},
   479  		k8s.ConvertToK8sService,
   480  	)
   481  	d.blockWaitGroupToSyncResources(wait.NeverStop, svcController, k8sAPIGroupServiceV1Core)
   482  	go svcController.Run(wait.NeverStop)
   483  	d.k8sAPIGroups.addAPI(k8sAPIGroupServiceV1Core)
   484  
   485  	_, endpointController := informer.NewInformer(
   486  		cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
   487  			"endpoints", v1.NamespaceAll,
   488  			fields.ParseSelectorOrDie(option.Config.K8sWatcherEndpointSelector),
   489  		),
   490  		&v1.Endpoints{},
   491  		0,
   492  		cache.ResourceEventHandlerFuncs{
   493  			AddFunc: func(obj interface{}) {
   494  				var valid, equal bool
   495  				defer func() { d.K8sEventReceived(metricEndpoint, metricCreate, valid, equal) }()
   496  				if k8sEP := k8s.CopyObjToV1Endpoints(obj); k8sEP != nil {
   497  					valid = true
   498  					serEps.Enqueue(func() error {
   499  						err := d.addK8sEndpointV1(k8sEP)
   500  						d.K8sEventProcessed(metricEndpoint, metricCreate, err == nil)
   501  						return nil
   502  					}, serializer.NoRetry)
   503  				}
   504  			},
   505  			UpdateFunc: func(oldObj, newObj interface{}) {
   506  				var valid, equal bool
   507  				defer func() { d.K8sEventReceived(metricEndpoint, metricUpdate, valid, equal) }()
   508  				if oldk8sEP := k8s.CopyObjToV1Endpoints(oldObj); oldk8sEP != nil {
   509  					valid = true
   510  					if newk8sEP := k8s.CopyObjToV1Endpoints(newObj); newk8sEP != nil {
   511  						if k8s.EqualV1Endpoints(oldk8sEP, newk8sEP) {
   512  							equal = true
   513  							return
   514  						}
   515  
   516  						serEps.Enqueue(func() error {
   517  							err := d.updateK8sEndpointV1(oldk8sEP, newk8sEP)
   518  							d.K8sEventProcessed(metricEndpoint, metricUpdate, err == nil)
   519  							return nil
   520  						}, serializer.NoRetry)
   521  					}
   522  				}
   523  			},
   524  			DeleteFunc: func(obj interface{}) {
   525  				var valid, equal bool
   526  				defer func() { d.K8sEventReceived(metricEndpoint, metricDelete, valid, equal) }()
   527  				k8sEP := k8s.CopyObjToV1Endpoints(obj)
   528  				if k8sEP == nil {
   529  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   530  					if !ok {
   531  						return
   532  					}
   533  					// Delete was not observed by the watcher but is
   534  					// removed from kube-apiserver. This is the last
   535  					// known state and the object no longer exists.
   536  					k8sEP = k8s.CopyObjToV1Endpoints(deletedObj.Obj)
   537  					if k8sEP == nil {
   538  						return
   539  					}
   540  				}
   541  				valid = true
   542  				serEps.Enqueue(func() error {
   543  					err := d.deleteK8sEndpointV1(k8sEP)
   544  					d.K8sEventProcessed(metricEndpoint, metricDelete, err == nil)
   545  					return nil
   546  				}, serializer.NoRetry)
   547  			},
   548  		},
   549  		k8s.ConvertToK8sEndpoints,
   550  	)
   551  	d.blockWaitGroupToSyncResources(wait.NeverStop, endpointController, k8sAPIGroupEndpointV1Core)
   552  	go endpointController.Run(wait.NeverStop)
   553  	d.k8sAPIGroups.addAPI(k8sAPIGroupEndpointV1Core)
   554  
   555  	if option.Config.IsLBEnabled() {
   556  		_, ingressController := informer.NewInformer(
   557  			cache.NewListWatchFromClient(k8s.Client().ExtensionsV1beta1().RESTClient(),
   558  				"ingresses", v1.NamespaceAll, fields.Everything()),
   559  			&v1beta1.Ingress{},
   560  			0,
   561  			cache.ResourceEventHandlerFuncs{
   562  				AddFunc: func(obj interface{}) {
   563  					var valid, equal bool
   564  					defer func() { d.K8sEventReceived(metricIngress, metricCreate, valid, equal) }()
   565  					if k8sIngress := k8s.CopyObjToV1beta1Ingress(obj); k8sIngress != nil {
   566  						valid = true
   567  						serIngresses.Enqueue(func() error {
   568  							err := d.addIngressV1beta1(k8sIngress)
   569  							d.K8sEventProcessed(metricIngress, metricCreate, err == nil)
   570  							return nil
   571  						}, serializer.NoRetry)
   572  					}
   573  				},
   574  				UpdateFunc: func(oldObj, newObj interface{}) {
   575  					var valid, equal bool
   576  					defer func() { d.K8sEventReceived(metricIngress, metricUpdate, valid, equal) }()
   577  					if oldk8sIngress := k8s.CopyObjToV1beta1Ingress(oldObj); oldk8sIngress != nil {
   578  						valid = true
   579  						if newk8sIngress := k8s.CopyObjToV1beta1Ingress(newObj); newk8sIngress != nil {
   580  							if k8s.EqualV1beta1Ingress(oldk8sIngress, newk8sIngress) {
   581  								equal = true
   582  								return
   583  							}
   584  
   585  							serIngresses.Enqueue(func() error {
   586  								err := d.updateIngressV1beta1(oldk8sIngress, newk8sIngress)
   587  								d.K8sEventProcessed(metricIngress, metricUpdate, err == nil)
   588  								return nil
   589  							}, serializer.NoRetry)
   590  						}
   591  					}
   592  				},
   593  				DeleteFunc: func(obj interface{}) {
   594  					var valid, equal bool
   595  					defer func() { d.K8sEventReceived(metricIngress, metricDelete, valid, equal) }()
   596  					k8sIngress := k8s.CopyObjToV1beta1Ingress(obj)
   597  					if k8sIngress == nil {
   598  						deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   599  						if !ok {
   600  							return
   601  						}
   602  						// Delete was not observed by the watcher but is
   603  						// removed from kube-apiserver. This is the last
   604  						// known state and the object no longer exists.
   605  						k8sIngress = k8s.CopyObjToV1beta1Ingress(deletedObj.Obj)
   606  						if k8sIngress == nil {
   607  							return
   608  						}
   609  					}
   610  					valid = true
   611  					serIngresses.Enqueue(func() error {
   612  						err := d.deleteIngressV1beta1(k8sIngress)
   613  						d.K8sEventProcessed(metricIngress, metricDelete, err == nil)
   614  						return nil
   615  					}, serializer.NoRetry)
   616  				},
   617  			},
   618  			k8s.ConvertToIngress,
   619  		)
   620  		d.blockWaitGroupToSyncResources(wait.NeverStop, ingressController, k8sAPIGroupIngressV1Beta1)
   621  		go ingressController.Run(wait.NeverStop)
   622  		d.k8sAPIGroups.addAPI(k8sAPIGroupIngressV1Beta1)
   623  	}
   624  
   625  	var (
   626  		cnpEventStore    cache.Store
   627  		cnpConverterFunc informer.ConvertFunc
   628  	)
   629  	cnpStore := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
   630  	switch {
   631  	case k8sversion.Capabilities().Patch:
   632  		// k8s >= 1.13 does not require a store to update CNP status so
   633  		// we don't even need to keep the status of a CNP with us.
   634  		cnpConverterFunc = k8s.ConvertToCNP
   635  	default:
   636  		cnpEventStore = cnpStore
   637  		cnpConverterFunc = k8s.ConvertToCNPWithStatus
   638  	}
   639  
   640  	ciliumV2Controller := informer.NewInformerWithStore(
   641  		cache.NewListWatchFromClient(ciliumNPClient.CiliumV2().RESTClient(),
   642  			"ciliumnetworkpolicies", v1.NamespaceAll, fields.Everything()),
   643  		&cilium_v2.CiliumNetworkPolicy{},
   644  		0,
   645  		cache.ResourceEventHandlerFuncs{
   646  			AddFunc: func(obj interface{}) {
   647  				var valid, equal bool
   648  				defer func() { d.K8sEventReceived(metricCNP, metricCreate, valid, equal) }()
   649  				if cnp := k8s.CopyObjToV2CNP(obj); cnp != nil {
   650  					valid = true
   651  					serCNPs.Enqueue(func() error {
   652  						if cnp.RequiresDerivative() {
   653  							return nil
   654  						}
   655  						err := d.addCiliumNetworkPolicyV2(ciliumNPClient, cnpEventStore, cnp)
   656  						d.K8sEventProcessed(metricCNP, metricCreate, err == nil)
   657  						return nil
   658  					}, serializer.NoRetry)
   659  				}
   660  			},
   661  			UpdateFunc: func(oldObj, newObj interface{}) {
   662  				var valid, equal bool
   663  				defer func() { d.K8sEventReceived(metricCNP, metricUpdate, valid, equal) }()
   664  				if oldCNP := k8s.CopyObjToV2CNP(oldObj); oldCNP != nil {
   665  					valid = true
   666  					if newCNP := k8s.CopyObjToV2CNP(newObj); newCNP != nil {
   667  						if k8s.EqualV2CNP(oldCNP, newCNP) {
   668  							equal = true
   669  							return
   670  						}
   671  
   672  						serCNPs.Enqueue(func() error {
   673  							if newCNP.RequiresDerivative() {
   674  								return nil
   675  							}
   676  
   677  							err := d.updateCiliumNetworkPolicyV2(ciliumNPClient, cnpEventStore, oldCNP, newCNP)
   678  							d.K8sEventProcessed(metricCNP, metricUpdate, err == nil)
   679  							return nil
   680  						}, serializer.NoRetry)
   681  					}
   682  				}
   683  			},
   684  			DeleteFunc: func(obj interface{}) {
   685  				var valid, equal bool
   686  				defer func() { d.K8sEventReceived(metricCNP, metricDelete, valid, equal) }()
   687  				cnp := k8s.CopyObjToV2CNP(obj)
   688  				if cnp == nil {
   689  					deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   690  					if !ok {
   691  						return
   692  					}
   693  					// Delete was not observed by the watcher but is
   694  					// removed from kube-apiserver. This is the last
   695  					// known state and the object no longer exists.
   696  					cnp = k8s.CopyObjToV2CNP(deletedObj.Obj)
   697  					if cnp == nil {
   698  						return
   699  					}
   700  				}
   701  				valid = true
   702  				serCNPs.Enqueue(func() error {
   703  					err := d.deleteCiliumNetworkPolicyV2(cnp)
   704  					d.K8sEventProcessed(metricCNP, metricDelete, err == nil)
   705  					return nil
   706  				}, serializer.NoRetry)
   707  			},
   708  		},
   709  		cnpConverterFunc,
   710  		cnpStore,
   711  	)
   712  	d.blockWaitGroupToSyncResources(wait.NeverStop, ciliumV2Controller, k8sAPIGroupCiliumNetworkPolicyV2)
   713  	go ciliumV2Controller.Run(wait.NeverStop)
   714  	d.k8sAPIGroups.addAPI(k8sAPIGroupCiliumNetworkPolicyV2)
   715  
   716  	asyncControllers := sync.WaitGroup{}
   717  	asyncControllers.Add(1)
   718  
   719  	// CiliumNode objects are used for node discovery until the key-value
   720  	// store is connected
   721  	go func() {
   722  		var once sync.Once
   723  		for {
   724  			_, ciliumNodeInformer := informer.NewInformer(
   725  				cache.NewListWatchFromClient(ciliumNPClient.CiliumV2().RESTClient(),
   726  					"ciliumnodes", v1.NamespaceAll, fields.Everything()),
   727  				&cilium_v2.CiliumNode{},
   728  				0,
   729  				cache.ResourceEventHandlerFuncs{
   730  					AddFunc: func(obj interface{}) {
   731  						var valid, equal bool
   732  						defer func() { d.K8sEventReceived(metricCiliumNode, metricCreate, valid, equal) }()
   733  						if ciliumNode, ok := obj.(*cilium_v2.CiliumNode); ok {
   734  							valid = true
   735  							n := node.ParseCiliumNode(ciliumNode)
   736  							if n.IsLocal() {
   737  								return
   738  							}
   739  							serNodes.Enqueue(func() error {
   740  								d.nodeDiscovery.Manager.NodeUpdated(n)
   741  								d.K8sEventProcessed(metricCiliumNode, metricCreate, true)
   742  								return nil
   743  							}, serializer.NoRetry)
   744  						}
   745  					},
   746  					UpdateFunc: func(oldObj, newObj interface{}) {
   747  						var valid, equal bool
   748  						defer func() { d.K8sEventReceived(metricCiliumNode, metricUpdate, valid, equal) }()
   749  						if ciliumNode, ok := newObj.(*cilium_v2.CiliumNode); ok {
   750  							valid = true
   751  							n := node.ParseCiliumNode(ciliumNode)
   752  							if n.IsLocal() {
   753  								return
   754  							}
   755  							serNodes.Enqueue(func() error {
   756  								d.nodeDiscovery.Manager.NodeUpdated(n)
   757  								d.K8sEventProcessed(metricCiliumNode, metricUpdate, true)
   758  								return nil
   759  							}, serializer.NoRetry)
   760  						}
   761  					},
   762  					DeleteFunc: func(obj interface{}) {
   763  						var valid, equal bool
   764  						defer func() { d.K8sEventReceived(metricCiliumNode, metricDelete, valid, equal) }()
   765  						ciliumNode := k8s.CopyObjToCiliumNode(obj)
   766  						if ciliumNode == nil {
   767  							deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   768  							if !ok {
   769  								return
   770  							}
   771  							// Delete was not observed by the watcher but is
   772  							// removed from kube-apiserver. This is the last
   773  							// known state and the object no longer exists.
   774  							ciliumNode = k8s.CopyObjToCiliumNode(deletedObj.Obj)
   775  							if ciliumNode == nil {
   776  								return
   777  							}
   778  						}
   779  						valid = true
   780  						n := node.ParseCiliumNode(ciliumNode)
   781  						serNodes.Enqueue(func() error {
   782  							d.nodeDiscovery.Manager.NodeDeleted(n)
   783  							return nil
   784  						}, serializer.NoRetry)
   785  					},
   786  				},
   787  				k8s.ConvertToCiliumNode,
   788  			)
   789  			isConnected := make(chan struct{})
   790  			// once isConnected is closed, it will stop waiting on caches to be
   791  			// synchronized.
   792  			d.blockWaitGroupToSyncResources(isConnected, ciliumNodeInformer, k8sAPIGroupCiliumNodeV2)
   793  
   794  			once.Do(func() {
   795  				// Signal that the CiliumNode controller has been added to the
   796  				// wait group used to sync resources.
   797  				asyncControllers.Done()
   798  			})
   799  			d.k8sAPIGroups.addAPI(k8sAPIGroupCiliumNodeV2)
   800  			go ciliumNodeInformer.Run(isConnected)
   801  
   802  			<-kvstore.Client().Connected()
   803  			close(isConnected)
   804  
   805  			log.Info("Connected to key-value store, stopping CiliumNode watcher")
   806  
   807  			d.k8sAPIGroups.removeAPI(k8sAPIGroupCiliumNodeV2)
   808  			// Create a new CiliumNode controller when we are disconnected
   809  			// from the kvstore
   810  			<-kvstore.Client().Disconnected()
   811  
   812  			log.Info("Disconnected from key-value store, restarting CiliumNode watcher")
   813  		}
   814  	}()
   815  
   816  	asyncControllers.Add(1)
   817  
   818  	// CiliumEndpoint objects are used for ipcache discovery until the
   819  	// key-value store is connected
   820  	go func() {
   821  		var once sync.Once
   822  		for {
   823  			_, ciliumEndpointInformer := informer.NewInformer(
   824  				cache.NewListWatchFromClient(ciliumNPClient.CiliumV2().RESTClient(),
   825  					"ciliumendpoints", v1.NamespaceAll, fields.Everything()),
   826  				&cilium_v2.CiliumEndpoint{},
   827  				0,
   828  				cache.ResourceEventHandlerFuncs{
   829  					AddFunc: func(obj interface{}) {
   830  						var valid, equal bool
   831  						defer func() { d.K8sEventReceived(metricCiliumEndpoint, metricCreate, valid, equal) }()
   832  						if ciliumEndpoint, ok := obj.(*types.CiliumEndpoint); ok {
   833  							valid = true
   834  							endpoint := ciliumEndpoint.DeepCopy()
   835  							serCiliumEndpoints.Enqueue(func() error {
   836  								endpointUpdated(endpoint)
   837  								d.K8sEventProcessed(metricCiliumEndpoint, metricCreate, true)
   838  								return nil
   839  							}, serializer.NoRetry)
   840  						}
   841  					},
   842  					UpdateFunc: func(oldObj, newObj interface{}) {
   843  						var valid, equal bool
   844  						defer func() { d.K8sEventReceived(metricCiliumEndpoint, metricUpdate, valid, equal) }()
   845  						if ciliumEndpoint, ok := newObj.(*types.CiliumEndpoint); ok {
   846  							valid = true
   847  							endpoint := ciliumEndpoint.DeepCopy()
   848  							serCiliumEndpoints.Enqueue(func() error {
   849  								endpointUpdated(endpoint)
   850  								d.K8sEventProcessed(metricCiliumEndpoint, metricUpdate, true)
   851  								return nil
   852  							}, serializer.NoRetry)
   853  						}
   854  					},
   855  					DeleteFunc: func(obj interface{}) {
   856  						var valid, equal bool
   857  						defer func() { d.K8sEventReceived(metricCiliumEndpoint, metricDelete, valid, equal) }()
   858  						ciliumEndpoint := k8s.CopyObjToCiliumEndpoint(obj)
   859  						if ciliumEndpoint == nil {
   860  							deletedObj, ok := obj.(cache.DeletedFinalStateUnknown)
   861  							if !ok {
   862  								return
   863  							}
   864  							// Delete was not observed by the watcher but is
   865  							// removed from kube-apiserver. This is the last
   866  							// known state and the object no longer exists.
   867  							ciliumEndpoint = k8s.CopyObjToCiliumEndpoint(deletedObj.Obj)
   868  							if ciliumEndpoint == nil {
   869  								return
   870  							}
   871  						}
   872  						valid = true
   873  						serCiliumEndpoints.Enqueue(func() error {
   874  							endpointDeleted(ciliumEndpoint)
   875  							return nil
   876  						}, serializer.NoRetry)
   877  					},
   878  				},
   879  				k8s.ConvertToCiliumEndpoint,
   880  			)
   881  			isConnected := make(chan struct{})
   882  			// once isConnected is closed, it will stop waiting on caches to be
   883  			// synchronized.
   884  			d.blockWaitGroupToSyncResources(isConnected, ciliumEndpointInformer, k8sAPIGroupCiliumEndpointV2)
   885  
   886  			once.Do(func() {
   887  				// Signal that the CiliumEndpoint controller has been added to the
   888  				// wait group used to sync resources.
   889  				asyncControllers.Done()
   890  			})
   891  			d.k8sAPIGroups.addAPI(k8sAPIGroupCiliumEndpointV2)
   892  			go ciliumEndpointInformer.Run(isConnected)
   893  
   894  			<-kvstore.Client().Connected()
   895  			close(isConnected)
   896  
   897  			log.Info("Connected to key-value store, stopping CiliumEndpoint watcher")
   898  
   899  			d.k8sAPIGroups.removeAPI(k8sAPIGroupCiliumEndpointV2)
   900  			// Create a new CiliumEndpoint controller when we are disconnected
   901  			// from the kvstore
   902  			<-kvstore.Client().Disconnected()
   903  
   904  			log.Info("Disconnected from key-value store, restarting CiliumEndpoint watcher")
   905  		}
   906  	}()
   907  
   908  	asyncControllers.Add(1)
   909  	go func() {
   910  		var once sync.Once
   911  		for {
   912  			createPodController := func(fieldSelector fields.Selector) cache.Controller {
   913  				_, podController := informer.NewInformer(
   914  					cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
   915  						"pods", v1.NamespaceAll, fieldSelector),
   916  					&v1.Pod{},
   917  					0,
   918  					cache.ResourceEventHandlerFuncs{
   919  						AddFunc: func(obj interface{}) {
   920  							var valid, equal bool
   921  							defer func() { d.K8sEventReceived(metricPod, metricCreate, valid, equal) }()
   922  							if pod := k8s.CopyObjToV1Pod(obj); pod != nil {
   923  								valid = true
   924  								serPods.Enqueue(func() error {
   925  									err := d.addK8sPodV1(pod)
   926  									d.K8sEventProcessed(metricPod, metricCreate, err == nil)
   927  									return nil
   928  								}, serializer.NoRetry)
   929  							}
   930  						},
   931  						UpdateFunc: func(oldObj, newObj interface{}) {
   932  							var valid, equal bool
   933  							defer func() { d.K8sEventReceived(metricPod, metricUpdate, valid, equal) }()
   934  							if oldPod := k8s.CopyObjToV1Pod(oldObj); oldPod != nil {
   935  								valid = true
   936  								if newPod := k8s.CopyObjToV1Pod(newObj); newPod != nil {
   937  									if k8s.EqualV1Pod(oldPod, newPod) {
   938  										equal = true
   939  										return
   940  									}
   941  
   942  									serPods.Enqueue(func() error {
   943  										err := d.updateK8sPodV1(oldPod, newPod)
   944  										d.K8sEventProcessed(metricPod, metricUpdate, err == nil)
   945  										return nil
   946  									}, serializer.NoRetry)
   947  								}
   948  							}
   949  						},
   950  						DeleteFunc: func(obj interface{}) {
   951  							var valid, equal bool
   952  							defer func() { d.K8sEventReceived(metricPod, metricDelete, valid, equal) }()
   953  							if pod := k8s.CopyObjToV1Pod(obj); pod != nil {
   954  								valid = true
   955  								serPods.Enqueue(func() error {
   956  									err := d.deleteK8sPodV1(pod)
   957  									d.K8sEventProcessed(metricPod, metricDelete, err == nil)
   958  									return nil
   959  								}, serializer.NoRetry)
   960  							}
   961  						},
   962  					},
   963  					k8s.ConvertToPod,
   964  				)
   965  				return podController
   966  			}
   967  			podController := createPodController(fields.Everything())
   968  
   969  			isConnected := make(chan struct{})
   970  			// once isConnected is closed, it will stop waiting on caches to be
   971  			// synchronized.
   972  			d.blockWaitGroupToSyncResources(isConnected, podController, k8sAPIGroupPodV1Core)
   973  			once.Do(func() {
   974  				asyncControllers.Done()
   975  				d.k8sAPIGroups.addAPI(k8sAPIGroupPodV1Core)
   976  			})
   977  			go podController.Run(isConnected)
   978  
   979  			if !option.Config.K8sEventHandover {
   980  				return
   981  			}
   982  
   983  			// Replace the pod controller with one that only receives events for
   984  			// our own node once we are connected to the kvstore.
   985  
   986  			<-kvstore.Client().Connected()
   987  			close(isConnected)
   988  
   989  			log.WithField(logfields.Node, node.GetName()).Info("Connected to KVStore, watching for pod events on node")
   990  			// Only watch for pod events for our node.
   991  			podController = createPodController(fields.ParseSelectorOrDie("spec.nodeName=" + node.GetName()))
   992  			isConnected = make(chan struct{})
   993  			go podController.Run(isConnected)
   994  
   995  			// Create a new pod controller when we are disconnected from the
   996  			// kvstore
   997  			<-kvstore.Client().Disconnected()
   998  			close(isConnected)
   999  			log.Info("Disconnected from KVStore, watching for pod events on all nodes")
  1000  		}
  1001  	}()
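
        	// Note on the three kvstore-aware watchers above (CiliumNode,
        	// CiliumEndpoint, Pod): each runs in a loop that hands over to the
        	// key-value store, roughly
        	//
        	//	for {
        	//		ctrl := newController() // hypothetical constructor
        	//		stop := make(chan struct{})
        	//		d.blockWaitGroupToSyncResources(stop, ctrl, apiGroup)
        	//		go ctrl.Run(stop)
        	//		<-kvstore.Client().Connected()
        	//		close(stop) // kvstore takes over
        	//		<-kvstore.Client().Disconnected()
        	//	}
        	//
        	// CiliumNode and CiliumEndpoint watching stops entirely while the
        	// kvstore is connected; the Pod watcher, when K8sEventHandover is
        	// enabled, narrows to pods on the local node instead of stopping.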
  1002  
  1003  	_, namespaceController := informer.NewInformer(
  1004  		cache.NewListWatchFromClient(k8s.Client().CoreV1().RESTClient(),
  1005  			"namespaces", v1.NamespaceAll, fields.Everything()),
  1006  		&v1.Namespace{},
  1007  		0,
  1008  		cache.ResourceEventHandlerFuncs{
  1009  			// AddFunc does not matter since the endpoint will fetch namespace
  1010  			// labels when the endpoint is created.
  1011  			// DeleteFunc does not matter since, when a namespace is deleted, all
  1012  			// pods belonging to that namespace are also deleted.
  1013  			UpdateFunc: func(oldObj, newObj interface{}) {
  1014  				var valid, equal bool
  1015  				defer func() { d.K8sEventReceived(metricNS, metricUpdate, valid, equal) }()
  1016  				if oldNS := k8s.CopyObjToV1Namespace(oldObj); oldNS != nil {
  1017  					valid = true
  1018  					if newNS := k8s.CopyObjToV1Namespace(newObj); newNS != nil {
  1019  						if k8s.EqualV1Namespace(oldNS, newNS) {
  1020  							equal = true
  1021  							return
  1022  						}
  1023  
  1024  						serNamespaces.Enqueue(func() error {
  1025  							err := d.updateK8sV1Namespace(oldNS, newNS)
  1026  							d.K8sEventProcessed(metricNS, metricUpdate, err == nil)
  1027  							return nil
  1028  						}, serializer.NoRetry)
  1029  					}
  1030  				}
  1031  			},
  1032  		},
  1033  		k8s.ConvertToNamespace,
  1034  	)
  1035  
  1036  	go namespaceController.Run(wait.NeverStop)
  1037  	d.k8sAPIGroups.addAPI(k8sAPIGroupNamespaceV1Core)
  1038  
  1039  	asyncControllers.Wait()
  1040  
  1041  	return nil
  1042  }
  1043  
  1044  func (d *Daemon) addK8sNetworkPolicyV1(k8sNP *types.NetworkPolicy) error {
  1045  	scopedLog := log.WithField(logfields.K8sAPIVersion, k8sNP.TypeMeta.APIVersion)
  1046  	rules, err := k8s.ParseNetworkPolicy(k8sNP.NetworkPolicy)
  1047  	if err != nil {
  1048  		scopedLog.WithError(err).WithFields(logrus.Fields{
  1049  			logfields.CiliumNetworkPolicy: logfields.Repr(k8sNP),
  1050  		}).Error("Error while parsing k8s NetworkPolicy")
  1051  		return err
  1052  	}
  1053  	scopedLog = scopedLog.WithField(logfields.K8sNetworkPolicyName, k8sNP.ObjectMeta.Name)
  1054  
  1055  	opts := AddOptions{Replace: true, Source: metrics.LabelEventSourceK8s}
  1056  	if _, err := d.PolicyAdd(rules, &opts); err != nil {
  1057  		scopedLog.WithError(err).WithFields(logrus.Fields{
  1058  			logfields.CiliumNetworkPolicy: logfields.Repr(rules),
  1059  		}).Error("Unable to add NetworkPolicy rules to policy repository")
  1060  		return err
  1061  	}
  1062  
  1063  	scopedLog.Info("NetworkPolicy successfully added")
  1064  	return nil
  1065  }
  1066  
  1067  func (d *Daemon) updateK8sNetworkPolicyV1(oldk8sNP, newk8sNP *types.NetworkPolicy) error {
  1068  	log.WithFields(logrus.Fields{
  1069  		logfields.K8sAPIVersion:                 oldk8sNP.TypeMeta.APIVersion,
  1070  		logfields.K8sNetworkPolicyName + ".old": oldk8sNP.ObjectMeta.Name,
  1071  		logfields.K8sNamespace + ".old":         oldk8sNP.ObjectMeta.Namespace,
  1072  		logfields.K8sNetworkPolicyName:          newk8sNP.ObjectMeta.Name,
  1073  		logfields.K8sNamespace:                  newk8sNP.ObjectMeta.Namespace,
  1074  	}).Debug("Received policy update")
  1075  
  1076  	return d.addK8sNetworkPolicyV1(newk8sNP)
  1077  }
  1078  
  1079  func (d *Daemon) deleteK8sNetworkPolicyV1(k8sNP *types.NetworkPolicy) error {
  1080  	labels := k8s.GetPolicyLabelsv1(k8sNP.NetworkPolicy)
  1081  
  1082  	if labels == nil {
  1083  		log.Fatalf("provided v1 NetworkPolicy is nil, so cannot delete it")
  1084  	}
  1085  
  1086  	scopedLog := log.WithFields(logrus.Fields{
  1087  		logfields.K8sNetworkPolicyName: k8sNP.ObjectMeta.Name,
  1088  		logfields.K8sNamespace:         k8sNP.ObjectMeta.Namespace,
  1089  		logfields.K8sAPIVersion:        k8sNP.TypeMeta.APIVersion,
  1090  		logfields.Labels:               logfields.Repr(labels),
  1091  	})
  1092  	if _, err := d.PolicyDelete(labels); err != nil {
  1093  		scopedLog.WithError(err).Error("Error while deleting k8s NetworkPolicy")
  1094  		return err
  1095  	}
  1096  
  1097  	scopedLog.Info("NetworkPolicy successfully removed")
  1098  	return nil
  1099  }
  1100  
  1101  func (d *Daemon) k8sServiceHandler() {
  1102  	for {
  1103  		event, ok := <-d.k8sSvcCache.Events
  1104  		if !ok {
  1105  			return
  1106  		}
  1107  
  1108  		svc := event.Service
  1109  
  1110  		scopedLog := log.WithFields(logrus.Fields{
  1111  			logfields.K8sSvcName:   event.ID.Name,
  1112  			logfields.K8sNamespace: event.ID.Namespace,
  1113  		})
  1114  
  1115  		scopedLog.WithFields(logrus.Fields{
  1116  			"action":      event.Action.String(),
  1117  			"service":     event.Service.String(),
  1118  			"old-service": event.OldService.String(),
  1119  			"endpoints":   event.Endpoints.String(),
  1120  		}).Debug("Kubernetes service definition changed")
  1121  
  1122  		switch event.Action {
  1123  		case k8s.UpdateService, k8s.UpdateIngress:
  1124  			if err := d.addK8sSVCs(event.ID, event.OldService, svc, event.Endpoints); err != nil {
  1125  				scopedLog.WithError(err).Error("Unable to add/update service to implement k8s event")
  1126  			}
  1127  
  1128  			if !svc.IsExternal() {
  1129  				continue
  1130  			}
  1131  
  1132  			translator := k8s.NewK8sTranslator(event.ID, *event.Endpoints, false, svc.Labels, bpfIPCache.IPCache)
  1133  			result, err := d.policy.TranslateRules(translator)
  1134  			if err != nil {
  1135  				log.Errorf("Unable to repopulate egress policies from ToService rules: %v", err)
  1136  				break
  1137  			} else if result.NumToServicesRules > 0 {
  1138  				// Only trigger policy updates if ToServices rules are in effect
  1139  				d.TriggerPolicyUpdates(true, "Kubernetes service endpoint added")
  1140  			}
  1141  
  1142  		case k8s.DeleteService, k8s.DeleteIngress:
  1143  			if err := d.delK8sSVCs(event.ID, event.Service, event.Endpoints); err != nil {
  1144  				scopedLog.WithError(err).Error("Unable to delete service to implement k8s event")
  1145  			}
  1146  
  1147  			if !svc.IsExternal() {
  1148  				continue
  1149  			}
  1150  
  1151  			translator := k8s.NewK8sTranslator(event.ID, *event.Endpoints, true, svc.Labels, bpfIPCache.IPCache)
  1152  			result, err := d.policy.TranslateRules(translator)
  1153  			if err != nil {
  1154  				log.Errorf("Unable to depopulate egress policies from ToService rules: %v", err)
  1155  				break
  1156  			} else if result.NumToServicesRules > 0 {
  1157  				// Only trigger policy updates if ToServices rules are in effect
  1158  				d.TriggerPolicyUpdates(true, "Kubernetes service endpoint deleted")
  1159  			}
  1160  		}
  1161  	}
  1162  }
  1163  
  1164  func (d *Daemon) runK8sServiceHandler() {
  1165  	go d.k8sServiceHandler()
  1166  }
  1167  
  1168  func (d *Daemon) addK8sServiceV1(svc *types.Service) error {
  1169  	d.k8sSvcCache.UpdateService(svc)
  1170  	return nil
  1171  }
  1172  
  1173  func (d *Daemon) updateK8sServiceV1(oldSvc, newSvc *types.Service) error {
  1174  	return d.addK8sServiceV1(newSvc)
  1175  }
  1176  
  1177  func (d *Daemon) deleteK8sServiceV1(svc *types.Service) error {
  1178  	d.k8sSvcCache.DeleteService(svc)
  1179  	return nil
  1180  }
  1181  
  1182  func (d *Daemon) addK8sEndpointV1(ep *types.Endpoints) error {
  1183  	d.k8sSvcCache.UpdateEndpoints(ep)
  1184  	return nil
  1185  }
  1186  
  1187  func (d *Daemon) updateK8sEndpointV1(oldEP, newEP *types.Endpoints) error {
  1188  	d.k8sSvcCache.UpdateEndpoints(newEP)
  1189  	return nil
  1190  }
  1191  
  1192  func (d *Daemon) deleteK8sEndpointV1(ep *types.Endpoints) error {
  1193  	d.k8sSvcCache.DeleteEndpoints(ep)
  1194  	return nil
  1195  }
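
        // The six thin wrappers above only feed d.k8sSvcCache; the cache correlates
        // Services with their Endpoints and then emits an event on
        // d.k8sSvcCache.Events, which k8sServiceHandler above drains before calling
        // addK8sSVCs/delK8sSVCs to program the datapath. Conceptually:
        //
        //	d.addK8sServiceV1(svc)  // -> d.k8sSvcCache.UpdateService(svc)
        //	d.addK8sEndpointV1(ep)  // -> d.k8sSvcCache.UpdateEndpoints(ep)
        //	// once correlated, an UpdateService event appears on d.k8sSvcCache.Events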
  1196  
  1197  func (d *Daemon) delK8sSVCs(svc k8s.ServiceID, svcInfo *k8s.Service, se *k8s.Endpoints) error {
  1198  	// If east-west load balancing is disabled, we should not sync (add or
  1199  	// delete) K8s services to Cilium services.
  1200  	if option.Config.DisableK8sServices {
  1201  		return nil
  1202  	}
  1203  
  1204  	// Headless services do not need any datapath implementation
  1205  	if svcInfo.IsHeadless {
  1206  		return nil
  1207  	}
  1208  
  1209  	scopedLog := log.WithFields(logrus.Fields{
  1210  		logfields.K8sSvcName:   svc.Name,
  1211  		logfields.K8sNamespace: svc.Namespace,
  1212  	})
  1213  
  1214  	repPorts := svcInfo.UniquePorts()
  1215  
  1216  	frontends := []*loadbalancer.L3n4AddrID{}
  1217  
  1218  	for portName, svcPort := range svcInfo.Ports {
  1219  		if !repPorts[svcPort.Port] {
  1220  			continue
  1221  		}
  1222  		repPorts[svcPort.Port] = false
  1223  
  1224  		fe := loadbalancer.NewL3n4AddrID(svcPort.Protocol, svcInfo.FrontendIP, svcPort.Port, loadbalancer.ID(svcPort.ID))
  1225  		frontends = append(frontends, fe)
  1226  
  1227  		for _, nodePortFE := range svcInfo.NodePorts[portName] {
  1228  			frontends = append(frontends, nodePortFE)
  1229  		}
  1230  	}
  1231  
  1232  	for _, fe := range frontends {
  1233  		if fe.ID != 0 {
  1234  			if err := service.DeleteID(uint32(fe.ID)); err != nil {
  1235  				scopedLog.WithError(err).Warn("Error while cleaning service ID")
  1236  			}
  1237  		}
  1238  
  1239  		if err := d.svcDeleteByFrontend(&fe.L3n4Addr); err != nil {
  1240  			l := scopedLog.WithError(err).WithField(logfields.Object, logfields.Repr(fe))
  1241  			msg := "Error deleting service by frontend"
  1242  			if _, ok := err.(*errSVCNotFound); ok {
  1243  				l.Info(msg)
  1244  			} else {
  1245  				l.Warn(msg)
  1246  			}
  1247  			continue
  1248  		} else {
  1249  			scopedLog.Debugf("# cilium lb delete-service %s %d 0", fe.IP, fe.Port)
  1250  		}
  1251  
  1252  		if err := d.RevNATDelete(loadbalancer.ServiceID(fe.ID)); err != nil {
  1253  			scopedLog.WithError(err).WithField(logfields.ServiceID, fe.ID).Warn("Error deleting reverse NAT")
  1254  		} else {
  1255  			scopedLog.Debugf("# cilium lb delete-rev-nat %d", fe.ID)
  1256  		}
  1257  	}
  1258  	return nil
  1259  }
  1260  
  1261  func genCartesianProduct(
  1262  	scopedLog *logrus.Entry,
  1263  	fe net.IP,
  1264  	isNodePort bool,
  1265  	ports map[loadbalancer.FEPortName]*loadbalancer.FEPort,
  1266  	bes *k8s.Endpoints,
  1267  ) []loadbalancer.LBSVC {
  1268  
  1269  	var svcs []loadbalancer.LBSVC
  1270  
  1271  	for fePortName, fePort := range ports {
  1272  
  1273  		if fePort.ID == 0 {
  1274  			feAddr := loadbalancer.NewL3n4Addr(fePort.Protocol, fe, fePort.Port)
  1275  			feAddrID, err := service.AcquireID(*feAddr, 0)
  1276  			if err != nil {
  1277  				scopedLog.WithError(err).WithFields(logrus.Fields{
  1278  					logfields.ServiceID: fePortName,
  1279  					logfields.IPAddr:    fe,
  1280  					logfields.Port:      fePort.Port,
  1281  					logfields.Protocol:  fePort.Protocol,
  1282  				}).Error("Error while getting a new service ID. Ignoring service...")
  1283  				continue
  1284  			}
  1285  			scopedLog.WithFields(logrus.Fields{
  1286  				logfields.ServiceName: fePortName,
  1287  				logfields.ServiceID:   feAddrID.ID,
  1288  				logfields.Object:      logfields.Repr(fe),
  1289  			}).Debug("Got feAddr ID for service")
  1290  			fePort.ID = loadbalancer.ServiceID(feAddrID.ID)
  1291  		}
  1292  
  1293  		var besValues []loadbalancer.LBBackEnd
  1294  		for ip, portConfiguration := range bes.Backends {
  1295  			if backendPort := portConfiguration[string(fePortName)]; backendPort != nil {
  1296  				besValues = append(besValues, loadbalancer.LBBackEnd{
  1297  					L3n4Addr: loadbalancer.L3n4Addr{
  1298  						IP:     net.ParseIP(ip),
  1299  						L4Addr: *backendPort,
  1300  					},
  1301  					Weight: 0,
  1302  				})
  1303  			}
  1304  		}
  1305  
  1306  		svcs = append(svcs,
  1307  			loadbalancer.LBSVC{
  1308  				FE: loadbalancer.L3n4AddrID{
  1309  					L3n4Addr: loadbalancer.L3n4Addr{
  1310  						IP: fe,
  1311  						L4Addr: loadbalancer.L4Addr{
  1312  							Protocol: fePort.Protocol,
  1313  							Port:     fePort.Port,
  1314  						},
  1315  					},
  1316  					ID: loadbalancer.ID(fePort.ID),
  1317  				},
  1318  				BES:      besValues,
  1319  				NodePort: isNodePort,
  1320  			})
  1321  	}
  1322  	return svcs
  1323  }
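
        // Worked example with illustrative values: for fe=10.0.0.1 and a single
        // port entry "http" -> TCP/80 (ID 7), with backends 10.1.1.1 and 10.1.1.2
        // exposing "http" on 8080, genCartesianProduct yields one LBSVC:
        //
        //	FE:  10.0.0.1:80/TCP (ID 7)
        //	BES: 10.1.1.1:8080/TCP, 10.1.1.2:8080/TCP
        //
        // i.e. one frontend entry per port name, carrying every backend that
        // exposes a port with that name.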
  1324  
  1325  // datapathSVCs returns all services that should be set in the datapath.
  1326  func datapathSVCs(
  1327  	scopedLog *logrus.Entry,
  1328  	svc *k8s.Service,
  1329  	endpoints *k8s.Endpoints) (svcs []loadbalancer.LBSVC) {
  1330  
  1331  	uniqPorts := svc.UniquePorts()
  1332  
  1333  	clusterIPPorts := map[loadbalancer.FEPortName]*loadbalancer.FEPort{}
  1334  	for fePortName, fePort := range svc.Ports {
  1335  		if !uniqPorts[fePort.Port] {
  1336  			continue
  1337  		}
  1338  		uniqPorts[fePort.Port] = false
  1339  		clusterIPPorts[fePortName] = fePort
  1340  	}
  1341  	if svc.FrontendIP != nil {
  1342  		dpSVC := genCartesianProduct(scopedLog, svc.FrontendIP, false, clusterIPPorts, endpoints)
  1343  		svcs = append(svcs, dpSVC...)
  1344  	}
  1345  
  1346  	for fePortName := range clusterIPPorts {
  1347  		for _, nodePortFE := range svc.NodePorts[fePortName] {
  1348  			nodePortPorts := map[loadbalancer.FEPortName]*loadbalancer.FEPort{
  1349  				fePortName: {
  1350  					L4Addr: &nodePortFE.L4Addr,
  1351  				},
  1352  			}
  1353  			dpSVC := genCartesianProduct(scopedLog, nodePortFE.IP, true, nodePortPorts, endpoints)
  1354  			svcs = append(svcs, dpSVC...)
  1355  		}
  1356  	}
  1357  	return svcs
  1358  }
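
        // Worked example with illustrative values: a ClusterIP service 10.96.0.10
        // with port "http" TCP/80 plus a NodePort frontend 192.168.1.10:30080
        // produces the cluster IP frontend crossed with all backends, and each
        // NodePort frontend registered for that port name crossed with the same
        // backends.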
  1359  
  1360  // hashSVCMap returns a map of each frontend's hash to its corresponding
  1361  // L3n4Addr value.
  1362  func hashSVCMap(svcs []loadbalancer.LBSVC) map[string]loadbalancer.L3n4Addr {
  1363  	m := map[string]loadbalancer.L3n4Addr{}
  1364  	for _, svc := range svcs {
  1365  		m[svc.FE.L3n4Addr.SHA256Sum()] = svc.FE.L3n4Addr
  1366  	}
  1367  	return m
  1368  }
  1369  
  1370  func (d *Daemon) addK8sSVCs(svcID k8s.ServiceID, oldSvc, svc *k8s.Service, endpoints *k8s.Endpoints) error {
  1371  	// If east-west load balancing is disabled, we should not sync (add or
  1372  	// delete) K8s services to Cilium services.
  1373  	if option.Config.DisableK8sServices {
  1374  		return nil
  1375  	}
  1376  
  1377  	// Headless services do not need any datapath implementation
  1378  	if svc.IsHeadless {
  1379  		return nil
  1380  	}
  1381  
  1382  	scopedLog := log.WithFields(logrus.Fields{
  1383  		logfields.K8sSvcName:   svcID.Name,
  1384  		logfields.K8sNamespace: svcID.Namespace,
  1385  	})
  1386  
  1387  	svcs := datapathSVCs(scopedLog, svc, endpoints)
  1388  	svcMap := hashSVCMap(svcs)
  1389  
  1390  	if oldSvc != nil {
  1391  		// If we have oldService then we need to detect which frontends
  1392  		// are no longer in the updated service and delete them in the datapath.
  1393  
  1394  		oldSVCs := datapathSVCs(scopedLog, oldSvc, endpoints)
  1395  		oldSVCMap := hashSVCMap(oldSVCs)
  1396  
  1397  		for svcHash, oldSvc := range oldSVCMap {
  1398  			if _, ok := svcMap[svcHash]; !ok {
  1399  				if err := d.svcDeleteByFrontend(&oldSvc); err != nil {
  1400  					scopedLog.WithError(err).WithField(logfields.Object, logfields.Repr(oldSvc)).
  1401  						Warn("Error deleting service by frontend")
  1402  				} else {
  1403  					scopedLog.Debugf("# cilium lb delete-service %s %d 0", oldSvc.IP, oldSvc.Port)
  1404  				}
  1405  			}
  1406  		}
  1407  	}
  1408  
  1409  	for _, dpSvc := range svcs {
  1410  		if _, err := d.svcAdd(dpSvc.FE, dpSvc.BES, true, dpSvc.NodePort); err != nil {
  1411  			scopedLog.WithError(err).Error("Error while inserting service in LB map")
  1412  		}
  1413  	}
  1414  	return nil
  1415  }
  1416  
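        // addIngressV1beta1 handles the addition of a Kubernetes Ingress: it
        // records the ingress in the service cache with the host's IPv4 or IPv6
        // address as frontend and writes that address back into the ingress
        // status.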
  1417  func (d *Daemon) addIngressV1beta1(ingress *types.Ingress) error {
  1418  	scopedLog := log.WithFields(logrus.Fields{
  1419  		logfields.K8sIngressName: ingress.ObjectMeta.Name,
  1420  		logfields.K8sAPIVersion:  ingress.TypeMeta.APIVersion,
  1421  		logfields.K8sNamespace:   ingress.ObjectMeta.Namespace,
  1422  	})
  1423  	scopedLog.Info("Kubernetes ingress added")
  1424  
  1425  	var host net.IP
  1426  	switch {
  1427  	case option.Config.EnableIPv4:
  1428  		host = option.Config.HostV4Addr
  1429  	case option.Config.EnableIPv6:
  1430  		host = option.Config.HostV6Addr
  1431  	default:
  1432  		return fmt.Errorf("either IPv4 or IPv6 must be enabled")
  1433  	}
  1434  
  1435  	_, err := d.k8sSvcCache.UpdateIngress(ingress, host)
  1436  	if err != nil {
  1437  		return err
  1438  	}
  1439  
  1440  	hostname, _ := os.Hostname()
  1441  	dpyCopyIngress := ingress.DeepCopy()
  1442  	dpyCopyIngress.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
  1443  		{
  1444  			IP:       host.String(),
  1445  			Hostname: hostname,
  1446  		},
  1447  	}
  1448  
  1449  	_, err = k8s.Client().ExtensionsV1beta1().Ingresses(dpyCopyIngress.ObjectMeta.Namespace).UpdateStatus(dpyCopyIngress.Ingress)
  1450  	if err != nil {
  1451  		scopedLog.WithError(err).WithFields(logrus.Fields{
  1452  			logfields.K8sIngress: dpyCopyIngress,
  1453  		}).Error("Unable to update status of ingress")
  1454  	}
  1455  	return err
  1456  }
  1457  
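        // updateIngressV1beta1 handles an update of a Kubernetes Ingress. On
        // nodes without the LB option enabled it only installs RevNAT entries
        // for the addresses found in the ingress status; otherwise the ingress
        // is re-added when its backend service name or port changed.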
  1458  func (d *Daemon) updateIngressV1beta1(oldIngress, newIngress *types.Ingress) error {
  1459  	scopedLog := log.WithFields(logrus.Fields{
  1460  		logfields.K8sIngressName + ".old": oldIngress.ObjectMeta.Name,
  1461  		logfields.K8sAPIVersion + ".old":  oldIngress.TypeMeta.APIVersion,
  1462  		logfields.K8sNamespace + ".old":   oldIngress.ObjectMeta.Namespace,
  1463  		logfields.K8sIngressName:          newIngress.ObjectMeta.Name,
  1464  		logfields.K8sAPIVersion:           newIngress.TypeMeta.APIVersion,
  1465  		logfields.K8sNamespace:            newIngress.ObjectMeta.Namespace,
  1466  	})
  1467  
  1468  	if oldIngress.Spec.Backend == nil || newIngress.Spec.Backend == nil {
  1469  		// We only support Single Service Ingress for now
  1470  		scopedLog.Warn("Cilium only supports Single Service Ingress for now, ignoring ingress")
  1471  		return nil
  1472  	}
  1473  
  1474  	// Add RevNAT to the BPF map on non-LB nodes when an LB node updates the
  1475  	// ingress status with its address.
  1476  	if !option.Config.IsLBEnabled() {
  1477  		port := newIngress.Spec.Backend.ServicePort.IntValue()
  1478  		for _, lb := range newIngress.Status.LoadBalancer.Ingress {
  1479  			ingressIP := net.ParseIP(lb.IP)
  1480  			if ingressIP == nil {
  1481  				continue
  1482  			}
  1483  			feAddr := loadbalancer.NewL3n4Addr(loadbalancer.TCP, ingressIP, uint16(port))
  1484  			feAddrID, err := service.AcquireID(*feAddr, 0)
  1485  			if err != nil {
  1486  				scopedLog.WithError(err).Error("Error while getting a new service ID. Ignoring ingress...")
  1487  				continue
  1488  			}
  1489  			scopedLog.WithFields(logrus.Fields{
  1490  				logfields.ServiceID: feAddrID.ID,
  1491  			}).Debug("Got service ID for ingress")
  1492  
  1493  			if err := d.RevNATAdd(loadbalancer.ServiceID(feAddrID.ID),
  1494  				feAddrID.L3n4Addr); err != nil {
  1495  				scopedLog.WithError(err).WithFields(logrus.Fields{
  1496  					logfields.ServiceID: feAddrID.ID,
  1497  					logfields.IPAddr:    feAddrID.L3n4Addr.IP,
  1498  					logfields.Port:      feAddrID.L3n4Addr.Port,
  1499  					logfields.Protocol:  feAddrID.L3n4Addr.Protocol,
  1500  				}).Error("Unable to add reverse NAT ID for ingress")
  1501  			}
  1502  		}
  1503  		return nil
  1504  	}
  1505  
  1506  	if oldIngress.Spec.Backend.ServiceName == newIngress.Spec.Backend.ServiceName &&
  1507  		oldIngress.Spec.Backend.ServicePort == newIngress.Spec.Backend.ServicePort {
  1508  		return nil
  1509  	}
  1510  
  1511  	return d.addIngressV1beta1(newIngress)
  1512  }
  1513  
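        // deleteIngressV1beta1 removes the ingress from the service cache and,
        // on non-LB nodes, deletes the RevNAT entries installed for the
        // addresses in the ingress status.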
  1514  func (d *Daemon) deleteIngressV1beta1(ingress *types.Ingress) error {
  1515  	scopedLog := log.WithFields(logrus.Fields{
  1516  		logfields.K8sIngressName: ingress.ObjectMeta.Name,
  1517  		logfields.K8sAPIVersion:  ingress.TypeMeta.APIVersion,
  1518  		logfields.K8sNamespace:   ingress.ObjectMeta.Namespace,
  1519  	})
  1520  
  1521  	if ingress.Spec.Backend == nil {
  1522  		// We only support Single Service Ingress for now
  1523  		scopedLog.Warn("Cilium only supports Single Service Ingress for now, ignoring ingress deletion")
  1524  		return nil
  1525  	}
  1526  
  1527  	d.k8sSvcCache.DeleteIngress(ingress)
  1528  
  1529  	// Remove RevNAT from the BPF Map for non-LB nodes.
  1530  	if !option.Config.IsLBEnabled() {
  1531  		port := ingress.Spec.Backend.ServicePort.IntValue()
  1532  		for _, lb := range ingress.Status.LoadBalancer.Ingress {
  1533  			ingressIP := net.ParseIP(lb.IP)
  1534  			if ingressIP == nil {
  1535  				continue
  1536  			}
  1537  			feAddr := loadbalancer.NewL3n4Addr(loadbalancer.TCP, ingressIP, uint16(port))
  1538  			// This is the only way that we can get the service's ID
  1539  			// without accessing the KVStore.
  1540  			svc := d.svcGetBySHA256Sum(feAddr.SHA256Sum())
  1541  			if svc != nil {
  1542  				if err := d.RevNATDelete(loadbalancer.ServiceID(svc.FE.ID)); err != nil {
  1543  					scopedLog.WithError(err).WithFields(logrus.Fields{
  1544  						logfields.ServiceID: svc.FE.ID,
  1545  					}).Error("Error while removing RevNAT for ingress")
  1546  				}
  1547  			}
  1548  		}
  1549  		return nil
  1550  	}
  1551  
  1552  	return nil
  1553  }
  1554  
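        // updateCiliumNetworkPolicyV2AnnotationsOnly pushes a CNP status update
        // for a change that only touched annotations, reusing the revision and
        // import error cached by addCiliumNetworkPolicyV2.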
  1555  func (d *Daemon) updateCiliumNetworkPolicyV2AnnotationsOnly(ciliumNPClient clientset.Interface, ciliumV2Store cache.Store, cnp *types.SlimCNP) {
  1556  
  1557  	scopedLog := log.WithFields(logrus.Fields{
  1558  		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
  1559  		logfields.K8sAPIVersion:           cnp.TypeMeta.APIVersion,
  1560  		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
  1561  	})
  1562  
  1563  	scopedLog.Info("Updating node status due to annotations-only change to CiliumNetworkPolicy")
  1564  
  1565  	ctrlName := cnp.GetControllerName()
  1566  
  1567  	// Revision will *always* be populated because importMetadataCache is guaranteed
  1568  	// to be updated by addCiliumNetworkPolicyV2 before calls to
  1569  	// updateCiliumNetworkPolicyV2 are invoked.
  1570  	meta, _ := importMetadataCache.get(cnp)
  1571  	updateContext := &k8s.CNPStatusUpdateContext{
  1572  		CiliumNPClient:              ciliumNPClient,
  1573  		CiliumV2Store:               ciliumV2Store,
  1574  		NodeName:                    node.GetName(),
  1575  		NodeManager:                 d.nodeDiscovery.Manager,
  1576  		UpdateDuration:              spanstat.Start(),
  1577  		WaitForEndpointsAtPolicyRev: endpointmanager.WaitForEndpointsAtPolicyRev,
  1578  	}
  1579  
  1580  	k8sCM.UpdateController(ctrlName,
  1581  		controller.ControllerParams{
  1582  			DoFunc: func(ctx context.Context) error {
  1583  				return updateContext.UpdateStatus(ctx, cnp, meta.revision, meta.policyImportError)
  1584  			},
  1585  		})
  1586  
  1587  }
  1588  
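        // addCiliumNetworkPolicyV2 parses the given CiliumNetworkPolicy,
        // imports its rules into the policy repository, caches the resulting
        // revision and import error, and (unless disabled) starts a controller
        // that reports the import status back to the CNP object.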
  1589  func (d *Daemon) addCiliumNetworkPolicyV2(ciliumNPClient clientset.Interface, ciliumV2Store cache.Store, cnp *types.SlimCNP) error {
  1590  	scopedLog := log.WithFields(logrus.Fields{
  1591  		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
  1592  		logfields.K8sAPIVersion:           cnp.TypeMeta.APIVersion,
  1593  		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
  1594  	})
  1595  
  1596  	scopedLog.Debug("Adding CiliumNetworkPolicy")
  1597  
  1598  	var rev uint64
  1599  
  1600  	rules, policyImportErr := cnp.Parse()
  1601  	if policyImportErr == nil {
  1602  		policyImportErr = k8s.PreprocessRules(rules, &d.k8sSvcCache)
  1603  		// Replace all rules with the same name, namespace and
  1604  		// resourceTypeCiliumNetworkPolicy
  1605  		rev, policyImportErr = d.PolicyAdd(rules, &AddOptions{
  1606  			ReplaceWithLabels: cnp.GetIdentityLabels(),
  1607  			Source:            metrics.LabelEventSourceK8s,
  1608  		})
  1609  	}
  1610  
  1611  	if policyImportErr != nil {
  1612  		scopedLog.WithError(policyImportErr).Warn("Unable to add CiliumNetworkPolicy")
  1613  	} else {
  1614  		scopedLog.Info("Imported CiliumNetworkPolicy")
  1615  	}
  1616  
  1617  	// Upsert to rule revision cache outside of controller, because upsertion
  1618  	// *must* be synchronous so that if we get an update for the CNP, the cache
  1619  	// is populated by the time updateCiliumNetworkPolicyV2 is invoked.
  1620  	importMetadataCache.upsert(cnp, rev, policyImportErr)
  1621  
  1622  	if !option.Config.DisableCNPStatusUpdates {
  1623  		updateContext := &k8s.CNPStatusUpdateContext{
  1624  			CiliumNPClient:              ciliumNPClient,
  1625  			CiliumV2Store:               ciliumV2Store,
  1626  			NodeName:                    node.GetName(),
  1627  			NodeManager:                 d.nodeDiscovery.Manager,
  1628  			UpdateDuration:              spanstat.Start(),
  1629  			WaitForEndpointsAtPolicyRev: endpointmanager.WaitForEndpointsAtPolicyRev,
  1630  		}
  1631  
  1632  		ctrlName := cnp.GetControllerName()
  1633  		k8sCM.UpdateController(ctrlName,
  1634  			controller.ControllerParams{
  1635  				DoFunc: func(ctx context.Context) error {
  1636  					return updateContext.UpdateStatus(ctx, cnp, rev, policyImportErr)
  1637  				},
  1638  			},
  1639  		)
  1640  	}
  1641  
  1642  	return policyImportErr
  1643  }
  1644  
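        // deleteCiliumNetworkPolicyV2 removes the rules imported for the given
        // CiliumNetworkPolicy from the policy repository and stops the
        // controller responsible for its status updates.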
  1645  func (d *Daemon) deleteCiliumNetworkPolicyV2(cnp *types.SlimCNP) error {
  1646  	scopedLog := log.WithFields(logrus.Fields{
  1647  		logfields.CiliumNetworkPolicyName: cnp.ObjectMeta.Name,
  1648  		logfields.K8sAPIVersion:           cnp.TypeMeta.APIVersion,
  1649  		logfields.K8sNamespace:            cnp.ObjectMeta.Namespace,
  1650  	})
  1651  
  1652  	scopedLog.Debug("Deleting CiliumNetworkPolicy")
  1653  
  1654  	importMetadataCache.delete(cnp)
  1655  	ctrlName := cnp.GetControllerName()
  1656  	err := k8sCM.RemoveControllerAndWait(ctrlName)
  1657  	if err != nil {
  1658  		log.Debugf("Unable to remove controller %s: %s", ctrlName, err)
  1659  	}
  1660  
  1661  	_, err = d.PolicyDelete(cnp.GetIdentityLabels())
  1662  	if err == nil {
  1663  		scopedLog.Info("Deleted CiliumNetworkPolicy")
  1664  	} else {
  1665  		scopedLog.WithError(err).Warn("Unable to delete CiliumNetworkPolicy")
  1666  	}
  1667  	return err
  1668  }
  1669  
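        // updateCiliumNetworkPolicyV2 handles an update of a
        // CiliumNetworkPolicy. When status updates are enabled and the spec is
        // unchanged, only an annotation-driven status refresh is performed;
        // otherwise the policy is re-imported via addCiliumNetworkPolicyV2.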
  1670  func (d *Daemon) updateCiliumNetworkPolicyV2(ciliumNPClient clientset.Interface,
  1671  	ciliumV2Store cache.Store,
  1672  	oldRuleCpy, newRuleCpy *types.SlimCNP) error {
  1673  
  1674  	_, err := oldRuleCpy.Parse()
  1675  	if err != nil {
  1676  		log.WithError(err).WithField(logfields.Object, logfields.Repr(oldRuleCpy)).
  1677  			Warn("Error parsing old CiliumNetworkPolicy rule")
  1678  		return err
  1679  	}
  1680  	_, err = newRuleCpy.Parse()
  1681  	if err != nil {
  1682  		log.WithError(err).WithField(logfields.Object, logfields.Repr(newRuleCpy)).
  1683  			Warn("Error parsing new CiliumNetworkPolicy rule")
  1684  		return err
  1685  	}
  1686  
  1687  	log.WithFields(logrus.Fields{
  1688  		logfields.K8sAPIVersion:                    oldRuleCpy.TypeMeta.APIVersion,
  1689  		logfields.CiliumNetworkPolicyName + ".old": oldRuleCpy.ObjectMeta.Name,
  1690  		logfields.K8sNamespace + ".old":            oldRuleCpy.ObjectMeta.Namespace,
  1691  		logfields.CiliumNetworkPolicyName:          newRuleCpy.ObjectMeta.Name,
  1692  		logfields.K8sNamespace:                     newRuleCpy.ObjectMeta.Namespace,
  1693  		"annotations.old":                          oldRuleCpy.ObjectMeta.Annotations,
  1694  		"annotations":                              newRuleCpy.ObjectMeta.Annotations,
  1695  	}).Debug("Modified CiliumNetworkPolicy")
  1696  
  1697  	// Do not add rule into policy repository if the spec remains unchanged.
  1698  	if !option.Config.DisableCNPStatusUpdates {
  1699  		if oldRuleCpy.SpecEquals(newRuleCpy.CiliumNetworkPolicy) {
  1700  			if !oldRuleCpy.AnnotationsEquals(newRuleCpy.CiliumNetworkPolicy) {
  1701  
  1702  				// Update annotations within a controller so the status of the update
  1703  				// is trackable from the list of running controllers, and so we do
  1704  				// not block subsequent policy lifecycle operations from Kubernetes
  1705  				// until the update is complete.
  1706  				oldCtrlName := oldRuleCpy.GetControllerName()
  1707  				newCtrlName := newRuleCpy.GetControllerName()
  1708  
  1709  				// In case the controller name changes between copies of rules,
  1710  				// remove old controller so we do not leak goroutines.
  1711  				if oldCtrlName != newCtrlName {
  1712  					err := k8sCM.RemoveController(oldCtrlName)
  1713  					if err != nil {
  1714  						log.Debugf("Unable to remove controller %s: %s", oldCtrlName, err)
  1715  					}
  1716  				}
  1717  				d.updateCiliumNetworkPolicyV2AnnotationsOnly(ciliumNPClient, ciliumV2Store, newRuleCpy)
  1718  			}
  1719  			return nil
  1720  		}
  1721  	}
  1722  
  1723  	return d.addCiliumNetworkPolicyV2(ciliumNPClient, ciliumV2Store, newRuleCpy)
  1724  }
  1725  
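        // updatePodHostIP associates the pod IP with the pod's host IP and the
        // reserved unmanaged identity in the ipcache. The returned boolean
        // indicates whether the ipcache update was skipped; the error carries
        // the reason.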
  1726  func (d *Daemon) updatePodHostIP(pod *types.Pod) (bool, error) {
  1727  	if pod.SpecHostNetwork {
  1728  		return true, fmt.Errorf("pod is using host networking")
  1729  	}
  1730  
  1731  	hostIP := net.ParseIP(pod.StatusHostIP)
  1732  	if hostIP == nil {
  1733  		return true, fmt.Errorf("no/invalid HostIP: %s", pod.StatusHostIP)
  1734  	}
  1735  
  1736  	podIP := net.ParseIP(pod.StatusPodIP)
  1737  	if podIP == nil {
  1738  		return true, fmt.Errorf("no/invalid PodIP: %s", pod.StatusPodIP)
  1739  	}
  1740  
  1741  	hostKey := node.GetIPsecKeyIdentity()
  1742  
  1743  	// Initial mapping of podIP <-> hostIP <-> identity. The mapping is
  1744  	// later updated once the allocator has determined the real identity.
  1745  	// If the endpoint remains unmanaged, the identity remains untouched.
  1746  	selfOwned := ipcache.IPIdentityCache.Upsert(pod.StatusPodIP, hostIP, hostKey, ipcache.Identity{
  1747  		ID:     identity.ReservedIdentityUnmanaged,
  1748  		Source: source.Kubernetes,
  1749  	})
  1750  	if !selfOwned {
  1751  		return true, errIPCacheOwnedByNonK8s
  1752  	}
  1753  
  1754  	return false, nil
  1755  }
  1756  
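        // deletePodHostIP removes the pod IP from the ipcache, provided the
        // entry is owned by the Kubernetes source. The returned boolean
        // indicates whether the deletion was skipped.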
  1757  func (d *Daemon) deletePodHostIP(pod *types.Pod) (bool, error) {
  1758  	if pod.SpecHostNetwork {
  1759  		return true, fmt.Errorf("pod is using host networking")
  1760  	}
  1761  
  1762  	podIP := net.ParseIP(pod.StatusPodIP)
  1763  	if podIP == nil {
  1764  		return true, fmt.Errorf("no/invalid PodIP: %s", pod.StatusPodIP)
  1765  	}
  1766  
  1767  	// A small race condition exists here: the deletion could occur in
  1768  	// parallel based on another event, but it does not matter because the
  1769  	// identity is going away anyway.
  1770  	id, exists := ipcache.IPIdentityCache.LookupByIP(pod.StatusPodIP)
  1771  	if !exists {
  1772  		return true, fmt.Errorf("identity for IP does not exist in cache")
  1773  	}
  1774  
  1775  	if id.Source != source.Kubernetes {
  1776  		return true, fmt.Errorf("ipcache entry not owned by kubernetes source")
  1777  	}
  1778  
  1779  	ipcache.IPIdentityCache.Delete(pod.StatusPodIP, source.Kubernetes)
  1780  
  1781  	return false, nil
  1782  }
  1783  
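        // addK8sPodV1 reacts to a new pod by updating the ipcache mapping of
        // the pod IP to its host IP.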
  1784  func (d *Daemon) addK8sPodV1(pod *types.Pod) error {
  1785  	logger := log.WithFields(logrus.Fields{
  1786  		logfields.K8sPodName:   pod.ObjectMeta.Name,
  1787  		logfields.K8sNamespace: pod.ObjectMeta.Namespace,
  1788  		"podIP":                pod.StatusPodIP,
  1789  		"hostIP":               pod.StatusHostIP,
  1790  	})
  1791  
  1792  	skipped, err := d.updatePodHostIP(pod)
  1793  	switch {
  1794  	case skipped:
  1795  		logger.WithError(err).Debug("Skipped ipcache map update on pod add")
  1796  		return nil
  1797  	case err != nil:
  1798  		msg := "Unable to update ipcache map entry on pod add"
  1799  		if err == errIPCacheOwnedByNonK8s {
  1800  			logger.WithError(err).Debug(msg)
  1801  		} else {
  1802  			logger.WithError(err).Warning(msg)
  1803  		}
  1804  	default:
  1805  		logger.Debug("Updated ipcache map entry on pod add")
  1806  	}
  1807  	return err
  1808  }
  1809  
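        // updateK8sPodV1 refreshes the ipcache entry for the pod and, if the
        // pod labels changed, updates the identity labels of the corresponding
        // local endpoint.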
  1810  func (d *Daemon) updateK8sPodV1(oldK8sPod, newK8sPod *types.Pod) error {
  1811  	if oldK8sPod == nil || newK8sPod == nil {
  1812  		return nil
  1813  	}
  1814  
  1815  	// The pod IP can never change; it can only switch from unassigned to
  1816  	// assigned.
  1817  	d.addK8sPodV1(newK8sPod)
  1818  
  1819  	// We only care about label updates
  1820  	oldPodLabels := oldK8sPod.GetLabels()
  1821  	newPodLabels := newK8sPod.GetLabels()
  1822  	if comparator.MapStringEquals(oldPodLabels, newPodLabels) {
  1823  		return nil
  1824  	}
  1825  
  1826  	podNSName := k8sUtils.GetObjNamespaceName(&newK8sPod.ObjectMeta)
  1827  
  1828  	podEP := endpointmanager.LookupPodName(podNSName)
  1829  	if podEP == nil {
  1830  		log.WithField("pod", podNSName).Debug("No running endpoint found for the given pod")
  1831  		return nil
  1832  	}
  1833  
  1834  	newLabels := labels.Map2Labels(newPodLabels, labels.LabelSourceK8s)
  1835  	newIdtyLabels, _ := labels.FilterLabels(newLabels)
  1836  	oldLabels := labels.Map2Labels(oldPodLabels, labels.LabelSourceK8s)
  1837  	oldIdtyLabels, _ := labels.FilterLabels(oldLabels)
  1838  
  1839  	err := podEP.ModifyIdentityLabels(newIdtyLabels, oldIdtyLabels)
  1840  	if err != nil {
  1841  		log.WithError(err).Debug("Error while updating endpoint with new labels")
  1842  		return err
  1843  	}
  1844  
  1845  	log.WithFields(logrus.Fields{
  1846  		logfields.EndpointID: podEP.GetID(),
  1847  		logfields.Labels:     logfields.Repr(newIdtyLabels),
  1848  	}).Debug("Update endpoint with new labels")
  1849  	return nil
  1850  }
  1851  
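        // deleteK8sPodV1 removes the ipcache entry that was installed for the
        // deleted pod.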
  1852  func (d *Daemon) deleteK8sPodV1(pod *types.Pod) error {
  1853  	logger := log.WithFields(logrus.Fields{
  1854  		logfields.K8sPodName:   pod.ObjectMeta.Name,
  1855  		logfields.K8sNamespace: pod.ObjectMeta.Namespace,
  1856  		"podIP":                pod.StatusPodIP,
  1857  		"hostIP":               pod.StatusHostIP,
  1858  	})
  1859  
  1860  	skipped, err := d.deletePodHostIP(pod)
  1861  	switch {
  1862  	case skipped:
  1863  		logger.WithError(err).Debug("Skipped ipcache map delete on pod delete")
  1864  	case err != nil:
  1865  		logger.WithError(err).Warning("Unable to delete ipcache map entry on pod delete")
  1866  	default:
  1867  		logger.Debug("Deleted ipcache map entry on pod delete")
  1868  	}
  1869  	return err
  1870  }
  1871  
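        // updateK8sV1Namespace propagates namespace label changes to all local
        // endpoints running in that namespace by updating their identity
        // labels.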
  1872  func (d *Daemon) updateK8sV1Namespace(oldNS, newNS *types.Namespace) error {
  1873  	if oldNS == nil || newNS == nil {
  1874  		return nil
  1875  	}
  1876  
  1877  	// We only care about label updates
  1878  	if comparator.MapStringEquals(oldNS.GetLabels(), newNS.GetLabels()) {
  1879  		return nil
  1880  	}
  1881  
  1882  	oldNSLabels := map[string]string{}
  1883  	newNSLabels := map[string]string{}
  1884  
  1885  	for k, v := range oldNS.GetLabels() {
  1886  		oldNSLabels[policy.JoinPath(ciliumio.PodNamespaceMetaLabels, k)] = v
  1887  	}
  1888  	for k, v := range newNS.GetLabels() {
  1889  		newNSLabels[policy.JoinPath(ciliumio.PodNamespaceMetaLabels, k)] = v
  1890  	}
  1891  
  1892  	oldLabels := labels.Map2Labels(oldNSLabels, labels.LabelSourceK8s)
  1893  	newLabels := labels.Map2Labels(newNSLabels, labels.LabelSourceK8s)
  1894  
  1895  	oldIdtyLabels, _ := labels.FilterLabels(oldLabels)
  1896  	newIdtyLabels, _ := labels.FilterLabels(newLabels)
  1897  
  1898  	eps := endpointmanager.GetEndpoints()
  1899  	failed := false
  1900  	for _, ep := range eps {
  1901  		epNS := ep.GetK8sNamespace()
  1902  		if oldNS.Name == epNS {
  1903  			err := ep.ModifyIdentityLabels(newIdtyLabels, oldIdtyLabels)
  1904  			if err != nil {
  1905  				log.WithError(err).WithField(logfields.EndpointID, ep.ID).
  1906  					Warning("Unable to update endpoint with new namespace labels")
  1907  				failed = true
  1908  			}
  1909  		}
  1910  	}
  1911  	if failed {
  1912  		return errors.New("unable to update some endpoints with new namespace labels")
  1913  	}
  1914  	return nil
  1915  }
  1916  
  1917  // K8sEventProcessed performs metrics accounting for each processed
  1918  // Kubernetes event.
  1919  func (d *Daemon) K8sEventProcessed(scope string, action string, status bool) {
  1920  	result := "success"
  1921  	if !status {
  1922  		result = "failed"
  1923  	}
  1924  
  1925  	metrics.KubernetesEventProcessed.WithLabelValues(scope, action, result).Inc()
  1926  }
  1927  
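        // endpointUpdated upserts all addressing pairs of the given
        // CiliumEndpoint into the ipcache together with the endpoint's node IP,
        // encryption key and identity.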
  1928  func endpointUpdated(endpoint *types.CiliumEndpoint) {
  1929  	// default to the standard key
  1930  	encryptionKey := node.GetIPsecKeyIdentity()
  1931  
  1932  	id := identity.ReservedIdentityUnmanaged
  1933  	if endpoint.Identity != nil {
  1934  		id = identity.NumericIdentity(endpoint.Identity.ID)
  1935  	}
  1936  
  1937  	if endpoint.Encryption != nil {
  1938  		encryptionKey = uint8(endpoint.Encryption.Key)
  1939  	}
  1940  
  1941  	if endpoint.Networking != nil {
  1942  		if endpoint.Networking.NodeIP == "" {
  1943  			// When upgrading from an older version, the nodeIP may
  1944  			// not be available yet in the CiliumEndpoint and we
  1945  			// have to wait for it to be propagated
  1946  			return
  1947  		}
  1948  
  1949  		nodeIP := net.ParseIP(endpoint.Networking.NodeIP)
  1950  		if nodeIP == nil {
  1951  			log.WithField("nodeIP", endpoint.Networking.NodeIP).Warning("Unable to parse node IP while processing CiliumEndpoint update")
  1952  			return
  1953  		}
  1954  
  1955  		for _, pair := range endpoint.Networking.Addressing {
  1956  			if pair.IPV4 != "" {
  1957  				ipcache.IPIdentityCache.Upsert(pair.IPV4, nodeIP, encryptionKey,
  1958  					ipcache.Identity{ID: id, Source: source.CustomResource})
  1959  			}
  1960  
  1961  			if pair.IPV6 != "" {
  1962  				ipcache.IPIdentityCache.Upsert(pair.IPV6, nodeIP, encryptionKey,
  1963  					ipcache.Identity{ID: id, Source: source.CustomResource})
  1964  			}
  1965  		}
  1966  	}
  1967  }
  1968  
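        // endpointDeleted removes all addresses of the given CiliumEndpoint
        // from the ipcache.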
  1969  func endpointDeleted(endpoint *types.CiliumEndpoint) {
  1970  	if endpoint.Networking != nil {
  1971  		for _, pair := range endpoint.Networking.Addressing {
  1972  			if pair.IPV4 != "" {
  1973  				ipcache.IPIdentityCache.Delete(pair.IPV4, source.CustomResource)
  1974  			}
  1975  
  1976  			if pair.IPV6 != "" {
  1977  				ipcache.IPIdentityCache.Delete(pair.IPV6, source.CustomResource)
  1978  			}
  1979  		}
  1980  	}
  1981  }