istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pilot/pkg/serviceregistry/kube/controller/controller.go

     1  // Copyright Istio Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package controller
    16  
    17  import (
    18  	"fmt"
    19  	"sort"
    20  	"sync"
    21  	"time"
    22  
    23  	"github.com/hashicorp/go-multierror"
    24  	"go.uber.org/atomic"
    25  	v1 "k8s.io/api/core/v1"
    26  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    27  	klabels "k8s.io/apimachinery/pkg/labels"
    28  	"k8s.io/apimachinery/pkg/types"
    29  
    30  	"istio.io/api/label"
    31  	"istio.io/istio/pilot/pkg/features"
    32  	"istio.io/istio/pilot/pkg/model"
    33  	"istio.io/istio/pilot/pkg/serviceregistry"
    34  	"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
    35  	"istio.io/istio/pilot/pkg/serviceregistry/kube"
    36  	"istio.io/istio/pilot/pkg/serviceregistry/kube/controller/ambient"
    37  	"istio.io/istio/pilot/pkg/serviceregistry/provider"
    38  	labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
    39  	"istio.io/istio/pilot/pkg/serviceregistry/util/workloadinstances"
    40  	"istio.io/istio/pkg/cluster"
    41  	"istio.io/istio/pkg/config"
    42  	"istio.io/istio/pkg/config/host"
    43  	"istio.io/istio/pkg/config/labels"
    44  	"istio.io/istio/pkg/config/mesh"
    45  	"istio.io/istio/pkg/config/protocol"
    46  	"istio.io/istio/pkg/config/schema/kind"
    47  	"istio.io/istio/pkg/config/visibility"
    48  	kubelib "istio.io/istio/pkg/kube"
    49  	"istio.io/istio/pkg/kube/controllers"
    50  	"istio.io/istio/pkg/kube/kclient"
    51  	istiolog "istio.io/istio/pkg/log"
    52  	"istio.io/istio/pkg/maps"
    53  	"istio.io/istio/pkg/monitoring"
    54  	"istio.io/istio/pkg/network"
    55  	"istio.io/istio/pkg/ptr"
    56  	"istio.io/istio/pkg/queue"
    57  	"istio.io/istio/pkg/slices"
    58  	"istio.io/istio/pkg/util/sets"
    59  )
    60  
    61  const (
    62  	// NodeRegionLabel is the well-known label for kubernetes node region in beta
    63  	NodeRegionLabel = v1.LabelFailureDomainBetaRegion
    64  	// NodeZoneLabel is the well-known label for kubernetes node zone in beta
    65  	NodeZoneLabel = v1.LabelFailureDomainBetaZone
    66  	// NodeRegionLabelGA is the well-known label for kubernetes node region in ga
    67  	NodeRegionLabelGA = v1.LabelTopologyRegion
    68  	// NodeZoneLabelGA is the well-known label for kubernetes node zone in ga
    69  	NodeZoneLabelGA = v1.LabelTopologyZone
    70  
    71  	// DefaultNetworkGatewayPort is the port used by default for cross-network traffic if not otherwise specified
    72  	// by meshNetworks or "networking.istio.io/gatewayPort"
    73  	DefaultNetworkGatewayPort = 15443
    74  )
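        // Illustrative sketch (not used by the code above; field values are hypothetical): the gateway port
        // can be overridden per Service via the "networking.istio.io/gatewayPort" annotation mentioned above,
        // falling back to DefaultNetworkGatewayPort otherwise. A cross-network ingress gateway Service might
        // carry the annotation like this:
        //
        //	gw := &v1.Service{
        //		ObjectMeta: metav1.ObjectMeta{
        //			Name:        "istio-ingressgateway",
        //			Namespace:   "istio-system",
        //			Annotations: map[string]string{"networking.istio.io/gatewayPort": "15443"},
        //		},
        //		Spec: v1.ServiceSpec{Type: v1.ServiceTypeNodePort},
        //	}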
    75  
    76  var log = istiolog.RegisterScope("kube", "kubernetes service registry controller")
    77  
    78  var (
    79  	typeTag  = monitoring.CreateLabel("type")
    80  	eventTag = monitoring.CreateLabel("event")
    81  
    82  	k8sEvents = monitoring.NewSum(
    83  		"pilot_k8s_reg_events",
    84  		"Events from k8s registry.",
    85  	)
    86  
    87  	// nolint: gocritic
    88  	// This is deprecated in favor of `pilot_k8s_endpoints_pending_pod`, which is a gauge indicating the number of
    89  	// currently missing pods. This helps distinguish transient errors from permanent ones.
    90  	endpointsWithNoPods = monitoring.NewSum(
    91  		"pilot_k8s_endpoints_with_no_pods",
    92  		"Endpoints that do not have any corresponding pods.")
    93  
    94  	endpointsPendingPodUpdate = monitoring.NewGauge(
    95  		"pilot_k8s_endpoints_pending_pod",
    96  		"Number of endpoints that do not currently have any corresponding pods.",
    97  	)
    98  )
    99  
   100  // Options stores the configurable attributes of a Controller.
   101  type Options struct {
   102  	SystemNamespace string
   103  
   104  	// MeshServiceController is a mesh-wide service Controller.
   105  	MeshServiceController *aggregate.Controller
   106  
   107  	DomainSuffix string
   108  
   109  	// ClusterID identifies the cluster that the controller communicates with.
   110  	ClusterID cluster.ID
   111  
   112  	// ClusterAliases are alias names for clusters. When a proxy connects with a cluster ID
   113  	// that has an alias, the alias is used as the cluster ID for that proxy.
   114  	ClusterAliases map[string]string
   115  
   116  	// Metrics for capturing node-based metrics.
   117  	Metrics model.Metrics
   118  
   119  	// XDSUpdater will push changes to the xDS server.
   120  	XDSUpdater model.XDSUpdater
   121  
   122  	// MeshNetworksWatcher observes changes to the mesh networks config.
   123  	MeshNetworksWatcher mesh.NetworksWatcher
   124  
   125  	// MeshWatcher observes changes to the mesh config
   126  	MeshWatcher mesh.Watcher
   127  
   128  	// Maximum QPS when communicating with kubernetes API
   129  	KubernetesAPIQPS float32
   130  
   131  	// Maximum burst for throttle when communicating with the kubernetes API
   132  	KubernetesAPIBurst int
   133  
   134  	// SyncTimeout, if set, causes HasSynced to return true once the timeout elapses, even if the initial sync is incomplete.
   135  	SyncTimeout time.Duration
   136  
   137  	// Revision of this Istiod instance
   138  	Revision string
   139  
   140  	ConfigCluster bool
   141  
   142  	CniNamespace string
   143  }
   144  
   145  // kubernetesNode represents a kubernetes node that is reachable externally
   146  type kubernetesNode struct {
   147  	address string
   148  	labels  labels.Instance
   149  }
   150  
   151  // controllerInterface is a simplified interface for the Controller used for testing.
   152  type controllerInterface interface {
   153  	getPodLocality(pod *v1.Pod) string
   154  	Network(endpointIP string, labels labels.Instance) network.ID
   155  	Cluster() cluster.ID
   156  }
   157  
   158  var (
   159  	_ controllerInterface      = &Controller{}
   160  	_ serviceregistry.Instance = &Controller{}
   161  )
   162  
   163  type ambientIndex = ambient.Index
   164  
   165  // Controller is a collection of synchronized resource watchers
   166  // Caches are thread-safe
   167  type Controller struct {
   168  	opts Options
   169  
   170  	client kubelib.Client
   171  
   172  	queue queue.Instance
   173  
   174  	namespaces kclient.Client[*v1.Namespace]
   175  	services   kclient.Client[*v1.Service]
   176  
   177  	endpoints *endpointSliceController
   178  
   179  	// Used to watch nodes accessible from remote clusters.
   180  	// In a multi-cluster (shared control plane, multi-network) scenario, the ingress gateway service can be of NodePort type.
   181  	// With this, we can populate the mesh's gateway addresses with the node IPs.
   182  	nodes kclient.Client[*v1.Node]
   183  
   184  	exports serviceExportCache
   185  	imports serviceImportCache
   186  	pods    *PodCache
   187  
   188  	crdHandlers                []func(name string)
   189  	handlers                   model.ControllerHandlers
   190  	namespaceDiscoveryHandlers []func(ns string, event model.Event)
   191  
   192  	// This is only used for tests.
   193  	stop chan struct{}
   194  
   195  	sync.RWMutex
   196  	// servicesMap stores hostname ==> service; it is used to reduce convertService calls.
   197  	servicesMap map[host.Name]*model.Service
   198  	// nodeSelectorsForServices stores hostname => label selectors that can be used to
   199  	// refine the set of node port IPs for a service.
   200  	nodeSelectorsForServices map[host.Name]labels.Instance
   201  	// map of node name to its address+labels - this is the only thing we need from nodes
   202  	// for VM-to-k8s or cross-cluster traffic. When NodePort services select specific nodes by labels,
   203  	// we run through the label selectors here to pick only the ones that we need.
   204  	// Only nodes with ExternalIP addresses are included in this map!
   205  	nodeInfoMap map[string]kubernetesNode
   206  	// index over workload instances from workload entries
   207  	workloadInstancesIndex workloadinstances.Index
   208  
   209  	*networkManager
   210  
   211  	ambientIndex
   212  
   213  	// initialSyncTimedout is set to true if the initial sync does not complete before SyncTimeout.
   214  	initialSyncTimedout *atomic.Bool
   215  	meshWatcher         mesh.Watcher
   216  
   217  	podsClient kclient.Client[*v1.Pod]
   218  
   219  	configCluster bool
   220  
   221  	networksHandlerRegistration *mesh.WatcherHandlerRegistration
   222  	meshHandlerRegistration     *mesh.WatcherHandlerRegistration
   223  }
   224  
   225  // NewController creates a new Kubernetes controller
   226  // Created by bootstrap and multicluster (see multicluster.Controller).
   227  func NewController(kubeClient kubelib.Client, options Options) *Controller {
   228  	c := &Controller{
   229  		opts:                     options,
   230  		client:                   kubeClient,
   231  		queue:                    queue.NewQueueWithID(1*time.Second, string(options.ClusterID)),
   232  		servicesMap:              make(map[host.Name]*model.Service),
   233  		nodeSelectorsForServices: make(map[host.Name]labels.Instance),
   234  		nodeInfoMap:              make(map[string]kubernetesNode),
   235  		workloadInstancesIndex:   workloadinstances.NewIndex(),
   236  		initialSyncTimedout:      atomic.NewBool(false),
   237  
   238  		configCluster: options.ConfigCluster,
   239  	}
   240  	c.networkManager = initNetworkManager(c, options)
   241  
   242  	c.namespaces = kclient.NewFiltered[*v1.Namespace](kubeClient, kclient.Filter{ObjectFilter: kubeClient.ObjectFilter()})
   243  
   244  	if c.opts.SystemNamespace != "" {
   245  		registerHandlers[*v1.Namespace](
   246  			c,
   247  			c.namespaces,
   248  			"Namespaces",
   249  			func(old *v1.Namespace, cur *v1.Namespace, event model.Event) error {
   250  				if cur.Name == c.opts.SystemNamespace {
   251  					return c.onSystemNamespaceEvent(old, cur, event)
   252  				}
   253  				return nil
   254  			},
   255  			nil,
   256  		)
   257  	}
   258  
   259  	c.services = kclient.NewFiltered[*v1.Service](kubeClient, kclient.Filter{ObjectFilter: kubeClient.ObjectFilter()})
   260  
   261  	registerHandlers[*v1.Service](c, c.services, "Services", c.onServiceEvent, nil)
   262  
   263  	c.endpoints = newEndpointSliceController(c)
   264  
   265  	// This is for getting the node IPs of a selected set of nodes
   266  	c.nodes = kclient.NewFiltered[*v1.Node](kubeClient, kclient.Filter{ObjectTransform: kubelib.StripNodeUnusedFields})
   267  	registerHandlers[*v1.Node](c, c.nodes, "Nodes", c.onNodeEvent, nil)
   268  
   269  	c.podsClient = kclient.NewFiltered[*v1.Pod](kubeClient, kclient.Filter{
   270  		ObjectFilter:    kubeClient.ObjectFilter(),
   271  		ObjectTransform: kubelib.StripPodUnusedFields,
   272  	})
   273  	c.pods = newPodCache(c, c.podsClient, func(key types.NamespacedName) {
   274  		c.queue.Push(func() error {
   275  			return c.endpoints.podArrived(key.Name, key.Namespace)
   276  		})
   277  	})
   278  	registerHandlers[*v1.Pod](c, c.podsClient, "Pods", c.pods.onEvent, c.pods.labelFilter)
   279  
   280  	if features.EnableAmbient {
   281  		c.ambientIndex = ambient.New(ambient.Options{
   282  			Client:          kubeClient,
   283  			SystemNamespace: options.SystemNamespace,
   284  			DomainSuffix:    options.DomainSuffix,
   285  			ClusterID:       options.ClusterID,
   286  			Revision:        options.Revision,
   287  			XDSUpdater:      options.XDSUpdater,
   288  			LookupNetwork:   c.Network,
   289  		})
   290  	}
   291  	c.exports = newServiceExportCache(c)
   292  	c.imports = newServiceImportCache(c)
   293  
   294  	c.meshWatcher = options.MeshWatcher
   295  	if c.opts.MeshNetworksWatcher != nil {
   296  		c.networksHandlerRegistration = c.opts.MeshNetworksWatcher.AddNetworksHandler(func() {
   297  			c.reloadMeshNetworks()
   298  			c.onNetworkChange()
   299  		})
   300  		c.reloadMeshNetworks()
   301  	}
   302  	return c
   303  }
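        // Illustrative usage sketch (assumptions: a kubelib.Client named "client" and a model.XDSUpdater named
        // "xdsUpdater" already exist in the caller; this is not how bootstrap wires things verbatim):
        //
        //	ctl := NewController(client, Options{
        //		ClusterID:    "cluster-1",
        //		DomainSuffix: "cluster.local",
        //		XDSUpdater:   xdsUpdater,
        //	})
        //	stop := make(chan struct{})
        //	go ctl.Run(stop)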
   304  
   305  func (c *Controller) Provider() provider.ID {
   306  	return provider.Kubernetes
   307  }
   308  
   309  func (c *Controller) Cluster() cluster.ID {
   310  	return c.opts.ClusterID
   311  }
   312  
   313  func (c *Controller) MCSServices() []model.MCSServiceInfo {
   314  	outMap := make(map[types.NamespacedName]model.MCSServiceInfo)
   315  
   316  	// Add the ServiceExport info.
   317  	for _, se := range c.exports.ExportedServices() {
   318  		mcsService := outMap[se.namespacedName]
   319  		mcsService.Cluster = c.Cluster()
   320  		mcsService.Name = se.namespacedName.Name
   321  		mcsService.Namespace = se.namespacedName.Namespace
   322  		mcsService.Exported = true
   323  		mcsService.Discoverability = se.discoverability
   324  		outMap[se.namespacedName] = mcsService
   325  	}
   326  
   327  	// Add the ServiceImport info.
   328  	for _, si := range c.imports.ImportedServices() {
   329  		mcsService := outMap[si.namespacedName]
   330  		mcsService.Cluster = c.Cluster()
   331  		mcsService.Name = si.namespacedName.Name
   332  		mcsService.Namespace = si.namespacedName.Namespace
   333  		mcsService.Imported = true
   334  		mcsService.ClusterSetVIP = si.clusterSetVIP
   335  		outMap[si.namespacedName] = mcsService
   336  	}
   337  
   338  	return maps.Values(outMap)
   339  }
   340  
   341  func (c *Controller) Network(endpointIP string, labels labels.Instance) network.ID {
   342  	// 1. check the pod/workloadEntry label
   343  	if nw := labels[label.TopologyNetwork.Name]; nw != "" {
   344  		return network.ID(nw)
   345  	}
   346  
   347  	// 2. check the system namespace labels
   348  	if nw := c.networkFromSystemNamespace(); nw != "" {
   349  		return nw
   350  	}
   351  
   352  	// 3. check the meshNetworks config
   353  	if nw := c.networkFromMeshNetworks(endpointIP); nw != "" {
   354  		return nw
   355  	}
   356  
   357  	return ""
   358  }
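        // Illustrative sketch (hypothetical values): an endpoint whose pod or WorkloadEntry carries the
        // "topology.istio.io/network" label short-circuits at step 1 above:
        //
        //	nw := c.Network("10.0.0.5", labels.Instance{label.TopologyNetwork.Name: "network-2"})
        //	// nw == network.ID("network-2")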
   359  
   360  func (c *Controller) Cleanup() error {
   361  	if err := queue.WaitForClose(c.queue, 30*time.Second); err != nil {
   362  		log.Warnf("queue for removed kube registry %q may not be done processing: %v", c.Cluster(), err)
   363  	}
   364  	if c.opts.XDSUpdater != nil {
   365  		c.opts.XDSUpdater.RemoveShard(model.ShardKeyFromRegistry(c))
   366  	}
   367  
   368  	// Unregister networks handler
   369  	if c.networksHandlerRegistration != nil {
   370  		c.opts.MeshNetworksWatcher.DeleteNetworksHandler(c.networksHandlerRegistration)
   371  	}
   372  
   373  	// Unregister mesh handler
   374  	if c.meshHandlerRegistration != nil {
   375  		c.opts.MeshWatcher.DeleteMeshHandler(c.meshHandlerRegistration)
   376  	}
   377  
   378  	return nil
   379  }
   380  
   381  func (c *Controller) onServiceEvent(pre, curr *v1.Service, event model.Event) error {
   382  	log.Debugf("Handle event %s for service %s in namespace %s", event, curr.Name, curr.Namespace)
   383  
   384  	// Create the standard (cluster.local) service.
   385  	svcConv := kube.ConvertService(*curr, c.opts.DomainSuffix, c.Cluster())
   386  
   387  	switch event {
   388  	case model.EventDelete:
   389  		c.deleteService(svcConv)
   390  	default:
   391  		c.addOrUpdateService(pre, curr, svcConv, event, false)
   392  	}
   393  
   394  	return nil
   395  }
   396  
   397  func (c *Controller) deleteService(svc *model.Service) {
   398  	c.Lock()
   399  	delete(c.servicesMap, svc.Hostname)
   400  	delete(c.nodeSelectorsForServices, svc.Hostname)
   401  	_, isNetworkGateway := c.networkGatewaysBySvc[svc.Hostname]
   402  	delete(c.networkGatewaysBySvc, svc.Hostname)
   403  	c.Unlock()
   404  
   405  	if isNetworkGateway {
   406  		c.NotifyGatewayHandlers()
   407  		// TODO trigger push via handler
   408  		// networks are different, we need to update all eds endpoints
   409  		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.NetworksTrigger)})
   410  	}
   411  
   412  	shard := model.ShardKeyFromRegistry(c)
   413  	event := model.EventDelete
   414  	c.opts.XDSUpdater.SvcUpdate(shard, string(svc.Hostname), svc.Attributes.Namespace, event)
   415  
   416  	c.handlers.NotifyServiceHandlers(nil, svc, event)
   417  }
   418  
   419  // recomputeServiceForPod is called when a pod changes and service endpoints need to be recomputed.
   420  // Most of a Pod is immutable, so once it has been created we are ok to cache the internal representation.
   421  // However, a few fields (labels) are mutable. When these change, we call recomputeServiceForPod to rebuild the cache
   422  // for all services the pod is a part of and push an update.
   423  func (c *Controller) recomputeServiceForPod(pod *v1.Pod) {
   424  	allServices := c.services.List(pod.Namespace, klabels.Everything())
   425  	cu := sets.New[model.ConfigKey]()
   426  	services := getPodServices(allServices, pod)
   427  	for _, svc := range services {
   428  		hostname := kube.ServiceHostname(svc.Name, svc.Namespace, c.opts.DomainSuffix)
   429  		c.Lock()
   430  		conv, f := c.servicesMap[hostname]
   431  		c.Unlock()
   432  		if !f {
   433  			return
   434  		}
   435  		shard := model.ShardKeyFromRegistry(c)
   436  		endpoints := c.buildEndpointsForService(conv, true)
   437  		if len(endpoints) > 0 {
   438  			c.opts.XDSUpdater.EDSCacheUpdate(shard, string(hostname), svc.Namespace, endpoints)
   439  		}
   440  		cu.Insert(model.ConfigKey{
   441  			Kind:      kind.ServiceEntry,
   442  			Name:      string(hostname),
   443  			Namespace: svc.Namespace,
   444  		})
   445  	}
   446  	if len(cu) > 0 {
   447  		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
   448  			Full:           false,
   449  			ConfigsUpdated: cu,
   450  			Reason:         model.NewReasonStats(model.EndpointUpdate),
   451  		})
   452  	}
   453  }
   454  
   455  func (c *Controller) addOrUpdateService(pre, curr *v1.Service, currConv *model.Service, event model.Event, updateEDSCache bool) {
   456  	needsFullPush := false
   457  	// First, process the NodePort gateway service whose externalIPs are specified,
   458  	// and the LoadBalancer gateway service.
   459  	if currConv.Attributes.ClusterExternalAddresses.Len() > 0 {
   460  		needsFullPush = c.extractGatewaysFromService(currConv)
   461  	} else if isNodePortGatewayService(curr) {
   462  		// We need to know which services are using node selectors because during node events,
   463  		// we have to update all the node port services accordingly.
   464  		nodeSelector := getNodeSelectorsForService(curr)
   465  		c.Lock()
   466  		// only add when it is nodePort gateway service
   467  		c.nodeSelectorsForServices[currConv.Hostname] = nodeSelector
   468  		c.Unlock()
   469  		needsFullPush = c.updateServiceNodePortAddresses(currConv)
   470  	}
   471  
   472  	// For ExternalName, we need to update the EndpointIndex, as we will store endpoints just based on the Service.
   473  	if !features.EnableExternalNameAlias && curr != nil && curr.Spec.Type == v1.ServiceTypeExternalName {
   474  		updateEDSCache = true
   475  	}
   476  
   477  	c.Lock()
   478  	prevConv := c.servicesMap[currConv.Hostname]
   479  	c.servicesMap[currConv.Hostname] = currConv
   480  	c.Unlock()
   481  	// This full push is needed to update ALL EDS endpoints, even though we do a full push on service add/update,
   482  	// as that full push is only triggered for the specific service.
   483  	if needsFullPush {
   484  		// networks are different, we need to update all eds endpoints
   485  		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.NetworksTrigger)})
   486  	}
   487  
   488  	shard := model.ShardKeyFromRegistry(c)
   489  	ns := currConv.Attributes.Namespace
   490  	// We also need to update when the Service changes. For Kubernetes, a service change will result in Endpoint updates,
   491  	// but workload entries will also need to be updated.
   492  	// TODO(nmittler): Build different sets of endpoints for cluster.local and clusterset.local.
   493  	if updateEDSCache || features.EnableK8SServiceSelectWorkloadEntries {
   494  		endpoints := c.buildEndpointsForService(currConv, updateEDSCache)
   495  		if len(endpoints) > 0 {
   496  			c.opts.XDSUpdater.EDSCacheUpdate(shard, string(currConv.Hostname), ns, endpoints)
   497  		}
   498  	}
   499  
   500  	// filter out service update events that do not require a push
   501  	if event == model.EventUpdate && !serviceUpdateNeedsPush(pre, curr, prevConv, currConv) {
   502  		return
   503  	}
   504  
   505  	c.opts.XDSUpdater.SvcUpdate(shard, string(currConv.Hostname), ns, event)
   506  	c.handlers.NotifyServiceHandlers(prevConv, currConv, event)
   507  }
   508  
   509  func (c *Controller) buildEndpointsForService(svc *model.Service, updateCache bool) []*model.IstioEndpoint {
   510  	endpoints := c.endpoints.buildIstioEndpointsWithService(svc.Attributes.Name, svc.Attributes.Namespace, svc.Hostname, updateCache)
   511  	if features.EnableK8SServiceSelectWorkloadEntries {
   512  		fep := c.collectWorkloadInstanceEndpoints(svc)
   513  		endpoints = append(endpoints, fep...)
   514  	}
   515  	if !features.EnableExternalNameAlias {
   516  		endpoints = append(endpoints, kube.ExternalNameEndpoints(svc)...)
   517  	}
   518  	return endpoints
   519  }
   520  
   521  func (c *Controller) onNodeEvent(_, node *v1.Node, event model.Event) error {
   522  	var updatedNeeded bool
   523  	if event == model.EventDelete {
   524  		updatedNeeded = true
   525  		c.Lock()
   526  		delete(c.nodeInfoMap, node.Name)
   527  		c.Unlock()
   528  	} else {
   529  		k8sNode := kubernetesNode{labels: node.Labels}
   530  		for _, address := range node.Status.Addresses {
   531  			if address.Type == v1.NodeExternalIP && address.Address != "" {
   532  				k8sNode.address = address.Address
   533  				break
   534  			}
   535  		}
   536  		if k8sNode.address == "" {
   537  			return nil
   538  		}
   539  
   540  		c.Lock()
   541  		// check if the node exists, as this add event could be due to a controller resync;
   542  		// if the stored object changed, fire an update event. Otherwise, ignore this event.
   543  		currentNode, exists := c.nodeInfoMap[node.Name]
   544  		if !exists || !nodeEquals(currentNode, k8sNode) {
   545  			c.nodeInfoMap[node.Name] = k8sNode
   546  			updatedNeeded = true
   547  		}
   548  		c.Unlock()
   549  	}
   550  
   551  	// update all related services
   552  	if updatedNeeded && c.updateServiceNodePortAddresses() {
   553  		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
   554  			Full:   true,
   555  			Reason: model.NewReasonStats(model.ServiceUpdate),
   556  		})
   557  	}
   558  	return nil
   559  }
   560  
   561  // FilterOutFunc is a func for filtering out objects during the update callback
   562  type FilterOutFunc[T controllers.Object] func(old, cur T) bool
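        // Illustrative sketch (a hypothetical filter, not one used in this file): a FilterOutFunc that treats
        // informer resyncs where the object is unchanged (same resource version) as "filtered out":
        //
        //	var skipUnchanged FilterOutFunc[*v1.Pod] = func(old, cur *v1.Pod) bool {
        //		return old.ResourceVersion == cur.ResourceVersion
        //	}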
   563  
   564  // registerHandlers registers a handler for a given informer.
   565  // Note: `otype` is used for metrics; if empty, no metric will be reported.
   566  func registerHandlers[T controllers.ComparableObject](c *Controller,
   567  	informer kclient.Informer[T], otype string,
   568  	handler func(T, T, model.Event) error, filter FilterOutFunc[T],
   569  ) {
   570  	wrappedHandler := func(prev, curr T, event model.Event) error {
   571  		curr = informer.Get(curr.GetName(), curr.GetNamespace())
   572  		if controllers.IsNil(curr) {
   573  			// this can happen when a delete immediately follows an update;
   574  			// the delete event will be handled later
   575  			return nil
   576  		}
   577  		return handler(prev, curr, event)
   578  	}
   579  	// Pre-build our metric types to avoid recomputing them on each event
   580  	adds := k8sEvents.With(typeTag.Value(otype), eventTag.Value("add"))
   581  	updatesames := k8sEvents.With(typeTag.Value(otype), eventTag.Value("updatesame"))
   582  	updates := k8sEvents.With(typeTag.Value(otype), eventTag.Value("update"))
   583  	deletes := k8sEvents.With(typeTag.Value(otype), eventTag.Value("delete"))
   584  
   585  	informer.AddEventHandler(
   586  		controllers.EventHandler[T]{
   587  			AddFunc: func(obj T) {
   588  				adds.Increment()
   589  				c.queue.Push(func() error {
   590  					return wrappedHandler(ptr.Empty[T](), obj, model.EventAdd)
   591  				})
   592  			},
   593  			UpdateFunc: func(old, cur T) {
   594  				if filter != nil {
   595  					if filter(old, cur) {
   596  						updatesames.Increment()
   597  						return
   598  					}
   599  				}
   600  				updates.Increment()
   601  				c.queue.Push(func() error {
   602  					return wrappedHandler(old, cur, model.EventUpdate)
   603  				})
   604  			},
   605  			DeleteFunc: func(obj T) {
   606  				deletes.Increment()
   607  				c.queue.Push(func() error {
   608  					return handler(ptr.Empty[T](), obj, model.EventDelete)
   609  				})
   610  			},
   611  		})
   612  }
   613  
   614  // HasSynced returns true after the initial state synchronization, or after SyncTimeout has elapsed.
   615  func (c *Controller) HasSynced() bool {
   616  	return c.queue.HasSynced() || c.initialSyncTimedout.Load()
   617  }
   618  
   619  func (c *Controller) informersSynced() bool {
   620  	if c.ambientIndex != nil && !c.ambientIndex.HasSynced() {
   621  		return false
   622  	}
   623  	return c.namespaces.HasSynced() &&
   624  		c.services.HasSynced() &&
   625  		c.endpoints.slices.HasSynced() &&
   626  		c.pods.pods.HasSynced() &&
   627  		c.nodes.HasSynced() &&
   628  		c.imports.HasSynced() &&
   629  		c.exports.HasSynced() &&
   630  		c.networkManager.HasSynced()
   631  }
   632  
   633  func (c *Controller) syncPods() error {
   634  	var err *multierror.Error
   635  	pods := c.podsClient.List(metav1.NamespaceAll, klabels.Everything())
   636  	log.Debugf("initializing %d pods", len(pods))
   637  	for _, s := range pods {
   638  		err = multierror.Append(err, c.pods.onEvent(nil, s, model.EventAdd))
   639  	}
   640  	return err.ErrorOrNil()
   641  }
   642  
   643  // Run all controllers until a signal is received
   644  func (c *Controller) Run(stop <-chan struct{}) {
   645  	if c.opts.SyncTimeout != 0 {
   646  		time.AfterFunc(c.opts.SyncTimeout, func() {
   647  			if !c.queue.HasSynced() {
   648  				log.Warnf("kube controller for %s initial sync timed out", c.opts.ClusterID)
   649  				c.initialSyncTimedout.Store(true)
   650  			}
   651  		})
   652  	}
   653  	st := time.Now()
   654  
   655  	go c.imports.Run(stop)
   656  	go c.exports.Run(stop)
   657  	kubelib.WaitForCacheSync("kube controller", stop, c.informersSynced)
   658  	log.Infof("kube controller for %s synced after %v", c.opts.ClusterID, time.Since(st))
   659  	// after the in-order sync we can start processing the queue
   660  	c.queue.Run(stop)
   661  	log.Infof("Controller terminated")
   662  }
   663  
   664  // Stop the controller. Only for tests, to simplify the code (defer c.Stop())
   665  func (c *Controller) Stop() {
   666  	if c.stop != nil {
   667  		close(c.stop)
   668  	}
   669  }
   670  
   671  // Services implements a service catalog operation
   672  func (c *Controller) Services() []*model.Service {
   673  	c.RLock()
   674  	out := make([]*model.Service, 0, len(c.servicesMap))
   675  	for _, svc := range c.servicesMap {
   676  		out = append(out, svc)
   677  	}
   678  	c.RUnlock()
   679  	sort.Slice(out, func(i, j int) bool { return out[i].Hostname < out[j].Hostname })
   680  	return out
   681  }
   682  
   683  // GetService implements a service catalog operation for the specified hostname.
   684  func (c *Controller) GetService(hostname host.Name) *model.Service {
   685  	c.RLock()
   686  	svc := c.servicesMap[hostname]
   687  	c.RUnlock()
   688  	return svc
   689  }
   690  
   691  // getPodLocality retrieves the locality for a pod.
   692  func (c *Controller) getPodLocality(pod *v1.Pod) string {
   693  	// if the pod has the `istio-locality` label, skip the node lookup below
   694  	if len(pod.Labels[model.LocalityLabel]) > 0 {
   695  		return model.GetLocalityLabel(pod.Labels[model.LocalityLabel])
   696  	}
   697  
   698  	// NodeName is set by the scheduler after the pod is created
   699  	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#late-initialization
   700  	node := c.nodes.Get(pod.Spec.NodeName, "")
   701  	if node == nil {
   702  		if pod.Spec.NodeName != "" {
   703  			log.Warnf("unable to get node %q for pod %q/%q", pod.Spec.NodeName, pod.Namespace, pod.Name)
   704  		}
   705  		return ""
   706  	}
   707  
   708  	region := getLabelValue(node.ObjectMeta, NodeRegionLabelGA, NodeRegionLabel)
   709  	zone := getLabelValue(node.ObjectMeta, NodeZoneLabelGA, NodeZoneLabel)
   710  	subzone := getLabelValue(node.ObjectMeta, label.TopologySubzone.Name, "")
   711  
   712  	if region == "" && zone == "" && subzone == "" {
   713  		return ""
   714  	}
   715  
   716  	return region + "/" + zone + "/" + subzone // Format: "%s/%s/%s"
   717  }
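        // For example (illustrative values): a node labeled topology.kubernetes.io/region=us-east1 and
        // topology.kubernetes.io/zone=us-east1-b, with no subzone label, yields the locality string
        // "us-east1/us-east1-b/" for pods scheduled on it.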
   718  
   719  func (c *Controller) serviceInstancesFromWorkloadInstances(svc *model.Service, reqSvcPort int) []*model.ServiceInstance {
   720  	// Run through all the workload instances and select the ones that match the service labels,
   721  	// but only if this is an internal Kubernetes service of ClientSideLB (eds) type,
   722  	// as InstancesByPort is called by the aggregate controller. We don't want to include
   723  	// workload instances for any other registry.
   724  	workloadInstancesExist := !c.workloadInstancesIndex.Empty()
   725  	c.RLock()
   726  	_, inRegistry := c.servicesMap[svc.Hostname]
   727  	c.RUnlock()
   728  
   729  	// Only select internal Kubernetes services with selectors
   730  	if !inRegistry || !workloadInstancesExist || svc.Attributes.ServiceRegistry != provider.Kubernetes ||
   731  		svc.MeshExternal || svc.Resolution != model.ClientSideLB || svc.Attributes.LabelSelectors == nil {
   732  		return nil
   733  	}
   734  
   735  	selector := labels.Instance(svc.Attributes.LabelSelectors)
   736  
   737  	// Get the service port name and target port so that we can construct the service instance
   738  	k8sService := c.services.Get(svc.Attributes.Name, svc.Attributes.Namespace)
   739  	// We did not find the k8s service, so we cannot get the targetPort.
   740  	if k8sService == nil {
   741  		log.Infof("serviceInstancesFromWorkloadInstances(%s.%s) failed to get k8s service",
   742  			svc.Attributes.Name, svc.Attributes.Namespace)
   743  		return nil
   744  	}
   745  
   746  	var servicePort *model.Port
   747  	for _, p := range svc.Ports {
   748  		if p.Port == reqSvcPort {
   749  			servicePort = p
   750  			break
   751  		}
   752  	}
   753  	if servicePort == nil {
   754  		return nil
   755  	}
   756  
   757  	// Now get the target Port for this service port
   758  	targetPort := findServiceTargetPort(servicePort, k8sService)
   759  	if targetPort.num == 0 {
   760  		targetPort.num = servicePort.Port
   761  	}
   762  
   763  	out := make([]*model.ServiceInstance, 0)
   764  
   765  	c.workloadInstancesIndex.ForEach(func(wi *model.WorkloadInstance) {
   766  		if wi.Namespace != svc.Attributes.Namespace {
   767  			return
   768  		}
   769  		if selector.Match(wi.Endpoint.Labels) {
   770  			instance := serviceInstanceFromWorkloadInstance(svc, servicePort, targetPort, wi)
   771  			if instance != nil {
   772  				out = append(out, instance)
   773  			}
   774  		}
   775  	})
   776  	return out
   777  }
   778  
   779  func serviceInstanceFromWorkloadInstance(svc *model.Service, servicePort *model.Port,
   780  	targetPort serviceTargetPort, wi *model.WorkloadInstance,
   781  ) *model.ServiceInstance {
   782  	// create an instance with endpoint whose service port name matches
   783  	istioEndpoint := wi.Endpoint.ShallowCopy()
   784  
   785  	// by default, use the numbered targetPort
   786  	istioEndpoint.EndpointPort = uint32(targetPort.num)
   787  
   788  	if targetPort.name != "" {
   789  		// This is a named port, find the corresponding port in the port map
   790  		matchedPort := wi.PortMap[targetPort.name]
   791  		if matchedPort != 0 {
   792  			istioEndpoint.EndpointPort = matchedPort
   793  		} else if targetPort.explicitName {
   794  			// No match found, and the service explicitly expects the named port; skip this endpoint
   795  			return nil
   796  		}
   797  	}
   798  
   799  	istioEndpoint.ServicePortName = servicePort.Name
   800  	return &model.ServiceInstance{
   801  		Service:     svc,
   802  		ServicePort: servicePort,
   803  		Endpoint:    istioEndpoint,
   804  	}
   805  }
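        // Illustrative sketch (hypothetical values): for a named targetPort, the WorkloadInstance's port map
        // decides the endpoint port. With targetPort{name: "http", explicitName: true} and
        // wi.PortMap = map[string]uint32{"http": 8080}, the endpoint gets EndpointPort 8080; if "http"
        // were missing from the map, the endpoint would be skipped (nil is returned).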
   806  
   807  // convenience function to collect all workload entry endpoints in updateEDS calls.
   808  func (c *Controller) collectWorkloadInstanceEndpoints(svc *model.Service) []*model.IstioEndpoint {
   809  	workloadInstancesExist := !c.workloadInstancesIndex.Empty()
   810  
   811  	if !workloadInstancesExist || svc.Resolution != model.ClientSideLB || len(svc.Ports) == 0 {
   812  		return nil
   813  	}
   814  
   815  	endpoints := make([]*model.IstioEndpoint, 0)
   816  	for _, port := range svc.Ports {
   817  		for _, instance := range c.serviceInstancesFromWorkloadInstances(svc, port.Port) {
   818  			endpoints = append(endpoints, instance.Endpoint)
   819  		}
   820  	}
   821  
   822  	return endpoints
   823  }
   824  
   825  // GetProxyServiceTargets returns service targets co-located with a given proxy
   826  // TODO: this code does not return k8s service instances when the proxy's IP is a workload entry.
   827  // To tackle this, we need an ip2instance map like the one we have in service entry.
   828  func (c *Controller) GetProxyServiceTargets(proxy *model.Proxy) []model.ServiceTarget {
   829  	if len(proxy.IPAddresses) > 0 {
   830  		proxyIP := proxy.IPAddresses[0]
   831  		// look up a WorkloadEntry; if there are multiple WorkloadEntries
   832  		// with the same IP, choose one deterministically
   833  		workload := workloadinstances.GetInstanceForProxy(c.workloadInstancesIndex, proxy, proxyIP)
   834  		if workload != nil {
   835  			return c.serviceTargetsFromWorkloadInstance(workload)
   836  		}
   837  		pod := c.pods.getPodByProxy(proxy)
   838  		if pod != nil && !proxy.IsVM() {
   839  			// we don't want to use this block for our test "VM" which is actually a Pod.
   840  
   841  			if !c.isControllerForProxy(proxy) {
   842  				log.Errorf("proxy is in cluster %v, but controller is for cluster %v", proxy.Metadata.ClusterID, c.Cluster())
   843  				return nil
   844  			}
   845  
   846  			// 1. find the proxy's services by label selector; if there are none, there may be a headless service
   847  			// without a selector, so fall back to 2
   848  			allServices := c.services.List(pod.Namespace, klabels.Everything())
   849  			if services := getPodServices(allServices, pod); len(services) > 0 {
   850  				out := make([]model.ServiceTarget, 0)
   851  				for _, svc := range services {
   852  					out = append(out, c.GetProxyServiceTargetsByPod(pod, svc)...)
   853  				}
   854  				return out
   855  			}
   856  			// 2. Headless service without selector
   857  			return c.endpoints.GetProxyServiceTargets(proxy)
   858  		}
   859  
   860  		// 3. The pod is not present when this is called
   861  		// due to eventual consistency issues. However, we have a lot of information about the pod from the proxy
   862  		// metadata already. Because of this, we can still get most of the information we need.
   863  		// If we cannot accurately construct ServiceEndpoints from just the metadata, this will return an error and we can
   864  		// attempt to read the real pod.
   865  		out, err := c.GetProxyServiceTargetsFromMetadata(proxy)
   866  		if err != nil {
   867  			log.Warnf("GetProxyServiceTargetsFromMetadata for %v failed: %v", proxy.ID, err)
   868  		}
   869  		return out
   870  	}
   871  
   872  	// TODO: This should not happen, remove?
   873  	if c.opts.Metrics != nil {
   874  		c.opts.Metrics.AddMetric(model.ProxyStatusNoService, proxy.ID, proxy.ID, "")
   875  	} else {
   876  		log.Infof("Missing metrics env, empty list of services for pod %s", proxy.ID)
   877  	}
   878  	return nil
   879  }
   880  
   881  func (c *Controller) serviceTargetsFromWorkloadInstance(si *model.WorkloadInstance) []model.ServiceTarget {
   882  	out := make([]model.ServiceTarget, 0)
   883  	// find the workload entry's service by label selector
   884  	// rather than scanning through our internal map of model.services, get the services via the k8s apis
   885  	dummyPod := &v1.Pod{
   886  		ObjectMeta: metav1.ObjectMeta{Namespace: si.Namespace, Labels: si.Endpoint.Labels},
   887  	}
   888  
   889  	// find the services that map to this workload entry, fire off eds updates if the service is of type client-side lb
   890  	allServices := c.services.List(si.Namespace, klabels.Everything())
   891  	if k8sServices := getPodServices(allServices, dummyPod); len(k8sServices) > 0 {
   892  		for _, k8sSvc := range k8sServices {
   893  			service := c.GetService(kube.ServiceHostname(k8sSvc.Name, k8sSvc.Namespace, c.opts.DomainSuffix))
   894  			// Note that this cannot be an external service because k8s external services do not have label selectors.
   895  			if service == nil || service.Resolution != model.ClientSideLB {
   896  				// may be a headless service
   897  				continue
   898  			}
   899  
   900  			for _, servicePort := range service.Ports {
   901  				if servicePort.Protocol == protocol.UDP {
   902  					continue
   903  				}
   904  
   905  				// Now get the target Port for this service port
   906  				targetPort := findServiceTargetPort(servicePort, k8sSvc)
   907  				if targetPort.num == 0 {
   908  					targetPort.num = servicePort.Port
   909  				}
   910  
   911  				instance := serviceInstanceFromWorkloadInstance(service, servicePort, targetPort, si)
   912  				if instance != nil {
   913  					out = append(out, model.ServiceInstanceToTarget(instance))
   914  				}
   915  			}
   916  		}
   917  	}
   918  	return out
   919  }
   920  
   921  // WorkloadInstanceHandler defines the handler for service instances generated by other registries
   922  func (c *Controller) WorkloadInstanceHandler(si *model.WorkloadInstance, event model.Event) {
   923  	c.queue.Push(func() error {
   924  		c.workloadInstanceHandler(si, event)
   925  		return nil
   926  	})
   927  }
   928  
   929  func (c *Controller) workloadInstanceHandler(si *model.WorkloadInstance, event model.Event) {
   930  	// ignore malformed workload entries, and ignore any workload entry that does not have labels,
   931  	// as there is no way for us to select them
   932  	if si.Namespace == "" || len(si.Endpoint.Labels) == 0 {
   933  		return
   934  	}
   935  
   936  	// this is from a workload entry. Store it in a separate index so that
   937  	// InstancesByPort can use these as well as the k8s pods.
   938  	switch event {
   939  	case model.EventDelete:
   940  		c.workloadInstancesIndex.Delete(si)
   941  	default: // add or update
   942  		c.workloadInstancesIndex.Insert(si)
   943  	}
   944  
   945  	// find the workload entry's service by label selector
   946  	// rather than scanning through our internal map of model.services, get the services via the k8s apis
   947  	dummyPod := &v1.Pod{
   948  		ObjectMeta: metav1.ObjectMeta{Namespace: si.Namespace, Labels: si.Endpoint.Labels},
   949  	}
   950  
   951  	// We got an instance update, which probably affects EDS. However, EDS is keyed by Hostname. We need to find all
   952  	// Hostnames (services) that were updated and recompute them.
   953  	// find the services that map to this workload entry, fire off eds updates if the service is of type client-side lb
   954  	allServices := c.services.List(si.Namespace, klabels.Everything())
   955  	matchedServices := getPodServices(allServices, dummyPod)
   956  	matchedHostnames := slices.Map(matchedServices, func(e *v1.Service) host.Name {
   957  		return kube.ServiceHostname(e.Name, e.Namespace, c.opts.DomainSuffix)
   958  	})
   959  	c.endpoints.pushEDS(matchedHostnames, si.Namespace)
   960  }
   961  
   962  func (c *Controller) onSystemNamespaceEvent(_, ns *v1.Namespace, ev model.Event) error {
   963  	if ev == model.EventDelete {
   964  		return nil
   965  	}
   966  	if c.setNetworkFromNamespace(ns) {
   967  		// the network changed, which rarely happens;
   968  		// refresh pods/endpoints/services
   969  		c.onNetworkChange()
   970  	}
   971  	return nil
   972  }
   973  
   974  // isControllerForProxy should be used for proxies assumed to be in the kube cluster for this controller. Workload Entries
   975  // may not necessarily pass this check, but we still want to allow kube services to select workload instances.
   976  func (c *Controller) isControllerForProxy(proxy *model.Proxy) bool {
   977  	return proxy.Metadata.ClusterID == "" || proxy.Metadata.ClusterID == c.Cluster()
   978  }
   979  
   980  // GetProxyServiceTargetsFromMetadata retrieves ServiceTargets using proxy Metadata rather than
   981  // from the Pod. This allows retrieving Instances immediately, regardless of delays in Kubernetes.
   982  // If the proxy doesn't have enough metadata, an error is returned
   983  func (c *Controller) GetProxyServiceTargetsFromMetadata(proxy *model.Proxy) ([]model.ServiceTarget, error) {
   984  	if len(proxy.Labels) == 0 {
   985  		return nil, nil
   986  	}
   987  
   988  	if !c.isControllerForProxy(proxy) {
   989  		return nil, fmt.Errorf("proxy is in cluster %v, but controller is for cluster %v", proxy.Metadata.ClusterID, c.Cluster())
   990  	}
   991  
   992  	// Create a pod with just the information needed to find the associated Services
   993  	dummyPod := &v1.Pod{
   994  		ObjectMeta: metav1.ObjectMeta{
   995  			Namespace: proxy.ConfigNamespace,
   996  			Labels:    proxy.Labels,
   997  		},
   998  	}
   999  
  1000  	// Find the Service associated with the pod.
  1001  	allServices := c.services.List(proxy.ConfigNamespace, klabels.Everything())
  1002  	services := getPodServices(allServices, dummyPod)
  1003  	if len(services) == 0 {
  1004  		return nil, fmt.Errorf("no instances found for %s", proxy.ID)
  1005  	}
  1006  
  1007  	out := make([]model.ServiceTarget, 0)
  1008  	for _, svc := range services {
  1009  		hostname := kube.ServiceHostname(svc.Name, svc.Namespace, c.opts.DomainSuffix)
  1010  		modelService := c.GetService(hostname)
  1011  		if modelService == nil {
  1012  			return nil, fmt.Errorf("failed to find model service for %v", hostname)
  1013  		}
  1014  
  1015  		for _, modelService := range c.servicesForNamespacedName(config.NamespacedName(svc)) {
  1016  			tps := make(map[model.Port]*model.Port)
  1017  			tpsList := make([]model.Port, 0)
  1018  			for _, port := range svc.Spec.Ports {
  1019  				svcPort, f := modelService.Ports.Get(port.Name)
  1020  				if !f {
  1021  					return nil, fmt.Errorf("failed to get svc port for %v", port.Name)
  1022  				}
  1023  
  1024  				var portNum int
  1025  				if len(proxy.Metadata.PodPorts) > 0 {
  1026  					var err error
  1027  					portNum, err = findPortFromMetadata(port, proxy.Metadata.PodPorts)
  1028  					if err != nil {
  1029  						return nil, fmt.Errorf("failed to find target port for %v: %v", proxy.ID, err)
  1030  					}
  1031  				} else {
  1032  					// most likely a VM - we assume the WorkloadEntry won't remap any ports
  1033  					portNum = port.TargetPort.IntValue()
  1034  				}
  1035  
  1036  				// Dedupe the target ports here - a Service might have configured multiple ports to the same target port;
  1037  				// we will have to create only one ingress listener per port and protocol so that we do not end up
  1038  				// complaining about listener conflicts.
  1039  				targetPort := model.Port{
  1040  					Port:     portNum,
  1041  					Protocol: svcPort.Protocol,
  1042  				}
  1043  				if _, exists := tps[targetPort]; !exists {
  1044  					tps[targetPort] = svcPort
  1045  					tpsList = append(tpsList, targetPort)
  1046  				}
  1047  			}
  1048  
  1049  			// Iterate over target ports in the same order as defined in the service spec, so that a
  1050  			// protocol conflict for a port does not cause unstable protocol selection for that port.
  1051  			for _, tp := range tpsList {
  1052  				svcPort := tps[tp]
  1053  				out = append(out, model.ServiceTarget{
  1054  					Service: modelService,
  1055  					Port: model.ServiceInstancePort{
  1056  						ServicePort: svcPort,
  1057  						TargetPort:  uint32(tp.Port),
  1058  					},
  1059  				})
  1060  			}
  1061  		}
  1062  	}
  1063  	return out, nil
  1064  }
  1065  
  1066  func (c *Controller) GetProxyServiceTargetsByPod(pod *v1.Pod, service *v1.Service) []model.ServiceTarget {
  1067  	var out []model.ServiceTarget
  1068  
  1069  	for _, svc := range c.servicesForNamespacedName(config.NamespacedName(service)) {
  1070  		tps := make(map[model.Port]*model.Port)
  1071  		tpsList := make([]model.Port, 0)
  1072  		for _, port := range service.Spec.Ports {
  1073  			svcPort, exists := svc.Ports.Get(port.Name)
  1074  			if !exists {
  1075  				continue
  1076  			}
  1077  			// find target port
  1078  			portNum, err := FindPort(pod, &port)
  1079  			if err != nil {
  1080  				log.Warnf("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
  1081  				continue
  1082  			}
  1083  			// Dedupe the target ports here - a Service might have configured multiple ports to the same target port;
  1084  			// we will have to create only one ingress listener per port and protocol so that we do not end up
  1085  			// complaining about listener conflicts.
  1086  			targetPort := model.Port{
  1087  				Port:     portNum,
  1088  				Protocol: svcPort.Protocol,
  1089  			}
  1090  			if _, exists := tps[targetPort]; !exists {
  1091  				tps[targetPort] = svcPort
  1092  				tpsList = append(tpsList, targetPort)
  1093  			}
  1094  		}
  1095  		// Iterate over target ports in the same order as defined in the service spec, so that a
  1096  		// protocol conflict for a port does not cause unstable protocol selection for that port.
  1097  		for _, tp := range tpsList {
  1098  			svcPort := tps[tp]
  1099  			out = append(out, model.ServiceTarget{
  1100  				Service: svc,
  1101  				Port: model.ServiceInstancePort{
  1102  					ServicePort: svcPort,
  1103  					TargetPort:  uint32(tp.Port),
  1104  				},
  1105  			})
  1106  		}
  1107  	}
  1108  
  1109  	return out
  1110  }
  1111  
  1112  func (c *Controller) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Instance {
  1113  	pod := c.pods.getPodByProxy(proxy)
  1114  	if pod != nil {
  1115  		var locality, nodeName string
  1116  		locality = c.getPodLocality(pod)
  1117  		if len(proxy.GetNodeName()) == 0 {
  1118  			// this can happen for an "old" proxy with no `Metadata.NodeName` set;
  1119  			// in this case we set the node name in labels on the fly
  1120  			// TODO: remove this when 1.16 is EOL?
  1121  			nodeName = pod.Spec.NodeName
  1122  		}
  1123  		if len(locality) == 0 && len(nodeName) == 0 {
  1124  			return pod.Labels
  1125  		}
  1126  		return labelutil.AugmentLabels(pod.Labels, c.clusterID, locality, nodeName, c.network)
  1127  	}
  1128  	return nil
  1129  }
  1130  
  1131  // AppendServiceHandler implements a service catalog operation
  1132  func (c *Controller) AppendServiceHandler(f model.ServiceHandler) {
  1133  	c.handlers.AppendServiceHandler(f)
  1134  }
  1135  
  1136  // AppendWorkloadHandler implements a service catalog operation
  1137  func (c *Controller) AppendWorkloadHandler(f func(*model.WorkloadInstance, model.Event)) {
  1138  	c.handlers.AppendWorkloadHandler(f)
  1139  }
  1140  
  1141  // AppendNamespaceDiscoveryHandlers registers handlers for namespaces being selected or deselected by discovery selector changes.
  1142  func (c *Controller) AppendNamespaceDiscoveryHandlers(f func(string, model.Event)) {
  1143  	c.namespaceDiscoveryHandlers = append(c.namespaceDiscoveryHandlers, f)
  1144  }
  1145  
  1146  // AppendCrdHandlers registers handlers for CRD events.
  1147  func (c *Controller) AppendCrdHandlers(f func(name string)) {
  1148  	c.crdHandlers = append(c.crdHandlers, f)
  1149  }
  1150  
  1151  // hostNamesForNamespacedName returns all possible hostnames for the given service name.
  1152  // If Kubernetes Multi-Cluster Services (MCS) is enabled, this will contain the regular
  1153  // hostname as well as the MCS hostname (clusterset.local). Otherwise, only the regular
  1154  // hostname will be returned.
  1155  func (c *Controller) hostNamesForNamespacedName(name types.NamespacedName) []host.Name {
  1156  	if features.EnableMCSHost {
  1157  		return []host.Name{
  1158  			kube.ServiceHostname(name.Name, name.Namespace, c.opts.DomainSuffix),
  1159  			serviceClusterSetLocalHostname(name),
  1160  		}
  1161  	}
  1162  	return []host.Name{
  1163  		kube.ServiceHostname(name.Name, name.Namespace, c.opts.DomainSuffix),
  1164  	}
  1165  }
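        // For example (illustrative): with DomainSuffix "cluster.local", the name foo/bar maps to
        // "bar.foo.svc.cluster.local", plus "bar.foo.svc.clusterset.local" when EnableMCSHost is set.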
  1166  
  1167  // servicesForNamespacedName returns all services for the given service name.
  1168  // If Kubernetes Multi-Cluster Services (MCS) is enabled, this will contain the regular
  1169  // service as well as the MCS service (clusterset.local), if available. Otherwise,
  1170  // only the regular service will be returned.
  1171  func (c *Controller) servicesForNamespacedName(name types.NamespacedName) []*model.Service {
  1172  	if features.EnableMCSHost {
  1173  		out := make([]*model.Service, 0, 2)
  1174  
  1175  		c.RLock()
  1176  		if svc := c.servicesMap[kube.ServiceHostname(name.Name, name.Namespace, c.opts.DomainSuffix)]; svc != nil {
  1177  			out = append(out, svc)
  1178  		}
  1179  
  1180  		if svc := c.servicesMap[serviceClusterSetLocalHostname(name)]; svc != nil {
  1181  			out = append(out, svc)
  1182  		}
  1183  		c.RUnlock()
  1184  
  1185  		return out
  1186  	}
  1187  	if svc := c.GetService(kube.ServiceHostname(name.Name, name.Namespace, c.opts.DomainSuffix)); svc != nil {
  1188  		return []*model.Service{svc}
  1189  	}
  1190  	return nil
  1191  }
  1192  
  1193  func serviceUpdateNeedsPush(prev, curr *v1.Service, preConv, currConv *model.Service) bool {
  1194  	if !features.EnableOptimizedServicePush {
  1195  		return true
  1196  	}
  1197  	if preConv == nil {
  1198  		return !currConv.Attributes.ExportTo.Contains(visibility.None)
  1199  	}
  1200  	// if the service is not exported, there is no need to push
  1201  	if preConv.Attributes.ExportTo.Contains(visibility.None) &&
  1202  		currConv.Attributes.ExportTo.Contains(visibility.None) {
  1203  		return false
  1204  	}
  1205  	// Check if there are any changes we care about by comparing `model.Service`s
  1206  	if !preConv.Equals(currConv) {
  1207  		return true
  1208  	}
  1209  	// Also check if target ports have changed, since they are not included in `model.Service`.
  1210  	// `preConv.Equals(currConv)` already makes sure the number of ports has not changed.
  1211  	if prev != nil && curr != nil {
  1212  		if !slices.EqualFunc(prev.Spec.Ports, curr.Spec.Ports, func(a, b v1.ServicePort) bool {
  1213  			return a.TargetPort == b.TargetPort
  1214  		}) {
  1215  			return true
  1216  		}
  1217  	}
  1218  	return false
  1219  }
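        // Illustrative note (hypothetical edit): changing only a port's targetPort, e.g. from 8080 to 9090,
        // leaves the converted model.Service equal (service-level ports are unchanged), but serviceUpdateNeedsPush
        // still returns true via the Spec.Ports comparison above, since the endpoints selected by that port change.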