istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pilot/pkg/serviceregistry/kube/controller/ambient/workloads.go

     1  // Copyright Istio Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // nolint: gocritic
    16  package ambient
    17  
    18  import (
    19  	"net/netip"
    20  
    21  	v1 "k8s.io/api/core/v1"
    22  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    23  
    24  	"istio.io/api/label"
    25  	networkingv1alpha3 "istio.io/api/networking/v1alpha3"
    26  	networkingclient "istio.io/client-go/pkg/apis/networking/v1alpha3"
    27  	securityclient "istio.io/client-go/pkg/apis/security/v1beta1"
    28  	"istio.io/istio/pilot/pkg/features"
    29  	"istio.io/istio/pilot/pkg/model"
    30  	labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
    31  	"istio.io/istio/pkg/config/constants"
    32  	"istio.io/istio/pkg/config/labels"
    33  	"istio.io/istio/pkg/config/schema/kind"
    34  	kubeutil "istio.io/istio/pkg/kube"
    35  	"istio.io/istio/pkg/kube/krt"
    36  	kubelabels "istio.io/istio/pkg/kube/labels"
    37  	"istio.io/istio/pkg/log"
    38  	"istio.io/istio/pkg/ptr"
    39  	"istio.io/istio/pkg/slices"
    40  	"istio.io/istio/pkg/spiffe"
    41  	"istio.io/istio/pkg/workloadapi"
    42  )
    43  
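        // WorkloadsCollection builds the ambient Workload index. It derives WorkloadInfo from three sources:
        // Pods, WorkloadEntries, and the inline endpoints of ServiceEntries, and joins them into a single collection.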
    44  func (a *index) WorkloadsCollection(
    45  	Pods krt.Collection[*v1.Pod],
    46  	Nodes krt.Collection[*v1.Node],
    47  	MeshConfig krt.Singleton[MeshConfig],
    48  	AuthorizationPolicies krt.Collection[model.WorkloadAuthorization],
    49  	PeerAuths krt.Collection[*securityclient.PeerAuthentication],
    50  	Waypoints krt.Collection[Waypoint],
    51  	WorkloadServices krt.Collection[model.ServiceInfo],
    52  	WorkloadEntries krt.Collection[*networkingclient.WorkloadEntry],
    53  	ServiceEntries krt.Collection[*networkingclient.ServiceEntry],
    54  	AllPolicies krt.Collection[model.WorkloadAuthorization],
    55  	Namespaces krt.Collection[*v1.Namespace],
    56  ) krt.Collection[model.WorkloadInfo] {
    57  	WorkloadServicesNamespaceIndex := krt.NewNamespaceIndex(WorkloadServices)
    58  	PodWorkloads := krt.NewCollection(
    59  		Pods,
    60  		a.podWorkloadBuilder(MeshConfig, AuthorizationPolicies, PeerAuths, Waypoints, WorkloadServices, WorkloadServicesNamespaceIndex, Namespaces, Nodes),
    61  		krt.WithName("PodWorkloads"),
    62  	)
    63  	WorkloadEntryWorkloads := krt.NewCollection(
    64  		WorkloadEntries,
    65  		a.workloadEntryWorkloadBuilder(MeshConfig, AuthorizationPolicies, PeerAuths, Waypoints, WorkloadServices, WorkloadServicesNamespaceIndex, Namespaces),
    66  		krt.WithName("WorkloadEntryWorkloads"),
    67  	)
    68  	ServiceEntryWorkloads := krt.NewManyCollection(ServiceEntries, func(ctx krt.HandlerContext, se *networkingclient.ServiceEntry) []model.WorkloadInfo {
    69  		if len(se.Spec.Endpoints) == 0 {
    70  			return nil
    71  		}
    72  		res := make([]model.WorkloadInfo, 0, len(se.Spec.Endpoints))
    73  
    74  		wp := fetchWaypointForWorkload(ctx, Waypoints, Namespaces, se.ObjectMeta)
    75  
    76  		// Construct a partial ObjectMeta (namespace only) to pass through so that workloads found in the
    77  		// Endpoints can inherit the namespace-scoped waypoint from the ServiceEntry. The Endpoints have no
    78  		// real ObjectMeta of their own, so they cannot carry workload-scoped waypoint annotations yet.
    79  		someObjectMeta := metav1.ObjectMeta{
    80  			Namespace: se.Namespace,
    81  		}
    82  
    83  		svc := slices.First(a.serviceEntriesInfo(se, wp))
    84  		if svc == nil {
    85  			// Not ready yet
    86  			return nil
    87  		}
    88  		services := []model.ServiceInfo{*svc}
    89  
    90  		meshCfg := krt.FetchOne(ctx, MeshConfig.AsCollection())
    91  		for _, wle := range se.Spec.Endpoints {
    92  			// Fetch the policies that are present and apply to this workload.
    93  			// We only want label-selector policies; selector-less (global) ones are not attached to the
    94  			// final WorkloadInfo, so we take every policy that selects us.
    95  			basePolicies := krt.Fetch(ctx, AllPolicies, krt.FilterSelects(se.Labels), krt.FilterGeneric(func(a any) bool {
    96  				return a.(model.WorkloadAuthorization).GetLabelSelector() != nil
    97  			}))
    98  			policies := slices.Sort(slices.Map(basePolicies, func(t model.WorkloadAuthorization) string {
    99  				return t.ResourceName()
   100  			}))
   101  			// We could do this without FilterGeneric, but krt currently blows up if we depend on the same collection twice
   102  			auths := fetchPeerAuthentications(ctx, PeerAuths, meshCfg, se.Namespace, wle.Labels)
   103  			policies = append(policies, convertedSelectorPeerAuthentications(meshCfg.GetRootNamespace(), auths)...)
   104  			var waypoint *Waypoint
   105  			if wle.Labels[constants.ManagedGatewayLabel] != constants.ManagedGatewayMeshControllerLabel {
   106  				// Use the partial object meta, which only defines the namespace, since the endpoint doesn't have its own object meta
   107  				waypoint = fetchWaypointForWorkload(ctx, Waypoints, Namespaces, someObjectMeta)
   108  			}
   109  			var waypointAddress *workloadapi.GatewayAddress
   110  			if waypoint != nil {
   111  				waypointAddress = a.getWaypointAddress(waypoint)
   112  			}
   113  
   114  			// enforce traversing waypoints
   115  			policies = append(policies, implicitWaypointPolicies(ctx, Waypoints, waypoint, services)...)
   116  
   117  			a.networkUpdateTrigger.MarkDependant(ctx) // Mark that we depend on the out-of-band a.Network
   118  			network := a.Network(wle.Address, wle.Labels).String()
   119  			if wle.Network != "" {
   120  				network = wle.Network
   121  			}
   122  			w := &workloadapi.Workload{
   123  				Uid:                   a.generateServiceEntryUID(se.Namespace, se.Name, wle.Address),
   124  				Name:                  se.Name,
   125  				Namespace:             se.Namespace,
   126  				Network:               network,
   127  				ClusterId:             string(a.ClusterID),
   128  				ServiceAccount:        wle.ServiceAccount,
   129  				Services:              constructServicesFromWorkloadEntry(wle, services),
   130  				AuthorizationPolicies: policies,
   131  				Status:                workloadapi.WorkloadStatus_HEALTHY, // TODO: WE can be unhealthy
   132  				Waypoint:              waypointAddress,
   133  				TrustDomain:           pickTrustDomain(),
   134  				Locality:              getWorkloadEntryLocality(wle),
   135  			}
   136  
   137  			if addr, err := netip.ParseAddr(wle.Address); err == nil {
   138  				w.Addresses = [][]byte{addr.AsSlice()}
   139  			} else {
   140  				log.Warnf("skipping workload entry %s/%s; DNS Address resolution is not yet implemented", se.Namespace, se.Name)
   141  			}
   142  
   143  			w.WorkloadName, w.WorkloadType = se.Name, workloadapi.WorkloadType_POD // XXX(shashankram): HACK to impersonate pod
   144  			w.CanonicalName, w.CanonicalRevision = kubelabels.CanonicalService(se.Labels, w.WorkloadName)
   145  
   146  			setTunnelProtocol(se.Labels, se.Annotations, w)
   147  			res = append(res, model.WorkloadInfo{Workload: w, Labels: se.Labels, Source: kind.WorkloadEntry, CreationTime: se.CreationTimestamp.Time})
   148  		}
   149  		return res
   150  	}, krt.WithName("ServiceEntryWorkloads"))
   151  	Workloads := krt.JoinCollection([]krt.Collection[model.WorkloadInfo]{PodWorkloads, WorkloadEntryWorkloads, ServiceEntryWorkloads}, krt.WithName("Workloads"))
   152  	return Workloads
   153  }
   154  
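        // workloadEntryWorkloadBuilder returns the krt transformation that converts a WorkloadEntry into a
        // WorkloadInfo, resolving its waypoint, authorization policies, matching services, network, and locality.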
   155  func (a *index) workloadEntryWorkloadBuilder(
   156  	MeshConfig krt.Singleton[MeshConfig],
   157  	AuthorizationPolicies krt.Collection[model.WorkloadAuthorization],
   158  	PeerAuths krt.Collection[*securityclient.PeerAuthentication],
   159  	Waypoints krt.Collection[Waypoint],
   160  	WorkloadServices krt.Collection[model.ServiceInfo],
   161  	WorkloadServicesNamespaceIndex *krt.Index[model.ServiceInfo, string],
   162  	Namespaces krt.Collection[*v1.Namespace],
   163  ) func(ctx krt.HandlerContext, wle *networkingclient.WorkloadEntry) *model.WorkloadInfo {
   164  	return func(ctx krt.HandlerContext, wle *networkingclient.WorkloadEntry) *model.WorkloadInfo {
   165  		meshCfg := krt.FetchOne(ctx, MeshConfig.AsCollection())
   166  		// Fetch the policies that are present and apply to this workload.
   167  		// We only want label-selector policies; selector-less (global) ones are not attached to the
   168  		// final WorkloadInfo, so we take every policy that selects us.
   169  		basePolicies := krt.Fetch(ctx, AuthorizationPolicies, krt.FilterSelects(wle.Labels), krt.FilterGeneric(func(a any) bool {
   170  			return a.(model.WorkloadAuthorization).GetLabelSelector() != nil
   171  		}))
   172  		policies := slices.Sort(slices.Map(basePolicies, func(t model.WorkloadAuthorization) string {
   173  			return t.ResourceName()
   174  		}))
   175  		// We could do this without FilterGeneric, but krt currently blows up if we depend on the same collection twice
   176  		auths := fetchPeerAuthentications(ctx, PeerAuths, meshCfg, wle.Namespace, wle.Labels)
   177  		policies = append(policies, convertedSelectorPeerAuthentications(meshCfg.GetRootNamespace(), auths)...)
   178  		var waypoint *Waypoint
   179  		if wle.Labels[constants.ManagedGatewayLabel] != constants.ManagedGatewayMeshControllerLabel {
   180  			waypoint = fetchWaypointForWorkload(ctx, Waypoints, Namespaces, wle.ObjectMeta)
   181  		}
   182  		var waypointAddress *workloadapi.GatewayAddress
   183  		if waypoint != nil {
   184  			waypointAddress = a.getWaypointAddress(waypoint)
   185  		}
   186  		fo := []krt.FetchOption{krt.FilterIndex(WorkloadServicesNamespaceIndex, wle.Namespace), krt.FilterSelectsNonEmpty(wle.GetLabels())}
   187  		if !features.EnableK8SServiceSelectWorkloadEntries {
   188  			fo = append(fo, krt.FilterGeneric(func(a any) bool {
   189  				return a.(model.ServiceInfo).Source == kind.ServiceEntry
   190  			}))
   191  		}
   192  		services := krt.Fetch(ctx, WorkloadServices, fo...)
   193  		a.networkUpdateTrigger.MarkDependant(ctx) // Mark that we depend on the out-of-band a.Network
   194  		network := a.Network(wle.Spec.Address, wle.Labels).String()
   195  		if wle.Spec.Network != "" {
   196  			network = wle.Spec.Network
   197  		}
   198  
   199  		// enforce traversing waypoints
   200  		policies = append(policies, implicitWaypointPolicies(ctx, Waypoints, waypoint, services)...)
   201  
   202  		w := &workloadapi.Workload{
   203  			Uid:                   a.generateWorkloadEntryUID(wle.Namespace, wle.Name),
   204  			Name:                  wle.Name,
   205  			Namespace:             wle.Namespace,
   206  			Network:               network,
   207  			ClusterId:             string(a.ClusterID),
   208  			ServiceAccount:        wle.Spec.ServiceAccount,
   209  			Services:              constructServicesFromWorkloadEntry(&wle.Spec, services),
   210  			AuthorizationPolicies: policies,
   211  			Status:                workloadapi.WorkloadStatus_HEALTHY, // TODO: WE can be unhealthy
   212  			Waypoint:              waypointAddress,
   213  			TrustDomain:           pickTrustDomain(),
   214  			Locality:              getWorkloadEntryLocality(&wle.Spec),
   215  		}
   216  
   217  		if addr, err := netip.ParseAddr(wle.Spec.Address); err == nil {
   218  			w.Addresses = [][]byte{addr.AsSlice()}
   219  		} else {
   220  			log.Warnf("skipping workload entry %s/%s; DNS Address resolution is not yet implemented", wle.Namespace, wle.Name)
   221  		}
   222  
   223  		w.WorkloadName, w.WorkloadType = wle.Name, workloadapi.WorkloadType_POD // XXX(shashankram): HACK to impersonate pod
   224  		w.CanonicalName, w.CanonicalRevision = kubelabels.CanonicalService(wle.Labels, w.WorkloadName)
   225  
   226  		setTunnelProtocol(wle.Labels, wle.Annotations, w)
   227  		return &model.WorkloadInfo{Workload: w, Labels: wle.Labels, Source: kind.WorkloadEntry, CreationTime: wle.CreationTimestamp.Time}
   228  	}
   229  }
   230  
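        // podWorkloadBuilder returns the krt transformation that converts a Pod into a WorkloadInfo,
        // resolving its waypoint, authorization policies, matching services, health status, network, and locality.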
   231  func (a *index) podWorkloadBuilder(
   232  	MeshConfig krt.Singleton[MeshConfig],
   233  	AuthorizationPolicies krt.Collection[model.WorkloadAuthorization],
   234  	PeerAuths krt.Collection[*securityclient.PeerAuthentication],
   235  	Waypoints krt.Collection[Waypoint],
   236  	WorkloadServices krt.Collection[model.ServiceInfo],
   237  	WorkloadServicesNamespaceIndex *krt.Index[model.ServiceInfo, string],
   238  	Namespaces krt.Collection[*v1.Namespace],
   239  	Nodes krt.Collection[*v1.Node],
   240  ) func(ctx krt.HandlerContext, p *v1.Pod) *model.WorkloadInfo {
   241  	return func(ctx krt.HandlerContext, p *v1.Pod) *model.WorkloadInfo {
   242  		// A Pod that is Pending but already has a pod IP should be treated as a valid workload and built,
   243  		// for example a pod whose initContainer is still initializing.
   244  		// See https://github.com/istio/istio/issues/48854
   245  		if (!IsPodRunning(p) && !IsPodPending(p)) || p.Spec.HostNetwork {
   246  			return nil
   247  		}
   248  		podIP, err := netip.ParseAddr(p.Status.PodIP)
   249  		if err != nil {
   250  			// Is this possible? Probably not in the typical case, but anyone could put garbage there.
   251  			return nil
   252  		}
   253  		meshCfg := krt.FetchOne(ctx, MeshConfig.AsCollection())
   254  		// Fetch the policies that are present and apply to this workload.
   255  		// We only want label-selector policies; selector-less (global) ones are not attached to the
   256  		// final WorkloadInfo, so we take every policy that selects us.
   257  		basePolicies := krt.Fetch(ctx, AuthorizationPolicies, krt.FilterSelects(p.Labels), krt.FilterGeneric(func(a any) bool {
   258  			return a.(model.WorkloadAuthorization).GetLabelSelector() != nil
   259  		}))
   260  		policies := slices.Sort(slices.Map(basePolicies, func(t model.WorkloadAuthorization) string {
   261  			return t.ResourceName()
   262  		}))
   263  		// We could do this without FilterGeneric, but krt currently blows up if we depend on the same collection twice
   264  		auths := fetchPeerAuthentications(ctx, PeerAuths, meshCfg, p.Namespace, p.Labels)
   265  		policies = append(policies, convertedSelectorPeerAuthentications(meshCfg.GetRootNamespace(), auths)...)
   266  		fo := []krt.FetchOption{krt.FilterIndex(WorkloadServicesNamespaceIndex, p.Namespace), krt.FilterSelectsNonEmpty(p.GetLabels())}
   267  		if !features.EnableServiceEntrySelectPods {
   268  			fo = append(fo, krt.FilterGeneric(func(a any) bool {
   269  				return a.(model.ServiceInfo).Source == kind.Service
   270  			}))
   271  		}
   272  		services := krt.Fetch(ctx, WorkloadServices, fo...)
   273  		status := workloadapi.WorkloadStatus_HEALTHY
   274  		if !IsPodReady(p) {
   275  			status = workloadapi.WorkloadStatus_UNHEALTHY
   276  		}
   277  		a.networkUpdateTrigger.MarkDependant(ctx) // Mark that we depend on the out-of-band a.Network
   278  		network := a.Network(p.Status.PodIP, p.Labels).String()
   279  
   280  		var appTunnel *workloadapi.ApplicationTunnel
   281  		var targetWaypoint *Waypoint
   282  		if instancedWaypoint := fetchWaypointForInstance(ctx, Waypoints, p.ObjectMeta); instancedWaypoint != nil {
   283  			// we're an instance of a waypoint, set inbound tunnel info
   284  			appTunnel = &workloadapi.ApplicationTunnel{
   285  				Protocol: instancedWaypoint.DefaultBinding.Protocol,
   286  				Port:     instancedWaypoint.DefaultBinding.Port,
   287  			}
   288  		} else if waypoint := fetchWaypointForWorkload(ctx, Waypoints, Namespaces, p.ObjectMeta); waypoint != nil {
   289  			// there is a workload-attached waypoint, point there with a GatewayAddress
   290  			targetWaypoint = waypoint
   291  		}
   292  
   293  		// enforce traversing waypoints
   294  		policies = append(policies, implicitWaypointPolicies(ctx, Waypoints, targetWaypoint, services)...)
   295  
   296  		w := &workloadapi.Workload{
   297  			Uid:                   a.generatePodUID(p),
   298  			Name:                  p.Name,
   299  			Namespace:             p.Namespace,
   300  			Network:               network,
   301  			ClusterId:             string(a.ClusterID),
   302  			Addresses:             [][]byte{podIP.AsSlice()},
   303  			ServiceAccount:        p.Spec.ServiceAccountName,
   304  			Waypoint:              a.getWaypointAddress(targetWaypoint),
   305  			Node:                  p.Spec.NodeName,
   306  			ApplicationTunnel:     appTunnel,
   307  			Services:              constructServices(p, services),
   308  			AuthorizationPolicies: policies,
   309  			Status:                status,
   310  			TrustDomain:           pickTrustDomain(),
   311  			Locality:              getPodLocality(ctx, Nodes, p),
   312  		}
   313  
   314  		w.WorkloadName, w.WorkloadType = workloadNameAndType(p)
   315  		w.CanonicalName, w.CanonicalRevision = kubelabels.CanonicalService(p.Labels, w.WorkloadName)
   316  
   317  		setTunnelProtocol(p.Labels, p.Annotations, w)
   318  		return &model.WorkloadInfo{Workload: w, Labels: p.Labels, Source: kind.Pod, CreationTime: p.CreationTimestamp.Time}
   319  	}
   320  }
   321  
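        // setTunnelProtocol marks the workload as using HBONE when ambient redirection is enabled via annotation,
        // or when its labels indicate that the workload natively supports tunneling.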
   322  func setTunnelProtocol(labels, annotations map[string]string, w *workloadapi.Workload) {
   323  	if annotations[constants.AmbientRedirection] == constants.AmbientRedirectionEnabled {
   324  		// Configured for override
   325  		w.TunnelProtocol = workloadapi.TunnelProtocol_HBONE
   326  	}
   327  	// Otherwise, check whether the workload supports tunneling natively
   328  	if model.SupportsTunnel(labels, model.TunnelHTTP) {
   329  		w.TunnelProtocol = workloadapi.TunnelProtocol_HBONE
   330  		w.NativeTunnel = true
   331  	}
   332  }
   333  
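        // pickTrustDomain returns the mesh trust domain, or an empty string when it is the default
        // "cluster.local" so the field can be omitted from the workload.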
   334  func pickTrustDomain() string {
   335  	if td := spiffe.GetTrustDomain(); td != "cluster.local" {
   336  		return td
   337  	}
   338  	return ""
   339  }
   340  
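        // fetchPeerAuthentications returns the PeerAuthentication policies that apply to a workload:
        // selector-less policies in the root namespace, plus policies in the workload's namespace whose
        // selector is absent or matches the workload labels.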
   341  func fetchPeerAuthentications(
   342  	ctx krt.HandlerContext,
   343  	PeerAuths krt.Collection[*securityclient.PeerAuthentication],
   344  	meshCfg *MeshConfig,
   345  	ns string,
   346  	matchLabels map[string]string,
   347  ) []*securityclient.PeerAuthentication {
   348  	return krt.Fetch(ctx, PeerAuths, krt.FilterGeneric(func(a any) bool {
   349  		pol := a.(*securityclient.PeerAuthentication)
   350  		if pol.Namespace == meshCfg.GetRootNamespace() && pol.Spec.Selector == nil {
   351  			return true
   352  		}
   353  		if pol.Namespace != ns {
   354  			return false
   355  		}
   356  		sel := pol.Spec.Selector
   357  		if sel == nil {
   358  			return true // A nil selector matches everything
   359  		}
   360  		return labels.Instance(sel.MatchLabels).SubsetOf(matchLabels)
   361  	}))
   362  }
   363  
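        // constructServicesFromWorkloadEntry maps each matching service to the ports the WorkloadEntry serves,
        // resolving named target ports against the WorkloadEntry's declared ports.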
   364  func constructServicesFromWorkloadEntry(p *networkingv1alpha3.WorkloadEntry, services []model.ServiceInfo) map[string]*workloadapi.PortList {
   365  	res := map[string]*workloadapi.PortList{}
   366  	for _, svc := range services {
   367  		n := namespacedHostname(svc.Namespace, svc.Hostname)
   368  		pl := &workloadapi.PortList{}
   369  		res[n] = pl
   370  		for _, port := range svc.Ports {
   371  			targetPort := port.TargetPort
   372  			// Named targetPort has different semantics between Service and ServiceEntry
   373  			if svc.Source == kind.Service {
   374  				// Service has explicit named targetPorts.
   375  				if named, f := svc.PortNames[int32(port.ServicePort)]; f && named.TargetPortName != "" {
   376  					// This port is a named target port, look it up
   377  					tv, ok := p.Ports[named.TargetPortName]
   378  					if !ok {
   379  						// We needed an explicit port, but didn't find one - skip this port
   380  						continue
   381  					}
   382  					targetPort = tv
   383  				}
   384  			} else {
   385  				// ServiceEntry has no explicit named targetPorts; targetPort only allows a number.
   386  				// Instead, ports are matched by name against the WorkloadEntry's ports.
   387  				if named, f := svc.PortNames[int32(port.ServicePort)]; f {
   388  					// Look up the WorkloadEntry port by the service port's name
   389  					tv, ok := p.Ports[named.PortName]
   390  					if ok {
   391  						// if we match one, override it. Otherwise, use the service port
   392  						targetPort = tv
   393  					} else if targetPort == 0 {
   394  						targetPort = port.ServicePort
   395  					}
   396  				}
   397  			}
   398  			pl.Ports = append(pl.Ports, &workloadapi.Port{
   399  				ServicePort: port.ServicePort,
   400  				TargetPort:  targetPort,
   401  			})
   402  		}
   403  	}
   404  	return res
   405  }
   406  
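        // workloadNameAndType derives the owning workload's name and type (Deployment, Job, CronJob, or
        // standalone Pod) from the pod's controller metadata.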
   407  func workloadNameAndType(pod *v1.Pod) (string, workloadapi.WorkloadType) {
   408  	objMeta, typeMeta := kubeutil.GetDeployMetaFromPod(pod)
   409  	switch typeMeta.Kind {
   410  	case "Deployment":
   411  		return objMeta.Name, workloadapi.WorkloadType_DEPLOYMENT
   412  	case "Job":
   413  		return objMeta.Name, workloadapi.WorkloadType_JOB
   414  	case "CronJob":
   415  		return objMeta.Name, workloadapi.WorkloadType_CRONJOB
   416  	default:
   417  		return pod.Name, workloadapi.WorkloadType_POD
   418  	}
   419  }
   420  
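        // constructServices maps each matching service to the ports the pod serves, resolving named target
        // ports against the pod's container ports and dropping ports the pod does not expose.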
   421  func constructServices(p *v1.Pod, services []model.ServiceInfo) map[string]*workloadapi.PortList {
   422  	res := map[string]*workloadapi.PortList{}
   423  	for _, svc := range services {
   424  		n := namespacedHostname(svc.Namespace, svc.Hostname)
   425  		pl := &workloadapi.PortList{
   426  			Ports: make([]*workloadapi.Port, 0, len(svc.Ports)),
   427  		}
   428  		res[n] = pl
   429  		for _, port := range svc.Ports {
   430  			targetPort := port.TargetPort
   431  			// svc.Ports represents the workloadapi.Service, which drops the port name info and only carries a numeric target port.
   432  			// A TargetPort of 0 indicates a named port; check if it is one and replace it with the real targetPort if so.
   433  			if named, f := svc.PortNames[int32(port.ServicePort)]; f && named.TargetPortName != "" {
   434  				// Pods only match on TargetPort names
   435  				tp, ok := FindPortName(p, named.TargetPortName)
   436  				if !ok {
   437  					// Port not present for this workload. Exclude the port entirely
   438  					continue
   439  				}
   440  				targetPort = uint32(tp)
   441  			}
   442  
   443  			pl.Ports = append(pl.Ports, &workloadapi.Port{
   444  				ServicePort: port.ServicePort,
   445  				TargetPort:  targetPort,
   446  			})
   447  		}
   448  	}
   449  	return res
   450  }
   451  
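        // getPodLocality reads the region, zone, and subzone topology labels from the pod's node,
        // returning nil when the node is not yet known or carries no locality labels.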
   452  func getPodLocality(ctx krt.HandlerContext, Nodes krt.Collection[*v1.Node], pod *v1.Pod) *workloadapi.Locality {
   453  	// NodeName is set by the scheduler after the pod is created
   454  	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#late-initialization
   455  	node := ptr.Flatten(krt.FetchOne(ctx, Nodes, krt.FilterKey(pod.Spec.NodeName)))
   456  	if node == nil {
   457  		if pod.Spec.NodeName != "" {
   458  			log.Warnf("unable to get node %q for pod %q/%q", pod.Spec.NodeName, pod.Namespace, pod.Name)
   459  		}
   460  		return nil
   461  	}
   462  
   463  	region := node.GetLabels()[v1.LabelTopologyRegion]
   464  	zone := node.GetLabels()[v1.LabelTopologyZone]
   465  	subzone := node.GetLabels()[label.TopologySubzone.Name]
   466  
   467  	if region == "" && zone == "" && subzone == "" {
   468  		return nil
   469  	}
   470  
   471  	return &workloadapi.Locality{
   472  		Region:  region,
   473  		Zone:    zone,
   474  		Subzone: subzone,
   475  	}
   476  }
   477  
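        // getWorkloadEntryLocality parses the WorkloadEntry's locality string (region/zone/subzone)
        // into a workloadapi.Locality, or returns nil if unset.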
   478  func getWorkloadEntryLocality(p *networkingv1alpha3.WorkloadEntry) *workloadapi.Locality {
   479  	region, zone, subzone := labelutil.SplitLocalityLabel(p.GetLocality())
   480  	if region == "" && zone == "" && subzone == "" {
   481  		return nil
   482  	}
   483  	return &workloadapi.Locality{
   484  		Region:  region,
   485  		Zone:    zone,
   486  		Subzone: subzone,
   487  	}
   488  }
   489  
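        // implicitWaypointPolicies returns the names of the implicit allow-from-waypoint policies for the
        // workload's own waypoint and for any distinct waypoints fronting its services, when
        // DefaultAllowFromWaypoint is enabled.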
   490  func implicitWaypointPolicies(ctx krt.HandlerContext, Waypoints krt.Collection[Waypoint], waypoint *Waypoint, services []model.ServiceInfo) []string {
   491  	if !features.DefaultAllowFromWaypoint {
   492  		return nil
   493  	}
   494  	serviceWaypointKeys := slices.MapFilter(services, func(si model.ServiceInfo) *string {
   495  		if si.Waypoint == "" || (waypoint != nil && waypoint.ResourceName() == si.Waypoint) {
   496  			return nil
   497  		}
   498  		return ptr.Of(si.Waypoint)
   499  	})
   500  	if len(serviceWaypointKeys) == 0 {
   501  		if waypoint != nil {
   502  			n := implicitWaypointPolicyName(waypoint)
   503  			if n != "" {
   504  				return []string{waypoint.Namespace + "/" + n}
   505  			}
   506  		}
   507  		return nil
   508  	}
   509  	waypoints := krt.Fetch(ctx, Waypoints, krt.FilterKeys(serviceWaypointKeys...))
   510  	if waypoint != nil {
   511  		waypoints = append(waypoints, *waypoint)
   512  	}
   513  
   514  	return slices.MapFilter(waypoints, func(w Waypoint) *string {
   515  		policy := implicitWaypointPolicyName(&w)
   516  		if policy == "" {
   517  			return nil
   518  		}
   519  		return ptr.Of(w.Namespace + "/" + policy)
   520  	})
   521  }