istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pilot/pkg/serviceregistry/serviceregistry_test.go

// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package serviceregistry_test

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"

	"istio.io/api/label"
	meshconfig "istio.io/api/mesh/v1alpha1"
	"istio.io/api/meta/v1alpha1"
	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/config/memory"
	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/model/status"
	"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
	kubecontroller "istio.io/istio/pilot/pkg/serviceregistry/kube/controller"
	"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
	"istio.io/istio/pilot/pkg/serviceregistry/util/xdsfake"
	v3 "istio.io/istio/pilot/pkg/xds/v3"
	xds "istio.io/istio/pilot/test/xds"
	"istio.io/istio/pilot/test/xdstest"
	"istio.io/istio/pkg/config"
	"istio.io/istio/pkg/config/constants"
	"istio.io/istio/pkg/config/mesh"
	"istio.io/istio/pkg/config/schema/collections"
	"istio.io/istio/pkg/config/schema/gvk"
	kubeclient "istio.io/istio/pkg/kube"
	"istio.io/istio/pkg/maps"
	"istio.io/istio/pkg/slices"
	istiotest "istio.io/istio/pkg/test"
	"istio.io/istio/pkg/test/util/assert"
	"istio.io/istio/pkg/test/util/retry"
)

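// setupTest wires a fake Kubernetes registry and a ServiceEntry registry together through a
// fake XDS updater, mirroring how the two registries are composed in pilot. It returns the
// config store, the fake Kubernetes client, and the updater for event assertions.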
func setupTest(t *testing.T) (model.ConfigStoreController, kubernetes.Interface, *xdsfake.Updater) {
	t.Helper()
	client := kubeclient.NewFakeClient()

	endpoints := model.NewEndpointIndex(model.DisabledCache{})
	delegate := model.NewEndpointIndexUpdater(endpoints)
	xdsUpdater := xdsfake.NewWithDelegate(delegate)
	delegate.ConfigUpdateFunc = xdsUpdater.ConfigUpdate
	meshWatcher := mesh.NewFixedWatcher(&meshconfig.MeshConfig{})
	kc := kubecontroller.NewController(
		client,
		kubecontroller.Options{
			XDSUpdater:            xdsUpdater,
			DomainSuffix:          "cluster.local",
			MeshWatcher:           meshWatcher,
			MeshServiceController: aggregate.NewController(aggregate.Options{MeshHolder: meshWatcher}),
		},
	)
	configController := memory.NewController(memory.Make(collections.Pilot))

	stop := istiotest.NewStop(t)
	go configController.Run(stop)

	se := serviceentry.NewController(configController, xdsUpdater)
	client.RunAndWait(stop)

	kc.AppendWorkloadHandler(se.WorkloadInstanceHandler)
	se.AppendWorkloadHandler(kc.WorkloadInstanceHandler)

	go kc.Run(stop)
	go se.Run(stop)

	return configController, client.Kube(), xdsUpdater
}

// TestWorkloadInstances is effectively an integration test of composing the Kubernetes service registry with the
// external service registry, which cross-reference each other through workload instances.
func TestWorkloadInstances(t *testing.T) {
	istiotest.SetForTest(t, &features.WorkloadEntryHealthChecks, true)
	port := &networking.ServicePort{
		Name:     "http",
		Number:   80,
		Protocol: "http",
	}
	labels := map[string]string{
		"app": "foo",
	}
	namespace := "namespace"
	serviceEntry := config.Config{
		Meta: config.Meta{
			Name:             "service-entry",
			Namespace:        namespace,
			GroupVersionKind: gvk.ServiceEntry,
			Domain:           "cluster.local",
		},
		Spec: &networking.ServiceEntry{
			Hosts: []string{"service.namespace.svc.cluster.local"},
			Ports: []*networking.ServicePort{port},
			WorkloadSelector: &networking.WorkloadSelector{
				Labels: labels,
			},
			Resolution: networking.ServiceEntry_STATIC,
		},
	}
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "service",
			Namespace: namespace,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name: "http",
				Port: 80,
			}},
			Selector:  labels,
			ClusterIP: "9.9.9.9",
		},
	}
	headlessServiceHTTP := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "service",
			Namespace: namespace,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name: "http",
				Port: 80,
			}},
			Selector:  labels,
			ClusterIP: v1.ClusterIPNone,
		},
	}
	headlessServiceTCP := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "service",
			Namespace: namespace,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name: "tcp",
				Port: 80,
			}},
			Selector:  labels,
			ClusterIP: v1.ClusterIPNone,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "pod",
			Namespace:   namespace,
			Labels:      labels,
			Annotations: map[string]string{},
		},
		Status: v1.PodStatus{
			PodIP: "1.2.3.4",
			Phase: v1.PodPending,
		},
	}
	workloadEntry := config.Config{
		Meta: config.Meta{
			Name:             "workload",
			Namespace:        namespace,
			GroupVersionKind: gvk.WorkloadEntry,
			Domain:           "cluster.local",
		},
		Spec: &networking.WorkloadEntry{
			Address: "2.3.4.5",
			Labels:  labels,
		},
	}
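	// expectedSvc is a superset of the ports exercised by the subtests below (http/80,
	// http2/90, tcp/70); the endpoint assertions match on the service hostname and the
	// queried port, so the extra ports are harmless for subtests that do not use them.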
	expectedSvc := &model.Service{
		Hostname: "service.namespace.svc.cluster.local",
		Ports: []*model.Port{{
			Name:     "http",
			Port:     80,
			Protocol: "http",
		}, {
			Name:     "http2",
			Port:     90,
			Protocol: "http",
		}, {
			Name:     "tcp",
			Port:     70,
			Protocol: "tcp",
		}},
		Attributes: model.ServiceAttributes{
			Namespace:      namespace,
			Name:           "service",
			LabelSelectors: labels,
		},
	}

	t.Run("Kubernetes only", func(t *testing.T) {
		_, kube, fx := setupTest(t)
		makeService(t, kube, service)
		makePod(t, kube, pod)
		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{pod.Status.PodIP})

		instances := []EndpointResponse{{
			Address: pod.Status.PodIP,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Kubernetes pod labels update", func(t *testing.T) {
		_, kube, xdsUpdater := setupTest(t)
		makeService(t, kube, service)
		xdsUpdater.WaitOrFail(t, "service")
		makePod(t, kube, pod)
		xdsUpdater.WaitOrFail(t, "proxy")
		newPod := pod.DeepCopy()
		newPod.Labels["newlabel"] = "new"
		makePod(t, kube, newPod)
		xdsUpdater.WaitOrFail(t, "proxy")
	})

	t.Run("Kubernetes only: headless pure HTTP service", func(t *testing.T) {
		_, kube, fx := setupTest(t)
		makeService(t, kube, headlessServiceHTTP)
		fx.WaitOrFail(t, "service")
		makePod(t, kube, pod)
		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{pod.Status.PodIP})
		fx.WaitOrFail(t, "eds")
		// An endpoint update is triggered since it's a brand new service
		if ev := fx.WaitOrFail(t, "xds full"); !ev.Reason.Has(model.EndpointUpdate) {
			t.Fatalf("xds push reason does not contain %v: %v", model.EndpointUpdate, ev)
		}
		// A headless service update must trigger an NDS push, so we trigger a full push.
		if ev := fx.WaitOrFail(t, "xds full"); !ev.Reason.Has(model.HeadlessEndpointUpdate) {
			t.Fatalf("xds push reason does not contain %v: %v", model.HeadlessEndpointUpdate, ev)
		}

		// Pure HTTP headless services should not need a full push, since they do not
		// require a listener based on IP: https://github.com/istio/istio/issues/48207
		instances := []EndpointResponse{{
			Address: pod.Status.PodIP,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Kubernetes only: headless non-HTTP service", func(t *testing.T) {
		_, kube, fx := setupTest(t)
		makeService(t, kube, headlessServiceTCP)
		fx.WaitOrFail(t, "service")
		makePod(t, kube, pod)
		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "tcp", Port: 70}}, []string{pod.Status.PodIP})
		fx.WaitOrFail(t, "eds")
		// An endpoint update is triggered since it's a brand new service
		if ev := fx.WaitOrFail(t, "xds full"); !ev.Reason.Has(model.EndpointUpdate) {
			t.Fatalf("xds push reason does not contain %v: %v", model.EndpointUpdate, ev)
		}
		// A headless service update must trigger an NDS push, so we trigger a full push.
		if ev := fx.WaitOrFail(t, "xds full"); !ev.Reason.Has(model.HeadlessEndpointUpdate) {
			t.Fatalf("xds push reason does not contain %v: %v", model.HeadlessEndpointUpdate, ev)
		}
		instances := []EndpointResponse{{
			Address: pod.Status.PodIP,
			Port:    70,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 70, instances)
	})

	t.Run("Kubernetes only: endpoints occur earlier", func(t *testing.T) {
		_, kube, fx := setupTest(t)
		makePod(t, kube, pod)

		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{pod.Status.PodIP})
		waitForEdsUpdate(t, fx, 1)

		// make the service appear later than the endpoints
		makeService(t, kube, service)
		fx.WaitOrFail(t, "eds cache")

		instances := []EndpointResponse{{
			Address: pod.Status.PodIP,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("External only: workloadEntry port and serviceEntry target port are not set, use serviceEntry port.number", func(t *testing.T) {
		store, _, fx := setupTest(t)
		makeIstioObject(t, store, serviceEntry)
		makeIstioObject(t, store, workloadEntry)

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("External only: the port names of the workloadEntry and serviceEntry match, use the workloadEntry port to override", func(t *testing.T) {
		store, _, fx := setupTest(t)
		makeIstioObject(t, store, serviceEntry)
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "workload",
				Namespace:        namespace,
				GroupVersionKind: gvk.WorkloadEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.WorkloadEntry{
				Address: "2.3.4.5",
				Labels:  labels,
				Ports: map[string]uint32{
					serviceEntry.Spec.(*networking.ServiceEntry).Ports[0].Name: 8080,
				},
			},
		})

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    8080,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("External only: the port names of the workloadEntry and serviceEntry match, "+
		"and the serviceEntry's targetPort differs from the workloadEntry's, use the workloadEntry port to override", func(t *testing.T) {
		store, _, fx := setupTest(t)
		se := serviceEntry.Spec.(*networking.ServiceEntry).DeepCopy()
		se.Ports[0].TargetPort = 8081 // the workloadEntry port takes precedence; this value is ignored

		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "workload",
				Namespace:        namespace,
				GroupVersionKind: gvk.WorkloadEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.WorkloadEntry{
				Address: "2.3.4.5",
				Labels:  labels,
				Ports: map[string]uint32{
					serviceEntry.Spec.(*networking.ServiceEntry).Ports[0].Name: 8080,
				},
			},
		})

		makeIstioObject(t, store, serviceEntry)

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    8080,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("External only: workloadEntry port is not set, use target port", func(t *testing.T) {
		store, _, fx := setupTest(t)
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "service-entry",
				Namespace:        namespace,
				GroupVersionKind: gvk.ServiceEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.ServiceEntry{
				Hosts: []string{"service.namespace.svc.cluster.local"},
				Ports: []*networking.ServicePort{{
					Name:       "http",
					Number:     80,
					Protocol:   "http",
					TargetPort: 8080,
				}},
				WorkloadSelector: &networking.WorkloadSelector{
					Labels: labels,
				},
			},
		})
		makeIstioObject(t, store, workloadEntry)

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    8080,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("External only: the port names of the workloadEntry and serviceEntry do not match, use target port", func(t *testing.T) {
		store, _, fx := setupTest(t)
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "service-entry",
				Namespace:        namespace,
				GroupVersionKind: gvk.ServiceEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.ServiceEntry{
				Hosts: []string{"service.namespace.svc.cluster.local"},
				Ports: []*networking.ServicePort{{
					Name:       "http",
					Number:     80,
					Protocol:   "http",
					TargetPort: 8080,
				}},
				WorkloadSelector: &networking.WorkloadSelector{
					Labels: labels,
				},
			},
		})
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "workload",
				Namespace:        namespace,
				GroupVersionKind: gvk.WorkloadEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.WorkloadEntry{
				Address: "2.3.4.5",
				Labels:  labels,
				Ports: map[string]uint32{
					"different-port-name": 8081,
				},
			},
		})

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    8080,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("External only: the port names of the workloadEntry and serviceEntry do not match, "+
		"and the serviceEntry target port is not set, use serviceEntry port.number", func(t *testing.T) {
		store, _, fx := setupTest(t)
		makeIstioObject(t, store, serviceEntry)
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "workload",
				Namespace:        namespace,
				GroupVersionKind: gvk.WorkloadEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.WorkloadEntry{
				Address: "2.3.4.5",
				Labels:  labels,
				Ports: map[string]uint32{
					"different-port-name": 8081,
				},
			},
		})

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("External only: workloadEntry port is changed", func(t *testing.T) {
		store, _, fx := setupTest(t)
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "service-entry",
				Namespace:        namespace,
				GroupVersionKind: gvk.ServiceEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.ServiceEntry{
				Hosts: []string{"service.namespace.svc.cluster.local"},
				Ports: []*networking.ServicePort{{
					Name:     "http",
					Number:   80,
					Protocol: "http",
				}},
				WorkloadSelector: &networking.WorkloadSelector{
					Labels: labels,
				},
			},
		})
		makeIstioObject(t, store, workloadEntry)
		fx.WaitOrFail(t, "xds full")

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)

		fx.Clear()
		// Update the port
		newWorkloadEntry := workloadEntry.DeepCopy()
		spec := workloadEntry.Spec.(*networking.WorkloadEntry).DeepCopy()
		spec.Ports = map[string]uint32{
			"http": 1234,
		}
		newWorkloadEntry.Spec = spec
		makeIstioObject(t, store, newWorkloadEntry)
		instances = []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    1234,
		}}
		fx.WaitOrFail(t, "xds")
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Service selects WorkloadEntry", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeService(t, kube, service)
		makeIstioObject(t, store, workloadEntry)

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Service selects WorkloadEntry: wle occurs earlier", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeIstioObject(t, store, workloadEntry)
		// Other than a proxy update, no event is pushed when the workload entry is created, since no service selects it yet
		fx.WaitOrFail(t, "proxy")
		fx.AssertEmpty(t, 40*time.Millisecond)

		makeService(t, kube, service)
		fx.MatchOrFail(t, xdsfake.Event{Type: "eds cache", EndpointCount: 1})

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Service selects both pods and WorkloadEntry", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeService(t, kube, service)
		fx.WaitOrFail(t, "service")

		makeIstioObject(t, store, workloadEntry)
		fx.WaitOrFail(t, "eds")

		makePod(t, kube, pod)
		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{pod.Status.PodIP})
		waitForEdsUpdate(t, fx, 2)

		instances := []EndpointResponse{
			{
				Address: pod.Status.PodIP,
				Port:    80,
			},
			{
				Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
				Port:    80,
			},
		}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Service selects both pods and WorkloadEntry: wle occurs earlier", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeIstioObject(t, store, workloadEntry)

		// Other than a proxy update, no event is pushed when the workload entry is created, since no service selects it yet
		fx.WaitOrFail(t, "proxy")
		fx.AssertEmpty(t, 200*time.Millisecond)

		makePod(t, kube, pod)
		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{pod.Status.PodIP})
		waitForEdsUpdate(t, fx, 1)

		makeService(t, kube, service)
		fx.WaitOrFail(t, "eds cache")

		instances := []EndpointResponse{
			{
				Address: pod.Status.PodIP,
				Port:    80,
			},
			{
				Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
				Port:    80,
			},
		}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Service selects WorkloadEntry with port name", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		expectedSvc := &model.Service{
			Hostname: "service.namespace.svc.cluster.local",
			Ports: []*model.Port{{
				Name:     "my-port",
				Port:     80,
				Protocol: "http",
			}},
			Attributes: model.ServiceAttributes{
				Namespace:      namespace,
				Name:           "service",
				LabelSelectors: labels,
			},
		}
		makeService(t, kube, &v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "service",
				Namespace: namespace,
			},
			Spec: v1.ServiceSpec{
				Ports: []v1.ServicePort{{
					Name: "my-port",
					Port: 80,
				}},
				Selector:  labels,
				ClusterIP: "9.9.9.9",
			},
		})
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "workload",
				Namespace:        namespace,
				GroupVersionKind: gvk.WorkloadEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.WorkloadEntry{
				Address: "2.3.4.5",
				Labels:  labels,
				Ports: map[string]uint32{
					"my-port": 8080,
				},
			},
		})

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    8080,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Service selects WorkloadEntry with targetPort name", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeService(t, kube, &v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "service",
				Namespace: namespace,
			},
			Spec: v1.ServiceSpec{
				Ports: []v1.ServicePort{{
					Name:       "http",
					Port:       80,
					TargetPort: intstr.Parse("my-port"),
				}},
				Selector:  labels,
				ClusterIP: "9.9.9.9",
			},
		})
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "workload",
				Namespace:        namespace,
				GroupVersionKind: gvk.WorkloadEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.WorkloadEntry{
				Address: "2.3.4.5",
				Labels:  labels,
				Ports: map[string]uint32{
					"my-port": 8080,
				},
			},
		})

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    8080,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("Service selects WorkloadEntry with targetPort number", func(t *testing.T) {
		s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
		ei := s.Discovery.Env.EndpointIndex
		makeService(t, s.KubeClient().Kube(), &v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "service",
				Namespace: namespace,
			},
			Spec: v1.ServiceSpec{
				Ports: []v1.ServicePort{
					{
						Name:       "http",
						Port:       80,
						TargetPort: intstr.FromInt32(8080),
					},
					{
						Name:       "http2",
						Port:       90,
						TargetPort: intstr.FromInt32(9090),
					},
				},
				Selector:  labels,
				ClusterIP: "9.9.9.9",
			},
		})
		makeIstioObject(t, s.Store(), config.Config{
			Meta: config.Meta{
				Name:             "workload",
				Namespace:        namespace,
				GroupVersionKind: gvk.WorkloadEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.WorkloadEntry{
				Address: "2.3.4.5",
				Labels:  labels,
			},
		})

		instances := []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    8080,
		}}
		expectServiceEndpointsFromIndex(t, ei, expectedSvc, 80, instances)
		expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"2.3.4.5:8080"}, nil)
		instances = []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    9090,
		}}
		expectServiceEndpointsFromIndex(t, ei, expectedSvc, 90, instances)
		expectEndpoints(t, s, "outbound|90||service.namespace.svc.cluster.local", []string{"2.3.4.5:9090"}, nil)
	})

	t.Run("ServiceEntry selects Pod", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeIstioObject(t, store, serviceEntry)
		makePod(t, kube, pod)

		instances := []EndpointResponse{{
			Address: pod.Status.PodIP,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("ServiceEntry selects Pod that is in transient states", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeIstioObject(t, store, serviceEntry)
		makePod(t, kube, pod)

		instances := []EndpointResponse{{
			Address: pod.Status.PodIP,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)

		// when pods become unready, we should see the instances being removed from the registry
		setPodUnready(pod)
		_, err := kube.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
		if err != nil {
			t.Fatal(err)
		}
		expectServiceEndpoints(t, fx, expectedSvc, 80, []EndpointResponse{})

		setPodReady(pod)
		_, err = kube.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
		if err != nil {
			t.Fatal(err)
		}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("ServiceEntry selects Pod with targetPort number", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeIstioObject(t, store, config.Config{
			Meta: config.Meta{
				Name:             "service-entry",
				Namespace:        namespace,
				GroupVersionKind: gvk.ServiceEntry,
				Domain:           "cluster.local",
			},
			Spec: &networking.ServiceEntry{
				Hosts: []string{"service.namespace.svc.cluster.local"},
				Ports: []*networking.ServicePort{{
					Name:       "http",
					Number:     80,
					Protocol:   "http",
					TargetPort: 8080,
				}},
				WorkloadSelector: &networking.WorkloadSelector{
					Labels: labels,
				},
			},
		})
		makePod(t, kube, pod)

		instances := []EndpointResponse{{
			Address: pod.Status.PodIP,
			Port:    8080,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("All directions", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeService(t, kube, service)
		makeIstioObject(t, store, serviceEntry)

		makePod(t, kube, pod)
		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{pod.Status.PodIP})
		makeIstioObject(t, store, workloadEntry)

		instances := []EndpointResponse{
			{Address: pod.Status.PodIP, Port: 80},
			{Address: pod.Status.PodIP, Port: 80},
			{Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address, Port: 80},
			{Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address, Port: 80},
		}

		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	t.Run("All directions with deletion", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeService(t, kube, service)
		makeIstioObject(t, store, serviceEntry)

		makePod(t, kube, pod)
		createEndpoints(t, kube, service.Name, namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{pod.Status.PodIP})
		makeIstioObject(t, store, workloadEntry)

		instances := []EndpointResponse{
			{Address: pod.Status.PodIP, Port: 80},
			{Address: pod.Status.PodIP, Port: 80},
			{Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address, Port: 80},
			{Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address, Port: 80},
		}

		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)

		_ = kube.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
		_ = kube.DiscoveryV1().EndpointSlices(pod.Namespace).Delete(context.TODO(), "service", metav1.DeleteOptions{})
		_ = store.Delete(gvk.WorkloadEntry, workloadEntry.Name, workloadEntry.Namespace, nil)
		expectServiceEndpoints(t, fx, expectedSvc, 80, []EndpointResponse{})
	})

	t.Run("Service selects WorkloadEntry: update service", func(t *testing.T) {
		s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
		makeService(t, s.KubeClient().Kube(), service)
		makeIstioObject(t, s.Store(), workloadEntry)
		expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"2.3.4.5:80"}, nil)

		newSvc := service.DeepCopy()
		newSvc.Spec.Ports[0].Port = 8080
		makeService(t, s.KubeClient().Kube(), newSvc)
		expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nil)
		expectEndpoints(t, s, "outbound|8080||service.namespace.svc.cluster.local", []string{"2.3.4.5:8080"}, nil)

		newSvc.Spec.Ports[0].TargetPort = intstr.IntOrString{IntVal: 9090}
		makeService(t, s.KubeClient().Kube(), newSvc)
		expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nil)
		expectEndpoints(t, s, "outbound|8080||service.namespace.svc.cluster.local", []string{"2.3.4.5:9090"}, nil)

		if err := s.KubeClient().Kube().CoreV1().Services(newSvc.Namespace).Delete(context.Background(), newSvc.Name, metav1.DeleteOptions{}); err != nil {
			t.Fatal(err)
		}
		expectEndpoints(t, s, "outbound|8080||service.namespace.svc.cluster.local", nil, nil)
	})

	t.Run("Service selects WorkloadEntry: update workloadEntry", func(t *testing.T) {
		s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
		makeService(t, s.KubeClient().Kube(), service)
		makeIstioObject(t, s.Store(), workloadEntry)
		expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"2.3.4.5:80"}, nil)

		newWE := workloadEntry.DeepCopy()
		newWE.Spec.(*networking.WorkloadEntry).Address = "3.4.5.6"
		makeIstioObject(t, s.Store(), newWE)
		expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"3.4.5.6:80"}, nil)

		if err := s.Store().Delete(gvk.WorkloadEntry, newWE.Name, newWE.Namespace, nil); err != nil {
			t.Fatal(err)
		}
		expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nil)
	})

	t.Run("Service selects WorkloadEntry: health status", func(t *testing.T) {
		store, kube, fx := setupTest(t)
		makeService(t, kube, service)

		// Start as unhealthy, should have no instances
		makeIstioObject(t, store, setHealth(workloadEntry, false))
		instances := []EndpointResponse{}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)

		// Mark healthy, get instances
		makeIstioObject(t, store, setHealth(workloadEntry, true))
		instances = []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)

		// Set back to unhealthy
		makeIstioObject(t, store, setHealth(workloadEntry, false))
		instances = []EndpointResponse{}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)

		// Remove health status entirely
		makeIstioObject(t, store, workloadEntry)
		instances = []EndpointResponse{{
			Address: workloadEntry.Spec.(*networking.WorkloadEntry).Address,
			Port:    80,
		}}
		expectServiceEndpoints(t, fx, expectedSvc, 80, instances)
	})

	istiotest.SetForTest(t, &features.EnableSidecarHBONEListening, true)
	istiotest.SetForTest(t, &features.EnableAmbient, true)
	for _, ambient := range []bool{false, true} {
		name := "disabled"
		if ambient {
			name = "enabled"
		}
		m := mesh.DefaultMeshConfig()
		var nodeMeta *model.NodeMetadata
		if ambient {
			nodeMeta = &model.NodeMetadata{EnableHBONE: true}
			pod = pod.DeepCopy()
			pod.Annotations[constants.AmbientRedirection] = constants.AmbientRedirectionEnabled
		}
		opts := xds.FakeOptions{MeshConfig: m}
		t.Run("ambient "+name, func(t *testing.T) {
			t.Run("ServiceEntry selects Pod: update service entry", func(t *testing.T) {
				s := xds.NewFakeDiscoveryServer(t, opts)
				makeIstioObject(t, s.Store(), serviceEntry)
				makePod(t, s.KubeClient().Kube(), pod)
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", expectAmbient([]string{"1.2.3.4:80"}, ambient), nodeMeta)

				newSE := serviceEntry.DeepCopy()
				newSE.Spec.(*networking.ServiceEntry).Ports = []*networking.ServicePort{{
					Name:       "http",
					Number:     80,
					Protocol:   "http",
					TargetPort: 8080,
				}}
				makeIstioObject(t, s.Store(), newSE)
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", expectAmbient([]string{"1.2.3.4:8080"}, ambient), nodeMeta)

				newSE = newSE.DeepCopy()
				newSE.Spec.(*networking.ServiceEntry).Ports = []*networking.ServicePort{{
					Name:       "http",
					Number:     9090,
					Protocol:   "http",
					TargetPort: 9091,
				}}
				makeIstioObject(t, s.Store(), newSE)
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nodeMeta)
				expectEndpoints(t, s, "outbound|9090||service.namespace.svc.cluster.local", expectAmbient([]string{"1.2.3.4:9091"}, ambient), nodeMeta)

				if err := s.Store().Delete(gvk.ServiceEntry, newSE.Name, newSE.Namespace, nil); err != nil {
					t.Fatal(err)
				}
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nodeMeta)
				expectEndpoints(t, s, "outbound|9090||service.namespace.svc.cluster.local", nil, nodeMeta)
			})

			t.Run("ServiceEntry selects Pod: update pod", func(t *testing.T) {
				s := xds.NewFakeDiscoveryServer(t, opts)
				makeIstioObject(t, s.Store(), serviceEntry)
				makePod(t, s.KubeClient().Kube(), pod)
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", expectAmbient([]string{"1.2.3.4:80"}, ambient), nodeMeta)

				newPod := pod.DeepCopy()
				newPod.Status.PodIP = "2.3.4.5"
				makePod(t, s.KubeClient().Kube(), newPod)
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", expectAmbient([]string{"2.3.4.5:80"}, ambient), nodeMeta)

				if err := s.KubeClient().Kube().CoreV1().Pods(newPod.Namespace).Delete(context.Background(), newPod.Name, metav1.DeleteOptions{}); err != nil {
					t.Fatal(err)
				}
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nodeMeta)
			})

			t.Run("ServiceEntry selects Pod: deleting pod", func(t *testing.T) {
				s := xds.NewFakeDiscoveryServer(t, opts)
				makeIstioObject(t, s.Store(), serviceEntry)
				makePod(t, s.KubeClient().Kube(), pod)
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", expectAmbient([]string{"1.2.3.4:80"}, ambient), nodeMeta)

				// Simulate pod being deleted by setting deletion timestamp
				newPod := pod.DeepCopy()
				newPod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
				makePod(t, s.KubeClient().Kube(), newPod)
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nodeMeta)

				if err := s.KubeClient().Kube().CoreV1().Pods(newPod.Namespace).Delete(context.Background(), newPod.Name, metav1.DeleteOptions{}); err != nil {
					t.Fatal(err)
				}
				expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nodeMeta)
			})
		})
	}
}

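// expectAmbient prefixes each endpoint with the connect_originate cluster name when ambient
// (HBONE) mode is enabled, which is presumably how tunneled endpoints show up in the
// extracted EDS output compared against in expectEndpoints.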
func expectAmbient(strings []string, ambient bool) []string {
	if !ambient {
		return strings
	}
	out := make([]string, 0, len(strings))
	for _, s := range strings {
		out = append(out, "connect_originate;"+s)
	}
	return out
}

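// setHealth returns a copy of cfg annotated for health checking, with the Healthy condition
// set according to the healthy argument.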
func setHealth(cfg config.Config, healthy bool) config.Config {
	cfg = cfg.DeepCopy()
	if cfg.Annotations == nil {
		cfg.Annotations = map[string]string{}
	}
	cfg.Annotations[status.WorkloadEntryHealthCheckAnnotation] = "true"
	if healthy {
		return status.UpdateConfigCondition(cfg, &v1alpha1.IstioCondition{
			Type:   status.ConditionHealthy,
			Status: status.StatusTrue,
		})
	}
	return status.UpdateConfigCondition(cfg, &v1alpha1.IstioCondition{
		Type:   status.ConditionHealthy,
		Status: status.StatusFalse,
	})
}

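// waitForEdsUpdate blocks until the fake updater observes an EDS push carrying the expected
// number of endpoints.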
func waitForEdsUpdate(t *testing.T, xdsUpdater *xdsfake.Updater, expected int) {
	t.Helper()
	xdsUpdater.MatchOrFail(t, xdsfake.Event{Type: "eds", EndpointCount: expected})
}

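// TestEndpointsDeduping verifies that endpoints appearing in multiple EndpointSlices are
// deduplicated, that FQDN-type slices are ignored, and that moving or deleting an endpoint
// across slices converges to the correct set.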
func TestEndpointsDeduping(t *testing.T) {
	s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
	namespace := "namespace"
	labels := map[string]string{
		"app": "bar",
	}
	ei := s.Env().EndpointIndex
	makeService(t, s.KubeClient().Kube(), &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "service",
			Namespace: namespace,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name: "http",
				Port: 80,
			}, {
				Name: "http-other",
				Port: 90,
			}},
			Selector:  labels,
			ClusterIP: "9.9.9.9",
		},
	})
	// Create an endpoint and expect it to appear
	createEndpointSlice(t, s.KubeClient().Kube(), "slice1", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.4"})
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80"}, nil)

	// Create an FQDN endpoint, which should be ignored
	createEndpointSliceWithType(t, s.KubeClient().Kube(), "slice1", "service",
		namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"foo.com"}, discovery.AddressTypeFQDN)
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80"}, nil)

	// Add another port endpoint
	createEndpointSlice(t, s.KubeClient().Kube(), "slice1", "service", namespace,
		[]v1.EndpointPort{{Name: "http-other", Port: 90}, {Name: "http", Port: 80}}, []string{"1.2.3.4", "2.3.4.5"})
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80", "2.3.4.5:80"}, nil)
	expectEndpoints(t, s, "outbound|90||service.namespace.svc.cluster.local", []string{"1.2.3.4:90", "2.3.4.5:90"}, nil)

	// Move the endpoint to another slice - transition phase where it's duplicated
	createEndpointSlice(t, s.KubeClient().Kube(), "slice1", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.5", "2.3.4.5"})
	createEndpointSlice(t, s.KubeClient().Kube(), "slice2", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"2.3.4.5"})
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.5:80", "2.3.4.5:80"}, nil)

	// Move the endpoint to another slice - completed
	createEndpointSlice(t, s.KubeClient().Kube(), "slice1", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.4"})
	createEndpointSlice(t, s.KubeClient().Kube(), "slice2", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"2.3.4.5"})
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80", "2.3.4.5:80"}, nil)

	// Delete endpoint
	createEndpointSlice(t, s.KubeClient().Kube(), "slice1", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.4"})
	createEndpointSlice(t, s.KubeClient().Kube(), "slice2", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{})
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80"}, nil)

	_ = s.KubeClient().Kube().DiscoveryV1().EndpointSlices(namespace).Delete(context.TODO(), "slice1", metav1.DeleteOptions{})
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nil)

	// Ensure there is nothing left over
	expectServiceEndpointsFromIndex(t, ei, &model.Service{
		Hostname: "service.namespace.svc.cluster.local",
		Ports: []*model.Port{{
			Name:     "http",
			Port:     80,
			Protocol: "http",
		}},
		Attributes: model.ServiceAttributes{
			Namespace:      namespace,
			Name:           "service",
			LabelSelectors: labels,
		},
	}, 80, []EndpointResponse{})
}

// TestEndpointSlicingServiceUpdate is a regression test to ensure we do not end up with duplicate endpoints when a service changes.
func TestEndpointSlicingServiceUpdate(t *testing.T) {
	for _, version := range []string{"latest", "20"} {
		t.Run("kubernetes 1."+version, func(t *testing.T) {
			s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{
				KubernetesVersion:    version,
				EnableFakeXDSUpdater: true,
			})
			namespace := "namespace"
			labels := map[string]string{
				"app": "bar",
			}
			makeService(t, s.KubeClient().Kube(), &v1.Service{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "service",
					Namespace: namespace,
				},
				Spec: v1.ServiceSpec{
					Ports: []v1.ServicePort{{
						Name: "http",
						Port: 80,
					}, {
						Name: "http-other",
						Port: 90,
					}},
					Selector:  labels,
					ClusterIP: "9.9.9.9",
				},
			})
			fx := s.XdsUpdater.(*xdsfake.Updater)
			createEndpointSlice(t, s.KubeClient().Kube(), "slice1", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.4"})
			createEndpointSlice(t, s.KubeClient().Kube(), "slice2", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.4"})
			expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80"}, nil)
			fx.WaitOrFail(t, "service")

			// Trigger a service update
			makeService(t, s.KubeClient().Kube(), &v1.Service{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "service",
					Namespace: namespace,
					Labels:    map[string]string{"foo": "bar"},
				},
				Spec: v1.ServiceSpec{
					Ports: []v1.ServicePort{{
						Name: "http",
						Port: 80,
					}, {
						Name: "http-other",
						Port: 90,
					}},
					Selector:  labels,
					ClusterIP: "9.9.9.9",
				},
			})
			fx.WaitOrFail(t, "service")
			expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80"}, nil)
		})
	}
}

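// TestSameIPEndpointSlicing verifies that when two EndpointSlices reference the same IP,
// deleting one slice keeps the endpoint alive until the other slice is deleted too.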
func TestSameIPEndpointSlicing(t *testing.T) {
	s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{
		EnableFakeXDSUpdater: true,
	})
	namespace := "namespace"
	labels := map[string]string{
		"app": "bar",
	}
	makeService(t, s.KubeClient().Kube(), &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "service",
			Namespace: namespace,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Name: "http",
				Port: 80,
			}, {
				Name: "http-other",
				Port: 90,
			}},
			Selector:  labels,
			ClusterIP: "9.9.9.9",
		},
	})
	fx := s.XdsUpdater.(*xdsfake.Updater)

	// Create two slices with the same IP, then delete them one at a time
	createEndpointSlice(t, s.KubeClient().Kube(), "slice1", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.4"})
	createEndpointSlice(t, s.KubeClient().Kube(), "slice2", "service", namespace, []v1.EndpointPort{{Name: "http", Port: 80}}, []string{"1.2.3.4"})
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80"}, nil)

	// Delete slice1; the endpoint should still exist, since slice2 still references it
	_ = s.KubeClient().Kube().DiscoveryV1().EndpointSlices(namespace).Delete(context.TODO(), "slice1", metav1.DeleteOptions{})
	fx.WaitOrFail(t, "eds")
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", []string{"1.2.3.4:80"}, nil)
	_ = s.KubeClient().Kube().DiscoveryV1().EndpointSlices(namespace).Delete(context.TODO(), "slice2", metav1.DeleteOptions{})
	fx.WaitOrFail(t, "eds")
	expectEndpoints(t, s, "outbound|80||service.namespace.svc.cluster.local", nil, nil)
}

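// EndpointResponse is the address/port pair the endpoint assertions below compare against.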
type EndpointResponse struct {
	Address string
	Port    uint32
}

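// expectEndpoints asserts, with retries, that the EDS output for the given cluster converges
// to the expected set of "IP:port" strings for a proxy with the given metadata.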
func expectEndpoints(t *testing.T, s *xds.FakeDiscoveryServer, cluster string, expected []string, metadata *model.NodeMetadata) {
	t.Helper()
	retry.UntilSuccessOrFail(t, func() error {
		got := xdstest.ExtractLoadAssignments(s.Endpoints(s.SetupProxy(&model.Proxy{Metadata: metadata})))
		sort.Strings(got[cluster])
		sort.Strings(expected)
		if !reflect.DeepEqual(got[cluster], expected) {
			return fmt.Errorf("wanted %v got %v. All endpoints: %+v", expected, got[cluster], got)
		}
		return nil
	}, retry.Converge(2), retry.Timeout(time.Second*2), retry.Delay(time.Millisecond*10))
}

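// expectServiceEndpointsFromIndex asserts that the endpoint index eventually reports the
// expected address/port pairs for the given service and port.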
func expectServiceEndpointsFromIndex(t *testing.T, ei *model.EndpointIndex, svc *model.Service, port int, expected []EndpointResponse) {
	t.Helper()
	// The system is eventually consistent, so add some retries
	retry.UntilSuccessOrFail(t, func() error {
		endpoints := GetEndpointsForPort(svc, ei, port)
		if endpoints == nil {
			endpoints = []*model.IstioEndpoint{} // To simplify tests a bit
		}
		got := slices.Map(endpoints, func(e *model.IstioEndpoint) EndpointResponse {
			return EndpointResponse{
				Address: e.Address,
				Port:    e.EndpointPort,
			}
		})
		slices.SortBy(got, func(a EndpointResponse) string {
			return a.Address
		})
		return assert.Compare(got, expected)
	}, retry.Converge(2), retry.Timeout(time.Second*2), retry.Delay(time.Millisecond*10))
}

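// expectServiceEndpoints is a convenience wrapper that reads the endpoint index out of the
// fake updater's delegate.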
// nolint: unparam
func expectServiceEndpoints(t *testing.T, fx *xdsfake.Updater, svc *model.Service, port int, expected []EndpointResponse) {
	t.Helper()
	expectServiceEndpointsFromIndex(t, fx.Delegate.(*model.EndpointIndexUpdater).Index, svc, port, expected)
}

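// setPodReady and setPodUnready flip the pod's Ready condition, which controls whether the
// registries include its IP as an endpoint.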
func setPodReady(pod *v1.Pod) {
	pod.Status.Conditions = []v1.PodCondition{
		{
			Type:               v1.PodReady,
			Status:             v1.ConditionTrue,
			LastTransitionTime: metav1.Now(),
		},
	}
}

func setPodUnready(pod *v1.Pod) {
	pod.Status.Conditions = []v1.PodCondition{
		{
			Type:               v1.PodReady,
			Status:             v1.ConditionFalse,
			LastTransitionTime: metav1.Now(),
		},
	}
}

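// makePod creates (or updates) the pod, then separately updates its status to Running with
// the desired PodIP and a Ready condition, since status cannot be set via Create/Update.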
func makePod(t *testing.T, c kubernetes.Interface, pod *v1.Pod) {
	t.Helper()
	newPod, err := c.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{})
	if kerrors.IsAlreadyExists(err) {
		newPod, err = c.CoreV1().Pods(pod.Namespace).Update(context.Background(), pod, metav1.UpdateOptions{})
	}
	if err != nil {
		t.Fatal(err)
	}
	// The API server doesn't allow Create/Update to modify the pod status. Creating the pod
	// doesn't result in events, since PodIP will be "".
	newPod.Status.PodIP = pod.Status.PodIP
	newPod.Status.PodIPs = []v1.PodIP{
		{
			IP: pod.Status.PodIP,
		},
	}
	newPod.Status.Phase = v1.PodRunning

	// Also set the pod to ready, since we only add a pod to service entry endpoints when it is ready
	setPodReady(newPod)
	_, err = c.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), newPod, metav1.UpdateOptions{})
	if err != nil {
		t.Fatal(err)
	}
}

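// makeService creates (or updates) the service, defaulting each port's targetPort to the
// service port the way the real API server would.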
func makeService(t *testing.T, c kubernetes.Interface, svc *v1.Service) {
	t.Helper()
	// avoid mutating input
	svc = svc.DeepCopy()
	// simulate actual k8s behavior
	for i, port := range svc.Spec.Ports {
		if port.TargetPort.IntVal == 0 && port.TargetPort.StrVal == "" {
			svc.Spec.Ports[i].TargetPort.IntVal = port.Port
		}
	}

	_, err := c.CoreV1().Services(svc.Namespace).Create(context.Background(), svc, metav1.CreateOptions{})
	if kerrors.IsAlreadyExists(err) {
		_, err = c.CoreV1().Services(svc.Namespace).Update(context.Background(), svc, metav1.UpdateOptions{})
	}
	if err != nil {
		t.Fatal(err)
	}
}

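// makeIstioObject creates the config in the store, falling back to an update if it already
// exists.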
func makeIstioObject(t *testing.T, c model.ConfigStore, svc config.Config) {
	t.Helper()
	_, err := c.Create(svc)
	if err != nil && err.Error() == "item already exists" {
		_, err = c.Update(svc)
	}
	if err != nil {
		t.Fatal(err)
	}
}

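// createEndpoints creates a single IPv4 EndpointSlice named after the service.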
func createEndpoints(t *testing.T, c kubernetes.Interface, name, namespace string, ports []v1.EndpointPort, ips []string) {
	createEndpointSlice(t, c, name, name, namespace, ports, ips)
}

// nolint: unparam
func createEndpointSlice(t *testing.T, c kubernetes.Interface, name, serviceName, namespace string, ports []v1.EndpointPort, addrs []string) {
	createEndpointSliceWithType(t, c, name, serviceName, namespace, ports, addrs, discovery.AddressTypeIPv4)
}

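// createEndpointSliceWithType builds an EndpointSlice of the given address type, with one
// endpoint per address, labeled so it is associated with serviceName.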
// nolint: unparam
func createEndpointSliceWithType(t *testing.T, c kubernetes.Interface, name, serviceName, namespace string,
	ports []v1.EndpointPort, ips []string, addrType discovery.AddressType,
) {
	esps := make([]discovery.EndpointPort, 0)
	for _, port := range ports {
		p := port // Create a stable copy to take pointers from
		esps = append(esps, discovery.EndpointPort{
			Name:        &p.Name,
			Protocol:    &p.Protocol,
			Port:        &p.Port,
			AppProtocol: p.AppProtocol,
		})
	}

	sliceEndpoint := []discovery.Endpoint{}
	for _, ip := range ips {
		sliceEndpoint = append(sliceEndpoint, discovery.Endpoint{
			Addresses: []string{ip},
		})
	}

	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Labels: map[string]string{
				discovery.LabelServiceName: serviceName,
			},
		},
		AddressType: addrType,
		Endpoints:   sliceEndpoint,
		Ports:       esps,
	}
	if _, err := c.DiscoveryV1().EndpointSlices(namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{}); err != nil {
		if kerrors.IsAlreadyExists(err) {
			_, err = c.DiscoveryV1().EndpointSlices(namespace).Update(context.TODO(), endpointSlice, metav1.UpdateOptions{})
		}
		if err != nil {
			t.Fatalf("failed to create or update endpoint slice %s in namespace %s: %v", name, namespace, err)
		}
	}
}

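// TestLocality verifies that a proxy's locality is derived from pod locality labels,
// node topology labels, or ServiceEntry endpoint locality, with pod labels and the
// explicit Locality field taking precedence.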
func TestLocality(t *testing.T) {
	namespace := "default"
	basePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod",
			Namespace: namespace,
			Labels:    map[string]string{},
		},
		Spec: v1.PodSpec{NodeName: "node"},
		Status: v1.PodStatus{
			PodIP: "1.2.3.4",
			Phase: v1.PodRunning,
		},
	}
	setPodReady(basePod)
	baseNode := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node",
			Labels: map[string]string{},
		},
	}
	cases := []struct {
		name     string
		pod      *v1.Pod
		node     *v1.Node
		obj      config.Config
		expected *core.Locality
	}{
		{
			name:     "no locality",
			pod:      basePod,
			node:     baseNode,
			expected: &core.Locality{},
		},
		{
			name: "pod specific label",
			pod: func() *v1.Pod {
				p := basePod.DeepCopy()
				p.Labels[model.LocalityLabel] = "r.z.s"
				return p
			}(),
			node: baseNode,
			expected: &core.Locality{
				Region:  "r",
				Zone:    "z",
				SubZone: "s",
			},
		},
		{
			name: "node specific label",
			pod:  basePod,
			node: func() *v1.Node {
				p := baseNode.DeepCopy()
				p.Labels[kubecontroller.NodeRegionLabelGA] = "r"
				p.Labels[kubecontroller.NodeZoneLabelGA] = "z"
				p.Labels[label.TopologySubzone.Name] = "s"
				return p
			}(),
			expected: &core.Locality{
				Region:  "r",
				Zone:    "z",
				SubZone: "s",
			},
		},
		{
			name: "pod and node labels",
			pod: func() *v1.Pod {
				p := basePod.DeepCopy()
				p.Labels[model.LocalityLabel] = "r.z.s"
				return p
			}(),
			node: func() *v1.Node {
				p := baseNode.DeepCopy()
				p.Labels[kubecontroller.NodeRegionLabelGA] = "nr"
				p.Labels[kubecontroller.NodeZoneLabelGA] = "nz"
				p.Labels[label.TopologySubzone.Name] = "ns"
				return p
			}(),
			expected: &core.Locality{
				Region:  "r",
				Zone:    "z",
				SubZone: "s",
			},
		},
		{
			name: "ServiceEntry with explicit locality",
			obj: config.Config{
				Meta: config.Meta{
					Name:             "service-entry",
					Namespace:        namespace,
					GroupVersionKind: gvk.ServiceEntry,
				},
				Spec: &networking.ServiceEntry{
					Hosts: []string{"service.namespace.svc.cluster.local"},
					Ports: []*networking.ServicePort{{Name: "http", Number: 80, Protocol: "http"}},
					Endpoints: []*networking.WorkloadEntry{{
						Address:  "1.2.3.4",
						Locality: "r/z/s",
					}},
					Resolution: networking.ServiceEntry_STATIC,
				},
			},
			expected: &core.Locality{
				Region:  "r",
				Zone:    "z",
				SubZone: "s",
			},
		},
		{
			name: "ServiceEntry with label locality",
			obj: config.Config{
				Meta: config.Meta{
					Name:             "service-entry",
					Namespace:        namespace,
					GroupVersionKind: gvk.ServiceEntry,
				},
				Spec: &networking.ServiceEntry{
					Hosts: []string{"service.namespace.svc.cluster.local"},
					Ports: []*networking.ServicePort{{Name: "http", Number: 80, Protocol: "http"}},
					Endpoints: []*networking.WorkloadEntry{{
						Address: "1.2.3.4",
						Labels: map[string]string{
							model.LocalityLabel: "r.z.s",
						},
					}},
					Resolution: networking.ServiceEntry_STATIC,
				},
			},
			expected: &core.Locality{
				Region:  "r",
				Zone:    "z",
				SubZone: "s",
			},
		},
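		// When both are set, the explicit Locality field wins over the locality label.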
		{
			name: "ServiceEntry with both locality",
			obj: config.Config{
				Meta: config.Meta{
					Name:             "service-entry",
					Namespace:        namespace,
					GroupVersionKind: gvk.ServiceEntry,
				},
				Spec: &networking.ServiceEntry{
					Hosts: []string{"service.namespace.svc.cluster.local"},
					Ports: []*networking.ServicePort{{Name: "http", Number: 80, Protocol: "http"}},
					Endpoints: []*networking.WorkloadEntry{{
						Address:  "1.2.3.4",
						Locality: "r/z/s",
						Labels: map[string]string{
							model.LocalityLabel: "lr.lz.ls",
						},
					}},
					Resolution: networking.ServiceEntry_STATIC,
				},
			},
			expected: &core.Locality{
				Region:  "r",
				Zone:    "z",
				SubZone: "s",
			},
		},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			opts := xds.FakeOptions{}
			if tt.pod != nil {
				opts.KubernetesObjects = append(opts.KubernetesObjects, tt.pod)
			}
			if tt.node != nil {
				opts.KubernetesObjects = append(opts.KubernetesObjects, tt.node)
			}
			if tt.obj.Name != "" {
				opts.Configs = append(opts.Configs, tt.obj)
			}
			s := xds.NewFakeDiscoveryServer(t, opts)
			s.Connect(s.SetupProxy(&model.Proxy{IPAddresses: []string{"1.2.3.4"}}), nil, []string{v3.ClusterType})
			retry.UntilSuccessOrFail(t, func() error {
				clients := s.Discovery.AllClients()
				if len(clients) != 1 {
					return fmt.Errorf("got %d clients", len(clients))
				}
				locality := clients[0].Proxy().Locality
				return assert.Compare(tt.expected, locality)
			}, retry.Timeout(time.Second*2))
		})
	}
}

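// GetEndpointsForPort returns all endpoints registered in the EndpointIndex for the
// given service port; a port of 0 matches endpoints for every port.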
func GetEndpointsForPort(s *model.Service, endpoints *model.EndpointIndex, port int) []*model.IstioEndpoint {
	shards, ok := endpoints.ShardsForService(string(s.Hostname), s.Attributes.Namespace)
	if !ok {
		return nil
	}
	var pn string
	for _, p := range s.Ports {
		if p.Port == port {
			pn = p.Name
			break
		}
	}
	if pn == "" && port != 0 {
		return nil
	}
	shards.RLock()
	defer shards.RUnlock()
	return slices.FilterInPlace(slices.Flatten(maps.Values(shards.Shards)), func(endpoint *model.IstioEndpoint) bool {
		return pn == "" || endpoint.ServicePortName == pn
	})
}