istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pilot/pkg/xds/eds_test.go

     1  // Copyright Istio Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //	http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  package xds_test
    15  
    16  import (
    17  	"errors"
    18  	"fmt"
    19  	"io"
    20  	"net/http"
    21  	"net/http/httptest"
    22  	"os"
    23  	"path/filepath"
    24  	"reflect"
    25  	"runtime"
    26  	"sort"
    27  	"strings"
    28  	"sync"
    29  	"testing"
    30  	"time"
    31  
    32  	endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
    33  	tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
    34  	uatomic "go.uber.org/atomic"
    35  
    36  	"istio.io/istio/pilot/pkg/features"
    37  	"istio.io/istio/pilot/pkg/model"
    38  	"istio.io/istio/pilot/pkg/networking/util"
    39  	"istio.io/istio/pilot/pkg/serviceregistry/memory"
    40  	"istio.io/istio/pilot/pkg/xds"
    41  	v3 "istio.io/istio/pilot/pkg/xds/v3"
    42  	xdsfake "istio.io/istio/pilot/test/xds"
    43  	"istio.io/istio/pilot/test/xdstest"
    44  	"istio.io/istio/pkg/adsc"
    45  	"istio.io/istio/pkg/config/host"
    46  	"istio.io/istio/pkg/config/protocol"
    47  	"istio.io/istio/pkg/config/schema/kind"
    48  	"istio.io/istio/pkg/log"
    49  	"istio.io/istio/pkg/slices"
    50  	"istio.io/istio/pkg/test"
    51  	"istio.io/istio/pkg/test/env"
    52  	"istio.io/istio/pkg/test/util/assert"
    53  	"istio.io/istio/pkg/util/sets"
    54  )
    55  
// The connect and reconnect tests were removed - ADS already has coverage, and
// StreamEndpoints is not used in 1.0+.
    58  
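// Localities assigned to the two test ADS clients, plus the service hostname
// and VIP used by the incremental EDS tests.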
    59  const (
    60  	asdcLocality  = "region1/zone1/subzone1"
    61  	asdc2Locality = "region2/zone2/subzone2"
    62  
    63  	edsIncSvc = "eds.test.svc.cluster.local"
    64  	edsIncVip = "10.10.1.2"
    65  )
    66  
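// TestIncrementalPush verifies which xDS resource types are sent for various
// PushRequest shapes: full vs. incremental, and with ConfigsUpdated scoping the
// EDS push to the affected services.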
    67  func TestIncrementalPush(t *testing.T) {
    68  	s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{
    69  		ConfigString: mustReadFile(t, "tests/testdata/config/destination-rule-all.yaml") +
    70  			mustReadFile(t, "tests/testdata/config/static-weighted-se.yaml"),
    71  	})
    72  	ads := s.Connect(nil, nil, watchAll)
    73  	t.Run("Full Push", func(t *testing.T) {
    74  		s.Discovery.Push(&model.PushRequest{Full: true})
    75  		if _, err := ads.Wait(time.Second*5, watchAll...); err != nil {
    76  			t.Fatal(err)
    77  		}
    78  	})
    79  	t.Run("Incremental Push with updated services", func(t *testing.T) {
    80  		ads.WaitClear()
    81  		s.Discovery.Push(&model.PushRequest{
    82  			Full:           false,
    83  			ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.ServiceEntry, Name: "destall.default.svc.cluster.local", Namespace: "testns"}),
    84  		})
    85  		if err := ads.WaitSingle(time.Second*5, v3.EndpointType, v3.ClusterType); err != nil {
    86  			t.Fatal(err)
    87  		}
    88  	})
    89  	t.Run("Full Push with updated services", func(t *testing.T) {
    90  		ads.WaitClear()
    91  
    92  		s.Discovery.Push(&model.PushRequest{
    93  			Full:           true,
    94  			ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.ServiceEntry, Name: "weighted.static.svc.cluster.local", Namespace: "default"}),
    95  		})
    96  		if _, err := ads.Wait(time.Second*5, watchAll...); err != nil {
    97  			t.Fatal(err)
    98  		}
    99  		if len(ads.GetEndpoints()) != 1 {
   100  			t.Fatalf("Expected a partial EDS update, but got: %v", xdstest.MapKeys(ads.GetEndpoints()))
   101  		}
   102  	})
   103  	t.Run("Full Push with updated services and virtual services", func(t *testing.T) {
   104  		ads.WaitClear()
   105  		s.Discovery.Push(&model.PushRequest{
   106  			Full: true,
   107  			ConfigsUpdated: sets.New(
   108  				model.ConfigKey{Kind: kind.ServiceEntry, Name: "weighted.static.svc.cluster.local", Namespace: "default"},
   109  				model.ConfigKey{Kind: kind.VirtualService, Name: "vs", Namespace: "testns"},
   110  			),
   111  		})
   112  		if _, err := ads.Wait(time.Second*5, watchAll...); err != nil {
   113  			t.Fatal(err)
   114  		}
   115  		if len(ads.GetEndpoints()) != 1 {
   116  			t.Fatalf("Expected a partial EDS update, but got: %v", xdstest.MapKeys(ads.GetEndpoints()))
   117  		}
   118  	})
   119  	t.Run("Full Push with updated services and destination rules", func(t *testing.T) {
   120  		ads.WaitClear()
   121  		s.Discovery.Push(&model.PushRequest{
   122  			Full: true,
   123  			ConfigsUpdated: sets.New(
   124  				model.ConfigKey{Kind: kind.ServiceEntry, Name: "destall.default.svc.cluster.local", Namespace: "default"},
   125  				model.ConfigKey{Kind: kind.DestinationRule, Name: "destall", Namespace: "testns"}),
   126  		})
   127  		if _, err := ads.Wait(time.Second*5, watchAll...); err != nil {
   128  			t.Fatal(err)
   129  		}
   130  		if len(ads.GetEndpoints()) != 4 {
   131  			t.Fatalf("Expected a full EDS update, but got: %v", xdstest.MapKeys(ads.GetEndpoints()))
   132  		}
   133  	})
   134  	t.Run("Full Push with multiple updates", func(t *testing.T) {
   135  		ads.WaitClear()
   136  		s.Discovery.Push(&model.PushRequest{
   137  			Full: true,
   138  			ConfigsUpdated: sets.New(
   139  				model.ConfigKey{Kind: kind.ServiceEntry, Name: "destall.default.svc.cluster.local", Namespace: "default"},
   140  				model.ConfigKey{Kind: kind.VirtualService, Name: "vs", Namespace: "testns"},
   141  				model.ConfigKey{Kind: kind.DestinationRule, Name: "destall", Namespace: "testns"}),
   142  		})
   143  		if _, err := ads.Wait(time.Second*5, watchAll...); err != nil {
   144  			t.Fatal(err)
   145  		}
   146  		if len(ads.GetEndpoints()) != 4 {
   147  			t.Fatalf("Expected a full EDS update, but got: %v", xdstest.MapKeys(ads.GetEndpoints()))
   148  		}
   149  	})
   150  	t.Run("Full Push without updated services", func(t *testing.T) {
   151  		ads.WaitClear()
   152  		s.Discovery.Push(&model.PushRequest{
   153  			Full:           true,
   154  			ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.DestinationRule, Name: "destall", Namespace: "testns"}),
   155  		})
   156  		if _, err := ads.Wait(time.Second*5, v3.ClusterType, v3.EndpointType); err != nil {
   157  			t.Fatal(err)
   158  		}
   159  		if len(ads.GetEndpoints()) < 3 {
   160  			t.Fatalf("Expected a full EDS update, but got: %v", ads.GetEndpoints())
   161  		}
   162  	})
   163  }
   164  
   165  // Regression test for https://github.com/istio/istio/issues/38709
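// Even when an unhealthy endpoint is not sent (SendUnhealthyEndpoints disabled),
// the SAN derived from its service account must still appear in the cluster's
// TLS validation context.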
   166  func TestSAUpdate(t *testing.T) {
   167  	test.SetAtomicBoolForTest(t, features.SendUnhealthyEndpoints, false)
   168  	s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{})
   169  	ads := s.Connect(s.SetupProxy(nil), nil, []string{v3.ClusterType})
   170  
   171  	ports := model.PortList{
   172  		{
   173  			Name:     "http",
   174  			Port:     80,
   175  			Protocol: protocol.HTTP,
   176  		},
   177  	}
   178  	svc := &model.Service{
   179  		Ports:    ports,
   180  		Hostname: host.Name("test1"),
   181  	}
   182  	s.MemRegistry.AddService(svc)
   183  	if _, err := ads.Wait(time.Second*10, watchAll...); err != nil {
   184  		t.Fatal(err)
   185  	}
   186  	i := &model.ServiceInstance{
   187  		Service:     svc,
   188  		ServicePort: svc.Ports[0],
   189  		Endpoint: &model.IstioEndpoint{
   190  			Address:        "1.2.3.4",
   191  			ServiceAccount: "spiffe://td1/ns/def/sa/def",
   192  			HealthStatus:   model.UnHealthy,
   193  		},
   194  	}
   195  	s.MemRegistry.AddInstance(i)
   196  	if _, err := ads.Wait(time.Second*10, v3.EndpointType); err != nil {
   197  		t.Fatal(err)
   198  	}
   199  	transport := &tls.UpstreamTlsContext{}
	if err := ads.GetEdsClusters()["outbound|80||test1"].GetTransportSocketMatches()[0].GetTransportSocket().GetTypedConfig().UnmarshalTo(transport); err != nil {
		t.Fatal(err)
	}
   201  	sans := transport.GetCommonTlsContext().GetCombinedValidationContext().GetDefaultValidationContext().GetMatchSubjectAltNames() //nolint: staticcheck
   202  	if len(sans) != 1 {
   203  		t.Fatalf("expected 1 san, got %v", sans)
   204  	}
   205  }
   206  
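// TestEds covers the main EDS flows: locality-prioritized endpoints, UDS
// endpoints, incremental and full pushes, and many concurrent clients.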
   207  func TestEds(t *testing.T) {
   208  	s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{
   209  		ConfigString: mustReadFile(t, "tests/testdata/config/destination-rule-locality.yaml"),
   210  	})
   211  
   212  	m := s.MemRegistry
   213  	addUdsEndpoint(s.Discovery, m)
   214  
	// enable locality load balancing and add relevant endpoints in order to test locality-prioritized routing
   216  	addLocalityEndpoints(m, "locality.cluster.local")
   217  	addLocalityEndpoints(m, "locality-no-outlier-detection.cluster.local")
   218  
   219  	// Add the test ads clients to list of service instances in order to test the context dependent locality coloring.
   220  	addTestClientEndpoints(m)
   221  
   222  	m.AddHTTPService(edsIncSvc, edsIncVip, 8080)
   223  	m.SetEndpoints(edsIncSvc, "", newEndpointWithAccount("127.0.0.1", "hello-sa", "v1"))
   224  	// Let initial updates settle
   225  	s.EnsureSynced(t)
   226  
   227  	adscConn := s.Connect(&model.Proxy{Locality: util.ConvertLocality(asdcLocality), IPAddresses: []string{"10.10.10.10"}}, nil, watchAll)
   228  	adscConn2 := s.Connect(&model.Proxy{Locality: util.ConvertLocality(asdc2Locality), IPAddresses: []string{"10.10.10.11"}}, nil, watchAll)
   229  
   230  	t.Run("TCPEndpoints", func(t *testing.T) {
   231  		testTCPEndpoints("127.0.0.1", adscConn, t)
   232  	})
   233  	t.Run("edsz", func(t *testing.T) {
   234  		testEdsz(t, s, "test-1.default")
   235  	})
   236  	t.Run("LocalityPrioritizedEndpoints", func(t *testing.T) {
   237  		testLocalityPrioritizedEndpoints(adscConn, adscConn2, t)
   238  	})
   239  	t.Run("UDSEndpoints", func(t *testing.T) {
   240  		testUdsEndpoints(adscConn, t)
   241  	})
   242  	t.Run("PushIncremental", func(t *testing.T) {
   243  		edsUpdateInc(s, adscConn, t)
   244  	})
   245  	t.Run("Push", func(t *testing.T) {
   246  		edsUpdates(s, adscConn, t)
   247  	})
   248  	t.Run("MultipleRequest", func(t *testing.T) {
   249  		multipleRequest(s, false, 20, 5, 25*time.Second, nil, t)
   250  	})
	// 5 pushes for 20 clients, using EDS incremental only.
   252  	t.Run("MultipleRequestIncremental", func(t *testing.T) {
   253  		multipleRequest(s, true, 20, 5, 25*time.Second, nil, t)
   254  	})
   255  	t.Run("CDSSave", func(t *testing.T) {
   256  		// Moved from cds_test, using new client
   257  		clusters := adscConn.GetClusters()
   258  		if len(clusters) == 0 {
   259  			t.Error("No clusters in ADS response")
   260  		}
   261  	})
   262  }
   263  
// newEndpointWithAccount is a helper for IstioEndpoint creation. Creates endpoints with
// port name "http-main", with the given IP, service account and a 'version' label.
   266  // nolint: unparam
   267  func newEndpointWithAccount(ip, account, version string) []*model.IstioEndpoint {
   268  	return []*model.IstioEndpoint{
   269  		{
   270  			Address:         ip,
   271  			ServicePortName: "http-main",
   272  			EndpointPort:    80,
   273  			Labels:          map[string]string{"version": version},
   274  			ServiceAccount:  account,
   275  		},
   276  	}
   277  }
   278  
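// mustReadFile concatenates the contents of the given files, each prefixed with
// a YAML document separator. Paths not starting with "." are resolved against
// env.IstioSrc. For example:
//
//	mustReadFile(t, "tests/testdata/config/destination-rule-all.yaml")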
   279  func mustReadFile(t *testing.T, fpaths ...string) string {
   280  	result := ""
   281  	for _, fpath := range fpaths {
   282  		if !strings.HasPrefix(fpath, ".") {
   283  			fpath = filepath.Join(env.IstioSrc, fpath)
   284  		}
   285  		bytes, err := os.ReadFile(fpath)
   286  		if err != nil {
   287  			t.Fatal(err)
   288  		}
   289  		result += "---\n"
   290  		result += string(bytes)
   291  	}
   292  	return result
   293  }
   294  
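// mustReadfolder concatenates every file in the given folder, each prefixed with
// a YAML document separator. A folder path not starting with "." is resolved
// against env.IstioSrc.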
   295  func mustReadfolder(t *testing.T, folder string) string {
   296  	result := ""
   297  	fpathRoot := folder
   298  	if !strings.HasPrefix(fpathRoot, ".") {
   299  		fpathRoot = filepath.Join(env.IstioSrc, folder)
   300  	}
   301  	f, err := os.ReadDir(fpathRoot)
   302  	if err != nil {
   303  		t.Fatal(err)
   304  	}
   305  	for _, fpath := range f {
   306  		bytes, err := os.ReadFile(filepath.Join(fpathRoot, fpath.Name()))
   307  		if err != nil {
   308  			t.Fatal(err)
   309  		}
   310  		result += "---\n"
   311  		result += string(bytes)
   312  	}
   313  	return result
   314  }
   315  
   316  func TestEdsWeightedServiceEntry(t *testing.T) {
   317  	s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{ConfigString: mustReadFile(t, "tests/testdata/config/static-weighted-se.yaml")})
   318  	adscConn := s.Connect(nil, nil, watchEds)
   319  	endpoints := adscConn.GetEndpoints()
   320  	lbe, f := endpoints["outbound|80||weighted.static.svc.cluster.local"]
   321  	if !f || len(lbe.Endpoints) == 0 {
   322  		t.Fatalf("No lb endpoints for %v, %v", "outbound|80||weighted.static.svc.cluster.local", adscConn.EndpointsJSON())
   323  	}
   324  	expected := map[string]uint32{
   325  		"a":       9, // sum of 1 and 8
   326  		"b":       3,
   327  		"3.3.3.3": 1, // no weight provided is normalized to 1
   328  		"2.2.2.2": 8,
   329  		"1.1.1.1": 3,
   330  	}
   331  	got := make(map[string]uint32)
   332  	for _, lbe := range lbe.Endpoints {
   333  		got[lbe.Locality.Region] = lbe.LoadBalancingWeight.Value
   334  		for _, e := range lbe.LbEndpoints {
   335  			got[e.GetEndpoint().Address.GetSocketAddress().Address] = e.LoadBalancingWeight.Value
   336  		}
   337  	}
   338  	if !reflect.DeepEqual(expected, got) {
   339  		t.Errorf("Expected LB weights %v got %v", expected, got)
   340  	}
   341  }
   342  
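// Type URL sets used when connecting test ADS clients.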
   343  var (
   344  	watchEds = []string{v3.ClusterType, v3.EndpointType}
   345  	watchAll = []string{v3.ClusterType, v3.EndpointType, v3.ListenerType, v3.RouteType}
   346  )
   347  
   348  func TestEDSOverlapping(t *testing.T) {
   349  	s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{})
   350  	addOverlappingEndpoints(s)
   351  	adscon := s.Connect(nil, nil, watchEds)
   352  	testOverlappingPorts(s, adscon, t)
   353  }
   354  
   355  func TestEDSUnhealthyEndpoints(t *testing.T) {
   356  	for _, sendUnhealthy := range []bool{true, false} {
   357  		t.Run(fmt.Sprint(sendUnhealthy), func(t *testing.T) {
   358  			test.SetAtomicBoolForTest(t, features.SendUnhealthyEndpoints, sendUnhealthy)
   359  			s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{})
   360  			addUnhealthyCluster(s)
   361  			s.EnsureSynced(t)
   362  			adscon := s.Connect(nil, nil, watchEds)
   363  
   364  			validateEndpoints := func(expectPush bool, healthy []string, unhealthy []string) {
   365  				t.Helper()
   366  				// Normalize lists to make comparison easier
   367  				if healthy == nil {
   368  					healthy = []string{}
   369  				}
   370  				if unhealthy == nil {
   371  					unhealthy = []string{}
   372  				}
   373  				sort.Strings(healthy)
   374  				sort.Strings(unhealthy)
   375  				if expectPush {
   376  					upd, err := adscon.Wait(5*time.Second, v3.EndpointType)
   377  					assert.NoError(t, err)
   378  					if len(upd) > 0 && !slices.Contains(upd, v3.EndpointType) {
   379  						t.Fatalf("Expecting EDS push as endpoint health is changed. But received %v", upd)
   380  					}
   381  				} else {
   382  					upd, _ := adscon.Wait(50*time.Millisecond, v3.EndpointType)
   383  					if slices.Contains(upd, v3.EndpointType) {
   384  						t.Fatalf("Expected no EDS push, got %v", upd)
   385  					}
   386  				}
   387  
   388  				// Validate that endpoints are pushed.
   389  				lbe := adscon.GetEndpoints()["outbound|53||unhealthy.svc.cluster.local"]
   390  				eh, euh := xdstest.ExtractHealthEndpoints(lbe)
   391  				gotHealthy := sets.SortedList(sets.New(eh...))
   392  				gotUnhealthy := sets.SortedList(sets.New(euh...))
   393  				if !reflect.DeepEqual(gotHealthy, healthy) {
   394  					t.Fatalf("did not get expected endpoints: got %v, want %v", gotHealthy, healthy)
   395  				}
   396  				if !reflect.DeepEqual(gotUnhealthy, unhealthy) {
   397  					t.Fatalf("did not get expected unhealthy endpoints: got %v, want %v", gotUnhealthy, unhealthy)
   398  				}
   399  			}
   400  
   401  			// Validate that we do send initial unhealthy endpoints.
			// ExpectPush=false since we are just querying the initial state; we already got the responses in our initial connection.
   403  			if sendUnhealthy {
   404  				validateEndpoints(false, nil, []string{"10.0.0.53:53"})
   405  			} else {
   406  				validateEndpoints(false, nil, nil)
   407  			}
   408  			adscon.WaitClear()
   409  
			// Set an additional unhealthy endpoint. An EDS push is expected only when unhealthy endpoints are sent.
   411  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "",
   412  				[]*model.IstioEndpoint{
   413  					{
   414  						Address:         "10.0.0.53",
   415  						EndpointPort:    53,
   416  						ServicePortName: "tcp-dns",
   417  						HealthStatus:    model.UnHealthy,
   418  					},
   419  					{
   420  						Address:         "10.0.0.54",
   421  						EndpointPort:    53,
   422  						ServicePortName: "tcp-dns",
   423  						HealthStatus:    model.UnHealthy,
   424  					},
   425  				})
   426  
   427  			// Validate that endpoint is pushed.
   428  			if sendUnhealthy {
   429  				validateEndpoints(true, nil, []string{"10.0.0.53:53", "10.0.0.54:53"})
   430  			} else {
   431  				validateEndpoints(false, nil, nil)
   432  			}
   433  
   434  			// Change the status of endpoint to Healthy and validate Eds is pushed.
   435  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "",
   436  				[]*model.IstioEndpoint{
   437  					{
   438  						Address:         "10.0.0.53",
   439  						EndpointPort:    53,
   440  						ServicePortName: "tcp-dns",
   441  						HealthStatus:    model.Healthy,
   442  					},
   443  					{
   444  						Address:         "10.0.0.54",
   445  						EndpointPort:    53,
   446  						ServicePortName: "tcp-dns",
   447  						HealthStatus:    model.Healthy,
   448  					},
   449  				})
   450  
   451  			// Validate that endpoints are pushed.
   452  			validateEndpoints(true, []string{"10.0.0.53:53", "10.0.0.54:53"}, nil)
   453  
   454  			// Set to exact same endpoints
   455  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "",
   456  				[]*model.IstioEndpoint{
   457  					{
   458  						Address:         "10.0.0.53",
   459  						EndpointPort:    53,
   460  						ServicePortName: "tcp-dns",
   461  						HealthStatus:    model.Healthy,
   462  					},
   463  					{
   464  						Address:         "10.0.0.54",
   465  						EndpointPort:    53,
   466  						ServicePortName: "tcp-dns",
   467  						HealthStatus:    model.Healthy,
   468  					},
   469  				})
   470  			// Validate that endpoint is not pushed.
   471  			validateEndpoints(false, []string{"10.0.0.53:53", "10.0.0.54:53"}, nil)
   472  
   473  			// Now change the status of endpoint to UnHealthy and validate Eds is pushed.
   474  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "",
   475  				[]*model.IstioEndpoint{
   476  					{
   477  						Address:         "10.0.0.53",
   478  						EndpointPort:    53,
   479  						ServicePortName: "tcp-dns",
   480  						HealthStatus:    model.UnHealthy,
   481  					},
   482  					{
   483  						Address:         "10.0.0.54",
   484  						EndpointPort:    53,
   485  						ServicePortName: "tcp-dns",
   486  						HealthStatus:    model.Healthy,
   487  					},
   488  				})
   489  
   490  			// Validate that endpoints are pushed.
   491  			if sendUnhealthy {
   492  				validateEndpoints(true, []string{"10.0.0.54:53"}, []string{"10.0.0.53:53"})
   493  			} else {
   494  				validateEndpoints(true, []string{"10.0.0.54:53"}, nil)
   495  			}
   496  
   497  			// Change the status of endpoint to Healthy and validate Eds is pushed.
   498  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "",
   499  				[]*model.IstioEndpoint{
   500  					{
   501  						Address:         "10.0.0.53",
   502  						EndpointPort:    53,
   503  						ServicePortName: "tcp-dns",
   504  						HealthStatus:    model.Healthy,
   505  					},
   506  					{
   507  						Address:         "10.0.0.54",
   508  						EndpointPort:    53,
   509  						ServicePortName: "tcp-dns",
   510  						HealthStatus:    model.Healthy,
   511  					},
   512  				})
   513  
   514  			validateEndpoints(true, []string{"10.0.0.54:53", "10.0.0.53:53"}, nil)
   515  
   516  			// Remove a healthy endpoint
   517  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "",
   518  				[]*model.IstioEndpoint{
   519  					{
   520  						Address:         "10.0.0.53",
   521  						EndpointPort:    53,
   522  						ServicePortName: "tcp-dns",
   523  						HealthStatus:    model.Healthy,
   524  					},
   525  				})
   526  
   527  			validateEndpoints(true, []string{"10.0.0.53:53"}, nil)
   528  
   529  			// Add another healthy endpoint and validate Eds is pushed.
   530  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "",
   531  				[]*model.IstioEndpoint{
   532  					{
   533  						Address:         "10.0.0.53",
   534  						EndpointPort:    53,
   535  						ServicePortName: "tcp-dns",
   536  						HealthStatus:    model.Healthy,
   537  					},
   538  					{
   539  						Address:         "10.0.0.54",
   540  						EndpointPort:    53,
   541  						ServicePortName: "tcp-dns",
   542  						HealthStatus:    model.Healthy,
   543  					},
   544  				})
   545  
   546  			// Validate that endpoints are pushed.
   547  			validateEndpoints(true, []string{"10.0.0.53:53", "10.0.0.54:53"}, nil)
   548  
   549  			// Remove last healthy endpoints
   550  			s.MemRegistry.SetEndpoints("unhealthy.svc.cluster.local", "", []*model.IstioEndpoint{})
   551  			validateEndpoints(true, nil, nil)
   552  		})
   553  	}
   554  }
   555  
   556  // Validates the behavior when Service resolution type is updated after initial EDS push.
   557  // See https://github.com/istio/istio/issues/18355 for more details.
   558  func TestEDSServiceResolutionUpdate(t *testing.T) {
   559  	for _, resolution := range []model.Resolution{model.DNSLB, model.DNSRoundRobinLB} {
   560  		t.Run(fmt.Sprintf("resolution_%s", resolution), func(t *testing.T) {
   561  			s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{})
   562  			addEdsCluster(s, "edsdns.svc.cluster.local", "http", "10.0.0.53", 8080)
   563  			addEdsCluster(s, "other.local", "http", "1.1.1.1", 8080)
   564  			s.EnsureSynced(t) // Wait for debounce
   565  
   566  			adscConn := s.Connect(nil, nil, watchAll)
   567  
   568  			// Validate that endpoints are pushed correctly.
   569  			testEndpoints("10.0.0.53", "outbound|8080||edsdns.svc.cluster.local", adscConn, t)
   570  
   571  			// Now update the service resolution to DNSLB/DNSRRLB with a DNS endpoint.
   572  			updateServiceResolution(s, resolution)
   573  
   574  			if _, err := adscConn.Wait(5*time.Second, v3.EndpointType); err != nil {
   575  				t.Fatal(err)
   576  			}
   577  
   578  			// Validate that endpoints are skipped.
   579  			lbe := adscConn.GetEndpoints()["outbound|8080||edsdns.svc.cluster.local"]
   580  			if lbe != nil && len(lbe.Endpoints) > 0 {
				t.Fatalf("endpoints not expected for %s, but got %v", "edsdns.svc.cluster.local", adscConn.EndpointsJSON())
   582  			}
   583  		})
   584  	}
   585  }
   586  
// Validate that a service's endpoints flip-flopping between 1 and 0 does not trigger a full push.
   588  func TestEndpointFlipFlops(t *testing.T) {
   589  	cases := []struct {
   590  		name           string
   591  		newSa          string
   592  		expectFullPush bool
   593  	}{
   594  		{
   595  			name:           "same service account",
   596  			newSa:          "sa",
   597  			expectFullPush: false,
   598  		},
   599  		{
   600  			name:           "different service account",
   601  			newSa:          "new-sa",
   602  			expectFullPush: true,
   603  		},
   604  	}
   605  	for _, tt := range cases {
   606  		t.Run(tt.name, func(t *testing.T) {
   607  			s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{})
   608  			addEdsCluster(s, "flipflop.com", "http", "10.0.0.53", 8080)
   609  			s.EnsureSynced(t) // Wait for debounce
   610  			adscConn := s.Connect(nil, nil, watchAll)
   611  
   612  			// Validate that endpoints are pushed correctly.
   613  			testEndpoints("10.0.0.53", "outbound|8080||flipflop.com", adscConn, t)
   614  
   615  			// Clear the endpoint and validate it does not trigger a full push.
   616  			s.MemRegistry.SetEndpoints("flipflop.com", "", []*model.IstioEndpoint{})
   617  
   618  			upd, _ := adscConn.Wait(5*time.Second, v3.EndpointType)
   619  			assert.Equal(t, upd, []string{v3.EndpointType}, "expected partial push")
   620  
   621  			lbe := adscConn.GetEndpoints()["outbound|8080||flipflop.com"]
   622  			if len(lbe.Endpoints) != 0 {
   623  				t.Fatalf("There should be no endpoints for outbound|8080||flipflop.com. Endpoints:\n%v", adscConn.EndpointsJSON())
   624  			}
   625  
   626  			// Validate that keys in service still exist in EndpointIndex - this prevents full push.
   627  			if _, ok := s.Discovery.Env.EndpointIndex.ShardsForService("flipflop.com", ""); !ok {
   628  				t.Fatalf("Expected service key %s to be present in EndpointIndex. But missing %v", "flipflop.com", s.Discovery.Env.EndpointIndex.Shardz())
   629  			}
   630  
   631  			// Set the endpoints again and validate it does not trigger full push.
   632  			s.MemRegistry.SetEndpoints("flipflop.com", "",
   633  				[]*model.IstioEndpoint{
   634  					{
   635  						Address:         "10.10.1.1",
   636  						ServicePortName: "http",
   637  						EndpointPort:    8080,
   638  						ServiceAccount:  tt.newSa,
   639  					},
   640  				})
   641  
   642  			upd, _ = adscConn.Wait(5*time.Second, v3.EndpointType)
   643  
   644  			if tt.expectFullPush {
   645  				if !slices.Contains(upd, v3.ClusterType) {
   646  					t.Fatalf("expected a CDS push, got: %+v", upd)
   647  				}
   648  
   649  				if !slices.Contains(upd, v3.EndpointType) {
   650  					t.Fatalf("expected an EDS push, got: %+v", upd)
   651  				}
   652  			} else {
   653  				if slices.Contains(upd, v3.ClusterType) {
   654  					t.Fatalf("expected no CDS push, got: %+v", upd)
   655  				}
   656  
   657  				if !slices.Contains(upd, v3.EndpointType) {
   658  					t.Fatalf("expected an EDS push, got: %+v", upd)
   659  				}
   660  			}
   661  
   662  			testEndpoints("10.10.1.1", "outbound|8080||flipflop.com", adscConn, t)
   663  			if shard, ok := s.Discovery.Env.EndpointIndex.ShardsForService("flipflop.com", ""); !ok {
   664  				t.Fatalf("Expected service key %s to be present in EndpointIndex. But missing %v", "flipflop.com", s.Discovery.Env.EndpointIndex.Shardz())
   665  			} else {
   666  				assert.Equal(t, sets.SortedList(shard.ServiceAccounts), []string{tt.newSa})
   667  			}
   668  		})
   669  	}
   670  }
   671  
   672  // Validate that deleting a service clears entries from EndpointIndex.
   673  func TestDeleteService(t *testing.T) {
   674  	s := xdsfake.NewFakeDiscoveryServer(t, xdsfake.FakeOptions{})
   675  	addEdsCluster(s, "removeservice.com", "http", "10.0.0.53", 8080)
   676  	adscConn := s.Connect(nil, nil, watchEds)
   677  
   678  	// Validate that endpoints are pushed correctly.
   679  	testEndpoints("10.0.0.53", "outbound|8080||removeservice.com", adscConn, t)
   680  
   681  	s.MemRegistry.RemoveService("removeservice.com")
   682  
   683  	if _, ok := s.Discovery.Env.EndpointIndex.ShardsForService("removeservice.com", ""); ok {
   684  		t.Fatalf("Expected service key %s to be deleted in EndpointIndex. But is still there %v",
   685  			"removeservice.com", s.Discovery.Env.EndpointIndex.Shardz())
   686  	}
   687  }
   688  
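// fullPush triggers a full xDS push on the fake discovery server.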
   689  func fullPush(s *xdsfake.FakeDiscoveryServer) {
   690  	s.Discovery.Push(&model.PushRequest{Full: true})
   691  }
   692  
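// addTestClientEndpoints registers the test-1.default service with one instance
// per client locality, so the connecting test ADS clients (10.10.10.10 and
// 10.10.10.11) are themselves part of the mesh and locality coloring can be verified.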
   693  func addTestClientEndpoints(m *memory.ServiceDiscovery) {
   694  	svc := &model.Service{
   695  		Hostname: "test-1.default",
   696  		Ports: model.PortList{
   697  			{
   698  				Name:     "http",
   699  				Port:     80,
   700  				Protocol: protocol.HTTP,
   701  			},
   702  		},
   703  	}
   704  	m.AddService(svc)
   705  	m.AddInstance(&model.ServiceInstance{
   706  		Service: svc,
   707  		Endpoint: &model.IstioEndpoint{
   708  			Address:         "10.10.10.10",
   709  			ServicePortName: "http",
   710  			EndpointPort:    80,
   711  			Locality:        model.Locality{Label: asdcLocality},
   712  		},
   713  		ServicePort: &model.Port{
   714  			Name:     "http",
   715  			Port:     80,
   716  			Protocol: protocol.HTTP,
   717  		},
   718  	})
   719  	m.AddInstance(&model.ServiceInstance{
   720  		Service: svc,
   721  		Endpoint: &model.IstioEndpoint{
   722  			Address:         "10.10.10.11",
   723  			ServicePortName: "http",
   724  			EndpointPort:    80,
   725  			Locality:        model.Locality{Label: asdc2Locality},
   726  		},
   727  		ServicePort: &model.Port{
   728  			Name:     "http",
   729  			Port:     80,
   730  			Protocol: protocol.HTTP,
   731  		},
   732  	})
   733  }
   734  
// Verify server sends the endpoint. This checks for a single endpoint with the given
// address.
   737  func testTCPEndpoints(expected string, adsc *adsc.ADSC, t *testing.T) {
   738  	t.Helper()
   739  	testEndpoints(expected, "outbound|8080||eds.test.svc.cluster.local", adsc, t)
   740  }
   741  
// Verify server sends the endpoint. This checks for a single endpoint with the given
// address.
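//
// For example:
//
//	testEndpoints("10.0.0.53", "outbound|8080||edsdns.svc.cluster.local", adscConn, t)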
   744  func testEndpoints(expected string, cluster string, adsc *adsc.ADSC, t *testing.T) {
   745  	t.Helper()
   746  	lbe, f := adsc.GetEndpoints()[cluster]
   747  	if !f || len(lbe.Endpoints) == 0 {
   748  		t.Fatalf("No lb endpoints for %v, %v", cluster, adsc.EndpointsJSON())
   749  	}
   750  	var found []string
   751  	for _, lbe := range lbe.Endpoints {
   752  		for _, e := range lbe.LbEndpoints {
   753  			addr := e.GetEndpoint().Address.GetSocketAddress().Address
   754  			found = append(found, addr)
   755  			if expected == addr {
   756  				return
   757  			}
   758  		}
   759  	}
   760  	t.Fatalf("Expecting %s got %v", expected, found)
   761  }
   762  
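// testLocalityPrioritizedEndpoints verifies that each client sees endpoint
// priorities assigned relative to its own locality for the cluster with outlier
// detection, and no priorities for the cluster without it.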
   763  func testLocalityPrioritizedEndpoints(adsc *adsc.ADSC, adsc2 *adsc.ADSC, t *testing.T) {
   764  	endpoints1 := adsc.GetEndpoints()
   765  	endpoints2 := adsc2.GetEndpoints()
   766  
   767  	verifyLocalityPriorities(asdcLocality, endpoints1["outbound|80||locality.cluster.local"].GetEndpoints(), t)
   768  	verifyLocalityPriorities(asdc2Locality, endpoints2["outbound|80||locality.cluster.local"].GetEndpoints(), t)
   769  
   770  	// No outlier detection specified for this cluster, so we shouldn't apply priority.
   771  	verifyNoLocalityPriorities(endpoints1["outbound|80||locality-no-outlier-detection.cluster.local"].GetEndpoints(), t)
   772  	verifyNoLocalityPriorities(endpoints2["outbound|80||locality-no-outlier-detection.cluster.local"].GetEndpoints(), t)
   773  }
   774  
   775  // Tests that Services with multiple ports sharing the same port number are properly sent endpoints.
   776  // Real world use case for this is kube-dns, which uses port 53 for TCP and UDP.
   777  func testOverlappingPorts(s *xdsfake.FakeDiscoveryServer, adsc *adsc.ADSC, t *testing.T) {
   778  	// Test initial state
   779  	testEndpoints("10.0.0.53", "outbound|53||overlapping.cluster.local", adsc, t)
   780  
   781  	s.Discovery.Push(&model.PushRequest{
   782  		Full: true,
   783  		ConfigsUpdated: sets.New(model.ConfigKey{
   784  			Kind: kind.ServiceEntry,
   785  			Name: "overlapping.cluster.local",
   786  		}),
   787  	})
   788  	_, _ = adsc.Wait(5 * time.Second)
   789  
	// After the scoped full push, we should still see the endpoint
   791  	testEndpoints("10.0.0.53", "outbound|53||overlapping.cluster.local", adsc, t)
   792  }
   793  
   794  func verifyNoLocalityPriorities(eps []*endpoint.LocalityLbEndpoints, t *testing.T) {
   795  	for _, ep := range eps {
   796  		if ep.GetPriority() != 0 {
   797  			t.Errorf("expected no locality priorities to apply, got priority %v.", ep.GetPriority())
   798  		}
   799  	}
   800  }
   801  
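// verifyLocalityPriorities checks the failover tiers relative to the proxy
// locality: same subzone -> priority 0, different subzone -> 1, different
// zone -> 2, different region -> 3.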
   802  func verifyLocalityPriorities(proxyLocality string, eps []*endpoint.LocalityLbEndpoints, t *testing.T) {
   803  	items := strings.SplitN(proxyLocality, "/", 3)
   804  	region, zone, subzone := items[0], items[1], items[2]
   805  	for _, ep := range eps {
   806  		if ep.GetLocality().Region == region {
   807  			if ep.GetLocality().Zone == zone {
   808  				if ep.GetLocality().SubZone == subzone {
   809  					if ep.GetPriority() != 0 {
   810  						t.Errorf("expected endpoint pool from same locality to have priority of 0, got %v", ep.GetPriority())
   811  					}
   812  				} else if ep.GetPriority() != 1 {
   813  					t.Errorf("expected endpoint pool from a different subzone to have priority of 1, got %v", ep.GetPriority())
   814  				}
   815  			} else {
   816  				if ep.GetPriority() != 2 {
   817  					t.Errorf("expected endpoint pool from a different zone to have priority of 2, got %v", ep.GetPriority())
   818  				}
   819  			}
   820  		} else {
   821  			if ep.GetPriority() != 3 {
   822  				t.Errorf("expected endpoint pool from a different region to have priority of 3, got %v", ep.GetPriority())
   823  			}
   824  		}
   825  	}
   826  }
   827  
   828  // Verify server sends UDS endpoints
   829  func testUdsEndpoints(adsc *adsc.ADSC, t *testing.T) {
	// Check the UDS endpoint (used to be a separate test, but it used an old, unused GRPC method).
	// The new test also verifies CDS is pushing the UDS cluster, since adsc.eds is
	// populated using the CDS response.
   833  	lbe, f := adsc.GetEndpoints()["outbound|0||localuds.cluster.local"]
   834  	if !f || len(lbe.Endpoints) == 0 {
   835  		t.Error("No UDS lb endpoints")
   836  	} else {
   837  		ep0 := lbe.Endpoints[0]
   838  		if len(ep0.LbEndpoints) != 1 {
   839  			t.Fatalf("expected 1 LB endpoint but got %d", len(ep0.LbEndpoints))
   840  		}
   841  		lbep := ep0.LbEndpoints[0]
   842  		path := lbep.GetEndpoint().GetAddress().GetPipe().GetPath()
   843  		if path != udsPath {
   844  			t.Fatalf("expected Pipe to %s, got %s", udsPath, path)
   845  		}
   846  	}
   847  }
   848  
// edsUpdates verifies that an old style (non-incremental) endpoint update triggers an EDS push.
   850  func edsUpdates(s *xdsfake.FakeDiscoveryServer, adsc *adsc.ADSC, t *testing.T) {
   851  	// Old style (non-incremental)
   852  	s.MemRegistry.SetEndpoints(edsIncSvc, "",
   853  		newEndpointWithAccount("127.0.0.3", "hello-sa", "v1"))
   854  
   855  	// will trigger recompute and push
   856  
   857  	if _, err := adsc.Wait(5*time.Second, v3.EndpointType); err != nil {
   858  		t.Fatal("EDS push failed", err)
   859  	}
   860  	testTCPEndpoints("127.0.0.3", adsc, t)
   861  }
   862  
   863  // edsFullUpdateCheck checks for updates required in a full push after the CDS update
   864  func edsFullUpdateCheck(adsc *adsc.ADSC, t *testing.T) {
   865  	t.Helper()
   866  	if upd, err := adsc.Wait(15*time.Second, watchAll...); err != nil {
   867  		t.Fatal("Expecting CDS, EDS, LDS, and RDS update as part of a full push", err, upd)
   868  	}
   869  }
   870  
// This test must be run in isolation; it can't be parallelized with any other v2 test.
   872  // It makes different kind of updates, and checks that incremental or full push happens.
   873  // In particular:
   874  // - just endpoint changes -> incremental
   875  // - service account changes -> full ( in future: CDS only )
   876  // - label changes -> full
   877  func edsUpdateInc(s *xdsfake.FakeDiscoveryServer, adsc *adsc.ADSC, t *testing.T) {
   878  	// TODO: set endpoints for a different cluster (new shard)
   879  
   880  	// Verify initial state
   881  	testTCPEndpoints("127.0.0.1", adsc, t)
   882  
   883  	adsc.WaitClear() // make sure there are no pending pushes.
   884  
   885  	// Equivalent with the event generated by K8S watching the Service.
   886  	// Will trigger a push.
   887  	s.MemRegistry.SetEndpoints(edsIncSvc, "",
   888  		newEndpointWithAccount("127.0.0.2", "hello-sa", "v1"))
   889  
   890  	upd, err := adsc.Wait(5*time.Second, v3.EndpointType)
   891  	if err != nil {
   892  		t.Fatal("Incremental push failed", err)
   893  	}
   894  	if slices.Contains(upd, v3.ClusterType) {
   895  		t.Fatal("Expecting EDS only update, got", upd)
   896  	}
   897  
   898  	testTCPEndpoints("127.0.0.2", adsc, t)
   899  
   900  	// Update the endpoint with different SA - expect full
   901  	s.MemRegistry.SetEndpoints(edsIncSvc, "",
   902  		newEndpointWithAccount("127.0.0.2", "account2", "v1"))
   903  
   904  	edsFullUpdateCheck(adsc, t)
   905  	testTCPEndpoints("127.0.0.2", adsc, t)
   906  
   907  	// Update the endpoint again, no SA change - expect incremental
   908  	s.MemRegistry.SetEndpoints(edsIncSvc, "",
   909  		newEndpointWithAccount("127.0.0.4", "account2", "v1"))
   910  
   911  	upd, err = adsc.Wait(5 * time.Second)
   912  	if err != nil {
   913  		t.Fatal("Incremental push failed", err)
   914  	}
   915  	if !reflect.DeepEqual(upd, []string{v3.EndpointType}) {
   916  		t.Fatal("Expecting EDS only update, got", upd)
   917  	}
   918  	testTCPEndpoints("127.0.0.4", adsc, t)
   919  
   920  	// Update the endpoint to original SA - expect full
   921  	s.MemRegistry.SetEndpoints(edsIncSvc, "",
   922  		newEndpointWithAccount("127.0.0.2", "hello-sa", "v1"))
   923  	edsFullUpdateCheck(adsc, t)
   924  	testTCPEndpoints("127.0.0.2", adsc, t)
   925  
   926  	// Update the endpoint again, no label change - expect incremental
   927  	s.MemRegistry.SetEndpoints(edsIncSvc, "",
   928  		newEndpointWithAccount("127.0.0.5", "hello-sa", "v1"))
   929  
   930  	upd, err = adsc.Wait(5 * time.Second)
   931  	if err != nil {
   932  		t.Fatal("Incremental push failed", err)
   933  	}
   934  	if !reflect.DeepEqual(upd, []string{v3.EndpointType}) {
   935  		t.Fatal("Expecting EDS only update, got", upd)
   936  	}
   937  	testTCPEndpoints("127.0.0.5", adsc, t)
   938  
   939  	// Wipe out all endpoints - expect full
   940  	s.MemRegistry.SetEndpoints(edsIncSvc, "", []*model.IstioEndpoint{})
   941  
   942  	if upd, err := adsc.Wait(15*time.Second, v3.EndpointType); err != nil {
   943  		t.Fatal("Expecting EDS update as part of a partial push", err, upd)
   944  	}
   945  
   946  	lbe := adsc.GetEndpoints()["outbound|8080||eds.test.svc.cluster.local"]
   947  	if len(lbe.Endpoints) != 0 {
   948  		t.Fatalf("There should be no endpoints for outbound|8080||eds.test.svc.cluster.local. Endpoints:\n%v", adsc.EndpointsJSON())
   949  	}
   950  }
   951  
   952  // Make a direct EDS grpc request to pilot, verify the result is as expected.
   953  // This test includes a 'bad client' regression test, which fails to read on the
   954  // stream.
   955  func multipleRequest(s *xdsfake.FakeDiscoveryServer, inc bool, nclients,
   956  	nPushes int, to time.Duration, _ map[string]string, t *testing.T,
   957  ) {
   958  	wgConnect := &sync.WaitGroup{}
   959  	wg := &sync.WaitGroup{}
   960  	errChan := make(chan error, nclients)
   961  
	// Bad client - will not read any response. This triggers Write to block, which should
	// be detected.
   964  	// This is not using adsc, which consumes the events automatically.
   965  	ads := s.ConnectADS()
   966  	ads.Request(t, nil)
   967  
   968  	n := nclients
   969  	wg.Add(n)
   970  	wgConnect.Add(n)
   971  	rcvPush := uatomic.NewInt32(0)
   972  	rcvClients := uatomic.NewInt32(0)
   973  	for i := 0; i < n; i++ {
   974  		current := i
   975  		go func(id int) {
   976  			defer wg.Done()
   977  			// Connect and get initial response
   978  			adscConn := s.Connect(&model.Proxy{IPAddresses: []string{fmt.Sprintf("1.1.1.%d", id)}}, nil, nil)
   979  			_, err := adscConn.Wait(15*time.Second, v3.RouteType)
   980  			if err != nil {
   981  				errChan <- errors.New("failed to get initial rds: " + err.Error())
   982  				wgConnect.Done()
   983  				return
   984  			}
   985  
   986  			if len(adscConn.GetEndpoints()) == 0 {
   987  				errChan <- errors.New("no endpoints")
   988  				wgConnect.Done()
   989  				return
   990  			}
   991  
   992  			wgConnect.Done()
   993  
   994  			// Check we received all pushes
   995  			log.Infof("Waiting for pushes %v", id)
   996  
   997  			// Pushes may be merged so we may not get nPushes pushes
   998  			got, err := adscConn.Wait(15*time.Second, v3.EndpointType)
   999  
  1000  			// If in incremental mode, shouldn't receive cds|rds|lds here
  1001  			if inc {
  1002  				for _, g := range got {
					if g == v3.ClusterType || g == v3.RouteType || g == v3.ListenerType {
						errChan <- fmt.Errorf("should be eds incremental but received %v. %v %v",
							g, err, id)
						return
					}
  1008  				}
  1009  			}
  1010  
  1011  			rcvPush.Inc()
  1012  			if err != nil {
  1013  				log.Infof("Recv %v failed: %v", id, err)
  1014  				errChan <- fmt.Errorf("failed to receive a response in 15 s %v %v",
  1015  					err, id)
  1016  				return
  1017  			}
  1018  
  1019  			log.Infof("Received all pushes %v", id)
  1020  			rcvClients.Inc()
  1021  
  1022  			adscConn.Close()
  1023  		}(current)
  1024  	}
  1025  	ok := waitTimeout(wgConnect, to)
  1026  	if !ok {
  1027  		t.Fatal("Failed to connect")
  1028  	}
  1029  	log.Info("Done connecting")
  1030  
	// All clients are connected - we can now start pushing changes.
  1032  	for j := 0; j < nPushes; j++ {
  1033  		if inc {
  1034  			// This will be throttled - we want to trigger a single push
  1035  			s.Discovery.AdsPushAll(&model.PushRequest{
  1036  				Full: false,
  1037  				ConfigsUpdated: sets.New(model.ConfigKey{
  1038  					Kind: kind.ServiceEntry,
  1039  					Name: edsIncSvc,
  1040  				}),
  1041  				Push: s.Discovery.Env.PushContext(),
  1042  			})
  1043  		} else {
  1044  			xds.AdsPushAll(s.Discovery)
  1045  		}
  1046  		log.Infof("Push %v done", j)
  1047  	}
  1048  
  1049  	ok = waitTimeout(wg, to)
  1050  	if !ok {
  1051  		t.Errorf("Failed to receive all responses %d %d", rcvClients.Load(), rcvPush.Load())
  1052  		buf := make([]byte, 1<<16)
  1053  		runtime.Stack(buf, true)
  1054  		fmt.Printf("%s", buf)
  1055  	}
  1056  
  1057  	close(errChan)
  1058  
  1059  	// moved from ads_test, which had a duplicated test.
  1060  	for e := range errChan {
  1061  		t.Error(e)
  1062  	}
  1063  }
  1064  
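// waitTimeout waits for the WaitGroup, returning false if the timeout elapses first.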
  1065  func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
  1066  	c := make(chan struct{})
  1067  	go func() {
  1068  		defer close(c)
  1069  		wg.Wait()
  1070  	}()
  1071  	select {
  1072  	case <-c:
  1073  		return true
  1074  	case <-time.After(timeout):
  1075  		return false
  1076  	}
  1077  }
  1078  
  1079  const udsPath = "/var/run/test/socket"
  1080  
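// addUdsEndpoint registers a MeshExternal service backed by a Unix domain
// socket endpoint at udsPath, then triggers a full push.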
  1081  func addUdsEndpoint(s *xds.DiscoveryServer, m *memory.ServiceDiscovery) {
  1082  	svc := &model.Service{
  1083  		Hostname: "localuds.cluster.local",
  1084  		Ports: model.PortList{
  1085  			{
  1086  				Name:     "grpc",
  1087  				Port:     0,
  1088  				Protocol: protocol.GRPC,
  1089  			},
  1090  		},
  1091  		MeshExternal: true,
  1092  		Resolution:   model.ClientSideLB,
  1093  	}
  1094  	m.AddService(svc)
	m.AddInstance(&model.ServiceInstance{
		Service: svc,
  1108  		Endpoint: &model.IstioEndpoint{
  1109  			Address:         udsPath,
  1110  			EndpointPort:    0,
  1111  			ServicePortName: "grpc",
  1112  			Locality:        model.Locality{Label: "localhost"},
  1113  			Labels:          map[string]string{"socket": "unix"},
  1114  		},
  1115  		ServicePort: &model.Port{
  1116  			Name:     "grpc",
  1117  			Port:     0,
  1118  			Protocol: protocol.GRPC,
  1119  		},
  1120  	})
  1121  
  1122  	pushReq := &model.PushRequest{
  1123  		Full:   true,
  1124  		Reason: model.NewReasonStats(model.ConfigUpdate),
  1125  	}
  1126  	s.ConfigUpdate(pushReq)
  1127  }
  1128  
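// addLocalityEndpoints registers the given service with one endpoint
// (10.0.0.<i>) in each of seven localities spanning two regions.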
  1129  func addLocalityEndpoints(m *memory.ServiceDiscovery, hostname host.Name) {
  1130  	svc := &model.Service{
  1131  		Hostname: hostname,
  1132  		Ports: model.PortList{
  1133  			{
  1134  				Name:     "http",
  1135  				Port:     80,
  1136  				Protocol: protocol.HTTP,
  1137  			},
  1138  		},
  1139  	}
  1140  	m.AddService(svc)
  1141  	localities := []string{
  1142  		"region1/zone1/subzone1",
  1143  		"region1/zone1/subzone2",
  1144  		"region1/zone2/subzone1",
  1145  		"region2/zone1/subzone1",
  1146  		"region2/zone1/subzone2",
  1147  		"region2/zone2/subzone1",
  1148  		"region2/zone2/subzone2",
  1149  	}
	for i, locality := range localities {
  1152  		m.AddInstance(&model.ServiceInstance{
  1153  			Service: svc,
  1154  			Endpoint: &model.IstioEndpoint{
  1155  				Address:         fmt.Sprintf("10.0.0.%v", i),
  1156  				EndpointPort:    80,
  1157  				ServicePortName: "http",
  1158  				Locality:        model.Locality{Label: locality},
  1159  			},
  1160  			ServicePort: &model.Port{
  1161  				Name:     "http",
  1162  				Port:     80,
  1163  				Protocol: protocol.HTTP,
  1164  			},
  1165  		})
  1166  	}
  1167  }
  1168  
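// addEdsCluster registers an HTTP service with a single endpoint using service
// account "sa", then triggers a full push.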
  1169  // nolint: unparam
  1170  func addEdsCluster(s *xdsfake.FakeDiscoveryServer, hostName string, portName string, address string, port int) {
  1171  	svc := &model.Service{
  1172  		Hostname: host.Name(hostName),
  1173  		Ports: model.PortList{
  1174  			{
  1175  				Name:     portName,
  1176  				Port:     port,
  1177  				Protocol: protocol.HTTP,
  1178  			},
  1179  		},
  1180  	}
  1181  	s.MemRegistry.AddService(svc)
  1182  
  1183  	s.MemRegistry.AddInstance(&model.ServiceInstance{
  1184  		Service: svc,
  1185  		Endpoint: &model.IstioEndpoint{
  1186  			Address:         address,
  1187  			EndpointPort:    uint32(port),
  1188  			ServicePortName: portName,
  1189  			ServiceAccount:  "sa",
  1190  		},
  1191  		ServicePort: &model.Port{
  1192  			Name:     portName,
  1193  			Port:     port,
  1194  			Protocol: protocol.HTTP,
  1195  		},
  1196  	})
  1197  	fullPush(s)
  1198  }
  1199  
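// updateServiceResolution switches edsdns.svc.cluster.local to the given
// resolution and replaces its endpoint with a DNS name (somevip.com), then
// triggers a full push.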
  1200  func updateServiceResolution(s *xdsfake.FakeDiscoveryServer, resolution model.Resolution) {
  1201  	svc := &model.Service{
  1202  		Hostname: "edsdns.svc.cluster.local",
  1203  		Ports: model.PortList{
  1204  			{
  1205  				Name:     "http",
  1206  				Port:     8080,
  1207  				Protocol: protocol.HTTP,
  1208  			},
  1209  		},
  1210  		Resolution: resolution,
  1211  	}
  1212  	s.MemRegistry.AddService(svc)
  1213  
  1214  	s.MemRegistry.AddInstance(&model.ServiceInstance{
  1215  		Service: svc,
  1216  		Endpoint: &model.IstioEndpoint{
  1217  			Address:         "somevip.com",
  1218  			EndpointPort:    8080,
  1219  			ServicePortName: "http",
  1220  		},
  1221  		ServicePort: &model.Port{
  1222  			Name:     "http",
  1223  			Port:     8080,
  1224  			Protocol: protocol.HTTP,
  1225  		},
  1226  	})
  1227  
  1228  	fullPush(s)
  1229  }
  1230  
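// addOverlappingEndpoints registers a service exposing UDP and TCP on the same
// port number (53), mirroring the kube-dns pattern.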
  1231  func addOverlappingEndpoints(s *xdsfake.FakeDiscoveryServer) {
  1232  	svc := &model.Service{
  1233  		Hostname: "overlapping.cluster.local",
  1234  		Ports: model.PortList{
  1235  			{
  1236  				Name:     "dns",
  1237  				Port:     53,
  1238  				Protocol: protocol.UDP,
  1239  			},
  1240  			{
  1241  				Name:     "tcp-dns",
  1242  				Port:     53,
  1243  				Protocol: protocol.TCP,
  1244  			},
  1245  		},
  1246  	}
  1247  	s.MemRegistry.AddService(svc)
  1248  	s.MemRegistry.AddInstance(&model.ServiceInstance{
  1249  		Service: svc,
  1250  		Endpoint: &model.IstioEndpoint{
  1251  			Address:         "10.0.0.53",
  1252  			EndpointPort:    53,
  1253  			ServicePortName: "tcp-dns",
  1254  		},
  1255  		ServicePort: &model.Port{
  1256  			Name:     "tcp-dns",
  1257  			Port:     53,
  1258  			Protocol: protocol.TCP,
  1259  		},
  1260  	})
  1261  	fullPush(s)
  1262  }
  1263  
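// addUnhealthyCluster registers a service whose only endpoint starts out unhealthy.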
  1264  func addUnhealthyCluster(s *xdsfake.FakeDiscoveryServer) {
  1265  	svc := &model.Service{
  1266  		Hostname: "unhealthy.svc.cluster.local",
  1267  		Ports: model.PortList{
  1268  			{
  1269  				Name:     "tcp-dns",
  1270  				Port:     53,
  1271  				Protocol: protocol.TCP,
  1272  			},
  1273  		},
  1274  	}
  1275  	s.MemRegistry.AddService(svc)
  1276  	s.MemRegistry.AddInstance(&model.ServiceInstance{
  1277  		Service: svc,
  1278  		Endpoint: &model.IstioEndpoint{
  1279  			Address:         "10.0.0.53",
  1280  			EndpointPort:    53,
  1281  			ServicePortName: "tcp-dns",
  1282  			HealthStatus:    model.UnHealthy,
  1283  		},
  1284  		ServicePort: &model.Port{
  1285  			Name:     "tcp-dns",
  1286  			Port:     53,
  1287  			Protocol: protocol.TCP,
  1288  		},
  1289  	})
  1290  	fullPush(s)
  1291  }
  1292  
  1293  // Verify the endpoint debug interface is installed and returns some string.
  1294  // TODO: parse response, check if data captured matches what we expect.
  1295  // TODO: use this in integration tests.
  1296  // TODO: refine the output
  1297  // TODO: dump the ServiceInstances as well
  1298  func testEdsz(t *testing.T, s *xdsfake.FakeDiscoveryServer, proxyID string) {
  1299  	req, err := http.NewRequest(http.MethodGet, "/debug/edsz?proxyID="+proxyID, nil)
  1300  	if err != nil {
  1301  		t.Fatal(err)
  1302  	}
  1303  	rr := httptest.NewRecorder()
  1304  	debug := http.HandlerFunc(s.Discovery.Edsz)
  1305  	debug.ServeHTTP(rr, req)
  1306  
  1307  	data, err := io.ReadAll(rr.Body)
  1308  	if err != nil {
		t.Fatalf("Failed to read /edsz: %v", err)
  1310  	}
  1311  	statusStr := string(data)
  1312  
  1313  	if !strings.Contains(statusStr, "\"outbound|8080||eds.test.svc.cluster.local\"") {
  1314  		t.Fatal("Mock eds service not found ", statusStr)
  1315  	}
  1316  }