k8s.io/kubernetes@v1.29.3/pkg/controller/endpointslice/endpointslice_controller_test.go (about)

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package endpointslice
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"reflect"
    23  	"strconv"
    24  	"testing"
    25  	"time"
    26  
    27  	"github.com/stretchr/testify/assert"
    28  	"github.com/stretchr/testify/require"
    29  	v1 "k8s.io/api/core/v1"
    30  	discovery "k8s.io/api/discovery/v1"
    31  	"k8s.io/apimachinery/pkg/api/resource"
    32  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    33  	"k8s.io/apimachinery/pkg/runtime"
    34  	"k8s.io/apimachinery/pkg/runtime/schema"
    35  	"k8s.io/apimachinery/pkg/types"
    36  	"k8s.io/apimachinery/pkg/util/intstr"
    37  	"k8s.io/apimachinery/pkg/util/rand"
    38  	"k8s.io/apimachinery/pkg/util/wait"
    39  	"k8s.io/client-go/informers"
    40  	"k8s.io/client-go/kubernetes/fake"
    41  	k8stesting "k8s.io/client-go/testing"
    42  	"k8s.io/client-go/tools/cache"
    43  	"k8s.io/endpointslice/topologycache"
    44  	endpointsliceutil "k8s.io/endpointslice/util"
    45  	"k8s.io/klog/v2/ktesting"
    46  	"k8s.io/kubernetes/pkg/controller"
    47  	endpointslicepkg "k8s.io/kubernetes/pkg/controller/util/endpointslice"
    48  	"k8s.io/utils/pointer"
    49  )
    50  
    51  // Most of the tests related to EndpointSlice allocation can be found in reconciler_test.go
    52  // Tests here primarily focus on unique controller functionality before the reconciler begins
    53  
    54  var alwaysReady = func() bool { return true }
    55  
    56  type endpointSliceController struct {
    57  	*Controller
    58  	endpointSliceStore cache.Store
    59  	nodeStore          cache.Store
    60  	podStore           cache.Store
    61  	serviceStore       cache.Store
    62  }
    63  
    64  func newController(t *testing.T, nodeNames []string, batchPeriod time.Duration) (*fake.Clientset, *endpointSliceController) {
    65  	client := fake.NewSimpleClientset()
    66  
    67  	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
    68  	nodeInformer := informerFactory.Core().V1().Nodes()
    69  	indexer := nodeInformer.Informer().GetIndexer()
    70  	for _, nodeName := range nodeNames {
    71  		indexer.Add(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
    72  	}
    73  
    74  	esInformer := informerFactory.Discovery().V1().EndpointSlices()
    75  	esIndexer := esInformer.Informer().GetIndexer()
    76  
    77  	// These reactors are required to mock functionality that would be covered
    78  	// automatically if we weren't using the fake client.
    79  	client.PrependReactor("create", "endpointslices", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
    80  		endpointSlice := action.(k8stesting.CreateAction).GetObject().(*discovery.EndpointSlice)
    81  
    82  		if endpointSlice.ObjectMeta.GenerateName != "" {
    83  			endpointSlice.ObjectMeta.Name = fmt.Sprintf("%s-%s", endpointSlice.ObjectMeta.GenerateName, rand.String(8))
    84  			endpointSlice.ObjectMeta.GenerateName = ""
    85  		}
    86  		endpointSlice.Generation = 1
    87  		esIndexer.Add(endpointSlice)
    88  
    89  		return false, endpointSlice, nil
    90  	}))
    91  	client.PrependReactor("update", "endpointslices", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
    92  		endpointSlice := action.(k8stesting.CreateAction).GetObject().(*discovery.EndpointSlice)
    93  		endpointSlice.Generation++
    94  		esIndexer.Update(endpointSlice)
    95  
    96  		return false, endpointSlice, nil
    97  	}))
    98  
    99  	_, ctx := ktesting.NewTestContext(t)
   100  	esController := NewController(
   101  		ctx,
   102  		informerFactory.Core().V1().Pods(),
   103  		informerFactory.Core().V1().Services(),
   104  		nodeInformer,
   105  		esInformer,
   106  		int32(100),
   107  		client,
   108  		batchPeriod)
   109  
   110  	esController.nodesSynced = alwaysReady
   111  	esController.podsSynced = alwaysReady
   112  	esController.servicesSynced = alwaysReady
   113  	esController.endpointSlicesSynced = alwaysReady
   114  
   115  	return client, &endpointSliceController{
   116  		esController,
   117  		informerFactory.Discovery().V1().EndpointSlices().Informer().GetStore(),
   118  		informerFactory.Core().V1().Nodes().Informer().GetStore(),
   119  		informerFactory.Core().V1().Pods().Informer().GetStore(),
   120  		informerFactory.Core().V1().Services().Informer().GetStore(),
   121  	}
   122  }
   123  
   124  func newPod(n int, namespace string, ready bool, nPorts int, terminating bool) *v1.Pod {
   125  	status := v1.ConditionTrue
   126  	if !ready {
   127  		status = v1.ConditionFalse
   128  	}
   129  
   130  	var deletionTimestamp *metav1.Time
   131  	if terminating {
   132  		deletionTimestamp = &metav1.Time{
   133  			Time: time.Now(),
   134  		}
   135  	}
   136  
   137  	p := &v1.Pod{
   138  		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
   139  		ObjectMeta: metav1.ObjectMeta{
   140  			Namespace:         namespace,
   141  			Name:              fmt.Sprintf("pod%d", n),
   142  			Labels:            map[string]string{"foo": "bar"},
   143  			DeletionTimestamp: deletionTimestamp,
   144  			ResourceVersion:   fmt.Sprint(n),
   145  		},
   146  		Spec: v1.PodSpec{
   147  			Containers: []v1.Container{{
   148  				Name: "container-1",
   149  			}},
   150  			NodeName: "node-1",
   151  		},
   152  		Status: v1.PodStatus{
   153  			PodIP: fmt.Sprintf("1.2.3.%d", 4+n),
   154  			PodIPs: []v1.PodIP{{
   155  				IP: fmt.Sprintf("1.2.3.%d", 4+n),
   156  			}},
   157  			Conditions: []v1.PodCondition{
   158  				{
   159  					Type:   v1.PodReady,
   160  					Status: status,
   161  				},
   162  			},
   163  		},
   164  	}
   165  
   166  	return p
   167  }
   168  
   169  func expectActions(t *testing.T, actions []k8stesting.Action, num int, verb, resource string) {
   170  	t.Helper()
   171  	// if actions are less the below logic will panic
   172  	if num > len(actions) {
   173  		t.Fatalf("len of actions %v is unexpected. Expected to be at least %v", len(actions), num+1)
   174  	}
   175  
   176  	for i := 0; i < num; i++ {
   177  		relativePos := len(actions) - i - 1
   178  		assert.Equal(t, verb, actions[relativePos].GetVerb(), "Expected action -%d verb to be %s", i, verb)
   179  		assert.Equal(t, resource, actions[relativePos].GetResource().Resource, "Expected action -%d resource to be %s", i, resource)
   180  	}
   181  }
   182  
   183  // Ensure SyncService for service with no selector results in no action
   184  func TestSyncServiceNoSelector(t *testing.T) {
   185  	ns := metav1.NamespaceDefault
   186  	serviceName := "testing-1"
   187  	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   188  	esController.serviceStore.Add(&v1.Service{
   189  		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
   190  		Spec: v1.ServiceSpec{
   191  			Ports: []v1.ServicePort{{TargetPort: intstr.FromInt32(80)}},
   192  		},
   193  	})
   194  
   195  	logger, _ := ktesting.NewTestContext(t)
   196  	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
   197  	assert.NoError(t, err)
   198  	assert.Len(t, client.Actions(), 0)
   199  }
   200  
   201  func TestServiceExternalNameTypeSync(t *testing.T) {
   202  	serviceName := "testing-1"
   203  	namespace := metav1.NamespaceDefault
   204  
   205  	testCases := []struct {
   206  		desc    string
   207  		service *v1.Service
   208  	}{
   209  		{
   210  			desc: "External name with selector and ports should not receive endpoint slices",
   211  			service: &v1.Service{
   212  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespace},
   213  				Spec: v1.ServiceSpec{
   214  					Selector: map[string]string{"foo": "bar"},
   215  					Ports:    []v1.ServicePort{{Port: 80}},
   216  					Type:     v1.ServiceTypeExternalName,
   217  				},
   218  			},
   219  		},
   220  		{
   221  			desc: "External name with ports should not receive endpoint slices",
   222  			service: &v1.Service{
   223  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespace},
   224  				Spec: v1.ServiceSpec{
   225  					Ports: []v1.ServicePort{{Port: 80}},
   226  					Type:  v1.ServiceTypeExternalName,
   227  				},
   228  			},
   229  		},
   230  		{
   231  			desc: "External name with selector should not receive endpoint slices",
   232  			service: &v1.Service{
   233  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespace},
   234  				Spec: v1.ServiceSpec{
   235  					Selector: map[string]string{"foo": "bar"},
   236  					Type:     v1.ServiceTypeExternalName,
   237  				},
   238  			},
   239  		},
   240  		{
   241  			desc: "External name without selector and ports should not receive endpoint slices",
   242  			service: &v1.Service{
   243  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespace},
   244  				Spec: v1.ServiceSpec{
   245  					Type: v1.ServiceTypeExternalName,
   246  				},
   247  			},
   248  		},
   249  	}
   250  
   251  	for _, tc := range testCases {
   252  		t.Run(tc.desc, func(t *testing.T) {
   253  			client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   254  			logger, _ := ktesting.NewTestContext(t)
   255  
   256  			pod := newPod(1, namespace, true, 0, false)
   257  			err := esController.podStore.Add(pod)
   258  			assert.NoError(t, err)
   259  
   260  			err = esController.serviceStore.Add(tc.service)
   261  			assert.NoError(t, err)
   262  
   263  			err = esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
   264  			assert.NoError(t, err)
   265  			assert.Len(t, client.Actions(), 0)
   266  
   267  			sliceList, err := client.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
   268  			assert.NoError(t, err)
   269  			assert.Len(t, sliceList.Items, 0, "Expected 0 endpoint slices")
   270  		})
   271  	}
   272  }
   273  
   274  // Ensure SyncService for service with pending deletion results in no action
   275  func TestSyncServicePendingDeletion(t *testing.T) {
   276  	ns := metav1.NamespaceDefault
   277  	serviceName := "testing-1"
   278  	deletionTimestamp := metav1.Now()
   279  	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   280  	esController.serviceStore.Add(&v1.Service{
   281  		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns, DeletionTimestamp: &deletionTimestamp},
   282  		Spec: v1.ServiceSpec{
   283  			Selector: map[string]string{"foo": "bar"},
   284  			Ports:    []v1.ServicePort{{TargetPort: intstr.FromInt32(80)}},
   285  		},
   286  	})
   287  
   288  	logger, _ := ktesting.NewTestContext(t)
   289  	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
   290  	assert.NoError(t, err)
   291  	assert.Len(t, client.Actions(), 0)
   292  }
   293  
   294  // Ensure SyncService for service with selector but no pods results in placeholder EndpointSlice
   295  func TestSyncServiceWithSelector(t *testing.T) {
   296  	ns := metav1.NamespaceDefault
   297  	serviceName := "testing-1"
   298  	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   299  	standardSyncService(t, esController, ns, serviceName)
   300  	expectActions(t, client.Actions(), 1, "create", "endpointslices")
   301  
   302  	sliceList, err := client.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
   303  	assert.Nil(t, err, "Expected no error fetching endpoint slices")
   304  	assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices")
   305  	slice := sliceList.Items[0]
   306  	assert.Regexp(t, "^"+serviceName, slice.Name)
   307  	assert.Equal(t, serviceName, slice.Labels[discovery.LabelServiceName])
   308  	assert.EqualValues(t, []discovery.EndpointPort{}, slice.Ports)
   309  	assert.EqualValues(t, []discovery.Endpoint{}, slice.Endpoints)
   310  	assert.NotEmpty(t, slice.Annotations["endpoints.kubernetes.io/last-change-trigger-time"])
   311  }
   312  
   313  // Ensure SyncService gracefully handles a missing service. This test also
   314  // populates another existing service to ensure a clean up process doesn't
   315  // remove too much.
   316  func TestSyncServiceMissing(t *testing.T) {
   317  	namespace := metav1.NamespaceDefault
   318  	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   319  
   320  	// Build up existing service
   321  	existingServiceName := "stillthere"
   322  	existingServiceKey := endpointsliceutil.ServiceKey{Name: existingServiceName, Namespace: namespace}
   323  	esController.triggerTimeTracker.ServiceStates[existingServiceKey] = endpointsliceutil.ServiceState{}
   324  	esController.serviceStore.Add(&v1.Service{
   325  		ObjectMeta: metav1.ObjectMeta{Name: existingServiceName, Namespace: namespace},
   326  		Spec: v1.ServiceSpec{
   327  			Ports:    []v1.ServicePort{{TargetPort: intstr.FromInt32(80)}},
   328  			Selector: map[string]string{"foo": "bar"},
   329  		},
   330  	})
   331  
   332  	// Add missing service to triggerTimeTracker to ensure the reference is cleaned up
   333  	missingServiceName := "notthere"
   334  	missingServiceKey := endpointsliceutil.ServiceKey{Name: missingServiceName, Namespace: namespace}
   335  	esController.triggerTimeTracker.ServiceStates[missingServiceKey] = endpointsliceutil.ServiceState{}
   336  
   337  	logger, _ := ktesting.NewTestContext(t)
   338  	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, missingServiceName))
   339  
   340  	// nil should be returned when the service doesn't exist
   341  	assert.Nil(t, err, "Expected no error syncing service")
   342  
   343  	// That should mean no client actions were performed
   344  	assert.Len(t, client.Actions(), 0)
   345  
   346  	// TriggerTimeTracker should have removed the reference to the missing service
   347  	assert.NotContains(t, esController.triggerTimeTracker.ServiceStates, missingServiceKey)
   348  
   349  	// TriggerTimeTracker should have left the reference to the missing service
   350  	assert.Contains(t, esController.triggerTimeTracker.ServiceStates, existingServiceKey)
   351  }
   352  
   353  // Ensure SyncService correctly selects Pods.
   354  func TestSyncServicePodSelection(t *testing.T) {
   355  	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   356  	ns := metav1.NamespaceDefault
   357  
   358  	pod1 := newPod(1, ns, true, 0, false)
   359  	esController.podStore.Add(pod1)
   360  
   361  	// ensure this pod will not match the selector
   362  	pod2 := newPod(2, ns, true, 0, false)
   363  	pod2.Labels["foo"] = "boo"
   364  	esController.podStore.Add(pod2)
   365  
   366  	standardSyncService(t, esController, ns, "testing-1")
   367  	expectActions(t, client.Actions(), 1, "create", "endpointslices")
   368  
   369  	// an endpoint slice should be created, it should only reference pod1 (not pod2)
   370  	slices, err := client.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{})
   371  	assert.Nil(t, err, "Expected no error fetching endpoint slices")
   372  	assert.Len(t, slices.Items, 1, "Expected 1 endpoint slices")
   373  	slice := slices.Items[0]
   374  	assert.Len(t, slice.Endpoints, 1, "Expected 1 endpoint in first slice")
   375  	assert.NotEmpty(t, slice.Annotations[v1.EndpointsLastChangeTriggerTime])
   376  	endpoint := slice.Endpoints[0]
   377  	assert.EqualValues(t, endpoint.TargetRef, &v1.ObjectReference{Kind: "Pod", Namespace: ns, Name: pod1.Name})
   378  }
   379  
   380  func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
   381  	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   382  	ns := metav1.NamespaceDefault
   383  	serviceName := "testing-1"
   384  	service := createService(t, esController, ns, serviceName)
   385  	logger, _ := ktesting.NewTestContext(t)
   386  	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
   387  	assert.Nil(t, err, "Expected no error syncing service")
   388  
   389  	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
   390  	ownerRef := metav1.NewControllerRef(service, gvk)
   391  
   392  	deletedTs := metav1.Now()
   393  	endpointSlice := &discovery.EndpointSlice{
   394  		ObjectMeta: metav1.ObjectMeta{
   395  			Name:            "epSlice-1",
   396  			Namespace:       ns,
   397  			OwnerReferences: []metav1.OwnerReference{*ownerRef},
   398  			Labels: map[string]string{
   399  				discovery.LabelServiceName: serviceName,
   400  				discovery.LabelManagedBy:   controllerName,
   401  			},
   402  			DeletionTimestamp: &deletedTs,
   403  		},
   404  		AddressType: discovery.AddressTypeIPv4,
   405  	}
   406  	err = esController.endpointSliceStore.Add(endpointSlice)
   407  	if err != nil {
   408  		t.Fatalf("Expected no error adding EndpointSlice: %v", err)
   409  	}
   410  	_, err = client.DiscoveryV1().EndpointSlices(ns).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
   411  	if err != nil {
   412  		t.Fatalf("Expected no error creating EndpointSlice: %v", err)
   413  	}
   414  
   415  	logger, _ = ktesting.NewTestContext(t)
   416  	numActionsBefore := len(client.Actions())
   417  	err = esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
   418  	assert.Nil(t, err, "Expected no error syncing service")
   419  
   420  	// The EndpointSlice marked for deletion should be ignored by the controller, and thus
   421  	// should not result in any action.
   422  	if len(client.Actions()) != numActionsBefore {
   423  		t.Errorf("Expected 0 more actions, got %d", len(client.Actions())-numActionsBefore)
   424  	}
   425  }
   426  
   427  // Ensure SyncService correctly selects and labels EndpointSlices.
   428  func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
   429  	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
   430  	ns := metav1.NamespaceDefault
   431  	serviceName := "testing-1"
   432  	service := createService(t, esController, ns, serviceName)
   433  
   434  	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
   435  	ownerRef := metav1.NewControllerRef(service, gvk)
   436  
   437  	// 5 slices, 3 with matching labels for our service
   438  	endpointSlices := []*discovery.EndpointSlice{{
   439  		ObjectMeta: metav1.ObjectMeta{
   440  			Name:            "matching-1",
   441  			Namespace:       ns,
   442  			OwnerReferences: []metav1.OwnerReference{*ownerRef},
   443  			Labels: map[string]string{
   444  				discovery.LabelServiceName: serviceName,
   445  				discovery.LabelManagedBy:   controllerName,
   446  			},
   447  		},
   448  		AddressType: discovery.AddressTypeIPv4,
   449  	}, {
   450  		ObjectMeta: metav1.ObjectMeta{
   451  			Name:            "matching-2",
   452  			Namespace:       ns,
   453  			OwnerReferences: []metav1.OwnerReference{*ownerRef},
   454  			Labels: map[string]string{
   455  				discovery.LabelServiceName: serviceName,
   456  				discovery.LabelManagedBy:   controllerName,
   457  			},
   458  		},
   459  		AddressType: discovery.AddressTypeIPv4,
   460  	}, {
   461  		ObjectMeta: metav1.ObjectMeta{
   462  			Name:      "partially-matching-1",
   463  			Namespace: ns,
   464  			Labels: map[string]string{
   465  				discovery.LabelServiceName: serviceName,
   466  			},
   467  		},
   468  		AddressType: discovery.AddressTypeIPv4,
   469  	}, {
   470  		ObjectMeta: metav1.ObjectMeta{
   471  			Name:      "not-matching-1",
   472  			Namespace: ns,
   473  			Labels: map[string]string{
   474  				discovery.LabelServiceName: "something-else",
   475  				discovery.LabelManagedBy:   controllerName,
   476  			},
   477  		},
   478  		AddressType: discovery.AddressTypeIPv4,
   479  	}, {
   480  		ObjectMeta: metav1.ObjectMeta{
   481  			Name:      "not-matching-2",
   482  			Namespace: ns,
   483  			Labels: map[string]string{
   484  				discovery.LabelServiceName: serviceName,
   485  				discovery.LabelManagedBy:   "something-else",
   486  			},
   487  		},
   488  		AddressType: discovery.AddressTypeIPv4,
   489  	}}
   490  
   491  	cmc := newCacheMutationCheck(endpointSlices)
   492  
   493  	// need to add them to both store and fake clientset
   494  	for _, endpointSlice := range endpointSlices {
   495  		err := esController.endpointSliceStore.Add(endpointSlice)
   496  		if err != nil {
   497  			t.Fatalf("Expected no error adding EndpointSlice: %v", err)
   498  		}
   499  		_, err = client.DiscoveryV1().EndpointSlices(ns).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
   500  		if err != nil {
   501  			t.Fatalf("Expected no error creating EndpointSlice: %v", err)
   502  		}
   503  	}
   504  
   505  	numActionsBefore := len(client.Actions())
   506  	logger, _ := ktesting.NewTestContext(t)
   507  	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
   508  	assert.Nil(t, err, "Expected no error syncing service")
   509  
   510  	if len(client.Actions()) != numActionsBefore+2 {
   511  		t.Errorf("Expected 2 more actions, got %d", len(client.Actions())-numActionsBefore)
   512  	}
   513  
   514  	// only 2 slices should match, 2 should be deleted, 1 should be updated as a placeholder
   515  	expectAction(t, client.Actions(), numActionsBefore, "update", "endpointslices")
   516  	expectAction(t, client.Actions(), numActionsBefore+1, "delete", "endpointslices")
   517  
   518  	// ensure cache mutation has not occurred
   519  	cmc.Check(t)
   520  }
   521  
   522  func TestOnEndpointSliceUpdate(t *testing.T) {
   523  	_, esController := newController(t, []string{"node-1"}, time.Duration(0))
   524  	ns := metav1.NamespaceDefault
   525  	serviceName := "testing-1"
   526  	epSlice1 := &discovery.EndpointSlice{
   527  		ObjectMeta: metav1.ObjectMeta{
   528  			Name:      "matching-1",
   529  			Namespace: ns,
   530  			Labels: map[string]string{
   531  				discovery.LabelServiceName: serviceName,
   532  				discovery.LabelManagedBy:   controllerName,
   533  			},
   534  		},
   535  		AddressType: discovery.AddressTypeIPv4,
   536  	}
   537  
   538  	logger, _ := ktesting.NewTestContext(t)
   539  	epSlice2 := epSlice1.DeepCopy()
   540  	epSlice2.Labels[discovery.LabelManagedBy] = "something else"
   541  
   542  	assert.Equal(t, 0, esController.queue.Len())
   543  	esController.onEndpointSliceUpdate(logger, epSlice1, epSlice2)
   544  	err := wait.PollImmediate(100*time.Millisecond, 3*time.Second, func() (bool, error) {
   545  		if esController.queue.Len() > 0 {
   546  			return true, nil
   547  		}
   548  		return false, nil
   549  	})
   550  	if err != nil {
   551  		t.Fatalf("unexpected error waiting for add to queue")
   552  	}
   553  	assert.Equal(t, 1, esController.queue.Len())
   554  }
   555  
   556  func TestSyncService(t *testing.T) {
   557  	creationTimestamp := metav1.Now()
   558  	deletionTimestamp := metav1.Now()
   559  
   560  	testcases := []struct {
   561  		name                  string
   562  		service               *v1.Service
   563  		pods                  []*v1.Pod
   564  		expectedEndpointPorts []discovery.EndpointPort
   565  		expectedEndpoints     []discovery.Endpoint
   566  	}{
   567  		{
   568  			name: "pods with multiple IPs and Service with ipFamilies=ipv4",
   569  			service: &v1.Service{
   570  				ObjectMeta: metav1.ObjectMeta{
   571  					Name:              "foobar",
   572  					Namespace:         "default",
   573  					CreationTimestamp: creationTimestamp,
   574  				},
   575  				Spec: v1.ServiceSpec{
   576  					Ports: []v1.ServicePort{
   577  						{Name: "tcp-example", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP},
   578  						{Name: "udp-example", TargetPort: intstr.FromInt32(161), Protocol: v1.ProtocolUDP},
   579  						{Name: "sctp-example", TargetPort: intstr.FromInt32(3456), Protocol: v1.ProtocolSCTP},
   580  					},
   581  					Selector:   map[string]string{"foo": "bar"},
   582  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
   583  				},
   584  			},
   585  			pods: []*v1.Pod{
   586  				{
   587  					ObjectMeta: metav1.ObjectMeta{
   588  						Namespace:         "default",
   589  						Name:              "pod0",
   590  						Labels:            map[string]string{"foo": "bar"},
   591  						DeletionTimestamp: nil,
   592  					},
   593  					Spec: v1.PodSpec{
   594  						Containers: []v1.Container{{
   595  							Name: "container-1",
   596  						}},
   597  						NodeName: "node-1",
   598  					},
   599  					Status: v1.PodStatus{
   600  						PodIP: "10.0.0.1",
   601  						PodIPs: []v1.PodIP{{
   602  							IP: "10.0.0.1",
   603  						}},
   604  						Conditions: []v1.PodCondition{
   605  							{
   606  								Type:   v1.PodReady,
   607  								Status: v1.ConditionTrue,
   608  							},
   609  						},
   610  					},
   611  				},
   612  				{
   613  					ObjectMeta: metav1.ObjectMeta{
   614  						Namespace:         "default",
   615  						Name:              "pod1",
   616  						Labels:            map[string]string{"foo": "bar"},
   617  						DeletionTimestamp: nil,
   618  					},
   619  					Spec: v1.PodSpec{
   620  						Containers: []v1.Container{{
   621  							Name: "container-1",
   622  						}},
   623  						NodeName: "node-1",
   624  					},
   625  					Status: v1.PodStatus{
   626  						PodIP: "10.0.0.2",
   627  						PodIPs: []v1.PodIP{
   628  							{
   629  								IP: "10.0.0.2",
   630  							},
   631  							{
   632  								IP: "fd08::5678:0000:0000:9abc:def0",
   633  							},
   634  						},
   635  						Conditions: []v1.PodCondition{
   636  							{
   637  								Type:   v1.PodReady,
   638  								Status: v1.ConditionTrue,
   639  							},
   640  						},
   641  					},
   642  				},
   643  			},
   644  			expectedEndpointPorts: []discovery.EndpointPort{
   645  				{
   646  					Name:     pointer.String("sctp-example"),
   647  					Protocol: protoPtr(v1.ProtocolSCTP),
   648  					Port:     pointer.Int32(3456),
   649  				},
   650  				{
   651  					Name:     pointer.String("udp-example"),
   652  					Protocol: protoPtr(v1.ProtocolUDP),
   653  					Port:     pointer.Int32(161),
   654  				},
   655  				{
   656  					Name:     pointer.String("tcp-example"),
   657  					Protocol: protoPtr(v1.ProtocolTCP),
   658  					Port:     pointer.Int32(80),
   659  				},
   660  			},
   661  			expectedEndpoints: []discovery.Endpoint{
   662  				{
   663  					Conditions: discovery.EndpointConditions{
   664  						Ready:       pointer.Bool(true),
   665  						Serving:     pointer.Bool(true),
   666  						Terminating: pointer.Bool(false),
   667  					},
   668  					Addresses: []string{"10.0.0.1"},
   669  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
   670  					NodeName:  pointer.String("node-1"),
   671  				},
   672  				{
   673  					Conditions: discovery.EndpointConditions{
   674  						Ready:       pointer.Bool(true),
   675  						Serving:     pointer.Bool(true),
   676  						Terminating: pointer.Bool(false),
   677  					},
   678  					Addresses: []string{"10.0.0.2"},
   679  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
   680  					NodeName:  pointer.String("node-1"),
   681  				},
   682  			},
   683  		},
   684  		{
   685  			name: "pods with multiple IPs and Service with ipFamilies=ipv6",
   686  			service: &v1.Service{
   687  				ObjectMeta: metav1.ObjectMeta{
   688  					Name:              "foobar",
   689  					Namespace:         "default",
   690  					CreationTimestamp: creationTimestamp,
   691  				},
   692  				Spec: v1.ServiceSpec{
   693  					Ports: []v1.ServicePort{
   694  						{Name: "tcp-example", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP},
   695  						{Name: "udp-example", TargetPort: intstr.FromInt32(161), Protocol: v1.ProtocolUDP},
   696  						{Name: "sctp-example", TargetPort: intstr.FromInt32(3456), Protocol: v1.ProtocolSCTP},
   697  					},
   698  					Selector:   map[string]string{"foo": "bar"},
   699  					IPFamilies: []v1.IPFamily{v1.IPv6Protocol},
   700  				},
   701  			},
   702  			pods: []*v1.Pod{
   703  				{
   704  					ObjectMeta: metav1.ObjectMeta{
   705  						Namespace:         "default",
   706  						Name:              "pod0",
   707  						Labels:            map[string]string{"foo": "bar"},
   708  						DeletionTimestamp: nil,
   709  					},
   710  					Spec: v1.PodSpec{
   711  						Containers: []v1.Container{{
   712  							Name: "container-1",
   713  						}},
   714  						NodeName: "node-1",
   715  					},
   716  					Status: v1.PodStatus{
   717  						PodIP: "10.0.0.1",
   718  						PodIPs: []v1.PodIP{{
   719  							IP: "10.0.0.1",
   720  						}},
   721  						Conditions: []v1.PodCondition{
   722  							{
   723  								Type:   v1.PodReady,
   724  								Status: v1.ConditionTrue,
   725  							},
   726  						},
   727  					},
   728  				},
   729  				{
   730  					ObjectMeta: metav1.ObjectMeta{
   731  						Namespace:         "default",
   732  						Name:              "pod1",
   733  						Labels:            map[string]string{"foo": "bar"},
   734  						DeletionTimestamp: nil,
   735  					},
   736  					Spec: v1.PodSpec{
   737  						Containers: []v1.Container{{
   738  							Name: "container-1",
   739  						}},
   740  						NodeName: "node-1",
   741  					},
   742  					Status: v1.PodStatus{
   743  						PodIP: "10.0.0.2",
   744  						PodIPs: []v1.PodIP{
   745  							{
   746  								IP: "10.0.0.2",
   747  							},
   748  							{
   749  								IP: "fd08::5678:0000:0000:9abc:def0",
   750  							},
   751  						},
   752  						Conditions: []v1.PodCondition{
   753  							{
   754  								Type:   v1.PodReady,
   755  								Status: v1.ConditionTrue,
   756  							},
   757  						},
   758  					},
   759  				},
   760  			},
   761  			expectedEndpointPorts: []discovery.EndpointPort{
   762  				{
   763  					Name:     pointer.String("sctp-example"),
   764  					Protocol: protoPtr(v1.ProtocolSCTP),
   765  					Port:     pointer.Int32(3456),
   766  				},
   767  				{
   768  					Name:     pointer.String("udp-example"),
   769  					Protocol: protoPtr(v1.ProtocolUDP),
   770  					Port:     pointer.Int32(161),
   771  				},
   772  				{
   773  					Name:     pointer.String("tcp-example"),
   774  					Protocol: protoPtr(v1.ProtocolTCP),
   775  					Port:     pointer.Int32(80),
   776  				},
   777  			},
   778  			expectedEndpoints: []discovery.Endpoint{
   779  				{
   780  					Conditions: discovery.EndpointConditions{
   781  						Ready:       pointer.Bool(true),
   782  						Serving:     pointer.Bool(true),
   783  						Terminating: pointer.Bool(false),
   784  					},
   785  					Addresses: []string{"fd08::5678:0000:0000:9abc:def0"},
   786  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
   787  					NodeName:  pointer.String("node-1"),
   788  				},
   789  			},
   790  		},
   791  		{
   792  			name: "Terminating pods",
   793  			service: &v1.Service{
   794  				ObjectMeta: metav1.ObjectMeta{
   795  					Name:              "foobar",
   796  					Namespace:         "default",
   797  					CreationTimestamp: creationTimestamp,
   798  				},
   799  				Spec: v1.ServiceSpec{
   800  					Ports: []v1.ServicePort{
   801  						{Name: "tcp-example", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP},
   802  						{Name: "udp-example", TargetPort: intstr.FromInt32(161), Protocol: v1.ProtocolUDP},
   803  						{Name: "sctp-example", TargetPort: intstr.FromInt32(3456), Protocol: v1.ProtocolSCTP},
   804  					},
   805  					Selector:   map[string]string{"foo": "bar"},
   806  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
   807  				},
   808  			},
   809  			pods: []*v1.Pod{
   810  				{
   811  					// one ready pod for comparison
   812  					ObjectMeta: metav1.ObjectMeta{
   813  						Namespace:         "default",
   814  						Name:              "pod0",
   815  						Labels:            map[string]string{"foo": "bar"},
   816  						DeletionTimestamp: nil,
   817  					},
   818  					Spec: v1.PodSpec{
   819  						Containers: []v1.Container{{
   820  							Name: "container-1",
   821  						}},
   822  						NodeName: "node-1",
   823  					},
   824  					Status: v1.PodStatus{
   825  						PodIP: "10.0.0.1",
   826  						PodIPs: []v1.PodIP{{
   827  							IP: "10.0.0.1",
   828  						}},
   829  						Conditions: []v1.PodCondition{
   830  							{
   831  								Type:   v1.PodReady,
   832  								Status: v1.ConditionTrue,
   833  							},
   834  						},
   835  					},
   836  				},
   837  				{
   838  					ObjectMeta: metav1.ObjectMeta{
   839  						Namespace:         "default",
   840  						Name:              "pod1",
   841  						Labels:            map[string]string{"foo": "bar"},
   842  						DeletionTimestamp: &deletionTimestamp,
   843  					},
   844  					Spec: v1.PodSpec{
   845  						Containers: []v1.Container{{
   846  							Name: "container-1",
   847  						}},
   848  						NodeName: "node-1",
   849  					},
   850  					Status: v1.PodStatus{
   851  						PodIP: "10.0.0.2",
   852  						PodIPs: []v1.PodIP{
   853  							{
   854  								IP: "10.0.0.2",
   855  							},
   856  						},
   857  						Conditions: []v1.PodCondition{
   858  							{
   859  								Type:   v1.PodReady,
   860  								Status: v1.ConditionTrue,
   861  							},
   862  						},
   863  					},
   864  				},
   865  			},
   866  			expectedEndpointPorts: []discovery.EndpointPort{
   867  				{
   868  					Name:     pointer.String("sctp-example"),
   869  					Protocol: protoPtr(v1.ProtocolSCTP),
   870  					Port:     pointer.Int32(3456),
   871  				},
   872  				{
   873  					Name:     pointer.String("udp-example"),
   874  					Protocol: protoPtr(v1.ProtocolUDP),
   875  					Port:     pointer.Int32(161),
   876  				},
   877  				{
   878  					Name:     pointer.String("tcp-example"),
   879  					Protocol: protoPtr(v1.ProtocolTCP),
   880  					Port:     pointer.Int32(80),
   881  				},
   882  			},
   883  			expectedEndpoints: []discovery.Endpoint{
   884  				{
   885  					Conditions: discovery.EndpointConditions{
   886  						Ready:       pointer.Bool(true),
   887  						Serving:     pointer.Bool(true),
   888  						Terminating: pointer.Bool(false),
   889  					},
   890  					Addresses: []string{"10.0.0.1"},
   891  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
   892  					NodeName:  pointer.String("node-1"),
   893  				},
   894  				{
   895  					Conditions: discovery.EndpointConditions{
   896  						Ready:       pointer.Bool(false),
   897  						Serving:     pointer.Bool(true),
   898  						Terminating: pointer.Bool(true),
   899  					},
   900  					Addresses: []string{"10.0.0.2"},
   901  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
   902  					NodeName:  pointer.String("node-1"),
   903  				},
   904  			},
   905  		},
   906  		{
   907  			name: "Not ready terminating pods",
   908  			service: &v1.Service{
   909  				ObjectMeta: metav1.ObjectMeta{
   910  					Name:              "foobar",
   911  					Namespace:         "default",
   912  					CreationTimestamp: creationTimestamp,
   913  				},
   914  				Spec: v1.ServiceSpec{
   915  					Ports: []v1.ServicePort{
   916  						{Name: "tcp-example", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP},
   917  						{Name: "udp-example", TargetPort: intstr.FromInt32(161), Protocol: v1.ProtocolUDP},
   918  						{Name: "sctp-example", TargetPort: intstr.FromInt32(3456), Protocol: v1.ProtocolSCTP},
   919  					},
   920  					Selector:   map[string]string{"foo": "bar"},
   921  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
   922  				},
   923  			},
   924  			pods: []*v1.Pod{
   925  				{
   926  					// one ready pod for comparison
   927  					ObjectMeta: metav1.ObjectMeta{
   928  						Namespace:         "default",
   929  						Name:              "pod0",
   930  						Labels:            map[string]string{"foo": "bar"},
   931  						DeletionTimestamp: nil,
   932  					},
   933  					Spec: v1.PodSpec{
   934  						Containers: []v1.Container{{
   935  							Name: "container-1",
   936  						}},
   937  						NodeName: "node-1",
   938  					},
   939  					Status: v1.PodStatus{
   940  						PodIP: "10.0.0.1",
   941  						PodIPs: []v1.PodIP{{
   942  							IP: "10.0.0.1",
   943  						}},
   944  						Conditions: []v1.PodCondition{
   945  							{
   946  								Type:   v1.PodReady,
   947  								Status: v1.ConditionTrue,
   948  							},
   949  						},
   950  					},
   951  				},
   952  				{
   953  					ObjectMeta: metav1.ObjectMeta{
   954  						Namespace:         "default",
   955  						Name:              "pod1",
   956  						Labels:            map[string]string{"foo": "bar"},
   957  						DeletionTimestamp: &deletionTimestamp,
   958  					},
   959  					Spec: v1.PodSpec{
   960  						Containers: []v1.Container{{
   961  							Name: "container-1",
   962  						}},
   963  						NodeName: "node-1",
   964  					},
   965  					Status: v1.PodStatus{
   966  						PodIP: "10.0.0.2",
   967  						PodIPs: []v1.PodIP{
   968  							{
   969  								IP: "10.0.0.2",
   970  							},
   971  						},
   972  						Conditions: []v1.PodCondition{
   973  							{
   974  								Type:   v1.PodReady,
   975  								Status: v1.ConditionFalse,
   976  							},
   977  						},
   978  					},
   979  				},
   980  			},
   981  			expectedEndpointPorts: []discovery.EndpointPort{
   982  				{
   983  					Name:     pointer.String("sctp-example"),
   984  					Protocol: protoPtr(v1.ProtocolSCTP),
   985  					Port:     pointer.Int32(3456),
   986  				},
   987  				{
   988  					Name:     pointer.String("udp-example"),
   989  					Protocol: protoPtr(v1.ProtocolUDP),
   990  					Port:     pointer.Int32(161),
   991  				},
   992  				{
   993  					Name:     pointer.String("tcp-example"),
   994  					Protocol: protoPtr(v1.ProtocolTCP),
   995  					Port:     pointer.Int32(80),
   996  				},
   997  			},
   998  			expectedEndpoints: []discovery.Endpoint{
   999  				{
  1000  					Conditions: discovery.EndpointConditions{
  1001  						Ready:       pointer.Bool(true),
  1002  						Serving:     pointer.Bool(true),
  1003  						Terminating: pointer.Bool(false),
  1004  					},
  1005  					Addresses: []string{"10.0.0.1"},
  1006  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
  1007  					NodeName:  pointer.String("node-1"),
  1008  				},
  1009  				{
  1010  					Conditions: discovery.EndpointConditions{
  1011  						Ready:       pointer.Bool(false),
  1012  						Serving:     pointer.Bool(false),
  1013  						Terminating: pointer.Bool(true),
  1014  					},
  1015  					Addresses: []string{"10.0.0.2"},
  1016  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
  1017  					NodeName:  pointer.String("node-1"),
  1018  				},
  1019  			},
  1020  		},
  1021  		{
  1022  			name: "Ready and Complete pods with same IPs",
  1023  			service: &v1.Service{
  1024  				ObjectMeta: metav1.ObjectMeta{
  1025  					Name:              "foobar",
  1026  					Namespace:         "default",
  1027  					CreationTimestamp: creationTimestamp,
  1028  				},
  1029  				Spec: v1.ServiceSpec{
  1030  					Ports: []v1.ServicePort{
  1031  						{Name: "tcp-example", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP},
  1032  						{Name: "udp-example", TargetPort: intstr.FromInt32(161), Protocol: v1.ProtocolUDP},
  1033  						{Name: "sctp-example", TargetPort: intstr.FromInt32(3456), Protocol: v1.ProtocolSCTP},
  1034  					},
  1035  					Selector:   map[string]string{"foo": "bar"},
  1036  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
  1037  				},
  1038  			},
  1039  			pods: []*v1.Pod{
  1040  				{
  1041  					ObjectMeta: metav1.ObjectMeta{
  1042  						Namespace:         "default",
  1043  						Name:              "pod0",
  1044  						Labels:            map[string]string{"foo": "bar"},
  1045  						DeletionTimestamp: nil,
  1046  					},
  1047  					Spec: v1.PodSpec{
  1048  						Containers: []v1.Container{{
  1049  							Name: "container-1",
  1050  						}},
  1051  						NodeName: "node-1",
  1052  					},
  1053  					Status: v1.PodStatus{
  1054  						PodIP: "10.0.0.1",
  1055  						PodIPs: []v1.PodIP{{
  1056  							IP: "10.0.0.1",
  1057  						}},
  1058  						Conditions: []v1.PodCondition{
  1059  							{
  1060  								Type:   v1.PodInitialized,
  1061  								Status: v1.ConditionTrue,
  1062  							},
  1063  							{
  1064  								Type:   v1.PodReady,
  1065  								Status: v1.ConditionTrue,
  1066  							},
  1067  							{
  1068  								Type:   v1.ContainersReady,
  1069  								Status: v1.ConditionTrue,
  1070  							},
  1071  						},
  1072  					},
  1073  				},
  1074  				{
  1075  					ObjectMeta: metav1.ObjectMeta{
  1076  						Namespace:         "default",
  1077  						Name:              "pod1",
  1078  						Labels:            map[string]string{"foo": "bar"},
  1079  						DeletionTimestamp: nil,
  1080  					},
  1081  					Spec: v1.PodSpec{
  1082  						Containers: []v1.Container{{
  1083  							Name: "container-1",
  1084  						}},
  1085  						NodeName: "node-1",
  1086  					},
  1087  					Status: v1.PodStatus{
  1088  						PodIP: "10.0.0.1",
  1089  						PodIPs: []v1.PodIP{
  1090  							{
  1091  								IP: "10.0.0.1",
  1092  							},
  1093  						},
  1094  						Conditions: []v1.PodCondition{
  1095  							{
  1096  								Type:   v1.PodInitialized,
  1097  								Status: v1.ConditionTrue,
  1098  							},
  1099  							{
  1100  								Type:   v1.PodReady,
  1101  								Status: v1.ConditionFalse,
  1102  							},
  1103  							{
  1104  								Type:   v1.ContainersReady,
  1105  								Status: v1.ConditionFalse,
  1106  							},
  1107  						},
  1108  					},
  1109  				},
  1110  			},
  1111  			expectedEndpointPorts: []discovery.EndpointPort{
  1112  				{
  1113  					Name:     pointer.StringPtr("sctp-example"),
  1114  					Protocol: protoPtr(v1.ProtocolSCTP),
  1115  					Port:     pointer.Int32Ptr(int32(3456)),
  1116  				},
  1117  				{
  1118  					Name:     pointer.StringPtr("udp-example"),
  1119  					Protocol: protoPtr(v1.ProtocolUDP),
  1120  					Port:     pointer.Int32Ptr(int32(161)),
  1121  				},
  1122  				{
  1123  					Name:     pointer.StringPtr("tcp-example"),
  1124  					Protocol: protoPtr(v1.ProtocolTCP),
  1125  					Port:     pointer.Int32Ptr(int32(80)),
  1126  				},
  1127  			},
  1128  			expectedEndpoints: []discovery.Endpoint{
  1129  				{
  1130  					Conditions: discovery.EndpointConditions{
  1131  						Ready:       pointer.BoolPtr(true),
  1132  						Serving:     pointer.BoolPtr(true),
  1133  						Terminating: pointer.BoolPtr(false),
  1134  					},
  1135  					Addresses: []string{"10.0.0.1"},
  1136  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
  1137  					NodeName:  pointer.StringPtr("node-1"),
  1138  				},
  1139  				{
  1140  					Conditions: discovery.EndpointConditions{
  1141  						Ready:       pointer.BoolPtr(false),
  1142  						Serving:     pointer.BoolPtr(false),
  1143  						Terminating: pointer.BoolPtr(false),
  1144  					},
  1145  					Addresses: []string{"10.0.0.1"},
  1146  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
  1147  					NodeName:  pointer.StringPtr("node-1"),
  1148  				},
  1149  			},
  1150  		},
  1151  		{
  1152  			// Any client reading EndpointSlices already has to handle deduplicating endpoints by IP address.
  1153  			// If 2 pods are ready, something has gone wrong further up the stack, we shouldn't try to hide that.
  1154  			name: "Two Ready pods with same IPs",
  1155  			service: &v1.Service{
  1156  				ObjectMeta: metav1.ObjectMeta{
  1157  					Name:              "foobar",
  1158  					Namespace:         "default",
  1159  					CreationTimestamp: creationTimestamp,
  1160  				},
  1161  				Spec: v1.ServiceSpec{
  1162  					Ports: []v1.ServicePort{
  1163  						{Name: "tcp-example", TargetPort: intstr.FromInt32(80), Protocol: v1.ProtocolTCP},
  1164  						{Name: "udp-example", TargetPort: intstr.FromInt32(161), Protocol: v1.ProtocolUDP},
  1165  						{Name: "sctp-example", TargetPort: intstr.FromInt32(3456), Protocol: v1.ProtocolSCTP},
  1166  					},
  1167  					Selector:   map[string]string{"foo": "bar"},
  1168  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
  1169  				},
  1170  			},
  1171  			pods: []*v1.Pod{
  1172  				{
  1173  					ObjectMeta: metav1.ObjectMeta{
  1174  						Namespace:         "default",
  1175  						Name:              "pod0",
  1176  						Labels:            map[string]string{"foo": "bar"},
  1177  						DeletionTimestamp: nil,
  1178  					},
  1179  					Spec: v1.PodSpec{
  1180  						Containers: []v1.Container{{
  1181  							Name: "container-1",
  1182  						}},
  1183  						NodeName: "node-1",
  1184  					},
  1185  					Status: v1.PodStatus{
  1186  						PodIP: "10.0.0.1",
  1187  						PodIPs: []v1.PodIP{{
  1188  							IP: "10.0.0.1",
  1189  						}},
  1190  						Conditions: []v1.PodCondition{
  1191  							{
  1192  								Type:   v1.PodInitialized,
  1193  								Status: v1.ConditionTrue,
  1194  							},
  1195  							{
  1196  								Type:   v1.PodReady,
  1197  								Status: v1.ConditionTrue,
  1198  							},
  1199  							{
  1200  								Type:   v1.ContainersReady,
  1201  								Status: v1.ConditionTrue,
  1202  							},
  1203  						},
  1204  					},
  1205  				},
  1206  				{
  1207  					ObjectMeta: metav1.ObjectMeta{
  1208  						Namespace:         "default",
  1209  						Name:              "pod1",
  1210  						Labels:            map[string]string{"foo": "bar"},
  1211  						DeletionTimestamp: nil,
  1212  					},
  1213  					Spec: v1.PodSpec{
  1214  						Containers: []v1.Container{{
  1215  							Name: "container-1",
  1216  						}},
  1217  						NodeName: "node-1",
  1218  					},
  1219  					Status: v1.PodStatus{
  1220  						PodIP: "10.0.0.1",
  1221  						PodIPs: []v1.PodIP{
  1222  							{
  1223  								IP: "10.0.0.1",
  1224  							},
  1225  						},
  1226  						Conditions: []v1.PodCondition{
  1227  							{
  1228  								Type:   v1.PodInitialized,
  1229  								Status: v1.ConditionTrue,
  1230  							},
  1231  							{
  1232  								Type:   v1.PodReady,
  1233  								Status: v1.ConditionTrue,
  1234  							},
  1235  							{
  1236  								Type:   v1.ContainersReady,
  1237  								Status: v1.ConditionTrue,
  1238  							},
  1239  						},
  1240  					},
  1241  				},
  1242  			},
  1243  			expectedEndpointPorts: []discovery.EndpointPort{
  1244  				{
  1245  					Name:     pointer.StringPtr("sctp-example"),
  1246  					Protocol: protoPtr(v1.ProtocolSCTP),
  1247  					Port:     pointer.Int32Ptr(int32(3456)),
  1248  				},
  1249  				{
  1250  					Name:     pointer.StringPtr("udp-example"),
  1251  					Protocol: protoPtr(v1.ProtocolUDP),
  1252  					Port:     pointer.Int32Ptr(int32(161)),
  1253  				},
  1254  				{
  1255  					Name:     pointer.StringPtr("tcp-example"),
  1256  					Protocol: protoPtr(v1.ProtocolTCP),
  1257  					Port:     pointer.Int32Ptr(int32(80)),
  1258  				},
  1259  			},
  1260  			expectedEndpoints: []discovery.Endpoint{
  1261  				{
  1262  					Conditions: discovery.EndpointConditions{
  1263  						Ready:       pointer.BoolPtr(true),
  1264  						Serving:     pointer.BoolPtr(true),
  1265  						Terminating: pointer.BoolPtr(false),
  1266  					},
  1267  					Addresses: []string{"10.0.0.1"},
  1268  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
  1269  					NodeName:  pointer.StringPtr("node-1"),
  1270  				},
  1271  				{
  1272  					Conditions: discovery.EndpointConditions{
  1273  						Ready:       pointer.BoolPtr(true),
  1274  						Serving:     pointer.BoolPtr(true),
  1275  						Terminating: pointer.BoolPtr(false),
  1276  					},
  1277  					Addresses: []string{"10.0.0.1"},
  1278  					TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
  1279  					NodeName:  pointer.StringPtr("node-1"),
  1280  				},
  1281  			},
  1282  		},
  1283  	}
  1284  
  1285  	for _, testcase := range testcases {
  1286  		t.Run(testcase.name, func(t *testing.T) {
  1287  			client, esController := newController(t, []string{"node-1"}, time.Duration(0))
  1288  
  1289  			for _, pod := range testcase.pods {
  1290  				esController.podStore.Add(pod)
  1291  			}
  1292  			esController.serviceStore.Add(testcase.service)
  1293  
  1294  			_, err := esController.client.CoreV1().Services(testcase.service.Namespace).Create(context.TODO(), testcase.service, metav1.CreateOptions{})
  1295  			assert.Nil(t, err, "Expected no error creating service")
  1296  
  1297  			logger, _ := ktesting.NewTestContext(t)
  1298  			err = esController.syncService(logger, fmt.Sprintf("%s/%s", testcase.service.Namespace, testcase.service.Name))
  1299  			assert.Nil(t, err)
  1300  
  1301  			// last action should be to create endpoint slice
  1302  			expectActions(t, client.Actions(), 1, "create", "endpointslices")
  1303  			sliceList, err := client.DiscoveryV1().EndpointSlices(testcase.service.Namespace).List(context.TODO(), metav1.ListOptions{})
  1304  			assert.Nil(t, err, "Expected no error fetching endpoint slices")
  1305  			assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices")
  1306  
  1307  			// ensure all attributes of endpoint slice match expected state
  1308  			slice := sliceList.Items[0]
  1309  			assert.Equal(t, slice.Annotations[v1.EndpointsLastChangeTriggerTime], creationTimestamp.UTC().Format(time.RFC3339Nano))
  1310  			assert.ElementsMatch(t, testcase.expectedEndpointPorts, slice.Ports)
  1311  			assert.ElementsMatch(t, testcase.expectedEndpoints, slice.Endpoints)
  1312  		})
  1313  	}
  1314  }
  1315  
  1316  // TestPodAddsBatching verifies that endpoint updates caused by pod addition are batched together.
  1317  // This test uses real time.Sleep, as there is no easy way to mock time in endpoints controller now.
  1318  // TODO(mborsz): Migrate this test to mock clock when possible.
  1319  func TestPodAddsBatching(t *testing.T) {
  1320  	t.Parallel()
  1321  
  1322  	type podAdd struct {
  1323  		delay time.Duration
  1324  	}
  1325  
  1326  	tests := []struct {
  1327  		name             string
  1328  		batchPeriod      time.Duration
  1329  		adds             []podAdd
  1330  		finalDelay       time.Duration
  1331  		wantRequestCount int
  1332  	}{
  1333  		{
  1334  			name:        "three adds with no batching",
  1335  			batchPeriod: 0 * time.Second,
  1336  			adds: []podAdd{
  1337  				{
  1338  					// endpoints.Run needs ~100 ms to start processing updates.
  1339  					delay: 200 * time.Millisecond,
  1340  				},
  1341  				{
  1342  					delay: 100 * time.Millisecond,
  1343  				},
  1344  				{
  1345  					delay: 100 * time.Millisecond,
  1346  				},
  1347  			},
  1348  			finalDelay:       3 * time.Second,
  1349  			wantRequestCount: 3,
  1350  		},
  1351  		{
  1352  			name:        "three adds in one batch",
  1353  			batchPeriod: 1 * time.Second,
  1354  			adds: []podAdd{
  1355  				{
  1356  					// endpoints.Run needs ~100 ms to start processing updates.
  1357  					delay: 200 * time.Millisecond,
  1358  				},
  1359  				{
  1360  					delay: 100 * time.Millisecond,
  1361  				},
  1362  				{
  1363  					delay: 100 * time.Millisecond,
  1364  				},
  1365  			},
  1366  			finalDelay:       3 * time.Second,
  1367  			wantRequestCount: 1,
  1368  		},
  1369  		{
  1370  			name:        "three adds in two batches",
  1371  			batchPeriod: 1 * time.Second,
  1372  			adds: []podAdd{
  1373  				{
  1374  					// endpoints.Run needs ~100 ms to start processing updates.
  1375  					delay: 200 * time.Millisecond,
  1376  				},
  1377  				{
  1378  					delay: 100 * time.Millisecond,
  1379  				},
  1380  				{
  1381  					delay: 1 * time.Second,
  1382  				},
  1383  			},
  1384  			finalDelay:       3 * time.Second,
  1385  			wantRequestCount: 2,
  1386  		},
  1387  	}
  1388  
  1389  	for _, tc := range tests {
  1390  		t.Run(tc.name, func(t *testing.T) {
  1391  			ns := metav1.NamespaceDefault
  1392  			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
  1393  			stopCh := make(chan struct{})
  1394  			defer close(stopCh)
  1395  
  1396  			_, ctx := ktesting.NewTestContext(t)
  1397  			go esController.Run(ctx, 1)
  1398  
  1399  			esController.serviceStore.Add(&v1.Service{
  1400  				ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
  1401  				Spec: v1.ServiceSpec{
  1402  					Selector:   map[string]string{"foo": "bar"},
  1403  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
  1404  					Ports:      []v1.ServicePort{{Port: 80}},
  1405  				},
  1406  			})
  1407  
  1408  			for i, add := range tc.adds {
  1409  				time.Sleep(add.delay)
  1410  
  1411  				p := newPod(i, ns, true, 0, false)
  1412  				esController.podStore.Add(p)
  1413  				esController.addPod(p)
  1414  			}
  1415  
  1416  			time.Sleep(tc.finalDelay)
  1417  			assert.Len(t, client.Actions(), tc.wantRequestCount)
  1418  			// In case of error, make debugging easier.
  1419  			for _, action := range client.Actions() {
  1420  				t.Logf("action: %v %v", action.GetVerb(), action.GetResource())
  1421  			}
  1422  		})
  1423  	}
  1424  }
  1425  
  1426  // TestPodUpdatesBatching verifies that endpoint updates caused by pod updates are batched together.
  1427  // This test uses real time.Sleep, as there is no easy way to mock time in endpoints controller now.
  1428  // TODO(mborsz): Migrate this test to mock clock when possible.
  1429  func TestPodUpdatesBatching(t *testing.T) {
  1430  	t.Parallel()
  1431  
  1432  	resourceVersion := 1
  1433  	type podUpdate struct {
  1434  		delay   time.Duration
  1435  		podName string
  1436  		podIP   string
  1437  	}
  1438  
  1439  	tests := []struct {
  1440  		name             string
  1441  		batchPeriod      time.Duration
  1442  		podsCount        int
  1443  		updates          []podUpdate
  1444  		finalDelay       time.Duration
  1445  		wantRequestCount int
  1446  	}{
  1447  		{
  1448  			name:        "three updates with no batching",
  1449  			batchPeriod: 0 * time.Second,
  1450  			podsCount:   10,
  1451  			updates: []podUpdate{
  1452  				{
  1453  					// endpoints.Run needs ~100 ms to start processing updates.
  1454  					delay:   200 * time.Millisecond,
  1455  					podName: "pod0",
  1456  					podIP:   "10.0.0.0",
  1457  				},
  1458  				{
  1459  					delay:   100 * time.Millisecond,
  1460  					podName: "pod1",
  1461  					podIP:   "10.0.0.1",
  1462  				},
  1463  				{
  1464  					delay:   100 * time.Millisecond,
  1465  					podName: "pod2",
  1466  					podIP:   "10.0.0.2",
  1467  				},
  1468  			},
  1469  			finalDelay:       3 * time.Second,
  1470  			wantRequestCount: 3,
  1471  		},
  1472  		{
  1473  			name:        "three updates in one batch",
  1474  			batchPeriod: 1 * time.Second,
  1475  			podsCount:   10,
  1476  			updates: []podUpdate{
  1477  				{
  1478  					// endpoints.Run needs ~100 ms to start processing updates.
  1479  					delay:   200 * time.Millisecond,
  1480  					podName: "pod0",
  1481  					podIP:   "10.0.0.0",
  1482  				},
  1483  				{
  1484  					delay:   100 * time.Millisecond,
  1485  					podName: "pod1",
  1486  					podIP:   "10.0.0.1",
  1487  				},
  1488  				{
  1489  					delay:   100 * time.Millisecond,
  1490  					podName: "pod2",
  1491  					podIP:   "10.0.0.2",
  1492  				},
  1493  			},
  1494  			finalDelay:       3 * time.Second,
  1495  			wantRequestCount: 1,
  1496  		},
  1497  		{
  1498  			name:        "three updates in two batches",
  1499  			batchPeriod: 1 * time.Second,
  1500  			podsCount:   10,
  1501  			updates: []podUpdate{
  1502  				{
  1503  					// endpoints.Run needs ~100 ms to start processing updates.
  1504  					delay:   200 * time.Millisecond,
  1505  					podName: "pod0",
  1506  					podIP:   "10.0.0.0",
  1507  				},
  1508  				{
  1509  					delay:   100 * time.Millisecond,
  1510  					podName: "pod1",
  1511  					podIP:   "10.0.0.1",
  1512  				},
  1513  				{
  1514  					delay:   1 * time.Second,
  1515  					podName: "pod2",
  1516  					podIP:   "10.0.0.2",
  1517  				},
  1518  			},
  1519  			finalDelay:       3 * time.Second,
  1520  			wantRequestCount: 2,
  1521  		},
  1522  	}
  1523  
  1524  	for _, tc := range tests {
  1525  		t.Run(tc.name, func(t *testing.T) {
  1526  			ns := metav1.NamespaceDefault
  1527  			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
  1528  			stopCh := make(chan struct{})
  1529  			defer close(stopCh)
  1530  
  1531  			_, ctx := ktesting.NewTestContext(t)
  1532  			go esController.Run(ctx, 1)
  1533  
  1534  			addPods(t, esController, ns, tc.podsCount)
  1535  
  1536  			esController.serviceStore.Add(&v1.Service{
  1537  				ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
  1538  				Spec: v1.ServiceSpec{
  1539  					Selector:   map[string]string{"foo": "bar"},
  1540  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
  1541  					Ports:      []v1.ServicePort{{Port: 80}},
  1542  				},
  1543  			})
  1544  
  1545  			for _, update := range tc.updates {
  1546  				time.Sleep(update.delay)
  1547  
  1548  				old, exists, err := esController.podStore.GetByKey(fmt.Sprintf("%s/%s", ns, update.podName))
  1549  				if err != nil {
  1550  					t.Fatalf("Error while retrieving old value of %q: %v", update.podName, err)
  1551  				}
  1552  				if !exists {
  1553  					t.Fatalf("Pod %q doesn't exist", update.podName)
  1554  				}
  1555  				oldPod := old.(*v1.Pod)
  1556  				newPod := oldPod.DeepCopy()
  1557  				newPod.Status.PodIPs[0].IP = update.podIP
  1558  				newPod.ResourceVersion = strconv.Itoa(resourceVersion)
  1559  				resourceVersion++
  1560  
  1561  				esController.podStore.Update(newPod)
  1562  				esController.updatePod(oldPod, newPod)
  1563  			}
  1564  
  1565  			time.Sleep(tc.finalDelay)
  1566  			assert.Len(t, client.Actions(), tc.wantRequestCount)
  1567  			// In case of error, make debugging easier.
  1568  			for _, action := range client.Actions() {
  1569  				t.Logf("action: %v %v", action.GetVerb(), action.GetResource())
  1570  			}
  1571  		})
  1572  	}
  1573  }
  1574  
  1575  // TestPodDeleteBatching verifies that endpoint updates caused by pod deletion are batched together.
  1576  // This test uses real time.Sleep, as there is no easy way to mock time in endpoints controller now.
  1577  // TODO(mborsz): Migrate this test to mock clock when possible.
  1578  func TestPodDeleteBatching(t *testing.T) {
  1579  	t.Parallel()
  1580  
  1581  	type podDelete struct {
  1582  		delay   time.Duration
  1583  		podName string
  1584  	}
  1585  
  1586  	tests := []struct {
  1587  		name             string
  1588  		batchPeriod      time.Duration
  1589  		podsCount        int
  1590  		deletes          []podDelete
  1591  		finalDelay       time.Duration
  1592  		wantRequestCount int
  1593  	}{
  1594  		{
  1595  			name:        "three deletes with no batching",
  1596  			batchPeriod: 0 * time.Second,
  1597  			podsCount:   10,
  1598  			deletes: []podDelete{
  1599  				{
  1600  					// endpoints.Run needs ~100 ms to start processing updates.
  1601  					delay:   200 * time.Millisecond,
  1602  					podName: "pod0",
  1603  				},
  1604  				{
  1605  					delay:   100 * time.Millisecond,
  1606  					podName: "pod1",
  1607  				},
  1608  				{
  1609  					delay:   100 * time.Millisecond,
  1610  					podName: "pod2",
  1611  				},
  1612  			},
  1613  			finalDelay:       3 * time.Second,
  1614  			wantRequestCount: 3,
  1615  		},
  1616  		{
  1617  			name:        "three deletes in one batch",
  1618  			batchPeriod: 1 * time.Second,
  1619  			podsCount:   10,
  1620  			deletes: []podDelete{
  1621  				{
  1622  					// endpoints.Run needs ~100 ms to start processing updates.
  1623  					delay:   200 * time.Millisecond,
  1624  					podName: "pod0",
  1625  				},
  1626  				{
  1627  					delay:   100 * time.Millisecond,
  1628  					podName: "pod1",
  1629  				},
  1630  				{
  1631  					delay:   100 * time.Millisecond,
  1632  					podName: "pod2",
  1633  				},
  1634  			},
  1635  			finalDelay:       3 * time.Second,
  1636  			wantRequestCount: 1,
  1637  		},
  1638  		{
  1639  			name:        "three deletes in two batches",
  1640  			batchPeriod: 1 * time.Second,
  1641  			podsCount:   10,
  1642  			deletes: []podDelete{
  1643  				{
  1644  					// endpoints.Run needs ~100 ms to start processing updates.
  1645  					delay:   200 * time.Millisecond,
  1646  					podName: "pod0",
  1647  				},
  1648  				{
  1649  					delay:   100 * time.Millisecond,
  1650  					podName: "pod1",
  1651  				},
  1652  				{
  1653  					delay:   1 * time.Second,
  1654  					podName: "pod2",
  1655  				},
  1656  			},
  1657  			finalDelay:       3 * time.Second,
  1658  			wantRequestCount: 2,
  1659  		},
  1660  	}
  1661  
  1662  	for _, tc := range tests {
  1663  		t.Run(tc.name, func(t *testing.T) {
  1664  			ns := metav1.NamespaceDefault
  1665  			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
  1666  			stopCh := make(chan struct{})
  1667  			defer close(stopCh)
  1668  
  1669  			_, ctx := ktesting.NewTestContext(t)
  1670  			go esController.Run(ctx, 1)
  1671  
  1672  			addPods(t, esController, ns, tc.podsCount)
  1673  
  1674  			esController.serviceStore.Add(&v1.Service{
  1675  				ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
  1676  				Spec: v1.ServiceSpec{
  1677  					Selector:   map[string]string{"foo": "bar"},
  1678  					IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
  1679  					Ports:      []v1.ServicePort{{Port: 80}},
  1680  				},
  1681  			})
  1682  
  1683  			for _, update := range tc.deletes {
  1684  				time.Sleep(update.delay)
  1685  
  1686  				old, exists, err := esController.podStore.GetByKey(fmt.Sprintf("%s/%s", ns, update.podName))
  1687  				assert.Nil(t, err, "error while retrieving old value of %q: %v", update.podName, err)
  1688  				assert.Equal(t, true, exists, "pod should exist")
  1689  				esController.podStore.Delete(old)
  1690  				esController.deletePod(old)
  1691  			}
  1692  
  1693  			time.Sleep(tc.finalDelay)
  1694  			assert.Len(t, client.Actions(), tc.wantRequestCount)
  1695  			// In case of error, make debugging easier.
  1696  			for _, action := range client.Actions() {
  1697  				t.Logf("action: %v %v", action.GetVerb(), action.GetResource())
  1698  			}
  1699  		})
  1700  	}
  1701  }
  1702  
  1703  func TestSyncServiceStaleInformer(t *testing.T) {
  1704  	testcases := []struct {
  1705  		name                     string
  1706  		informerGenerationNumber int64
  1707  		trackerGenerationNumber  int64
  1708  		expectError              bool
  1709  	}{
  1710  		{
  1711  			name:                     "informer cache outdated",
  1712  			informerGenerationNumber: 10,
  1713  			trackerGenerationNumber:  12,
  1714  			expectError:              true,
  1715  		},
  1716  		{
  1717  			name:                     "cache and tracker synced",
  1718  			informerGenerationNumber: 10,
  1719  			trackerGenerationNumber:  10,
  1720  			expectError:              false,
  1721  		},
  1722  		{
  1723  			name:                     "tracker outdated",
  1724  			informerGenerationNumber: 10,
  1725  			trackerGenerationNumber:  1,
  1726  			expectError:              false,
  1727  		},
  1728  	}
  1729  
  1730  	for _, testcase := range testcases {
  1731  		t.Run(testcase.name, func(t *testing.T) {
  1732  			_, esController := newController(t, []string{"node-1"}, time.Duration(0))
  1733  			ns := metav1.NamespaceDefault
  1734  			serviceName := "testing-1"
  1735  
  1736  			// Store Service in the cache
  1737  			esController.serviceStore.Add(&v1.Service{
  1738  				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
  1739  				Spec: v1.ServiceSpec{
  1740  					Selector: map[string]string{"foo": "bar"},
  1741  					Ports:    []v1.ServicePort{{TargetPort: intstr.FromInt32(80)}},
  1742  				},
  1743  			})
  1744  
  1745  			// Create EndpointSlice in the informer cache with informerGenerationNumber
  1746  			epSlice1 := &discovery.EndpointSlice{
  1747  				ObjectMeta: metav1.ObjectMeta{
  1748  					Name:       "matching-1",
  1749  					Namespace:  ns,
  1750  					Generation: testcase.informerGenerationNumber,
  1751  					Labels: map[string]string{
  1752  						discovery.LabelServiceName: serviceName,
  1753  						discovery.LabelManagedBy:   controllerName,
  1754  					},
  1755  				},
  1756  				AddressType: discovery.AddressTypeIPv4,
  1757  			}
  1758  			err := esController.endpointSliceStore.Add(epSlice1)
  1759  			if err != nil {
  1760  				t.Fatalf("Expected no error adding EndpointSlice: %v", err)
  1761  			}
  1762  
  1763  			// Create EndpointSlice in the tracker with trackerGenerationNumber
  1764  			epSlice2 := epSlice1.DeepCopy()
  1765  			epSlice2.Generation = testcase.trackerGenerationNumber
  1766  			esController.endpointSliceTracker.Update(epSlice2)
  1767  
  1768  			logger, _ := ktesting.NewTestContext(t)
  1769  			err = esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
  1770  			// Check if we got a StaleInformerCache error
  1771  			if endpointslicepkg.IsStaleInformerCacheErr(err) != testcase.expectError {
  1772  				t.Fatalf("Expected error because informer cache is outdated")
  1773  			}
  1774  
  1775  		})
  1776  	}
  1777  }
  1778  
  1779  func Test_checkNodeTopologyDistribution(t *testing.T) {
  1780  	zoneA := "zone-a"
  1781  	zoneB := "zone-b"
  1782  	zoneC := "zone-c"
  1783  
  1784  	readyTrue := true
  1785  	readyFalse := false
  1786  
  1787  	cpu100 := resource.MustParse("100m")
  1788  	cpu1000 := resource.MustParse("1000m")
  1789  	cpu2000 := resource.MustParse("2000m")
  1790  
  1791  	type nodeInfo struct {
  1792  		zoneLabel *string
  1793  		ready     *bool
  1794  		cpu       *resource.Quantity
  1795  	}
  1796  
  1797  	testCases := []struct {
  1798  		name                 string
  1799  		nodes                []nodeInfo
  1800  		topologyCacheEnabled bool
  1801  		endpointZoneInfo     map[string]topologycache.EndpointZoneInfo
  1802  		expectedQueueLen     int
  1803  	}{{
  1804  		name:                 "empty",
  1805  		nodes:                []nodeInfo{},
  1806  		topologyCacheEnabled: false,
  1807  		endpointZoneInfo:     map[string]topologycache.EndpointZoneInfo{},
  1808  		expectedQueueLen:     0,
  1809  	}, {
  1810  		name: "lopsided, queue required",
  1811  		nodes: []nodeInfo{
  1812  			{zoneLabel: &zoneA, ready: &readyTrue, cpu: &cpu100},
  1813  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu1000},
  1814  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1815  		},
  1816  		topologyCacheEnabled: true,
  1817  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1818  			"ns/svc1": {zoneA: 1, zoneB: 2, zoneC: 3},
  1819  		},
  1820  		expectedQueueLen: 1,
  1821  	}, {
  1822  		name: "lopsided but 1 unready, queue required because unready node means 0 CPU in one zone",
  1823  		nodes: []nodeInfo{
  1824  			{zoneLabel: &zoneA, ready: &readyFalse, cpu: &cpu100},
  1825  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu1000},
  1826  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1827  		},
  1828  		topologyCacheEnabled: true,
  1829  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1830  			"ns/svc1": {zoneA: 1, zoneB: 2, zoneC: 3},
  1831  		},
  1832  		expectedQueueLen: 1,
  1833  	}, {
  1834  		name: "even zones, uneven endpoint distribution but within threshold, no sync required",
  1835  		nodes: []nodeInfo{
  1836  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu2000},
  1837  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu2000},
  1838  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1839  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1840  		},
  1841  		topologyCacheEnabled: true,
  1842  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1843  			"ns/svc1": {zoneB: 5, zoneC: 4},
  1844  		},
  1845  		expectedQueueLen: 0,
  1846  	}, {
  1847  		name: "even zones but node missing zone, sync required",
  1848  		nodes: []nodeInfo{
  1849  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu2000},
  1850  			{ready: &readyTrue, cpu: &cpu2000},
  1851  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1852  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1853  		},
  1854  		topologyCacheEnabled: true,
  1855  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1856  			"ns/svc1": {zoneB: 5, zoneC: 4},
  1857  		},
  1858  		expectedQueueLen: 1,
  1859  	}, {
  1860  		name: "even zones but node missing cpu, sync required",
  1861  		nodes: []nodeInfo{
  1862  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu2000},
  1863  			{zoneLabel: &zoneB, ready: &readyTrue},
  1864  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1865  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1866  		},
  1867  		topologyCacheEnabled: true,
  1868  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1869  			"ns/svc1": {zoneB: 5, zoneC: 4},
  1870  		},
  1871  		expectedQueueLen: 1,
  1872  	}, {
  1873  		name: "even zones, uneven endpoint distribution beyond threshold, no sync required",
  1874  		nodes: []nodeInfo{
  1875  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu2000},
  1876  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu2000},
  1877  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1878  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu2000},
  1879  		},
  1880  		topologyCacheEnabled: true,
  1881  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1882  			"ns/svc1": {zoneB: 6, zoneC: 4},
  1883  		},
  1884  		expectedQueueLen: 1,
  1885  	}, {
  1886  		name: "3 uneven zones, matching endpoint distribution, no sync required",
  1887  		nodes: []nodeInfo{
  1888  			{zoneLabel: &zoneA, ready: &readyTrue, cpu: &cpu2000},
  1889  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu1000},
  1890  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu100},
  1891  		},
  1892  		topologyCacheEnabled: true,
  1893  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1894  			"ns/svc1": {zoneA: 20, zoneB: 10, zoneC: 1},
  1895  		},
  1896  		expectedQueueLen: 0,
  1897  	}, {
  1898  		name: "3 uneven zones, endpoint distribution within threshold but below 1, sync required",
  1899  		nodes: []nodeInfo{
  1900  			{zoneLabel: &zoneA, ready: &readyTrue, cpu: &cpu2000},
  1901  			{zoneLabel: &zoneB, ready: &readyTrue, cpu: &cpu1000},
  1902  			{zoneLabel: &zoneC, ready: &readyTrue, cpu: &cpu100},
  1903  		},
  1904  		topologyCacheEnabled: true,
  1905  		endpointZoneInfo: map[string]topologycache.EndpointZoneInfo{
  1906  			"ns/svc1": {zoneA: 20, zoneB: 10, zoneC: 0},
  1907  		},
  1908  		expectedQueueLen: 1,
  1909  	}}
  1910  
  1911  	for _, tc := range testCases {
  1912  		t.Run(tc.name, func(t *testing.T) {
  1913  			_, esController := newController(t, []string{}, time.Duration(0))
  1914  
  1915  			for i, nodeInfo := range tc.nodes {
  1916  				node := &v1.Node{
  1917  					ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("node-%d", i)},
  1918  					Status:     v1.NodeStatus{},
  1919  				}
  1920  				if nodeInfo.zoneLabel != nil {
  1921  					node.Labels = map[string]string{v1.LabelTopologyZone: *nodeInfo.zoneLabel}
  1922  				}
  1923  				if nodeInfo.ready != nil {
  1924  					status := v1.ConditionFalse
  1925  					if *nodeInfo.ready {
  1926  						status = v1.ConditionTrue
  1927  					}
  1928  					node.Status.Conditions = []v1.NodeCondition{{
  1929  						Type:   v1.NodeReady,
  1930  						Status: status,
  1931  					}}
  1932  				}
  1933  				if nodeInfo.cpu != nil {
  1934  					node.Status.Allocatable = v1.ResourceList{
  1935  						v1.ResourceCPU: *nodeInfo.cpu,
  1936  					}
  1937  				}
  1938  				esController.nodeStore.Add(node)
  1939  				if tc.topologyCacheEnabled {
  1940  					esController.topologyCache = topologycache.NewTopologyCache()
  1941  					for serviceKey, endpointZoneInfo := range tc.endpointZoneInfo {
  1942  						esController.topologyCache.SetHints(serviceKey, discovery.AddressTypeIPv4, endpointZoneInfo)
  1943  					}
  1944  				}
  1945  			}
  1946  
  1947  			logger, _ := ktesting.NewTestContext(t)
  1948  			esController.checkNodeTopologyDistribution(logger)
  1949  
  1950  			if esController.queue.Len() != tc.expectedQueueLen {
  1951  				t.Errorf("Expected %d services to be queued, got %d", tc.expectedQueueLen, esController.queue.Len())
  1952  			}
  1953  		})
  1954  	}
  1955  }
  1956  
  1957  func TestUpdateNode(t *testing.T) {
  1958  	nodeReadyStatus := v1.NodeStatus{
  1959  		Allocatable: map[v1.ResourceName]resource.Quantity{
  1960  			v1.ResourceCPU: resource.MustParse("100m"),
  1961  		},
  1962  		Conditions: []v1.NodeCondition{
  1963  			{
  1964  				Type:   v1.NodeReady,
  1965  				Status: v1.ConditionTrue,
  1966  			},
  1967  		},
  1968  	}
  1969  	_, esController := newController(t, nil, time.Duration(0))
  1970  	sliceInfo := &topologycache.SliceInfo{
  1971  		ServiceKey:  "ns/svc",
  1972  		AddressType: discovery.AddressTypeIPv4,
  1973  		ToCreate: []*discovery.EndpointSlice{
  1974  			{
  1975  				ObjectMeta: metav1.ObjectMeta{
  1976  					Name:      "svc-abc",
  1977  					Namespace: "ns",
  1978  					Labels: map[string]string{
  1979  						discovery.LabelServiceName: "svc",
  1980  						discovery.LabelManagedBy:   controllerName,
  1981  					},
  1982  				},
  1983  				Endpoints: []discovery.Endpoint{
  1984  					{
  1985  						Addresses:  []string{"172.18.0.2"},
  1986  						Zone:       pointer.String("zone-a"),
  1987  						Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)},
  1988  					},
  1989  					{
  1990  						Addresses:  []string{"172.18.1.2"},
  1991  						Zone:       pointer.String("zone-b"),
  1992  						Conditions: discovery.EndpointConditions{Ready: pointer.Bool(true)},
  1993  					},
  1994  				},
  1995  				AddressType: discovery.AddressTypeIPv4,
  1996  			},
  1997  		},
  1998  	}
  1999  	node1 := &v1.Node{
  2000  		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
  2001  		Status:     nodeReadyStatus,
  2002  	}
  2003  	node2 := &v1.Node{
  2004  		ObjectMeta: metav1.ObjectMeta{Name: "node-2"},
  2005  		Status:     nodeReadyStatus,
  2006  	}
  2007  	logger, _ := ktesting.NewTestContext(t)
  2008  	esController.nodeStore.Add(node1)
  2009  	esController.nodeStore.Add(node2)
  2010  	esController.addNode(logger, node1)
  2011  	esController.addNode(logger, node2)
  2012  	// The Nodes don't have the zone label, AddHints should fail.
  2013  	_, _, eventsBuilders := esController.topologyCache.AddHints(logger, sliceInfo)
  2014  	require.Len(t, eventsBuilders, 1)
  2015  	assert.Contains(t, eventsBuilders[0].Message, topologycache.InsufficientNodeInfo)
  2016  
  2017  	updateNode1 := node1.DeepCopy()
  2018  	updateNode1.Labels = map[string]string{v1.LabelTopologyZone: "zone-a"}
  2019  	updateNode2 := node2.DeepCopy()
  2020  	updateNode2.Labels = map[string]string{v1.LabelTopologyZone: "zone-b"}
  2021  
  2022  	// After adding the zone label to the Nodes and calling the event handler updateNode, AddHints should succeed.
  2023  	esController.nodeStore.Update(updateNode1)
  2024  	esController.nodeStore.Update(updateNode2)
  2025  	esController.updateNode(logger, node1, updateNode1)
  2026  	esController.updateNode(logger, node2, updateNode2)
  2027  	_, _, eventsBuilders = esController.topologyCache.AddHints(logger, sliceInfo)
  2028  	require.Len(t, eventsBuilders, 1)
  2029  	assert.Contains(t, eventsBuilders[0].Message, topologycache.TopologyAwareHintsEnabled)
  2030  }
  2031  
  2032  // Test helpers
  2033  func addPods(t *testing.T, esController *endpointSliceController, namespace string, podsCount int) {
  2034  	t.Helper()
  2035  	for i := 0; i < podsCount; i++ {
  2036  		pod := newPod(i, namespace, true, 0, false)
  2037  		esController.podStore.Add(pod)
  2038  	}
  2039  }
  2040  
  2041  func standardSyncService(t *testing.T, esController *endpointSliceController, namespace, serviceName string) {
  2042  	t.Helper()
  2043  	createService(t, esController, namespace, serviceName)
  2044  
  2045  	logger, _ := ktesting.NewTestContext(t)
  2046  	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
  2047  	assert.Nil(t, err, "Expected no error syncing service")
  2048  }
  2049  
  2050  func createService(t *testing.T, esController *endpointSliceController, namespace, serviceName string) *v1.Service {
  2051  	t.Helper()
  2052  	service := &v1.Service{
  2053  		ObjectMeta: metav1.ObjectMeta{
  2054  			Name:              serviceName,
  2055  			Namespace:         namespace,
  2056  			CreationTimestamp: metav1.NewTime(time.Now()),
  2057  			UID:               types.UID(namespace + "-" + serviceName),
  2058  		},
  2059  		Spec: v1.ServiceSpec{
  2060  			Ports:      []v1.ServicePort{{TargetPort: intstr.FromInt32(80)}},
  2061  			Selector:   map[string]string{"foo": "bar"},
  2062  			IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
  2063  		},
  2064  	}
  2065  	esController.serviceStore.Add(service)
  2066  	_, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
  2067  	assert.Nil(t, err, "Expected no error creating service")
  2068  	return service
  2069  }
  2070  
  2071  func expectAction(t *testing.T, actions []k8stesting.Action, index int, verb, resource string) {
  2072  	t.Helper()
  2073  	if len(actions) <= index {
  2074  		t.Fatalf("Expected at least %d actions, got %d", index+1, len(actions))
  2075  	}
  2076  
  2077  	action := actions[index]
  2078  	if action.GetVerb() != verb {
  2079  		t.Errorf("Expected action %d verb to be %s, got %s", index, verb, action.GetVerb())
  2080  	}
  2081  
  2082  	if action.GetResource().Resource != resource {
  2083  		t.Errorf("Expected action %d resource to be %s, got %s", index, resource, action.GetResource().Resource)
  2084  	}
  2085  }
  2086  
  2087  // protoPtr takes a Protocol and returns a pointer to it.
  2088  func protoPtr(proto v1.Protocol) *v1.Protocol {
  2089  	return &proto
  2090  }
  2091  
  2092  // cacheMutationCheck helps ensure that cached objects have not been changed
  2093  // in any way throughout a test run.
  2094  type cacheMutationCheck struct {
  2095  	objects []cacheObject
  2096  }
  2097  
  2098  // cacheObject stores a reference to an original object as well as a deep copy
  2099  // of that object to track any mutations in the original object.
  2100  type cacheObject struct {
  2101  	original runtime.Object
  2102  	deepCopy runtime.Object
  2103  }
  2104  
  2105  // newCacheMutationCheck initializes a cacheMutationCheck with EndpointSlices.
  2106  func newCacheMutationCheck(endpointSlices []*discovery.EndpointSlice) cacheMutationCheck {
  2107  	cmc := cacheMutationCheck{}
  2108  	for _, endpointSlice := range endpointSlices {
  2109  		cmc.Add(endpointSlice)
  2110  	}
  2111  	return cmc
  2112  }
  2113  
  2114  // Add appends a runtime.Object and a deep copy of that object into the
  2115  // cacheMutationCheck.
  2116  func (cmc *cacheMutationCheck) Add(o runtime.Object) {
  2117  	cmc.objects = append(cmc.objects, cacheObject{
  2118  		original: o,
  2119  		deepCopy: o.DeepCopyObject(),
  2120  	})
  2121  }
  2122  
  2123  // Check verifies that no objects in the cacheMutationCheck have been mutated.
  2124  func (cmc *cacheMutationCheck) Check(t *testing.T) {
  2125  	for _, o := range cmc.objects {
  2126  		if !reflect.DeepEqual(o.original, o.deepCopy) {
  2127  			// Cached objects can't be safely mutated and instead should be deep
  2128  			// copied before changed in any way.
  2129  			t.Errorf("Cached object was unexpectedly mutated. Original: %+v, Mutated: %+v", o.deepCopy, o.original)
  2130  		}
  2131  	}
  2132  }
  2133  
  2134  func Test_dropEndpointSlicesPendingDeletion(t *testing.T) {
  2135  	now := metav1.Now()
  2136  	endpointSlices := []*discovery.EndpointSlice{
  2137  		{
  2138  			ObjectMeta: metav1.ObjectMeta{
  2139  				Name:              "epSlice1",
  2140  				DeletionTimestamp: &now,
  2141  			},
  2142  		},
  2143  		{
  2144  			ObjectMeta: metav1.ObjectMeta{
  2145  				Name: "epSlice2",
  2146  			},
  2147  			AddressType: discovery.AddressTypeIPv4,
  2148  			Endpoints: []discovery.Endpoint{
  2149  				{
  2150  					Addresses: []string{"172.18.0.2"},
  2151  				},
  2152  			},
  2153  		},
  2154  		{
  2155  			ObjectMeta: metav1.ObjectMeta{
  2156  				Name: "epSlice3",
  2157  			},
  2158  			AddressType: discovery.AddressTypeIPv6,
  2159  			Endpoints: []discovery.Endpoint{
  2160  				{
  2161  					Addresses: []string{"3001:0da8:75a3:0000:0000:8a2e:0370:7334"},
  2162  				},
  2163  			},
  2164  		},
  2165  	}
  2166  
  2167  	epSlice2 := endpointSlices[1]
  2168  	epSlice3 := endpointSlices[2]
  2169  
  2170  	result := dropEndpointSlicesPendingDeletion(endpointSlices)
  2171  
  2172  	assert.Len(t, result, 2)
  2173  	for _, epSlice := range result {
  2174  		if epSlice.Name == "epSlice1" {
  2175  			t.Errorf("Expected EndpointSlice marked for deletion to be dropped.")
  2176  		}
  2177  	}
  2178  
  2179  	// We don't use endpointSlices and instead check manually for equality, because
  2180  	// `dropEndpointSlicesPendingDeletion` mutates the slice it receives, so it's easy
  2181  	// to break this test later. This way, we can be absolutely sure that the result
  2182  	// has exactly what we expect it to.
  2183  	if !reflect.DeepEqual(epSlice2, result[0]) {
  2184  		t.Errorf("EndpointSlice was unexpectedly mutated. Expected: %+v, Mutated: %+v", epSlice2, result[0])
  2185  	}
  2186  	if !reflect.DeepEqual(epSlice3, result[1]) {
  2187  		t.Errorf("EndpointSlice was unexpectedly mutated. Expected: %+v, Mutated: %+v", epSlice3, result[1])
  2188  	}
  2189  }