github.com/cilium/cilium@v1.16.2/pkg/redirectpolicy/manager_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package redirectpolicy

import (
	"context"
	"fmt"
	"net"
	"net/netip"
	"sync"
	"testing"

	"github.com/stretchr/testify/require"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/tools/cache"

	cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
	"github.com/cilium/cilium/pkg/endpoint"
	"github.com/cilium/cilium/pkg/endpointmanager"
	"github.com/cilium/cilium/pkg/k8s"
	"github.com/cilium/cilium/pkg/k8s/resource"
	slimcorev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
	"github.com/cilium/cilium/pkg/k8s/utils"
	lb "github.com/cilium/cilium/pkg/loadbalancer"
	"github.com/cilium/cilium/pkg/option"
	"github.com/cilium/cilium/pkg/policy/api"
	"github.com/cilium/cilium/pkg/testutils"
)

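// ManagerSuite bundles the redirect policy Manager under test together with
// the fake service and endpoint managers it is wired to.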
type ManagerSuite struct {
	rpm *Manager
	svc svcManager
	epM endpointManager
}

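// setupManagerSuite builds a Manager backed by fake service, pod, and
// endpoint managers, and (re)initializes the shared configAddrType and
// configSvcType fixtures. Every test starts from it; a minimal usage sketch:
//
//	m := setupManagerSuite(t)
//	added, err := m.rpm.AddRedirectPolicy(configAddrType)
//	require.True(t, added)
//	require.NoError(t, err)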
func setupManagerSuite(tb testing.TB) *ManagerSuite {
	testutils.PrivilegedTest(tb)

	m := &ManagerSuite{}
	m.svc = &fakeSvcManager{}
	fpr := &fakePodResource{
		fakePodStore{},
	}
	m.epM = &fakeEpManager{}
	m.rpm = NewRedirectPolicyManager(m.svc, nil, fpr, m.epM)
	configAddrType = LRPConfig{
		id: k8s.ServiceID{
			Name:      "test-foo",
			Namespace: "ns1",
		},
		lrpType:      lrpConfigTypeAddr,
		frontendType: addrFrontendSinglePort,
		frontendMappings: []*feMapping{{
			feAddr:      fe1,
			podBackends: nil,
			fePort:      portName1,
		}},
		backendSelector: api.EndpointSelector{
			LabelSelector: &slim_metav1.LabelSelector{
				MatchLabels: map[string]string{
					"test": "foo",
				},
			},
		},
		backendPorts: []bePortInfo{beP1},
	}
	configSvcType = LRPConfig{
		id: k8s.ServiceID{
			Name:      "test-foo",
			Namespace: "ns1",
		},
		lrpType: lrpConfigTypeSvc,
		backendSelector: api.EndpointSelector{
			LabelSelector: &slim_metav1.LabelSelector{
				MatchLabels: map[string]string{
					"test": "foo",
				},
			},
		},
	}

	return m
}

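// fakeSvcManager stubs svcManager. When the optional channels are set,
// UpsertService and TerminateUDPConnectionsToBackend forward their arguments
// so tests can assert on the emitted events; DeleteService always succeeds.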
type fakeSvcManager struct {
	upsertEvents            chan *lb.SVC
	destroyConnectionEvents chan lb.L3n4Addr
}

func (f *fakeSvcManager) DeleteService(lb.L3n4Addr) (bool, error) {
	return true, nil
}

func (f *fakeSvcManager) UpsertService(s *lb.SVC) (bool, lb.ID, error) {
	if f.upsertEvents != nil {
		f.upsertEvents <- s
	}
	return true, 1, nil
}

func (f *fakeSvcManager) TerminateUDPConnectionsToBackend(l3n4Addr *lb.L3n4Addr) {
	if f.destroyConnectionEvents != nil {
		f.destroyConnectionEvents <- *l3n4Addr
	}
}

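// fakePodResource backs the Manager's local pod resource. Only Store is
// usable; the streaming methods (Observe, Events) panic if called.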
type fakePodResource struct {
	store fakePodStore
}

func (fpr *fakePodResource) Observe(ctx context.Context, next func(resource.Event[*slimcorev1.Pod]), complete func(error)) {
	panic("unimplemented")
}
func (fpr *fakePodResource) Events(ctx context.Context, opts ...resource.EventsOpt) <-chan resource.Event[*slimcorev1.Pod] {
	panic("unimplemented")
}
func (fpr *fakePodResource) Store(context.Context) (resource.Store[*slimcorev1.Pod], error) {
	return &fpr.store, nil
}

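// fakePodStore is a minimal resource.Store: List returns pod1 and pod2
// unless OnList overrides it, and GetByKey serves lookups from the Pods map.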
type fakePodStore struct {
	OnList func() []*slimcorev1.Pod
	Pods   map[resource.Key]*slimcorev1.Pod
}

func (ps *fakePodStore) List() []*slimcorev1.Pod {
	if ps.OnList != nil {
		return ps.OnList()
	}
	pods := []*slimcorev1.Pod{pod1, pod2}
	return pods
}

func (ps *fakePodStore) IterKeys() resource.KeyIter { return nil }
func (ps *fakePodStore) Get(obj *slimcorev1.Pod) (item *slimcorev1.Pod, exists bool, err error) {
	return nil, false, nil
}
func (ps *fakePodStore) GetByKey(key resource.Key) (item *slimcorev1.Pod, exists bool, err error) {
	// Only report exists=true for keys actually present in the map.
	if p, ok := ps.Pods[key]; ok {
		return p, true, nil
	}
	return nil, false, nil
}
func (ps *fakePodStore) CacheStore() cache.Store { return nil }

func (ps *fakePodStore) IndexKeys(indexName, indexedValue string) ([]string, error) { return nil, nil }
func (ps *fakePodStore) ByIndex(indexName, indexedValue string) ([]*slimcorev1.Pod, error) {
	return nil, nil
}
func (ps *fakePodStore) Release() {
}

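// fakeEpManager resolves endpoint netns cookies by pod IP from a fixed map;
// unknown IPs return an error.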
type fakeEpManager struct {
	cookies map[netip.Addr]uint64
}

func (ps *fakeEpManager) Subscribe(s endpointmanager.Subscriber) {
}

func (ps *fakeEpManager) GetEndpointNetnsCookieByIP(ip netip.Addr) (uint64, error) {
	c, ok := ps.cookies[ip]
	if !ok {
		return 0, fmt.Errorf("endpoint not found")
	}
	return c, nil
}

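// fakeSkipLBMap records AddLB4/AddLB6 calls on the event channels; the
// delete methods are not exercised by these tests and panic if called.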
type fakeSkipLBMap struct {
	lb4Events chan skipLBParams
	lb6Events chan skipLBParams
}

type skipLBParams struct {
	cookie uint64
	ip     net.IP
	port   uint16
}

func (f fakeSkipLBMap) AddLB4(netnsCookie uint64, ip net.IP, port uint16) error {
	f.lb4Events <- skipLBParams{
		cookie: netnsCookie,
		ip:     ip,
		port:   port,
	}

	return nil
}

func (f fakeSkipLBMap) AddLB6(netnsCookie uint64, ip net.IP, port uint16) error {
	f.lb6Events <- skipLBParams{
		cookie: netnsCookie,
		ip:     ip,
		port:   port,
	}

	return nil
}

func (f fakeSkipLBMap) DeleteLB4ByAddrPort(ip net.IP, port uint16) {
	panic("implement me")
}

func (f fakeSkipLBMap) DeleteLB6ByAddrPort(ip net.IP, port uint16) {
	panic("implement me")
}

func (f fakeSkipLBMap) DeleteLB4ByNetnsCookie(cookie uint64) {
	panic("implement me")
}

func (f fakeSkipLBMap) DeleteLB6ByNetnsCookie(cookie uint64) {
	panic("implement me")
}

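// Shared fixtures: IPv4 frontends fe1 (TCP 1.1.1.1:80) and fe2 (UDP
// 2.2.2.2:81), IPv6 frontend fe3v6, named backend ports beP1/beP2, and two
// ready pods in namespace ns1. configAddrType and configSvcType are
// (re)initialized by setupManagerSuite.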
var (
	tcpStr    = "TCP"
	udpStr    = "UDP"
	proto1, _ = lb.NewL4Type(tcpStr)
	proto2, _ = lb.NewL4Type(udpStr)
	fe1       = lb.NewL3n4Addr(
		proto1,
		cmtypes.MustParseAddrCluster("1.1.1.1"),
		80,
		lb.ScopeExternal)
	fe2 = lb.NewL3n4Addr(
		proto2,
		cmtypes.MustParseAddrCluster("2.2.2.2"),
		81,
		lb.ScopeExternal)
	fe3v6 = lb.NewL3n4Addr(
		proto1,
		cmtypes.MustParseAddrCluster("fd00::2"),
		80,
		lb.ScopeExternal)
	portName1 = "test1"
	portName2 = "test2"
	beP1      = bePortInfo{
		l4Addr: lb.L4Addr{
			Protocol: tcpStr,
			Port:     8080,
		},
		name: portName1,
	}
	beP2 = bePortInfo{
		l4Addr: lb.L4Addr{
			Protocol: udpStr,
			Port:     8081,
		},
		name: portName2,
	}
	configAddrType LRPConfig
	configSvcType  LRPConfig

	podReady = slimcorev1.PodCondition{
		Type:               slimcorev1.PodReady,
		Status:             slimcorev1.ConditionTrue,
		LastProbeTime:      slim_metav1.Now(),
		LastTransitionTime: slim_metav1.Now(),
		Reason:             "",
		Message:            "",
	}

	podNotReady = slimcorev1.PodCondition{
		Type:               slimcorev1.PodReady,
		Status:             slimcorev1.ConditionFalse,
		LastProbeTime:      slim_metav1.Now(),
		LastTransitionTime: slim_metav1.Now(),
		Reason:             "",
		Message:            "",
	}

	pod1IP1    = slimcorev1.PodIP{IP: "1.2.3.4"}
	pod1IP2    = slimcorev1.PodIP{IP: "5.6.7.8"}
	pod1Port1  = int32(8080)
	pod1Port2  = int32(8081)
	pod1Proto1 = slimcorev1.ProtocolTCP
	pod1Proto2 = slimcorev1.ProtocolUDP
	pod1       = &slimcorev1.Pod{
		ObjectMeta: slim_metav1.ObjectMeta{
			Name:      "foo-be",
			Namespace: "ns1",
			Labels:    map[string]string{"test": "foo"},
		},
		Spec: slimcorev1.PodSpec{
			Containers: []slimcorev1.Container{
				{
					Ports: []slimcorev1.ContainerPort{
						{
							Name:          portName1,
							ContainerPort: pod1Port1,
							Protocol:      pod1Proto1,
						},
						{
							Name:          portName2,
							ContainerPort: pod1Port2,
							Protocol:      pod1Proto2,
						},
					},
				},
			},
		},
		Status: slimcorev1.PodStatus{
			PodIP:      pod1IP1.IP,
			PodIPs:     []slimcorev1.PodIP{pod1IP1, pod1IP2},
			Conditions: []slimcorev1.PodCondition{podReady},
		},
	}
	pod1ID = k8s.ServiceID{
		Name:      pod1.Name,
		Namespace: pod1.Namespace,
	}
	pod2IP1    = slimcorev1.PodIP{IP: "5.6.7.9"}
	pod2IP2    = slimcorev1.PodIP{IP: "5.6.7.10"}
	pod2Port1  = int32(8080)
	pod2Port2  = int32(8081)
	pod2Proto1 = slimcorev1.ProtocolTCP
	pod2Proto2 = slimcorev1.ProtocolUDP
	pod2       = &slimcorev1.Pod{
		ObjectMeta: slim_metav1.ObjectMeta{
			Name:      "foo-be2",
			Namespace: "ns1",
			Labels:    map[string]string{"test": "bar"},
		},
		Spec: slimcorev1.PodSpec{
			Containers: []slimcorev1.Container{
				{
					Ports: []slimcorev1.ContainerPort{
						{
							Name:          portName1,
							ContainerPort: pod2Port1,
							Protocol:      pod2Proto1,
						},
						{
							Name:          portName2,
							ContainerPort: pod2Port2,
							Protocol:      pod2Proto2,
						},
					},
				},
			},
		},
		Status: slimcorev1.PodStatus{
			PodIP:      pod2IP1.IP,
			PodIPs:     []slimcorev1.PodIP{pod2IP1, pod2IP2},
			Conditions: []slimcorev1.PodCondition{podReady},
		},
	}
	pod2ID = k8s.ServiceID{
		Name:      pod2.Name,
		Namespace: pod2.Namespace,
	}
)

// Tests that duplicate addressMatcher configs are rejected.
func TestManager_AddRedirectPolicy_AddrMatcherDuplicateConfig(t *testing.T) {
	m := setupManagerSuite(t)

	configFe := configAddrType
	m.rpm.policyFrontendsByHash[fe1.Hash()] = configFe.id
	dupConfigFe := configFe
	dupConfigFe.id.Name = "test-foo2"

	added, err := m.rpm.AddRedirectPolicy(dupConfigFe)

	require.False(t, added)
	require.Error(t, err)
}

// Tests that duplicate svcMatcher configs are rejected.
func TestManager_AddRedirectPolicy_SvcMatcherDuplicateConfig(t *testing.T) {
	m := setupManagerSuite(t)

	configSvc := configSvcType
	configSvc.serviceID = &k8s.ServiceID{
		Name:      "foo",
		Namespace: "ns1",
	}
	m.rpm.policyConfigs[configSvc.id] = &configSvc
	m.rpm.policyServices[*configSvc.serviceID] = configSvc.id
	invalidConfigSvc := configSvc
	invalidConfigSvc.id.Name = "test-foo3"

	added, err := m.rpm.AddRedirectPolicy(invalidConfigSvc)

	require.False(t, added)
	require.Error(t, err)
}

// Tests add redirect policy, add pod, delete pod and delete redirect policy events
// for an addressMatcher config with a frontend having a single port.
func TestManager_AddrMatcherConfigSinglePort(t *testing.T) {
	m := setupManagerSuite(t)

	// Add an addressMatcher type LRP with a single port. The policy config
	// frontend should have 2 pod backends, one for each podIP.
	podIPs := utils.ValidIPs(pod1.Status)
	expectedbes := make([]backend, len(podIPs))
	for i := range podIPs {
		expectedbes[i] = backend{
			L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(podIPs[i]), L4Addr: beP1.l4Addr},
			podID:    pod1ID,
		}
	}

	added, err := m.rpm.AddRedirectPolicy(configAddrType)

	require.True(t, added)
	require.NoError(t, err)
	require.Equal(t, 1, len(m.rpm.policyConfigs))
	require.Equal(t, configAddrType.id.Name, m.rpm.policyConfigs[configAddrType.id].id.Name)
	require.Equal(t, configAddrType.id.Namespace, m.rpm.policyConfigs[configAddrType.id].id.Namespace)
	require.Equal(t, 1, len(m.rpm.policyFrontendsByHash))
	require.Equal(t, configAddrType.id, m.rpm.policyFrontendsByHash[configAddrType.frontendMappings[0].feAddr.Hash()])
	require.Equal(t, 2, len(configAddrType.frontendMappings[0].podBackends))
	for i := range configAddrType.frontendMappings[0].podBackends {
		require.Equal(t, expectedbes[i], configAddrType.frontendMappings[0].podBackends[i])
	}
	require.Equal(t, 1, len(m.rpm.policyPods))
	require.Equal(t, 1, len(m.rpm.policyPods[pod1ID]))
	require.Equal(t, configAddrType.id, m.rpm.policyPods[pod1ID][0])

	// Add a new backend pod; this adds 2 more pod backends, one for each podIP.
	pod3 := pod2.DeepCopy()
	pod3.Labels["test"] = "foo"
	pod3ID := pod2ID
	podIPs = utils.ValidIPs(pod3.Status)
	expectedbes2 := make([]backend, 0, len(expectedbes)+len(podIPs))
	expectedbes2 = append(expectedbes2, expectedbes...)
	for i := range podIPs {
		expectedbes2 = append(expectedbes2, backend{
			L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(podIPs[i]), L4Addr: beP1.l4Addr},
			podID:    pod3ID,
		})
	}

	m.rpm.OnAddPod(pod3)

	require.Equal(t, 2, len(m.rpm.policyPods))
	require.Equal(t, 1, len(m.rpm.policyPods[pod3ID]))
	require.Equal(t, configAddrType.id, m.rpm.policyPods[pod1ID][0])
	require.Equal(t, 4, len(configAddrType.frontendMappings[0].podBackends))
	for i := range configAddrType.frontendMappings[0].podBackends {
		require.Equal(t, expectedbes2[i], configAddrType.frontendMappings[0].podBackends[i])
	}

	// When the pod becomes unready, its backends are removed.
	pod3.Status.Conditions = []slimcorev1.PodCondition{podNotReady}
	m.rpm.OnUpdatePod(pod3, false, false)

	require.Equal(t, 2, len(m.rpm.policyPods))
	require.Equal(t, 1, len(m.rpm.policyPods[pod3ID]))
	require.Equal(t, 2, len(configAddrType.frontendMappings[0].podBackends))
	for i := range configAddrType.frontendMappings[0].podBackends {
		require.Equal(t, expectedbes[i], configAddrType.frontendMappings[0].podBackends[i])
	}

	// When the pod becomes ready again, its backends are restored.
	pod3.Status.Conditions = []slimcorev1.PodCondition{podReady}
	m.rpm.OnUpdatePod(pod3, false, true)

	require.Equal(t, 2, len(m.rpm.policyPods))
	require.Equal(t, 1, len(m.rpm.policyPods[pod3ID]))
	require.Equal(t, configAddrType.id, m.rpm.policyPods[pod1ID][0])
	require.Equal(t, 4, len(configAddrType.frontendMappings[0].podBackends))
	for i := range configAddrType.frontendMappings[0].podBackends {
		require.Equal(t, expectedbes2[i], configAddrType.frontendMappings[0].podBackends[i])
	}

	// Delete the pod. This should delete the pod's backends.
	m.rpm.OnDeletePod(pod3)

	require.Equal(t, 1, len(m.rpm.policyPods))
	_, found := m.rpm.policyPods[pod3ID]
	require.False(t, found)
	require.Equal(t, 2, len(configAddrType.frontendMappings[0].podBackends))
	for i := range configAddrType.frontendMappings[0].podBackends {
		require.Equal(t, expectedbes[i], configAddrType.frontendMappings[0].podBackends[i])
	}

	// Delete the LRP.
	err = m.rpm.DeleteRedirectPolicy(configAddrType)

	require.NoError(t, err)
	require.Equal(t, 0, len(m.rpm.policyFrontendsByHash))
	require.Equal(t, 0, len(m.rpm.policyPods))
	require.Equal(t, 0, len(m.rpm.policyConfigs))
}

// Tests add redirect policy, add pod, delete pod and delete redirect policy events
// for an addressMatcher config with a frontend having multiple named ports.
func TestManager_AddrMatcherConfigMultiplePorts(t *testing.T) {
	m := setupManagerSuite(t)

	// Add an addressMatcher type LRP with multiple named ports.
	configAddrType.frontendType = addrFrontendNamedPorts
	configAddrType.frontendMappings = append(configAddrType.frontendMappings, &feMapping{
		feAddr:      fe2,
		podBackends: nil,
		fePort:      portName2,
	})
	beP1.name = portName1
	beP2.name = portName2
	configAddrType.backendPorts = []bePortInfo{beP1, beP2}
	configAddrType.backendPortsByPortName = map[string]*bePortInfo{
		beP1.name: &configAddrType.backendPorts[0],
		beP2.name: &configAddrType.backendPorts[1]}
	podIPs := utils.ValidIPs(pod1.Status)
	expectedbes := make([]backend, 0, len(podIPs))
	for i := range podIPs {
		expectedbes = append(expectedbes, backend{
			L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(podIPs[i]), L4Addr: beP1.l4Addr},
			podID:    pod1ID,
		})
	}

	added, err := m.rpm.AddRedirectPolicy(configAddrType)

	require.True(t, added)
	require.NoError(t, err)
	require.Equal(t, 1, len(m.rpm.policyConfigs))
	require.Equal(t, configAddrType.id.Name, m.rpm.policyConfigs[configAddrType.id].id.Name)
	require.Equal(t, configAddrType.id.Namespace, m.rpm.policyConfigs[configAddrType.id].id.Namespace)
	require.Equal(t, 2, len(m.rpm.policyFrontendsByHash))
	for _, id := range m.rpm.policyFrontendsByHash {
		require.Equal(t, configAddrType.id, id)
	}
	// Frontend ports should be mapped to the corresponding backend ports.
	for _, feM := range configAddrType.frontendMappings {
		switch feM.fePort {
		case "test1":
			require.Equal(t, 2, len(feM.podBackends))
			for i := range podIPs {
				expectedbes[i] = backend{
					L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(podIPs[i]), L4Addr: beP1.l4Addr},
					podID:    pod1ID,
				}
			}
			for i := range feM.podBackends {
				require.Equal(t, expectedbes[i], feM.podBackends[i])
			}
		case "test2":
			require.Equal(t, 2, len(feM.podBackends))
			for i := range podIPs {
				expectedbes[i] = backend{
					L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(podIPs[i]), L4Addr: beP2.l4Addr},
					podID:    pod1ID,
				}
			}
			for i := range feM.podBackends {
				require.Equal(t, expectedbes[i], feM.podBackends[i])
			}
		default:
			// Fail the test on an unexpected frontend port instead of only logging.
			t.Errorf("unexpected frontend port %q", feM.fePort)
		}
	}
	require.Equal(t, 1, len(m.rpm.policyPods))
	require.Equal(t, 1, len(m.rpm.policyPods[pod1ID]))
	require.Equal(t, configAddrType.id, m.rpm.policyPods[pod1ID][0])

	// Delete the LRP.
	err = m.rpm.DeleteRedirectPolicy(configAddrType)

	require.NoError(t, err)
	require.Equal(t, 0, len(m.rpm.policyFrontendsByHash))
	require.Equal(t, 0, len(m.rpm.policyPods))
	require.Equal(t, 0, len(m.rpm.policyConfigs))
}

// Tests that IPv4 and IPv6 frontend addresses are mapped to IPv4 and IPv6
// backends, respectively.
func TestManager_AddrMatcherConfigDualStack(t *testing.T) {
	m := setupManagerSuite(t)

	// Only IPv4 backend(s) for the IPv4 frontend.
	pod3 := pod1.DeepCopy()
	pod3ID := pod1ID
	podIPs := utils.ValidIPs(pod3.Status)
	expectedbes4 := make([]backend, 0, len(podIPs))
	for i := range podIPs {
		expectedbes4 = append(expectedbes4, backend{
			L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(podIPs[i]), L4Addr: beP1.l4Addr},
			podID:    pod3ID,
		})
	}
	pod3v6 := slimcorev1.PodIP{IP: "fd00::40"}
	expectedbes6 := []backend{{
		L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(pod3v6.IP), L4Addr: beP1.l4Addr},
		podID:    pod3ID,
	}}
	pod3.Status.PodIPs = append(pod3.Status.PodIPs, pod3v6)
	psg := &fakePodResource{
		fakePodStore{
			OnList: func() []*slimcorev1.Pod {
				return []*slimcorev1.Pod{pod3}
			},
		},
	}
	m.rpm.localPods = psg

	added, err := m.rpm.AddRedirectPolicy(configAddrType)

	require.True(t, added)
	require.NoError(t, err)
	require.Equal(t, len(expectedbes4), len(configAddrType.frontendMappings[0].podBackends))
	for i := range configAddrType.frontendMappings[0].podBackends {
		require.Equal(t, expectedbes4[i], configAddrType.frontendMappings[0].podBackends[i])
	}

	// Only IPv6 backend(s) for the IPv6 frontend.
	feM := []*feMapping{{
		feAddr:      fe3v6,
		podBackends: nil,
	}}
	configAddrType.id.Name = "test-bar"
	configAddrType.frontendMappings = feM

	added, err = m.rpm.AddRedirectPolicy(configAddrType)

	require.True(t, added)
	require.NoError(t, err)
	require.Equal(t, len(expectedbes6), len(configAddrType.frontendMappings[0].podBackends))

	for i := range configAddrType.frontendMappings[0].podBackends {
		require.Equal(t, expectedbes6[i], configAddrType.frontendMappings[0].podBackends[i])
	}
}

// Tests that namespace-mismatched pods are not selected on pod add and
// update events.
func TestManager_OnAddAndUpdatePod(t *testing.T) {
	m := setupManagerSuite(t)

	configFe := configAddrType
	m.rpm.policyFrontendsByHash[fe1.Hash()] = configFe.id
	configSvc := configSvcType
	m.rpm.policyConfigs[configSvc.id] = &configSvc
	pod := pod1.DeepCopy()
	pod.Namespace = "ns2"
	podID := k8s.ServiceID{
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}

	m.rpm.OnAddPod(pod)

	// The namespace-mismatched pod is not selected.
	require.Equal(t, 0, len(m.rpm.policyPods))
	_, found := m.rpm.policyPods[podID]
	require.False(t, found)

	m.rpm.OnUpdatePod(pod, true, true)

	// The namespace-mismatched pod is not selected.
	require.Equal(t, 0, len(m.rpm.policyPods))
	_, found = m.rpm.policyPods[podID]
	require.False(t, found)
}

// Tests policies with the skipRedirectFromBackend flag set.
func TestManager_OnAddRedirectPolicy(t *testing.T) {
	m := setupManagerSuite(t)

	// Sequence of events: Pods -> RedirectPolicy -> Endpoint
	sMgr := &fakeSvcManager{}
	sMgr.upsertEvents = make(chan *lb.SVC)
	m.svc = sMgr
	lbEvents := make(chan skipLBParams)
	pc := configAddrType
	pc.skipRedirectFromBackend = true
	pods := make(map[resource.Key]*slimcorev1.Pod)
	pk1 := resource.Key{
		Name:      pod1.Name,
		Namespace: pod1.Namespace,
	}
	pod := pod1.DeepCopy()
	pod.Status.PodIPs = []slimcorev1.PodIP{pod1IP1}
	pods[pk1] = pod
	fps := &fakePodResource{
		fakePodStore{
			Pods: pods,
		},
	}
	m.rpm.localPods = fps
	ep := &endpoint.Endpoint{
		K8sPodName:   pod.Name,
		K8sNamespace: pod.Namespace,
		NetNsCookie:  1234,
	}
	m.rpm = NewRedirectPolicyManager(m.svc, nil, fps, m.epM)
	m.rpm.skipLBMap = &fakeSkipLBMap{lb4Events: lbEvents}

	added, err := m.rpm.AddRedirectPolicy(pc)

	require.True(t, added)
	require.NoError(t, err)

	wg := sync.WaitGroup{}
	// Asserts skipLBMap events.
	wg.Add(1)
	go func() {
		ev := <-lbEvents

		require.Equal(t, ep.NetNsCookie, ev.cookie)
		require.Equal(t, fe1.AddrCluster.Addr().String(), ev.ip.String())
		require.Equal(t, fe1.L4Addr.Port, ev.port)

		wg.Done()
	}()
	// Asserts UpsertService events.
	wg.Add(1)
	go func() {
		ev := <-sMgr.upsertEvents

		require.Equal(t, lb.SVCTypeLocalRedirect, ev.Type)
		require.Equal(t, configAddrType.frontendMappings[0].feAddr.String(), ev.Frontend.String())
		require.Equal(t, 1, len(ev.Backends))
		require.Equal(t, backend{
			L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(pod1.Status.PodIP), L4Addr: beP1.l4Addr},
			podID:    pod1ID,
		}.Hash(), ev.Backends[0].Hash())

		wg.Done()
	}()

	// Add an endpoint for the policy-selected pod.
	m.rpm.EndpointCreated(ep)

	// Wait for the skipLBMap and UpsertService events.
	wg.Wait()

	// Sequence of events: Pod -> Endpoint -> RedirectPolicy
	sMgr = &fakeSvcManager{}
	sMgr.upsertEvents = make(chan *lb.SVC)
	m.svc = sMgr
	pod = pod1.DeepCopy()
	pod.Status.PodIPs = []slimcorev1.PodIP{pod1IP1}
	cookie := uint64(1235)
	ep = &endpoint.Endpoint{
		K8sPodName:   pod1.Name,
		K8sNamespace: pod1.Namespace,
		NetNsCookie:  cookie,
	}
	cookies := map[netip.Addr]uint64{}
	addr, _ := netip.ParseAddr(pod.Status.PodIP)
	cookies[addr] = cookie
	m.epM = &fakeEpManager{cookies: cookies}
	fps = &fakePodResource{
		fakePodStore{
			OnList: func() []*slimcorev1.Pod {
				return []*slimcorev1.Pod{pod}
			},
		},
	}
	m.rpm = NewRedirectPolicyManager(m.svc, nil, fps, m.epM)
	lbEvents = make(chan skipLBParams)
	m.rpm.skipLBMap = &fakeSkipLBMap{lb4Events: lbEvents}

	wg = sync.WaitGroup{}
	// Asserts skipLBMap events.
	wg.Add(1)
	go func() {
		ev := <-lbEvents

		require.Equal(t, cookie, ev.cookie)
		require.Equal(t, fe1.AddrCluster.Addr().String(), ev.ip.String())
		require.Equal(t, fe1.L4Addr.Port, ev.port)

		wg.Done()
	}()
	// Asserts UpsertService events.
	wg.Add(1)
	go func() {
		ev := <-sMgr.upsertEvents

		require.Equal(t, lb.SVCTypeLocalRedirect, ev.Type)
		require.Equal(t, configAddrType.frontendMappings[0].feAddr.String(), ev.Frontend.String())
		require.Equal(t, 1, len(ev.Backends))
		require.Equal(t, backend{
			L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(pod.Status.PodIP), L4Addr: beP1.l4Addr},
			podID:    pod1ID,
		}.Hash(), ev.Backends[0].Hash())

		wg.Done()
	}()

	// The policy is added.
	added, err = m.rpm.AddRedirectPolicy(pc)

	require.True(t, added)
	require.NoError(t, err)

	wg.Wait()

	// Sequence of events: RedirectPolicy -> Pod -> Endpoint
	sMgr = &fakeSvcManager{}
	sMgr.upsertEvents = make(chan *lb.SVC)
	m.svc = sMgr
	pod = pod1.DeepCopy()
	pod.Status.PodIPs = []slimcorev1.PodIP{pod1IP1}
	cookie = uint64(1235)
	ep = &endpoint.Endpoint{
		K8sPodName:   pod1.Name,
		K8sNamespace: pod1.Namespace,
		NetNsCookie:  cookie,
	}
	m.epM = &fakeEpManager{}
	pods = make(map[resource.Key]*slimcorev1.Pod)
	pk1 = resource.Key{
		Name:      pod1.Name,
		Namespace: pod1.Namespace,
	}
	pods[pk1] = pod
	fps = &fakePodResource{
		fakePodStore{
			Pods: pods,
		},
	}
	m.rpm = NewRedirectPolicyManager(m.svc, nil, fps, m.epM)
	lbEvents = make(chan skipLBParams)
	m.rpm.skipLBMap = &fakeSkipLBMap{lb4Events: lbEvents}

	wg = sync.WaitGroup{}
	// Asserts skipLBMap events.
	wg.Add(1)
	go func() {
		ev := <-lbEvents

		require.Equal(t, cookie, ev.cookie)
		require.Equal(t, fe1.AddrCluster.Addr().String(), ev.ip.String())
		require.Equal(t, fe1.L4Addr.Port, ev.port)

		wg.Done()
	}()
	// Asserts UpsertService events.
	wg.Add(1)
	go func() {
		ev := <-sMgr.upsertEvents

		require.Equal(t, lb.SVCTypeLocalRedirect, ev.Type)
		require.Equal(t, configAddrType.frontendMappings[0].feAddr.String(), ev.Frontend.String())
		require.Equal(t, 1, len(ev.Backends))
		require.Equal(t, backend{
			L3n4Addr: lb.L3n4Addr{AddrCluster: cmtypes.MustParseAddrCluster(pod.Status.PodIP), L4Addr: beP1.l4Addr},
			podID:    pod1ID,
		}.Hash(), ev.Backends[0].Hash())

		wg.Done()
	}()

	// The policy is added.
	added, err = m.rpm.AddRedirectPolicy(pc)
	require.True(t, added)
	require.NoError(t, err)

	// The pod selected by the policy is added.
	m.rpm.OnAddPod(pod)

	// Add an endpoint for the policy-selected pod.
	m.rpm.EndpointCreated(ep)

	wg.Wait()
}

// Tests that connections to deleted LRP backend pods are terminated.
func TestManager_OnDeletePod(t *testing.T) {
	m := setupManagerSuite(t)

	option.Config.EnableSocketLB = true
	// Create an unbuffered channel so that the test blocks on unexpected events.
	events := make(chan lb.L3n4Addr)
	m.rpm.svcManager = &fakeSvcManager{destroyConnectionEvents: events}
	labels := map[string]string{"test": "foo-bar-term"}
	podUDP := &slimcorev1.Pod{
		ObjectMeta: slim_metav1.ObjectMeta{
			Name:      "foo-be",
			Namespace: "ns1",
			Labels:    labels,
		},
		Spec: slimcorev1.PodSpec{
			Containers: []slimcorev1.Container{
				{
					Ports: []slimcorev1.ContainerPort{
						{
							Name:          portName1,
							ContainerPort: pod2Port1,
							Protocol:      pod2Proto2,
						},
						{
							Name:          portName2,
							ContainerPort: pod2Port2,
							Protocol:      pod2Proto2,
						},
					},
				},
			},
		},
		Status: slimcorev1.PodStatus{
			PodIP:      pod2IP1.IP,
			PodIPs:     []slimcorev1.PodIP{pod2IP1},
			Conditions: []slimcorev1.PodCondition{podReady},
		},
	}
	beUDPP1 := bePortInfo{
		l4Addr: lb.L4Addr{
			Protocol: udpStr,
			Port:     uint16(podUDP.Spec.Containers[0].Ports[0].ContainerPort),
		},
		name: portName1,
	}
	beUDPP2 := bePortInfo{
		l4Addr: lb.L4Addr{
			Protocol: udpStr,
			Port:     uint16(podUDP.Spec.Containers[0].Ports[1].ContainerPort),
		},
		name: portName2,
	}
	beAddrs := sets.New[lb.L3n4Addr]()
	beAddrs.Insert(lb.L3n4Addr{
		AddrCluster: cmtypes.MustParseAddrCluster(podUDP.Status.PodIP), L4Addr: beUDPP1.l4Addr})
	beAddrs.Insert(lb.L3n4Addr{
		AddrCluster: cmtypes.MustParseAddrCluster(podUDP.Status.PodIP), L4Addr: beUDPP2.l4Addr})
	pc := LRPConfig{
		id: k8s.ServiceID{
			Name:      "test-foo",
			Namespace: "ns1",
		},
		lrpType:      lrpConfigTypeAddr,
		frontendType: addrFrontendNamedPorts,
		frontendMappings: []*feMapping{{
			feAddr:      fe2,
			podBackends: nil,
			fePort:      beUDPP1.name,
		}, {
			feAddr:      fe2,
			podBackends: nil,
			fePort:      beUDPP2.name,
		}},
		backendSelector: api.EndpointSelector{
			LabelSelector: &slim_metav1.LabelSelector{
				MatchLabels: labels,
			},
		},
		backendPorts: []bePortInfo{beUDPP1, beUDPP2},
		backendPortsByPortName: map[string]*bePortInfo{
			beUDPP1.name: &beUDPP1,
			beUDPP2.name: &beUDPP2,
		},
	}

	// Add an LRP.
	added, err := m.rpm.AddRedirectPolicy(pc)

	require.True(t, added)
	require.NoError(t, err)

	// Add an LRP-selected pod with UDP ports.
	m.rpm.OnAddPod(podUDP)
	// Assert connection termination events asynchronously.
	wg := sync.WaitGroup{}
	wg.Add(1)
	got := 0
	go func() {
		for {
			addr := <-events
			if beAddrs.Has(addr) {
				got++
			}
			if got == beAddrs.Len() {
				wg.Done()
				break
			}
		}
	}()
	// Delete the pod.
	m.rpm.OnDeletePod(podUDP)

	wg.Wait()
}