k8s.io/kubernetes@v1.29.3/test/e2e/network/ingress.go

     1  /*
     2  Copyright 2015 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package network
    18  
    19  import (
    20  	"context"
    21  	"encoding/json"
    22  	"fmt"
    23  	"path/filepath"
    24  	"time"
    25  
    26  	v1 "k8s.io/api/core/v1"
    27  	networkingv1 "k8s.io/api/networking/v1"
    28  	rbacv1 "k8s.io/api/rbac/v1"
    29  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    32  	"k8s.io/apimachinery/pkg/runtime/schema"
    33  	types "k8s.io/apimachinery/pkg/types"
    34  	"k8s.io/apimachinery/pkg/util/wait"
    35  	"k8s.io/apimachinery/pkg/watch"
    36  	"k8s.io/apiserver/pkg/authentication/serviceaccount"
    37  	"k8s.io/client-go/util/retry"
    38  	"k8s.io/kubernetes/test/e2e/feature"
    39  	"k8s.io/kubernetes/test/e2e/framework"
    40  	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
    41  	e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress"
    42  	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
    43  	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
    44  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    45  	"k8s.io/kubernetes/test/e2e/network/common"
    46  	admissionapi "k8s.io/pod-security-admission/api"
    47  
    48  	"github.com/onsi/ginkgo/v2"
    49  	"github.com/onsi/gomega"
    50  )
    51  
    52  const (
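        	// negUpdateTimeout bounds how long the NEG tests poll for endpoint and NEG status
        	// changes to become observable after scaling a backend or updating annotations.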
    53  	negUpdateTimeout = 2 * time.Minute
    54  )
    55  
    56  var _ = common.SIGDescribe("Loadbalancing: L7", func() {
    57  	defer ginkgo.GinkgoRecover()
    58  	var (
    59  		ns               string
    60  		jig              *e2eingress.TestJig
    61  		conformanceTests []e2eingress.ConformanceTests
    62  	)
    63  	f := framework.NewDefaultFramework("ingress")
    64  	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
    65  
    66  	ginkgo.BeforeEach(func(ctx context.Context) {
    67  		jig = e2eingress.NewIngressTestJig(f.ClientSet)
    68  		ns = f.Namespace.Name
    69  
    70  		// This test wants powerful permissions. Since namespace names are unique, we can leave the
    71  		// binding lying around without having to race any caches.
    72  		err := e2eauth.BindClusterRole(ctx, jig.Client.RbacV1(), "cluster-admin", f.Namespace.Name,
    73  			rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
    74  		framework.ExpectNoError(err)
    75  
    76  		err = e2eauth.WaitForAuthorizationUpdate(ctx, jig.Client.AuthorizationV1(),
    77  			serviceaccount.MakeUsername(f.Namespace.Name, "default"),
    78  			"", "create", schema.GroupResource{Resource: "pods"}, true)
    79  		framework.ExpectNoError(err)
    80  	})
    81  
    82  	// Before enabling this load balancer test in any other test list, you must
    83  	// make sure the associated project has enough quota. At the time of this
    84  	// writing, a GCE project is allowed 3 backend services by default; this
    85  	// test requires at least 5.
    86  	//
    87  	// Slow by design: ~10m for each "It" block, dominated by load balancer setup time.
    88  	// TODO: write similar tests for nginx, haproxy and AWS Ingress.
    89  	f.Describe("GCE", framework.WithSlow(), feature.Ingress, func() {
    90  		var gceController *gce.IngressController
    91  
    92  		// Platform specific setup
    93  		ginkgo.BeforeEach(func(ctx context.Context) {
    94  			e2eskipper.SkipUnlessProviderIs("gce", "gke")
    95  			ginkgo.By("Initializing gce controller")
    96  			gceController = &gce.IngressController{
    97  				Ns:     ns,
    98  				Client: jig.Client,
    99  				Cloud:  framework.TestContext.CloudConfig,
   100  			}
   101  			err := gceController.Init(ctx)
   102  			framework.ExpectNoError(err)
   103  		})
   104  
   105  		// Platform specific cleanup
   106  		ginkgo.AfterEach(func(ctx context.Context) {
   107  			if ginkgo.CurrentSpecReport().Failed() {
   108  				e2eingress.DescribeIng(ns)
   109  			}
   110  			if jig.Ingress == nil {
   111  				ginkgo.By("No ingress created, no cleanup necessary")
   112  				return
   113  			}
   114  			ginkgo.By("Deleting ingress")
   115  			jig.TryDeleteIngress(ctx)
   116  
   117  			ginkgo.By("Cleaning up cloud resources")
   118  			err := gceController.CleanupIngressController(ctx)
   119  			framework.ExpectNoError(err)
   120  		})
   121  
   122  		ginkgo.It("should conform to Ingress spec", func(ctx context.Context) {
   123  			conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{})
   124  			for _, t := range conformanceTests {
   125  				ginkgo.By(t.EntryLog)
   126  				t.Execute()
   127  				ginkgo.By(t.ExitLog)
   128  				jig.WaitForIngress(ctx, true)
   129  			}
   130  		})
   131  
   132  	})
   133  
   134  	f.Describe("GCE", framework.WithSlow(), feature.NEG, func() {
   135  		var gceController *gce.IngressController
   136  
   137  		// Platform specific setup
   138  		ginkgo.BeforeEach(func(ctx context.Context) {
   139  			e2eskipper.SkipUnlessProviderIs("gce", "gke")
   140  			ginkgo.By("Initializing gce controller")
   141  			gceController = &gce.IngressController{
   142  				Ns:     ns,
   143  				Client: jig.Client,
   144  				Cloud:  framework.TestContext.CloudConfig,
   145  			}
   146  			err := gceController.Init(ctx)
   147  			framework.ExpectNoError(err)
   148  		})
   149  
   150  		// Platform specific cleanup
   151  		ginkgo.AfterEach(func(ctx context.Context) {
   152  			if ginkgo.CurrentSpecReport().Failed() {
   153  				e2eingress.DescribeIng(ns)
   154  			}
   155  			if jig.Ingress == nil {
   156  				ginkgo.By("No ingress created, no cleanup necessary")
   157  				return
   158  			}
   159  			ginkgo.By("Deleting ingress")
   160  			jig.TryDeleteIngress(ctx)
   161  
   162  			ginkgo.By("Cleaning up cloud resources")
   163  			err := gceController.CleanupIngressController(ctx)
   164  			framework.ExpectNoError(err)
   165  		})
   166  
   167  		ginkgo.It("should conform to Ingress spec", func(ctx context.Context) {
   168  			jig.PollInterval = 5 * time.Second
   169  			conformanceTests = e2eingress.CreateIngressComformanceTests(ctx, jig, ns, map[string]string{
   170  				e2eingress.NEGAnnotation: `{"ingress": true}`,
   171  			})
   172  			for _, t := range conformanceTests {
   173  				ginkgo.By(t.EntryLog)
   174  				t.Execute()
   175  				ginkgo.By(t.ExitLog)
   176  				jig.WaitForIngress(ctx, true)
   177  				err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
   178  				framework.ExpectNoError(err)
   179  			}
   180  		})
   181  
   182  		ginkgo.It("should be able to switch between IG and NEG modes", func(ctx context.Context) {
   183  			var err error
   184  			propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet)
   185  			ginkgo.By("Create a basic HTTP ingress using NEG")
   186  			jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
   187  			jig.WaitForIngress(ctx, true)
   188  			err = gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
   189  			framework.ExpectNoError(err)
   190  
   191  			ginkgo.By("Switch backend service to use IG")
   192  			svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
   193  			framework.ExpectNoError(err)
   194  			for _, svc := range svcList.Items {
   195  				svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": false}`
   196  				_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
   197  				framework.ExpectNoError(err)
   198  			}
   199  			err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) {
   200  				if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false)); err != nil {
    201  					framework.Logf("Failed to verify IG backend service: %v", err)
   202  					return false, nil
   203  				}
   204  				return true, nil
   205  			})
   206  			framework.ExpectNoError(err, "Expect backend service to target IG, but failed to observe")
   207  			jig.WaitForIngress(ctx, true)
   208  
   209  			ginkgo.By("Switch backend service to use NEG")
   210  			svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
   211  			framework.ExpectNoError(err)
   212  			for _, svc := range svcList.Items {
   213  				svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress": true}`
   214  				_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
   215  				framework.ExpectNoError(err)
   216  			}
   217  			err = wait.PollWithContext(ctx, 5*time.Second, propagationTimeout, func(ctx context.Context) (bool, error) {
   218  				if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false)); err != nil {
    219  					framework.Logf("Failed to verify NEG backend service: %v", err)
   220  					return false, nil
   221  				}
   222  				return true, nil
   223  			})
   224  			framework.ExpectNoError(err, "Expect backend service to target NEG, but failed to observe")
   225  			jig.WaitForIngress(ctx, true)
   226  		})
   227  
   228  		ginkgo.It("should be able to create a ClusterIP service", func(ctx context.Context) {
   229  			ginkgo.By("Create a basic HTTP ingress using NEG")
   230  			jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
   231  			jig.WaitForIngress(ctx, true)
   232  			svcPorts := jig.GetServicePorts(ctx, false)
   233  			err := gceController.WaitForNegBackendService(ctx, svcPorts)
   234  			framework.ExpectNoError(err)
   235  
   236  			// ClusterIP ServicePorts have no NodePort
   237  			for _, sp := range svcPorts {
   238  				gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0)))
   239  			}
   240  		})
   241  
   242  		ginkgo.It("should sync endpoints to NEG", func(ctx context.Context) {
   243  			name := "hostname"
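        			// scaleAndValidateNEG scales the "hostname" deployment to num replicas and then
        			// polls the ingress until the number of distinct backend responses equals num,
        			// indicating the NEG has been synced with the new endpoints.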
   244  			scaleAndValidateNEG := func(num int) {
   245  				scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
   246  				framework.ExpectNoError(err)
   247  				if scale.Spec.Replicas != int32(num) {
   248  					scale.ResourceVersion = "" // indicate the scale update should be unconditional
   249  					scale.Spec.Replicas = int32(num)
   250  					_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
   251  					framework.ExpectNoError(err)
   252  				}
   253  				err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
   254  					res, err := jig.GetDistinctResponseFromIngress(ctx)
   255  					if err != nil {
   256  						return false, nil
   257  					}
   258  					framework.Logf("Expecting %d backends, got %d", num, res.Len())
   259  					return res.Len() == num, nil
   260  				})
   261  				framework.ExpectNoError(err)
   262  			}
   263  
   264  			ginkgo.By("Create a basic HTTP ingress using NEG")
   265  			jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
   266  			jig.WaitForIngress(ctx, true)
   267  			jig.WaitForIngressToStable(ctx)
   268  			err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
   269  			framework.ExpectNoError(err)
    270  			// the initial replica count is 1
   271  			scaleAndValidateNEG(1)
   272  
   273  			ginkgo.By("Scale up number of backends to 5")
   274  			scaleAndValidateNEG(5)
   275  
   276  			ginkgo.By("Scale down number of backends to 3")
   277  			scaleAndValidateNEG(3)
   278  
   279  			ginkgo.By("Scale up number of backends to 6")
   280  			scaleAndValidateNEG(6)
   281  
   282  			ginkgo.By("Scale down number of backends to 2")
    283  			scaleAndValidateNEG(2)
   284  		})
   285  
   286  		ginkgo.It("rolling update backend pods should not cause service disruption", func(ctx context.Context) {
   287  			name := "hostname"
   288  			replicas := 8
   289  			ginkgo.By("Create a basic HTTP ingress using NEG")
   290  			jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
   291  			jig.WaitForIngress(ctx, true)
   292  			jig.WaitForIngressToStable(ctx)
   293  			err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
   294  			framework.ExpectNoError(err)
   295  
   296  			ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas))
   297  			scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
   298  			framework.ExpectNoError(err)
   299  			scale.ResourceVersion = "" // indicate the scale update should be unconditional
   300  			scale.Spec.Replicas = int32(replicas)
   301  			_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
   302  			framework.ExpectNoError(err)
   303  
   304  			propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(ctx, f.ClientSet)
   305  			err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
   306  				res, err := jig.GetDistinctResponseFromIngress(ctx)
   307  				if err != nil {
   308  					return false, nil
   309  				}
   310  				return res.Len() == replicas, nil
   311  			})
   312  			framework.ExpectNoError(err)
   313  
   314  			ginkgo.By("Trigger rolling update and observe service disruption")
   315  			deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
   316  			framework.ExpectNoError(err)
    317  			// trigger the rolling update by changing the graceful termination period to 60 seconds
   318  			gracePeriod := int64(60)
   319  			deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
   320  			_, err = f.ClientSet.AppsV1().Deployments(ns).Update(ctx, deploy, metav1.UpdateOptions{})
   321  			framework.ExpectNoError(err)
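        			// Keep sending traffic through the ingress while the rolling update progresses.
        			// Any request failure aborts the poll with an error and fails the test, signaling
        			// a service disruption; success requires every updated replica to respond.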
   322  			err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
   323  				res, err := jig.GetDistinctResponseFromIngress(ctx)
   324  				if err != nil {
   325  					return false, err
   326  				}
   327  				deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
   328  				if err != nil {
   329  					return false, err
   330  				}
   331  				if int(deploy.Status.UpdatedReplicas) == replicas {
   332  					if res.Len() == replicas {
   333  						return true, nil
   334  					}
   335  					framework.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
   336  					return false, nil
   337  
   338  				}
    339  				framework.Logf("Waiting for rolling update to finish. Keep sending traffic.")
   340  				return false, nil
   341  			})
   342  			framework.ExpectNoError(err)
   343  		})
   344  
   345  		ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func(ctx context.Context) {
   346  			name := "hostname"
   347  			expectedKeys := []int32{80, 443}
   348  
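        			// scaleAndValidateExposedNEG scales the "hostname" deployment to num replicas and
        			// then polls the Service's NEG status annotation, checking that both exposed ports
        			// (80 and 443) have a NEG and that each NEG reports num endpoints via the GCE API.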
   349  			scaleAndValidateExposedNEG := func(num int) {
   350  				scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
   351  				framework.ExpectNoError(err)
   352  				if scale.Spec.Replicas != int32(num) {
   353  					scale.ResourceVersion = "" // indicate the scale update should be unconditional
   354  					scale.Spec.Replicas = int32(num)
   355  					_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
   356  					framework.ExpectNoError(err)
   357  				}
   358  				err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
   359  					svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
   360  					framework.ExpectNoError(err)
   361  
   362  					var status e2eingress.NegStatus
   363  					v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
   364  					if !ok {
   365  						// Wait for NEG sync loop to find NEGs
   366  						framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
   367  						return false, nil
   368  					}
   369  					err = json.Unmarshal([]byte(v), &status)
   370  					if err != nil {
   371  						framework.Logf("Error in parsing Expose NEG annotation: %v", err)
   372  						return false, nil
   373  					}
   374  					framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)
   375  
   376  					// Expect 2 NEGs to be created based on the test setup (neg-exposed)
   377  					if len(status.NetworkEndpointGroups) != 2 {
   378  						framework.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
   379  						return false, nil
   380  					}
   381  
   382  					for _, port := range expectedKeys {
   383  						if _, ok := status.NetworkEndpointGroups[port]; !ok {
    384  							framework.Logf("Expected ServicePort key %v, but it does not exist", port)
   385  						}
   386  					}
   387  
   388  					if len(status.NetworkEndpointGroups) != len(expectedKeys) {
    389  						framework.Logf("Expected length of %+v to equal length of %+v, but it does not", status.NetworkEndpointGroups, expectedKeys)
   390  					}
   391  
   392  					gceCloud, err := gce.GetGCECloud()
   393  					framework.ExpectNoError(err)
   394  					for _, neg := range status.NetworkEndpointGroups {
   395  						networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
   396  						framework.ExpectNoError(err)
   397  						if len(networkEndpoints) != num {
   398  							framework.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
   399  							return false, nil
   400  						}
   401  					}
   402  
   403  					return true, nil
   404  				})
   405  				framework.ExpectNoError(err)
   406  			}
   407  
   408  			ginkgo.By("Create a basic HTTP ingress using NEG")
   409  			jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
   410  			jig.WaitForIngress(ctx, true)
   411  			err := gceController.WaitForNegBackendService(ctx, jig.GetServicePorts(ctx, false))
   412  			framework.ExpectNoError(err)
    413  			// the initial replica count is 1
   414  			scaleAndValidateExposedNEG(1)
   415  
   416  			ginkgo.By("Scale up number of backends to 5")
   417  			scaleAndValidateExposedNEG(5)
   418  
   419  			ginkgo.By("Scale down number of backends to 3")
   420  			scaleAndValidateExposedNEG(3)
   421  
   422  			ginkgo.By("Scale up number of backends to 6")
   423  			scaleAndValidateExposedNEG(6)
   424  
   425  			ginkgo.By("Scale down number of backends to 2")
    426  			scaleAndValidateExposedNEG(2)
   427  		})
   428  
   429  		ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func(ctx context.Context) {
   430  			ginkgo.By("Create a basic HTTP ingress using standalone NEG")
   431  			jig.CreateIngress(ctx, filepath.Join(e2eingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
   432  			jig.WaitForIngress(ctx, true)
   433  
   434  			name := "hostname"
   435  			detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)
   436  
   437  			// Add Ingress annotation - NEGs should stay the same.
   438  			ginkgo.By("Adding NEG Ingress annotation")
   439  			svcList, err := f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
   440  			framework.ExpectNoError(err)
   441  			for _, svc := range svcList.Items {
   442  				svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
   443  				_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
   444  				framework.ExpectNoError(err)
   445  			}
   446  			detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)
   447  
   448  			// Modify exposed NEG annotation, but keep ingress annotation
   449  			ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation")
   450  			svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
   451  			framework.ExpectNoError(err)
   452  			for _, svc := range svcList.Items {
   453  				svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
   454  				_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
   455  				framework.ExpectNoError(err)
   456  			}
   457  			detectNegAnnotation(ctx, f, jig, gceController, ns, name, 2)
   458  
   459  			// Remove Ingress annotation. Expect 1 NEG
   460  			ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG")
   461  			svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
   462  			framework.ExpectNoError(err)
   463  			for _, svc := range svcList.Items {
   464  				svc.Annotations[e2eingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
   465  				_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
   466  				framework.ExpectNoError(err)
   467  			}
   468  			detectNegAnnotation(ctx, f, jig, gceController, ns, name, 1)
   469  
   470  			// Remove NEG annotation entirely. Expect 0 NEGs.
   471  			ginkgo.By("Removing NEG annotation")
   472  			svcList, err = f.ClientSet.CoreV1().Services(ns).List(ctx, metav1.ListOptions{})
   473  			framework.ExpectNoError(err)
   474  			for _, svc := range svcList.Items {
   475  				delete(svc.Annotations, e2eingress.NEGAnnotation)
   476  				// Service cannot be ClusterIP if it's using Instance Groups.
   477  				svc.Spec.Type = v1.ServiceTypeNodePort
   478  				_, err = f.ClientSet.CoreV1().Services(ns).Update(ctx, &svc, metav1.UpdateOptions{})
   479  				framework.ExpectNoError(err)
   480  			}
   481  			detectNegAnnotation(ctx, f, jig, gceController, ns, name, 0)
   482  		})
   483  	})
   484  })
   485  
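        // detectNegAnnotation polls the named Service until its NEG status annotation reports the
        // expected number of NEGs, each NEG contains exactly one endpoint, and the GCE backend
        // service is using NEGs. When negs is 0, it instead waits for the backend service to use
        // instance groups.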
   486  func detectNegAnnotation(ctx context.Context, f *framework.Framework, jig *e2eingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
   487  	if err := wait.Poll(5*time.Second, negUpdateTimeout, func() (bool, error) {
   488  		svc, err := f.ClientSet.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
   489  		if err != nil {
   490  			return false, nil
   491  		}
   492  
   493  		// if we expect no NEGs, then we should be using IGs
   494  		if negs == 0 {
   495  			err := gceController.BackendServiceUsingIG(jig.GetServicePorts(ctx, false))
   496  			if err != nil {
    497  				framework.Logf("Failed to validate IG backend service: %v", err)
   498  				return false, nil
   499  			}
   500  			return true, nil
   501  		}
   502  
   503  		var status e2eingress.NegStatus
   504  		v, ok := svc.Annotations[e2eingress.NEGStatusAnnotation]
   505  		if !ok {
   506  			framework.Logf("Waiting for %v, got: %+v", e2eingress.NEGStatusAnnotation, svc.Annotations)
   507  			return false, nil
   508  		}
   509  
   510  		err = json.Unmarshal([]byte(v), &status)
   511  		if err != nil {
   512  			framework.Logf("Error in parsing Expose NEG annotation: %v", err)
   513  			return false, nil
   514  		}
   515  		framework.Logf("Got %v: %v", e2eingress.NEGStatusAnnotation, v)
   516  
   517  		if len(status.NetworkEndpointGroups) != negs {
   518  			framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
   519  			return false, nil
   520  		}
   521  
   522  		gceCloud, err := gce.GetGCECloud()
   523  		framework.ExpectNoError(err)
   524  		for _, neg := range status.NetworkEndpointGroups {
   525  			networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
   526  			framework.ExpectNoError(err)
   527  			if len(networkEndpoints) != 1 {
    528  				framework.Logf("Expect 1 endpoint in NEG %s, but got %d", neg, len(networkEndpoints))
   529  				return false, nil
   530  			}
   531  		}
   532  
   533  		err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(ctx, false))
   534  		if err != nil {
    535  			framework.Logf("Failed to validate NEG backend service: %v", err)
   536  			return false, nil
   537  		}
   538  		return true, nil
   539  	}); err != nil {
   540  		framework.ExpectNoError(err)
   541  	}
   542  }
   543  
   544  var _ = common.SIGDescribe("Ingress API", func() {
   545  	f := framework.NewDefaultFramework("ingress")
   546  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   547  	/*
   548  		Release: v1.19
   549  		Testname: Ingress API
   550  		Description:
   551  		The networking.k8s.io API group MUST exist in the /apis discovery document.
   552  		The networking.k8s.io/v1 API group/version MUST exist in the /apis/networking.k8s.io discovery document.
   553  		The ingresses resources MUST exist in the /apis/networking.k8s.io/v1 discovery document.
   554  		The ingresses resource must support create, get, list, watch, update, patch, delete, and deletecollection.
   555  		The ingresses/status resource must support update and patch
   556  	*/
   557  
   558  	framework.ConformanceIt("should support creating Ingress API operations", func(ctx context.Context) {
   559  		// Setup
   560  		ns := f.Namespace.Name
   561  		ingVersion := "v1"
   562  		ingClient := f.ClientSet.NetworkingV1().Ingresses(ns)
   563  
   564  		prefixPathType := networkingv1.PathTypeImplementationSpecific
   565  		serviceBackend := &networkingv1.IngressServiceBackend{
   566  			Name: "default-backend",
   567  			Port: networkingv1.ServiceBackendPort{
   568  				Name:   "",
   569  				Number: 8080,
   570  			},
   571  		}
   572  		defaultBackend := networkingv1.IngressBackend{
   573  			Service: serviceBackend,
   574  		}
   575  
   576  		ingTemplate := &networkingv1.Ingress{
   577  			ObjectMeta: metav1.ObjectMeta{GenerateName: "e2e-example-ing",
   578  				Labels: map[string]string{
   579  					"special-label": f.UniqueName,
   580  				}},
   581  			Spec: networkingv1.IngressSpec{
   582  				DefaultBackend: &defaultBackend,
   583  				Rules: []networkingv1.IngressRule{
   584  					{
   585  						Host: "foo.bar.com",
   586  						IngressRuleValue: networkingv1.IngressRuleValue{
   587  							HTTP: &networkingv1.HTTPIngressRuleValue{
   588  								Paths: []networkingv1.HTTPIngressPath{{
   589  									Path:     "/",
   590  									PathType: &prefixPathType,
   591  									Backend: networkingv1.IngressBackend{
   592  										Service: &networkingv1.IngressServiceBackend{
   593  											Name: "test-backend",
   594  											Port: networkingv1.ServiceBackendPort{
   595  												Number: 8080,
   596  											},
   597  										},
   598  									},
   599  								}},
   600  							},
   601  						},
   602  					},
   603  				},
   604  			},
   605  			Status: networkingv1.IngressStatus{LoadBalancer: networkingv1.IngressLoadBalancerStatus{}},
   606  		}
   607  
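        		// Stamp out three Ingresses from the template, differing only in host, so that the
        		// label-filtered list, watch, and deletecollection calls operate on a known set.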
   608  		ingress1 := ingTemplate.DeepCopy()
   609  		ingress1.Spec.Rules[0].Host = "host1.bar.com"
   610  		ingress2 := ingTemplate.DeepCopy()
   611  		ingress2.Spec.Rules[0].Host = "host2.bar.com"
   612  		ingress3 := ingTemplate.DeepCopy()
   613  		ingress3.Spec.Rules[0].Host = "host3.bar.com"
   614  
   615  		// Discovery
   616  		ginkgo.By("getting /apis")
   617  		{
   618  			discoveryGroups, err := f.ClientSet.Discovery().ServerGroups()
   619  			framework.ExpectNoError(err)
   620  			found := false
   621  			for _, group := range discoveryGroups.Groups {
   622  				if group.Name == networkingv1.GroupName {
   623  					for _, version := range group.Versions {
   624  						if version.Version == ingVersion {
   625  							found = true
   626  							break
   627  						}
   628  					}
   629  				}
   630  			}
   631  			if !found {
   632  				framework.Failf("expected networking API group/version, got %#v", discoveryGroups.Groups)
   633  			}
   634  		}
   635  
   636  		ginkgo.By("getting /apis/networking.k8s.io")
   637  		{
   638  			group := &metav1.APIGroup{}
   639  			err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(ctx).Into(group)
   640  			framework.ExpectNoError(err)
   641  			found := false
   642  			for _, version := range group.Versions {
   643  				if version.Version == ingVersion {
   644  					found = true
   645  					break
   646  				}
   647  			}
   648  			if !found {
   649  				framework.Failf("expected networking API version, got %#v", group.Versions)
   650  			}
   651  		}
   652  
    653  		ginkgo.By("getting /apis/networking.k8s.io/" + ingVersion)
   654  		{
   655  			resources, err := f.ClientSet.Discovery().ServerResourcesForGroupVersion(networkingv1.SchemeGroupVersion.String())
   656  			framework.ExpectNoError(err)
   657  			foundIngress := false
   658  			for _, resource := range resources.APIResources {
   659  				switch resource.Name {
   660  				case "ingresses":
   661  					foundIngress = true
   662  				}
   663  			}
   664  			if !foundIngress {
   665  				framework.Failf("expected ingresses, got %#v", resources.APIResources)
   666  			}
   667  		}
   668  
   669  		// Ingress resource create/read/update/watch verbs
   670  		ginkgo.By("creating")
   671  		_, err := ingClient.Create(ctx, ingress1, metav1.CreateOptions{})
   672  		framework.ExpectNoError(err)
   673  		_, err = ingClient.Create(ctx, ingress2, metav1.CreateOptions{})
   674  		framework.ExpectNoError(err)
   675  		createdIngress, err := ingClient.Create(ctx, ingress3, metav1.CreateOptions{})
   676  		framework.ExpectNoError(err)
   677  
   678  		ginkgo.By("getting")
   679  		gottenIngress, err := ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{})
   680  		framework.ExpectNoError(err)
   681  		gomega.Expect(gottenIngress.UID).To(gomega.Equal(createdIngress.UID))
   682  
   683  		ginkgo.By("listing")
   684  		ings, err := ingClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
   685  		framework.ExpectNoError(err)
   686  		gomega.Expect(ings.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
   687  
   688  		ginkgo.By("watching")
   689  		framework.Logf("starting watch")
   690  		ingWatch, err := ingClient.Watch(ctx, metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
   691  		framework.ExpectNoError(err)
   692  
   693  		// Test cluster-wide list and watch
   694  		clusterIngClient := f.ClientSet.NetworkingV1().Ingresses("")
   695  		ginkgo.By("cluster-wide listing")
   696  		clusterIngs, err := clusterIngClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
   697  		framework.ExpectNoError(err)
   698  		gomega.Expect(clusterIngs.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
   699  
   700  		ginkgo.By("cluster-wide watching")
   701  		framework.Logf("starting watch")
   702  		_, err = clusterIngClient.Watch(ctx, metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
   703  		framework.ExpectNoError(err)
   704  
   705  		ginkgo.By("patching")
   706  		patchedIngress, err := ingClient.Patch(ctx, createdIngress.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
   707  		framework.ExpectNoError(err)
   708  		gomega.Expect(patchedIngress.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
   709  
   710  		ginkgo.By("updating")
   711  		var ingToUpdate, updatedIngress *networkingv1.Ingress
   712  		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
   713  			ingToUpdate, err = ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{})
   714  			if err != nil {
   715  				return err
   716  			}
   717  			ingToUpdate.Annotations["updated"] = "true"
   718  			updatedIngress, err = ingClient.Update(ctx, ingToUpdate, metav1.UpdateOptions{})
   719  			return err
   720  		})
   721  		framework.ExpectNoError(err)
   722  		gomega.Expect(updatedIngress.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
   723  
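        		// Consume watch events until one carries the "patched" annotation applied above;
        		// Modified events without it are logged and skipped.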
   724  		framework.Logf("waiting for watch events with expected annotations")
   725  		for sawAnnotations := false; !sawAnnotations; {
   726  			select {
   727  			case evt, ok := <-ingWatch.ResultChan():
   728  				if !ok {
   729  					framework.Fail("watch channel should not close")
   730  				}
   731  				gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
   732  				watchedIngress, isIngress := evt.Object.(*networkingv1.Ingress)
   733  				if !isIngress {
   734  					framework.Failf("expected Ingress, got %T", evt.Object)
   735  				}
   736  				if watchedIngress.Annotations["patched"] == "true" {
    737  					framework.Logf("saw patched annotation")
   738  					sawAnnotations = true
   739  					ingWatch.Stop()
   740  				} else {
   741  					framework.Logf("missing expected annotations, waiting: %#v", watchedIngress.Annotations)
   742  				}
   743  			case <-time.After(wait.ForeverTestTimeout):
   744  				framework.Fail("timed out waiting for watch event")
   745  			}
   746  		}
   747  
   748  		// /status subresource operations
   749  		ginkgo.By("patching /status")
   750  		lbStatus := networkingv1.IngressLoadBalancerStatus{
   751  			Ingress: []networkingv1.IngressLoadBalancerIngress{{IP: "169.1.1.1"}},
   752  		}
   753  		lbStatusJSON, err := json.Marshal(lbStatus)
   754  		framework.ExpectNoError(err)
   755  		patchedStatus, err := ingClient.Patch(ctx, createdIngress.Name, types.MergePatchType,
   756  			[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"loadBalancer":`+string(lbStatusJSON)+`}}`),
   757  			metav1.PatchOptions{}, "status")
   758  		framework.ExpectNoError(err)
   759  		gomega.Expect(patchedStatus.Status.LoadBalancer).To(gomega.Equal(lbStatus), "patched object should have the applied loadBalancer status")
   760  		gomega.Expect(patchedStatus.Annotations).To(gomega.HaveKeyWithValue("patchedstatus", "true"), "patched object should have the applied annotation")
   761  
   762  		ginkgo.By("updating /status")
   763  		var statusToUpdate, updatedStatus *networkingv1.Ingress
   764  		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
   765  			statusToUpdate, err = ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{})
   766  			if err != nil {
   767  				return err
   768  			}
   769  			statusToUpdate.Status.LoadBalancer = networkingv1.IngressLoadBalancerStatus{
   770  				Ingress: []networkingv1.IngressLoadBalancerIngress{{IP: "169.1.1.2"}},
   771  			}
   772  			updatedStatus, err = ingClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
   773  			return err
   774  		})
   775  		framework.ExpectNoError(err)
   776  		gomega.Expect(updatedStatus.Status.LoadBalancer).To(gomega.Equal(statusToUpdate.Status.LoadBalancer), "updated object expected to have updated loadbalancer status %#v, got %#v", statusToUpdate.Status.LoadBalancer, updatedStatus.Status.LoadBalancer)
   777  
   778  		ginkgo.By("get /status")
   779  		ingResource := schema.GroupVersionResource{Group: "networking.k8s.io", Version: ingVersion, Resource: "ingresses"}
   780  		gottenStatus, err := f.DynamicClient.Resource(ingResource).Namespace(ns).Get(ctx, createdIngress.Name, metav1.GetOptions{}, "status")
   781  		framework.ExpectNoError(err)
   782  		statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
   783  		framework.ExpectNoError(err)
   784  		gomega.Expect(string(createdIngress.UID)).To(gomega.Equal(statusUID), "createdIngress.UID: %v expected to match statusUID: %v ", createdIngress.UID, statusUID)
   785  
   786  		// Ingress resource delete operations
   787  		ginkgo.By("deleting")
   788  
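        		// expectFinalizer asserts that a deleted-but-not-yet-removed Ingress carries a
        		// deletionTimestamp and at least one finalizer.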
   789  		expectFinalizer := func(ing *networkingv1.Ingress, msg string) {
   790  			gomega.Expect(ing.DeletionTimestamp).ToNot(gomega.BeNil(), "expected deletionTimestamp, got nil on step: %q, ingress: %+v", msg, ing)
   791  			if len(ing.Finalizers) == 0 {
   792  				framework.Failf("expected finalizers on ingress, got none on step: %q, ingress: %+v", msg, ing)
   793  			}
   794  		}
   795  
   796  		err = ingClient.Delete(ctx, createdIngress.Name, metav1.DeleteOptions{})
   797  		framework.ExpectNoError(err)
   798  		ing, err := ingClient.Get(ctx, createdIngress.Name, metav1.GetOptions{})
    799  		// If the ingress controller does not support finalizers, we expect a 404; otherwise we validate finalizer behavior.
   800  		if err == nil {
   801  			expectFinalizer(ing, "deleting createdIngress")
    802  		} else if !apierrors.IsNotFound(err) {
    803  			framework.Failf("expected 404, got %v", err)
    804  		}
   807  		ings, err = ingClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
   808  		framework.ExpectNoError(err)
   809  		// Should have <= 3 items since some ingresses might not have been deleted yet due to finalizers
   810  		if len(ings.Items) > 3 {
   811  			framework.Fail("filtered list should have <= 3 items")
   812  		}
   813  		// Validate finalizer on the deleted ingress
   814  		for _, ing := range ings.Items {
   815  			if ing.Namespace == createdIngress.Namespace && ing.Name == createdIngress.Name {
   816  				expectFinalizer(&ing, "listing after deleting createdIngress")
   817  			}
   818  		}
   819  
   820  		ginkgo.By("deleting a collection")
   821  		err = ingClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
   822  		framework.ExpectNoError(err)
   823  		ings, err = ingClient.List(ctx, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
   824  		framework.ExpectNoError(err)
   825  		// Should have <= 3 items since some ingresses might not have been deleted yet due to finalizers
   826  		if len(ings.Items) > 3 {
   827  			framework.Fail("filtered list should have <= 3 items")
   828  		}
   829  		// Validate finalizers
   830  		for _, ing := range ings.Items {
   831  			expectFinalizer(&ing, "deleting ingress collection")
   832  		}
   833  	})
   834  })