k8s.io/kubernetes@v1.29.3/test/e2e/common/node/runtimeclass.go (about)

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package node
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"time"
    23  
    24  	v1 "k8s.io/api/core/v1"
    25  	nodev1 "k8s.io/api/node/v1"
    26  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    27  	"k8s.io/apimachinery/pkg/api/resource"
    28  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    29  	"k8s.io/apimachinery/pkg/fields"
    30  	"k8s.io/apimachinery/pkg/labels"
    31  	types "k8s.io/apimachinery/pkg/types"
    32  	"k8s.io/apimachinery/pkg/util/wait"
    33  	"k8s.io/apimachinery/pkg/watch"
    34  	"k8s.io/kubernetes/pkg/kubelet/events"
    35  	runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
    36  	"k8s.io/kubernetes/test/e2e/framework"
    37  	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
    38  	e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass"
    39  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    40  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    41  	"k8s.io/kubernetes/test/e2e/nodefeature"
    42  	admissionapi "k8s.io/pod-security-admission/api"
    43  
    44  	"github.com/onsi/ginkgo/v2"
    45  	"github.com/onsi/gomega"
    46  )
    47  
// This suite covers RuntimeClass behavior end to end: rejection of pods that
// reference missing/deleted RuntimeClasses, scheduling with and without
// PodOverhead, handler-dependent runtime behavior, and full CRUD+watch
// coverage of the node.k8s.io/v1 RuntimeClasses API.
var _ = SIGDescribe("RuntimeClass", func() {
	f := framework.NewDefaultFramework("runtimeclass")
	// Privileged PSA level: some of these pods use runtime handlers that the
	// baseline/restricted profiles would reject before the test logic runs.
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	/*
		Release: v1.20
		Testname: Pod with the non-existing RuntimeClass is rejected.
		Description: The Pod requesting the non-existing RuntimeClass must be rejected.
	*/
	framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass", f.WithNodeConformance(), func(ctx context.Context) {
		// Namespace-prefixed name guarantees no such RuntimeClass exists.
		rcName := f.Namespace.Name + "-nonexistent"
		expectPodRejection(ctx, f, e2eruntimeclass.NewRuntimeClassPod(rcName))
	})

	// The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed.
	f.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", nodefeature.RuntimeHandler, func(ctx context.Context) {
		// The RuntimeClass object exists, but its handler is unknown to the
		// container runtime, so sandbox creation must fail on the node.
		handler := f.Namespace.Name + "-handler"
		rcName := createRuntimeClass(ctx, f, "unconfigured-handler", handler, nil)
		ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
		pod := e2epod.NewPodClient(f).Create(ctx, e2eruntimeclass.NewRuntimeClassPod(rcName))
		eventSelector := fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      pod.Name,
			"involvedObject.namespace": f.Namespace.Name,
			"reason":                   events.FailedCreatePodSandBox,
		}.AsSelector().String()
		// Events are unreliable, don't depend on the event. It's used only to speed up the test.
		err := e2eevents.WaitTimeoutForEvent(ctx, f.ClientSet, f.Namespace.Name, eventSelector, handler, framework.PodEventTimeout)
		if err != nil {
			framework.Logf("Warning: did not get event about FailedCreatePodSandBox. Err: %v", err)
		}
		// Check the pod is still not running
		p, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
		gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodPending), "Pod phase isn't pending")
	})

	// This test requires that the PreconfiguredRuntimeClassHandler has already been set up on nodes.
	// The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler installed and working.
	f.It("should run a Pod requesting a RuntimeClass with a configured handler", nodefeature.RuntimeHandler, func(ctx context.Context) {
		if err := e2eruntimeclass.NodeSupportsPreconfiguredRuntimeClassHandler(ctx, f); err != nil {
			e2eskipper.Skipf("Skipping test as node does not have E2E runtime class handler preconfigured in container runtime config: %v", err)
		}

		rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2eruntimeclass.PreconfiguredRuntimeClassHandler, nil)
		ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
		pod := e2epod.NewPodClient(f).Create(ctx, e2eruntimeclass.NewRuntimeClassPod(rcName))
		expectPodSuccess(ctx, f, pod)
	})

	/*
		Release: v1.20
		Testname: Can schedule a pod requesting existing RuntimeClass.
		Description: The Pod requesting the existing RuntimeClass must be scheduled.
		This test doesn't validate that the Pod will actually start because this functionality
		depends on container runtime and preconfigured handler. Runtime-specific functionality
		is not being tested here.
	*/
	framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead", f.WithNodeConformance(), func(ctx context.Context) {
		rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2eruntimeclass.PreconfiguredRuntimeClassHandler, nil)
		ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
		pod := e2epod.NewPodClient(f).Create(ctx, e2eruntimeclass.NewRuntimeClassPod(rcName))
		// there is only one pod in the namespace
		label := labels.SelectorFromSet(labels.Set(map[string]string{}))
		pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label)
		framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass")

		gomega.Expect(pods.Items).To(gomega.HaveLen(1))
		scheduledPod := &pods.Items[0]
		gomega.Expect(scheduledPod.Name).To(gomega.Equal(pod.Name))

		// Overhead should not be set
		gomega.Expect(scheduledPod.Spec.Overhead).To(gomega.BeEmpty())
	})

	/*
		Release: v1.24
		Testname: RuntimeClass Overhead field must be respected.
		Description: The Pod requesting the existing RuntimeClass must be scheduled.
		This test doesn't validate that the Pod will actually start because this functionality
		depends on container runtime and preconfigured handler. Runtime-specific functionality
		is not being tested here.
	*/
	framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead", f.WithNodeConformance(), func(ctx context.Context) {
		// The RuntimeClass admission controller must copy the class Overhead
		// into the pod spec; that copy is what is asserted below.
		rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2eruntimeclass.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{
			PodFixed: v1.ResourceList{
				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("10m"),
				v1.ResourceName(v1.ResourceMemory): resource.MustParse("1Mi"),
			},
		})
		ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
		pod := e2epod.NewPodClient(f).Create(ctx, e2eruntimeclass.NewRuntimeClassPod(rcName))
		// there is only one pod in the namespace
		label := labels.SelectorFromSet(labels.Set(map[string]string{}))
		pods, err := e2epod.WaitForPodsWithLabelScheduled(ctx, f.ClientSet, f.Namespace.Name, label)
		framework.ExpectNoError(err, "Failed to schedule Pod with the RuntimeClass")

		gomega.Expect(pods.Items).To(gomega.HaveLen(1))
		scheduledPod := &pods.Items[0]
		gomega.Expect(scheduledPod.Name).To(gomega.Equal(pod.Name))

		gomega.Expect(scheduledPod.Spec.Overhead[v1.ResourceCPU]).To(gomega.Equal(resource.MustParse("10m")))
		gomega.Expect(scheduledPod.Spec.Overhead[v1.ResourceMemory]).To(gomega.Equal(resource.MustParse("1Mi")))
	})

	/*
		Release: v1.20
		Testname: Pod with the deleted RuntimeClass is rejected.
		Description: Pod requesting the deleted RuntimeClass must be rejected.
	*/
	framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass", f.WithNodeConformance(), func(ctx context.Context) {
		rcName := createRuntimeClass(ctx, f, "delete-me", "runc", nil)
		rcClient := f.ClientSet.NodeV1().RuntimeClasses()

		ginkgo.By("Deleting RuntimeClass "+rcName, func() {
			err := rcClient.Delete(ctx, rcName, metav1.DeleteOptions{})
			framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName)

			ginkgo.By("Waiting for the RuntimeClass to disappear")
			// NOTE(review): wait.PollImmediate is deprecated in favor of the
			// context-aware wait.PollUntilContextTimeout — candidate cleanup.
			framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {
				_, err := rcClient.Get(ctx, rcName, metav1.GetOptions{})
				if apierrors.IsNotFound(err) {
					return true, nil // done
				}
				if err != nil {
					return true, err // stop wait with error
				}
				return false, nil
			}))
		})

		expectPodRejection(ctx, f, e2eruntimeclass.NewRuntimeClassPod(rcName))
	})

	/*
		Release: v1.20
		Testname: RuntimeClass API
		Description:
		The node.k8s.io API group MUST exist in the /apis discovery document.
		The node.k8s.io/v1 API group/version MUST exist in the /apis/node.k8s.io discovery document.
		The runtimeclasses resource MUST exist in the /apis/node.k8s.io/v1 discovery document.
		The runtimeclasses resource must support create, get, list, watch, update, patch, delete, and deletecollection.
	*/
	framework.ConformanceIt("should support RuntimeClasses API operations", func(ctx context.Context) {
		// Setup
		rcVersion := "v1"
		rcClient := f.ClientSet.NodeV1().RuntimeClasses()

		// This is a conformance test that must configure opaque handlers to validate CRUD operations.
		// Test should not use any existing handler like gVisor or runc
		//
		// All CRUD operations in this test are limited to the objects with the label test=f.UniqueName
		rc := runtimeclasstest.NewRuntimeClass(f.UniqueName+"-handler", f.UniqueName+"-conformance-runtime-class")
		rc.SetLabels(map[string]string{"test": f.UniqueName})
		rc2 := runtimeclasstest.NewRuntimeClass(f.UniqueName+"-handler2", f.UniqueName+"-conformance-runtime-class2")
		rc2.SetLabels(map[string]string{"test": f.UniqueName})
		rc3 := runtimeclasstest.NewRuntimeClass(f.UniqueName+"-handler3", f.UniqueName+"-conformance-runtime-class3")
		rc3.SetLabels(map[string]string{"test": f.UniqueName})

		// Discovery

		ginkgo.By("getting /apis")
		{
			// The node.k8s.io group with version v1 must appear in the root
			// discovery document.
			discoveryGroups, err := f.ClientSet.Discovery().ServerGroups()
			framework.ExpectNoError(err)
			found := false
			for _, group := range discoveryGroups.Groups {
				if group.Name == nodev1.GroupName {
					for _, version := range group.Versions {
						if version.Version == rcVersion {
							found = true
							break
						}
					}
				}
			}
			if !found {
				framework.Failf("expected RuntimeClass API group/version, got %#v", discoveryGroups.Groups)
			}
		}

		ginkgo.By("getting /apis/node.k8s.io")
		{
			group := &metav1.APIGroup{}
			err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/node.k8s.io").Do(ctx).Into(group)
			framework.ExpectNoError(err)
			found := false
			for _, version := range group.Versions {
				if version.Version == rcVersion {
					found = true
					break
				}
			}
			if !found {
				framework.Failf("expected RuntimeClass API version, got %#v", group.Versions)
			}
		}

		ginkgo.By("getting /apis/node.k8s.io/" + rcVersion)
		{
			resources, err := f.ClientSet.Discovery().ServerResourcesForGroupVersion(nodev1.SchemeGroupVersion.String())
			framework.ExpectNoError(err)
			found := false
			for _, resource := range resources.APIResources {
				switch resource.Name {
				case "runtimeclasses":
					found = true
				}
			}
			if !found {
				framework.Failf("expected runtimeclasses, got %#v", resources.APIResources)
			}
		}

		// Main resource create/read/update/watch operations

		ginkgo.By("creating")
		createdRC, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		// A second create of the same object must be rejected with 409 Conflict.
		_, err = rcClient.Create(ctx, rc, metav1.CreateOptions{})
		if !apierrors.IsAlreadyExists(err) {
			framework.Failf("expected 409, got %#v", err)
		}
		_, err = rcClient.Create(ctx, rc2, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("watching")
		framework.Logf("starting watch")
		rcWatch, err := rcClient.Watch(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
		framework.ExpectNoError(err)

		// added for a watch
		_, err = rcClient.Create(ctx, rc3, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("getting")
		gottenRC, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(gottenRC.UID).To(gomega.Equal(createdRC.UID))

		ginkgo.By("listing")
		rcs, err := rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
		framework.ExpectNoError(err)
		gomega.Expect(rcs.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")

		ginkgo.By("patching")
		patchedRC, err := rcClient.Patch(ctx, createdRC.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(patchedRC.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")

		ginkgo.By("updating")
		// NOTE(review): "csrToUpdate" looks like a name copied from a CSR test;
		// "rcToUpdate" would be clearer.
		csrToUpdate := patchedRC.DeepCopy()
		csrToUpdate.Annotations["updated"] = "true"
		updatedRC, err := rcClient.Update(ctx, csrToUpdate, metav1.UpdateOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(updatedRC.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")

		framework.Logf("waiting for watch events with expected annotations")
		// NOTE(review): this condition exits the loop as soon as ANY of the
		// three flags becomes true (it is !a && !b && !c, not !(a && b && c)),
		// so the test observes at least one event rather than all three —
		// confirm whether that is the intended strength of the check.
		for sawAdded, sawPatched, sawUpdated := false, false, false; !sawAdded && !sawPatched && !sawUpdated; {
			select {
			case evt, ok := <-rcWatch.ResultChan():
				if !ok {
					framework.Fail("watch channel should not close")
				}
				if evt.Type == watch.Modified {
					watchedRC, isRC := evt.Object.(*nodev1.RuntimeClass)
					if !isRC {
						framework.Failf("expected RC, got %T", evt.Object)
					}
					if watchedRC.Annotations["patched"] == "true" {
						framework.Logf("saw patched annotations")
						sawPatched = true
					} else if watchedRC.Annotations["updated"] == "true" {
						framework.Logf("saw updated annotations")
						sawUpdated = true
					} else {
						framework.Logf("missing expected annotations, waiting: %#v", watchedRC.Annotations)
					}
				} else if evt.Type == watch.Added {
					_, isRC := evt.Object.(*nodev1.RuntimeClass)
					if !isRC {
						framework.Failf("expected RC, got %T", evt.Object)
					}
					sawAdded = true
				}

			case <-time.After(wait.ForeverTestTimeout):
				framework.Fail("timed out waiting for watch event")
			}
		}
		rcWatch.Stop()

		// main resource delete operations

		ginkgo.By("deleting")
		err = rcClient.Delete(ctx, createdRC.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)
		_, err = rcClient.Get(ctx, createdRC.Name, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			framework.Failf("expected 404, got %#v", err)
		}
		rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
		framework.ExpectNoError(err)
		gomega.Expect(rcs.Items).To(gomega.HaveLen(2), "filtered list should have 2 items")

		ginkgo.By("deleting a collection")
		err = rcClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
		framework.ExpectNoError(err)
		rcs, err = rcClient.List(ctx, metav1.ListOptions{LabelSelector: "test=" + f.UniqueName})
		framework.ExpectNoError(err)
		gomega.Expect(rcs.Items).To(gomega.BeEmpty(), "filtered list should have 0 items")
	})
})
   361  
   362  func deleteRuntimeClass(ctx context.Context, f *framework.Framework, name string) {
   363  	err := f.ClientSet.NodeV1().RuntimeClasses().Delete(ctx, name, metav1.DeleteOptions{})
   364  	framework.ExpectNoError(err, "failed to delete RuntimeClass resource")
   365  }
   366  
   367  // createRuntimeClass generates a RuntimeClass with the desired handler and a "namespaced" name,
   368  // synchronously creates it, and returns the generated name.
   369  func createRuntimeClass(ctx context.Context, f *framework.Framework, name, handler string, overhead *nodev1.Overhead) string {
   370  	uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name)
   371  	rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler)
   372  	rc.Overhead = overhead
   373  	rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(ctx, rc, metav1.CreateOptions{})
   374  	framework.ExpectNoError(err, "failed to create RuntimeClass resource")
   375  	return rc.GetName()
   376  }
   377  
   378  func expectPodRejection(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
   379  	_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
   380  	gomega.Expect(err).To(gomega.HaveOccurred(), "should be forbidden")
   381  	if !apierrors.IsForbidden(err) {
   382  		framework.Failf("expected forbidden error, got %#v", err)
   383  	}
   384  }
   385  
   386  // expectPodSuccess waits for the given pod to terminate successfully.
   387  func expectPodSuccess(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
   388  	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(
   389  		ctx, f.ClientSet, pod.Name, f.Namespace.Name))
   390  }