k8s.io/kubernetes@v1.29.3/pkg/kubelet/lifecycle/predicate_test.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lifecycle

import (
	goruntime "runtime"
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/kubelet/types"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
)

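// quantity is a convenience one-unit resource quantity shared by the extended
// resource tests below.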
var (
	quantity = *resource.NewQuantity(1, resource.DecimalSI)
)

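// TestRemoveMissingExtendedResources checks that removeMissingExtendedResources
// strips container requests for extended resources the node does not advertise
// as allocatable, while leaving advertised resources, and entries that appear
// only under Limits, untouched.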
func TestRemoveMissingExtendedResources(t *testing.T) {
	for _, test := range []struct {
		desc string
		pod  *v1.Pod
		node *v1.Node

		expectedPod *v1.Pod
	}{
		{
			desc: "requests in Limits should be ignored",
			pod: makeTestPod(
				v1.ResourceList{},                        // Requests
				v1.ResourceList{"foo.com/bar": quantity}, // Limits
			),
			node: makeTestNode(
				v1.ResourceList{"foo.com/baz": quantity}, // Allocatable
			),
			expectedPod: makeTestPod(
				v1.ResourceList{},                        // Requests
				v1.ResourceList{"foo.com/bar": quantity}, // Limits
			),
		},
		{
			desc: "requests for resources available in node should not be removed",
			pod: makeTestPod(
				v1.ResourceList{"foo.com/bar": quantity}, // Requests
				v1.ResourceList{},                        // Limits
			),
			node: makeTestNode(
				v1.ResourceList{"foo.com/bar": quantity}, // Allocatable
			),
			expectedPod: makeTestPod(
				v1.ResourceList{"foo.com/bar": quantity}, // Requests
				v1.ResourceList{},                        // Limits
			),
		},
		{
			desc: "requests for resources unavailable in node should be removed",
			pod: makeTestPod(
				v1.ResourceList{"foo.com/bar": quantity}, // Requests
				v1.ResourceList{},                        // Limits
			),
			node: makeTestNode(
				v1.ResourceList{"foo.com/baz": quantity}, // Allocatable
			),
			expectedPod: makeTestPod(
				v1.ResourceList{}, // Requests
				v1.ResourceList{}, // Limits
			),
		},
	} {
		// Run each case as a named subtest so failures identify the case;
		// the desc field was previously unused.
		t.Run(test.desc, func(t *testing.T) {
			nodeInfo := schedulerframework.NewNodeInfo()
			nodeInfo.SetNode(test.node)
			pod := removeMissingExtendedResources(test.pod, nodeInfo)
			if diff := cmp.Diff(test.expectedPod, pod); diff != "" {
				t.Errorf("unexpected pod (-want, +got):\n%s", diff)
			}
		})
	}
}

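// makeTestPod returns a single-container pod with the given resource requests
// and limits.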
func makeTestPod(requests, limits v1.ResourceList) *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Resources: v1.ResourceRequirements{
						Requests: requests,
						Limits:   limits,
					},
				},
			},
		},
	}
}

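// makeTestNode returns a node whose status reports the given allocatable
// resources.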
func makeTestNode(allocatable v1.ResourceList) *v1.Node {
	return &v1.Node{
		Status: v1.NodeStatus{
			Allocatable: allocatable,
		},
	}
}

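// Extended-resource and huge-page resource names shared by the fit tests
// below.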
var (
	extendedResourceA = v1.ResourceName("example.com/aaa")
	hugePageResourceA = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
)

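// makeResources builds a NodeResources capacity list from the given raw
// values: CPU in millicores, memory/storage/huge pages in bytes, and plain
// counts for pods and the extended resource.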
func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
	return v1.NodeResources{
		Capacity: v1.ResourceList{
			v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
			v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
			v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
			extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
			hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
		},
	}
}

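// makeAllocatableResources builds the same list as makeResources but as a
// plain v1.ResourceList, suitable for a node's status.Allocatable.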
func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
		v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
		extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
		hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
	}
}

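// newResourcePod returns a pod with one container per entry in
// containerResources, each requesting the corresponding resource list.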
func newResourcePod(containerResources ...v1.ResourceList) *v1.Pod {
	containers := []v1.Container{}
	for _, rl := range containerResources {
		containers = append(containers, v1.Container{
			Resources: v1.ResourceRequirements{Requests: rl},
		})
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
}

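// newPodWithPort returns a single-container pod that binds the given host
// ports.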
func newPodWithPort(hostPorts ...int) *v1.Pod {
	networkPorts := []v1.ContainerPort{}
	for _, port := range hostPorts {
		networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)})
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Ports: networkPorts,
				},
			},
		},
	}
}

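// TestGeneralPredicates exercises generalFilter. The cases below cover
// resource fit, node-name matching, host-port conflicts, and taint/toleration
// handling; a nil reasons slice means the pod is expected to be admitted.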
func TestGeneralPredicates(t *testing.T) {
	resourceTests := []struct {
		pod      *v1.Pod
		nodeInfo *schedulerframework.NodeInfo
		node     *v1.Node
		name     string
		reasons  []PredicateFailureReason
	}{
		{
			pod: &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(
				newResourcePod(v1.ResourceList{
					v1.ResourceCPU:    *resource.NewMilliQuantity(9, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
				})),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "no resources/port/host requested always fits",
		},
		{
			pod: newResourcePod(v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(8, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
			}),
			nodeInfo: schedulerframework.NewNodeInfo(
				newResourcePod(v1.ResourceList{
					v1.ResourceCPU:    *resource.NewMilliQuantity(5, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
				})),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{
				&InsufficientResourceError{ResourceName: v1.ResourceCPU, Requested: 8, Used: 5, Capacity: 10},
				&InsufficientResourceError{ResourceName: v1.ResourceMemory, Requested: 10, Used: 19, Capacity: 20},
			},
			name: "not enough cpu and memory resource",
		},
		{
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					NodeName: "machine2",
				},
			},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{&PredicateFailureError{nodename.Name, nodename.ErrReason}},
			name:    "host not match",
		},
		{
			pod:      newPodWithPort(123),
			nodeInfo: schedulerframework.NewNodeInfo(newPodWithPort(123)),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{&PredicateFailureError{nodeports.Name, nodeports.ErrReason}},
			name:    "hostport conflict",
		},
		{
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Tolerations: []v1.Toleration{
						{Key: "foo"},
						{Key: "bar"},
					},
				},
			},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "foo", Effect: v1.TaintEffectNoSchedule},
						{Key: "bar", Effect: v1.TaintEffectNoExecute},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "taint/toleration match",
		},
		{
			pod:      &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "foo", Effect: v1.TaintEffectNoSchedule},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "NoSchedule taint/toleration not match",
		},
		{
			pod:      &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "bar", Effect: v1.TaintEffectNoExecute},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{&PredicateFailureError{tainttoleration.Name, tainttoleration.ErrReasonNotMatch}},
			name:    "NoExecute taint/toleration not match",
		},
		{
			pod:      &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "baz", Effect: v1.TaintEffectPreferNoSchedule},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "PreferNoSchedule taint/toleration not match",
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{
						types.ConfigSourceAnnotationKey: types.FileSource,
					},
				},
			},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "foo", Effect: v1.TaintEffectNoSchedule},
						{Key: "bar", Effect: v1.TaintEffectNoExecute},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "static pods ignore taints",
		},
	}
	for _, test := range resourceTests {
		t.Run(test.name, func(t *testing.T) {
			test.nodeInfo.SetNode(test.node)
			reasons := generalFilter(test.pod, test.nodeInfo)
			if diff := cmp.Diff(test.reasons, reasons); diff != "" {
				t.Errorf("unexpected failure reasons (-want, +got):\n%s", diff)
			}
		})
	}
}

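// TestRejectPodAdmissionBasedOnOSSelector checks admission against the pod's
// kubernetes.io/os label. Per the case names below, a node OS label that
// disagrees with the kubelet's actual OS (goruntime.GOOS) is expected to be
// reset before the comparison, so a pod labeled with the real OS still fits a
// mislabeled node.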
func TestRejectPodAdmissionBasedOnOSSelector(t *testing.T) {
	tests := []struct {
		name            string
		pod             *v1.Pod
		node            *v1.Node
		expectRejection bool
	}{
		{
			name:            "OS label match",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
			expectRejection: false,
		},
		{
			name:            "dummyOS label, but the underlying OS matches",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: false,
		},
		{
			name:            "dummyOS label, but the underlying OS doesn't match",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: true,
		},
		{
			name:            "OS field mismatch, OS label on node object would be reset to correct value",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: true,
		},
		{
			name:            "No label selector on the pod, should be admitted",
			pod:             &v1.Pod{},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actualResult := rejectPodAdmissionBasedOnOSSelector(test.pod, test.node)
			if test.expectRejection != actualResult {
				t.Errorf("unexpected result, expected %v but got %v", test.expectRejection, actualResult)
			}
		})
	}
}

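// TestRejectPodAdmissionBasedOnOSField checks admission against the pod's
// spec.os field: rejection is expected only when the field is set and names
// an OS other than the one the kubelet runs on.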
func TestRejectPodAdmissionBasedOnOSField(t *testing.T) {
	tests := []struct {
		name            string
		pod             *v1.Pod
		expectRejection bool
	}{
		{
			name:            "OS field match",
			pod:             &v1.Pod{Spec: v1.PodSpec{OS: &v1.PodOS{Name: v1.OSName(goruntime.GOOS)}}},
			expectRejection: false,
		},
		{
			name:            "OS field mismatch",
			pod:             &v1.Pod{Spec: v1.PodSpec{OS: &v1.PodOS{Name: "dummyOS"}}},
			expectRejection: true,
		},
		{
			name:            "no OS field",
			pod:             &v1.Pod{Spec: v1.PodSpec{}},
			expectRejection: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actualResult := rejectPodAdmissionBasedOnOSField(test.pod)
			if test.expectRejection != actualResult {
				t.Errorf("unexpected result, expected %v but got %v", test.expectRejection, actualResult)
			}
		})
	}
}