k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/resourceclaim/controller_test.go (about)

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package resourceclaim
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"sort"
    24  	"sync"
    25  	"testing"
    26  
    27  	"github.com/stretchr/testify/assert"
    28  
    29  	v1 "k8s.io/api/core/v1"
    30  	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
    31  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    32  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    33  	"k8s.io/apimachinery/pkg/runtime"
    34  	"k8s.io/apimachinery/pkg/types"
    35  	"k8s.io/client-go/informers"
    36  	"k8s.io/client-go/kubernetes/fake"
    37  	k8stesting "k8s.io/client-go/testing"
    38  	"k8s.io/component-base/metrics/testutil"
    39  	"k8s.io/klog/v2"
    40  	"k8s.io/kubernetes/pkg/controller"
    41  	ephemeralvolumemetrics "k8s.io/kubernetes/pkg/controller/resourceclaim/metrics"
    42  	"k8s.io/utils/pointer"
    43  )
    44  
var (
	// Common identifiers shared by the test fixtures below.
	testPodName          = "test-pod"
	testNamespace        = "my-namespace"
	testPodUID           = types.UID("uidpod1")
	otherNamespace       = "not-my-namespace"
	podResourceClaimName = "acme-resource"
	templateName         = "my-template"
	className            = "my-resource-class"
	nodeName             = "worker"

	// Pods: one without claims, one referencing a claim template, and an
	// unrelated pod used to simulate a second consumer.
	testPod             = makePod(testPodName, testNamespace, testPodUID)
	testPodWithResource = makePod(testPodName, testNamespace, testPodUID, *makePodResourceClaim(podResourceClaimName, templateName))
	otherTestPod        = makePod(testPodName+"-II", testNamespace, testPodUID+"-II")

	// Claims in progressively later lifecycle states: created, allocated,
	// reserved for one pod, reserved for two pods.
	testClaim              = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, makeOwnerReference(testPodWithResource, true))
	testClaimAllocated     = allocateClaim(testClaim)
	testClaimReserved      = reserveClaim(testClaimAllocated, testPodWithResource)
	testClaimReservedTwice = reserveClaim(testClaimReserved, otherTestPod)

	// Claims as the controller generates them from a template (GenerateName-based).
	generatedTestClaim          = makeGeneratedClaim(podResourceClaimName, testPodName+"-"+podResourceClaimName+"-", testNamespace, className, 1, makeOwnerReference(testPodWithResource, true))
	generatedTestClaimAllocated = allocateClaim(generatedTestClaim)
	generatedTestClaimReserved  = reserveClaim(generatedTestClaimAllocated, testPodWithResource)

	// conflictingClaim has the expected name but no owner reference, so the
	// controller must refuse to adopt it. otherNamespaceClaim must be ignored.
	conflictingClaim    = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, nil)
	otherNamespaceClaim = makeClaim(testPodName+"-"+podResourceClaimName, otherNamespace, className, nil)
	template            = makeTemplate(templateName, testNamespace, className)

	// testPodWithNodeName is already scheduled and has its claim status
	// populated, which lets the controller trigger delayed allocation.
	testPodWithNodeName = func() *v1.Pod {
		pod := testPodWithResource.DeepCopy()
		pod.Spec.NodeName = nodeName
		pod.Status.ResourceClaimStatuses = append(pod.Status.ResourceClaimStatuses, v1.PodResourceClaimStatus{
			Name:              pod.Spec.ResourceClaims[0].Name,
			ResourceClaimName: &generatedTestClaim.Name,
		})
		return pod
	}()

	// podSchedulingContext is what the controller is expected to create for
	// testPodWithNodeName when it triggers allocation.
	podSchedulingContext = resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{
			Name:      testPodName,
			Namespace: testNamespace,
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: "v1",
					Kind:       "Pod",
					Name:       testPodName,
					UID:        testPodUID,
					Controller: pointer.Bool(true),
				},
			},
		},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode: nodeName,
		},
	}
)
   101  
// init registers klog's flags so test runs can control logging verbosity
// (e.g. "go test -args -v=5").
func init() {
	klog.InitFlags(nil)
}
   105  
// TestSyncHandler drives the resource claim controller's syncHandler through a
// table of scenarios: claim creation from templates, adoption of existing
// claims, clearing of stale reservations, deallocation, deletion of claims for
// finished pods, and triggering of delayed allocation. Each case seeds the
// fake client and informer caches, runs one sync for a single pod or claim
// key, and then compares the resulting claims, pod statuses, scheduling
// contexts, and metrics against expectations.
func TestSyncHandler(t *testing.T) {
	tests := []struct {
		name string
		// key identifies the work item handed to syncHandler (pod or claim key).
		key string
		// claims/pods/templates are stored in the fake client before starting.
		claims        []*resourcev1alpha2.ResourceClaim
		claimsInCache []*resourcev1alpha2.ResourceClaim
		pods          []*v1.Pod
		// podsLater are created only after informers are stopped, simulating
		// objects the controller has not observed yet.
		podsLater                     []*v1.Pod
		templates                     []*resourcev1alpha2.ResourceClaimTemplate
		expectedClaims                []resourcev1alpha2.ResourceClaim
		expectedPodSchedulingContexts []resourcev1alpha2.PodSchedulingContext
		expectedStatuses              map[string][]v1.PodResourceClaimStatus
		expectedError                 bool
		expectedMetrics               expectedMetrics
	}{
		{
			name:           "create",
			pods:           []*v1.Pod{testPodWithResource},
			templates:      []*resourcev1alpha2.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name: "nop",
			pods: []*v1.Pod{func() *v1.Pod {
				pod := testPodWithResource.DeepCopy()
				pod.Status.ResourceClaimStatuses = []v1.PodResourceClaimStatus{
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				}
				return pod
			}()},
			templates:      []*resourcev1alpha2.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			claims:         []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// Pod status references a claim that no longer exists: the
			// controller must create it again.
			name: "recreate",
			pods: []*v1.Pod{func() *v1.Pod {
				pod := testPodWithResource.DeepCopy()
				pod.Status.ResourceClaimStatuses = []v1.PodResourceClaimStatus{
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				}
				return pod
			}()},
			templates:      []*resourcev1alpha2.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name:          "missing-template",
			pods:          []*v1.Pod{testPodWithResource},
			templates:     nil,
			key:           podKey(testPodWithResource),
			expectedError: true,
		},
		{
			name:           "find-existing-claim-by-label",
			pods:           []*v1.Pod{testPodWithResource},
			key:            podKey(testPodWithResource),
			claims:         []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name:           "find-existing-claim-by-name",
			pods:           []*v1.Pod{testPodWithResource},
			key:            podKey(testPodWithResource),
			claims:         []*resourcev1alpha2.ResourceClaim{testClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &testClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// The claim is only in the mutation cache, not in the fake
			// apiserver: the controller must not create a duplicate.
			name:          "find-created-claim-in-cache",
			pods:          []*v1.Pod{testPodWithResource},
			key:           podKey(testPodWithResource),
			claimsInCache: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "no-such-pod",
			key:  podKey(testPodWithResource),
		},
		{
			name: "pod-deleted",
			pods: func() []*v1.Pod {
				deleted := metav1.Now()
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].DeletionTimestamp = &deleted
				return pods
			}(),
			key: podKey(testPodWithResource),
		},
		{
			name: "no-volumes",
			pods: []*v1.Pod{testPod},
			key:  podKey(testPod),
		},
		{
			name:           "create-with-other-claim",
			pods:           []*v1.Pod{testPodWithResource},
			templates:      []*resourcev1alpha2.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			claims:         []*resourcev1alpha2.ResourceClaim{otherNamespaceClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*otherNamespaceClaim, *generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			// A claim with the right name but without an owner reference must
			// not be adopted.
			name:           "wrong-claim-owner",
			pods:           []*v1.Pod{testPodWithResource},
			key:            podKey(testPodWithResource),
			claims:         []*resourcev1alpha2.ResourceClaim{conflictingClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*conflictingClaim},
			expectedError:  true,
		},
		{
			// numFailures > 0 installs a reactor that fails claim creation.
			name:            "create-conflict",
			pods:            []*v1.Pod{testPodWithResource},
			templates:       []*resourcev1alpha2.ResourceClaimTemplate{template},
			key:             podKey(testPodWithResource),
			expectedMetrics: expectedMetrics{1, 1},
			expectedError:   true,
		},
		{
			name:            "stay-reserved-seen",
			pods:            []*v1.Pod{testPodWithResource},
			key:             claimKey(testClaimReserved),
			claims:          []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims:  []resourcev1alpha2.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// The consuming pod exists only in the apiserver, not in the
			// informer cache; the reservation must still be kept.
			name:            "stay-reserved-not-seen",
			podsLater:       []*v1.Pod{testPodWithResource},
			key:             claimKey(testClaimReserved),
			claims:          []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims:  []resourcev1alpha2.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// No pod uses the claim anymore: reservation is removed and
			// deallocation is requested for control-plane allocation.
			name:   "clear-reserved-delayed-allocation",
			pods:   []*v1.Pod{},
			key:    claimKey(testClaimReserved),
			claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := testClaimAllocated.DeepCopy()
				claim.Status.DeallocationRequested = true
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// With structured parameters the controller deallocates directly
			// (clears allocation and finalizer) instead of requesting it.
			name:   "clear-reserved-delayed-allocation-structured",
			pods:   []*v1.Pod{},
			key:    claimKey(testClaimReserved),
			claims: []*resourcev1alpha2.ResourceClaim{structuredParameters(testClaimReserved)},
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := testClaimAllocated.DeepCopy()
				claim.Finalizers = []string{}
				claim.Status.Allocation = nil
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// One consumer is gone, the other pod still exists: only the
			// stale reservation entry is removed, allocation stays.
			name: "dont-clear-reserved-delayed-allocation-structured",
			pods: []*v1.Pod{testPodWithResource},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := structuredParameters(testClaimReserved)
				claim = reserveClaim(claim, otherTestPod)
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			expectedClaims:  []resourcev1alpha2.ResourceClaim{*structuredParameters(testClaimReserved)},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// Immediate allocation: reservation is cleared but the claim
			// stays allocated.
			name: "clear-reserved-immediate-allocation",
			pods: []*v1.Pod{},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := testClaimReserved.DeepCopy()
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := testClaimAllocated.DeepCopy()
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-immediate-allocation-structured",
			pods: []*v1.Pod{},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := structuredParameters(testClaimReserved.DeepCopy())
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := structuredParameters(testClaimAllocated.DeepCopy())
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// A deleted, structured, immediately-allocated claim gets
			// deallocated (allocation and finalizer removed).
			name: "clear-reserved-immediate-allocation-structured-deleted",
			pods: []*v1.Pod{},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := structuredParameters(testClaimReserved.DeepCopy())
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				claim.DeletionTimestamp = &metav1.Time{}
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := structuredParameters(testClaimAllocated.DeepCopy())
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				claim.DeletionTimestamp = &metav1.Time{}
				claim.Finalizers = []string{}
				claim.Status.Allocation = nil
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// Same as above, but the claim was never reserved.
			name: "immediate-allocation-structured-deleted",
			pods: []*v1.Pod{},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := structuredParameters(testClaimAllocated.DeepCopy())
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				claim.DeletionTimestamp = &metav1.Time{}
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := structuredParameters(testClaimAllocated.DeepCopy())
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				claim.DeletionTimestamp = &metav1.Time{}
				claim.Finalizers = []string{}
				claim.Status.Allocation = nil
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// The consuming pod has finished: reservation is cleared even
			// though the pod still exists.
			name: "clear-reserved-when-done-delayed-allocation",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key: claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claims := []*resourcev1alpha2.ResourceClaim{testClaimReserved.DeepCopy()}
				claims[0].OwnerReferences = nil
				return claims
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claims := []resourcev1alpha2.ResourceClaim{*testClaimAllocated.DeepCopy()}
				claims[0].OwnerReferences = nil
				claims[0].Status.DeallocationRequested = true
				return claims
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-when-done-immediate-allocation",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key: claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claims := []*resourcev1alpha2.ResourceClaim{testClaimReserved.DeepCopy()}
				claims[0].OwnerReferences = nil
				claims[0].Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return claims
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claims := []resourcev1alpha2.ResourceClaim{*testClaimAllocated.DeepCopy()}
				claims[0].OwnerReferences = nil
				claims[0].Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return claims
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// Only the reservation for the vanished pod is removed; the
			// reservation for the still-existing pod remains.
			name:            "remove-reserved",
			pods:            []*v1.Pod{testPod},
			key:             claimKey(testClaimReservedTwice),
			claims:          []*resourcev1alpha2.ResourceClaim{testClaimReservedTwice},
			expectedClaims:  []resourcev1alpha2.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// The owning pod has finished: the claim gets deleted entirely.
			name: "delete-claim-when-done",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key:             claimKey(testClaimReserved),
			claims:          []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims:  nil,
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			// A scheduled pod with an unallocated claim triggers creation of
			// a PodSchedulingContext for delayed allocation.
			name:           "trigger-allocation",
			pods:           []*v1.Pod{testPodWithNodeName},
			key:            podKey(testPodWithNodeName),
			templates:      []*resourcev1alpha2.ResourceClaimTemplate{template},
			claims:         []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithNodeName.Name: {
					{Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedPodSchedulingContexts: []resourcev1alpha2.PodSchedulingContext{podSchedulingContext},
			expectedMetrics:               expectedMetrics{0, 0},
		},
		{
			// An allocated claim for a scheduled pod gets the pod added to
			// ReservedFor.
			name:           "add-reserved",
			pods:           []*v1.Pod{testPodWithNodeName},
			key:            podKey(testPodWithNodeName),
			templates:      []*resourcev1alpha2.ResourceClaimTemplate{template},
			claims:         []*resourcev1alpha2.ResourceClaim{generatedTestClaimAllocated},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaimReserved},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithNodeName.Name: {
					{Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
	}

	for _, tc := range tests {
		// Run sequentially because of global logging and global metrics.
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			// Seed the fake clientset with the scenario's objects.
			var objects []runtime.Object
			for _, pod := range tc.pods {
				objects = append(objects, pod)
			}
			for _, claim := range tc.claims {
				objects = append(objects, claim)
			}
			for _, template := range tc.templates {
				objects = append(objects, template)
			}

			fakeKubeClient := createTestClient(objects...)
			if tc.expectedMetrics.numFailures > 0 {
				// Force claim creation to fail with a conflict so the
				// failure metric gets incremented.
				fakeKubeClient.PrependReactor("create", "resourceclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
					return true, nil, apierrors.NewConflict(action.GetResource().GroupResource(), "fake name", errors.New("fake conflict"))
				})
			}
			setupMetrics()
			informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
			podInformer := informerFactory.Core().V1().Pods()
			podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
			claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
			templateInformer := informerFactory.Resource().V1alpha2().ResourceClaimTemplates()

			ec, err := NewController(klog.FromContext(ctx), fakeKubeClient, podInformer, podSchedulingInformer, claimInformer, templateInformer)
			if err != nil {
				t.Fatalf("error creating ephemeral controller : %v", err)
			}

			// Ensure informers are up-to-date.
			informerFactory.Start(ctx.Done())
			stopInformers := func() {
				cancel()
				informerFactory.Shutdown()
			}
			defer stopInformers()
			informerFactory.WaitForCacheSync(ctx.Done())

			// Add claims that only exist in the mutation cache.
			for _, claim := range tc.claimsInCache {
				ec.claimCache.Mutation(claim)
			}

			// Simulate race: stop informers, add more pods that the controller doesn't know about.
			stopInformers()
			for _, pod := range tc.podsLater {
				_, err := fakeKubeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("unexpected error while creating pod: %v", err)
				}
			}

			// One sync of the work item under test.
			err = ec.syncHandler(context.TODO(), tc.key)
			if err != nil && !tc.expectedError {
				t.Fatalf("unexpected error while running handler: %v", err)
			}
			if err == nil && tc.expectedError {
				t.Fatalf("unexpected success")
			}

			// Verify claims across all namespaces.
			claims, err := fakeKubeClient.ResourceV1alpha2().ResourceClaims("").List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing claims: %v", err)
			}
			assert.Equal(t, normalizeClaims(tc.expectedClaims), normalizeClaims(claims.Items))

			// Verify pod resource claim statuses.
			pods, err := fakeKubeClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing pods: %v", err)
			}
			var actualStatuses map[string][]v1.PodResourceClaimStatus
			for _, pod := range pods.Items {
				if len(pod.Status.ResourceClaimStatuses) == 0 {
					continue
				}
				if actualStatuses == nil {
					actualStatuses = make(map[string][]v1.PodResourceClaimStatus)
				}
				actualStatuses[pod.Name] = pod.Status.ResourceClaimStatuses
			}
			assert.Equal(t, tc.expectedStatuses, actualStatuses, "pod resource claim statuses")

			// Verify PodSchedulingContexts created for delayed allocation.
			scheduling, err := fakeKubeClient.ResourceV1alpha2().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing claims: %v", err)
			}
			assert.Equal(t, normalizeScheduling(tc.expectedPodSchedulingContexts), normalizeScheduling(scheduling.Items))

			expectMetrics(t, tc.expectedMetrics)
		})
	}
}
   584  
   585  func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourcev1alpha2.ResourceClaim {
   586  	claim := &resourcev1alpha2.ResourceClaim{
   587  		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
   588  		Spec: resourcev1alpha2.ResourceClaimSpec{
   589  			ResourceClassName: classname,
   590  			AllocationMode:    resourcev1alpha2.AllocationModeWaitForFirstConsumer,
   591  		},
   592  	}
   593  	if owner != nil {
   594  		claim.OwnerReferences = []metav1.OwnerReference{*owner}
   595  	}
   596  
   597  	return claim
   598  }
   599  
   600  func makeGeneratedClaim(podClaimName, generateName, namespace, classname string, createCounter int, owner *metav1.OwnerReference) *resourcev1alpha2.ResourceClaim {
   601  	claim := &resourcev1alpha2.ResourceClaim{
   602  		ObjectMeta: metav1.ObjectMeta{
   603  			Name:         fmt.Sprintf("%s-%d", generateName, createCounter),
   604  			GenerateName: generateName,
   605  			Namespace:    namespace,
   606  			Annotations:  map[string]string{"resource.kubernetes.io/pod-claim-name": podClaimName},
   607  		},
   608  		Spec: resourcev1alpha2.ResourceClaimSpec{
   609  			ResourceClassName: classname,
   610  			AllocationMode:    resourcev1alpha2.AllocationModeWaitForFirstConsumer,
   611  		},
   612  	}
   613  	if owner != nil {
   614  		claim.OwnerReferences = []metav1.OwnerReference{*owner}
   615  	}
   616  
   617  	return claim
   618  }
   619  
   620  func allocateClaim(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
   621  	claim = claim.DeepCopy()
   622  	claim.Status.Allocation = &resourcev1alpha2.AllocationResult{
   623  		Shareable: true,
   624  	}
   625  	return claim
   626  }
   627  
   628  func structuredParameters(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
   629  	claim = claim.DeepCopy()
   630  	// As far the controller is concerned, a claim was allocated by us if it has
   631  	// this finalizer. For testing we don't need to update the allocation result.
   632  	claim.Finalizers = append(claim.Finalizers, resourcev1alpha2.Finalizer)
   633  	return claim
   634  }
   635  
   636  func reserveClaim(claim *resourcev1alpha2.ResourceClaim, pod *v1.Pod) *resourcev1alpha2.ResourceClaim {
   637  	claim = claim.DeepCopy()
   638  	claim.Status.ReservedFor = append(claim.Status.ReservedFor,
   639  		resourcev1alpha2.ResourceClaimConsumerReference{
   640  			Resource: "pods",
   641  			Name:     pod.Name,
   642  			UID:      pod.UID,
   643  		},
   644  	)
   645  	return claim
   646  }
   647  
   648  func makePodResourceClaim(name, templateName string) *v1.PodResourceClaim {
   649  	return &v1.PodResourceClaim{
   650  		Name: name,
   651  		Source: v1.ClaimSource{
   652  			ResourceClaimTemplateName: &templateName,
   653  		},
   654  	}
   655  }
   656  
   657  func makePod(name, namespace string, uid types.UID, podClaims ...v1.PodResourceClaim) *v1.Pod {
   658  	pod := &v1.Pod{
   659  		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, UID: uid},
   660  		Spec: v1.PodSpec{
   661  			ResourceClaims: podClaims,
   662  		},
   663  	}
   664  
   665  	return pod
   666  }
   667  
   668  func makeTemplate(name, namespace, classname string) *resourcev1alpha2.ResourceClaimTemplate {
   669  	template := &resourcev1alpha2.ResourceClaimTemplate{
   670  		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
   671  		Spec: resourcev1alpha2.ResourceClaimTemplateSpec{
   672  			Spec: resourcev1alpha2.ResourceClaimSpec{
   673  				ResourceClassName: classname,
   674  			},
   675  		},
   676  	}
   677  	return template
   678  }
   679  
   680  func podKey(pod *v1.Pod) string {
   681  	return podKeyPrefix + pod.Namespace + "/" + pod.Name
   682  }
   683  
   684  func claimKey(claim *resourcev1alpha2.ResourceClaim) string {
   685  	return claimKeyPrefix + claim.Namespace + "/" + claim.Name
   686  }
   687  
   688  func makeOwnerReference(pod *v1.Pod, isController bool) *metav1.OwnerReference {
   689  	isTrue := true
   690  	return &metav1.OwnerReference{
   691  		APIVersion:         "v1",
   692  		Kind:               "Pod",
   693  		Name:               pod.Name,
   694  		UID:                pod.UID,
   695  		Controller:         &isController,
   696  		BlockOwnerDeletion: &isTrue,
   697  	}
   698  }
   699  
   700  func normalizeClaims(claims []resourcev1alpha2.ResourceClaim) []resourcev1alpha2.ResourceClaim {
   701  	sort.Slice(claims, func(i, j int) bool {
   702  		if claims[i].Namespace < claims[j].Namespace {
   703  			return true
   704  		}
   705  		if claims[i].Namespace > claims[j].Namespace {
   706  			return false
   707  		}
   708  		return claims[i].Name < claims[j].Name
   709  	})
   710  	for i := range claims {
   711  		if len(claims[i].Status.ReservedFor) == 0 {
   712  			claims[i].Status.ReservedFor = nil
   713  		}
   714  		if claims[i].Spec.AllocationMode == "" {
   715  			// This emulates defaulting.
   716  			claims[i].Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer
   717  		}
   718  	}
   719  	return claims
   720  }
   721  
   722  func normalizeScheduling(scheduling []resourcev1alpha2.PodSchedulingContext) []resourcev1alpha2.PodSchedulingContext {
   723  	sort.Slice(scheduling, func(i, j int) bool {
   724  		return scheduling[i].Namespace < scheduling[j].Namespace ||
   725  			scheduling[i].Name < scheduling[j].Name
   726  	})
   727  	return scheduling
   728  }
   729  
   730  func createTestClient(objects ...runtime.Object) *fake.Clientset {
   731  	fakeClient := fake.NewSimpleClientset(objects...)
   732  	fakeClient.PrependReactor("create", "resourceclaims", createResourceClaimReactor())
   733  	return fakeClient
   734  }
   735  
   736  // createResourceClaimReactor implements the logic required for the GenerateName field to work when using
   737  // the fake client. Add it with client.PrependReactor to your fake client.
   738  func createResourceClaimReactor() func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
   739  	nameCounter := 1
   740  	var mutex sync.Mutex
   741  	return func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
   742  		mutex.Lock()
   743  		defer mutex.Unlock()
   744  		claim := action.(k8stesting.CreateAction).GetObject().(*resourcev1alpha2.ResourceClaim)
   745  		if claim.Name == "" && claim.GenerateName != "" {
   746  			claim.Name = fmt.Sprintf("%s-%d", claim.GenerateName, nameCounter)
   747  		}
   748  		nameCounter++
   749  		return false, nil, nil
   750  	}
   751  }
   752  
// Metrics helpers

// expectedMetrics captures the expected values of the controller's claim
// creation counters after one sync.
type expectedMetrics struct {
	// numCreated is the expected number of claim create attempts.
	numCreated int
	// numFailures is the expected number of failed claim creations.
	numFailures int
}
   759  
   760  func expectMetrics(t *testing.T, em expectedMetrics) {
   761  	t.Helper()
   762  
   763  	actualCreated, err := testutil.GetCounterMetricValue(ephemeralvolumemetrics.ResourceClaimCreateAttempts)
   764  	handleErr(t, err, "ResourceClaimCreate")
   765  	if actualCreated != float64(em.numCreated) {
   766  		t.Errorf("Expected claims to be created %d, got %v", em.numCreated, actualCreated)
   767  	}
   768  	actualConflicts, err := testutil.GetCounterMetricValue(ephemeralvolumemetrics.ResourceClaimCreateFailures)
   769  	handleErr(t, err, "ResourceClaimCreate/Conflict")
   770  	if actualConflicts != float64(em.numFailures) {
   771  		t.Errorf("Expected claims to have conflicts %d, got %v", em.numFailures, actualConflicts)
   772  	}
   773  }
   774  
   775  func handleErr(t *testing.T, err error, metricName string) {
   776  	if err != nil {
   777  		t.Errorf("Failed to get %s value, err: %v", metricName, err)
   778  	}
   779  }
   780  
// setupMetrics registers the resource claim controller metrics (registration
// is idempotent) and resets both counters so every test case starts from zero.
// Tests run sequentially because these metrics are global.
func setupMetrics() {
	ephemeralvolumemetrics.RegisterMetrics()
	ephemeralvolumemetrics.ResourceClaimCreateAttempts.Reset()
	ephemeralvolumemetrics.ResourceClaimCreateFailures.Reset()
}