k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package dynamicresources

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	v1 "k8s.io/api/core/v1"
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiruntime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	cgotesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	"k8s.io/kubernetes/test/utils/ktesting"
	"k8s.io/utils/ptr"
)

var (
	podKind = v1.SchemeGroupVersion.WithKind("Pod")

	podName       = "my-pod"
	podUID        = "1234"
	resourceName  = "my-resource"
	resourceName2 = resourceName + "-2"
	claimName     = podName + "-" + resourceName
	claimName2    = podName + "-" + resourceName + "-2"
	className     = "my-resource-class"
	namespace     = "default"

	resourceClass = &resourcev1alpha2.ResourceClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: className,
		},
		DriverName: "some-driver",
	}
	structuredResourceClass = &resourcev1alpha2.ResourceClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: className,
		},
		DriverName:           "some-driver",
		StructuredParameters: ptr.To(true),
	}
	structuredResourceClassWithParams = &resourcev1alpha2.ResourceClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: className,
		},
		DriverName:           "some-driver",
		StructuredParameters: ptr.To(true),
		ParametersRef: &resourcev1alpha2.ResourceClassParametersReference{
			Name:      className,
			Namespace: namespace,
			Kind:      "ResourceClassParameters",
			APIGroup:  "resource.k8s.io",
		},
	}
	structuredResourceClassWithCRD = &resourcev1alpha2.ResourceClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: className,
		},
		DriverName:           "some-driver",
		StructuredParameters: ptr.To(true),
		ParametersRef: &resourcev1alpha2.ResourceClassParametersReference{
			Name:      className,
			Namespace: namespace,
			Kind:      "ResourceClassParameters",
			APIGroup:  "example.com",
		},
	}

	podWithClaimName = st.MakePod().Name(podName).Namespace(namespace).
				UID(podUID).
				PodResourceClaims(v1.PodResourceClaim{Name: resourceName, Source: v1.ClaimSource{ResourceClaimName: &claimName}}).
				Obj()
	otherPodWithClaimName = st.MakePod().Name(podName).Namespace(namespace).
				UID(podUID + "-II").
				PodResourceClaims(v1.PodResourceClaim{Name: resourceName, Source: v1.ClaimSource{ResourceClaimName: &claimName}}).
				Obj()
	podWithClaimTemplate = st.MakePod().Name(podName).Namespace(namespace).
				UID(podUID).
				PodResourceClaims(v1.PodResourceClaim{Name: resourceName, Source: v1.ClaimSource{ResourceClaimTemplateName: &claimName}}).
				Obj()
	podWithClaimTemplateInStatus = func() *v1.Pod {
		pod := podWithClaimTemplate.DeepCopy()
		pod.Status.ResourceClaimStatuses = []v1.PodResourceClaimStatus{
			{
				Name:              pod.Spec.ResourceClaims[0].Name,
				ResourceClaimName: &claimName,
			},
		}
		return pod
	}()
	podWithTwoClaimNames = st.MakePod().Name(podName).Namespace(namespace).
				UID(podUID).
				PodResourceClaims(v1.PodResourceClaim{Name: resourceName, Source: v1.ClaimSource{ResourceClaimName: &claimName}}).
				PodResourceClaims(v1.PodResourceClaim{Name: resourceName2, Source: v1.ClaimSource{ResourceClaimName: &claimName2}}).
				Obj()

	workerNode      = &st.MakeNode().Name("worker").Label("kubernetes.io/hostname", "worker").Node
	workerNodeSlice = st.MakeResourceSlice("worker", "some-driver").NamedResourcesInstances("instance-1").Obj()

	claimParameters = st.MakeClaimParameters().Name(claimName).Namespace(namespace).
			NamedResourcesRequests("some-driver", "true").
			Shareable(true).
			GeneratedFrom(&resourcev1alpha2.ResourceClaimParametersReference{
			Name:     claimName,
			Kind:     "ResourceClaimParameters",
			APIGroup: "example.com",
		}).
		Obj()
	classParameters = st.MakeClassParameters().Name(className).Namespace(namespace).
			NamedResourcesFilters("some-driver", "true").
			GeneratedFrom(&resourcev1alpha2.ResourceClassParametersReference{
			Name:      className,
			Namespace: namespace,
			Kind:      "ResourceClassParameters",
			APIGroup:  "example.com",
		}).
		Obj()

	claim = st.MakeResourceClaim().
		Name(claimName).
		Namespace(namespace).
		ResourceClassName(className).
		Obj()
	pendingImmediateClaim = st.FromResourceClaim(claim).
				AllocationMode(resourcev1alpha2.AllocationModeImmediate).
				Obj()
	structuredAllocatedImmediateClaim = st.FromResourceClaim(pendingImmediateClaim).
						Allocation("some-driver", &resourcev1alpha2.AllocationResult{}).
						Structured("worker", "instance-1").
						Obj()
	pendingDelayedClaim = st.FromResourceClaim(claim).
				OwnerReference(podName, podUID, podKind).
				AllocationMode(resourcev1alpha2.AllocationModeWaitForFirstConsumer).
				Obj()
	pendingDelayedClaim2 = st.FromResourceClaim(pendingDelayedClaim).
				Name(claimName2).
				Obj()
	deallocatingClaim = st.FromResourceClaim(pendingImmediateClaim).
				Allocation("some-driver", &resourcev1alpha2.AllocationResult{}).
				DeallocationRequested(true).
				Obj()
	inUseClaim = st.FromResourceClaim(pendingImmediateClaim).
			Allocation("some-driver", &resourcev1alpha2.AllocationResult{}).
			ReservedForPod(podName, types.UID(podUID)).
			Obj()
	structuredInUseClaim = st.FromResourceClaim(inUseClaim).
				Structured("worker", "instance-1").
				Obj()
	allocatedClaim = st.FromResourceClaim(pendingDelayedClaim).
			Allocation("some-driver", &resourcev1alpha2.AllocationResult{}).
			Obj()

	pendingDelayedClaimWithParams      = st.FromResourceClaim(pendingDelayedClaim).ParametersRef(claimName).Obj()
	structuredAllocatedClaim           = st.FromResourceClaim(allocatedClaim).Structured("worker", "instance-1").Obj()
	structuredAllocatedClaimWithParams = st.FromResourceClaim(structuredAllocatedClaim).ParametersRef(claimName).Obj()

	otherStructuredAllocatedClaim = st.FromResourceClaim(structuredAllocatedClaim).Name(structuredAllocatedClaim.Name + "-other").Obj()

	allocatedDelayedClaimWithWrongTopology = st.FromResourceClaim(allocatedClaim).
						Allocation("some-driver", &resourcev1alpha2.AllocationResult{AvailableOnNodes: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}).Obj()}).
						Obj()
	structuredAllocatedDelayedClaimWithWrongTopology = st.FromResourceClaim(allocatedDelayedClaimWithWrongTopology).
								Structured("worker-2", "instance-1").
								Obj()
	allocatedImmediateClaimWithWrongTopology = st.FromResourceClaim(allocatedDelayedClaimWithWrongTopology).
							AllocationMode(resourcev1alpha2.AllocationModeImmediate).
							Obj()
	structuredAllocatedImmediateClaimWithWrongTopology = st.FromResourceClaim(allocatedImmediateClaimWithWrongTopology).
								Structured("worker-2", "instance-1").
								Obj()
	allocatedClaimWithGoodTopology = st.FromResourceClaim(allocatedClaim).
					Allocation("some-driver", &resourcev1alpha2.AllocationResult{AvailableOnNodes: st.MakeNodeSelector().In("kubernetes.io/hostname", []string{"worker"}).Obj()}).
					Obj()
	structuredAllocatedClaimWithGoodTopology = st.FromResourceClaim(allocatedClaimWithGoodTopology).
							Structured("worker", "instance-1").
							Obj()
	otherClaim = st.MakeResourceClaim().
			Name("not-my-claim").
			Namespace(namespace).
			ResourceClassName(className).
			Obj()

	scheduling = st.MakePodSchedulingContexts().Name(podName).Namespace(namespace).
			OwnerReference(podName, podUID, podKind).
			Obj()
	schedulingPotential = st.FromPodSchedulingContexts(scheduling).
				PotentialNodes(workerNode.Name).
				Obj()
	schedulingSelectedPotential = st.FromPodSchedulingContexts(schedulingPotential).
					SelectedNode(workerNode.Name).
					Obj()
	schedulingInfo = st.FromPodSchedulingContexts(schedulingPotential).
			ResourceClaims(resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName},
			resourcev1alpha2.ResourceClaimSchedulingStatus{Name: resourceName2}).
		Obj()
)

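// reserve returns a copy of the claim with the pod added to its ReservedFor
// list, i.e. what the claim looks like after the plugin has reserved it for
// that pod.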
func reserve(claim *resourcev1alpha2.ResourceClaim, pod *v1.Pod) *resourcev1alpha2.ResourceClaim {
	return st.FromResourceClaim(claim).
		ReservedForPod(pod.Name, types.UID(pod.UID)).
		Obj()
}

// claimWithCRD replaces the in-tree group with "example.com".
func claimWithCRD(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
	claim = claim.DeepCopy()
	claim.Spec.ParametersRef.APIGroup = "example.com"
	return claim
}

// classWithCRD replaces the in-tree group with "example.com".
func classWithCRD(class *resourcev1alpha2.ResourceClass) *resourcev1alpha2.ResourceClass {
	class = class.DeepCopy()
	class.ParametersRef.APIGroup = "example.com"
	return class
}

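// breakCELInClaimParameters returns a copy of the claim parameters in which
// every request selector refers to an attribute that does not exist, so that
// CEL evaluation fails at runtime.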
func breakCELInClaimParameters(parameters *resourcev1alpha2.ResourceClaimParameters) *resourcev1alpha2.ResourceClaimParameters {
	parameters = parameters.DeepCopy()
	for i := range parameters.DriverRequests {
		for e := range parameters.DriverRequests[i].Requests {
			parameters.DriverRequests[i].Requests[e].NamedResources.Selector = `attributes.bool["no-such-attribute"]`
		}
	}
	return parameters
}

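// breakCELInClassParameters does the same for the class parameters: each
// filter selector refers to an attribute that does not exist, so that CEL
// evaluation fails at runtime.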
func breakCELInClassParameters(parameters *resourcev1alpha2.ResourceClassParameters) *resourcev1alpha2.ResourceClassParameters {
	parameters = parameters.DeepCopy()
	for i := range parameters.Filters {
		parameters.Filters[i].NamedResources.Selector = `attributes.bool["no-such-attribute"]`
	}
	return parameters
}

// result defines the expected outcome of some operation. It covers the
// operation's status and the state of the world (= objects).
type result struct {
	status *framework.Status
	// changes contains a mapping of name to an update function for
	// the corresponding object. These functions apply exactly the expected
	// changes to a copy of the object as it existed before the operation.
	changes change

	// added contains objects created by the operation.
	added []metav1.Object

	// removed contains objects deleted by the operation.
	removed []metav1.Object

	// assumedClaim is the one claim which is expected to be assumed,
	// nil if none.
	assumedClaim *resourcev1alpha2.ResourceClaim

	// inFlightClaim is the one claim which is expected to be tracked as
	// in flight, nil if none.
	inFlightClaim *resourcev1alpha2.ResourceClaim
}

// change contains functions for modifying objects of a certain type. These
// functions get called for all objects of that type. If a function needs to
// modify only a particular instance, it must check the object's name.
type change struct {
	scheduling func(*resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext
	claim      func(*resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim
}
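
// perNodeResult maps a node name to the result that is expected for the
// Filter call on that node.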
type perNodeResult map[string]result

func (p perNodeResult) forNode(nodeName string) result {
	if p == nil {
		return result{}
	}
	return p[nodeName]
}

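// want collects the expected outcome for each extension point that the test
// exercises for a single test case.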
type want struct {
	preenqueue       result
	preFilterResult  *framework.PreFilterResult
	prefilter        result
	filter           perNodeResult
	prescore         result
	reserve          result
	unreserve        result
	prebind          result
	postbind         result
	postFilterResult *framework.PostFilterResult
	postfilter       result

	// unreserveAfterBindFailure, if set, triggers a call to Unreserve
	// after PreBind, as if the actual Bind had failed.
	unreserveAfterBindFailure *result

	// unreserveBeforePreBind, if set, triggers a call to Unreserve
	// before PreBind, as if some other PreBind plugin had failed.
	unreserveBeforePreBind *result
}

// prepare contains changes for objects in the API server.
// Those changes are applied before running the steps. This can
// be used to simulate concurrent changes by some other entities
// like a resource driver.
type prepare struct {
	filter     change
	prescore   change
	reserve    change
	unreserve  change
	prebind    change
	postbind   change
	postfilter change
}

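// TestPlugin drives the plugin through the scheduler framework extension
// points (PreEnqueue, PreFilter, Filter, PreScore, Reserve, Unreserve,
// PreBind, PostBind, PostFilter) for a variety of pod, claim and class
// combinations and compares the resulting status codes and object changes
// against the expectations of each test case.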
func TestPlugin(t *testing.T) {
	testcases := map[string]struct {
		nodes       []*v1.Node // default if unset is workerNode
		pod         *v1.Pod
		claims      []*resourcev1alpha2.ResourceClaim
		classes     []*resourcev1alpha2.ResourceClass
		schedulings []*resourcev1alpha2.PodSchedulingContext

		// objs get stored directly in the fake client, without passing
		// through reactors, in contrast to the types above.
		objs []apiruntime.Object

		prepare prepare
		want    want
		disable bool
	}{
		"empty": {
			pod: st.MakePod().Name("foo").Namespace("default").Obj(),
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.Skip),
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
				},
			},
		},
		"claim-reference": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim},
			want: want{
				prebind: result{
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
							}
							return claim
						},
					},
				},
			},
		},
		"claim-reference-structured": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{structuredAllocatedClaim, otherClaim},
			want: want{
				prebind: result{
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
							}
							return claim
						},
					},
				},
			},
		},
		"claim-template": {
			pod:    podWithClaimTemplateInStatus,
			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim},
			want: want{
				prebind: result{
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
							}
							return claim
						},
					},
				},
			},
		},
		"claim-template-structured": {
			pod:    podWithClaimTemplateInStatus,
			claims: []*resourcev1alpha2.ResourceClaim{structuredAllocatedClaim, otherClaim},
			want: want{
				prebind: result{
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Status.ReservedFor = inUseClaim.Status.ReservedFor
							}
							return claim
						},
					},
				},
			},
		},
		"missing-claim": {
			pod:    podWithClaimTemplate, // status not set
			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim, otherClaim},
			want: want{
				preenqueue: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `pod "default/my-pod": ResourceClaim not created yet`),
				},
			},
		},
		"deleted-claim": {
			pod: podWithClaimTemplateInStatus,
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := allocatedClaim.DeepCopy()
				claim.DeletionTimestamp = &metav1.Time{Time: time.Now()}
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			want: want{
				preenqueue: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim "my-pod-my-resource" is being deleted`),
				},
			},
		},
		"wrong-claim": {
			pod: podWithClaimTemplateInStatus,
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := allocatedClaim.DeepCopy()
				claim.OwnerReferences[0].UID += "123"
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			want: want{
				preenqueue: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `ResourceClaim default/my-pod-my-resource was not created for pod default/my-pod (pod is not owner)`),
				},
			},
		},
		"waiting-for-immediate-allocation": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim},
			classes: []*resourcev1alpha2.ResourceClass{resourceClass},
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `unallocated immediate resourceclaim`),
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
				},
			},
		},
		"immediate-allocation-structured-no-resources": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClass},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim cannot be allocated for the node (unsuitable)`),
					},
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `still not schedulable`),
				},
			},
		},
		"immediate-allocation-structured-with-resources": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClass},
			objs:    []apiruntime.Object{workerNodeSlice},
			want: want{
				reserve: result{
					inFlightClaim: structuredAllocatedImmediateClaim,
				},
				prebind: result{
					assumedClaim: reserve(structuredAllocatedImmediateClaim, podWithClaimName),
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Finalizers = structuredAllocatedImmediateClaim.Finalizers
								claim.Status = structuredInUseClaim.Status
							}
							return claim
						},
					},
				},
				postbind: result{
					assumedClaim: reserve(structuredAllocatedImmediateClaim, podWithClaimName),
				},
			},
		},
		"delayed-allocation-structured-no-resources": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClass},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim cannot be allocated for the node (unsuitable)`),
					},
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `still not schedulable`),
				},
			},
		},
		"delayed-allocation-structured-with-resources": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClass},
			objs:    []apiruntime.Object{workerNodeSlice},
			want: want{
				reserve: result{
					inFlightClaim: structuredAllocatedClaim,
				},
				prebind: result{
					assumedClaim: reserve(structuredAllocatedClaim, podWithClaimName),
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Finalizers = structuredAllocatedClaim.Finalizers
								claim.Status = structuredInUseClaim.Status
							}
							return claim
						},
					},
				},
				postbind: result{
					assumedClaim: reserve(structuredAllocatedClaim, podWithClaimName),
				},
			},
		},
		"delayed-allocation-structured-skip-bind": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClass},
			objs:    []apiruntime.Object{workerNodeSlice},
			want: want{
				reserve: result{
					inFlightClaim: structuredAllocatedClaim,
				},
				unreserveBeforePreBind: &result{},
			},
		},
		"delayed-allocation-structured-exhausted-resources": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim, otherStructuredAllocatedClaim},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClass},
			objs:    []apiruntime.Object{workerNodeSlice},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim cannot be allocated for the node (unsuitable)`),
					},
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `still not schedulable`),
				},
			},
		},

		"with-parameters": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaimWithParams},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClassWithParams},
			objs:    []apiruntime.Object{claimParameters, classParameters, workerNodeSlice},
			want: want{
				reserve: result{
					inFlightClaim: structuredAllocatedClaimWithParams,
				},
				prebind: result{
					assumedClaim: reserve(structuredAllocatedClaimWithParams, podWithClaimName),
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Finalizers = structuredAllocatedClaim.Finalizers
								claim.Status = structuredInUseClaim.Status
							}
							return claim
						},
					},
				},
				postbind: result{
					assumedClaim: reserve(structuredAllocatedClaimWithParams, podWithClaimName),
				},
			},
		},

		"with-translated-parameters": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{claimWithCRD(pendingDelayedClaimWithParams)},
			classes: []*resourcev1alpha2.ResourceClass{classWithCRD(structuredResourceClassWithCRD)},
			objs:    []apiruntime.Object{claimParameters, classParameters, workerNodeSlice},
			want: want{
				reserve: result{
					inFlightClaim: claimWithCRD(structuredAllocatedClaimWithParams),
				},
				prebind: result{
					assumedClaim: reserve(claimWithCRD(structuredAllocatedClaimWithParams), podWithClaimName),
					changes: change{
						claim: func(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							if claim.Name == claimName {
								claim = claim.DeepCopy()
								claim.Finalizers = structuredAllocatedClaim.Finalizers
								claim.Status = structuredInUseClaim.Status
							}
							return claim
						},
					},
				},
				postbind: result{
					assumedClaim: reserve(claimWithCRD(structuredAllocatedClaimWithParams), podWithClaimName),
				},
			},
		},

		"missing-class-parameters": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaimWithParams},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClassWithParams},
			objs:    []apiruntime.Object{claimParameters /* classParameters, */, workerNodeSlice},
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `class parameters default/my-resource-class not found`),
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
				},
			},
		},

		"missing-claim-parameters": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaimWithParams},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClassWithParams},
			objs:    []apiruntime.Object{ /* claimParameters, */ classParameters, workerNodeSlice},
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `claim parameters default/my-pod-my-resource not found`),
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
				},
			},
		},

		"claim-parameters-CEL-runtime-error": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaimWithParams},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClassWithParams},
			objs:    []apiruntime.Object{breakCELInClaimParameters(claimParameters), classParameters, workerNodeSlice},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `checking structured parameters failed: checking node "worker" and resources of driver "some-driver": evaluate request CEL expression: no such key: no-such-attribute`),
					},
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `still not schedulable`),
				},
			},
		},

		"class-parameters-CEL-runtime-error": {
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaimWithParams},
			classes: []*resourcev1alpha2.ResourceClass{structuredResourceClassWithParams},
			objs:    []apiruntime.Object{claimParameters, breakCELInClassParameters(classParameters), workerNodeSlice},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `checking structured parameters failed: checking node "worker" and resources of driver "some-driver": evaluate filter CEL expression: no such key: no-such-attribute`),
					},
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `still not schedulable`),
				},
			},
		},

		"waiting-for-deallocation": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{deallocatingClaim},
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim must be reallocated`),
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
				},
			},
		},
		"delayed-allocation-missing-class": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("resource class %s does not exist", className)),
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
				},
			},
		},
		"delayed-allocation-scheduling-select-immediately": {
			// Create the PodSchedulingContext object, ask for information
			// and select a node.
			pod:     podWithClaimName,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
			classes: []*resourcev1alpha2.ResourceClass{resourceClass},
			want: want{
				prebind: result{
					status: framework.NewStatus(framework.Pending, `waiting for resource driver`),
					added:  []metav1.Object{schedulingSelectedPotential},
				},
			},
		},
		"delayed-allocation-scheduling-ask": {
			// Create the PodSchedulingContext object, ask for
			// information, but do not select a node because
			// there are multiple claims.
			pod:     podWithTwoClaimNames,
			claims:  []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim, pendingDelayedClaim2},
			classes: []*resourcev1alpha2.ResourceClass{resourceClass},
			want: want{
				prebind: result{
					status: framework.NewStatus(framework.Pending, `waiting for resource driver`),
					added:  []metav1.Object{schedulingPotential},
				},
			},
		},
		"delayed-allocation-scheduling-finish": {
			// Use the populated PodSchedulingContext object to select a
			// node.
			pod:         podWithClaimName,
			claims:      []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
			schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
			classes:     []*resourcev1alpha2.ResourceClass{resourceClass},
			want: want{
				prebind: result{
					status: framework.NewStatus(framework.Pending, `waiting for resource driver`),
					changes: change{
						scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
							return st.FromPodSchedulingContexts(in).
								SelectedNode(workerNode.Name).
								Obj()
						},
					},
				},
			},
		},
		"delayed-allocation-scheduling-finish-concurrent-label-update": {
			// Use the populated PodSchedulingContext object to select a
			// node.
			pod:         podWithClaimName,
			claims:      []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
			schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
			classes:     []*resourcev1alpha2.ResourceClass{resourceClass},
			prepare: prepare{
				prebind: change{
					scheduling: func(in *resourcev1alpha2.PodSchedulingContext) *resourcev1alpha2.PodSchedulingContext {
						// This does not actually conflict with setting the
						// selected node, but because the plugin is not using
						// patching yet, Update nonetheless fails.
						return st.FromPodSchedulingContexts(in).
							Label("hello", "world").
							Obj()
					},
				},
			},
			want: want{
				prebind: result{
					status: framework.AsStatus(errors.New(`ResourceVersion must match the object that gets updated`)),
				},
			},
		},
		"delayed-allocation-scheduling-completed": {
			// Remove PodSchedulingContext object once the pod is scheduled.
			pod:         podWithClaimName,
			claims:      []*resourcev1alpha2.ResourceClaim{allocatedClaim},
			schedulings: []*resourcev1alpha2.PodSchedulingContext{schedulingInfo},
			classes:     []*resourcev1alpha2.ResourceClass{resourceClass},
			want: want{
				prebind: result{
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							return st.FromResourceClaim(in).
								ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
								Obj()
						},
					},
				},
				postbind: result{
					removed: []metav1.Object{schedulingInfo},
				},
			},
		},
		"in-use-by-other": {
			nodes:       []*v1.Node{},
			pod:         otherPodWithClaimName,
			claims:      []*resourcev1alpha2.ResourceClaim{inUseClaim},
			classes:     []*resourcev1alpha2.ResourceClass{},
			schedulings: []*resourcev1alpha2.PodSchedulingContext{},
			prepare:     prepare{},
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim in use`),
				},
				postfilter: result{
					status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`),
				},
			},
		},
		"wrong-topology-delayed-allocation": {
			// PostFilter tries to get the pod schedulable by
			// deallocating the claim.
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{allocatedDelayedClaimWithWrongTopology},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim not available on the node`),
					},
				},
				postfilter: result{
					// Claims with delayed allocation get deallocated.
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							return st.FromResourceClaim(in).
								DeallocationRequested(true).
								Obj()
						},
					},
					status: framework.NewStatus(framework.Unschedulable, `deallocation of ResourceClaim completed`),
				},
			},
		},
		"wrong-topology-immediate-allocation": {
			// PostFilter tries to get the pod schedulable by
			// deallocating the claim.
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{allocatedImmediateClaimWithWrongTopology},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim not available on the node`),
					},
				},
				postfilter: result{
					// Claims with immediate allocation don't. They would just get allocated again right
					// away, without considering the needs of the pod.
					status: framework.NewStatus(framework.Unschedulable, `still not schedulable`),
				},
			},
		},
		"wrong-topology-delayed-allocation-structured": {
			// PostFilter tries to get the pod schedulable by
			// deallocating the claim.
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{structuredAllocatedDelayedClaimWithWrongTopology},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim not available on the node`),
					},
				},
				postfilter: result{
					// Claims with delayed allocation and structured parameters get deallocated immediately.
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							return st.FromResourceClaim(in).
								Allocation("", nil).
								Obj()
						},
					},
					status: framework.NewStatus(framework.Unschedulable, `deallocation of ResourceClaim completed`),
				},
			},
		},
		"wrong-topology-immediate-allocation-structured": {
			// PostFilter tries to get the pod schedulable by
			// deallocating the claim.
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{structuredAllocatedImmediateClaimWithWrongTopology},
			want: want{
				filter: perNodeResult{
					workerNode.Name: {
						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim not available on the node`),
					},
				},
				postfilter: result{
					// Claims with immediate allocation don't. The allocation is considered
					// more important than the pod and pods need to wait for the node to
					// become available again.
					status: framework.NewStatus(framework.Unschedulable, `still not schedulable`),
				},
			},
		},
		"good-topology": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaimWithGoodTopology},
			want: want{
				prebind: result{
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							return st.FromResourceClaim(in).
								ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
								Obj()
						},
					},
				},
			},
		},
		"bind-failure": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaimWithGoodTopology},
			want: want{
				prebind: result{
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							return st.FromResourceClaim(in).
								ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
								Obj()
						},
					},
				},
				unreserveAfterBindFailure: &result{
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							out := in.DeepCopy()
							out.Status.ReservedFor = []resourcev1alpha2.ResourceClaimConsumerReference{}
							return out
						},
					},
				},
			},
		},
		"bind-failure-structured": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{structuredAllocatedClaimWithGoodTopology},
			want: want{
				prebind: result{
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							return st.FromResourceClaim(in).
								ReservedFor(resourcev1alpha2.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
								Obj()
						},
					},
				},
				unreserveAfterBindFailure: &result{
					changes: change{
						claim: func(in *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
							out := in.DeepCopy()
							out.Status.ReservedFor = []resourcev1alpha2.ResourceClaimConsumerReference{}
							return out
						},
					},
				},
			},
		},
		"reserved-okay": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{inUseClaim},
		},
		"disable": {
			pod:    podWithClaimName,
			claims: []*resourcev1alpha2.ResourceClaim{inUseClaim},
			want: want{
				prefilter: result{
					status: framework.NewStatus(framework.Skip),
				},
			},
			disable: true,
		},
	}

	for name, tc := range testcases {
		// We can run in parallel because logging is per-test.
		tc := tc
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			nodes := tc.nodes
			if nodes == nil {
				nodes = []*v1.Node{workerNode}
			}
			testCtx := setup(t, nodes, tc.claims, tc.classes, tc.schedulings, tc.objs)
			testCtx.p.enabled = !tc.disable
			initialObjects := testCtx.listAll(t)

			status := testCtx.p.PreEnqueue(testCtx.ctx, tc.pod)
			t.Run("PreEnqueue", func(t *testing.T) {
				testCtx.verify(t, tc.want.preenqueue, initialObjects, nil, status)
			})
			if !status.IsSuccess() {
				return
			}

			result, status := testCtx.p.PreFilter(testCtx.ctx, testCtx.state, tc.pod)
			t.Run("prefilter", func(t *testing.T) {
				assert.Equal(t, tc.want.preFilterResult, result)
				testCtx.verify(t, tc.want.prefilter, initialObjects, result, status)
			})
			if status.IsSkip() {
				return
			}
			unschedulable := status.Code() != framework.Success

			var potentialNodes []*framework.NodeInfo

			initialObjects = testCtx.listAll(t)
			testCtx.updateAPIServer(t, initialObjects, tc.prepare.filter)
			if !unschedulable {
				for _, nodeInfo := range testCtx.nodeInfos {
					initialObjects = testCtx.listAll(t)
					status := testCtx.p.Filter(testCtx.ctx, testCtx.state, tc.pod, nodeInfo)
					nodeName := nodeInfo.Node().Name
					t.Run(fmt.Sprintf("filter/%s", nodeInfo.Node().Name), func(t *testing.T) {
						testCtx.verify(t, tc.want.filter.forNode(nodeName), initialObjects, nil, status)
					})
					if status.Code() != framework.Success {
						unschedulable = true
					} else {
						potentialNodes = append(potentialNodes, nodeInfo)
					}
				}
			}

			if !unschedulable && len(potentialNodes) > 0 {
				initialObjects = testCtx.listAll(t)
				initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.prescore)
				status := testCtx.p.PreScore(testCtx.ctx, testCtx.state, tc.pod, potentialNodes)
				t.Run("prescore", func(t *testing.T) {
					testCtx.verify(t, tc.want.prescore, initialObjects, nil, status)
				})
				if status.Code() != framework.Success {
					unschedulable = true
				}
			}

			var selectedNode *framework.NodeInfo
			if !unschedulable && len(potentialNodes) > 0 {
				selectedNode = potentialNodes[0]

				initialObjects = testCtx.listAll(t)
				initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.reserve)
				status := testCtx.p.Reserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
				t.Run("reserve", func(t *testing.T) {
					testCtx.verify(t, tc.want.reserve, initialObjects, nil, status)
				})
				if status.Code() != framework.Success {
					unschedulable = true
				}
			}

			if selectedNode != nil {
				if unschedulable {
					initialObjects = testCtx.listAll(t)
					initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.unreserve)
					testCtx.p.Unreserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
					t.Run("unreserve", func(t *testing.T) {
						testCtx.verify(t, tc.want.unreserve, initialObjects, nil, status)
					})
				} else {
					if tc.want.unreserveBeforePreBind != nil {
						initialObjects = testCtx.listAll(t)
						testCtx.p.Unreserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
						t.Run("unreserveBeforePreBind", func(t *testing.T) {
							testCtx.verify(t, *tc.want.unreserveBeforePreBind, initialObjects, nil, status)
						})
						return
					}

					initialObjects = testCtx.listAll(t)
					initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.prebind)
					status := testCtx.p.PreBind(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
					t.Run("prebind", func(t *testing.T) {
						testCtx.verify(t, tc.want.prebind, initialObjects, nil, status)
					})

					if tc.want.unreserveAfterBindFailure != nil {
						initialObjects = testCtx.listAll(t)
						testCtx.p.Unreserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
						t.Run("unreserveAfterBindFailure", func(t *testing.T) {
							testCtx.verify(t, *tc.want.unreserveAfterBindFailure, initialObjects, nil, status)
						})
					} else if status.IsSuccess() {
						initialObjects = testCtx.listAll(t)
						initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.postbind)
						testCtx.p.PostBind(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
						t.Run("postbind", func(t *testing.T) {
							testCtx.verify(t, tc.want.postbind, initialObjects, nil, nil)
						})
					}
				}
			} else {
				initialObjects = testCtx.listAll(t)
				initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.postfilter)
				result, status := testCtx.p.PostFilter(testCtx.ctx, testCtx.state, tc.pod, nil /* filteredNodeStatusMap not used by plugin */)
				t.Run("postfilter", func(t *testing.T) {
					assert.Equal(t, tc.want.postFilterResult, result)
					testCtx.verify(t, tc.want.postfilter, initialObjects, nil, status)
				})
			}
		})
	}
}

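// testContext bundles the fake client, the informer factory, the plugin
// instance and the cycle state that the per-extension-point checks share.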
type testContext struct {
	ctx             context.Context
	client          *fake.Clientset
	informerFactory informers.SharedInformerFactory
	p               *dynamicResources
	nodeInfos       []*framework.NodeInfo
	state           *framework.CycleState
}

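// verify compares the actual status and the stored, assumed and in-flight
// objects after an operation against the expectations in the result.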
func (tc *testContext) verify(t *testing.T, expected result, initialObjects []metav1.Object, result interface{}, status *framework.Status) {
	t.Helper()
	assert.Equal(t, expected.status, status)
	objects := tc.listAll(t)
	wantObjects := update(t, initialObjects, expected.changes)
	wantObjects = append(wantObjects, expected.added...)
	for _, remove := range expected.removed {
		for i, obj := range wantObjects {
			// This is a bit relaxed (no GVR comparison, no UID
			// comparison) to simplify writing the test cases.
			if obj.GetName() == remove.GetName() && obj.GetNamespace() == remove.GetNamespace() {
				wantObjects = append(wantObjects[0:i], wantObjects[i+1:]...)
				break
			}
		}
	}
	sortObjects(wantObjects)
	// Sometimes assert strips the diff too much, let's do it ourselves...
	if diff := cmp.Diff(wantObjects, objects, cmpopts.IgnoreFields(metav1.ObjectMeta{}, "UID", "ResourceVersion")); diff != "" {
		t.Errorf("Stored objects are different (- expected, + actual):\n%s", diff)
	}

	var expectAssumedClaims []metav1.Object
	if expected.assumedClaim != nil {
		expectAssumedClaims = append(expectAssumedClaims, expected.assumedClaim)
	}
	actualAssumedClaims := tc.listAssumedClaims()
	if diff := cmp.Diff(expectAssumedClaims, actualAssumedClaims, cmpopts.IgnoreFields(metav1.ObjectMeta{}, "UID", "ResourceVersion")); diff != "" {
		t.Errorf("Assumed claims are different (- expected, + actual):\n%s", diff)
	}

	var expectInFlightClaims []metav1.Object
	if expected.inFlightClaim != nil {
		expectInFlightClaims = append(expectInFlightClaims, expected.inFlightClaim)
	}
	actualInFlightClaims := tc.listInFlightClaims()
	if diff := cmp.Diff(expectInFlightClaims, actualInFlightClaims, cmpopts.IgnoreFields(metav1.ObjectMeta{}, "UID", "ResourceVersion")); diff != "" {
		t.Errorf("In-flight claims are different (- expected, + actual):\n%s", diff)
	}
}

func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) {
	t.Helper()
	claims, err := tc.client.ResourceV1alpha2().ResourceClaims("").List(tc.ctx, metav1.ListOptions{})
	require.NoError(t, err, "list claims")
	for _, claim := range claims.Items {
		claim := claim
		objects = append(objects, &claim)
	}
	schedulings, err := tc.client.ResourceV1alpha2().PodSchedulingContexts("").List(tc.ctx, metav1.ListOptions{})
	require.NoError(t, err, "list pod scheduling")
	for _, scheduling := range schedulings.Items {
		scheduling := scheduling
		objects = append(objects, &scheduling)
	}

	sortObjects(objects)
	return
}

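// listAssumedClaims returns the claims for which the assume cache currently
// holds a version that differs from the object stored in the API server,
// i.e. claims that have been assumed but not written back yet.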
func (tc *testContext) listAssumedClaims() []metav1.Object {
	var assumedClaims []metav1.Object
	for _, obj := range tc.p.claimAssumeCache.List(nil) {
		claim := obj.(*resourcev1alpha2.ResourceClaim)
		obj, _ := tc.p.claimAssumeCache.Get(claim.Namespace + "/" + claim.Name)
		apiObj, _ := tc.p.claimAssumeCache.GetAPIObj(claim.Namespace + "/" + claim.Name)
		if obj != apiObj {
			assumedClaims = append(assumedClaims, claim)
		}
	}
	sortObjects(assumedClaims)
	return assumedClaims
}

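// listInFlightClaims returns the claims which the plugin currently tracks as
// having an allocation in flight.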
func (tc *testContext) listInFlightClaims() []metav1.Object {
	var inFlightClaims []metav1.Object
	tc.p.inFlightAllocations.Range(func(key, value any) bool {
		inFlightClaims = append(inFlightClaims, value.(*resourcev1alpha2.ResourceClaim))
		return true
	})
	sortObjects(inFlightClaims)
	return inFlightClaims
}

// updateAPIServer modifies objects and stores any changed object in the API server.
func (tc *testContext) updateAPIServer(t *testing.T, objects []metav1.Object, updates change) []metav1.Object {
	modified := update(t, objects, updates)
	for i := range modified {
		obj := modified[i]
		if diff := cmp.Diff(objects[i], obj); diff != "" {
			t.Logf("Updating %T %q, diff (-old, +new):\n%s", obj, obj.GetName(), diff)
			switch obj := obj.(type) {
			case *resourcev1alpha2.ResourceClaim:
				obj, err := tc.client.ResourceV1alpha2().ResourceClaims(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
				if err != nil {
					t.Fatalf("unexpected error during prepare update: %v", err)
				}
				modified[i] = obj
			case *resourcev1alpha2.PodSchedulingContext:
				obj, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
				if err != nil {
					t.Fatalf("unexpected error during prepare update: %v", err)
				}
				modified[i] = obj
			default:
				t.Fatalf("unsupported object type %T", obj)
			}
		}
	}
	return modified
}

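// sortObjects orders objects by namespace first and by name second so that
// object lists can be compared deterministically.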
  1271  func sortObjects(objects []metav1.Object) {
  1272  	sort.Slice(objects, func(i, j int) bool {
  1273  		if objects[i].GetNamespace() != objects[j].GetNamespace() {
  1274  			return objects[i].GetNamespace() < objects[j].GetNamespace()
  1275  		}
  1276  		return objects[i].GetName() < objects[j].GetName()
  1277  	})
  1278  }
  1279  
  1280  // update walks through all existing objects, picks the update function that
  1281  // matches the object's kind, and replaces those objects for which such a
  1282  // function is set. The rest is left unchanged.
  1283  func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Object {
  1284  	var updated []metav1.Object
  1285  
  1286  	for _, obj := range objects {
  1287  		switch in := obj.(type) {
  1288  		case *resourcev1alpha2.ResourceClaim:
  1289  			if updates.claim != nil {
  1290  				obj = updates.claim(in)
  1291  			}
  1292  		case *resourcev1alpha2.PodSchedulingContext:
  1293  			if updates.scheduling != nil {
  1294  				obj = updates.scheduling(in)
  1295  			}
  1296  		}
  1297  		updated = append(updated, obj)
  1298  	}
  1299  
  1300  	return updated
  1301  }
  1302  
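        // setup creates a test context with a fake client, informers, and the
        // dynamicResources plugin, pre-populated with the given nodes, claims,
        // classes, scheduling contexts, and additional objects.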
  1303  func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceClaim, classes []*resourcev1alpha2.ResourceClass, schedulings []*resourcev1alpha2.PodSchedulingContext, objs []apiruntime.Object) (result *testContext) {
  1304  	t.Helper()
  1305  
  1306  	tc := &testContext{}
  1307  	tCtx := ktesting.Init(t)
  1308  	tc.ctx = tCtx
  1309  
  1310  	tc.client = fake.NewSimpleClientset(objs...)
  1311  	reactor := createReactor(tc.client.Tracker())
  1312  	tc.client.PrependReactor("*", "*", reactor)
  1313  
  1314  	// Quick-and-dirty workaround for the fake client storing ResourceClassParameters and
  1315  	// ResourceClaimParameters under "resourceclassparameterses" and "resourceclaimparameterses":
  1316  	// intercept the correctly named LIST requests from the informers and answer them with
  1317  	// the results stored under those incorrect resource names.
  1318  	tc.client.PrependReactor("list", "resourceclaimparameters", createListReactor(tc.client.Tracker(), "ResourceClaimParameters"))
  1319  	tc.client.PrependReactor("list", "resourceclassparameters", createListReactor(tc.client.Tracker(), "ResourceClassParameters"))
  1320  
  1321  	tc.informerFactory = informers.NewSharedInformerFactory(tc.client, 0)
  1322  
  1323  	opts := []runtime.Option{
  1324  		runtime.WithClientSet(tc.client),
  1325  		runtime.WithInformerFactory(tc.informerFactory),
  1326  	}
  1327  	fh, err := runtime.NewFramework(tCtx, nil, nil, opts...)
  1328  	if err != nil {
  1329  		t.Fatal(err)
  1330  	}
  1331  
  1332  	pl, err := New(tCtx, nil, fh, feature.Features{EnableDynamicResourceAllocation: true})
  1333  	if err != nil {
  1334  		t.Fatal(err)
  1335  	}
  1336  	tc.p = pl.(*dynamicResources)
  1337  
  1338  	// The tests create the objects through the API so that the reactors
  1339  	// above get triggered and assign UIDs and ResourceVersions.
  1340  	for _, claim := range claims {
  1341  		_, err := tc.client.ResourceV1alpha2().ResourceClaims(claim.Namespace).Create(tc.ctx, claim, metav1.CreateOptions{})
  1342  		require.NoError(t, err, "create resource claim")
  1343  	}
  1344  	for _, class := range classes {
  1345  		_, err := tc.client.ResourceV1alpha2().ResourceClasses().Create(tc.ctx, class, metav1.CreateOptions{})
  1346  		require.NoError(t, err, "create resource class")
  1347  	}
  1348  	for _, scheduling := range schedulings {
  1349  		_, err := tc.client.ResourceV1alpha2().PodSchedulingContexts(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{})
  1350  		require.NoError(t, err, "create pod scheduling")
  1351  	}
  1352  
  1353  	tc.informerFactory.Start(tc.ctx.Done())
  1354  	t.Cleanup(func() {
  1355  		// Need to cancel before waiting for the shutdown.
  1356  		tCtx.Cancel("test is done")
  1357  		// Now we can wait for all goroutines to stop.
  1358  		tc.informerFactory.Shutdown()
  1359  	})
  1360  
  1361  	tc.informerFactory.WaitForCacheSync(tc.ctx.Done())
  1362  
  1363  	for _, node := range nodes {
  1364  		nodeInfo := framework.NewNodeInfo()
  1365  		nodeInfo.SetNode(node)
  1366  		tc.nodeInfos = append(tc.nodeInfos, nodeInfo)
  1367  	}
  1368  	tc.state = framework.NewCycleState()
  1369  
  1370  	return tc
  1371  }
  1372  
  1373  // createReactor implements the logic required for the UID and ResourceVersion
  1374  // fields to work when using the fake client. Add it with client.PrependReactor
  1375  // to your fake client. ResourceVersion handling is required for conflict
  1376  // detection during updates, which some test scenarios exercise.
  1377  func createReactor(tracker cgotesting.ObjectTracker) func(action cgotesting.Action) (handled bool, ret apiruntime.Object, err error) {
  1378  	var uidCounter int
  1379  	var resourceVersionCounter int
  1380  	var mutex sync.Mutex
  1381  
  1382  	return func(action cgotesting.Action) (handled bool, ret apiruntime.Object, err error) {
  1383  		createAction, ok := action.(cgotesting.CreateAction)
  1384  		if !ok {
  1385  			return false, nil, nil
  1386  		}
  1387  		obj, ok := createAction.GetObject().(metav1.Object)
  1388  		if !ok {
  1389  			return false, nil, nil
  1390  		}
  1391  
  1392  		mutex.Lock()
  1393  		defer mutex.Unlock()
  1394  		switch action.GetVerb() {
  1395  		case "create":
  1396  			if obj.GetUID() != "" {
  1397  				return true, nil, errors.New("UID must not be set on create")
  1398  			}
  1399  			if obj.GetResourceVersion() != "" {
  1400  				return true, nil, errors.New("ResourceVersion must not be set on create")
  1401  			}
  1402  			obj.SetUID(types.UID(fmt.Sprintf("UID-%d", uidCounter)))
  1403  			uidCounter++
  1404  			obj.SetResourceVersion(fmt.Sprintf("%d", resourceVersionCounter))
  1405  			resourceVersionCounter++
  1406  		case "update":
  1407  			uid := obj.GetUID()
  1408  			resourceVersion := obj.GetResourceVersion()
  1409  			if uid == "" {
  1410  				return true, nil, errors.New("UID must be set on update")
  1411  			}
  1412  			if resourceVersion == "" {
  1413  				return true, nil, errors.New("ResourceVersion must be set on update")
  1414  			}
  1415  
  1416  			oldObj, err := tracker.Get(action.GetResource(), obj.GetNamespace(), obj.GetName())
  1417  			if err != nil {
  1418  				return true, nil, err
  1419  			}
  1420  			oldObjMeta, ok := oldObj.(metav1.Object)
  1421  			if !ok {
  1422  				return true, nil, errors.New("internal error: unexpected old object type")
  1423  			}
  1424  			if oldObjMeta.GetResourceVersion() != resourceVersion {
  1425  				return true, nil, errors.New("ResourceVersion must match the object that gets updated")
  1426  			}
  1427  
  1428  			obj.SetResourceVersion(fmt.Sprintf("%d", resourceVersionCounter))
  1429  			resourceVersionCounter++
  1430  		}
  1431  		return false, nil, nil
  1432  	}
  1433  }
  1434  
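        // createListReactor returns a reactor which handles LIST requests for the
        // given kind by returning the objects that the fake client stored under the
        // incorrectly pluralized resource name (see the workaround in setup).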
  1435  func createListReactor(tracker cgotesting.ObjectTracker, kind string) func(action cgotesting.Action) (handled bool, ret apiruntime.Object, err error) {
  1436  	return func(action cgotesting.Action) (handled bool, ret apiruntime.Object, err error) {
  1438  		gvr := action.GetResource()
  1439  		ns := action.GetNamespace()
  1440  		gvr.Resource += "es"
  1441  		list, err := tracker.List(gvr, schema.GroupVersionKind{Group: gvr.Group, Version: gvr.Version, Kind: kind}, ns)
  1442  		return true, list, err
  1443  	}
  1444  }
  1445  
  1446  func Test_isSchedulableAfterClaimChange(t *testing.T) {
  1447  	testcases := map[string]struct {
  1448  		pod            *v1.Pod
  1449  		claims         []*resourcev1alpha2.ResourceClaim
  1450  		oldObj, newObj interface{}
  1451  		expectedHint   framework.QueueingHint
  1452  		expectedErr    bool
  1453  	}{
  1454  		"skip-deletes": {
  1455  			pod:          podWithClaimTemplate,
  1456  			oldObj:       allocatedClaim,
  1457  			newObj:       nil,
  1458  			expectedHint: framework.QueueSkip,
  1459  		},
  1460  		"backoff-wrong-new-object": {
  1461  			pod:         podWithClaimTemplate,
  1462  			newObj:      "not-a-claim",
  1463  			expectedErr: true,
  1464  		},
  1465  		"skip-wrong-claim": {
  1466  			pod: podWithClaimTemplate,
  1467  			newObj: func() *resourcev1alpha2.ResourceClaim {
  1468  				claim := allocatedClaim.DeepCopy()
  1469  				claim.OwnerReferences[0].UID += "123"
  1470  				return claim
  1471  			}(),
  1472  			expectedHint: framework.QueueSkip,
  1473  		},
  1474  		"skip-unrelated-claim": {
  1475  			pod:    podWithClaimTemplate,
  1476  			claims: []*resourcev1alpha2.ResourceClaim{allocatedClaim},
  1477  			newObj: func() *resourcev1alpha2.ResourceClaim {
  1478  				claim := allocatedClaim.DeepCopy()
  1479  				claim.Name += "-foo"
  1480  				claim.UID += "123"
  1481  				return claim
  1482  			}(),
  1483  			expectedHint: framework.QueueSkip,
  1484  		},
  1485  		"queue-on-add": {
  1486  			pod:          podWithClaimName,
  1487  			newObj:       pendingImmediateClaim,
  1488  			expectedHint: framework.Queue,
  1489  		},
  1490  		"backoff-wrong-old-object": {
  1491  			pod:         podWithClaimName,
  1492  			oldObj:      "not-a-claim",
  1493  			newObj:      pendingImmediateClaim,
  1494  			expectedErr: true,
  1495  		},
  1496  		"skip-adding-finalizer": {
  1497  			pod:    podWithClaimName,
  1498  			claims: []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim},
  1499  			oldObj: pendingImmediateClaim,
  1500  			newObj: func() *resourcev1alpha2.ResourceClaim {
  1501  				claim := pendingImmediateClaim.DeepCopy()
  1502  				claim.Finalizers = append(claim.Finalizers, "foo")
  1503  				return claim
  1504  			}(),
  1505  			expectedHint: framework.QueueSkip,
  1506  		},
  1507  		"queue-on-status-change": {
  1508  			pod:    podWithClaimName,
  1509  			claims: []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim},
  1510  			oldObj: pendingImmediateClaim,
  1511  			newObj: func() *resourcev1alpha2.ResourceClaim {
  1512  				claim := pendingImmediateClaim.DeepCopy()
  1513  				claim.Status.Allocation = &resourcev1alpha2.AllocationResult{}
  1514  				return claim
  1515  			}(),
  1516  			expectedHint: framework.Queue,
  1517  		},
  1518  		"structured-claim-deallocate": {
  1519  			pod:    podWithClaimName,
  1520  			claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
  1521  			oldObj: func() *resourcev1alpha2.ResourceClaim {
  1522  				claim := structuredAllocatedClaim.DeepCopy()
  1523  				claim.Name += "-other"
  1524  				return claim
  1525  			}(),
  1526  			newObj: func() *resourcev1alpha2.ResourceClaim {
  1527  				claim := structuredAllocatedClaim.DeepCopy()
  1528  				claim.Name += "-other"
  1529  				claim.Status.Allocation = nil
  1530  				return claim
  1531  			}(),
  1532  			// TODO (https://github.com/kubernetes/kubernetes/issues/123697): don't wake up
  1533  			// claims not using structured parameters.
  1534  			expectedHint: framework.Queue,
  1535  		},
  1536  	}
  1537  
  1538  	for name, tc := range testcases {
  1539  		t.Run(name, func(t *testing.T) {
  1540  			logger, _ := ktesting.NewTestContext(t)
  1541  			testCtx := setup(t, nil, tc.claims, nil, nil, nil)
  1542  			if claim, ok := tc.newObj.(*resourcev1alpha2.ResourceClaim); ok {
  1543  				// Add or update the claim in the informer store because the lister gets called and must find the claim.
  1544  				store := testCtx.informerFactory.Resource().V1alpha2().ResourceClaims().Informer().GetStore()
  1545  				if tc.oldObj == nil {
  1546  					require.NoError(t, store.Add(claim))
  1547  				} else {
  1548  					require.NoError(t, store.Update(claim))
  1549  				}
  1550  			}
  1551  			actualHint, err := testCtx.p.isSchedulableAfterClaimChange(logger, tc.pod, tc.oldObj, tc.newObj)
  1552  			if tc.expectedErr {
  1553  				require.Error(t, err)
  1554  				return
  1555  			}
  1556  
  1557  			require.NoError(t, err)
  1558  			require.Equal(t, tc.expectedHint, actualHint)
  1559  		})
  1560  	}
  1561  }
  1562  
  1563  func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) {
  1564  	testcases := map[string]struct {
  1565  		pod            *v1.Pod
  1566  		schedulings    []*resourcev1alpha2.PodSchedulingContext
  1567  		claims         []*resourcev1alpha2.ResourceClaim
  1568  		oldObj, newObj interface{}
  1569  		expectedHint   framework.QueueingHint
  1570  		expectedErr    bool
  1571  	}{
  1572  		"skip-deleted": {
  1573  			pod:          podWithClaimTemplate,
  1574  			oldObj:       scheduling,
  1575  			expectedHint: framework.QueueSkip,
  1576  		},
  1577  		"skip-missed-deleted": {
  1578  			pod: podWithClaimTemplate,
  1579  			oldObj: cache.DeletedFinalStateUnknown{
  1580  				Obj: scheduling,
  1581  			},
  1582  			expectedHint: framework.QueueSkip,
  1583  		},
  1584  		"backoff-wrong-old-object": {
  1585  			pod:         podWithClaimTemplate,
  1586  			oldObj:      "not-a-scheduling-context",
  1587  			newObj:      scheduling,
  1588  			expectedErr: true,
  1589  		},
  1590  		"backoff-missed-wrong-old-object": {
  1591  			pod: podWithClaimTemplate,
  1592  			oldObj: cache.DeletedFinalStateUnknown{
  1593  				Obj: "not-a-scheduling-context",
  1594  			},
  1595  			newObj:      scheduling,
  1596  			expectedErr: true,
  1597  		},
  1598  		"skip-unrelated-object": {
  1599  			pod:    podWithClaimTemplate,
  1600  			claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
  1601  			newObj: func() *resourcev1alpha2.PodSchedulingContext {
  1602  				scheduling := scheduling.DeepCopy()
  1603  				scheduling.Name += "-foo"
  1604  				return scheduling
  1605  			}(),
  1606  			expectedHint: framework.QueueSkip,
  1607  		},
  1608  		"backoff-wrong-new-object": {
  1609  			pod:         podWithClaimTemplate,
  1610  			oldObj:      scheduling,
  1611  			newObj:      "not-a-scheduling-context",
  1612  			expectedErr: true,
  1613  		},
  1614  		"skip-missing-claim": {
  1615  			pod:          podWithClaimTemplate,
  1616  			oldObj:       scheduling,
  1617  			newObj:       schedulingInfo,
  1618  			expectedHint: framework.QueueSkip,
  1619  		},
  1620  		"skip-missing-infos": {
  1621  			pod:          podWithClaimTemplateInStatus,
  1622  			claims:       []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
  1623  			oldObj:       scheduling,
  1624  			newObj:       scheduling,
  1625  			expectedHint: framework.QueueSkip,
  1626  		},
  1627  		"queue-new-infos": {
  1628  			pod:          podWithClaimTemplateInStatus,
  1629  			claims:       []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
  1630  			oldObj:       scheduling,
  1631  			newObj:       schedulingInfo,
  1632  			expectedHint: framework.Queue,
  1633  		},
  1634  		"queue-bad-selected-node": {
  1635  			pod:    podWithClaimTemplateInStatus,
  1636  			claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
  1637  			oldObj: func() *resourcev1alpha2.PodSchedulingContext {
  1638  				scheduling := schedulingInfo.DeepCopy()
  1639  				scheduling.Spec.SelectedNode = workerNode.Name
  1640  				return scheduling
  1641  			}(),
  1642  			newObj: func() *resourcev1alpha2.PodSchedulingContext {
  1643  				scheduling := schedulingInfo.DeepCopy()
  1644  				scheduling.Spec.SelectedNode = workerNode.Name
  1645  				scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(scheduling.Status.ResourceClaims[0].UnsuitableNodes, scheduling.Spec.SelectedNode)
  1646  				return scheduling
  1647  			}(),
  1648  			expectedHint: framework.Queue,
  1649  		},
  1650  		"skip-spec-changes": {
  1651  			pod:    podWithClaimTemplateInStatus,
  1652  			claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
  1653  			oldObj: schedulingInfo,
  1654  			newObj: func() *resourcev1alpha2.PodSchedulingContext {
  1655  				scheduling := schedulingInfo.DeepCopy()
  1656  				scheduling.Spec.SelectedNode = workerNode.Name
  1657  				return scheduling
  1658  			}(),
  1659  			expectedHint: framework.QueueSkip,
  1660  		},
  1661  		"backoff-other-changes": {
  1662  			pod:    podWithClaimTemplateInStatus,
  1663  			claims: []*resourcev1alpha2.ResourceClaim{pendingDelayedClaim},
  1664  			oldObj: schedulingInfo,
  1665  			newObj: func() *resourcev1alpha2.PodSchedulingContext {
  1666  				scheduling := schedulingInfo.DeepCopy()
  1667  				scheduling.Finalizers = append(scheduling.Finalizers, "foo")
  1668  				return scheduling
  1669  			}(),
  1670  			expectedHint: framework.Queue,
  1671  		},
  1672  	}
  1673  
  1674  	for name, tc := range testcases {
  1675  		tc := tc // capture the range variable for the parallel subtest
  1676  		t.Run(name, func(t *testing.T) {
  1677  			t.Parallel()
  1678  			logger, _ := ktesting.NewTestContext(t)
  1679  			testCtx := setup(t, nil, tc.claims, nil, tc.schedulings, nil)
  1680  			actualHint, err := testCtx.p.isSchedulableAfterPodSchedulingContextChange(logger, tc.pod, tc.oldObj, tc.newObj)
  1681  			if tc.expectedErr {
  1682  				require.Error(t, err)
  1683  				return
  1684  			}
  1685  
  1686  			require.NoError(t, err)
  1687  			require.Equal(t, tc.expectedHint, actualHint)
  1688  		})
  1689  	}
  1690  }