k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/scheduler/schedule_one_test.go

     1  /*
     2  Copyright 2014 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package scheduler
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/rand"
    25  	"reflect"
    26  	"regexp"
    27  	goruntime "runtime"
    28  	"sort"
    29  	"strconv"
    30  	"sync"
    31  	"testing"
    32  	"time"
    33  
    34  	"github.com/google/go-cmp/cmp"
    35  	v1 "k8s.io/api/core/v1"
    36  	eventsv1 "k8s.io/api/events/v1"
    37  	"k8s.io/apimachinery/pkg/api/resource"
    38  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    39  	"k8s.io/apimachinery/pkg/runtime"
    40  	"k8s.io/apimachinery/pkg/types"
    41  	"k8s.io/apimachinery/pkg/util/sets"
    42  	"k8s.io/apimachinery/pkg/util/wait"
    43  	"k8s.io/client-go/informers"
    44  	clientsetfake "k8s.io/client-go/kubernetes/fake"
    45  	"k8s.io/client-go/kubernetes/scheme"
    46  	clienttesting "k8s.io/client-go/testing"
    47  	clientcache "k8s.io/client-go/tools/cache"
    48  	"k8s.io/client-go/tools/events"
    49  	"k8s.io/component-helpers/storage/volume"
    50  	"k8s.io/klog/v2"
    51  	"k8s.io/klog/v2/ktesting"
    52  	extenderv1 "k8s.io/kube-scheduler/extender/v1"
    53  	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
    54  	"k8s.io/kubernetes/pkg/scheduler/framework"
    55  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
    56  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
    57  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
    58  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
    59  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
    60  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
    61  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
    62  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
    63  	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
    64  	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
    65  	fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
    66  	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
    67  	"k8s.io/kubernetes/pkg/scheduler/profile"
    68  	st "k8s.io/kubernetes/pkg/scheduler/testing"
    69  	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
    70  	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
    71  	"k8s.io/utils/ptr"
    72  )
    73  
    74  const (
    75  	testSchedulerName       = "test-scheduler"
    76  	mb                int64 = 1024 * 1024
    77  )
    78  
    79  var (
    80  	emptySnapshot         = internalcache.NewEmptySnapshot()
    81  	podTopologySpreadFunc = frameworkruntime.FactoryAdapter(feature.Features{}, podtopologyspread.New)
    82  	errPrioritize         = fmt.Errorf("priority map encounters an error")
    83  )
    84  
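        // mockScheduleResult bundles the ScheduleResult and error that a test case's
        // stubbed SchedulePod func should return.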
    85  type mockScheduleResult struct {
    86  	result ScheduleResult
    87  	err    error
    88  }
    89  
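        // fakeExtender is a minimal framework.Extender test double; the flags below
        // select which behaviors it reports. For example, the binding tests configure
        // an interested binder extender as:
        //
        //	&fakeExtender{isBinder: true, interestedPodName: "pod0"}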
    90  type fakeExtender struct {
    91  	isBinder          bool
    92  	interestedPodName string
    93  	ignorable         bool
    94  	gotBind           bool
    95  	errBind           bool
    96  	isPrioritizer     bool
    97  	isFilter          bool
    98  }
    99  
   100  func (f *fakeExtender) Name() string {
   101  	return "fakeExtender"
   102  }
   103  
   104  func (f *fakeExtender) IsIgnorable() bool {
   105  	return f.ignorable
   106  }
   107  
   108  func (f *fakeExtender) ProcessPreemption(
   109  	_ *v1.Pod,
   110  	_ map[string]*extenderv1.Victims,
   111  	_ framework.NodeInfoLister,
   112  ) (map[string]*extenderv1.Victims, error) {
   113  	return nil, nil
   114  }
   115  
   116  func (f *fakeExtender) SupportsPreemption() bool {
   117  	return false
   118  }
   119  
   120  func (f *fakeExtender) Filter(pod *v1.Pod, nodes []*framework.NodeInfo) ([]*framework.NodeInfo, extenderv1.FailedNodesMap, extenderv1.FailedNodesMap, error) {
   121  	return nil, nil, nil, nil
   122  }
   123  
   124  func (f *fakeExtender) Prioritize(
   125  	_ *v1.Pod,
   126  	_ []*framework.NodeInfo,
   127  ) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error) {
   128  	return nil, 0, nil
   129  }
   130  
   131  func (f *fakeExtender) Bind(binding *v1.Binding) error {
   132  	if f.isBinder {
   133  		if f.errBind {
   134  			return errors.New("bind error")
   135  		}
   136  		f.gotBind = true
   137  		return nil
   138  	}
   139  	return errors.New("not a binder")
   140  }
   141  
   142  func (f *fakeExtender) IsBinder() bool {
   143  	return f.isBinder
   144  }
   145  
   146  func (f *fakeExtender) IsInterested(pod *v1.Pod) bool {
   147  	return pod != nil && pod.Name == f.interestedPodName
   148  }
   149  
   150  func (f *fakeExtender) IsPrioritizer() bool {
   151  	return f.isPrioritizer
   152  }
   153  
   154  func (f *fakeExtender) IsFilter() bool {
   155  	return f.isFilter
   156  }
   157  
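        // falseMapPlugin is a Score plugin whose Score always fails with errPrioritize.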
   158  type falseMapPlugin struct{}
   159  
   160  func newFalseMapPlugin() frameworkruntime.PluginFactory {
   161  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   162  		return &falseMapPlugin{}, nil
   163  	}
   164  }
   165  
   166  func (pl *falseMapPlugin) Name() string {
   167  	return "FalseMap"
   168  }
   169  
   170  func (pl *falseMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (int64, *framework.Status) {
   171  	return 0, framework.AsStatus(errPrioritize)
   172  }
   173  
   174  func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   175  	return nil
   176  }
   177  
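        // numericMapPlugin scores a node by parsing its name as an integer (a node
        // named "3" scores 3) and errors on non-numeric node names.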
   178  type numericMapPlugin struct{}
   179  
   180  func newNumericMapPlugin() frameworkruntime.PluginFactory {
   181  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   182  		return &numericMapPlugin{}, nil
   183  	}
   184  }
   185  
   186  func (pl *numericMapPlugin) Name() string {
   187  	return "NumericMap"
   188  }
   189  
   190  func (pl *numericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeName string) (int64, *framework.Status) {
   191  	score, err := strconv.Atoi(nodeName)
   192  	if err != nil {
   193  		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error converting nodename to int: %+v", nodeName))
   194  	}
   195  	return int64(score), nil
   196  }
   197  
   198  func (pl *numericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   199  	return nil
   200  }
   201  
   202  // NewNoPodsFilterPlugin initializes a noPodsFilterPlugin and returns it.
   203  func NewNoPodsFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   204  	return &noPodsFilterPlugin{}, nil
   205  }
   206  
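        // reverseNumericMapPlugin scores a node by its numeric name; NormalizeScore
        // then inverts the ordering, so the highest-named node ends up ranked lowest.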
   207  type reverseNumericMapPlugin struct{}
   208  
   209  func (pl *reverseNumericMapPlugin) Name() string {
   210  	return "ReverseNumericMap"
   211  }
   212  
   213  func (pl *reverseNumericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeName string) (int64, *framework.Status) {
   214  	score, err := strconv.Atoi(nodeName)
   215  	if err != nil {
   216  		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error converting nodename to int: %+v", nodeName))
   217  	}
   218  	return int64(score), nil
   219  }
   220  
   221  func (pl *reverseNumericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   222  	return pl
   223  }
   224  
   225  func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
   226  	var maxScore float64
   227  	minScore := math.MaxFloat64
   228  
   229  	for _, hostPriority := range nodeScores {
   230  		maxScore = math.Max(maxScore, float64(hostPriority.Score))
   231  		minScore = math.Min(minScore, float64(hostPriority.Score))
   232  	}
   233  	for i, hostPriority := range nodeScores {
   234  		nodeScores[i] = framework.NodeScore{
   235  			Name:  hostPriority.Name,
   236  			Score: int64(maxScore + minScore - float64(hostPriority.Score)),
   237  		}
   238  	}
   239  	return nil
   240  }
   241  
   242  func newReverseNumericMapPlugin() frameworkruntime.PluginFactory {
   243  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   244  		return &reverseNumericMapPlugin{}, nil
   245  	}
   246  }
   247  
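        // trueMapPlugin scores every node 1 and, during normalization, rejects any
        // score entry that carries an empty host name.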
   248  type trueMapPlugin struct{}
   249  
   250  func (pl *trueMapPlugin) Name() string {
   251  	return "TrueMap"
   252  }
   253  
   254  func (pl *trueMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (int64, *framework.Status) {
   255  	return 1, nil
   256  }
   257  
   258  func (pl *trueMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   259  	return pl
   260  }
   261  
   262  func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
   263  	for _, host := range nodeScores {
   264  		if host.Name == "" {
   265  			return framework.NewStatus(framework.Error, "unexpected empty host name")
   266  		}
   267  	}
   268  	return nil
   269  }
   270  
   271  func newTrueMapPlugin() frameworkruntime.PluginFactory {
   272  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   273  		return &trueMapPlugin{}, nil
   274  	}
   275  }
   276  
   277  type noPodsFilterPlugin struct{}
   278  
   279  // Name returns name of the plugin.
   280  func (pl *noPodsFilterPlugin) Name() string {
   281  	return "NoPodsFilter"
   282  }
   283  
   284  // Filter invoked at the filter extension point.
   285  func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   286  	if len(nodeInfo.Pods) == 0 {
   287  		return nil
   288  	}
   289  	return framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake)
   290  }
   291  
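        // fakeNodeSelectorArgs carry the only node name that fakeNodeSelector admits.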
   292  type fakeNodeSelectorArgs struct {
   293  	NodeName string `json:"nodeName"`
   294  }
   295  
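        // fakeNodeSelector is a Filter plugin that admits exactly the node named in
        // its args and rejects every other node as UnschedulableAndUnresolvable.
        // Profiles configure it via PluginConfig, as the multi-profile test below does:
        //
        //	Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"node2"}`)}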
   296  type fakeNodeSelector struct {
   297  	fakeNodeSelectorArgs
   298  }
   299  
   300  func (s *fakeNodeSelector) Name() string {
   301  	return "FakeNodeSelector"
   302  }
   303  
   304  func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   305  	if nodeInfo.Node().Name != s.NodeName {
   306  		return framework.NewStatus(framework.UnschedulableAndUnresolvable)
   307  	}
   308  	return nil
   309  }
   310  
   311  func newFakeNodeSelector(_ context.Context, args runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   312  	pl := &fakeNodeSelector{}
   313  	if err := frameworkruntime.DecodeInto(args, &pl.fakeNodeSelectorArgs); err != nil {
   314  		return nil, err
   315  	}
   316  	return pl, nil
   317  }
   318  
   319  const (
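        	// fakeSpecifiedNodeNameAnnotation is the pod annotation key read by
        	// fakeNodeSelectorDependOnPodAnnotation to resolve the target node.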
   320  	fakeSpecifiedNodeNameAnnotation = "fake-specified-node-name"
   321  )
   322  
    323  // fakeNodeSelectorDependOnPodAnnotation schedules a pod to the node specified by pod.Annotations[fakeSpecifiedNodeNameAnnotation].
   324  type fakeNodeSelectorDependOnPodAnnotation struct{}
   325  
   326  func (f *fakeNodeSelectorDependOnPodAnnotation) Name() string {
   327  	return "FakeNodeSelectorDependOnPodAnnotation"
   328  }
   329  
    330  // Filter admits only the node specified in the pod's annotation and rejects all other nodes.
   331  func (f *fakeNodeSelectorDependOnPodAnnotation) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   332  	resolveNodeNameFromPodAnnotation := func(pod *v1.Pod) (string, error) {
   333  		if pod == nil {
   334  			return "", fmt.Errorf("empty pod")
   335  		}
   336  		nodeName, ok := pod.Annotations[fakeSpecifiedNodeNameAnnotation]
   337  		if !ok {
   338  			return "", fmt.Errorf("no specified node name on pod %s/%s annotation", pod.Namespace, pod.Name)
   339  		}
   340  		return nodeName, nil
   341  	}
   342  
   343  	nodeName, err := resolveNodeNameFromPodAnnotation(pod)
   344  	if err != nil {
   345  		return framework.AsStatus(err)
   346  	}
   347  	if nodeInfo.Node().Name != nodeName {
   348  		return framework.NewStatus(framework.UnschedulableAndUnresolvable)
   349  	}
   350  	return nil
   351  }
   352  
   353  func newFakeNodeSelectorDependOnPodAnnotation(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   354  	return &fakeNodeSelectorDependOnPodAnnotation{}, nil
   355  }
   356  
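        // TestPlugin is a trivial Filter and Score plugin identified only by its name;
        // Filter always succeeds and Score always returns 1.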
   357  type TestPlugin struct {
   358  	name string
   359  }
   360  
   361  var _ framework.ScorePlugin = &TestPlugin{}
   362  var _ framework.FilterPlugin = &TestPlugin{}
   363  
   364  func (t *TestPlugin) Name() string {
   365  	return t.name
   366  }
   367  
   368  func (t *TestPlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
   369  	return 1, nil
   370  }
   371  
   372  func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
   373  	return nil
   374  }
   375  
   376  func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   377  	return nil
   378  }
   379  
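        // TestSchedulerMultipleProfilesScheduling verifies that each pod is scheduled
        // by the profile matching its schedulerName, and that the resulting bindings
        // and "Scheduled" events are attributed to the right profile.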
   380  func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
   381  	nodes := []runtime.Object{
   382  		st.MakeNode().Name("node1").UID("node1").Obj(),
   383  		st.MakeNode().Name("node2").UID("node2").Obj(),
   384  		st.MakeNode().Name("node3").UID("node3").Obj(),
   385  	}
   386  	pods := []*v1.Pod{
   387  		st.MakePod().Name("pod1").UID("pod1").SchedulerName("match-node3").Obj(),
   388  		st.MakePod().Name("pod2").UID("pod2").SchedulerName("match-node2").Obj(),
   389  		st.MakePod().Name("pod3").UID("pod3").SchedulerName("match-node2").Obj(),
   390  		st.MakePod().Name("pod4").UID("pod4").SchedulerName("match-node3").Obj(),
   391  	}
   392  	wantBindings := map[string]string{
   393  		"pod1": "node3",
   394  		"pod2": "node2",
   395  		"pod3": "node2",
   396  		"pod4": "node3",
   397  	}
   398  	wantControllers := map[string]string{
   399  		"pod1": "match-node3",
   400  		"pod2": "match-node2",
   401  		"pod3": "match-node2",
   402  		"pod4": "match-node3",
   403  	}
   404  
   405  	// Set up scheduler for the 3 nodes.
   406  	// We use a fake filter that only allows one particular node. We create two
   407  	// profiles, each with a different node in the filter configuration.
   408  	objs := append([]runtime.Object{
   409  		&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
   410  	client := clientsetfake.NewSimpleClientset(objs...)
   411  	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
   412  	ctx, cancel := context.WithCancel(context.Background())
   413  	defer cancel()
   414  
   415  	informerFactory := informers.NewSharedInformerFactory(client, 0)
   416  	sched, err := New(
   417  		ctx,
   418  		client,
   419  		informerFactory,
   420  		nil,
   421  		profile.NewRecorderFactory(broadcaster),
   422  		WithProfiles(
   423  			schedulerapi.KubeSchedulerProfile{SchedulerName: "match-node2",
   424  				Plugins: &schedulerapi.Plugins{
   425  					Filter:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelector"}}},
   426  					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
   427  					Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
   428  				},
   429  				PluginConfig: []schedulerapi.PluginConfig{
   430  					{
   431  						Name: "FakeNodeSelector",
   432  						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"node2"}`)},
   433  					},
   434  				},
   435  			},
   436  			schedulerapi.KubeSchedulerProfile{
   437  				SchedulerName: "match-node3",
   438  				Plugins: &schedulerapi.Plugins{
   439  					Filter:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelector"}}},
   440  					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
   441  					Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
   442  				},
   443  				PluginConfig: []schedulerapi.PluginConfig{
   444  					{
   445  						Name: "FakeNodeSelector",
   446  						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"node3"}`)},
   447  					},
   448  				},
   449  			},
   450  		),
   451  		WithFrameworkOutOfTreeRegistry(frameworkruntime.Registry{
   452  			"FakeNodeSelector": newFakeNodeSelector,
   453  		}),
   454  	)
   455  	if err != nil {
   456  		t.Fatal(err)
   457  	}
   458  
   459  	// Capture the bindings and events' controllers.
   460  	var wg sync.WaitGroup
   461  	wg.Add(2 * len(pods))
   462  	bindings := make(map[string]string)
   463  	client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
   464  		if action.GetSubresource() != "binding" {
   465  			return false, nil, nil
   466  		}
   467  		binding := action.(clienttesting.CreateAction).GetObject().(*v1.Binding)
   468  		bindings[binding.Name] = binding.Target.Name
   469  		wg.Done()
   470  		return true, binding, nil
   471  	})
   472  	controllers := make(map[string]string)
   473  	stopFn, err := broadcaster.StartEventWatcher(func(obj runtime.Object) {
   474  		e, ok := obj.(*eventsv1.Event)
   475  		if !ok || e.Reason != "Scheduled" {
   476  			return
   477  		}
   478  		controllers[e.Regarding.Name] = e.ReportingController
   479  		wg.Done()
   480  	})
   481  	if err != nil {
   482  		t.Fatal(err)
   483  	}
   484  	defer stopFn()
   485  
   486  	// Run scheduler.
   487  	informerFactory.Start(ctx.Done())
   488  	informerFactory.WaitForCacheSync(ctx.Done())
   489  	if err = sched.WaitForHandlersSync(ctx); err != nil {
    490  		t.Fatalf("Handlers failed to sync: %v", err)
   491  	}
   492  	go sched.Run(ctx)
   493  
   494  	// Send pods to be scheduled.
   495  	for _, p := range pods {
   496  		_, err := client.CoreV1().Pods("").Create(ctx, p, metav1.CreateOptions{})
   497  		if err != nil {
   498  			t.Fatal(err)
   499  		}
   500  	}
   501  	wg.Wait()
   502  
   503  	// Verify correct bindings and reporting controllers.
   504  	if diff := cmp.Diff(wantBindings, bindings); diff != "" {
   505  		t.Errorf("pods were scheduled incorrectly (-want, +got):\n%s", diff)
   506  	}
   507  	if diff := cmp.Diff(wantControllers, controllers); diff != "" {
   508  		t.Errorf("events were reported with wrong controllers (-want, +got):\n%s", diff)
   509  	}
   510  }
   511  
    512  // TestSchedulerGuaranteeNonNilNodeInSchedulingCycle detects a potential panic on a nil Node when iterating Nodes during the scheduling cycle.
   513  func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) {
   514  	if goruntime.GOOS == "windows" {
   515  		// TODO: remove skip once the failing test has been fixed.
   516  		t.Skip("Skip failing test on Windows.")
   517  	}
   518  	random := rand.New(rand.NewSource(time.Now().UnixNano()))
   519  	ctx, cancel := context.WithCancel(context.Background())
   520  	defer cancel()
   521  
   522  	var (
   523  		initialNodeNumber        = 1000
   524  		initialPodNumber         = 500
   525  		waitSchedulingPodNumber  = 200
   526  		deleteNodeNumberPerRound = 20
   527  		createPodNumberPerRound  = 50
   528  
   529  		fakeSchedulerName = "fake-scheduler"
   530  		fakeNamespace     = "fake-namespace"
   531  
   532  		initialNodes []runtime.Object
   533  		initialPods  []runtime.Object
   534  	)
   535  
   536  	for i := 0; i < initialNodeNumber; i++ {
   537  		nodeName := fmt.Sprintf("node%d", i)
   538  		initialNodes = append(initialNodes, st.MakeNode().Name(nodeName).UID(nodeName).Obj())
   539  	}
   540  	// Randomly scatter initial pods onto nodes.
   541  	for i := 0; i < initialPodNumber; i++ {
   542  		podName := fmt.Sprintf("scheduled-pod%d", i)
   543  		assignedNodeName := fmt.Sprintf("node%d", random.Intn(initialNodeNumber))
   544  		initialPods = append(initialPods, st.MakePod().Name(podName).UID(podName).Node(assignedNodeName).Obj())
   545  	}
   546  
   547  	objs := []runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: fakeNamespace}}}
   548  	objs = append(objs, initialNodes...)
   549  	objs = append(objs, initialPods...)
   550  	client := clientsetfake.NewSimpleClientset(objs...)
   551  	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
   552  
   553  	informerFactory := informers.NewSharedInformerFactory(client, 0)
   554  	sched, err := New(
   555  		ctx,
   556  		client,
   557  		informerFactory,
   558  		nil,
   559  		profile.NewRecorderFactory(broadcaster),
   560  		WithProfiles(
   561  			schedulerapi.KubeSchedulerProfile{SchedulerName: fakeSchedulerName,
   562  				Plugins: &schedulerapi.Plugins{
   563  					Filter:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelectorDependOnPodAnnotation"}}},
   564  					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
   565  					Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
   566  				},
   567  			},
   568  		),
   569  		WithFrameworkOutOfTreeRegistry(frameworkruntime.Registry{
   570  			"FakeNodeSelectorDependOnPodAnnotation": newFakeNodeSelectorDependOnPodAnnotation,
   571  		}),
   572  	)
   573  	if err != nil {
   574  		t.Fatal(err)
   575  	}
   576  
   577  	// Run scheduler.
   578  	informerFactory.Start(ctx.Done())
   579  	informerFactory.WaitForCacheSync(ctx.Done())
   580  	go sched.Run(ctx)
   581  
   582  	var deleteNodeIndex int
   583  	deleteNodesOneRound := func() {
   584  		for i := 0; i < deleteNodeNumberPerRound; i++ {
   585  			if deleteNodeIndex >= initialNodeNumber {
   586  				// all initial nodes are already deleted
   587  				return
   588  			}
   589  			deleteNodeName := fmt.Sprintf("node%d", deleteNodeIndex)
   590  			if err := client.CoreV1().Nodes().Delete(ctx, deleteNodeName, metav1.DeleteOptions{}); err != nil {
   591  				t.Fatal(err)
   592  			}
   593  			deleteNodeIndex++
   594  		}
   595  	}
   596  	var createPodIndex int
   597  	createPodsOneRound := func() {
   598  		if createPodIndex > waitSchedulingPodNumber {
   599  			return
   600  		}
   601  		for i := 0; i < createPodNumberPerRound; i++ {
   602  			podName := fmt.Sprintf("pod%d", createPodIndex)
    603  			// Note: the node (specifiedNodeName) may already be deleted, which causes the pod to fail scheduling.
   604  			specifiedNodeName := fmt.Sprintf("node%d", random.Intn(initialNodeNumber))
   605  
   606  			waitSchedulingPod := st.MakePod().Namespace(fakeNamespace).Name(podName).UID(podName).Annotation(fakeSpecifiedNodeNameAnnotation, specifiedNodeName).SchedulerName(fakeSchedulerName).Obj()
   607  			if _, err := client.CoreV1().Pods(fakeNamespace).Create(ctx, waitSchedulingPod, metav1.CreateOptions{}); err != nil {
   608  				t.Fatal(err)
   609  			}
   610  			createPodIndex++
   611  		}
   612  	}
   613  
    614  	// Below we start 2 goroutines asynchronously to detect potential racing issues:
    615  	// 1) One is responsible for deleting several nodes in each round;
    616  	// 2) The other creates several pods in each round to trigger scheduling.
    617  	// Both goroutines run until ctx.Done() is closed, which happens once all waiting pods have been scheduled at least once.
   618  	go wait.Until(deleteNodesOneRound, 10*time.Millisecond, ctx.Done())
   619  	go wait.Until(createPodsOneRound, 9*time.Millisecond, ctx.Done())
   620  
    621  	// Capture the events and wait for all pods to be scheduled at least once.
   622  	allWaitSchedulingPods := sets.New[string]()
   623  	for i := 0; i < waitSchedulingPodNumber; i++ {
   624  		allWaitSchedulingPods.Insert(fmt.Sprintf("pod%d", i))
   625  	}
   626  	var wg sync.WaitGroup
   627  	wg.Add(waitSchedulingPodNumber)
   628  	stopFn, err := broadcaster.StartEventWatcher(func(obj runtime.Object) {
   629  		e, ok := obj.(*eventsv1.Event)
   630  		if !ok || (e.Reason != "Scheduled" && e.Reason != "FailedScheduling") {
   631  			return
   632  		}
   633  		if allWaitSchedulingPods.Has(e.Regarding.Name) {
   634  			wg.Done()
   635  			allWaitSchedulingPods.Delete(e.Regarding.Name)
   636  		}
   637  	})
   638  	if err != nil {
   639  		t.Fatal(err)
   640  	}
   641  	defer stopFn()
   642  
   643  	wg.Wait()
   644  }
   645  
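        // TestSchedulerScheduleOne drives single ScheduleOne passes with a stubbed
        // SchedulePod and verifies the assume/forget/bind/error bookkeeping per case.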
   646  func TestSchedulerScheduleOne(t *testing.T) {
   647  	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
   648  	client := clientsetfake.NewSimpleClientset(&testNode)
   649  	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
   650  	errS := errors.New("scheduler")
   651  	errB := errors.New("binder")
   652  	preBindErr := errors.New("on PreBind")
   653  
   654  	table := []struct {
   655  		name                string
   656  		injectBindError     error
   657  		sendPod             *v1.Pod
   658  		registerPluginFuncs []tf.RegisterPluginFunc
   659  		expectErrorPod      *v1.Pod
   660  		expectForgetPod     *v1.Pod
   661  		expectAssumedPod    *v1.Pod
   662  		expectError         error
   663  		expectBind          *v1.Binding
   664  		eventReason         string
   665  		mockResult          mockScheduleResult
   666  	}{
   667  		{
   668  			name:       "error reserve pod",
   669  			sendPod:    podWithID("foo", ""),
   670  			mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   671  			registerPluginFuncs: []tf.RegisterPluginFunc{
   672  				tf.RegisterReservePlugin("FakeReserve", tf.NewFakeReservePlugin(framework.NewStatus(framework.Error, "reserve error"))),
   673  			},
   674  			expectErrorPod:   podWithID("foo", testNode.Name),
   675  			expectForgetPod:  podWithID("foo", testNode.Name),
   676  			expectAssumedPod: podWithID("foo", testNode.Name),
   677  			expectError:      fmt.Errorf(`running Reserve plugin "FakeReserve": %w`, errors.New("reserve error")),
   678  			eventReason:      "FailedScheduling",
   679  		},
   680  		{
   681  			name:       "error permit pod",
   682  			sendPod:    podWithID("foo", ""),
   683  			mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   684  			registerPluginFuncs: []tf.RegisterPluginFunc{
   685  				tf.RegisterPermitPlugin("FakePermit", tf.NewFakePermitPlugin(framework.NewStatus(framework.Error, "permit error"), time.Minute)),
   686  			},
   687  			expectErrorPod:   podWithID("foo", testNode.Name),
   688  			expectForgetPod:  podWithID("foo", testNode.Name),
   689  			expectAssumedPod: podWithID("foo", testNode.Name),
   690  			expectError:      fmt.Errorf(`running Permit plugin "FakePermit": %w`, errors.New("permit error")),
   691  			eventReason:      "FailedScheduling",
   692  		},
   693  		{
   694  			name:       "error prebind pod",
   695  			sendPod:    podWithID("foo", ""),
   696  			mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   697  			registerPluginFuncs: []tf.RegisterPluginFunc{
   698  				tf.RegisterPreBindPlugin("FakePreBind", tf.NewFakePreBindPlugin(framework.AsStatus(preBindErr))),
   699  			},
   700  			expectErrorPod:   podWithID("foo", testNode.Name),
   701  			expectForgetPod:  podWithID("foo", testNode.Name),
   702  			expectAssumedPod: podWithID("foo", testNode.Name),
   703  			expectError:      fmt.Errorf(`running PreBind plugin "FakePreBind": %w`, preBindErr),
   704  			eventReason:      "FailedScheduling",
   705  		},
   706  		{
   707  			name:             "bind assumed pod scheduled",
   708  			sendPod:          podWithID("foo", ""),
   709  			mockResult:       mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   710  			expectBind:       &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
   711  			expectAssumedPod: podWithID("foo", testNode.Name),
   712  			eventReason:      "Scheduled",
   713  		},
   714  		{
   715  			name:           "error pod failed scheduling",
   716  			sendPod:        podWithID("foo", ""),
   717  			mockResult:     mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, errS},
   718  			expectError:    errS,
   719  			expectErrorPod: podWithID("foo", ""),
   720  			eventReason:    "FailedScheduling",
   721  		},
   722  		{
   723  			name:             "error bind forget pod failed scheduling",
   724  			sendPod:          podWithID("foo", ""),
   725  			mockResult:       mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   726  			expectBind:       &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
   727  			expectAssumedPod: podWithID("foo", testNode.Name),
   728  			injectBindError:  errB,
   729  			expectError:      fmt.Errorf("running Bind plugin %q: %w", "DefaultBinder", errors.New("binder")),
   730  			expectErrorPod:   podWithID("foo", testNode.Name),
   731  			expectForgetPod:  podWithID("foo", testNode.Name),
   732  			eventReason:      "FailedScheduling",
   733  		},
   734  		{
   735  			name:        "deleting pod",
   736  			sendPod:     deletingPod("foo"),
   737  			mockResult:  mockScheduleResult{ScheduleResult{}, nil},
   738  			eventReason: "FailedScheduling",
   739  		},
   740  	}
   741  
   742  	for _, item := range table {
   743  		t.Run(item.name, func(t *testing.T) {
   744  			var gotError error
   745  			var gotPod *v1.Pod
   746  			var gotForgetPod *v1.Pod
   747  			var gotAssumedPod *v1.Pod
   748  			var gotBinding *v1.Binding
   749  			cache := &fakecache.Cache{
   750  				ForgetFunc: func(pod *v1.Pod) {
   751  					gotForgetPod = pod
   752  				},
   753  				AssumeFunc: func(pod *v1.Pod) {
   754  					gotAssumedPod = pod
   755  				},
   756  				IsAssumedPodFunc: func(pod *v1.Pod) bool {
   757  					if pod == nil || gotAssumedPod == nil {
   758  						return false
   759  					}
   760  					return pod.UID == gotAssumedPod.UID
   761  				},
   762  			}
   763  			client := clientsetfake.NewSimpleClientset(item.sendPod)
   764  			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
   765  				if action.GetSubresource() != "binding" {
   766  					return false, nil, nil
   767  				}
   768  				gotBinding = action.(clienttesting.CreateAction).GetObject().(*v1.Binding)
   769  				return true, gotBinding, item.injectBindError
   770  			})
   771  			registerPluginFuncs := append(item.registerPluginFuncs,
   772  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
   773  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
   774  			)
   775  			ctx, cancel := context.WithCancel(context.Background())
   776  			defer cancel()
   777  			fwk, err := tf.NewFramework(ctx,
   778  				registerPluginFuncs,
   779  				testSchedulerName,
   780  				frameworkruntime.WithClientSet(client),
   781  				frameworkruntime.WithEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, testSchedulerName)))
   782  			if err != nil {
   783  				t.Fatal(err)
   784  			}
   785  
   786  			sched := &Scheduler{
   787  				Cache:  cache,
   788  				client: client,
   789  				NextPod: func(logger klog.Logger) (*framework.QueuedPodInfo, error) {
   790  					return &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, item.sendPod)}, nil
   791  				},
   792  				SchedulingQueue: internalqueue.NewTestQueue(ctx, nil),
   793  				Profiles:        profile.Map{testSchedulerName: fwk},
   794  			}
   795  
   796  			sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error) {
   797  				return item.mockResult.result, item.mockResult.err
   798  			}
   799  			sched.FailureHandler = func(_ context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *framework.Status, _ *framework.NominatingInfo, _ time.Time) {
   800  				gotPod = p.Pod
   801  				gotError = status.AsError()
   802  
   803  				msg := truncateMessage(gotError.Error())
   804  				fwk.EventRecorder().Eventf(p.Pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", msg)
   805  			}
   806  			called := make(chan struct{})
   807  			stopFunc, err := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
   808  				e, _ := obj.(*eventsv1.Event)
   809  				if e.Reason != item.eventReason {
   810  					t.Errorf("got event %v, want %v", e.Reason, item.eventReason)
   811  				}
   812  				close(called)
   813  			})
   814  			if err != nil {
   815  				t.Fatal(err)
   816  			}
   817  			sched.ScheduleOne(ctx)
   818  			<-called
   819  			if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
   820  				t.Errorf("assumed pod: wanted %v, got %v", e, a)
   821  			}
   822  			if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
   823  				t.Errorf("error pod: wanted %v, got %v", e, a)
   824  			}
   825  			if e, a := item.expectForgetPod, gotForgetPod; !reflect.DeepEqual(e, a) {
   826  				t.Errorf("forget pod: wanted %v, got %v", e, a)
   827  			}
   828  			if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
   829  				t.Errorf("error: wanted %v, got %v", e, a)
   830  			}
   831  			if diff := cmp.Diff(item.expectBind, gotBinding); diff != "" {
   832  				t.Errorf("got binding diff (-want, +got): %s", diff)
   833  			}
   834  			stopFunc()
   835  		})
   836  	}
   837  }
   838  
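        // TestSchedulerNoPhantomPodAfterExpire verifies that once an assumed pod has
        // expired from the cache, a second pod using the same host port can be bound.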
   839  func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
   840  	logger, ctx := ktesting.NewTestContext(t)
   841  	ctx, cancel := context.WithCancel(ctx)
   842  	defer cancel()
   843  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
   844  	scache := internalcache.New(ctx, 100*time.Millisecond)
   845  	pod := podWithPort("pod.Name", "", 8080)
   846  	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
   847  	scache.AddNode(logger, &node)
   848  
   849  	fns := []tf.RegisterPluginFunc{
   850  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
   851  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
   852  		tf.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"),
   853  	}
   854  	scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(ctx, t, queuedPodStore, scache, pod, &node, fns...)
   855  
   856  	waitPodExpireChan := make(chan struct{})
   857  	timeout := make(chan struct{})
   858  	go func() {
   859  		for {
   860  			select {
   861  			case <-timeout:
   862  				return
   863  			default:
   864  			}
   865  			pods, err := scache.PodCount()
   866  			if err != nil {
   867  				errChan <- fmt.Errorf("cache.List failed: %v", err)
   868  				return
   869  			}
   870  			if pods == 0 {
   871  				close(waitPodExpireChan)
   872  				return
   873  			}
   874  			time.Sleep(100 * time.Millisecond)
   875  		}
   876  	}()
    877  	// Wait for the assumed pod to expire.
   878  	select {
   879  	case err := <-errChan:
   880  		t.Fatal(err)
   881  	case <-waitPodExpireChan:
   882  	case <-time.After(wait.ForeverTestTimeout):
   883  		close(timeout)
    884  		t.Fatalf("timeout while waiting for pod to expire after %v", wait.ForeverTestTimeout)
   885  	}
   886  
    887  	// We use conflicting pod ports to trigger a fit predicate failure if the first pod has not been removed.
   888  	secondPod := podWithPort("bar", "", 8080)
   889  	queuedPodStore.Add(secondPod)
   890  	scheduler.ScheduleOne(ctx)
   891  	select {
   892  	case b := <-bindingChan:
   893  		expectBinding := &v1.Binding{
   894  			ObjectMeta: metav1.ObjectMeta{Name: "bar", UID: types.UID("bar")},
   895  			Target:     v1.ObjectReference{Kind: "Node", Name: node.Name},
   896  		}
   897  		if !reflect.DeepEqual(expectBinding, b) {
    898  			t.Errorf("binding want=%v, got=%v", expectBinding, b)
   899  		}
   900  	case <-time.After(wait.ForeverTestTimeout):
   901  		t.Fatalf("timeout in binding after %v", wait.ForeverTestTimeout)
   902  	}
   903  }
   904  
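        // TestSchedulerNoPhantomPodAfterDelete verifies that removing the first pod
        // from the cache frees its host port, so a conflicting second pod can bind.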
   905  func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
   906  	logger, ctx := ktesting.NewTestContext(t)
   907  	ctx, cancel := context.WithCancel(ctx)
   908  	defer cancel()
   909  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
   910  	scache := internalcache.New(ctx, 10*time.Minute)
   911  	firstPod := podWithPort("pod.Name", "", 8080)
   912  	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
   913  	scache.AddNode(logger, &node)
   914  	fns := []tf.RegisterPluginFunc{
   915  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
   916  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
   917  		tf.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"),
   918  	}
   919  	scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(ctx, t, queuedPodStore, scache, firstPod, &node, fns...)
   920  
    921  	// We use conflicting pod ports to trigger a fit predicate failure.
   922  	secondPod := podWithPort("bar", "", 8080)
   923  	queuedPodStore.Add(secondPod)
   924  	// queuedPodStore: [bar:8080]
   925  	// cache: [(assumed)foo:8080]
   926  
   927  	scheduler.ScheduleOne(ctx)
   928  	select {
   929  	case err := <-errChan:
   930  		expectErr := &framework.FitError{
   931  			Pod:         secondPod,
   932  			NumAllNodes: 1,
   933  			Diagnosis: framework.Diagnosis{
   934  				NodeToStatusMap: framework.NodeToStatusMap{
   935  					node.Name: framework.NewStatus(framework.Unschedulable, nodeports.ErrReason).WithPlugin(nodeports.Name),
   936  				},
   937  				UnschedulablePlugins: sets.New(nodeports.Name),
   938  				EvaluatedNodes:       1,
   939  			},
   940  		}
   941  		if !reflect.DeepEqual(expectErr, err) {
    942  			t.Errorf("err want=%v, got=%v", expectErr, err)
   943  		}
   944  	case <-time.After(wait.ForeverTestTimeout):
   945  		t.Fatalf("timeout in fitting after %v", wait.ForeverTestTimeout)
   946  	}
   947  
    948  	// We mimic the cache behavior when a pod is removed by the user.
    949  	// Note: if the schedulernodeinfo timeout were very short, the first pod would expire
    950  	// and be removed on its own (without any explicit action on schedulernodeinfo). Even in that case,
    951  	// explicitly calling AddPod corrects the behavior as well.
   952  	firstPod.Spec.NodeName = node.Name
   953  	if err := scache.AddPod(logger, firstPod); err != nil {
   954  		t.Fatalf("err: %v", err)
   955  	}
   956  	if err := scache.RemovePod(logger, firstPod); err != nil {
   957  		t.Fatalf("err: %v", err)
   958  	}
   959  
   960  	queuedPodStore.Add(secondPod)
   961  	scheduler.ScheduleOne(ctx)
   962  	select {
   963  	case b := <-bindingChan:
   964  		expectBinding := &v1.Binding{
   965  			ObjectMeta: metav1.ObjectMeta{Name: "bar", UID: types.UID("bar")},
   966  			Target:     v1.ObjectReference{Kind: "Node", Name: node.Name},
   967  		}
   968  		if !reflect.DeepEqual(expectBinding, b) {
    969  			t.Errorf("binding want=%v, got=%v", expectBinding, b)
   970  		}
   971  	case <-time.After(wait.ForeverTestTimeout):
   972  		t.Fatalf("timeout in binding after %v", wait.ForeverTestTimeout)
   973  	}
   974  }
   975  
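        // TestSchedulerFailedSchedulingReasons verifies that per-node failure reasons
        // are rolled up into a FitError whose message stays short even for 100 nodes.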
   976  func TestSchedulerFailedSchedulingReasons(t *testing.T) {
   977  	logger, ctx := ktesting.NewTestContext(t)
   978  	ctx, cancel := context.WithCancel(ctx)
   979  	defer cancel()
   980  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
   981  	scache := internalcache.New(ctx, 10*time.Minute)
   982  
    983  	// Design the baseline for the pods; below we create nodes that cannot fit them.
   984  	var cpu = int64(4)
   985  	var mem = int64(500)
   986  	podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{
   987  		v1.ResourceCPU:    *(resource.NewQuantity(cpu, resource.DecimalSI)),
   988  		v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
   989  	}, v1.ResourceList{
   990  		v1.ResourceCPU:    *(resource.NewQuantity(cpu, resource.DecimalSI)),
   991  		v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
   992  	})
   993  
    994  	// Create several nodes that cannot schedule the above pod.
   995  	var nodes []*v1.Node
   996  	var objects []runtime.Object
   997  	for i := 0; i < 100; i++ {
   998  		uid := fmt.Sprintf("node%v", i)
   999  		node := v1.Node{
  1000  			ObjectMeta: metav1.ObjectMeta{Name: uid, UID: types.UID(uid)},
  1001  			Status: v1.NodeStatus{
  1002  				Capacity: v1.ResourceList{
  1003  					v1.ResourceCPU:    *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
  1004  					v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
  1005  					v1.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
  1006  				},
  1007  				Allocatable: v1.ResourceList{
  1008  					v1.ResourceCPU:    *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
  1009  					v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
  1010  					v1.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
  1011  				}},
  1012  		}
  1013  		scache.AddNode(logger, &node)
  1014  		nodes = append(nodes, &node)
  1015  		objects = append(objects, &node)
  1016  	}
  1017  
  1018  	// Create expected failure reasons for all the nodes. Hopefully they will get rolled up into a non-spammy summary.
   1019  	failedNodeStatuses := framework.NodeToStatusMap{}
   1020  	for _, node := range nodes {
   1021  		failedNodeStatuses[node.Name] = framework.NewStatus(
  1022  			framework.Unschedulable,
  1023  			fmt.Sprintf("Insufficient %v", v1.ResourceCPU),
  1024  			fmt.Sprintf("Insufficient %v", v1.ResourceMemory),
  1025  		).WithPlugin(noderesources.Name)
  1026  	}
  1027  	fns := []tf.RegisterPluginFunc{
  1028  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1029  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1030  		tf.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
  1031  	}
  1032  
  1033  	informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objects...), 0)
  1034  	scheduler, _, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, nil, fns...)
  1035  
  1036  	queuedPodStore.Add(podWithTooBigResourceRequests)
  1037  	scheduler.ScheduleOne(ctx)
  1038  	select {
  1039  	case err := <-errChan:
  1040  		expectErr := &framework.FitError{
  1041  			Pod:         podWithTooBigResourceRequests,
  1042  			NumAllNodes: len(nodes),
  1043  			Diagnosis: framework.Diagnosis{
   1044  				NodeToStatusMap:      failedNodeStatuses,
  1045  				UnschedulablePlugins: sets.New(noderesources.Name),
  1046  				EvaluatedNodes:       100,
  1047  			},
  1048  		}
  1049  		if len(fmt.Sprint(expectErr)) > 150 {
   1050  			t.Errorf("message is too spammy! %v", len(fmt.Sprint(expectErr)))
  1051  		}
  1052  		if !reflect.DeepEqual(expectErr, err) {
  1053  			t.Errorf("\n err \nWANT=%+v,\nGOT=%+v", expectErr, err)
  1054  		}
  1055  	case <-time.After(wait.ForeverTestTimeout):
  1056  		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
  1057  	}
  1058  }
  1059  
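        // TestSchedulerWithVolumeBinding exercises ScheduleOne against a fake volume
        // binder and checks the resulting binding, error, and assume/bind call flags.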
  1060  func TestSchedulerWithVolumeBinding(t *testing.T) {
  1061  	findErr := fmt.Errorf("find err")
  1062  	assumeErr := fmt.Errorf("assume err")
  1063  	bindErr := fmt.Errorf("bind err")
  1064  	client := clientsetfake.NewSimpleClientset()
  1065  
  1066  	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
  1067  
   1068  	// This can be small because we wait for the pod to finish scheduling first.
  1069  	chanTimeout := 2 * time.Second
  1070  
  1071  	table := []struct {
  1072  		name               string
  1073  		expectError        error
  1074  		expectPodBind      *v1.Binding
  1075  		expectAssumeCalled bool
  1076  		expectBindCalled   bool
  1077  		eventReason        string
  1078  		volumeBinderConfig *volumebinding.FakeVolumeBinderConfig
  1079  	}{
  1080  		{
  1081  			name: "all bound",
  1082  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1083  				AllBound: true,
  1084  			},
  1085  			expectAssumeCalled: true,
  1086  			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "node1"}},
  1087  			eventReason:        "Scheduled",
  1088  		},
  1089  		{
  1090  			name: "bound/invalid pv affinity",
  1091  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1092  				AllBound:    true,
  1093  				FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonNodeConflict},
  1094  			},
  1095  			eventReason: "FailedScheduling",
  1096  			expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
  1097  		},
  1098  		{
  1099  			name: "unbound/no matches",
  1100  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1101  				FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonBindConflict},
  1102  			},
  1103  			eventReason: "FailedScheduling",
  1104  			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
  1105  		},
  1106  		{
  1107  			name: "bound and unbound unsatisfied",
  1108  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1109  				FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonBindConflict, volumebinding.ErrReasonNodeConflict},
  1110  			},
  1111  			eventReason: "FailedScheduling",
  1112  			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
  1113  		},
  1114  		{
  1115  			name:               "unbound/found matches/bind succeeds",
  1116  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{},
  1117  			expectAssumeCalled: true,
  1118  			expectBindCalled:   true,
  1119  			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "node1"}},
  1120  			eventReason:        "Scheduled",
  1121  		},
  1122  		{
  1123  			name: "predicate error",
  1124  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1125  				FindErr: findErr,
  1126  			},
  1127  			eventReason: "FailedScheduling",
  1128  			expectError: fmt.Errorf("running %q filter plugin: %v", volumebinding.Name, findErr),
  1129  		},
  1130  		{
  1131  			name: "assume error",
  1132  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1133  				AssumeErr: assumeErr,
  1134  			},
  1135  			expectAssumeCalled: true,
  1136  			eventReason:        "FailedScheduling",
  1137  			expectError:        fmt.Errorf("running Reserve plugin %q: %w", volumebinding.Name, assumeErr),
  1138  		},
  1139  		{
  1140  			name: "bind error",
  1141  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1142  				BindErr: bindErr,
  1143  			},
  1144  			expectAssumeCalled: true,
  1145  			expectBindCalled:   true,
  1146  			eventReason:        "FailedScheduling",
  1147  			expectError:        fmt.Errorf("running PreBind plugin %q: %w", volumebinding.Name, bindErr),
  1148  		},
  1149  	}
  1150  
  1151  	for _, item := range table {
  1152  		t.Run(item.name, func(t *testing.T) {
  1153  			ctx, cancel := context.WithCancel(context.Background())
  1154  			defer cancel()
  1155  			fakeVolumeBinder := volumebinding.NewFakeVolumeBinder(item.volumeBinderConfig)
  1156  			s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(ctx, t, fakeVolumeBinder, eventBroadcaster)
  1157  			eventChan := make(chan struct{})
  1158  			stopFunc, err := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
  1159  				e, _ := obj.(*eventsv1.Event)
   1160  				if want, got := item.eventReason, e.Reason; want != got {
   1161  					t.Errorf("expected %v, got %v", want, got)
   1162  				}
  1163  				close(eventChan)
  1164  			})
  1165  			if err != nil {
  1166  				t.Fatal(err)
  1167  			}
  1168  			s.ScheduleOne(ctx)
  1169  			// Wait for pod to succeed or fail scheduling
  1170  			select {
  1171  			case <-eventChan:
  1172  			case <-time.After(wait.ForeverTestTimeout):
  1173  				t.Fatalf("scheduling timeout after %v", wait.ForeverTestTimeout)
  1174  			}
  1175  			stopFunc()
  1176  			// Wait for scheduling to return an error or succeed binding.
  1177  			var (
  1178  				gotErr  error
  1179  				gotBind *v1.Binding
  1180  			)
  1181  			select {
  1182  			case gotErr = <-errChan:
  1183  			case gotBind = <-bindingChan:
  1184  			case <-time.After(chanTimeout):
  1185  				t.Fatalf("did not receive pod binding or error after %v", chanTimeout)
  1186  			}
  1187  			if item.expectError != nil {
  1188  				if gotErr == nil || item.expectError.Error() != gotErr.Error() {
  1189  					t.Errorf("err \nWANT=%+v,\nGOT=%+v", item.expectError, gotErr)
  1190  				}
  1191  			} else if gotErr != nil {
  1192  				t.Errorf("err \nWANT=%+v,\nGOT=%+v", item.expectError, gotErr)
  1193  			}
  1194  			if !cmp.Equal(item.expectPodBind, gotBind) {
  1195  				t.Errorf("err \nWANT=%+v,\nGOT=%+v", item.expectPodBind, gotBind)
  1196  			}
  1197  
   1198  			if item.expectAssumeCalled != fakeVolumeBinder.AssumeCalled {
   1199  				t.Errorf("expected AssumeCalled=%v, got %v", item.expectAssumeCalled, fakeVolumeBinder.AssumeCalled)
   1200  			}
   1201  
   1202  			if item.expectBindCalled != fakeVolumeBinder.BindCalled {
   1203  				t.Errorf("expected BindCalled=%v, got %v", item.expectBindCalled, fakeVolumeBinder.BindCalled)
   1204  			}
  1205  		})
  1206  	}
  1207  }
  1208  
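        // TestSchedulerBinding verifies that a bind request goes to the first binder
        // extender interested in the pod, falling back to default binding otherwise.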
  1209  func TestSchedulerBinding(t *testing.T) {
  1210  	table := []struct {
  1211  		podName      string
  1212  		extenders    []framework.Extender
  1213  		wantBinderID int
  1214  		name         string
  1215  	}{
  1216  		{
  1217  			name:    "the extender is not a binder",
  1218  			podName: "pod0",
  1219  			extenders: []framework.Extender{
  1220  				&fakeExtender{isBinder: false, interestedPodName: "pod0"},
  1221  			},
  1222  			wantBinderID: -1, // default binding.
  1223  		},
  1224  		{
  1225  			name:    "one of the extenders is a binder and interested in pod",
  1226  			podName: "pod0",
  1227  			extenders: []framework.Extender{
  1228  				&fakeExtender{isBinder: false, interestedPodName: "pod0"},
  1229  				&fakeExtender{isBinder: true, interestedPodName: "pod0"},
  1230  			},
  1231  			wantBinderID: 1,
  1232  		},
  1233  		{
  1234  			name:    "one of the extenders is a binder, but not interested in pod",
  1235  			podName: "pod1",
  1236  			extenders: []framework.Extender{
  1237  				&fakeExtender{isBinder: false, interestedPodName: "pod1"},
  1238  				&fakeExtender{isBinder: true, interestedPodName: "pod0"},
  1239  			},
  1240  			wantBinderID: -1, // default binding.
  1241  		},
  1242  		{
  1243  			name:    "ignore when extender bind failed",
  1244  			podName: "pod1",
  1245  			extenders: []framework.Extender{
  1246  				&fakeExtender{isBinder: true, errBind: true, interestedPodName: "pod1", ignorable: true},
  1247  			},
  1248  			wantBinderID: -1, // default binding.
  1249  		},
  1250  	}
  1251  
  1252  	for _, test := range table {
  1253  		t.Run(test.name, func(t *testing.T) {
  1254  			pod := st.MakePod().Name(test.podName).Obj()
  1255  			defaultBound := false
  1256  			client := clientsetfake.NewSimpleClientset(pod)
  1257  			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
  1258  				if action.GetSubresource() == "binding" {
  1259  					defaultBound = true
  1260  				}
  1261  				return false, nil, nil
  1262  			})
  1263  			_, ctx := ktesting.NewTestContext(t)
  1264  			ctx, cancel := context.WithCancel(ctx)
  1265  			defer cancel()
  1266  			fwk, err := tf.NewFramework(ctx,
  1267  				[]tf.RegisterPluginFunc{
  1268  					tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1269  					tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1270  				}, "", frameworkruntime.WithClientSet(client), frameworkruntime.WithEventRecorder(&events.FakeRecorder{}))
  1271  			if err != nil {
  1272  				t.Fatal(err)
  1273  			}
  1274  			sched := &Scheduler{
  1275  				Extenders:                test.extenders,
  1276  				Cache:                    internalcache.New(ctx, 100*time.Millisecond),
  1277  				nodeInfoSnapshot:         nil,
  1278  				percentageOfNodesToScore: 0,
  1279  			}
  1280  			status := sched.bind(ctx, fwk, pod, "node", nil)
  1281  			if !status.IsSuccess() {
  1282  				t.Error(status.AsError())
  1283  			}
  1284  
  1285  			// Checking default binding.
  1286  			if wantBound := test.wantBinderID == -1; defaultBound != wantBound {
  1287  				t.Errorf("got bound with default binding: %v, want %v", defaultBound, wantBound)
  1288  			}
  1289  
  1290  			// Checking extenders binding.
  1291  			for i, ext := range test.extenders {
  1292  				wantBound := i == test.wantBinderID
  1293  				if gotBound := ext.(*fakeExtender).gotBind; gotBound != wantBound {
  1294  					t.Errorf("got bound with extender #%d: %v, want %v", i, gotBound, wantBound)
  1295  				}
  1296  			}
  1297  
  1298  		})
  1299  	}
  1300  }
  1301  
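        // TestUpdatePod verifies the status patch requests issued when pod conditions
        // are added or updated, matching each patch body against a regexp pattern.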
  1302  func TestUpdatePod(t *testing.T) {
  1303  	tests := []struct {
  1304  		name                     string
  1305  		currentPodConditions     []v1.PodCondition
  1306  		newPodCondition          *v1.PodCondition
  1307  		currentNominatedNodeName string
  1308  		newNominatingInfo        *framework.NominatingInfo
  1309  		expectedPatchRequests    int
  1310  		expectedPatchDataPattern string
  1311  	}{
  1312  		{
  1313  			name:                 "Should make patch request to add pod condition when there are none currently",
  1314  			currentPodConditions: []v1.PodCondition{},
  1315  			newPodCondition: &v1.PodCondition{
  1316  				Type:               "newType",
  1317  				Status:             "newStatus",
  1318  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1319  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 1, 1, 1, 1, time.UTC)),
  1320  				Reason:             "newReason",
  1321  				Message:            "newMessage",
  1322  			},
  1323  			expectedPatchRequests:    1,
  1324  			expectedPatchDataPattern: `{"status":{"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","lastTransitionTime":".*","message":"newMessage","reason":"newReason","status":"newStatus","type":"newType"}]}}`,
  1325  		},
  1326  		{
  1327  			name: "Should make patch request to add a new pod condition when there is already one with another type",
  1328  			currentPodConditions: []v1.PodCondition{
  1329  				{
  1330  					Type:               "someOtherType",
  1331  					Status:             "someOtherTypeStatus",
  1332  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 11, 0, 0, 0, 0, time.UTC)),
  1333  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 10, 0, 0, 0, 0, time.UTC)),
  1334  					Reason:             "someOtherTypeReason",
  1335  					Message:            "someOtherTypeMessage",
  1336  				},
  1337  			},
  1338  			newPodCondition: &v1.PodCondition{
  1339  				Type:               "newType",
  1340  				Status:             "newStatus",
  1341  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1342  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 1, 1, 1, 1, time.UTC)),
  1343  				Reason:             "newReason",
  1344  				Message:            "newMessage",
  1345  			},
  1346  			expectedPatchRequests:    1,
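        			// $setElementOrder is the strategic-merge-patch directive that records the
        			// desired ordering of the conditions list when a new element is merged in.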
  1347  			expectedPatchDataPattern: `{"status":{"\$setElementOrder/conditions":\[{"type":"someOtherType"},{"type":"newType"}],"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","lastTransitionTime":".*","message":"newMessage","reason":"newReason","status":"newStatus","type":"newType"}]}}`,
  1348  		},
  1349  		{
  1350  			name: "Should make patch request to update an existing pod condition",
  1351  			currentPodConditions: []v1.PodCondition{
  1352  				{
  1353  					Type:               "currentType",
  1354  					Status:             "currentStatus",
  1355  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1356  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1357  					Reason:             "currentReason",
  1358  					Message:            "currentMessage",
  1359  				},
  1360  			},
  1361  			newPodCondition: &v1.PodCondition{
  1362  				Type:               "currentType",
  1363  				Status:             "newStatus",
  1364  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1365  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 1, 1, 1, 1, time.UTC)),
  1366  				Reason:             "newReason",
  1367  				Message:            "newMessage",
  1368  			},
  1369  			expectedPatchRequests:    1,
  1370  			expectedPatchDataPattern: `{"status":{"\$setElementOrder/conditions":\[{"type":"currentType"}],"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","lastTransitionTime":".*","message":"newMessage","reason":"newReason","status":"newStatus","type":"currentType"}]}}`,
  1371  		},
  1372  		{
  1373  			name: "Should make patch request to update an existing pod condition, but the transition time should remain unchanged because the status is the same",
  1374  			currentPodConditions: []v1.PodCondition{
  1375  				{
  1376  					Type:               "currentType",
  1377  					Status:             "currentStatus",
  1378  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1379  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1380  					Reason:             "currentReason",
  1381  					Message:            "currentMessage",
  1382  				},
  1383  			},
  1384  			newPodCondition: &v1.PodCondition{
  1385  				Type:               "currentType",
  1386  				Status:             "currentStatus",
  1387  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1388  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1389  				Reason:             "newReason",
  1390  				Message:            "newMessage",
  1391  			},
  1392  			expectedPatchRequests:    1,
  1393  			expectedPatchDataPattern: `{"status":{"\$setElementOrder/conditions":\[{"type":"currentType"}],"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","message":"newMessage","reason":"newReason","type":"currentType"}]}}`,
  1394  		},
  1395  		{
  1396  			name: "Should not make patch request if pod condition already exists and is identical and nominated node name is not set",
  1397  			currentPodConditions: []v1.PodCondition{
  1398  				{
  1399  					Type:               "currentType",
  1400  					Status:             "currentStatus",
  1401  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1402  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1403  					Reason:             "currentReason",
  1404  					Message:            "currentMessage",
  1405  				},
  1406  			},
  1407  			newPodCondition: &v1.PodCondition{
  1408  				Type:               "currentType",
  1409  				Status:             "currentStatus",
  1410  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1411  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1412  				Reason:             "currentReason",
  1413  				Message:            "currentMessage",
  1414  			},
  1415  			currentNominatedNodeName: "node1",
  1416  			expectedPatchRequests:    0,
  1417  		},
  1418  		{
  1419  			name: "Should make patch request if pod condition already exists and is identical but nominated node name is set and different",
  1420  			currentPodConditions: []v1.PodCondition{
  1421  				{
  1422  					Type:               "currentType",
  1423  					Status:             "currentStatus",
  1424  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1425  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1426  					Reason:             "currentReason",
  1427  					Message:            "currentMessage",
  1428  				},
  1429  			},
  1430  			newPodCondition: &v1.PodCondition{
  1431  				Type:               "currentType",
  1432  				Status:             "currentStatus",
  1433  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1434  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1435  				Reason:             "currentReason",
  1436  				Message:            "currentMessage",
  1437  			},
  1438  			newNominatingInfo:        &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node1"},
  1439  			expectedPatchRequests:    1,
  1440  			expectedPatchDataPattern: `{"status":{"nominatedNodeName":"node1"}}`,
  1441  		},
  1442  	}
  1443  	for _, test := range tests {
  1444  		t.Run(test.name, func(t *testing.T) {
  1445  			actualPatchRequests := 0
  1446  			var actualPatchData string
  1447  			cs := &clientsetfake.Clientset{}
  1448  			cs.AddReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
  1449  				actualPatchRequests++
  1450  				patch := action.(clienttesting.PatchAction)
  1451  				actualPatchData = string(patch.GetPatch())
  1452  				// For this test we only care that the expected patch request was sent;
  1453  				// returning an empty &v1.Pod{} is fine because the scheduler ignores the response.
  1454  				return true, &v1.Pod{}, nil
  1455  			})
  1456  
  1457  			pod := st.MakePod().Name("foo").NominatedNodeName(test.currentNominatedNodeName).Conditions(test.currentPodConditions).Obj()
  1458  
  1459  			ctx, cancel := context.WithCancel(context.Background())
  1460  			defer cancel()
  1461  			if err := updatePod(ctx, cs, pod, test.newPodCondition, test.newNominatingInfo); err != nil {
  1462  				t.Fatalf("Error calling update: %v", err)
  1463  			}
  1464  
  1465  			if actualPatchRequests != test.expectedPatchRequests {
  1466  				t.Fatalf("Actual patch requests (%d) does not equal expected patch requests (%d), actual patch data: %v", actualPatchRequests, test.expectedPatchRequests, actualPatchData)
  1467  			}
  1468  
  1469  			regex, err := regexp.Compile(test.expectedPatchDataPattern)
  1470  			if err != nil {
  1471  				t.Fatalf("Error compiling regexp for %v: %v", test.expectedPatchDataPattern, err)
  1472  			}
  1473  
  1474  			if test.expectedPatchRequests > 0 && !regex.MatchString(actualPatchData) {
  1475  				t.Fatalf("Patch data mismatch: Actual was %v, but expected to match regexp %v", actualPatchData, test.expectedPatchDataPattern)
  1476  			}
  1477  		})
  1478  	}
  1479  }
  1480  
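        // Test_SelectHost exercises selectHost, which picks the scheduling winner from
        // a list of Node scores: the returned Node must hold the highest total score
        // (ties broken randomly), and the returned list must contain the topNodesCnt
        // best Nodes in descending score order.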
  1481  func Test_SelectHost(t *testing.T) {
  1482  	tests := []struct {
  1483  		name              string
  1484  		list              []framework.NodePluginScores
  1485  		topNodesCnt       int
  1486  		possibleNodes     sets.Set[string]
  1487  		possibleNodeLists [][]framework.NodePluginScores
  1488  		wantError         error
  1489  	}{
  1490  		{
  1491  			name: "unique properly ordered scores",
  1492  			list: []framework.NodePluginScores{
  1493  				{Name: "node1", TotalScore: 1},
  1494  				{Name: "node2", TotalScore: 2},
  1495  			},
  1496  			topNodesCnt:   2,
  1497  			possibleNodes: sets.New("node2"),
  1498  			possibleNodeLists: [][]framework.NodePluginScores{
  1499  				{
  1500  					{Name: "node2", TotalScore: 2},
  1501  					{Name: "node1", TotalScore: 1},
  1502  				},
  1503  			},
  1504  		},
  1505  		{
  1506  			name: "numberOfNodeScoresToReturn > len(list)",
  1507  			list: []framework.NodePluginScores{
  1508  				{Name: "node1", TotalScore: 1},
  1509  				{Name: "node2", TotalScore: 2},
  1510  			},
  1511  			topNodesCnt:   100,
  1512  			possibleNodes: sets.New("node2"),
  1513  			possibleNodeLists: [][]framework.NodePluginScores{
  1514  				{
  1515  					{Name: "node2", TotalScore: 2},
  1516  					{Name: "node1", TotalScore: 1},
  1517  				},
  1518  			},
  1519  		},
  1520  		{
  1521  			name: "equal scores",
  1522  			list: []framework.NodePluginScores{
  1523  				{Name: "node2.1", TotalScore: 2},
  1524  				{Name: "node2.2", TotalScore: 2},
  1525  				{Name: "node2.3", TotalScore: 2},
  1526  			},
  1527  			topNodesCnt:   2,
  1528  			possibleNodes: sets.New("node2.1", "node2.2", "node2.3"),
  1529  			possibleNodeLists: [][]framework.NodePluginScores{
  1530  				{
  1531  					{Name: "node2.1", TotalScore: 2},
  1532  					{Name: "node2.2", TotalScore: 2},
  1533  				},
  1534  				{
  1535  					{Name: "node2.1", TotalScore: 2},
  1536  					{Name: "node2.3", TotalScore: 2},
  1537  				},
  1538  				{
  1539  					{Name: "node2.2", TotalScore: 2},
  1540  					{Name: "node2.1", TotalScore: 2},
  1541  				},
  1542  				{
  1543  					{Name: "node2.2", TotalScore: 2},
  1544  					{Name: "node2.3", TotalScore: 2},
  1545  				},
  1546  				{
  1547  					{Name: "node2.3", TotalScore: 2},
  1548  					{Name: "node2.1", TotalScore: 2},
  1549  				},
  1550  				{
  1551  					{Name: "node2.3", TotalScore: 2},
  1552  					{Name: "node2.2", TotalScore: 2},
  1553  				},
  1554  			},
  1555  		},
  1556  		{
  1557  			name: "out of order scores",
  1558  			list: []framework.NodePluginScores{
  1559  				{Name: "node3.1", TotalScore: 3},
  1560  				{Name: "node2.1", TotalScore: 2},
  1561  				{Name: "node1.1", TotalScore: 1},
  1562  				{Name: "node3.2", TotalScore: 3},
  1563  			},
  1564  			topNodesCnt:   3,
  1565  			possibleNodes: sets.New("node3.1", "node3.2"),
  1566  			possibleNodeLists: [][]framework.NodePluginScores{
  1567  				{
  1568  					{Name: "node3.1", TotalScore: 3},
  1569  					{Name: "node3.2", TotalScore: 3},
  1570  					{Name: "node2.1", TotalScore: 2},
  1571  				},
  1572  				{
  1573  					{Name: "node3.2", TotalScore: 3},
  1574  					{Name: "node3.1", TotalScore: 3},
  1575  					{Name: "node2.1", TotalScore: 2},
  1576  				},
  1577  			},
  1578  		},
  1579  		{
  1580  			name:          "empty priority list",
  1581  			list:          []framework.NodePluginScores{},
  1582  			possibleNodes: sets.Set[string]{},
  1583  			wantError:     errEmptyPriorityList,
  1584  		},
  1585  	}
  1586  
  1587  	for _, test := range tests {
  1588  		t.Run(test.name, func(t *testing.T) {
  1589  			// Run selectHost repeatedly to exercise its random tie-breaking among equal scores.
  1590  			for i := 0; i < 10; i++ {
  1591  				got, scoreList, err := selectHost(test.list, test.topNodesCnt)
  1592  				if err != test.wantError {
  1593  					t.Fatalf("unexpected error returned from selectHost: got: %v, want: %v", err, test.wantError)
  1594  				}
  1595  				if test.possibleNodes.Len() == 0 {
  1596  					if got != "" {
  1597  						t.Fatalf("expected no Node to be selected, but selectHost returned %s", got)
  1598  					}
  1599  					return
  1600  				}
  1601  				if !test.possibleNodes.Has(got) {
  1602  					t.Errorf("selected Node %s is not in the set of possible Nodes %v", got, test.possibleNodes)
  1603  				}
  1604  				if got != scoreList[0].Name {
  1605  					t.Errorf("the head of the score list should be the selected Node's score: got: %v, want: %v", scoreList[0], got)
  1606  				}
        				// Record the match instead of returning, so that every iteration of the
        				// randomness loop above actually runs.
        				matched := false
        				for _, list := range test.possibleNodeLists {
        					if cmp.Equal(list, scoreList) {
        						matched = true
        						break
        					}
        				}
        				if !matched {
        					t.Errorf("unexpected scoreList: %v", scoreList)
        				}
  1613  			}
  1614  		})
  1615  	}
  1616  }
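
        // tieBreakSketch is an illustrative sketch, not part of the scheduler or the
        // tests above: it shows the reservoir-sampling idea behind selectHost's random
        // tie-breaking, picking uniformly among the Nodes that share the best score.
        // The helper name is ours.
        func tieBreakSketch(scores []framework.NodePluginScores) string {
        	if len(scores) == 0 {
        		return ""
        	}
        	best := scores[0]
        	cntOfMaxScore := 1
        	for _, s := range scores[1:] {
        		switch {
        		case s.TotalScore > best.TotalScore:
        			// A strictly better score resets the pool of tied candidates.
        			best, cntOfMaxScore = s, 1
        		case s.TotalScore == best.TotalScore:
        			cntOfMaxScore++
        			if rand.Intn(cntOfMaxScore) == 0 {
        				// Replace the candidate with probability 1/cntOfMaxScore,
        				// which yields a uniform choice over all tied Nodes.
        				best = s
        			}
        		}
        	}
        	return best.Name
        }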
  1617  
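        // TestFindNodesThatPassExtenders verifies that extender predicates further
        // filter the Nodes that passed the framework plugins, and that the failure
        // statuses they report are merged into the existing NodeToStatusMap.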
  1618  func TestFindNodesThatPassExtenders(t *testing.T) {
  1619  	tests := []struct {
  1620  		name                  string
  1621  		extenders             []tf.FakeExtender
  1622  		nodes                 []*v1.Node
  1623  		filteredNodesStatuses framework.NodeToStatusMap
  1624  		expectsErr            bool
  1625  		expectedNodes         []*v1.Node
  1626  		expectedStatuses      framework.NodeToStatusMap
  1627  	}{
  1628  		{
  1629  			name: "error",
  1630  			extenders: []tf.FakeExtender{
  1631  				{
  1632  					ExtenderName: "FakeExtender1",
  1633  					Predicates:   []tf.FitPredicate{tf.ErrorPredicateExtender},
  1634  				},
  1635  			},
  1636  			nodes:                 makeNodeList([]string{"a"}),
  1637  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1638  			expectsErr:            true,
  1639  		},
  1640  		{
  1641  			name: "success",
  1642  			extenders: []tf.FakeExtender{
  1643  				{
  1644  					ExtenderName: "FakeExtender1",
  1645  					Predicates:   []tf.FitPredicate{tf.TruePredicateExtender},
  1646  				},
  1647  			},
  1648  			nodes:                 makeNodeList([]string{"a"}),
  1649  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1650  			expectsErr:            false,
  1651  			expectedNodes:         makeNodeList([]string{"a"}),
  1652  			expectedStatuses:      make(framework.NodeToStatusMap),
  1653  		},
  1654  		{
  1655  			name: "unschedulable",
  1656  			extenders: []tf.FakeExtender{
  1657  				{
  1658  					ExtenderName: "FakeExtender1",
  1659  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
  1660  						if node.Node().Name == "a" {
  1661  							return framework.NewStatus(framework.Success)
  1662  						}
  1663  						return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1664  					}},
  1665  				},
  1666  			},
  1667  			nodes:                 makeNodeList([]string{"a", "b"}),
  1668  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1669  			expectsErr:            false,
  1670  			expectedNodes:         makeNodeList([]string{"a"}),
  1671  			expectedStatuses: framework.NodeToStatusMap{
  1672  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1673  			},
  1674  		},
  1675  		{
  1676  			name: "unschedulable and unresolvable",
  1677  			extenders: []tf.FakeExtender{
  1678  				{
  1679  					ExtenderName: "FakeExtender1",
  1680  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
  1681  						if node.Node().Name == "a" {
  1682  							return framework.NewStatus(framework.Success)
  1683  						}
  1684  						if node.Node().Name == "b" {
  1685  							return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1686  						}
  1687  						return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1688  					}},
  1689  				},
  1690  			},
  1691  			nodes:                 makeNodeList([]string{"a", "b", "c"}),
  1692  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1693  			expectsErr:            false,
  1694  			expectedNodes:         makeNodeList([]string{"a"}),
  1695  			expectedStatuses: framework.NodeToStatusMap{
  1696  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1697  				"c": framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("FakeExtender: node %q failed and unresolvable", "c")),
  1698  			},
  1699  		},
  1700  		{
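        			// A Node already marked Unschedulable by a plugin can be upgraded to
        			// UnschedulableAndUnresolvable by an extender; both reasons are kept in
        			// the merged status (see node "c" below).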
  1701  			name: "extender may overwrite the statuses",
  1702  			extenders: []tf.FakeExtender{
  1703  				{
  1704  					ExtenderName: "FakeExtender1",
  1705  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
  1706  						if node.Node().Name == "a" {
  1707  							return framework.NewStatus(framework.Success)
  1708  						}
  1709  						if node.Node().Name == "b" {
  1710  							return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1711  						}
  1712  						return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1713  					}},
  1714  				},
  1715  			},
  1716  			nodes: makeNodeList([]string{"a", "b", "c"}),
  1717  			filteredNodesStatuses: framework.NodeToStatusMap{
  1718  				"c": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeFilterPlugin: node %q failed", "c")),
  1719  			},
  1720  			expectsErr:    false,
  1721  			expectedNodes: makeNodeList([]string{"a"}),
  1722  			expectedStatuses: framework.NodeToStatusMap{
  1723  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1724  				"c": framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("FakeFilterPlugin: node %q failed", "c"), fmt.Sprintf("FakeExtender: node %q failed and unresolvable", "c")),
  1725  			},
  1726  		},
  1727  		{
  1728  			name: "multiple extenders",
  1729  			extenders: []tf.FakeExtender{
  1730  				{
  1731  					ExtenderName: "FakeExtender1",
  1732  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
  1733  						if node.Node().Name == "a" {
  1734  							return framework.NewStatus(framework.Success)
  1735  						}
  1736  						if node.Node().Name == "b" {
  1737  							return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1738  						}
  1739  						return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1740  					}},
  1741  				},
  1742  				{
  1743  					ExtenderName: "FakeExtender2",
  1744  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
  1745  						if node.Node().Name == "a" {
  1746  							return framework.NewStatus(framework.Success)
  1747  						}
  1748  						return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
  1749  					}},
  1750  				},
  1751  			},
  1752  			nodes:                 makeNodeList([]string{"a", "b", "c"}),
  1753  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1754  			expectsErr:            false,
  1755  			expectedNodes:         makeNodeList([]string{"a"}),
  1756  			expectedStatuses: framework.NodeToStatusMap{
  1757  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1758  				"c": framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("FakeExtender: node %q failed and unresolvable", "c")),
  1759  			},
  1760  		},
  1761  	}
  1762  
  1763  	cmpOpts := []cmp.Option{
  1764  		cmp.Comparer(func(s1 framework.Status, s2 framework.Status) bool {
  1765  			return s1.Code() == s2.Code() && reflect.DeepEqual(s1.Reasons(), s2.Reasons())
  1766  		}),
  1767  	}
  1768  
  1769  	for _, tt := range tests {
  1770  		t.Run(tt.name, func(t *testing.T) {
  1771  			_, ctx := ktesting.NewTestContext(t)
  1772  			var extenders []framework.Extender
  1773  			for ii := range tt.extenders {
  1774  				extenders = append(extenders, &tt.extenders[ii])
  1775  			}
  1776  
  1777  			pod := st.MakePod().Name("1").UID("1").Obj()
  1778  			got, err := findNodesThatPassExtenders(ctx, extenders, pod, tf.BuildNodeInfos(tt.nodes), tt.filteredNodesStatuses)
  1779  			nodes := make([]*v1.Node, len(got))
  1780  			for i := 0; i < len(got); i++ {
  1781  				nodes[i] = got[i].Node()
  1782  			}
  1783  			if tt.expectsErr {
  1784  				if err == nil {
  1785  					t.Error("expected an error, but got none")
  1786  				}
  1787  			} else {
  1788  				if err != nil {
  1789  					t.Errorf("Unexpected error: %v", err)
  1790  				}
  1791  				if diff := cmp.Diff(tt.expectedNodes, nodes); diff != "" {
  1792  					t.Errorf("filtered nodes (-want,+got):\n%s", diff)
  1793  				}
  1794  				if diff := cmp.Diff(tt.expectedStatuses, tt.filteredNodesStatuses, cmpOpts...); diff != "" {
  1795  					t.Errorf("filtered statuses (-want,+got):\n%s", diff)
  1796  				}
  1797  			}
  1798  		})
  1799  	}
  1800  }
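
        // allowOnlyNode is a sketch of the FitPredicate closures written inline above;
        // the helper itself is ours, not upstream. It admits exactly one named Node
        // and reports every other Node as Unschedulable.
        func allowOnlyNode(name string) tf.FitPredicate {
        	return func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
        		if node.Node().Name == name {
        			return framework.NewStatus(framework.Success)
        		}
        		return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
        	}
        }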
  1801  
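        // TestSchedulerSchedulePod runs the full SchedulePod flow against assorted
        // plugin, extender, and PVC configurations, checking the suggested host, the
        // number of evaluated Nodes, and the exact FitError diagnosis on failure.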
  1802  func TestSchedulerSchedulePod(t *testing.T) {
  1803  	fts := feature.Features{}
  1804  	tests := []struct {
  1805  		name               string
  1806  		registerPlugins    []tf.RegisterPluginFunc
  1807  		extenders          []tf.FakeExtender
  1808  		nodes              []string
  1809  		pvcs               []v1.PersistentVolumeClaim
  1810  		pod                *v1.Pod
  1811  		pods               []*v1.Pod
  1812  		wantNodes          sets.Set[string]
  1813  		wantEvaluatedNodes *int32
  1814  		wErr               error
  1815  	}{
  1816  		{
  1817  			registerPlugins: []tf.RegisterPluginFunc{
  1818  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1819  				tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin),
  1820  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1821  			},
  1822  			nodes: []string{"node1", "node2"},
  1823  			pod:   st.MakePod().Name("2").UID("2").Obj(),
  1824  			name:  "all nodes rejected by FalseFilter",
  1825  			wErr: &framework.FitError{
  1826  				Pod:         st.MakePod().Name("2").UID("2").Obj(),
  1827  				NumAllNodes: 2,
  1828  				Diagnosis: framework.Diagnosis{
  1829  					NodeToStatusMap: framework.NodeToStatusMap{
  1830  						"node1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1831  						"node2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1832  					},
  1833  					UnschedulablePlugins: sets.New("FalseFilter"),
  1834  					EvaluatedNodes:       2,
  1835  				},
  1836  			},
  1837  		},
  1838  		{
  1839  			registerPlugins: []tf.RegisterPluginFunc{
  1840  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1841  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1842  				tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
  1843  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1844  			},
  1845  			nodes:     []string{"node1", "node2"},
  1846  			pod:       st.MakePod().Name("ignore").UID("ignore").Obj(),
  1847  			wantNodes: sets.New("node1", "node2"),
  1848  			name:      "any node fits when all filters pass",
  1849  			wErr:      nil,
  1850  		},
  1851  		{
  1852  			// Fits on a node where the pod ID matches the node name
  1853  			registerPlugins: []tf.RegisterPluginFunc{
  1854  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1855  				tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  1856  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1857  			},
  1858  			nodes:     []string{"node1", "node2"},
  1859  			pod:       st.MakePod().Name("node2").UID("node2").Obj(),
  1860  			wantNodes: sets.New("node2"),
  1861  			name:      "pod fits only the node matching its name",
  1862  			wErr:      nil,
  1863  		},
  1864  		{
  1865  			registerPlugins: []tf.RegisterPluginFunc{
  1866  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1867  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1868  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1869  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1870  			},
  1871  			nodes:     []string{"3", "2", "1"},
  1872  			pod:       st.MakePod().Name("ignore").UID("ignore").Obj(),
  1873  			wantNodes: sets.New("3"),
  1874  			name:      "highest numeric score wins",
  1875  			wErr:      nil,
  1876  		},
  1877  		{
  1878  			registerPlugins: []tf.RegisterPluginFunc{
  1879  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1880  				tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  1881  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1882  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1883  			},
  1884  			nodes:     []string{"3", "2", "1"},
  1885  			pod:       st.MakePod().Name("2").UID("2").Obj(),
  1886  			wantNodes: sets.New("2"),
  1887  			name:      "filter narrows the choice before scoring",
  1888  			wErr:      nil,
  1889  		},
  1890  		{
  1891  			registerPlugins: []tf.RegisterPluginFunc{
  1892  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1893  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1894  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1895  				tf.RegisterScorePlugin("ReverseNumericMap", newReverseNumericMapPlugin(), 2),
  1896  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1897  			},
  1898  			nodes:     []string{"3", "2", "1"},
  1899  			pod:       st.MakePod().Name("2").UID("2").Obj(),
  1900  			wantNodes: sets.New("1"),
  1901  			name:      "higher-weighted reverse scorer flips the ranking",
  1902  			wErr:      nil,
  1903  		},
  1904  		{
  1905  			registerPlugins: []tf.RegisterPluginFunc{
  1906  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1907  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1908  				tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin),
  1909  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1910  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1911  			},
  1912  			nodes: []string{"3", "2", "1"},
  1913  			pod:   st.MakePod().Name("2").UID("2").Obj(),
  1914  			name:  "one failing filter rejects every node",
  1915  			wErr: &framework.FitError{
  1916  				Pod:         st.MakePod().Name("2").UID("2").Obj(),
  1917  				NumAllNodes: 3,
  1918  				Diagnosis: framework.Diagnosis{
  1919  					NodeToStatusMap: framework.NodeToStatusMap{
  1920  						"3": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1921  						"2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1922  						"1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1923  					},
  1924  					UnschedulablePlugins: sets.New("FalseFilter"),
  1925  					EvaluatedNodes:       3,
  1926  				},
  1927  			},
  1928  		},
  1929  		{
  1930  			registerPlugins: []tf.RegisterPluginFunc{
  1931  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1932  				tf.RegisterFilterPlugin("NoPodsFilter", NewNoPodsFilterPlugin),
  1933  				tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  1934  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1935  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1936  			},
  1937  			pods: []*v1.Pod{
  1938  				st.MakePod().Name("2").UID("2").Node("2").Phase(v1.PodRunning).Obj(),
  1939  			},
  1940  			pod:   st.MakePod().Name("2").UID("2").Obj(),
  1941  			nodes: []string{"1", "2"},
  1942  			name:  "each node rejected by a different filter",
  1943  			wErr: &framework.FitError{
  1944  				Pod:         st.MakePod().Name("2").UID("2").Obj(),
  1945  				NumAllNodes: 2,
  1946  				Diagnosis: framework.Diagnosis{
  1947  					NodeToStatusMap: framework.NodeToStatusMap{
  1948  						"1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  1949  						"2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("NoPodsFilter"),
  1950  					},
  1951  					UnschedulablePlugins: sets.New("MatchFilter", "NoPodsFilter"),
  1952  					EvaluatedNodes:       2,
  1953  				},
  1954  			},
  1955  		},
  1956  		{
  1957  			// Pod with existing PVC
  1958  			registerPlugins: []tf.RegisterPluginFunc{
  1959  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1960  				tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
  1961  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1962  				tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
  1963  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1964  			},
  1965  			nodes: []string{"node1", "node2"},
  1966  			pvcs: []v1.PersistentVolumeClaim{
  1967  				{
  1968  					ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault},
  1969  					Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "existingPV"},
  1970  				},
  1971  			},
  1972  			pod:       st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
  1973  			wantNodes: sets.New("node1", "node2"),
  1974  			name:      "existing PVC",
  1975  			wErr:      nil,
  1976  		},
  1977  		{
  1978  			// Pod with a nonexistent PVC
  1979  			registerPlugins: []tf.RegisterPluginFunc{
  1980  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1981  				tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
  1982  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1983  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1984  			},
  1985  			nodes: []string{"node1", "node2"},
  1986  			pod:   st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
  1987  			name:  "unknown PVC",
  1988  			wErr: &framework.FitError{
  1989  				Pod:         st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
  1990  				NumAllNodes: 2,
  1991  				Diagnosis: framework.Diagnosis{
  1992  					NodeToStatusMap: framework.NodeToStatusMap{
  1993  						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithPlugin("VolumeBinding"),
  1994  						"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithPlugin("VolumeBinding"),
  1995  					},
  1996  					PreFilterMsg:         `persistentvolumeclaim "unknownPVC" not found`,
  1997  					UnschedulablePlugins: sets.New(volumebinding.Name),
  1998  				},
  1999  			},
  2000  		},
  2001  		{
  2002  			// Pod with a PVC that is being deleted
  2003  			registerPlugins: []tf.RegisterPluginFunc{
  2004  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2005  				tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
  2006  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2007  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2008  			},
  2009  			nodes: []string{"node1", "node2"},
  2010  			pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}},
  2011  			pod:   st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
  2012  			name:  "deleted PVC",
  2013  			wErr: &framework.FitError{
  2014  				Pod:         st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
  2015  				NumAllNodes: 2,
  2016  				Diagnosis: framework.Diagnosis{
  2017  					NodeToStatusMap: framework.NodeToStatusMap{
  2018  						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithPlugin("VolumeBinding"),
  2019  						"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithPlugin("VolumeBinding"),
  2020  					},
  2021  					PreFilterMsg:         `persistentvolumeclaim "existingPVC" is being deleted`,
  2022  					UnschedulablePlugins: sets.New(volumebinding.Name),
  2023  				},
  2024  			},
  2025  		},
  2026  		{
  2027  			registerPlugins: []tf.RegisterPluginFunc{
  2028  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2029  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2030  				tf.RegisterScorePlugin("FalseMap", newFalseMapPlugin(), 1),
  2031  				tf.RegisterScorePlugin("TrueMap", newTrueMapPlugin(), 2),
  2032  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2033  			},
  2034  			nodes: []string{"2", "1"},
  2035  			pod:   st.MakePod().Name("2").Obj(),
  2036  			name:  "test error with priority map",
  2037  			wErr:  fmt.Errorf("running Score plugins: %w", fmt.Errorf(`plugin "FalseMap" failed with: %w`, errPrioritize)),
  2038  		},
  2039  		{
  2040  			name: "test podtopologyspread plugin - 2 nodes with maxskew=1",
  2041  			registerPlugins: []tf.RegisterPluginFunc{
  2042  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2043  				tf.RegisterPluginAsExtensions(
  2044  					podtopologyspread.Name,
  2045  					podTopologySpreadFunc,
  2046  					"PreFilter",
  2047  					"Filter",
  2048  				),
  2049  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2050  			},
  2051  			nodes: []string{"node1", "node2"},
  2052  			pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
  2053  				MatchExpressions: []metav1.LabelSelectorRequirement{
  2054  					{
  2055  						Key:      "foo",
  2056  						Operator: metav1.LabelSelectorOpExists,
  2057  					},
  2058  				},
  2059  			}, nil, nil, nil, nil).Obj(),
  2060  			pods: []*v1.Pod{
  2061  				st.MakePod().Name("pod1").UID("pod1").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
  2062  			},
  2063  			wantNodes: sets.New("node2"),
  2064  			wErr:      nil,
  2065  		},
  2066  		{
  2067  			name: "test podtopologyspread plugin - 3 nodes with maxskew=2",
  2068  			registerPlugins: []tf.RegisterPluginFunc{
  2069  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2070  				tf.RegisterPluginAsExtensions(
  2071  					podtopologyspread.Name,
  2072  					podTopologySpreadFunc,
  2073  					"PreFilter",
  2074  					"Filter",
  2075  				),
  2076  				tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
  2077  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2078  			},
  2079  			nodes: []string{"node1", "node2", "node3"},
  2080  			pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
  2081  				MatchExpressions: []metav1.LabelSelectorRequirement{
  2082  					{
  2083  						Key:      "foo",
  2084  						Operator: metav1.LabelSelectorOpExists,
  2085  					},
  2086  				},
  2087  			}, nil, nil, nil, nil).Obj(),
  2088  			pods: []*v1.Pod{
  2089  				st.MakePod().Name("pod1a").UID("pod1a").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
  2090  				st.MakePod().Name("pod1b").UID("pod1b").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
  2091  				st.MakePod().Name("pod2").UID("pod2").Label("foo", "").Node("node2").Phase(v1.PodRunning).Obj(),
  2092  			},
  2093  			wantNodes: sets.New("node2", "node3"),
  2094  			wErr:      nil,
  2095  		},
  2096  		{
  2097  			name: "test with filter plugin returning Unschedulable status",
  2098  			registerPlugins: []tf.RegisterPluginFunc{
  2099  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2100  				tf.RegisterFilterPlugin(
  2101  					"FakeFilter",
  2102  					tf.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.Unschedulable}),
  2103  				),
  2104  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  2105  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2106  			},
  2107  			nodes:     []string{"3"},
  2108  			pod:       st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2109  			wantNodes: nil,
  2110  			wErr: &framework.FitError{
  2111  				Pod:         st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2112  				NumAllNodes: 1,
  2113  				Diagnosis: framework.Diagnosis{
  2114  					NodeToStatusMap: framework.NodeToStatusMap{
  2115  						"3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter").WithPlugin("FakeFilter"),
  2116  					},
  2117  					UnschedulablePlugins: sets.New("FakeFilter"),
  2118  					EvaluatedNodes:       1,
  2119  				},
  2120  			},
  2121  		},
  2122  		{
  2123  			name: "test with extender which filters out some Nodes",
  2124  			registerPlugins: []tf.RegisterPluginFunc{
  2125  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2126  				tf.RegisterFilterPlugin(
  2127  					"FakeFilter",
  2128  					tf.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.Unschedulable}),
  2129  				),
  2130  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  2131  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2132  			},
  2133  			extenders: []tf.FakeExtender{
  2134  				{
  2135  					ExtenderName: "FakeExtender1",
  2136  					Predicates:   []tf.FitPredicate{tf.FalsePredicateExtender},
  2137  				},
  2138  			},
  2139  			nodes:     []string{"1", "2", "3"},
  2140  			pod:       st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2141  			wantNodes: nil,
  2142  			wErr: &framework.FitError{
  2143  				Pod:         st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2144  				NumAllNodes: 3,
  2145  				Diagnosis: framework.Diagnosis{
  2146  					NodeToStatusMap: framework.NodeToStatusMap{
  2147  						"1": framework.NewStatus(framework.Unschedulable, `FakeExtender: node "1" failed`),
  2148  						"2": framework.NewStatus(framework.Unschedulable, `FakeExtender: node "2" failed`),
  2149  						"3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter").WithPlugin("FakeFilter"),
  2150  					},
  2151  					UnschedulablePlugins: sets.New("FakeFilter", framework.ExtenderName),
  2152  					EvaluatedNodes:       3,
  2153  				},
  2154  			},
  2155  		},
  2156  		{
  2157  			name: "test with filter plugin returning UnschedulableAndUnresolvable status",
  2158  			registerPlugins: []tf.RegisterPluginFunc{
  2159  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2160  				tf.RegisterFilterPlugin(
  2161  					"FakeFilter",
  2162  					tf.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.UnschedulableAndUnresolvable}),
  2163  				),
  2164  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  2165  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2166  			},
  2167  			nodes:     []string{"3"},
  2168  			pod:       st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2169  			wantNodes: nil,
  2170  			wErr: &framework.FitError{
  2171  				Pod:         st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2172  				NumAllNodes: 1,
  2173  				Diagnosis: framework.Diagnosis{
  2174  					NodeToStatusMap: framework.NodeToStatusMap{
  2175  						"3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter").WithPlugin("FakeFilter"),
  2176  					},
  2177  					UnschedulablePlugins: sets.New("FakeFilter"),
  2178  					EvaluatedNodes:       1,
  2179  				},
  2180  			},
  2181  		},
  2182  		{
  2183  			name: "test with partial failed filter plugin",
  2184  			registerPlugins: []tf.RegisterPluginFunc{
  2185  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2186  				tf.RegisterFilterPlugin(
  2187  					"FakeFilter",
  2188  					tf.NewFakeFilterPlugin(map[string]framework.Code{"1": framework.Unschedulable}),
  2189  				),
  2190  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  2191  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2192  			},
  2193  			nodes:     []string{"1", "2"},
  2194  			pod:       st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2195  			wantNodes: nil,
  2196  			wErr:      nil,
  2197  		},
  2198  		{
  2199  			name: "test prefilter plugin returning Unschedulable status",
  2200  			registerPlugins: []tf.RegisterPluginFunc{
  2201  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2202  				tf.RegisterPreFilterPlugin(
  2203  					"FakePreFilter",
  2204  					tf.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status")),
  2205  				),
  2206  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2207  			},
  2208  			nodes:     []string{"1", "2"},
  2209  			pod:       st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2210  			wantNodes: nil,
  2211  			wErr: &framework.FitError{
  2212  				Pod:         st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2213  				NumAllNodes: 2,
  2214  				Diagnosis: framework.Diagnosis{
  2215  					NodeToStatusMap: framework.NodeToStatusMap{
  2216  						"1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithPlugin("FakePreFilter"),
  2217  						"2": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithPlugin("FakePreFilter"),
  2218  					},
  2219  					PreFilterMsg:         "injected unschedulable status",
  2220  					UnschedulablePlugins: sets.New("FakePreFilter"),
  2221  				},
  2222  			},
  2223  		},
  2224  		{
  2225  			name: "test prefilter plugin returning error status",
  2226  			registerPlugins: []tf.RegisterPluginFunc{
  2227  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2228  				tf.RegisterPreFilterPlugin(
  2229  					"FakePreFilter",
  2230  					tf.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.Error, "injected error status")),
  2231  				),
  2232  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2233  			},
  2234  			nodes:     []string{"1", "2"},
  2235  			pod:       st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2236  			wantNodes: nil,
  2237  			wErr:      fmt.Errorf(`running PreFilter plugin "FakePreFilter": %w`, errors.New("injected error status")),
  2238  		},
  2239  		{
  2240  			name: "test prefilter plugin returning node",
  2241  			registerPlugins: []tf.RegisterPluginFunc{
  2242  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2243  				tf.RegisterPreFilterPlugin(
  2244  					"FakePreFilter1",
  2245  					tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil),
  2246  				),
  2247  				tf.RegisterPreFilterPlugin(
  2248  					"FakePreFilter2",
  2249  					tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
  2250  				),
  2251  				tf.RegisterPreFilterPlugin(
  2252  					"FakePreFilter3",
  2253  					tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
  2254  				),
  2255  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2256  			},
  2257  			nodes:              []string{"node1", "node2", "node3"},
  2258  			pod:                st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2259  			wantNodes:          sets.New("node2"),
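        			// The PreFilterResults intersect to {node2}, so only one Node reaches Filter.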
  2260  			wantEvaluatedNodes: ptr.To[int32](1),
  2261  		},
  2262  		{
  2263  			name: "test prefilter plugin returning non-intersecting nodes",
  2264  			registerPlugins: []tf.RegisterPluginFunc{
  2265  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2266  				tf.RegisterPreFilterPlugin(
  2267  					"FakePreFilter1",
  2268  					tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil),
  2269  				),
  2270  				tf.RegisterPreFilterPlugin(
  2271  					"FakePreFilter2",
  2272  					tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
  2273  				),
  2274  				tf.RegisterPreFilterPlugin(
  2275  					"FakePreFilter3",
  2276  					tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1")}, nil),
  2277  				),
  2278  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2279  			},
  2280  			nodes: []string{"node1", "node2", "node3"},
  2281  			pod:   st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2282  			wErr: &framework.FitError{
  2283  				Pod:         st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2284  				NumAllNodes: 3,
  2285  				Diagnosis: framework.Diagnosis{
  2286  					NodeToStatusMap: framework.NodeToStatusMap{
  2287  						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously"),
  2288  						"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously"),
  2289  						"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously"),
  2290  					},
  2291  					UnschedulablePlugins: sets.Set[string]{},
  2292  					PreFilterMsg:         "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously",
  2293  				},
  2294  			},
  2295  		},
  2296  		{
  2297  			name: "test prefilter plugin returning empty node set",
  2298  			registerPlugins: []tf.RegisterPluginFunc{
  2299  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2300  				tf.RegisterPreFilterPlugin(
  2301  					"FakePreFilter1",
  2302  					tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil),
  2303  				),
  2304  				tf.RegisterPreFilterPlugin(
  2305  					"FakePreFilter2",
  2306  					tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New[string]()}, nil),
  2307  				),
  2308  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2309  			},
  2310  			nodes: []string{"node1"},
  2311  			pod:   st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2312  			wErr: &framework.FitError{
  2313  				Pod:         st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2314  				NumAllNodes: 1,
  2315  				Diagnosis: framework.Diagnosis{
  2316  					NodeToStatusMap: framework.NodeToStatusMap{
  2317  						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) didn't satisfy plugin FakePreFilter2"),
  2318  					},
  2319  					UnschedulablePlugins: sets.Set[string]{},
  2320  					PreFilterMsg:         "node(s) didn't satisfy plugin FakePreFilter2",
  2321  				},
  2322  			},
  2323  		},
  2324  		{
  2325  			name: "test some nodes are filtered out by prefilter plugin and other are filtered out by filter plugin",
  2326  			registerPlugins: []tf.RegisterPluginFunc{
  2327  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2328  				tf.RegisterPreFilterPlugin(
  2329  					"FakePreFilter",
  2330  					tf.NewFakePreFilterPlugin("FakePreFilter", &framework.PreFilterResult{NodeNames: sets.New[string]("node2")}, nil),
  2331  				),
  2332  				tf.RegisterFilterPlugin(
  2333  					"FakeFilter",
  2334  					tf.NewFakeFilterPlugin(map[string]framework.Code{"node2": framework.Unschedulable}),
  2335  				),
  2336  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2337  			},
  2338  			nodes: []string{"node1", "node2"},
  2339  			pod:   st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2340  			wErr: &framework.FitError{
  2341  				Pod:         st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2342  				NumAllNodes: 2,
  2343  				Diagnosis: framework.Diagnosis{
  2344  					NodeToStatusMap: framework.NodeToStatusMap{
  2345  						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "node is filtered out by the prefilter result"),
  2346  						"node2": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-prefilter").WithPlugin("FakeFilter"),
  2347  					},
  2348  					UnschedulablePlugins: sets.New("FakeFilter"),
  2349  					EvaluatedNodes:       1,
  2350  					PreFilterMsg:         "",
  2351  				},
  2352  			},
  2353  		},
  2354  		{
  2355  			name: "test prefilter plugin returning skip",
  2356  			registerPlugins: []tf.RegisterPluginFunc{
  2357  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2358  				tf.RegisterPreFilterPlugin(
  2359  					"FakePreFilter1",
  2360  					tf.NewFakePreFilterPlugin("FakeFilter1", nil, nil),
  2361  				),
  2362  				tf.RegisterFilterPlugin(
  2363  					"FakeFilter1",
  2364  					tf.NewFakeFilterPlugin(map[string]framework.Code{
  2365  						"node1": framework.Unschedulable,
  2366  					}),
  2367  				),
  2368  				tf.RegisterPluginAsExtensions("FakeFilter2", func(_ context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error) {
  2369  					return tf.FakePreFilterAndFilterPlugin{
  2370  						FakePreFilterPlugin: &tf.FakePreFilterPlugin{
  2371  							Result: nil,
  2372  							Status: framework.NewStatus(framework.Skip),
  2373  						},
  2374  						FakeFilterPlugin: &tf.FakeFilterPlugin{
  2375  							// This Filter plugin shouldn't run, because its paired PreFilter returned Skip.
  2376  							// To confirm that, it returns Error for every Node, which would fail the test if Filter ran.
  2377  							FailedNodeReturnCodeMap: map[string]framework.Code{
  2378  								"node1": framework.Error, "node2": framework.Error, "node3": framework.Error,
  2379  							},
  2380  						},
  2381  					}, nil
  2382  				}, "PreFilter", "Filter"),
  2383  				tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
  2384  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2385  			},
  2386  			nodes:              []string{"node1", "node2", "node3"},
  2387  			pod:                st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2388  			wantNodes:          sets.New("node2", "node3"),
  2389  			wantEvaluatedNodes: ptr.To[int32](3),
  2390  		},
  2391  		{
  2392  			name: "test all prescore plugins return skip",
  2393  			registerPlugins: []tf.RegisterPluginFunc{
  2394  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2395  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2396  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2397  				tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0,
  2398  					framework.NewStatus(framework.Skip, "fake skip"),
  2399  					framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"),
  2400  				), "PreScore", "Score"),
  2401  			},
  2402  			nodes:     []string{"node1", "node2"},
  2403  			pod:       st.MakePod().Name("ignore").UID("ignore").Obj(),
  2404  			wantNodes: sets.New("node1", "node2"),
  2405  		},
  2406  		{
  2407  			name: "test without score plugin no extra nodes are evaluated",
  2408  			registerPlugins: []tf.RegisterPluginFunc{
  2409  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2410  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2411  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2412  			},
  2413  			nodes:              []string{"node1", "node2", "node3"},
  2414  			pod:                st.MakePod().Name("pod1").UID("pod1").Obj(),
  2415  			wantNodes:          sets.New("node1", "node2", "node3"),
  2416  			wantEvaluatedNodes: ptr.To[int32](1),
  2417  		},
  2418  		{
  2419  			name: "test no score plugin, prefilter plugin returning 2 nodes",
  2420  			registerPlugins: []tf.RegisterPluginFunc{
  2421  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2422  				tf.RegisterPreFilterPlugin(
  2423  					"FakePreFilter",
  2424  					tf.NewFakePreFilterPlugin("FakePreFilter", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
  2425  				),
  2426  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2427  			},
  2428  			nodes:     []string{"node1", "node2", "node3"},
  2429  			pod:       st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2430  			wantNodes: sets.New("node1", "node2"),
  2431  			// Since this case has no Score plugin, the scheduler stops once one feasible node is found in the Filter stage.
  2432  			wantEvaluatedNodes: ptr.To[int32](1),
  2433  		},
  2434  	}
  2435  	for _, test := range tests {
  2436  		t.Run(test.name, func(t *testing.T) {
  2437  			logger, ctx := ktesting.NewTestContext(t)
  2438  			ctx, cancel := context.WithCancel(ctx)
  2439  			defer cancel()
  2440  
  2441  			cache := internalcache.New(ctx, time.Duration(0))
  2442  			for _, pod := range test.pods {
  2443  				cache.AddPod(logger, pod)
  2444  			}
  2445  			var nodes []*v1.Node
  2446  			for _, name := range test.nodes {
  2447  				node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}}
  2448  				nodes = append(nodes, node)
  2449  				cache.AddNode(logger, node)
  2450  			}
  2451  
  2452  			cs := clientsetfake.NewSimpleClientset()
  2453  			informerFactory := informers.NewSharedInformerFactory(cs, 0)
  2454  			for _, pvc := range test.pvcs {
  2455  				metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true")
        				if _, err := cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, &pvc, metav1.CreateOptions{}); err != nil {
        					t.Fatal(err)
        				}
  2457  				if pvName := pvc.Spec.VolumeName; pvName != "" {
  2458  					pv := v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: pvName}}
        					if _, err := cs.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{}); err != nil {
        						t.Fatal(err)
        					}
  2460  				}
  2461  			}
  2462  			snapshot := internalcache.NewSnapshot(test.pods, nodes)
  2463  			fwk, err := tf.NewFramework(
  2464  				ctx,
  2465  				test.registerPlugins, "",
  2466  				frameworkruntime.WithSnapshotSharedLister(snapshot),
  2467  				frameworkruntime.WithInformerFactory(informerFactory),
  2468  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  2469  			)
  2470  			if err != nil {
  2471  				t.Fatal(err)
  2472  			}
  2473  
  2474  			var extenders []framework.Extender
  2475  			for ii := range test.extenders {
  2476  				extenders = append(extenders, &test.extenders[ii])
  2477  			}
  2478  			sched := &Scheduler{
  2479  				Cache:                    cache,
  2480  				nodeInfoSnapshot:         snapshot,
  2481  				percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  2482  				Extenders:                extenders,
  2483  			}
  2484  			sched.applyDefaultHandlers()
  2485  
  2486  			informerFactory.Start(ctx.Done())
  2487  			informerFactory.WaitForCacheSync(ctx.Done())
  2488  
  2489  			result, err := sched.SchedulePod(ctx, fwk, framework.NewCycleState(), test.pod)
  2490  			if err != test.wErr {
  2491  				gotFitErr, gotOK := err.(*framework.FitError)
  2492  				wantFitErr, wantOK := test.wErr.(*framework.FitError)
  2493  				if gotOK != wantOK {
  2494  					t.Errorf("Expected err to be FitError: %v, but got %v (error: %v)", wantOK, gotOK, err)
  2495  				} else if gotOK {
  2496  					if diff := cmp.Diff(wantFitErr, gotFitErr); diff != "" {
  2497  						t.Errorf("Unexpected fitErr: (-want, +got): %s", diff)
  2498  					}
  2499  				}
  2500  			}
  2501  			if test.wantNodes != nil && !test.wantNodes.Has(result.SuggestedHost) {
  2502  				t.Errorf("Expected: %s, got: %s", test.wantNodes, result.SuggestedHost)
  2503  			}
  2504  			wantEvaluatedNodes := len(test.nodes)
  2505  			if test.wantEvaluatedNodes != nil {
  2506  				wantEvaluatedNodes = int(*test.wantEvaluatedNodes)
  2507  			}
  2508  			if test.wErr == nil && wantEvaluatedNodes != result.EvaluatedNodes {
  2509  				t.Errorf("Expected EvaluatedNodes: %d, got: %d", wantEvaluatedNodes, result.EvaluatedNodes)
  2510  			}
  2511  		})
  2512  	}
  2513  }
  2514  
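        // TestFindFitAllError verifies the diagnosis when a filter rejects every
        // node: MatchFilter only admits the node whose name matches the pod's name,
        // and the empty pod matches none, so each node must appear in
        // NodeToStatusMap as Unschedulable with MatchFilter recorded as the cause.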
  2515  func TestFindFitAllError(t *testing.T) {
  2516  	ctx, cancel := context.WithCancel(context.Background())
  2517  	defer cancel()
  2518  
  2519  	nodes := makeNodeList([]string{"3", "2", "1"})
  2520  	scheduler := makeScheduler(ctx, nodes)
  2521  
  2522  	fwk, err := tf.NewFramework(
  2523  		ctx,
  2524  		[]tf.RegisterPluginFunc{
  2525  			tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2526  			tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2527  			tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  2528  			tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2529  		},
  2530  		"",
  2531  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  2532  	)
  2533  	if err != nil {
  2534  		t.Fatal(err)
  2535  	}
  2536  
  2537  	_, diagnosis, err := scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), &v1.Pod{})
  2538  	if err != nil {
  2539  		t.Errorf("unexpected error: %v", err)
  2540  	}
  2541  
  2542  	expected := framework.Diagnosis{
  2543  		NodeToStatusMap: framework.NodeToStatusMap{
  2544  			"1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  2545  			"2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  2546  			"3": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  2547  		},
  2548  		UnschedulablePlugins: sets.New("MatchFilter"),
  2549  		EvaluatedNodes:       3,
  2550  	}
  2551  	if diff := cmp.Diff(expected, diagnosis); diff != "" {
  2552  		t.Errorf("Unexpected diagnosis: (-want, +got): %s", diff)
  2553  	}
  2554  }
  2555  
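        // TestFindFitSomeError schedules a pod named "1", which MatchFilter admits
        // only on the node with the same name, so the diagnosis should cover every
        // node except "1", each carrying the fake unschedulable reason.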
  2556  func TestFindFitSomeError(t *testing.T) {
  2557  	ctx, cancel := context.WithCancel(context.Background())
  2558  	defer cancel()
  2559  
  2560  	nodes := makeNodeList([]string{"3", "2", "1"})
  2561  	scheduler := makeScheduler(ctx, nodes)
  2562  
  2563  	fwk, err := tf.NewFramework(
  2564  		ctx,
  2565  		[]tf.RegisterPluginFunc{
  2566  			tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2567  			tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2568  			tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  2569  			tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2570  		},
  2571  		"",
  2572  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  2573  	)
  2574  	if err != nil {
  2575  		t.Fatal(err)
  2576  	}
  2577  
  2578  	pod := st.MakePod().Name("1").UID("1").Obj()
  2579  	_, diagnosis, err := scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), pod)
  2580  	if err != nil {
  2581  		t.Errorf("unexpected error: %v", err)
  2582  	}
  2583  
  2584  	if len(diagnosis.NodeToStatusMap) != len(nodes)-1 {
  2585  		t.Errorf("unexpected failed status map: %v", diagnosis.NodeToStatusMap)
  2586  	}
  2587  
  2588  	if diff := cmp.Diff(sets.New("MatchFilter"), diagnosis.UnschedulablePlugins); diff != "" {
  2589  		t.Errorf("Unexpected unschedulablePlugins: (-want, +got): %s", diff)
  2590  	}
  2591  
  2592  	for _, node := range nodes {
  2593  		if node.Name == pod.Name {
  2594  			continue
  2595  		}
  2596  		t.Run(node.Name, func(t *testing.T) {
  2597  			status, found := diagnosis.NodeToStatusMap[node.Name]
  2598  			if !found {
  2599  				t.Errorf("failed to find node %v in %v", node.Name, diagnosis.NodeToStatusMap)
  2600  			}
  2601  			reasons := status.Reasons()
  2602  			if len(reasons) != 1 || reasons[0] != tf.ErrReasonFake {
  2603  				t.Errorf("unexpected failures: %v", reasons)
  2604  			}
  2605  		})
  2606  	}
  2607  }
  2608  
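        // TestFindFitPredicateCallCounts checks the nominated-pods path of
        // findNodesThatFitPod: a midPriority pod is nominated onto the only node,
        // so filtering a higher-priority pod takes a single Filter call, while a
        // lower-priority pod is filtered twice, once with the nominated pod assumed
        // on the node and once without it.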
  2609  func TestFindFitPredicateCallCounts(t *testing.T) {
  2610  	tests := []struct {
  2611  		name          string
  2612  		pod           *v1.Pod
  2613  		expectedCount int32
  2614  	}{
  2615  		{
  2616  			name:          "nominated pods have lower priority, predicate is called once",
  2617  			pod:           st.MakePod().Name("1").UID("1").Priority(highPriority).Obj(),
  2618  			expectedCount: 1,
  2619  		},
  2620  		{
  2621  			name:          "nominated pods have higher priority, predicate is called twice",
  2622  			pod:           st.MakePod().Name("1").UID("1").Priority(lowPriority).Obj(),
  2623  			expectedCount: 2,
  2624  		},
  2625  	}
  2626  
  2627  	for _, test := range tests {
  2628  		t.Run(test.name, func(t *testing.T) {
  2629  			nodes := makeNodeList([]string{"1"})
  2630  
  2631  			plugin := tf.FakeFilterPlugin{}
  2632  			registerFakeFilterFunc := tf.RegisterFilterPlugin(
  2633  				"FakeFilter",
  2634  				func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
  2635  					return &plugin, nil
  2636  				},
  2637  			)
  2638  			registerPlugins := []tf.RegisterPluginFunc{
  2639  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2640  				registerFakeFilterFunc,
  2641  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2642  			}
  2643  			logger, ctx := ktesting.NewTestContext(t)
  2644  			ctx, cancel := context.WithCancel(ctx)
  2645  			defer cancel()
  2646  			fwk, err := tf.NewFramework(
  2647  				ctx,
  2648  				registerPlugins, "",
  2649  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  2650  			)
  2651  			if err != nil {
  2652  				t.Fatal(err)
  2653  			}
  2654  
  2655  			scheduler := makeScheduler(ctx, nodes)
  2656  			if err := scheduler.Cache.UpdateSnapshot(logger, scheduler.nodeInfoSnapshot); err != nil {
  2657  				t.Fatal(err)
  2658  			}
  2659  			podinfo, err := framework.NewPodInfo(st.MakePod().UID("nominated").Priority(midPriority).Obj())
  2660  			if err != nil {
  2661  				t.Fatal(err)
  2662  			}
  2663  			fwk.AddNominatedPod(logger, podinfo, &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "1"})
  2664  
  2665  			_, _, err = scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
  2666  			if err != nil {
  2667  				t.Errorf("unexpected error: %v", err)
  2668  			}
  2669  			if test.expectedCount != plugin.NumFilterCalled {
  2670  				t.Errorf("predicate was called %d times, expected is %d", plugin.NumFilterCalled, test.expectedCount)
  2671  			}
  2672  		})
  2673  	}
  2674  }
  2675  
  2676  // The point of this test is to show that you:
  2677  //   - get the same priority for a zero-request pod as for a pod with the default requests,
  2678  //     both when the zero-request pod is already on the node and when the zero-request pod
  2679  //     is the one being scheduled, and
  2680  //   - don't simply get the same score regardless of what is scheduled.
  2681  func TestZeroRequest(t *testing.T) {
  2682  	// A pod with no resources. We expect spreading to count it as having the default resources.
  2683  	noResources := v1.PodSpec{
  2684  		Containers: []v1.Container{
  2685  			{},
  2686  		},
  2687  	}
  2688  	noResources1 := noResources
  2689  	noResources1.NodeName = "node1"
  2690  	// A pod that explicitly requests the same resources that a zero-request pod is assigned by default (for spreading).
  2691  	small := v1.PodSpec{
  2692  		Containers: []v1.Container{
  2693  			{
  2694  				Resources: v1.ResourceRequirements{
  2695  					Requests: v1.ResourceList{
  2696  						v1.ResourceCPU: resource.MustParse(
  2697  							strconv.FormatInt(schedutil.DefaultMilliCPURequest, 10) + "m"),
  2698  						v1.ResourceMemory: resource.MustParse(
  2699  							strconv.FormatInt(schedutil.DefaultMemoryRequest, 10)),
  2700  					},
  2701  				},
  2702  			},
  2703  		},
  2704  	}
  2705  	small2 := small
  2706  	small2.NodeName = "node2"
  2707  	// A larger pod.
  2708  	large := v1.PodSpec{
  2709  		Containers: []v1.Container{
  2710  			{
  2711  				Resources: v1.ResourceRequirements{
  2712  					Requests: v1.ResourceList{
  2713  						v1.ResourceCPU: resource.MustParse(
  2714  							strconv.FormatInt(schedutil.DefaultMilliCPURequest*3, 10) + "m"),
  2715  						v1.ResourceMemory: resource.MustParse(
  2716  							strconv.FormatInt(schedutil.DefaultMemoryRequest*3, 10)),
  2717  					},
  2718  				},
  2719  			},
  2720  		},
  2721  	}
  2722  	large1 := large
  2723  	large1.NodeName = "node1"
  2724  	large2 := large
  2725  	large2.NodeName = "node2"
  2726  	tests := []struct {
  2727  		pod           *v1.Pod
  2728  		pods          []*v1.Pod
  2729  		nodes         []*v1.Node
  2730  		name          string
  2731  		expectedScore int64
  2732  	}{
  2733  		// The point of these next two tests is to show you get the same priority for a zero-request pod
  2734  		// as for a pod with the default requests, both when the zero-request pod is already on the node
  2735  		// and when the zero-request pod is the one being scheduled.
  2736  		{
  2737  			pod:   &v1.Pod{Spec: noResources},
  2738  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2739  			name:  "test priority of zero-request pod with node with zero-request pod",
  2740  			pods: []*v1.Pod{
  2741  				{Spec: large1}, {Spec: noResources1},
  2742  				{Spec: large2}, {Spec: small2},
  2743  			},
  2744  			expectedScore: 150,
  2745  		},
  2746  		{
  2747  			pod:   &v1.Pod{Spec: small},
  2748  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2749  			name:  "test priority of nonzero-request pod with node with zero-request pod",
  2750  			pods: []*v1.Pod{
  2751  				{Spec: large1}, {Spec: noResources1},
  2752  				{Spec: large2}, {Spec: small2},
  2753  			},
  2754  			expectedScore: 150,
  2755  		},
  2756  		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
  2757  		{
  2758  			pod:   &v1.Pod{Spec: large},
  2759  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2760  			name:  "test priority of larger pod with node with zero-request pod",
  2761  			pods: []*v1.Pod{
  2762  				{Spec: large1}, {Spec: noResources1},
  2763  				{Spec: large2}, {Spec: small2},
  2764  			},
  2765  			expectedScore: 130,
  2766  		},
  2767  	}
  2768  
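        	// Each case runs the real filtering and scoring path with the
        	// NodeResourcesFit and BalancedAllocation score plugins. Equal expected
        	// totals (150 on both nodes) for the zero-request and default-request
        	// pods show that zero requests are scored as the defaults, while the
        	// larger pod's different total (130) shows scoring is not degenerate.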
  2769  	for _, test := range tests {
  2770  		t.Run(test.name, func(t *testing.T) {
  2771  			client := clientsetfake.NewSimpleClientset()
  2772  			informerFactory := informers.NewSharedInformerFactory(client, 0)
  2773  
  2774  			snapshot := internalcache.NewSnapshot(test.pods, test.nodes)
  2775  			fts := feature.Features{}
  2776  			pluginRegistrations := []tf.RegisterPluginFunc{
  2777  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2778  				tf.RegisterScorePlugin(noderesources.Name, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1),
  2779  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1),
  2780  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2781  			}
  2782  			ctx, cancel := context.WithCancel(context.Background())
  2783  			defer cancel()
  2784  			fwk, err := tf.NewFramework(
  2785  				ctx,
  2786  				pluginRegistrations, "",
  2787  				frameworkruntime.WithInformerFactory(informerFactory),
  2788  				frameworkruntime.WithSnapshotSharedLister(snapshot),
  2789  				frameworkruntime.WithClientSet(client),
  2790  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  2791  			)
  2792  			if err != nil {
  2793  				t.Fatalf("error creating framework: %+v", err)
  2794  			}
  2795  
  2796  			sched := &Scheduler{
  2797  				nodeInfoSnapshot:         snapshot,
  2798  				percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  2799  			}
  2800  			sched.applyDefaultHandlers()
  2801  
  2802  			state := framework.NewCycleState()
  2803  			_, _, err = sched.findNodesThatFitPod(ctx, fwk, state, test.pod)
  2804  			if err != nil {
  2805  				t.Fatalf("error filtering nodes: %+v", err)
  2806  			}
  2807  			fwk.RunPreScorePlugins(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
  2808  			list, err := prioritizeNodes(ctx, nil, fwk, state, test.pod, tf.BuildNodeInfos(test.nodes))
  2809  			if err != nil {
  2810  				t.Errorf("unexpected error: %v", err)
  2811  			}
  2812  			for _, hp := range list {
  2813  				if hp.TotalScore != test.expectedScore {
  2814  					t.Errorf("expected %d for all priorities, got list %#v", test.expectedScore, list)
  2815  				}
  2816  			}
  2817  		})
  2818  	}
  2819  }
  2820  
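        // Test_prioritizeNodes checks how prioritizeNodes aggregates scores into
        // NodePluginScores: plugin scores are recorded under the plugin name,
        // extender scores are weighted and scaled from the extender's 0-10 range to
        // the framework's 0-100 node score range, and a plugin that returns Skip in
        // PreScore contributes no Score entry at all.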
  2821  func Test_prioritizeNodes(t *testing.T) {
  2822  	imageStatus1 := []v1.ContainerImage{
  2823  		{
  2824  			Names: []string{
  2825  				"gcr.io/40:latest",
  2826  				"gcr.io/40:v1",
  2827  			},
  2828  			SizeBytes: int64(80 * mb),
  2829  		},
  2830  		{
  2831  			Names: []string{
  2832  				"gcr.io/300:latest",
  2833  				"gcr.io/300:v1",
  2834  			},
  2835  			SizeBytes: int64(300 * mb),
  2836  		},
  2837  	}
  2838  
  2839  	imageStatus2 := []v1.ContainerImage{
  2840  		{
  2841  			Names: []string{
  2842  				"gcr.io/300:latest",
  2843  			},
  2844  			SizeBytes: int64(300 * mb),
  2845  		},
  2846  		{
  2847  			Names: []string{
  2848  				"gcr.io/40:latest",
  2849  				"gcr.io/40:v1",
  2850  			},
  2851  			SizeBytes: int64(80 * mb),
  2852  		},
  2853  	}
  2854  
  2855  	imageStatus3 := []v1.ContainerImage{
  2856  		{
  2857  			Names: []string{
  2858  				"gcr.io/600:latest",
  2859  			},
  2860  			SizeBytes: int64(600 * mb),
  2861  		},
  2862  		{
  2863  			Names: []string{
  2864  				"gcr.io/40:latest",
  2865  			},
  2866  			SizeBytes: int64(80 * mb),
  2867  		},
  2868  		{
  2869  			Names: []string{
  2870  				"gcr.io/900:latest",
  2871  			},
  2872  			SizeBytes: int64(900 * mb),
  2873  		},
  2874  	}
  2875  	tests := []struct {
  2876  		name                string
  2877  		pod                 *v1.Pod
  2878  		pods                []*v1.Pod
  2879  		nodes               []*v1.Node
  2880  		pluginRegistrations []tf.RegisterPluginFunc
  2881  		extenders           []tf.FakeExtender
  2882  		want                []framework.NodePluginScores
  2883  	}{
  2884  		{
  2885  			name:  "the score from all plugins should be recorded in PluginToNodeScores",
  2886  			pod:   &v1.Pod{},
  2887  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2888  			pluginRegistrations: []tf.RegisterPluginFunc{
  2889  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2890  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1),
  2891  				tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 1),
  2892  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2893  			},
  2894  			extenders: nil,
  2895  			want: []framework.NodePluginScores{
  2896  				{
  2897  					Name: "node1",
  2898  					Scores: []framework.PluginScore{
  2899  						{
  2900  							Name:  "Node2Prioritizer",
  2901  							Score: 10,
  2902  						},
  2903  						{
  2904  							Name:  "NodeResourcesBalancedAllocation",
  2905  							Score: 100,
  2906  						},
  2907  					},
  2908  					TotalScore: 110,
  2909  				},
  2910  				{
  2911  					Name: "node2",
  2912  					Scores: []framework.PluginScore{
  2913  						{
  2914  							Name:  "Node2Prioritizer",
  2915  							Score: 100,
  2916  						},
  2917  						{
  2918  							Name:  "NodeResourcesBalancedAllocation",
  2919  							Score: 100,
  2920  						},
  2921  					},
  2922  					TotalScore: 200,
  2923  				},
  2924  			},
  2925  		},
  2926  		{
  2927  			name:  "scores from extenders should also be recorded in PluginToNodeScores alongside plugin scores",
  2928  			pod:   &v1.Pod{},
  2929  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2930  			pluginRegistrations: []tf.RegisterPluginFunc{
  2931  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2932  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1),
  2933  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2934  			},
  2935  			extenders: []tf.FakeExtender{
  2936  				{
  2937  					ExtenderName: "FakeExtender1",
  2938  					Weight:       1,
  2939  					Prioritizers: []tf.PriorityConfig{
  2940  						{
  2941  							Weight:   3,
  2942  							Function: tf.Node1PrioritizerExtender,
  2943  						},
  2944  					},
  2945  				},
  2946  				{
  2947  					ExtenderName: "FakeExtender2",
  2948  					Weight:       1,
  2949  					Prioritizers: []tf.PriorityConfig{
  2950  						{
  2951  							Weight:   2,
  2952  							Function: tf.Node2PrioritizerExtender,
  2953  						},
  2954  					},
  2955  				},
  2956  			},
  2957  			want: []framework.NodePluginScores{
  2958  				{
  2959  					Name: "node1",
  2960  					Scores: []framework.PluginScore{
  2962  						{
  2963  							Name:  "FakeExtender1",
  2964  							Score: 300,
  2965  						},
  2966  						{
  2967  							Name:  "FakeExtender2",
  2968  							Score: 20,
  2969  						},
  2970  						{
  2971  							Name:  "NodeResourcesBalancedAllocation",
  2972  							Score: 100,
  2973  						},
  2974  					},
  2975  					TotalScore: 420,
  2976  				},
  2977  				{
  2978  					Name: "node2",
  2979  					Scores: []framework.PluginScore{
  2980  						{
  2981  							Name:  "FakeExtender1",
  2982  							Score: 30,
  2983  						},
  2984  						{
  2985  							Name:  "FakeExtender2",
  2986  							Score: 200,
  2987  						},
  2988  						{
  2989  							Name:  "NodeResourcesBalancedAllocation",
  2990  							Score: 100,
  2991  						},
  2992  					},
  2993  					TotalScore: 330,
  2994  				},
  2995  			},
  2996  		},
  2997  		{
  2998  			name:  "a plugin that returned Skip in PreScore shouldn't be executed in the Score phase",
  2999  			pod:   &v1.Pod{},
  3000  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  3001  			pluginRegistrations: []tf.RegisterPluginFunc{
  3002  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3003  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1),
  3004  				tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 1),
  3005  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3006  				tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0,
  3007  					framework.NewStatus(framework.Skip, "fake skip"),
  3008  					framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"),
  3009  				), "PreScore", "Score"),
  3010  			},
  3011  			extenders: nil,
  3012  			want: []framework.NodePluginScores{
  3013  				{
  3014  					Name: "node1",
  3015  					Scores: []framework.PluginScore{
  3016  						{
  3017  							Name:  "Node2Prioritizer",
  3018  							Score: 10,
  3019  						},
  3020  						{
  3021  							Name:  "NodeResourcesBalancedAllocation",
  3022  							Score: 100,
  3023  						},
  3024  					},
  3025  					TotalScore: 110,
  3026  				},
  3027  				{
  3028  					Name: "node2",
  3029  					Scores: []framework.PluginScore{
  3030  						{
  3031  							Name:  "Node2Prioritizer",
  3032  							Score: 100,
  3033  						},
  3034  						{
  3035  							Name:  "NodeResourcesBalancedAllocation",
  3036  							Score: 100,
  3037  						},
  3038  					},
  3039  					TotalScore: 200,
  3040  				},
  3041  			},
  3042  		},
  3043  		{
  3044  			name:  "all score plugins are skipped",
  3045  			pod:   &v1.Pod{},
  3046  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  3047  			pluginRegistrations: []tf.RegisterPluginFunc{
  3048  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3049  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3050  				tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0,
  3051  					framework.NewStatus(framework.Skip, "fake skip"),
  3052  					framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"),
  3053  				), "PreScore", "Score"),
  3054  			},
  3055  			extenders: nil,
  3056  			want: []framework.NodePluginScores{
  3057  				{Name: "node1", Scores: []framework.PluginScore{}},
  3058  				{Name: "node2", Scores: []framework.PluginScore{}},
  3059  			},
  3060  		},
  3061  		{
  3062  			name: "the score from the ImageLocality plugin when the image exists on all nodes",
  3063  			pod: &v1.Pod{
  3064  				Spec: v1.PodSpec{
  3065  					Containers: []v1.Container{
  3066  						{
  3067  							Image: "gcr.io/40",
  3068  						},
  3069  					},
  3070  				},
  3071  			},
  3072  			nodes: []*v1.Node{
  3073  				makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10, imageStatus1...),
  3074  				makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10, imageStatus2...),
  3075  				makeNode("node3", 1000, schedutil.DefaultMemoryRequest*10, imageStatus3...),
  3076  			},
  3077  			pluginRegistrations: []tf.RegisterPluginFunc{
  3078  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3079  				tf.RegisterScorePlugin(imagelocality.Name, imagelocality.New, 1),
  3080  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3081  			},
  3082  			extenders: nil,
  3083  			want: []framework.NodePluginScores{
  3084  				{
  3085  					Name: "node1",
  3086  					Scores: []framework.PluginScore{
  3087  						{
  3088  							Name:  "ImageLocality",
  3089  							Score: 5,
  3090  						},
  3091  					},
  3092  					TotalScore: 5,
  3093  				},
  3094  				{
  3095  					Name: "node2",
  3096  					Scores: []framework.PluginScore{
  3097  						{
  3098  							Name:  "ImageLocality",
  3099  							Score: 5,
  3100  						},
  3101  					},
  3102  					TotalScore: 5,
  3103  				},
  3104  				{
  3105  					Name: "node3",
  3106  					Scores: []framework.PluginScore{
  3107  						{
  3108  							Name:  "ImageLocality",
  3109  							Score: 5,
  3110  						},
  3111  					},
  3112  					TotalScore: 5,
  3113  				},
  3114  			},
  3115  		},
  3116  		{
  3117  			name: "the score from the ImageLocality plugin when the image exists on only some nodes",
  3118  			pod: &v1.Pod{
  3119  				Spec: v1.PodSpec{
  3120  					Containers: []v1.Container{
  3121  						{
  3122  							Image: "gcr.io/300",
  3123  						},
  3124  					},
  3125  				},
  3126  			},
  3127  			nodes: []*v1.Node{
        				makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10, imageStatus1...),
  3128  				makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10, imageStatus2...),
  3129  				makeNode("node3", 1000, schedutil.DefaultMemoryRequest*10, imageStatus3...),
  3130  			},
  3131  			pluginRegistrations: []tf.RegisterPluginFunc{
  3132  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3133  				tf.RegisterScorePlugin(imagelocality.Name, imagelocality.New, 1),
  3134  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3135  			},
  3136  			extenders: nil,
  3137  			want: []framework.NodePluginScores{
  3138  				{
  3139  					Name: "node1",
  3140  					Scores: []framework.PluginScore{
  3141  						{
  3142  							Name:  "ImageLocality",
  3143  							Score: 18,
  3144  						},
  3145  					},
  3146  					TotalScore: 18,
  3147  				},
  3148  				{
  3149  					Name: "node2",
  3150  					Scores: []framework.PluginScore{
  3151  						{
  3152  							Name:  "ImageLocality",
  3153  							Score: 18,
  3154  						},
  3155  					},
  3156  					TotalScore: 18,
  3157  				},
  3158  				{
  3159  					Name: "node3",
  3160  					Scores: []framework.PluginScore{
  3161  						{
  3162  							Name:  "ImageLocality",
  3163  							Score: 0,
  3164  						},
  3165  					},
  3166  					TotalScore: 0,
  3167  				},
  3168  			},
  3169  		},
  3170  	}
  3171  
  3172  	for _, test := range tests {
  3173  		t.Run(test.name, func(t *testing.T) {
  3174  			client := clientsetfake.NewSimpleClientset()
  3175  			informerFactory := informers.NewSharedInformerFactory(client, 0)
  3176  
  3177  			ctx, cancel := context.WithCancel(context.Background())
  3178  			defer cancel()
  3179  			cache := internalcache.New(ctx, time.Duration(0))
  3180  			for _, node := range test.nodes {
  3181  				cache.AddNode(klog.FromContext(ctx), node)
  3182  			}
  3183  			snapshot := internalcache.NewEmptySnapshot()
  3184  			if err := cache.UpdateSnapshot(klog.FromContext(ctx), snapshot); err != nil {
  3185  				t.Fatal(err)
  3186  			}
  3187  			fwk, err := tf.NewFramework(
  3188  				ctx,
  3189  				test.pluginRegistrations, "",
  3190  				frameworkruntime.WithInformerFactory(informerFactory),
  3191  				frameworkruntime.WithSnapshotSharedLister(snapshot),
  3192  				frameworkruntime.WithClientSet(client),
  3193  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  3194  			)
  3195  			if err != nil {
  3196  				t.Fatalf("error creating framework: %+v", err)
  3197  			}
  3198  
  3199  			state := framework.NewCycleState()
  3200  			var extenders []framework.Extender
  3201  			for ii := range test.extenders {
  3202  				extenders = append(extenders, &test.extenders[ii])
  3203  			}
  3204  			nodesscores, err := prioritizeNodes(ctx, extenders, fwk, state, test.pod, tf.BuildNodeInfos(test.nodes))
  3205  			if err != nil {
  3206  				t.Errorf("unexpected error: %v", err)
  3207  			}
  3208  			for i := range nodesscores {
  3209  				sort.Slice(nodesscores[i].Scores, func(j, k int) bool {
  3210  					return nodesscores[i].Scores[j].Name < nodesscores[i].Scores[k].Name
  3211  				})
  3212  			}
  3213  
  3214  			if diff := cmp.Diff(test.want, nodesscores); diff != "" {
  3215  				t.Errorf("returned nodes scores (-want,+got):\n%s", diff)
  3216  			}
  3217  		})
  3218  	}
  3219  }
  3220  
  3221  var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
  3222  
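        // TestNumFeasibleNodesToFind covers the adaptive default for
        // percentageOfNodesToScore, as the cases below encode: small clusters are
        // searched exhaustively, and larger ones without a configured percentage
        // use roughly (50 - numAllNodes/125)% with a 5% floor, so 1000 nodes yield
        // 42% = 420 and 6000 nodes hit the floor at 300. A profile-level
        // percentage takes precedence over the global one.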
  3223  func TestNumFeasibleNodesToFind(t *testing.T) {
  3224  	tests := []struct {
  3225  		name              string
  3226  		globalPercentage  int32
  3227  		profilePercentage *int32
  3228  		numAllNodes       int32
  3229  		wantNumNodes      int32
  3230  	}{
  3231  		{
  3232  			name:         "not set percentageOfNodesToScore and nodes number not more than 50",
  3233  			numAllNodes:  10,
  3234  			wantNumNodes: 10,
  3235  		},
  3236  		{
  3237  			name:              "set profile percentageOfNodesToScore and nodes number not more than 50",
  3238  			profilePercentage: ptr.To[int32](40),
  3239  			numAllNodes:       10,
  3240  			wantNumNodes:      10,
  3241  		},
  3242  		{
  3243  			name:         "not set percentageOfNodesToScore and nodes number more than 50",
  3244  			numAllNodes:  1000,
  3245  			wantNumNodes: 420,
  3246  		},
  3247  		{
  3248  			name:              "set profile percentageOfNodesToScore and nodes number more than 50",
  3249  			profilePercentage: ptr.To[int32](40),
  3250  			numAllNodes:       1000,
  3251  			wantNumNodes:      400,
  3252  		},
  3253  		{
  3254  			name:              "set global and profile percentageOfNodesToScore and nodes number more than 50",
  3255  			globalPercentage:  100,
  3256  			profilePercentage: ptr.To[int32](40),
  3257  			numAllNodes:       1000,
  3258  			wantNumNodes:      400,
  3259  		},
  3260  		{
  3261  			name:             "set global percentageOfNodesToScore and nodes number more than 50",
  3262  			globalPercentage: 40,
  3263  			numAllNodes:      1000,
  3264  			wantNumNodes:     400,
  3265  		},
  3266  		{
  3267  			name:         "not set profile percentageOfNodesToScore and nodes number more than 50*125",
  3268  			numAllNodes:  6000,
  3269  			wantNumNodes: 300,
  3270  		},
  3271  		{
  3272  			name:              "set profile percentageOfNodesToScore and nodes number more than 50*125",
  3273  			profilePercentage: ptr.To[int32](40),
  3274  			numAllNodes:       6000,
  3275  			wantNumNodes:      2400,
  3276  		},
  3277  	}
  3278  
  3279  	for _, tt := range tests {
  3280  		t.Run(tt.name, func(t *testing.T) {
  3281  			sched := &Scheduler{
  3282  				percentageOfNodesToScore: tt.globalPercentage,
  3283  			}
  3284  			if gotNumNodes := sched.numFeasibleNodesToFind(tt.profilePercentage, tt.numAllNodes); gotNumNodes != tt.wantNumNodes {
  3285  				t.Errorf("Scheduler.numFeasibleNodesToFind() = %v, want %v", gotNumNodes, tt.wantNumNodes)
  3286  			}
  3287  		})
  3288  	}
  3289  }
  3290  
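        // TestFairEvaluationForNodes verifies that consecutive scheduling attempts
        // do not always start filtering from the same node: nextStartNodeIndex must
        // advance by the number of nodes evaluated on each attempt, modulo the
        // cluster size, so all nodes get a fair chance of being considered.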
  3291  func TestFairEvaluationForNodes(t *testing.T) {
  3292  	numAllNodes := 500
  3293  	nodeNames := make([]string, 0, numAllNodes)
  3294  	for i := 0; i < numAllNodes; i++ {
  3295  		nodeNames = append(nodeNames, strconv.Itoa(i))
  3296  	}
  3297  	nodes := makeNodeList(nodeNames)
  3298  	ctx, cancel := context.WithCancel(context.Background())
  3299  	defer cancel()
  3300  	sched := makeScheduler(ctx, nodes)
  3301  
  3302  	fwk, err := tf.NewFramework(
  3303  		ctx,
  3304  		[]tf.RegisterPluginFunc{
  3305  			tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3306  			tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  3307  			tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
  3308  			tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3309  		},
  3310  		"",
  3311  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  3312  	)
  3313  	if err != nil {
  3314  		t.Fatal(err)
  3315  	}
  3316  
  3317  	// Choose a percentage so that numAllNodes % nodesToFind != 0.
  3318  	sched.percentageOfNodesToScore = 30
  3319  	nodesToFind := int(sched.numFeasibleNodesToFind(fwk.PercentageOfNodesToScore(), int32(numAllNodes)))
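        	// With 500 nodes at 30%, nodesToFind is 150, which does not divide 500
        	// evenly, so each pass below wraps around to a different starting offset.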
  3320  
  3321  	// Iterate over all nodes more than twice to confirm the start index keeps rotating.
  3322  	for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
  3323  		nodesThatFit, _, err := sched.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), &v1.Pod{})
  3324  		if err != nil {
  3325  			t.Errorf("unexpected error: %v", err)
  3326  		}
  3327  		if len(nodesThatFit) != nodesToFind {
  3328  			t.Errorf("got %d nodes filtered, want %d", len(nodesThatFit), nodesToFind)
  3329  		}
  3330  		if sched.nextStartNodeIndex != (i+1)*nodesToFind%numAllNodes {
  3331  			t.Errorf("got %d lastProcessedNodeIndex, want %d", sched.nextStartNodeIndex, (i+1)*nodesToFind%numAllNodes)
  3332  		}
  3333  	}
  3334  }
  3335  
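        // TestPreferNominatedNodeFilterCallCounts verifies the nominated-node fast
        // path: a pod whose nominated node passes Filter is evaluated on that node
        // alone (one call); a pod without a nominated node is filtered on all three
        // nodes; and if the nominated node fails, the scheduler falls back to all
        // nodes, for one failing call plus three more.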
  3336  func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
  3337  	tests := []struct {
  3338  		name                  string
  3339  		pod                   *v1.Pod
  3340  		nodeReturnCodeMap     map[string]framework.Code
  3341  		expectedCount         int32
  3342  		expectedPatchRequests int
  3343  	}{
  3344  		{
  3345  			name:          "pod has the nominated node set, filter is called only once",
  3346  			pod:           st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
  3347  			expectedCount: 1,
  3348  		},
  3349  		{
  3350  			name:          "pod without a nominated node, filter is called for each node",
  3351  			pod:           st.MakePod().Name("p_without_nominated_node").UID("p").Priority(highPriority).Obj(),
  3352  			expectedCount: 3,
  3353  		},
  3354  		{
  3355  			name:              "nominated pod cannot pass the filter, filter is called for each node",
  3356  			pod:               st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
  3357  			nodeReturnCodeMap: map[string]framework.Code{"node1": framework.Unschedulable},
  3358  			expectedCount:     4,
  3359  		},
  3360  	}
  3361  
  3362  	for _, test := range tests {
  3363  		t.Run(test.name, func(t *testing.T) {
  3364  			logger, ctx := ktesting.NewTestContext(t)
  3365  			ctx, cancel := context.WithCancel(ctx)
  3366  			defer cancel()
  3367  
  3368  			// Create three nodes in the cluster.
  3369  			nodes := makeNodeList([]string{"node1", "node2", "node3"})
  3370  			client := clientsetfake.NewSimpleClientset(test.pod)
  3371  			informerFactory := informers.NewSharedInformerFactory(client, 0)
  3372  			cache := internalcache.New(ctx, time.Duration(0))
  3373  			for _, n := range nodes {
  3374  				cache.AddNode(logger, n)
  3375  			}
  3376  			plugin := tf.FakeFilterPlugin{FailedNodeReturnCodeMap: test.nodeReturnCodeMap}
  3377  			registerFakeFilterFunc := tf.RegisterFilterPlugin(
  3378  				"FakeFilter",
  3379  				func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
  3380  					return &plugin, nil
  3381  				},
  3382  			)
  3383  			registerPlugins := []tf.RegisterPluginFunc{
  3384  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3385  				registerFakeFilterFunc,
  3386  				tf.RegisterScorePlugin("EqualPrioritizerPlugin", tf.NewEqualPrioritizerPlugin(), 1),
  3387  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3388  			}
  3389  			fwk, err := tf.NewFramework(
  3390  				ctx,
  3391  				registerPlugins, "",
  3392  				frameworkruntime.WithClientSet(client),
  3393  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  3394  			)
  3395  			if err != nil {
  3396  				t.Fatal(err)
  3397  			}
  3398  			snapshot := internalcache.NewSnapshot(nil, nodes)
  3399  
  3400  			sched := &Scheduler{
  3401  				Cache:                    cache,
  3402  				nodeInfoSnapshot:         snapshot,
  3403  				percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  3404  			}
  3405  			sched.applyDefaultHandlers()
  3406  
  3407  			_, _, err = sched.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
  3408  			if err != nil {
  3409  				t.Errorf("unexpected error: %v", err)
  3410  			}
  3411  			if test.expectedCount != plugin.NumFilterCalled {
  3412  				t.Errorf("predicate was called %d times, expected is %d", plugin.NumFilterCalled, test.expectedCount)
  3413  			}
  3414  		})
  3415  	}
  3416  }
  3417  
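        // podWithID returns a pod with the given name and UID, assigned to
        // desiredHost (empty for an unscheduled pod) and handled by the test
        // scheduler.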
  3418  func podWithID(id, desiredHost string) *v1.Pod {
  3419  	return st.MakePod().Name(id).UID(id).Node(desiredHost).SchedulerName(testSchedulerName).Obj()
  3420  }
  3421  
  3422  func deletingPod(id string) *v1.Pod {
  3423  	return st.MakePod().Name(id).UID(id).Terminating().Node("").SchedulerName(testSchedulerName).Obj()
  3424  }
  3425  
  3426  func podWithPort(id, desiredHost string, port int) *v1.Pod {
  3427  	pod := podWithID(id, desiredHost)
  3428  	pod.Spec.Containers = []v1.Container{
  3429  		{Name: "ctr", Ports: []v1.ContainerPort{{HostPort: int32(port)}}},
  3430  	}
  3431  	return pod
  3432  }
  3433  
  3434  func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v1.ResourceList) *v1.Pod {
  3435  	pod := podWithID(id, desiredHost)
  3436  	pod.Spec.Containers = []v1.Container{
  3437  		{Name: "ctr", Resources: v1.ResourceRequirements{Limits: limits, Requests: requests}},
  3438  	}
  3439  	return pod
  3440  }
  3441  
  3442  func makeNodeList(nodeNames []string) []*v1.Node {
  3443  	result := make([]*v1.Node, 0, len(nodeNames))
  3444  	for _, nodeName := range nodeNames {
  3445  		result = append(result, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
  3446  	}
  3447  	return result
  3448  }
  3449  
  3450  // makeScheduler makes a simple Scheduler for testing.
  3451  func makeScheduler(ctx context.Context, nodes []*v1.Node) *Scheduler {
  3452  	logger := klog.FromContext(ctx)
  3453  	cache := internalcache.New(ctx, time.Duration(0))
  3454  	for _, n := range nodes {
  3455  		cache.AddNode(logger, n)
  3456  	}
  3457  
  3458  	sched := &Scheduler{
  3459  		Cache:                    cache,
  3460  		nodeInfoSnapshot:         emptySnapshot,
  3461  		percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  3462  	}
  3463  	sched.applyDefaultHandlers()
  3464  	cache.UpdateSnapshot(logger, sched.nodeInfoSnapshot)
  3465  	return sched
  3466  }
  3467  
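        // makeNode returns a Node with the given milliCPU and memory as both
        // capacity and allocatable, room for 100 pods, and any provided images.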
  3468  func makeNode(node string, milliCPU, memory int64, images ...v1.ContainerImage) *v1.Node {
  3469  	return &v1.Node{
  3470  		ObjectMeta: metav1.ObjectMeta{Name: node},
  3471  		Status: v1.NodeStatus{
  3472  			Capacity: v1.ResourceList{
  3473  				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
  3474  				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
  3475  				"pods":            *resource.NewQuantity(100, resource.DecimalSI),
  3476  			},
  3477  			Allocatable: v1.ResourceList{
  3479  				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
  3480  				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
  3481  				"pods":            *resource.NewQuantity(100, resource.DecimalSI),
  3482  			},
  3483  			Images: images,
  3484  		},
  3485  	}
  3486  }
  3487  
  3488  // queuedPodStore: pods queued before processing.
  3489  // cache: scheduler cache that might contain assumed pods.
  3490  func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, scache internalcache.Cache,
  3491  	pod *v1.Pod, node *v1.Node, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
  3492  	scheduler, bindingChan, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, nil, nil, fns...)
  3493  
  3494  	queuedPodStore.Add(pod)
  3495  	// queuedPodStore: [foo:8080]
  3496  	// cache: []
  3497  
  3498  	scheduler.ScheduleOne(ctx)
  3499  	// queuedPodStore: []
  3500  	// cache: [(assumed)foo:8080]
  3501  
  3502  	select {
  3503  	case b := <-bindingChan:
  3504  		expectBinding := &v1.Binding{
  3505  			ObjectMeta: metav1.ObjectMeta{Name: pod.Name, UID: types.UID(pod.Name)},
  3506  			Target:     v1.ObjectReference{Kind: "Node", Name: node.Name},
  3507  		}
  3508  		if !reflect.DeepEqual(expectBinding, b) {
  3509  			t.Errorf("binding want=%v, got=%v", expectBinding, b)
  3510  		}
  3511  	case <-time.After(wait.ForeverTestTimeout):
  3512  		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
  3513  	}
  3514  	return scheduler, bindingChan, errChan
  3515  }
  3516  
  3517  // queuedPodStore: pods queued before processing.
  3518  // cache: scheduler cache that might contain assumed pods.
  3519  func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
  3520  	bindingChan := make(chan *v1.Binding, 1)
  3521  	client := clientsetfake.NewSimpleClientset()
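        	// Intercept creates on the pods "binding" subresource so the test can
        	// observe which node the scheduler bound the pod to.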
  3522  	client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
  3523  		var b *v1.Binding
  3524  		if action.GetSubresource() == "binding" {
  3525  			b = action.(clienttesting.CreateAction).GetObject().(*v1.Binding)
  3526  			bindingChan <- b
  3527  		}
  3528  		return true, b, nil
  3529  	})
  3530  
  3531  	var recorder events.EventRecorder
  3532  	if broadcaster != nil {
  3533  		recorder = broadcaster.NewRecorder(scheme.Scheme, testSchedulerName)
  3534  	} else {
  3535  		recorder = &events.FakeRecorder{}
  3536  	}
  3537  
  3538  	if informerFactory == nil {
  3539  		informerFactory = informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0)
  3540  	}
  3541  	schedulingQueue := internalqueue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)
  3542  
  3543  	fwk, err := tf.NewFramework(
  3544  		ctx,
  3545  		fns,
  3546  		testSchedulerName,
  3547  		frameworkruntime.WithClientSet(client),
  3548  		frameworkruntime.WithEventRecorder(recorder),
  3549  		frameworkruntime.WithInformerFactory(informerFactory),
  3550  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  3551  	)
        	if err != nil {
        		t.Fatal(err)
        	}
  3552  
  3553  	errChan := make(chan error, 1)
  3554  	sched := &Scheduler{
  3555  		Cache:                    cache,
  3556  		client:                   client,
  3557  		nodeInfoSnapshot:         internalcache.NewEmptySnapshot(),
  3558  		percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  3559  		NextPod: func(logger klog.Logger) (*framework.QueuedPodInfo, error) {
  3560  			return &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, clientcache.Pop(queuedPodStore).(*v1.Pod))}, nil
  3561  		},
  3562  		SchedulingQueue: schedulingQueue,
  3563  		Profiles:        profile.Map{testSchedulerName: fwk},
  3564  	}
  3565  
  3566  	sched.SchedulePod = sched.schedulePod
  3567  	sched.FailureHandler = func(_ context.Context, _ framework.Framework, p *framework.QueuedPodInfo, status *framework.Status, _ *framework.NominatingInfo, _ time.Time) {
  3568  		err := status.AsError()
  3569  		errChan <- err
  3570  
  3571  		msg := truncateMessage(err.Error())
  3572  		fwk.EventRecorder().Eventf(p.Pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", msg)
  3573  	}
  3574  	return sched, bindingChan, errChan
  3575  }
  3576  
  3577  func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volumeBinder volumebinding.SchedulerVolumeBinder, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
  3578  	logger := klog.FromContext(ctx)
  3579  	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
  3580  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
  3581  	pod := podWithID("foo", "")
  3582  	pod.Namespace = "foo-ns"
  3583  	pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{Name: "testVol",
  3584  		VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "testPVC"}}})
  3585  	queuedPodStore.Add(pod)
  3586  	scache := internalcache.New(ctx, 10*time.Minute)
  3587  	scache.AddNode(logger, &testNode)
  3588  	testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}}
  3589  	client := clientsetfake.NewSimpleClientset(&testNode, &testPVC)
  3590  	informerFactory := informers.NewSharedInformerFactory(client, 0)
  3591  	pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
  3592  	pvcInformer.Informer().GetStore().Add(&testPVC)
  3593  
  3594  	fns := []tf.RegisterPluginFunc{
  3595  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3596  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3597  		tf.RegisterPluginAsExtensions(volumebinding.Name, func(ctx context.Context, plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) {
  3598  			return &volumebinding.VolumeBinding{Binder: volumeBinder, PVCLister: pvcInformer.Lister()}, nil
  3599  		}, "PreFilter", "Filter", "Reserve", "PreBind"),
  3600  	}
  3601  	s, bindingChan, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, broadcaster, fns...)
  3602  	return s, bindingChan, errChan
  3603  }
  3604  
  3605  // This is a workaround because golint complains that errors cannot
  3606  // end with punctuation.  However, the real predicate error message does
  3607  // end with a period.
  3608  func makePredicateError(failReason string) error {
  3609  	s := fmt.Sprintf("0/1 nodes are available: %v.", failReason)
  3610  	return errors.New(s)
  3611  }
  3612  
  3613  func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
  3614  	podInfo, err := framework.NewPodInfo(pod)
  3615  	if err != nil {
  3616  		t.Fatal(err)
  3617  	}
  3618  	return podInfo
  3619  }