k8s.io/kubernetes@v1.29.3/pkg/scheduler/schedule_one_test.go

     1  /*
     2  Copyright 2014 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package scheduler
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/rand"
    25  	"reflect"
    26  	"regexp"
    27  	"sort"
    28  	"strconv"
    29  	"sync"
    30  	"testing"
    31  	"time"
    32  
    33  	"github.com/google/go-cmp/cmp"
    34  	v1 "k8s.io/api/core/v1"
    35  	eventsv1 "k8s.io/api/events/v1"
    36  	"k8s.io/apimachinery/pkg/api/resource"
    37  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    38  	"k8s.io/apimachinery/pkg/runtime"
    39  	"k8s.io/apimachinery/pkg/types"
    40  	"k8s.io/apimachinery/pkg/util/sets"
    41  	"k8s.io/apimachinery/pkg/util/wait"
    42  	"k8s.io/client-go/informers"
    43  	clientsetfake "k8s.io/client-go/kubernetes/fake"
    44  	"k8s.io/client-go/kubernetes/scheme"
    45  	clienttesting "k8s.io/client-go/testing"
    46  	clientcache "k8s.io/client-go/tools/cache"
    47  	"k8s.io/client-go/tools/events"
    48  	"k8s.io/component-helpers/storage/volume"
    49  	"k8s.io/klog/v2"
    50  	"k8s.io/klog/v2/ktesting"
    51  	extenderv1 "k8s.io/kube-scheduler/extender/v1"
    52  	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
    53  	"k8s.io/kubernetes/pkg/scheduler/framework"
    54  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
    55  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
    56  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
    57  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
    58  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
    59  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
    60  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
    61  	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
    62  	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
    63  	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
    64  	fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
    65  	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
    66  	"k8s.io/kubernetes/pkg/scheduler/profile"
    67  	st "k8s.io/kubernetes/pkg/scheduler/testing"
    68  	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
    69  	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
    70  	"k8s.io/utils/ptr"
    71  )
    72  
    73  const (
    74  	testSchedulerName       = "test-scheduler"
    75  	mb                int64 = 1024 * 1024
    76  )
    77  
    78  var (
    79  	emptySnapshot         = internalcache.NewEmptySnapshot()
    80  	podTopologySpreadFunc = frameworkruntime.FactoryAdapter(feature.Features{}, podtopologyspread.New)
    81  	errPrioritize         = fmt.Errorf("priority map encounters an error")
    82  )
    83  
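         // mockScheduleResult holds the ScheduleResult and error returned by the
         // mocked SchedulePod function in TestSchedulerScheduleOne.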
    84  type mockScheduleResult struct {
    85  	result ScheduleResult
    86  	err    error
    87  }
    88  
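         // fakeExtender is a minimal framework.Extender implementation; whether it acts as a binder
         // and which pod it is interested in are configurable, and gotBind records whether Bind was invoked.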
    89  type fakeExtender struct {
    90  	isBinder          bool
    91  	interestedPodName string
    92  	ignorable         bool
    93  	gotBind           bool
    94  }
    95  
    96  func (f *fakeExtender) Name() string {
    97  	return "fakeExtender"
    98  }
    99  
   100  func (f *fakeExtender) IsIgnorable() bool {
   101  	return f.ignorable
   102  }
   103  
   104  func (f *fakeExtender) ProcessPreemption(
   105  	_ *v1.Pod,
   106  	_ map[string]*extenderv1.Victims,
   107  	_ framework.NodeInfoLister,
   108  ) (map[string]*extenderv1.Victims, error) {
   109  	return nil, nil
   110  }
   111  
   112  func (f *fakeExtender) SupportsPreemption() bool {
   113  	return false
   114  }
   115  
   116  func (f *fakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, extenderv1.FailedNodesMap, extenderv1.FailedNodesMap, error) {
   117  	return nil, nil, nil, nil
   118  }
   119  
   120  func (f *fakeExtender) Prioritize(
   121  	_ *v1.Pod,
   122  	_ []*v1.Node,
   123  ) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error) {
   124  	return nil, 0, nil
   125  }
   126  
   127  func (f *fakeExtender) Bind(binding *v1.Binding) error {
   128  	if f.isBinder {
   129  		f.gotBind = true
   130  		return nil
   131  	}
   132  	return errors.New("not a binder")
   133  }
   134  
   135  func (f *fakeExtender) IsBinder() bool {
   136  	return f.isBinder
   137  }
   138  
   139  func (f *fakeExtender) IsInterested(pod *v1.Pod) bool {
   140  	return pod != nil && pod.Name == f.interestedPodName
   141  }
   142  
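         // falseMapPlugin is a Score plugin whose Score call always fails with errPrioritize.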
   143  type falseMapPlugin struct{}
   144  
   145  func newFalseMapPlugin() frameworkruntime.PluginFactory {
   146  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   147  		return &falseMapPlugin{}, nil
   148  	}
   149  }
   150  
   151  func (pl *falseMapPlugin) Name() string {
   152  	return "FalseMap"
   153  }
   154  
   155  func (pl *falseMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (int64, *framework.Status) {
   156  	return 0, framework.AsStatus(errPrioritize)
   157  }
   158  
   159  func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   160  	return nil
   161  }
   162  
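         // numericMapPlugin scores each node by parsing its name as an integer.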
   163  type numericMapPlugin struct{}
   164  
   165  func newNumericMapPlugin() frameworkruntime.PluginFactory {
   166  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   167  		return &numericMapPlugin{}, nil
   168  	}
   169  }
   170  
   171  func (pl *numericMapPlugin) Name() string {
   172  	return "NumericMap"
   173  }
   174  
   175  func (pl *numericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeName string) (int64, *framework.Status) {
   176  	score, err := strconv.Atoi(nodeName)
   177  	if err != nil {
   178  		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error converting nodename to int: %+v", nodeName))
   179  	}
   180  	return int64(score), nil
   181  }
   182  
   183  func (pl *numericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   184  	return nil
   185  }
   186  
   187  // NewNoPodsFilterPlugin initializes a noPodsFilterPlugin and returns it.
   188  func NewNoPodsFilterPlugin(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   189  	return &noPodsFilterPlugin{}, nil
   190  }
   191  
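         // reverseNumericMapPlugin also parses the node name as the raw score, but its NormalizeScore
         // inverts the ordering so that lower-numbered nodes receive higher final scores.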
   192  type reverseNumericMapPlugin struct{}
   193  
   194  func (pl *reverseNumericMapPlugin) Name() string {
   195  	return "ReverseNumericMap"
   196  }
   197  
   198  func (pl *reverseNumericMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeName string) (int64, *framework.Status) {
   199  	score, err := strconv.Atoi(nodeName)
   200  	if err != nil {
   201  		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error converting nodename to int: %+v", nodeName))
   202  	}
   203  	return int64(score), nil
   204  }
   205  
   206  func (pl *reverseNumericMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   207  	return pl
   208  }
   209  
   210  func (pl *reverseNumericMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
   211  	var maxScore float64
   212  	minScore := math.MaxFloat64
   213  
   214  	for _, hostPriority := range nodeScores {
   215  		maxScore = math.Max(maxScore, float64(hostPriority.Score))
   216  		minScore = math.Min(minScore, float64(hostPriority.Score))
   217  	}
   218  	for i, hostPriority := range nodeScores {
   219  		nodeScores[i] = framework.NodeScore{
   220  			Name:  hostPriority.Name,
   221  			Score: int64(maxScore + minScore - float64(hostPriority.Score)),
   222  		}
   223  	}
   224  	return nil
   225  }
   226  
   227  func newReverseNumericMapPlugin() frameworkruntime.PluginFactory {
   228  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   229  		return &reverseNumericMapPlugin{}, nil
   230  	}
   231  }
   232  
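         // trueMapPlugin scores every node with 1 and fails normalization if any host name is empty.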
   233  type trueMapPlugin struct{}
   234  
   235  func (pl *trueMapPlugin) Name() string {
   236  	return "TrueMap"
   237  }
   238  
   239  func (pl *trueMapPlugin) Score(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ string) (int64, *framework.Status) {
   240  	return 1, nil
   241  }
   242  
   243  func (pl *trueMapPlugin) ScoreExtensions() framework.ScoreExtensions {
   244  	return pl
   245  }
   246  
   247  func (pl *trueMapPlugin) NormalizeScore(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeScores framework.NodeScoreList) *framework.Status {
   248  	for _, host := range nodeScores {
   249  		if host.Name == "" {
   250  			return framework.NewStatus(framework.Error, "unexpected empty host name")
   251  		}
   252  	}
   253  	return nil
   254  }
   255  
   256  func newTrueMapPlugin() frameworkruntime.PluginFactory {
   257  	return func(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   258  		return &trueMapPlugin{}, nil
   259  	}
   260  }
   261  
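         // noPodsFilterPlugin rejects any node that already has pods on it.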
   262  type noPodsFilterPlugin struct{}
   263  
    264  // Name returns the name of the plugin.
   265  func (pl *noPodsFilterPlugin) Name() string {
   266  	return "NoPodsFilter"
   267  }
   268  
   269  // Filter invoked at the filter extension point.
   270  func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   271  	if len(nodeInfo.Pods) == 0 {
   272  		return nil
   273  	}
   274  	return framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake)
   275  }
   276  
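         // fakeNodeSelectorArgs configures fakeNodeSelector, a Filter plugin that admits only the node
         // whose name matches NodeName and rejects all others as UnschedulableAndUnresolvable.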
   277  type fakeNodeSelectorArgs struct {
   278  	NodeName string `json:"nodeName"`
   279  }
   280  
   281  type fakeNodeSelector struct {
   282  	fakeNodeSelectorArgs
   283  }
   284  
   285  func (s *fakeNodeSelector) Name() string {
   286  	return "FakeNodeSelector"
   287  }
   288  
   289  func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   290  	if nodeInfo.Node().Name != s.NodeName {
   291  		return framework.NewStatus(framework.UnschedulableAndUnresolvable)
   292  	}
   293  	return nil
   294  }
   295  
   296  func newFakeNodeSelector(_ context.Context, args runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   297  	pl := &fakeNodeSelector{}
   298  	if err := frameworkruntime.DecodeInto(args, &pl.fakeNodeSelectorArgs); err != nil {
   299  		return nil, err
   300  	}
   301  	return pl, nil
   302  }
   303  
   304  const (
   305  	fakeSpecifiedNodeNameAnnotation = "fake-specified-node-name"
   306  )
   307  
    308  // fakeNodeSelectorDependOnPodAnnotation schedules a pod onto the single node named in pod.Annotations[fakeSpecifiedNodeNameAnnotation].
   309  type fakeNodeSelectorDependOnPodAnnotation struct{}
   310  
   311  func (f *fakeNodeSelectorDependOnPodAnnotation) Name() string {
   312  	return "FakeNodeSelectorDependOnPodAnnotation"
   313  }
   314  
    315  // Filter admits only the node named in the pod annotation and rejects all other nodes.
   316  func (f *fakeNodeSelectorDependOnPodAnnotation) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   317  	resolveNodeNameFromPodAnnotation := func(pod *v1.Pod) (string, error) {
   318  		if pod == nil {
   319  			return "", fmt.Errorf("empty pod")
   320  		}
   321  		nodeName, ok := pod.Annotations[fakeSpecifiedNodeNameAnnotation]
   322  		if !ok {
   323  			return "", fmt.Errorf("no specified node name on pod %s/%s annotation", pod.Namespace, pod.Name)
   324  		}
   325  		return nodeName, nil
   326  	}
   327  
   328  	nodeName, err := resolveNodeNameFromPodAnnotation(pod)
   329  	if err != nil {
   330  		return framework.AsStatus(err)
   331  	}
   332  	if nodeInfo.Node().Name != nodeName {
   333  		return framework.NewStatus(framework.UnschedulableAndUnresolvable)
   334  	}
   335  	return nil
   336  }
   337  
   338  func newFakeNodeSelectorDependOnPodAnnotation(_ context.Context, _ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
   339  	return &fakeNodeSelectorDependOnPodAnnotation{}, nil
   340  }
   341  
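         // TestPlugin implements both the Score and Filter extension points with trivial behavior:
         // Score always returns 1 and Filter always succeeds.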
   342  type TestPlugin struct {
   343  	name string
   344  }
   345  
   346  var _ framework.ScorePlugin = &TestPlugin{}
   347  var _ framework.FilterPlugin = &TestPlugin{}
   348  
   349  func (t *TestPlugin) Name() string {
   350  	return t.name
   351  }
   352  
   353  func (t *TestPlugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
   354  	return 1, nil
   355  }
   356  
   357  func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
   358  	return nil
   359  }
   360  
   361  func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
   362  	return nil
   363  }
   364  
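         // TestSchedulerMultipleProfilesScheduling verifies that each pod is picked up by the profile
         // matching its schedulerName and is bound to the node that profile's FakeNodeSelector filter allows.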
   365  func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
   366  	nodes := []runtime.Object{
   367  		st.MakeNode().Name("node1").UID("node1").Obj(),
   368  		st.MakeNode().Name("node2").UID("node2").Obj(),
   369  		st.MakeNode().Name("node3").UID("node3").Obj(),
   370  	}
   371  	pods := []*v1.Pod{
   372  		st.MakePod().Name("pod1").UID("pod1").SchedulerName("match-node3").Obj(),
   373  		st.MakePod().Name("pod2").UID("pod2").SchedulerName("match-node2").Obj(),
   374  		st.MakePod().Name("pod3").UID("pod3").SchedulerName("match-node2").Obj(),
   375  		st.MakePod().Name("pod4").UID("pod4").SchedulerName("match-node3").Obj(),
   376  	}
   377  	wantBindings := map[string]string{
   378  		"pod1": "node3",
   379  		"pod2": "node2",
   380  		"pod3": "node2",
   381  		"pod4": "node3",
   382  	}
   383  	wantControllers := map[string]string{
   384  		"pod1": "match-node3",
   385  		"pod2": "match-node2",
   386  		"pod3": "match-node2",
   387  		"pod4": "match-node3",
   388  	}
   389  
   390  	// Set up scheduler for the 3 nodes.
   391  	// We use a fake filter that only allows one particular node. We create two
   392  	// profiles, each with a different node in the filter configuration.
   393  	objs := append([]runtime.Object{
   394  		&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
   395  	client := clientsetfake.NewSimpleClientset(objs...)
   396  	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
   397  	ctx, cancel := context.WithCancel(context.Background())
   398  	defer cancel()
   399  
   400  	informerFactory := informers.NewSharedInformerFactory(client, 0)
   401  	sched, err := New(
   402  		ctx,
   403  		client,
   404  		informerFactory,
   405  		nil,
   406  		profile.NewRecorderFactory(broadcaster),
   407  		WithProfiles(
   408  			schedulerapi.KubeSchedulerProfile{SchedulerName: "match-node2",
   409  				Plugins: &schedulerapi.Plugins{
   410  					Filter:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelector"}}},
   411  					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
   412  					Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
   413  				},
   414  				PluginConfig: []schedulerapi.PluginConfig{
   415  					{
   416  						Name: "FakeNodeSelector",
   417  						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"node2"}`)},
   418  					},
   419  				},
   420  			},
   421  			schedulerapi.KubeSchedulerProfile{
   422  				SchedulerName: "match-node3",
   423  				Plugins: &schedulerapi.Plugins{
   424  					Filter:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelector"}}},
   425  					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
   426  					Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
   427  				},
   428  				PluginConfig: []schedulerapi.PluginConfig{
   429  					{
   430  						Name: "FakeNodeSelector",
   431  						Args: &runtime.Unknown{Raw: []byte(`{"nodeName":"node3"}`)},
   432  					},
   433  				},
   434  			},
   435  		),
   436  		WithFrameworkOutOfTreeRegistry(frameworkruntime.Registry{
   437  			"FakeNodeSelector": newFakeNodeSelector,
   438  		}),
   439  	)
   440  	if err != nil {
   441  		t.Fatal(err)
   442  	}
   443  
   444  	// Capture the bindings and events' controllers.
   445  	var wg sync.WaitGroup
   446  	wg.Add(2 * len(pods))
   447  	bindings := make(map[string]string)
   448  	client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
   449  		if action.GetSubresource() != "binding" {
   450  			return false, nil, nil
   451  		}
   452  		binding := action.(clienttesting.CreateAction).GetObject().(*v1.Binding)
   453  		bindings[binding.Name] = binding.Target.Name
   454  		wg.Done()
   455  		return true, binding, nil
   456  	})
   457  	controllers := make(map[string]string)
   458  	stopFn, err := broadcaster.StartEventWatcher(func(obj runtime.Object) {
   459  		e, ok := obj.(*eventsv1.Event)
   460  		if !ok || e.Reason != "Scheduled" {
   461  			return
   462  		}
   463  		controllers[e.Regarding.Name] = e.ReportingController
   464  		wg.Done()
   465  	})
   466  	if err != nil {
   467  		t.Fatal(err)
   468  	}
   469  	defer stopFn()
   470  
   471  	// Run scheduler.
   472  	informerFactory.Start(ctx.Done())
   473  	informerFactory.WaitForCacheSync(ctx.Done())
   474  	if err = sched.WaitForHandlersSync(ctx); err != nil {
    475  		t.Fatalf("Handlers failed to sync: %v", err)
   476  	}
   477  	go sched.Run(ctx)
   478  
   479  	// Send pods to be scheduled.
   480  	for _, p := range pods {
   481  		_, err := client.CoreV1().Pods("").Create(ctx, p, metav1.CreateOptions{})
   482  		if err != nil {
   483  			t.Fatal(err)
   484  		}
   485  	}
   486  	wg.Wait()
   487  
   488  	// Verify correct bindings and reporting controllers.
   489  	if diff := cmp.Diff(wantBindings, bindings); diff != "" {
   490  		t.Errorf("pods were scheduled incorrectly (-want, +got):\n%s", diff)
   491  	}
   492  	if diff := cmp.Diff(wantControllers, controllers); diff != "" {
   493  		t.Errorf("events were reported with wrong controllers (-want, +got):\n%s", diff)
   494  	}
   495  }
   496  
    497  // TestSchedulerGuaranteeNonNilNodeInSchedulingCycle detects a potential panic on a nil Node when the scheduling cycle iterates over Nodes while nodes are being deleted concurrently.
   498  func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) {
   499  	random := rand.New(rand.NewSource(time.Now().UnixNano()))
   500  	ctx, cancel := context.WithCancel(context.Background())
   501  	defer cancel()
   502  
   503  	var (
   504  		initialNodeNumber        = 1000
   505  		initialPodNumber         = 500
   506  		waitSchedulingPodNumber  = 200
   507  		deleteNodeNumberPerRound = 20
   508  		createPodNumberPerRound  = 50
   509  
   510  		fakeSchedulerName = "fake-scheduler"
   511  		fakeNamespace     = "fake-namespace"
   512  
   513  		initialNodes []runtime.Object
   514  		initialPods  []runtime.Object
   515  	)
   516  
   517  	for i := 0; i < initialNodeNumber; i++ {
   518  		nodeName := fmt.Sprintf("node%d", i)
   519  		initialNodes = append(initialNodes, st.MakeNode().Name(nodeName).UID(nodeName).Obj())
   520  	}
   521  	// Randomly scatter initial pods onto nodes.
   522  	for i := 0; i < initialPodNumber; i++ {
   523  		podName := fmt.Sprintf("scheduled-pod%d", i)
   524  		assignedNodeName := fmt.Sprintf("node%d", random.Intn(initialNodeNumber))
   525  		initialPods = append(initialPods, st.MakePod().Name(podName).UID(podName).Node(assignedNodeName).Obj())
   526  	}
   527  
   528  	objs := []runtime.Object{&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: fakeNamespace}}}
   529  	objs = append(objs, initialNodes...)
   530  	objs = append(objs, initialPods...)
   531  	client := clientsetfake.NewSimpleClientset(objs...)
   532  	broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
   533  
   534  	informerFactory := informers.NewSharedInformerFactory(client, 0)
   535  	sched, err := New(
   536  		ctx,
   537  		client,
   538  		informerFactory,
   539  		nil,
   540  		profile.NewRecorderFactory(broadcaster),
   541  		WithProfiles(
   542  			schedulerapi.KubeSchedulerProfile{SchedulerName: fakeSchedulerName,
   543  				Plugins: &schedulerapi.Plugins{
   544  					Filter:    schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "FakeNodeSelectorDependOnPodAnnotation"}}},
   545  					QueueSort: schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "PrioritySort"}}},
   546  					Bind:      schedulerapi.PluginSet{Enabled: []schedulerapi.Plugin{{Name: "DefaultBinder"}}},
   547  				},
   548  			},
   549  		),
   550  		WithFrameworkOutOfTreeRegistry(frameworkruntime.Registry{
   551  			"FakeNodeSelectorDependOnPodAnnotation": newFakeNodeSelectorDependOnPodAnnotation,
   552  		}),
   553  	)
   554  	if err != nil {
   555  		t.Fatal(err)
   556  	}
   557  
   558  	// Run scheduler.
   559  	informerFactory.Start(ctx.Done())
   560  	informerFactory.WaitForCacheSync(ctx.Done())
   561  	go sched.Run(ctx)
   562  
   563  	var deleteNodeIndex int
   564  	deleteNodesOneRound := func() {
   565  		for i := 0; i < deleteNodeNumberPerRound; i++ {
   566  			if deleteNodeIndex >= initialNodeNumber {
   567  				// all initial nodes are already deleted
   568  				return
   569  			}
   570  			deleteNodeName := fmt.Sprintf("node%d", deleteNodeIndex)
   571  			if err := client.CoreV1().Nodes().Delete(ctx, deleteNodeName, metav1.DeleteOptions{}); err != nil {
   572  				t.Fatal(err)
   573  			}
   574  			deleteNodeIndex++
   575  		}
   576  	}
   577  	var createPodIndex int
   578  	createPodsOneRound := func() {
   579  		if createPodIndex > waitSchedulingPodNumber {
   580  			return
   581  		}
   582  		for i := 0; i < createPodNumberPerRound; i++ {
   583  			podName := fmt.Sprintf("pod%d", createPodIndex)
    584  			// Note: the node (specifiedNodeName) may already be deleted, in which case scheduling the pod fails.
   585  			specifiedNodeName := fmt.Sprintf("node%d", random.Intn(initialNodeNumber))
   586  
   587  			waitSchedulingPod := st.MakePod().Namespace(fakeNamespace).Name(podName).UID(podName).Annotation(fakeSpecifiedNodeNameAnnotation, specifiedNodeName).SchedulerName(fakeSchedulerName).Obj()
   588  			if _, err := client.CoreV1().Pods(fakeNamespace).Create(ctx, waitSchedulingPod, metav1.CreateOptions{}); err != nil {
   589  				t.Fatal(err)
   590  			}
   591  			createPodIndex++
   592  		}
   593  	}
   594  
    595  	// Next, we start 2 goroutines asynchronously to detect potential racing issues:
    596  	// 1) one deletes several nodes in each round;
    597  	// 2) the other creates several pods in each round to trigger scheduling.
    598  	// Both goroutines run until ctx.Done() is closed, i.e. until every waiting pod has been scheduled (or has failed scheduling) at least once.
   599  	go wait.Until(deleteNodesOneRound, 10*time.Millisecond, ctx.Done())
   600  	go wait.Until(createPodsOneRound, 9*time.Millisecond, ctx.Done())
   601  
    602  	// Capture events to wait for every pod to be scheduled, or to fail scheduling, at least once.
   603  	allWaitSchedulingPods := sets.New[string]()
   604  	for i := 0; i < waitSchedulingPodNumber; i++ {
   605  		allWaitSchedulingPods.Insert(fmt.Sprintf("pod%d", i))
   606  	}
   607  	var wg sync.WaitGroup
   608  	wg.Add(waitSchedulingPodNumber)
   609  	stopFn, err := broadcaster.StartEventWatcher(func(obj runtime.Object) {
   610  		e, ok := obj.(*eventsv1.Event)
   611  		if !ok || (e.Reason != "Scheduled" && e.Reason != "FailedScheduling") {
   612  			return
   613  		}
   614  		if allWaitSchedulingPods.Has(e.Regarding.Name) {
   615  			wg.Done()
   616  			allWaitSchedulingPods.Delete(e.Regarding.Name)
   617  		}
   618  	})
   619  	if err != nil {
   620  		t.Fatal(err)
   621  	}
   622  	defer stopFn()
   623  
   624  	wg.Wait()
   625  }
   626  
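         // TestSchedulerScheduleOne drives a single scheduleOne cycle against a mocked SchedulePod and checks the
         // assumed pod, forgotten pod, reported error, emitted event reason, and resulting binding for both
         // successful binding and failing Reserve/Permit/PreBind/Bind cases.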
   627  func TestSchedulerScheduleOne(t *testing.T) {
   628  	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
   629  	client := clientsetfake.NewSimpleClientset(&testNode)
   630  	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
   631  	errS := errors.New("scheduler")
   632  	errB := errors.New("binder")
   633  	preBindErr := errors.New("on PreBind")
   634  
   635  	table := []struct {
   636  		name                string
   637  		injectBindError     error
   638  		sendPod             *v1.Pod
   639  		registerPluginFuncs []tf.RegisterPluginFunc
   640  		expectErrorPod      *v1.Pod
   641  		expectForgetPod     *v1.Pod
   642  		expectAssumedPod    *v1.Pod
   643  		expectError         error
   644  		expectBind          *v1.Binding
   645  		eventReason         string
   646  		mockResult          mockScheduleResult
   647  	}{
   648  		{
   649  			name:       "error reserve pod",
   650  			sendPod:    podWithID("foo", ""),
   651  			mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   652  			registerPluginFuncs: []tf.RegisterPluginFunc{
   653  				tf.RegisterReservePlugin("FakeReserve", tf.NewFakeReservePlugin(framework.NewStatus(framework.Error, "reserve error"))),
   654  			},
   655  			expectErrorPod:   podWithID("foo", testNode.Name),
   656  			expectForgetPod:  podWithID("foo", testNode.Name),
   657  			expectAssumedPod: podWithID("foo", testNode.Name),
   658  			expectError:      fmt.Errorf(`running Reserve plugin "FakeReserve": %w`, errors.New("reserve error")),
   659  			eventReason:      "FailedScheduling",
   660  		},
   661  		{
   662  			name:       "error permit pod",
   663  			sendPod:    podWithID("foo", ""),
   664  			mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   665  			registerPluginFuncs: []tf.RegisterPluginFunc{
   666  				tf.RegisterPermitPlugin("FakePermit", tf.NewFakePermitPlugin(framework.NewStatus(framework.Error, "permit error"), time.Minute)),
   667  			},
   668  			expectErrorPod:   podWithID("foo", testNode.Name),
   669  			expectForgetPod:  podWithID("foo", testNode.Name),
   670  			expectAssumedPod: podWithID("foo", testNode.Name),
   671  			expectError:      fmt.Errorf(`running Permit plugin "FakePermit": %w`, errors.New("permit error")),
   672  			eventReason:      "FailedScheduling",
   673  		},
   674  		{
   675  			name:       "error prebind pod",
   676  			sendPod:    podWithID("foo", ""),
   677  			mockResult: mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   678  			registerPluginFuncs: []tf.RegisterPluginFunc{
   679  				tf.RegisterPreBindPlugin("FakePreBind", tf.NewFakePreBindPlugin(framework.AsStatus(preBindErr))),
   680  			},
   681  			expectErrorPod:   podWithID("foo", testNode.Name),
   682  			expectForgetPod:  podWithID("foo", testNode.Name),
   683  			expectAssumedPod: podWithID("foo", testNode.Name),
   684  			expectError:      fmt.Errorf(`running PreBind plugin "FakePreBind": %w`, preBindErr),
   685  			eventReason:      "FailedScheduling",
   686  		},
   687  		{
   688  			name:             "bind assumed pod scheduled",
   689  			sendPod:          podWithID("foo", ""),
   690  			mockResult:       mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   691  			expectBind:       &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
   692  			expectAssumedPod: podWithID("foo", testNode.Name),
   693  			eventReason:      "Scheduled",
   694  		},
   695  		{
   696  			name:           "error pod failed scheduling",
   697  			sendPod:        podWithID("foo", ""),
   698  			mockResult:     mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, errS},
   699  			expectError:    errS,
   700  			expectErrorPod: podWithID("foo", ""),
   701  			eventReason:    "FailedScheduling",
   702  		},
   703  		{
   704  			name:             "error bind forget pod failed scheduling",
   705  			sendPod:          podWithID("foo", ""),
   706  			mockResult:       mockScheduleResult{ScheduleResult{SuggestedHost: testNode.Name, EvaluatedNodes: 1, FeasibleNodes: 1}, nil},
   707  			expectBind:       &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
   708  			expectAssumedPod: podWithID("foo", testNode.Name),
   709  			injectBindError:  errB,
   710  			expectError:      fmt.Errorf("running Bind plugin %q: %w", "DefaultBinder", errors.New("binder")),
   711  			expectErrorPod:   podWithID("foo", testNode.Name),
   712  			expectForgetPod:  podWithID("foo", testNode.Name),
   713  			eventReason:      "FailedScheduling",
   714  		},
   715  		{
   716  			name:        "deleting pod",
   717  			sendPod:     deletingPod("foo"),
   718  			mockResult:  mockScheduleResult{ScheduleResult{}, nil},
   719  			eventReason: "FailedScheduling",
   720  		},
   721  	}
   722  
   723  	for _, item := range table {
   724  		t.Run(item.name, func(t *testing.T) {
   725  			var gotError error
   726  			var gotPod *v1.Pod
   727  			var gotForgetPod *v1.Pod
   728  			var gotAssumedPod *v1.Pod
   729  			var gotBinding *v1.Binding
   730  			cache := &fakecache.Cache{
   731  				ForgetFunc: func(pod *v1.Pod) {
   732  					gotForgetPod = pod
   733  				},
   734  				AssumeFunc: func(pod *v1.Pod) {
   735  					gotAssumedPod = pod
   736  				},
   737  				IsAssumedPodFunc: func(pod *v1.Pod) bool {
   738  					if pod == nil || gotAssumedPod == nil {
   739  						return false
   740  					}
   741  					return pod.UID == gotAssumedPod.UID
   742  				},
   743  			}
   744  			client := clientsetfake.NewSimpleClientset(item.sendPod)
   745  			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
   746  				if action.GetSubresource() != "binding" {
   747  					return false, nil, nil
   748  				}
   749  				gotBinding = action.(clienttesting.CreateAction).GetObject().(*v1.Binding)
   750  				return true, gotBinding, item.injectBindError
   751  			})
   752  			registerPluginFuncs := append(item.registerPluginFuncs,
   753  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
   754  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
   755  			)
   756  			ctx, cancel := context.WithCancel(context.Background())
   757  			defer cancel()
   758  			fwk, err := tf.NewFramework(ctx,
   759  				registerPluginFuncs,
   760  				testSchedulerName,
   761  				frameworkruntime.WithClientSet(client),
   762  				frameworkruntime.WithEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, testSchedulerName)))
   763  			if err != nil {
   764  				t.Fatal(err)
   765  			}
   766  
   767  			sched := &Scheduler{
   768  				Cache:  cache,
   769  				client: client,
   770  				NextPod: func(logger klog.Logger) (*framework.QueuedPodInfo, error) {
   771  					return &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, item.sendPod)}, nil
   772  				},
   773  				SchedulingQueue: internalqueue.NewTestQueue(ctx, nil),
   774  				Profiles:        profile.Map{testSchedulerName: fwk},
   775  			}
   776  
   777  			sched.SchedulePod = func(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) (ScheduleResult, error) {
   778  				return item.mockResult.result, item.mockResult.err
   779  			}
   780  			sched.FailureHandler = func(_ context.Context, fwk framework.Framework, p *framework.QueuedPodInfo, status *framework.Status, _ *framework.NominatingInfo, _ time.Time) {
   781  				gotPod = p.Pod
   782  				gotError = status.AsError()
   783  
   784  				msg := truncateMessage(gotError.Error())
   785  				fwk.EventRecorder().Eventf(p.Pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", msg)
   786  			}
   787  			called := make(chan struct{})
   788  			stopFunc, err := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
   789  				e, _ := obj.(*eventsv1.Event)
   790  				if e.Reason != item.eventReason {
   791  					t.Errorf("got event %v, want %v", e.Reason, item.eventReason)
   792  				}
   793  				close(called)
   794  			})
   795  			if err != nil {
   796  				t.Fatal(err)
   797  			}
   798  			sched.scheduleOne(ctx)
   799  			<-called
   800  			if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
   801  				t.Errorf("assumed pod: wanted %v, got %v", e, a)
   802  			}
   803  			if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
   804  				t.Errorf("error pod: wanted %v, got %v", e, a)
   805  			}
   806  			if e, a := item.expectForgetPod, gotForgetPod; !reflect.DeepEqual(e, a) {
   807  				t.Errorf("forget pod: wanted %v, got %v", e, a)
   808  			}
   809  			if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
   810  				t.Errorf("error: wanted %v, got %v", e, a)
   811  			}
   812  			if diff := cmp.Diff(item.expectBind, gotBinding); diff != "" {
   813  				t.Errorf("got binding diff (-want, +got): %s", diff)
   814  			}
   815  			stopFunc()
   816  		})
   817  	}
   818  }
   819  
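         // TestSchedulerNoPhantomPodAfterExpire verifies that once an assumed pod expires from the scheduler
         // cache, a second pod requesting the same host port can be scheduled onto the same node.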
   820  func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
   821  	logger, ctx := ktesting.NewTestContext(t)
   822  	ctx, cancel := context.WithCancel(ctx)
   823  	defer cancel()
   824  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
   825  	scache := internalcache.New(ctx, 100*time.Millisecond)
   826  	pod := podWithPort("pod.Name", "", 8080)
   827  	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
   828  	scache.AddNode(logger, &node)
   829  
   830  	fns := []tf.RegisterPluginFunc{
   831  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
   832  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
   833  		tf.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"),
   834  	}
   835  	scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(ctx, t, queuedPodStore, scache, pod, &node, fns...)
   836  
   837  	waitPodExpireChan := make(chan struct{})
   838  	timeout := make(chan struct{})
   839  	go func() {
   840  		for {
   841  			select {
   842  			case <-timeout:
   843  				return
   844  			default:
   845  			}
   846  			pods, err := scache.PodCount()
   847  			if err != nil {
   848  				errChan <- fmt.Errorf("cache.List failed: %v", err)
   849  				return
   850  			}
   851  			if pods == 0 {
   852  				close(waitPodExpireChan)
   853  				return
   854  			}
   855  			time.Sleep(100 * time.Millisecond)
   856  		}
   857  	}()
   858  	// waiting for the assumed pod to expire
   859  	select {
   860  	case err := <-errChan:
   861  		t.Fatal(err)
   862  	case <-waitPodExpireChan:
   863  	case <-time.After(wait.ForeverTestTimeout):
   864  		close(timeout)
    865  		t.Fatalf("timeout waiting for pod to expire after %v", wait.ForeverTestTimeout)
   866  	}
   867  
    868  	// We use conflicting pod ports to trigger a fit predicate failure if the first pod is not removed.
   869  	secondPod := podWithPort("bar", "", 8080)
   870  	queuedPodStore.Add(secondPod)
   871  	scheduler.scheduleOne(ctx)
   872  	select {
   873  	case b := <-bindingChan:
   874  		expectBinding := &v1.Binding{
   875  			ObjectMeta: metav1.ObjectMeta{Name: "bar", UID: types.UID("bar")},
   876  			Target:     v1.ObjectReference{Kind: "Node", Name: node.Name},
   877  		}
   878  		if !reflect.DeepEqual(expectBinding, b) {
    879  			t.Errorf("binding want=%v, got=%v", expectBinding, b)
   880  		}
   881  	case <-time.After(wait.ForeverTestTimeout):
   882  		t.Fatalf("timeout in binding after %v", wait.ForeverTestTimeout)
   883  	}
   884  }
   885  
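         // TestSchedulerNoPhantomPodAfterDelete verifies that a pod with a conflicting host port is rejected
         // while the first pod still occupies the node, and schedules successfully once the first pod is
         // removed from the cache.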
   886  func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
   887  	logger, ctx := ktesting.NewTestContext(t)
   888  	ctx, cancel := context.WithCancel(ctx)
   889  	defer cancel()
   890  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
   891  	scache := internalcache.New(ctx, 10*time.Minute)
   892  	firstPod := podWithPort("pod.Name", "", 8080)
   893  	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
   894  	scache.AddNode(logger, &node)
   895  	fns := []tf.RegisterPluginFunc{
   896  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
   897  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
   898  		tf.RegisterPluginAsExtensions(nodeports.Name, nodeports.New, "Filter", "PreFilter"),
   899  	}
   900  	scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(ctx, t, queuedPodStore, scache, firstPod, &node, fns...)
   901  
    902  	// We use conflicting pod ports to trigger a fit predicate failure.
   903  	secondPod := podWithPort("bar", "", 8080)
   904  	queuedPodStore.Add(secondPod)
   905  	// queuedPodStore: [bar:8080]
   906  	// cache: [(assumed)foo:8080]
   907  
   908  	scheduler.scheduleOne(ctx)
   909  	select {
   910  	case err := <-errChan:
   911  		expectErr := &framework.FitError{
   912  			Pod:         secondPod,
   913  			NumAllNodes: 1,
   914  			Diagnosis: framework.Diagnosis{
   915  				NodeToStatusMap: framework.NodeToStatusMap{
   916  					node.Name: framework.NewStatus(framework.Unschedulable, nodeports.ErrReason).WithPlugin(nodeports.Name),
   917  				},
   918  				UnschedulablePlugins: sets.New(nodeports.Name),
   919  			},
   920  		}
   921  		if !reflect.DeepEqual(expectErr, err) {
    922  			t.Errorf("err want=%v, got=%v", expectErr, err)
   923  		}
   924  	case <-time.After(wait.ForeverTestTimeout):
   925  		t.Fatalf("timeout in fitting after %v", wait.ForeverTestTimeout)
   926  	}
   927  
    928  	// We mimic the cache behavior when a pod is removed by the user.
    929  	// Note: if the schedulernodeinfo timeout were very short, the first pod would expire
    930  	// and be removed on its own (without any explicit action on schedulernodeinfo). Even in that case,
    931  	// explicitly calling AddPod still corrects the behavior.
   932  	firstPod.Spec.NodeName = node.Name
   933  	if err := scache.AddPod(logger, firstPod); err != nil {
   934  		t.Fatalf("err: %v", err)
   935  	}
   936  	if err := scache.RemovePod(logger, firstPod); err != nil {
   937  		t.Fatalf("err: %v", err)
   938  	}
   939  
   940  	queuedPodStore.Add(secondPod)
   941  	scheduler.scheduleOne(ctx)
   942  	select {
   943  	case b := <-bindingChan:
   944  		expectBinding := &v1.Binding{
   945  			ObjectMeta: metav1.ObjectMeta{Name: "bar", UID: types.UID("bar")},
   946  			Target:     v1.ObjectReference{Kind: "Node", Name: node.Name},
   947  		}
   948  		if !reflect.DeepEqual(expectBinding, b) {
    949  			t.Errorf("binding want=%v, got=%v", expectBinding, b)
   950  		}
   951  	case <-time.After(wait.ForeverTestTimeout):
   952  		t.Fatalf("timeout in binding after %v", wait.ForeverTestTimeout)
   953  	}
   954  }
   955  
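         // TestSchedulerFailedSchedulingReasons verifies that scheduling an oversized pod across many
         // too-small nodes yields a FitError with per-node statuses while keeping the aggregated error
         // message short.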
   956  func TestSchedulerFailedSchedulingReasons(t *testing.T) {
   957  	logger, ctx := ktesting.NewTestContext(t)
   958  	ctx, cancel := context.WithCancel(ctx)
   959  	defer cancel()
   960  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
   961  	scache := internalcache.New(ctx, 10*time.Minute)
   962  
    963  	// Design the baseline for the pod; later we will create nodes that are too small to fit it.
   964  	var cpu = int64(4)
   965  	var mem = int64(500)
   966  	podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{
   967  		v1.ResourceCPU:    *(resource.NewQuantity(cpu, resource.DecimalSI)),
   968  		v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
   969  	}, v1.ResourceList{
   970  		v1.ResourceCPU:    *(resource.NewQuantity(cpu, resource.DecimalSI)),
   971  		v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
   972  	})
   973  
    974  	// Create several nodes that cannot fit the above pod.
   975  	var nodes []*v1.Node
   976  	var objects []runtime.Object
   977  	for i := 0; i < 100; i++ {
   978  		uid := fmt.Sprintf("node%v", i)
   979  		node := v1.Node{
   980  			ObjectMeta: metav1.ObjectMeta{Name: uid, UID: types.UID(uid)},
   981  			Status: v1.NodeStatus{
   982  				Capacity: v1.ResourceList{
   983  					v1.ResourceCPU:    *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
   984  					v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
   985  					v1.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
   986  				},
   987  				Allocatable: v1.ResourceList{
   988  					v1.ResourceCPU:    *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
   989  					v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
   990  					v1.ResourcePods:   *(resource.NewQuantity(10, resource.DecimalSI)),
   991  				}},
   992  		}
   993  		scache.AddNode(logger, &node)
   994  		nodes = append(nodes, &node)
   995  		objects = append(objects, &node)
   996  	}
   997  
   998  	// Create expected failure reasons for all the nodes. Hopefully they will get rolled up into a non-spammy summary.
    999  	failedNodeStatuses := framework.NodeToStatusMap{}
   1000  	for _, node := range nodes {
   1001  		failedNodeStatuses[node.Name] = framework.NewStatus(
  1002  			framework.Unschedulable,
  1003  			fmt.Sprintf("Insufficient %v", v1.ResourceCPU),
  1004  			fmt.Sprintf("Insufficient %v", v1.ResourceMemory),
  1005  		).WithPlugin(noderesources.Name)
  1006  	}
  1007  	fns := []tf.RegisterPluginFunc{
  1008  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1009  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1010  		tf.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
  1011  	}
  1012  
  1013  	informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(objects...), 0)
  1014  	scheduler, _, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, nil, fns...)
  1015  
  1016  	queuedPodStore.Add(podWithTooBigResourceRequests)
  1017  	scheduler.scheduleOne(ctx)
  1018  	select {
  1019  	case err := <-errChan:
  1020  		expectErr := &framework.FitError{
  1021  			Pod:         podWithTooBigResourceRequests,
  1022  			NumAllNodes: len(nodes),
  1023  			Diagnosis: framework.Diagnosis{
   1024  				NodeToStatusMap:      failedNodeStatuses,
  1025  				UnschedulablePlugins: sets.New(noderesources.Name),
  1026  			},
  1027  		}
  1028  		if len(fmt.Sprint(expectErr)) > 150 {
   1029  			t.Errorf("message is too spammy! %v", len(fmt.Sprint(expectErr)))
  1030  		}
  1031  		if !reflect.DeepEqual(expectErr, err) {
  1032  			t.Errorf("\n err \nWANT=%+v,\nGOT=%+v", expectErr, err)
  1033  		}
  1034  	case <-time.After(wait.ForeverTestTimeout):
  1035  		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
  1036  	}
  1037  }
  1038  
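         // TestSchedulerWithVolumeBinding runs scheduleOne with a fake volume binder and checks the resulting
         // event, binding or error, and whether Assume and Bind were called for each configured
         // volume-binding outcome.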
  1039  func TestSchedulerWithVolumeBinding(t *testing.T) {
  1040  	findErr := fmt.Errorf("find err")
  1041  	assumeErr := fmt.Errorf("assume err")
  1042  	bindErr := fmt.Errorf("bind err")
  1043  	client := clientsetfake.NewSimpleClientset()
  1044  
  1045  	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
  1046  
   1047  	// This can be small because we wait for the pod to finish scheduling first.
  1048  	chanTimeout := 2 * time.Second
  1049  
  1050  	table := []struct {
  1051  		name               string
  1052  		expectError        error
  1053  		expectPodBind      *v1.Binding
  1054  		expectAssumeCalled bool
  1055  		expectBindCalled   bool
  1056  		eventReason        string
  1057  		volumeBinderConfig *volumebinding.FakeVolumeBinderConfig
  1058  	}{
  1059  		{
  1060  			name: "all bound",
  1061  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1062  				AllBound: true,
  1063  			},
  1064  			expectAssumeCalled: true,
  1065  			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "node1"}},
  1066  			eventReason:        "Scheduled",
  1067  		},
  1068  		{
  1069  			name: "bound/invalid pv affinity",
  1070  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1071  				AllBound:    true,
  1072  				FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonNodeConflict},
  1073  			},
  1074  			eventReason: "FailedScheduling",
  1075  			expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
  1076  		},
  1077  		{
  1078  			name: "unbound/no matches",
  1079  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1080  				FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonBindConflict},
  1081  			},
  1082  			eventReason: "FailedScheduling",
  1083  			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
  1084  		},
  1085  		{
  1086  			name: "bound and unbound unsatisfied",
  1087  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1088  				FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonBindConflict, volumebinding.ErrReasonNodeConflict},
  1089  			},
  1090  			eventReason: "FailedScheduling",
  1091  			expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
  1092  		},
  1093  		{
  1094  			name:               "unbound/found matches/bind succeeds",
  1095  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{},
  1096  			expectAssumeCalled: true,
  1097  			expectBindCalled:   true,
  1098  			expectPodBind:      &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "node1"}},
  1099  			eventReason:        "Scheduled",
  1100  		},
  1101  		{
  1102  			name: "predicate error",
  1103  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1104  				FindErr: findErr,
  1105  			},
  1106  			eventReason: "FailedScheduling",
  1107  			expectError: fmt.Errorf("running %q filter plugin: %v", volumebinding.Name, findErr),
  1108  		},
  1109  		{
  1110  			name: "assume error",
  1111  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1112  				AssumeErr: assumeErr,
  1113  			},
  1114  			expectAssumeCalled: true,
  1115  			eventReason:        "FailedScheduling",
  1116  			expectError:        fmt.Errorf("running Reserve plugin %q: %w", volumebinding.Name, assumeErr),
  1117  		},
  1118  		{
  1119  			name: "bind error",
  1120  			volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
  1121  				BindErr: bindErr,
  1122  			},
  1123  			expectAssumeCalled: true,
  1124  			expectBindCalled:   true,
  1125  			eventReason:        "FailedScheduling",
  1126  			expectError:        fmt.Errorf("running PreBind plugin %q: %w", volumebinding.Name, bindErr),
  1127  		},
  1128  	}
  1129  
  1130  	for _, item := range table {
  1131  		t.Run(item.name, func(t *testing.T) {
  1132  			ctx, cancel := context.WithCancel(context.Background())
  1133  			defer cancel()
  1134  			fakeVolumeBinder := volumebinding.NewFakeVolumeBinder(item.volumeBinderConfig)
  1135  			s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(ctx, t, fakeVolumeBinder, eventBroadcaster)
  1136  			eventChan := make(chan struct{})
  1137  			stopFunc, err := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
  1138  				e, _ := obj.(*eventsv1.Event)
  1139  				if e, a := item.eventReason, e.Reason; e != a {
  1140  					t.Errorf("expected %v, got %v", e, a)
  1141  				}
  1142  				close(eventChan)
  1143  			})
  1144  			if err != nil {
  1145  				t.Fatal(err)
  1146  			}
  1147  			s.scheduleOne(ctx)
  1148  			// Wait for pod to succeed or fail scheduling
  1149  			select {
  1150  			case <-eventChan:
  1151  			case <-time.After(wait.ForeverTestTimeout):
  1152  				t.Fatalf("scheduling timeout after %v", wait.ForeverTestTimeout)
  1153  			}
  1154  			stopFunc()
   1155  			// Wait for scheduling to return an error or for the binding to succeed.
  1156  			var (
  1157  				gotErr  error
  1158  				gotBind *v1.Binding
  1159  			)
  1160  			select {
  1161  			case gotErr = <-errChan:
  1162  			case gotBind = <-bindingChan:
  1163  			case <-time.After(chanTimeout):
  1164  				t.Fatalf("did not receive pod binding or error after %v", chanTimeout)
  1165  			}
  1166  			if item.expectError != nil {
  1167  				if gotErr == nil || item.expectError.Error() != gotErr.Error() {
  1168  					t.Errorf("err \nWANT=%+v,\nGOT=%+v", item.expectError, gotErr)
  1169  				}
  1170  			} else if gotErr != nil {
  1171  				t.Errorf("err \nWANT=%+v,\nGOT=%+v", item.expectError, gotErr)
  1172  			}
  1173  			if !cmp.Equal(item.expectPodBind, gotBind) {
  1174  				t.Errorf("err \nWANT=%+v,\nGOT=%+v", item.expectPodBind, gotBind)
  1175  			}
  1176  
  1177  			if item.expectAssumeCalled != fakeVolumeBinder.AssumeCalled {
  1178  				t.Errorf("expectedAssumeCall %v", item.expectAssumeCalled)
  1179  			}
  1180  
  1181  			if item.expectBindCalled != fakeVolumeBinder.BindCalled {
  1182  				t.Errorf("expectedBindCall %v", item.expectBindCalled)
  1183  			}
  1184  		})
  1185  	}
  1186  }
  1187  
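         // TestSchedulerBinding verifies that bind requests are delegated to an extender that is a binder
         // and interested in the pod, and otherwise fall back to the default binder.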
  1188  func TestSchedulerBinding(t *testing.T) {
  1189  	table := []struct {
  1190  		podName      string
  1191  		extenders    []framework.Extender
  1192  		wantBinderID int
  1193  		name         string
  1194  	}{
  1195  		{
  1196  			name:    "the extender is not a binder",
  1197  			podName: "pod0",
  1198  			extenders: []framework.Extender{
  1199  				&fakeExtender{isBinder: false, interestedPodName: "pod0"},
  1200  			},
  1201  			wantBinderID: -1, // default binding.
  1202  		},
  1203  		{
  1204  			name:    "one of the extenders is a binder and interested in pod",
  1205  			podName: "pod0",
  1206  			extenders: []framework.Extender{
  1207  				&fakeExtender{isBinder: false, interestedPodName: "pod0"},
  1208  				&fakeExtender{isBinder: true, interestedPodName: "pod0"},
  1209  			},
  1210  			wantBinderID: 1,
  1211  		},
  1212  		{
  1213  			name:    "one of the extenders is a binder, but not interested in pod",
  1214  			podName: "pod1",
  1215  			extenders: []framework.Extender{
  1216  				&fakeExtender{isBinder: false, interestedPodName: "pod1"},
  1217  				&fakeExtender{isBinder: true, interestedPodName: "pod0"},
  1218  			},
  1219  			wantBinderID: -1, // default binding.
  1220  		},
  1221  	}
  1222  
  1223  	for _, test := range table {
  1224  		t.Run(test.name, func(t *testing.T) {
  1225  			pod := st.MakePod().Name(test.podName).Obj()
  1226  			defaultBound := false
  1227  			client := clientsetfake.NewSimpleClientset(pod)
  1228  			client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
  1229  				if action.GetSubresource() == "binding" {
  1230  					defaultBound = true
  1231  				}
  1232  				return false, nil, nil
  1233  			})
  1234  			_, ctx := ktesting.NewTestContext(t)
  1235  			ctx, cancel := context.WithCancel(ctx)
  1236  			defer cancel()
  1237  			fwk, err := tf.NewFramework(ctx,
  1238  				[]tf.RegisterPluginFunc{
  1239  					tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1240  					tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1241  				}, "", frameworkruntime.WithClientSet(client), frameworkruntime.WithEventRecorder(&events.FakeRecorder{}))
  1242  			if err != nil {
  1243  				t.Fatal(err)
  1244  			}
  1245  			sched := &Scheduler{
  1246  				Extenders:                test.extenders,
  1247  				Cache:                    internalcache.New(ctx, 100*time.Millisecond),
  1248  				nodeInfoSnapshot:         nil,
  1249  				percentageOfNodesToScore: 0,
  1250  			}
  1251  			status := sched.bind(ctx, fwk, pod, "node", nil)
  1252  			if !status.IsSuccess() {
  1253  				t.Error(status.AsError())
  1254  			}
  1255  
  1256  			// Checking default binding.
  1257  			if wantBound := test.wantBinderID == -1; defaultBound != wantBound {
  1258  				t.Errorf("got bound with default binding: %v, want %v", defaultBound, wantBound)
  1259  			}
  1260  
   1261  			// Checking extender bindings.
  1262  			for i, ext := range test.extenders {
  1263  				wantBound := i == test.wantBinderID
  1264  				if gotBound := ext.(*fakeExtender).gotBind; gotBound != wantBound {
  1265  					t.Errorf("got bound with extender #%d: %v, want %v", i, gotBound, wantBound)
  1266  				}
  1267  			}
  1268  
  1269  		})
  1270  	}
  1271  }
  1272  
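         // TestUpdatePod verifies the patch requests issued when updating a pod's condition and nominated
         // node name, including the case where no patch is needed because nothing has changed.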
  1273  func TestUpdatePod(t *testing.T) {
  1274  	tests := []struct {
  1275  		name                     string
  1276  		currentPodConditions     []v1.PodCondition
  1277  		newPodCondition          *v1.PodCondition
  1278  		currentNominatedNodeName string
  1279  		newNominatingInfo        *framework.NominatingInfo
  1280  		expectedPatchRequests    int
  1281  		expectedPatchDataPattern string
  1282  	}{
  1283  		{
  1284  			name:                 "Should make patch request to add pod condition when there are none currently",
  1285  			currentPodConditions: []v1.PodCondition{},
  1286  			newPodCondition: &v1.PodCondition{
  1287  				Type:               "newType",
  1288  				Status:             "newStatus",
  1289  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1290  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 1, 1, 1, 1, time.UTC)),
  1291  				Reason:             "newReason",
  1292  				Message:            "newMessage",
  1293  			},
  1294  			expectedPatchRequests:    1,
  1295  			expectedPatchDataPattern: `{"status":{"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","lastTransitionTime":".*","message":"newMessage","reason":"newReason","status":"newStatus","type":"newType"}]}}`,
  1296  		},
  1297  		{
  1298  			name: "Should make patch request to add a new pod condition when there is already one with another type",
  1299  			currentPodConditions: []v1.PodCondition{
  1300  				{
  1301  					Type:               "someOtherType",
  1302  					Status:             "someOtherTypeStatus",
  1303  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 11, 0, 0, 0, 0, time.UTC)),
  1304  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 10, 0, 0, 0, 0, time.UTC)),
  1305  					Reason:             "someOtherTypeReason",
  1306  					Message:            "someOtherTypeMessage",
  1307  				},
  1308  			},
  1309  			newPodCondition: &v1.PodCondition{
  1310  				Type:               "newType",
  1311  				Status:             "newStatus",
  1312  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1313  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 1, 1, 1, 1, time.UTC)),
  1314  				Reason:             "newReason",
  1315  				Message:            "newMessage",
  1316  			},
  1317  			expectedPatchRequests:    1,
  1318  			expectedPatchDataPattern: `{"status":{"\$setElementOrder/conditions":\[{"type":"someOtherType"},{"type":"newType"}],"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","lastTransitionTime":".*","message":"newMessage","reason":"newReason","status":"newStatus","type":"newType"}]}}`,
  1319  		},
  1320  		{
  1321  			name: "Should make patch request to update an existing pod condition",
  1322  			currentPodConditions: []v1.PodCondition{
  1323  				{
  1324  					Type:               "currentType",
  1325  					Status:             "currentStatus",
  1326  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1327  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1328  					Reason:             "currentReason",
  1329  					Message:            "currentMessage",
  1330  				},
  1331  			},
  1332  			newPodCondition: &v1.PodCondition{
  1333  				Type:               "currentType",
  1334  				Status:             "newStatus",
  1335  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1336  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 1, 1, 1, 1, time.UTC)),
  1337  				Reason:             "newReason",
  1338  				Message:            "newMessage",
  1339  			},
  1340  			expectedPatchRequests:    1,
  1341  			expectedPatchDataPattern: `{"status":{"\$setElementOrder/conditions":\[{"type":"currentType"}],"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","lastTransitionTime":".*","message":"newMessage","reason":"newReason","status":"newStatus","type":"currentType"}]}}`,
  1342  		},
  1343  		{
  1344  			name: "Should make patch request to update an existing pod condition, but the transition time should remain unchanged because the status is the same",
  1345  			currentPodConditions: []v1.PodCondition{
  1346  				{
  1347  					Type:               "currentType",
  1348  					Status:             "currentStatus",
  1349  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1350  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1351  					Reason:             "currentReason",
  1352  					Message:            "currentMessage",
  1353  				},
  1354  			},
  1355  			newPodCondition: &v1.PodCondition{
  1356  				Type:               "currentType",
  1357  				Status:             "currentStatus",
  1358  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 1, 1, 1, 1, time.UTC)),
  1359  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1360  				Reason:             "newReason",
  1361  				Message:            "newMessage",
  1362  			},
  1363  			expectedPatchRequests:    1,
  1364  			expectedPatchDataPattern: `{"status":{"\$setElementOrder/conditions":\[{"type":"currentType"}],"conditions":\[{"lastProbeTime":"2020-05-13T01:01:01Z","message":"newMessage","reason":"newReason","type":"currentType"}]}}`,
  1365  		},
  1366  		{
  1367  			name: "Should not make patch request if pod condition already exists and is identical and nominated node name is not set",
  1368  			currentPodConditions: []v1.PodCondition{
  1369  				{
  1370  					Type:               "currentType",
  1371  					Status:             "currentStatus",
  1372  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1373  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1374  					Reason:             "currentReason",
  1375  					Message:            "currentMessage",
  1376  				},
  1377  			},
  1378  			newPodCondition: &v1.PodCondition{
  1379  				Type:               "currentType",
  1380  				Status:             "currentStatus",
  1381  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1382  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1383  				Reason:             "currentReason",
  1384  				Message:            "currentMessage",
  1385  			},
  1386  			currentNominatedNodeName: "node1",
  1387  			expectedPatchRequests:    0,
  1388  		},
  1389  		{
  1390  			name: "Should make patch request if pod condition already exists and is identical but nominated node name is set and different",
  1391  			currentPodConditions: []v1.PodCondition{
  1392  				{
  1393  					Type:               "currentType",
  1394  					Status:             "currentStatus",
  1395  					LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1396  					LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1397  					Reason:             "currentReason",
  1398  					Message:            "currentMessage",
  1399  				},
  1400  			},
  1401  			newPodCondition: &v1.PodCondition{
  1402  				Type:               "currentType",
  1403  				Status:             "currentStatus",
  1404  				LastProbeTime:      metav1.NewTime(time.Date(2020, 5, 13, 0, 0, 0, 0, time.UTC)),
  1405  				LastTransitionTime: metav1.NewTime(time.Date(2020, 5, 12, 0, 0, 0, 0, time.UTC)),
  1406  				Reason:             "currentReason",
  1407  				Message:            "currentMessage",
  1408  			},
  1409  			newNominatingInfo:        &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "node1"},
  1410  			expectedPatchRequests:    1,
  1411  			expectedPatchDataPattern: `{"status":{"nominatedNodeName":"node1"}}`,
  1412  		},
  1413  	}
  1414  	for _, test := range tests {
  1415  		t.Run(test.name, func(t *testing.T) {
  1416  			actualPatchRequests := 0
  1417  			var actualPatchData string
  1418  			cs := &clientsetfake.Clientset{}
  1419  			cs.AddReactor("patch", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
  1420  				actualPatchRequests++
  1421  				patch := action.(clienttesting.PatchAction)
  1422  				actualPatchData = string(patch.GetPatch())
  1423  				// For this test, we don't care about the resulting patched pod, only that we got the expected
  1424  				// patch request, so returning &v1.Pod{} here is fine because the scheduler doesn't use the response.
  1425  				return true, &v1.Pod{}, nil
  1426  			})
  1427  
  1428  			pod := st.MakePod().Name("foo").NominatedNodeName(test.currentNominatedNodeName).Conditions(test.currentPodConditions).Obj()
  1429  
  1430  			ctx, cancel := context.WithCancel(context.Background())
  1431  			defer cancel()
  1432  			if err := updatePod(ctx, cs, pod, test.newPodCondition, test.newNominatingInfo); err != nil {
  1433  				t.Fatalf("Error calling update: %v", err)
  1434  			}
  1435  
  1436  			if actualPatchRequests != test.expectedPatchRequests {
  1437  				t.Fatalf("Actual patch requests (%d) do not equal expected patch requests (%d), actual patch data: %v", actualPatchRequests, test.expectedPatchRequests, actualPatchData)
  1438  			}
  1439  
  1440  			regex, err := regexp.Compile(test.expectedPatchDataPattern)
  1441  			if err != nil {
  1442  				t.Fatalf("Error compiling regexp for %v: %v", test.expectedPatchDataPattern, err)
  1443  			}
  1444  
  1445  			if test.expectedPatchRequests > 0 && !regex.MatchString(actualPatchData) {
  1446  				t.Fatalf("Patch data mismatch: Actual was %v, but expected to match regexp %v", actualPatchData, test.expectedPatchDataPattern)
  1447  			}
  1448  		})
  1449  	}
  1450  }
  1451  
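        // Test_SelectHost verifies that selectHost picks one of the highest-scoring nodes,
        // breaks ties between equally scored nodes, and returns the top-scoring nodes
        // (up to topNodesCnt) with the selected node at the head of the returned list.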
  1452  func Test_SelectHost(t *testing.T) {
  1453  	tests := []struct {
  1454  		name              string
  1455  		list              []framework.NodePluginScores
  1456  		topNodesCnt       int
  1457  		possibleNodes     sets.Set[string]
  1458  		possibleNodeLists [][]framework.NodePluginScores
  1459  		wantError         error
  1460  	}{
  1461  		{
  1462  			name: "unique properly ordered scores",
  1463  			list: []framework.NodePluginScores{
  1464  				{Name: "node1", TotalScore: 1},
  1465  				{Name: "node2", TotalScore: 2},
  1466  			},
  1467  			topNodesCnt:   2,
  1468  			possibleNodes: sets.New("node2"),
  1469  			possibleNodeLists: [][]framework.NodePluginScores{
  1470  				{
  1471  					{Name: "node2", TotalScore: 2},
  1472  					{Name: "node1", TotalScore: 1},
  1473  				},
  1474  			},
  1475  		},
  1476  		{
  1477  			name: "numberOfNodeScoresToReturn > len(list)",
  1478  			list: []framework.NodePluginScores{
  1479  				{Name: "node1", TotalScore: 1},
  1480  				{Name: "node2", TotalScore: 2},
  1481  			},
  1482  			topNodesCnt:   100,
  1483  			possibleNodes: sets.New("node2"),
  1484  			possibleNodeLists: [][]framework.NodePluginScores{
  1485  				{
  1486  					{Name: "node2", TotalScore: 2},
  1487  					{Name: "node1", TotalScore: 1},
  1488  				},
  1489  			},
  1490  		},
  1491  		{
  1492  			name: "equal scores",
  1493  			list: []framework.NodePluginScores{
  1494  				{Name: "node2.1", TotalScore: 2},
  1495  				{Name: "node2.2", TotalScore: 2},
  1496  				{Name: "node2.3", TotalScore: 2},
  1497  			},
  1498  			topNodesCnt:   2,
  1499  			possibleNodes: sets.New("node2.1", "node2.2", "node2.3"),
  1500  			possibleNodeLists: [][]framework.NodePluginScores{
  1501  				{
  1502  					{Name: "node2.1", TotalScore: 2},
  1503  					{Name: "node2.2", TotalScore: 2},
  1504  				},
  1505  				{
  1506  					{Name: "node2.1", TotalScore: 2},
  1507  					{Name: "node2.3", TotalScore: 2},
  1508  				},
  1509  				{
  1510  					{Name: "node2.2", TotalScore: 2},
  1511  					{Name: "node2.1", TotalScore: 2},
  1512  				},
  1513  				{
  1514  					{Name: "node2.2", TotalScore: 2},
  1515  					{Name: "node2.3", TotalScore: 2},
  1516  				},
  1517  				{
  1518  					{Name: "node2.3", TotalScore: 2},
  1519  					{Name: "node2.1", TotalScore: 2},
  1520  				},
  1521  				{
  1522  					{Name: "node2.3", TotalScore: 2},
  1523  					{Name: "node2.2", TotalScore: 2},
  1524  				},
  1525  			},
  1526  		},
  1527  		{
  1528  			name: "out of order scores",
  1529  			list: []framework.NodePluginScores{
  1530  				{Name: "node3.1", TotalScore: 3},
  1531  				{Name: "node2.1", TotalScore: 2},
  1532  				{Name: "node1.1", TotalScore: 1},
  1533  				{Name: "node3.2", TotalScore: 3},
  1534  			},
  1535  			topNodesCnt:   3,
  1536  			possibleNodes: sets.New("node3.1", "node3.2"),
  1537  			possibleNodeLists: [][]framework.NodePluginScores{
  1538  				{
  1539  					{Name: "node3.1", TotalScore: 3},
  1540  					{Name: "node3.2", TotalScore: 3},
  1541  					{Name: "node2.1", TotalScore: 2},
  1542  				},
  1543  				{
  1544  					{Name: "node3.2", TotalScore: 3},
  1545  					{Name: "node3.1", TotalScore: 3},
  1546  					{Name: "node2.1", TotalScore: 2},
  1547  				},
  1548  			},
  1549  		},
  1550  		{
  1551  			name:          "empty priority list",
  1552  			list:          []framework.NodePluginScores{},
  1553  			possibleNodes: sets.Set[string]{},
  1554  			wantError:     errEmptyPriorityList,
  1555  		},
  1556  	}
  1557  
  1558  	for _, test := range tests {
  1559  		t.Run(test.name, func(t *testing.T) {
  1560  			// Repeat the selection several times to exercise the random tie-breaking in selectHost.
  1561  			for i := 0; i < 10; i++ {
  1562  				got, scoreList, err := selectHost(test.list, test.topNodesCnt)
  1563  				if err != test.wantError {
  1564  					t.Fatalf("unexpected error returned from selectHost: got: %v, want: %v", err, test.wantError)
  1565  				}
  1566  				if test.possibleNodes.Len() == 0 {
  1567  					if got != "" {
  1568  						t.Fatalf("expected no node to be selected, but selectHost returned %s", got)
  1569  					}
  1570  					return
  1571  				}
  1572  				if !test.possibleNodes.Has(got) {
  1573  					t.Errorf("selected node %s is not in the set of possible nodes %v", got, test.possibleNodes)
  1574  				}
  1575  				if got != scoreList[0].Name {
  1576  					t.Errorf("the head of the score list should be the selected node's score: got: %v, want: %v", scoreList[0], got)
  1577  				}
  1578  				for _, list := range test.possibleNodeLists {
  1579  					if cmp.Equal(list, scoreList) {
  1580  						return
  1581  					}
  1582  				}
  1583  				t.Errorf("Unexpected scoreList: %v", scoreList)
  1584  			}
  1585  		})
  1586  	}
  1587  }
  1588  
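        // TestFindNodesThatPassExtenders verifies that findNodesThatPassExtenders keeps only
        // the nodes accepted by every extender and records Unschedulable or
        // UnschedulableAndUnresolvable statuses for the nodes that are filtered out.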
  1589  func TestFindNodesThatPassExtenders(t *testing.T) {
  1590  	tests := []struct {
  1591  		name                  string
  1592  		extenders             []tf.FakeExtender
  1593  		nodes                 []*v1.Node
  1594  		filteredNodesStatuses framework.NodeToStatusMap
  1595  		expectsErr            bool
  1596  		expectedNodes         []*v1.Node
  1597  		expectedStatuses      framework.NodeToStatusMap
  1598  	}{
  1599  		{
  1600  			name: "error",
  1601  			extenders: []tf.FakeExtender{
  1602  				{
  1603  					ExtenderName: "FakeExtender1",
  1604  					Predicates:   []tf.FitPredicate{tf.ErrorPredicateExtender},
  1605  				},
  1606  			},
  1607  			nodes:                 makeNodeList([]string{"a"}),
  1608  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1609  			expectsErr:            true,
  1610  		},
  1611  		{
  1612  			name: "success",
  1613  			extenders: []tf.FakeExtender{
  1614  				{
  1615  					ExtenderName: "FakeExtender1",
  1616  					Predicates:   []tf.FitPredicate{tf.TruePredicateExtender},
  1617  				},
  1618  			},
  1619  			nodes:                 makeNodeList([]string{"a"}),
  1620  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1621  			expectsErr:            false,
  1622  			expectedNodes:         makeNodeList([]string{"a"}),
  1623  			expectedStatuses:      make(framework.NodeToStatusMap),
  1624  		},
  1625  		{
  1626  			name: "unschedulable",
  1627  			extenders: []tf.FakeExtender{
  1628  				{
  1629  					ExtenderName: "FakeExtender1",
  1630  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status {
  1631  						if node.Name == "a" {
  1632  							return framework.NewStatus(framework.Success)
  1633  						}
  1634  						return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name))
  1635  					}},
  1636  				},
  1637  			},
  1638  			nodes:                 makeNodeList([]string{"a", "b"}),
  1639  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1640  			expectsErr:            false,
  1641  			expectedNodes:         makeNodeList([]string{"a"}),
  1642  			expectedStatuses: framework.NodeToStatusMap{
  1643  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1644  			},
  1645  		},
  1646  		{
  1647  			name: "unschedulable and unresolvable",
  1648  			extenders: []tf.FakeExtender{
  1649  				{
  1650  					ExtenderName: "FakeExtender1",
  1651  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status {
  1652  						if node.Name == "a" {
  1653  							return framework.NewStatus(framework.Success)
  1654  						}
  1655  						if node.Name == "b" {
  1656  							return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name))
  1657  						}
  1658  						return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Name))
  1659  					}},
  1660  				},
  1661  			},
  1662  			nodes:                 makeNodeList([]string{"a", "b", "c"}),
  1663  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1664  			expectsErr:            false,
  1665  			expectedNodes:         makeNodeList([]string{"a"}),
  1666  			expectedStatuses: framework.NodeToStatusMap{
  1667  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1668  				"c": framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("FakeExtender: node %q failed and unresolvable", "c")),
  1669  			},
  1670  		},
  1671  		{
  1672  			name: "extender may overwrite the statuses",
  1673  			extenders: []tf.FakeExtender{
  1674  				{
  1675  					ExtenderName: "FakeExtender1",
  1676  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status {
  1677  						if node.Name == "a" {
  1678  							return framework.NewStatus(framework.Success)
  1679  						}
  1680  						if node.Name == "b" {
  1681  							return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name))
  1682  						}
  1683  						return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Name))
  1684  					}},
  1685  				},
  1686  			},
  1687  			nodes: makeNodeList([]string{"a", "b", "c"}),
  1688  			filteredNodesStatuses: framework.NodeToStatusMap{
  1689  				"c": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeFilterPlugin: node %q failed", "c")),
  1690  			},
  1691  			expectsErr:    false,
  1692  			expectedNodes: makeNodeList([]string{"a"}),
  1693  			expectedStatuses: framework.NodeToStatusMap{
  1694  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1695  				"c": framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("FakeFilterPlugin: node %q failed", "c"), fmt.Sprintf("FakeExtender: node %q failed and unresolvable", "c")),
  1696  			},
  1697  		},
  1698  		{
  1699  			name: "multiple extenders",
  1700  			extenders: []tf.FakeExtender{
  1701  				{
  1702  					ExtenderName: "FakeExtender1",
  1703  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status {
  1704  						if node.Name == "a" {
  1705  							return framework.NewStatus(framework.Success)
  1706  						}
  1707  						if node.Name == "b" {
  1708  							return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name))
  1709  						}
  1710  						return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Name))
  1711  					}},
  1712  				},
  1713  				{
  1714  					ExtenderName: "FakeExtender1",
  1715  					Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status {
  1716  						if node.Name == "a" {
  1717  							return framework.NewStatus(framework.Success)
  1718  						}
  1719  						return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name))
  1720  					}},
  1721  				},
  1722  			},
  1723  			nodes:                 makeNodeList([]string{"a", "b", "c"}),
  1724  			filteredNodesStatuses: make(framework.NodeToStatusMap),
  1725  			expectsErr:            false,
  1726  			expectedNodes:         makeNodeList([]string{"a"}),
  1727  			expectedStatuses: framework.NodeToStatusMap{
  1728  				"b": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("FakeExtender: node %q failed", "b")),
  1729  				"c": framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("FakeExtender: node %q failed and unresolvable", "c")),
  1730  			},
  1731  		},
  1732  	}
  1733  
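        	// framework.Status carries unexported fields, so compare statuses by their
        	// code and reasons only.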
  1734  	cmpOpts := []cmp.Option{
  1735  		cmp.Comparer(func(s1 framework.Status, s2 framework.Status) bool {
  1736  			return s1.Code() == s2.Code() && reflect.DeepEqual(s1.Reasons(), s2.Reasons())
  1737  		}),
  1738  	}
  1739  
  1740  	for _, tt := range tests {
  1741  		t.Run(tt.name, func(t *testing.T) {
  1742  			_, ctx := ktesting.NewTestContext(t)
  1743  			var extenders []framework.Extender
  1744  			for ii := range tt.extenders {
  1745  				extenders = append(extenders, &tt.extenders[ii])
  1746  			}
  1747  
  1748  			pod := st.MakePod().Name("1").UID("1").Obj()
  1749  			got, err := findNodesThatPassExtenders(ctx, extenders, pod, tt.nodes, tt.filteredNodesStatuses)
  1750  			if tt.expectsErr {
  1751  				if err == nil {
  1752  					t.Error("expected an error from findNodesThatPassExtenders, but got none")
  1753  				}
  1754  			} else {
  1755  				if err != nil {
  1756  					t.Errorf("Unexpected error: %v", err)
  1757  				}
  1758  				if diff := cmp.Diff(tt.expectedNodes, got); diff != "" {
  1759  					t.Errorf("filtered nodes (-want,+got):\n%s", diff)
  1760  				}
  1761  				if diff := cmp.Diff(tt.expectedStatuses, tt.filteredNodesStatuses, cmpOpts...); diff != "" {
  1762  					t.Errorf("filtered statuses (-want,+got):\n%s", diff)
  1763  				}
  1764  			}
  1765  		})
  1766  	}
  1767  }
  1768  
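        // TestSchedulerSchedulePod exercises SchedulePod with different plugin combinations:
        // filters, scorers, PreFilter results (including Skip and node subsets), and volume
        // binding. It asserts the suggested host, the number of evaluated nodes, and the
        // FitError diagnosis for unschedulable pods.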
  1769  func TestSchedulerSchedulePod(t *testing.T) {
  1770  	fts := feature.Features{}
  1771  	tests := []struct {
  1772  		name               string
  1773  		registerPlugins    []tf.RegisterPluginFunc
  1774  		nodes              []string
  1775  		pvcs               []v1.PersistentVolumeClaim
  1776  		pod                *v1.Pod
  1777  		pods               []*v1.Pod
  1778  		wantNodes          sets.Set[string]
  1779  		wantEvaluatedNodes *int32
  1780  		wErr               error
  1781  	}{
  1782  		{
  1783  			registerPlugins: []tf.RegisterPluginFunc{
  1784  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1785  				tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin),
  1786  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1787  			},
  1788  			nodes: []string{"node1", "node2"},
  1789  			pod:   st.MakePod().Name("2").UID("2").Obj(),
  1790  			name:  "test 1",
  1791  			wErr: &framework.FitError{
  1792  				Pod:         st.MakePod().Name("2").UID("2").Obj(),
  1793  				NumAllNodes: 2,
  1794  				Diagnosis: framework.Diagnosis{
  1795  					NodeToStatusMap: framework.NodeToStatusMap{
  1796  						"node1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1797  						"node2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1798  					},
  1799  					UnschedulablePlugins: sets.New("FalseFilter"),
  1800  				},
  1801  			},
  1802  		},
  1803  		{
  1804  			registerPlugins: []tf.RegisterPluginFunc{
  1805  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1806  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1807  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1808  			},
  1809  			nodes:     []string{"node1", "node2"},
  1810  			pod:       st.MakePod().Name("ignore").UID("ignore").Obj(),
  1811  			wantNodes: sets.New("node1", "node2"),
  1812  			name:      "test 2",
  1813  			wErr:      nil,
  1814  		},
  1815  		{
  1816  			// Fits on a node where the pod ID matches the node name
  1817  			registerPlugins: []tf.RegisterPluginFunc{
  1818  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1819  				tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  1820  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1821  			},
  1822  			nodes:     []string{"node1", "node2"},
  1823  			pod:       st.MakePod().Name("node2").UID("node2").Obj(),
  1824  			wantNodes: sets.New("node2"),
  1825  			name:      "test 3",
  1826  			wErr:      nil,
  1827  		},
  1828  		{
  1829  			registerPlugins: []tf.RegisterPluginFunc{
  1830  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1831  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1832  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1833  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1834  			},
  1835  			nodes:     []string{"3", "2", "1"},
  1836  			pod:       st.MakePod().Name("ignore").UID("ignore").Obj(),
  1837  			wantNodes: sets.New("3"),
  1838  			name:      "test 4",
  1839  			wErr:      nil,
  1840  		},
  1841  		{
  1842  			registerPlugins: []tf.RegisterPluginFunc{
  1843  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1844  				tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  1845  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1846  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1847  			},
  1848  			nodes:     []string{"3", "2", "1"},
  1849  			pod:       st.MakePod().Name("2").UID("2").Obj(),
  1850  			wantNodes: sets.New("2"),
  1851  			name:      "test 5",
  1852  			wErr:      nil,
  1853  		},
  1854  		{
  1855  			registerPlugins: []tf.RegisterPluginFunc{
  1856  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1857  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1858  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1859  				tf.RegisterScorePlugin("ReverseNumericMap", newReverseNumericMapPlugin(), 2),
  1860  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1861  			},
  1862  			nodes:     []string{"3", "2", "1"},
  1863  			pod:       st.MakePod().Name("2").UID("2").Obj(),
  1864  			wantNodes: sets.New("1"),
  1865  			name:      "test 6",
  1866  			wErr:      nil,
  1867  		},
  1868  		{
  1869  			registerPlugins: []tf.RegisterPluginFunc{
  1870  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1871  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1872  				tf.RegisterFilterPlugin("FalseFilter", tf.NewFalseFilterPlugin),
  1873  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1874  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1875  			},
  1876  			nodes: []string{"3", "2", "1"},
  1877  			pod:   st.MakePod().Name("2").UID("2").Obj(),
  1878  			name:  "test 7",
  1879  			wErr: &framework.FitError{
  1880  				Pod:         st.MakePod().Name("2").UID("2").Obj(),
  1881  				NumAllNodes: 3,
  1882  				Diagnosis: framework.Diagnosis{
  1883  					NodeToStatusMap: framework.NodeToStatusMap{
  1884  						"3": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1885  						"2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1886  						"1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("FalseFilter"),
  1887  					},
  1888  					UnschedulablePlugins: sets.New("FalseFilter"),
  1889  				},
  1890  			},
  1891  		},
  1892  		{
  1893  			registerPlugins: []tf.RegisterPluginFunc{
  1894  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1895  				tf.RegisterFilterPlugin("NoPodsFilter", NewNoPodsFilterPlugin),
  1896  				tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  1897  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  1898  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1899  			},
  1900  			pods: []*v1.Pod{
  1901  				st.MakePod().Name("2").UID("2").Node("2").Phase(v1.PodRunning).Obj(),
  1902  			},
  1903  			pod:   st.MakePod().Name("2").UID("2").Obj(),
  1904  			nodes: []string{"1", "2"},
  1905  			name:  "test 8",
  1906  			wErr: &framework.FitError{
  1907  				Pod:         st.MakePod().Name("2").UID("2").Obj(),
  1908  				NumAllNodes: 2,
  1909  				Diagnosis: framework.Diagnosis{
  1910  					NodeToStatusMap: framework.NodeToStatusMap{
  1911  						"1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  1912  						"2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("NoPodsFilter"),
  1913  					},
  1914  					UnschedulablePlugins: sets.New("MatchFilter", "NoPodsFilter"),
  1915  				},
  1916  			},
  1917  		},
  1918  		{
  1919  			// Pod with existing PVC
  1920  			registerPlugins: []tf.RegisterPluginFunc{
  1921  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1922  				tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
  1923  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1924  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1925  			},
  1926  			nodes: []string{"node1", "node2"},
  1927  			pvcs: []v1.PersistentVolumeClaim{
  1928  				{
  1929  					ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault},
  1930  					Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "existingPV"},
  1931  				},
  1932  			},
  1933  			pod:       st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
  1934  			wantNodes: sets.New("node1", "node2"),
  1935  			name:      "existing PVC",
  1936  			wErr:      nil,
  1937  		},
  1938  		{
  1939  			// Pod with a non-existent PVC
  1940  			registerPlugins: []tf.RegisterPluginFunc{
  1941  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1942  				tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
  1943  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1944  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1945  			},
  1946  			nodes: []string{"node1", "node2"},
  1947  			pod:   st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
  1948  			name:  "unknown PVC",
  1949  			wErr: &framework.FitError{
  1950  				Pod:         st.MakePod().Name("ignore").UID("ignore").PVC("unknownPVC").Obj(),
  1951  				NumAllNodes: 2,
  1952  				Diagnosis: framework.Diagnosis{
  1953  					NodeToStatusMap: framework.NodeToStatusMap{
  1954  						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithPlugin("VolumeBinding"),
  1955  						"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "unknownPVC" not found`).WithPlugin("VolumeBinding"),
  1956  					},
  1957  					PreFilterMsg:         `persistentvolumeclaim "unknownPVC" not found`,
  1958  					UnschedulablePlugins: sets.New(volumebinding.Name),
  1959  				},
  1960  			},
  1961  		},
  1962  		{
  1963  			// Pod with a PVC that is being deleted
  1964  			registerPlugins: []tf.RegisterPluginFunc{
  1965  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1966  				tf.RegisterPreFilterPlugin(volumebinding.Name, frameworkruntime.FactoryAdapter(fts, volumebinding.New)),
  1967  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1968  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1969  			},
  1970  			nodes: []string{"node1", "node2"},
  1971  			pvcs:  []v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "existingPVC", UID: types.UID("existingPVC"), Namespace: v1.NamespaceDefault, DeletionTimestamp: &metav1.Time{}}}},
  1972  			pod:   st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
  1973  			name:  "deleted PVC",
  1974  			wErr: &framework.FitError{
  1975  				Pod:         st.MakePod().Name("ignore").UID("ignore").Namespace(v1.NamespaceDefault).PVC("existingPVC").Obj(),
  1976  				NumAllNodes: 2,
  1977  				Diagnosis: framework.Diagnosis{
  1978  					NodeToStatusMap: framework.NodeToStatusMap{
  1979  						"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithPlugin("VolumeBinding"),
  1980  						"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "existingPVC" is being deleted`).WithPlugin("VolumeBinding"),
  1981  					},
  1982  					PreFilterMsg:         `persistentvolumeclaim "existingPVC" is being deleted`,
  1983  					UnschedulablePlugins: sets.New(volumebinding.Name),
  1984  				},
  1985  			},
  1986  		},
  1987  		{
  1988  			registerPlugins: []tf.RegisterPluginFunc{
  1989  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  1990  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  1991  				tf.RegisterScorePlugin("FalseMap", newFalseMapPlugin(), 1),
  1992  				tf.RegisterScorePlugin("TrueMap", newTrueMapPlugin(), 2),
  1993  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  1994  			},
  1995  			nodes: []string{"2", "1"},
  1996  			pod:   st.MakePod().Name("2").Obj(),
  1997  			name:  "test error with priority map",
  1998  			wErr:  fmt.Errorf("running Score plugins: %w", fmt.Errorf(`plugin "FalseMap" failed with: %w`, errPrioritize)),
  1999  		},
  2000  		{
  2001  			name: "test podtopologyspread plugin - 2 nodes with maxskew=1",
  2002  			registerPlugins: []tf.RegisterPluginFunc{
  2003  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2004  				tf.RegisterPluginAsExtensions(
  2005  					podtopologyspread.Name,
  2006  					podTopologySpreadFunc,
  2007  					"PreFilter",
  2008  					"Filter",
  2009  				),
  2010  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2011  			},
  2012  			nodes: []string{"node1", "node2"},
  2013  			pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(1, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
  2014  				MatchExpressions: []metav1.LabelSelectorRequirement{
  2015  					{
  2016  						Key:      "foo",
  2017  						Operator: metav1.LabelSelectorOpExists,
  2018  					},
  2019  				},
  2020  			}, nil, nil, nil, nil).Obj(),
  2021  			pods: []*v1.Pod{
  2022  				st.MakePod().Name("pod1").UID("pod1").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
  2023  			},
  2024  			wantNodes: sets.New("node2"),
  2025  			wErr:      nil,
  2026  		},
  2027  		{
  2028  			name: "test podtopologyspread plugin - 3 nodes with maxskew=2",
  2029  			registerPlugins: []tf.RegisterPluginFunc{
  2030  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2031  				tf.RegisterPluginAsExtensions(
  2032  					podtopologyspread.Name,
  2033  					podTopologySpreadFunc,
  2034  					"PreFilter",
  2035  					"Filter",
  2036  				),
  2037  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2038  			},
  2039  			nodes: []string{"node1", "node2", "node3"},
  2040  			pod: st.MakePod().Name("p").UID("p").Label("foo", "").SpreadConstraint(2, "hostname", v1.DoNotSchedule, &metav1.LabelSelector{
  2041  				MatchExpressions: []metav1.LabelSelectorRequirement{
  2042  					{
  2043  						Key:      "foo",
  2044  						Operator: metav1.LabelSelectorOpExists,
  2045  					},
  2046  				},
  2047  			}, nil, nil, nil, nil).Obj(),
  2048  			pods: []*v1.Pod{
  2049  				st.MakePod().Name("pod1a").UID("pod1a").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
  2050  				st.MakePod().Name("pod1b").UID("pod1b").Label("foo", "").Node("node1").Phase(v1.PodRunning).Obj(),
  2051  				st.MakePod().Name("pod2").UID("pod2").Label("foo", "").Node("node2").Phase(v1.PodRunning).Obj(),
  2052  			},
  2053  			wantNodes: sets.New("node2", "node3"),
  2054  			wErr:      nil,
  2055  		},
  2056  		{
  2057  			name: "test with filter plugin returning Unschedulable status",
  2058  			registerPlugins: []tf.RegisterPluginFunc{
  2059  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2060  				tf.RegisterFilterPlugin(
  2061  					"FakeFilter",
  2062  					tf.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.Unschedulable}),
  2063  				),
  2064  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  2065  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2066  			},
  2067  			nodes:     []string{"3"},
  2068  			pod:       st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2069  			wantNodes: nil,
  2070  			wErr: &framework.FitError{
  2071  				Pod:         st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2072  				NumAllNodes: 1,
  2073  				Diagnosis: framework.Diagnosis{
  2074  					NodeToStatusMap: framework.NodeToStatusMap{
  2075  						"3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter").WithPlugin("FakeFilter"),
  2076  					},
  2077  					UnschedulablePlugins: sets.New("FakeFilter"),
  2078  				},
  2079  			},
  2080  		},
  2081  		{
  2082  			name: "test with filter plugin returning UnschedulableAndUnresolvable status",
  2083  			registerPlugins: []tf.RegisterPluginFunc{
  2084  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2085  				tf.RegisterFilterPlugin(
  2086  					"FakeFilter",
  2087  					tf.NewFakeFilterPlugin(map[string]framework.Code{"3": framework.UnschedulableAndUnresolvable}),
  2088  				),
  2089  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  2090  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2091  			},
  2092  			nodes:     []string{"3"},
  2093  			pod:       st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2094  			wantNodes: nil,
  2095  			wErr: &framework.FitError{
  2096  				Pod:         st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2097  				NumAllNodes: 1,
  2098  				Diagnosis: framework.Diagnosis{
  2099  					NodeToStatusMap: framework.NodeToStatusMap{
  2100  						"3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter").WithPlugin("FakeFilter"),
  2101  					},
  2102  					UnschedulablePlugins: sets.New("FakeFilter"),
  2103  				},
  2104  			},
  2105  		},
  2106  		{
  2107  			name: "test with partial failed filter plugin",
  2108  			registerPlugins: []tf.RegisterPluginFunc{
  2109  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2110  				tf.RegisterFilterPlugin(
  2111  					"FakeFilter",
  2112  					tf.NewFakeFilterPlugin(map[string]framework.Code{"1": framework.Unschedulable}),
  2113  				),
  2114  				tf.RegisterScorePlugin("NumericMap", newNumericMapPlugin(), 1),
  2115  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2116  			},
  2117  			nodes:     []string{"1", "2"},
  2118  			pod:       st.MakePod().Name("test-filter").UID("test-filter").Obj(),
  2119  			wantNodes: nil,
  2120  			wErr:      nil,
  2121  		},
  2122  		{
  2123  			name: "test prefilter plugin returning Unschedulable status",
  2124  			registerPlugins: []tf.RegisterPluginFunc{
  2125  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2126  				tf.RegisterPreFilterPlugin(
  2127  					"FakePreFilter",
  2128  					tf.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status")),
  2129  				),
  2130  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2131  			},
  2132  			nodes:     []string{"1", "2"},
  2133  			pod:       st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2134  			wantNodes: nil,
  2135  			wErr: &framework.FitError{
  2136  				Pod:         st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2137  				NumAllNodes: 2,
  2138  				Diagnosis: framework.Diagnosis{
  2139  					NodeToStatusMap: framework.NodeToStatusMap{
  2140  						"1": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithPlugin("FakePreFilter"),
  2141  						"2": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injected unschedulable status").WithPlugin("FakePreFilter"),
  2142  					},
  2143  					PreFilterMsg:         "injected unschedulable status",
  2144  					UnschedulablePlugins: sets.New("FakePreFilter"),
  2145  				},
  2146  			},
  2147  		},
  2148  		{
  2149  			name: "test prefilter plugin returning error status",
  2150  			registerPlugins: []tf.RegisterPluginFunc{
  2151  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2152  				tf.RegisterPreFilterPlugin(
  2153  					"FakePreFilter",
  2154  					tf.NewFakePreFilterPlugin("FakePreFilter", nil, framework.NewStatus(framework.Error, "injected error status")),
  2155  				),
  2156  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2157  			},
  2158  			nodes:     []string{"1", "2"},
  2159  			pod:       st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2160  			wantNodes: nil,
  2161  			wErr:      fmt.Errorf(`running PreFilter plugin "FakePreFilter": %w`, errors.New("injected error status")),
  2162  		},
  2163  		{
  2164  			name: "test prefilter plugin returning node",
  2165  			registerPlugins: []tf.RegisterPluginFunc{
  2166  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2167  				tf.RegisterPreFilterPlugin(
  2168  					"FakePreFilter1",
  2169  					tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil),
  2170  				),
  2171  				tf.RegisterPreFilterPlugin(
  2172  					"FakePreFilter2",
  2173  					tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
  2174  				),
  2175  				tf.RegisterPreFilterPlugin(
  2176  					"FakePreFilter3",
  2177  					tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1", "node2")}, nil),
  2178  				),
  2179  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2180  			},
  2181  			nodes:              []string{"node1", "node2", "node3"},
  2182  			pod:                st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2183  			wantNodes:          sets.New("node2"),
  2184  			wantEvaluatedNodes: ptr.To[int32](1),
  2185  		},
  2186  		{
  2187  			name: "test prefilter plugin returning non-intersecting nodes",
  2188  			registerPlugins: []tf.RegisterPluginFunc{
  2189  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2190  				tf.RegisterPreFilterPlugin(
  2191  					"FakePreFilter1",
  2192  					tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil),
  2193  				),
  2194  				tf.RegisterPreFilterPlugin(
  2195  					"FakePreFilter2",
  2196  					tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New("node2")}, nil),
  2197  				),
  2198  				tf.RegisterPreFilterPlugin(
  2199  					"FakePreFilter3",
  2200  					tf.NewFakePreFilterPlugin("FakePreFilter3", &framework.PreFilterResult{NodeNames: sets.New("node1")}, nil),
  2201  				),
  2202  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2203  			},
  2204  			nodes: []string{"node1", "node2", "node3"},
  2205  			pod:   st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2206  			wErr: &framework.FitError{
  2207  				Pod:         st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2208  				NumAllNodes: 3,
  2209  				Diagnosis: framework.Diagnosis{
  2210  					NodeToStatusMap: framework.NodeToStatusMap{
  2211  						"node1": framework.NewStatus(framework.Unschedulable, "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously"),
  2212  						"node2": framework.NewStatus(framework.Unschedulable, "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously"),
  2213  						"node3": framework.NewStatus(framework.Unschedulable, "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously"),
  2214  					},
  2215  					UnschedulablePlugins: sets.Set[string]{},
  2216  					PreFilterMsg:         "node(s) didn't satisfy plugin(s) [FakePreFilter2 FakePreFilter3] simultaneously",
  2217  				},
  2218  			},
  2219  		},
  2220  		{
  2221  			name: "test prefilter plugin returning empty node set",
  2222  			registerPlugins: []tf.RegisterPluginFunc{
  2223  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2224  				tf.RegisterPreFilterPlugin(
  2225  					"FakePreFilter1",
  2226  					tf.NewFakePreFilterPlugin("FakePreFilter1", nil, nil),
  2227  				),
  2228  				tf.RegisterPreFilterPlugin(
  2229  					"FakePreFilter2",
  2230  					tf.NewFakePreFilterPlugin("FakePreFilter2", &framework.PreFilterResult{NodeNames: sets.New[string]()}, nil),
  2231  				),
  2232  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2233  			},
  2234  			nodes: []string{"node1"},
  2235  			pod:   st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2236  			wErr: &framework.FitError{
  2237  				Pod:         st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2238  				NumAllNodes: 1,
  2239  				Diagnosis: framework.Diagnosis{
  2240  					NodeToStatusMap: framework.NodeToStatusMap{
  2241  						"node1": framework.NewStatus(framework.Unschedulable, "node(s) didn't satisfy plugin FakePreFilter2"),
  2242  					},
  2243  					UnschedulablePlugins: sets.Set[string]{},
  2244  					PreFilterMsg:         "node(s) didn't satisfy plugin FakePreFilter2",
  2245  				},
  2246  			},
  2247  		},
  2248  		{
  2249  			name: "test prefilter plugin returning skip",
  2250  			registerPlugins: []tf.RegisterPluginFunc{
  2251  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2252  				tf.RegisterPreFilterPlugin(
  2253  					"FakePreFilter1",
  2254  					tf.NewFakePreFilterPlugin("FakeFilter1", nil, nil),
  2255  				),
  2256  				tf.RegisterFilterPlugin(
  2257  					"FakeFilter1",
  2258  					tf.NewFakeFilterPlugin(map[string]framework.Code{
  2259  						"node1": framework.Unschedulable,
  2260  					}),
  2261  				),
  2262  				tf.RegisterPluginAsExtensions("FakeFilter2", func(_ context.Context, configuration runtime.Object, f framework.Handle) (framework.Plugin, error) {
  2263  					return tf.FakePreFilterAndFilterPlugin{
  2264  						FakePreFilterPlugin: &tf.FakePreFilterPlugin{
  2265  							Result: nil,
  2266  							Status: framework.NewStatus(framework.Skip),
  2267  						},
  2268  						FakeFilterPlugin: &tf.FakeFilterPlugin{
  2269  							// This Filter plugin shouldn't be executed at the Filter extension point because its
  2270  							// PreFilter returned Skip. To confirm that, return the Error status code for all nodes.
  2271  							FailedNodeReturnCodeMap: map[string]framework.Code{
  2272  								"node1": framework.Error, "node2": framework.Error, "node3": framework.Error,
  2273  							},
  2274  						},
  2275  					}, nil
  2276  				}, "PreFilter", "Filter"),
  2277  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2278  			},
  2279  			nodes:              []string{"node1", "node2", "node3"},
  2280  			pod:                st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
  2281  			wantNodes:          sets.New("node2", "node3"),
  2282  			wantEvaluatedNodes: ptr.To[int32](3),
  2283  		},
  2284  		{
  2285  			name: "test all prescore plugins return skip",
  2286  			registerPlugins: []tf.RegisterPluginFunc{
  2287  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2288  				tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2289  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2290  				tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0,
  2291  					framework.NewStatus(framework.Skip, "fake skip"),
  2292  					framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"),
  2293  				), "PreScore", "Score"),
  2294  			},
  2295  			nodes:     []string{"node1", "node2"},
  2296  			pod:       st.MakePod().Name("ignore").UID("ignore").Obj(),
  2297  			wantNodes: sets.New("node1", "node2"),
  2298  		},
  2299  	}
  2300  	for _, test := range tests {
  2301  		t.Run(test.name, func(t *testing.T) {
  2302  			logger, ctx := ktesting.NewTestContext(t)
  2303  			ctx, cancel := context.WithCancel(ctx)
  2304  			defer cancel()
  2305  
  2306  			cache := internalcache.New(ctx, time.Duration(0))
  2307  			for _, pod := range test.pods {
  2308  				cache.AddPod(logger, pod)
  2309  			}
  2310  			var nodes []*v1.Node
  2311  			for _, name := range test.nodes {
  2312  				node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}}
  2313  				nodes = append(nodes, node)
  2314  				cache.AddNode(logger, node)
  2315  			}
  2316  
  2317  			cs := clientsetfake.NewSimpleClientset()
  2318  			informerFactory := informers.NewSharedInformerFactory(cs, 0)
  2319  			for _, pvc := range test.pvcs {
  2320  				metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, volume.AnnBindCompleted, "true")
  2321  				cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, &pvc, metav1.CreateOptions{})
  2322  				if pvName := pvc.Spec.VolumeName; pvName != "" {
  2323  					pv := v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: pvName}}
  2324  					cs.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{})
  2325  				}
  2326  			}
  2327  			snapshot := internalcache.NewSnapshot(test.pods, nodes)
  2328  			fwk, err := tf.NewFramework(
  2329  				ctx,
  2330  				test.registerPlugins, "",
  2331  				frameworkruntime.WithSnapshotSharedLister(snapshot),
  2332  				frameworkruntime.WithInformerFactory(informerFactory),
  2333  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  2334  			)
  2335  			if err != nil {
  2336  				t.Fatal(err)
  2337  			}
  2338  
  2339  			sched := &Scheduler{
  2340  				Cache:                    cache,
  2341  				nodeInfoSnapshot:         snapshot,
  2342  				percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  2343  			}
  2344  			sched.applyDefaultHandlers()
  2345  
  2346  			informerFactory.Start(ctx.Done())
  2347  			informerFactory.WaitForCacheSync(ctx.Done())
  2348  
  2349  			result, err := sched.SchedulePod(ctx, fwk, framework.NewCycleState(), test.pod)
  2350  			if err != test.wErr {
  2351  				gotFitErr, gotOK := err.(*framework.FitError)
  2352  				wantFitErr, wantOK := test.wErr.(*framework.FitError)
  2353  				if gotOK != wantOK {
  2354  					t.Errorf("Expected err to be FitError: %v, but got %v", wantOK, gotOK)
  2355  				} else if gotOK {
  2356  					if diff := cmp.Diff(wantFitErr, gotFitErr); diff != "" {
  2357  						t.Errorf("Unexpected fitErr: (-want, +got): %s", diff)
  2358  					}
  2359  				}
  2360  			}
  2361  			if test.wantNodes != nil && !test.wantNodes.Has(result.SuggestedHost) {
  2362  				t.Errorf("Expected: %s, got: %s", test.wantNodes, result.SuggestedHost)
  2363  			}
  2364  			wantEvaluatedNodes := len(test.nodes)
  2365  			if test.wantEvaluatedNodes != nil {
  2366  				wantEvaluatedNodes = int(*test.wantEvaluatedNodes)
  2367  			}
  2368  			if test.wErr == nil && wantEvaluatedNodes != result.EvaluatedNodes {
  2369  				t.Errorf("Expected EvaluatedNodes: %d, got: %d", wantEvaluatedNodes, result.EvaluatedNodes)
  2370  			}
  2371  		})
  2372  	}
  2373  }
  2374  
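        // TestFindFitAllError verifies that when every node is rejected by a filter plugin,
        // findNodesThatFitPod reports an Unschedulable status for each node in the
        // diagnosis rather than returning an error.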
  2375  func TestFindFitAllError(t *testing.T) {
  2376  	ctx, cancel := context.WithCancel(context.Background())
  2377  	defer cancel()
  2378  
  2379  	nodes := makeNodeList([]string{"3", "2", "1"})
  2380  	scheduler := makeScheduler(ctx, nodes)
  2381  
  2382  	fwk, err := tf.NewFramework(
  2383  		ctx,
  2384  		[]tf.RegisterPluginFunc{
  2385  			tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2386  			tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2387  			tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  2388  			tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2389  		},
  2390  		"",
  2391  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  2392  	)
  2393  	if err != nil {
  2394  		t.Fatal(err)
  2395  	}
  2396  
  2397  	_, diagnosis, err := scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), &v1.Pod{})
  2398  	if err != nil {
  2399  		t.Errorf("unexpected error: %v", err)
  2400  	}
  2401  
  2402  	expected := framework.Diagnosis{
  2403  		NodeToStatusMap: framework.NodeToStatusMap{
  2404  			"1": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  2405  			"2": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  2406  			"3": framework.NewStatus(framework.Unschedulable, tf.ErrReasonFake).WithPlugin("MatchFilter"),
  2407  		},
  2408  		UnschedulablePlugins: sets.New("MatchFilter"),
  2409  	}
  2410  	if diff := cmp.Diff(expected, diagnosis); diff != "" {
  2411  		t.Errorf("Unexpected diagnosis: (-want, +got): %s", diff)
  2412  	}
  2413  }
  2414  
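        // TestFindFitSomeError verifies that when only some nodes are rejected,
        // findNodesThatFitPod records a status for each failing node and leaves the
        // fitting node out of the NodeToStatusMap.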
  2415  func TestFindFitSomeError(t *testing.T) {
  2416  	ctx, cancel := context.WithCancel(context.Background())
  2417  	defer cancel()
  2418  
  2419  	nodes := makeNodeList([]string{"3", "2", "1"})
  2420  	scheduler := makeScheduler(ctx, nodes)
  2421  
  2422  	fwk, err := tf.NewFramework(
  2423  		ctx,
  2424  		[]tf.RegisterPluginFunc{
  2425  			tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2426  			tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  2427  			tf.RegisterFilterPlugin("MatchFilter", tf.NewMatchFilterPlugin),
  2428  			tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2429  		},
  2430  		"",
  2431  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  2432  	)
  2433  	if err != nil {
  2434  		t.Fatal(err)
  2435  	}
  2436  
  2437  	pod := st.MakePod().Name("1").UID("1").Obj()
  2438  	_, diagnosis, err := scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), pod)
  2439  	if err != nil {
  2440  		t.Errorf("unexpected error: %v", err)
  2441  	}
  2442  
  2443  	if len(diagnosis.NodeToStatusMap) != len(nodes)-1 {
  2444  		t.Errorf("unexpected failed status map: %v", diagnosis.NodeToStatusMap)
  2445  	}
  2446  
  2447  	if diff := cmp.Diff(sets.New("MatchFilter"), diagnosis.UnschedulablePlugins); diff != "" {
  2448  		t.Errorf("Unexpected unschedulablePlugins: (-want, +got): %s", diff)
  2449  	}
  2450  
  2451  	for _, node := range nodes {
  2452  		if node.Name == pod.Name {
  2453  			continue
  2454  		}
  2455  		t.Run(node.Name, func(t *testing.T) {
  2456  			status, found := diagnosis.NodeToStatusMap[node.Name]
  2457  			if !found {
  2458  				t.Errorf("failed to find node %v in %v", node.Name, diagnosis.NodeToStatusMap)
  2459  			}
  2460  			reasons := status.Reasons()
  2461  			if len(reasons) != 1 || reasons[0] != tf.ErrReasonFake {
  2462  				t.Errorf("unexpected failures: %v", reasons)
  2463  			}
  2464  		})
  2465  	}
  2466  }
  2467  
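        // TestFindFitPredicateCallCounts verifies how many times Filter plugins run when a
        // nominated pod exists on the node: once if the nominated pod has lower priority
        // than the pod being scheduled, and twice if it has higher priority.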
  2468  func TestFindFitPredicateCallCounts(t *testing.T) {
  2469  	tests := []struct {
  2470  		name          string
  2471  		pod           *v1.Pod
  2472  		expectedCount int32
  2473  	}{
  2474  		{
  2475  			name:          "nominated pods have lower priority, predicate is called once",
  2476  			pod:           st.MakePod().Name("1").UID("1").Priority(highPriority).Obj(),
  2477  			expectedCount: 1,
  2478  		},
  2479  		{
  2480  			name:          "nominated pods have higher priority, predicate is called twice",
  2481  			pod:           st.MakePod().Name("1").UID("1").Priority(lowPriority).Obj(),
  2482  			expectedCount: 2,
  2483  		},
  2484  	}
  2485  
  2486  	for _, test := range tests {
  2487  		t.Run(test.name, func(t *testing.T) {
  2488  			nodes := makeNodeList([]string{"1"})
  2489  
  2490  			plugin := tf.FakeFilterPlugin{}
  2491  			registerFakeFilterFunc := tf.RegisterFilterPlugin(
  2492  				"FakeFilter",
  2493  				func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
  2494  					return &plugin, nil
  2495  				},
  2496  			)
  2497  			registerPlugins := []tf.RegisterPluginFunc{
  2498  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2499  				registerFakeFilterFunc,
  2500  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2501  			}
  2502  			logger, ctx := ktesting.NewTestContext(t)
  2503  			ctx, cancel := context.WithCancel(ctx)
  2504  			defer cancel()
  2505  			fwk, err := tf.NewFramework(
  2506  				ctx,
  2507  				registerPlugins, "",
  2508  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  2509  			)
  2510  			if err != nil {
  2511  				t.Fatal(err)
  2512  			}
  2513  
  2514  			scheduler := makeScheduler(ctx, nodes)
  2515  			if err := scheduler.Cache.UpdateSnapshot(logger, scheduler.nodeInfoSnapshot); err != nil {
  2516  				t.Fatal(err)
  2517  			}
  2518  			podinfo, err := framework.NewPodInfo(st.MakePod().UID("nominated").Priority(midPriority).Obj())
  2519  			if err != nil {
  2520  				t.Fatal(err)
  2521  			}
  2522  			fwk.AddNominatedPod(logger, podinfo, &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: "1"})
  2523  
  2524  			_, _, err = scheduler.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
  2525  			if err != nil {
  2526  				t.Errorf("unexpected error: %v", err)
  2527  			}
  2528  			if test.expectedCount != plugin.NumFilterCalled {
  2529  				t.Errorf("predicate was called %d times, expected %d", plugin.NumFilterCalled, test.expectedCount)
  2530  			}
  2531  		})
  2532  	}
  2533  }
  2534  
  2535  // The point of this test is to show that you:
  2536  //   - get the same priority for a zero-request pod as for a pod with the default requests,
  2537  //     both when the zero-request pod is already on the node and when the zero-request pod
  2538  //     is the one being scheduled.
  2539  //   - don't just get the same score no matter what is being scheduled.
  2540  func TestZeroRequest(t *testing.T) {
  2541  	// A pod with no resources. We expect spreading to count it as having the default resources.
  2542  	noResources := v1.PodSpec{
  2543  		Containers: []v1.Container{
  2544  			{},
  2545  		},
  2546  	}
  2547  	noResources1 := noResources
  2548  	noResources1.NodeName = "node1"
  2549  	// A pod that explicitly requests the same resources a 0-request pod is treated as requesting by default (for spreading).
  2550  	small := v1.PodSpec{
  2551  		Containers: []v1.Container{
  2552  			{
  2553  				Resources: v1.ResourceRequirements{
  2554  					Requests: v1.ResourceList{
  2555  						v1.ResourceCPU: resource.MustParse(
  2556  							strconv.FormatInt(schedutil.DefaultMilliCPURequest, 10) + "m"),
  2557  						v1.ResourceMemory: resource.MustParse(
  2558  							strconv.FormatInt(schedutil.DefaultMemoryRequest, 10)),
  2559  					},
  2560  				},
  2561  			},
  2562  		},
  2563  	}
  2564  	small2 := small
  2565  	small2.NodeName = "node2"
  2566  	// A larger pod.
  2567  	large := v1.PodSpec{
  2568  		Containers: []v1.Container{
  2569  			{
  2570  				Resources: v1.ResourceRequirements{
  2571  					Requests: v1.ResourceList{
  2572  						v1.ResourceCPU: resource.MustParse(
  2573  							strconv.FormatInt(schedutil.DefaultMilliCPURequest*3, 10) + "m"),
  2574  						v1.ResourceMemory: resource.MustParse(
  2575  							strconv.FormatInt(schedutil.DefaultMemoryRequest*3, 10)),
  2576  					},
  2577  				},
  2578  			},
  2579  		},
  2580  	}
  2581  	large1 := large
  2582  	large1.NodeName = "node1"
  2583  	large2 := large
  2584  	large2.NodeName = "node2"
  2585  	tests := []struct {
  2586  		pod           *v1.Pod
  2587  		pods          []*v1.Pod
  2588  		nodes         []*v1.Node
  2589  		name          string
  2590  		expectedScore int64
  2591  	}{
  2592  		// The point of these next two tests is to show you get the same priority for a zero-request pod
  2593  		// as for a pod with the default requests, both when the zero-request pod is already on the node
  2594  		// and when the zero-request pod is the one being scheduled.
  2595  		{
  2596  			pod:   &v1.Pod{Spec: noResources},
  2597  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2598  			name:  "test priority of zero-request pod with node with zero-request pod",
  2599  			pods: []*v1.Pod{
  2600  				{Spec: large1}, {Spec: noResources1},
  2601  				{Spec: large2}, {Spec: small2},
  2602  			},
  2603  			expectedScore: 150,
  2604  		},
  2605  		{
  2606  			pod:   &v1.Pod{Spec: small},
  2607  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2608  			name:  "test priority of nonzero-request pod with node with zero-request pod",
  2609  			pods: []*v1.Pod{
  2610  				{Spec: large1}, {Spec: noResources1},
  2611  				{Spec: large2}, {Spec: small2},
  2612  			},
  2613  			expectedScore: 150,
  2614  		},
  2615  		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
  2616  		{
  2617  			pod:   &v1.Pod{Spec: large},
  2618  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2619  			name:  "test priority of larger pod with node with zero-request pod",
  2620  			pods: []*v1.Pod{
  2621  				{Spec: large1}, {Spec: noResources1},
  2622  				{Spec: large2}, {Spec: small2},
  2623  			},
  2624  			expectedScore: 130,
  2625  		},
  2626  	}
  2627  
  2628  	for _, test := range tests {
  2629  		t.Run(test.name, func(t *testing.T) {
  2630  			client := clientsetfake.NewSimpleClientset()
  2631  			informerFactory := informers.NewSharedInformerFactory(client, 0)
  2632  
  2633  			snapshot := internalcache.NewSnapshot(test.pods, test.nodes)
  2634  			fts := feature.Features{}
  2635  			pluginRegistrations := []tf.RegisterPluginFunc{
  2636  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2637  				tf.RegisterScorePlugin(noderesources.Name, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1),
  2638  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1),
  2639  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2640  			}
  2641  			ctx, cancel := context.WithCancel(context.Background())
  2642  			defer cancel()
  2643  			fwk, err := tf.NewFramework(
  2644  				ctx,
  2645  				pluginRegistrations, "",
  2646  				frameworkruntime.WithInformerFactory(informerFactory),
  2647  				frameworkruntime.WithSnapshotSharedLister(snapshot),
  2648  				frameworkruntime.WithClientSet(client),
  2649  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  2650  			)
  2651  			if err != nil {
  2652  				t.Fatalf("error creating framework: %+v", err)
  2653  			}
  2654  
  2655  			sched := &Scheduler{
  2656  				nodeInfoSnapshot:         snapshot,
  2657  				percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  2658  			}
  2659  			sched.applyDefaultHandlers()
  2660  
  2661  			state := framework.NewCycleState()
  2662  			_, _, err = sched.findNodesThatFitPod(ctx, fwk, state, test.pod)
  2663  			if err != nil {
  2664  				t.Fatalf("error filtering nodes: %+v", err)
  2665  			}
  2666  			fwk.RunPreScorePlugins(ctx, state, test.pod, test.nodes)
  2667  			list, err := prioritizeNodes(ctx, nil, fwk, state, test.pod, test.nodes)
  2668  			if err != nil {
  2669  				t.Errorf("unexpected error: %v", err)
  2670  			}
  2671  			for _, hp := range list {
  2672  				if hp.TotalScore != test.expectedScore {
  2673  					t.Errorf("expected %d for all priorities, got list %#v", test.expectedScore, list)
  2674  				}
  2675  			}
  2676  		})
  2677  	}
  2678  }
  2679  
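        // Test_prioritizeNodes verifies that prioritizeNodes records the score of every score plugin
        // and extender in the returned NodePluginScores, and that a plugin returning Skip in PreScore
        // is not executed in the Score phase.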
  2680  func Test_prioritizeNodes(t *testing.T) {
  2681  	imageStatus1 := []v1.ContainerImage{
  2682  		{
  2683  			Names: []string{
  2684  				"gcr.io/40:latest",
  2685  				"gcr.io/40:v1",
  2686  			},
  2687  			SizeBytes: int64(80 * mb),
  2688  		},
  2689  		{
  2690  			Names: []string{
  2691  				"gcr.io/300:latest",
  2692  				"gcr.io/300:v1",
  2693  			},
  2694  			SizeBytes: int64(300 * mb),
  2695  		},
  2696  	}
  2697  
  2698  	imageStatus2 := []v1.ContainerImage{
  2699  		{
  2700  			Names: []string{
  2701  				"gcr.io/300:latest",
  2702  			},
  2703  			SizeBytes: int64(300 * mb),
  2704  		},
  2705  		{
  2706  			Names: []string{
  2707  				"gcr.io/40:latest",
  2708  				"gcr.io/40:v1",
  2709  			},
  2710  			SizeBytes: int64(80 * mb),
  2711  		},
  2712  	}
  2713  
  2714  	imageStatus3 := []v1.ContainerImage{
  2715  		{
  2716  			Names: []string{
  2717  				"gcr.io/600:latest",
  2718  			},
  2719  			SizeBytes: int64(600 * mb),
  2720  		},
  2721  		{
  2722  			Names: []string{
  2723  				"gcr.io/40:latest",
  2724  			},
  2725  			SizeBytes: int64(80 * mb),
  2726  		},
  2727  		{
  2728  			Names: []string{
  2729  				"gcr.io/900:latest",
  2730  			},
  2731  			SizeBytes: int64(900 * mb),
  2732  		},
  2733  	}
  2734  	tests := []struct {
  2735  		name                string
  2736  		pod                 *v1.Pod
  2737  		pods                []*v1.Pod
  2738  		nodes               []*v1.Node
  2739  		pluginRegistrations []tf.RegisterPluginFunc
  2740  		extenders           []tf.FakeExtender
  2741  		want                []framework.NodePluginScores
  2742  	}{
  2743  		{
  2744  			name:  "the score from all plugins should be recorded in PluginToNodeScores",
  2745  			pod:   &v1.Pod{},
  2746  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2747  			pluginRegistrations: []tf.RegisterPluginFunc{
  2748  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2749  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1),
  2750  				tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 1),
  2751  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2752  			},
  2753  			extenders: nil,
  2754  			want: []framework.NodePluginScores{
  2755  				{
  2756  					Name: "node1",
  2757  					Scores: []framework.PluginScore{
  2758  						{
  2759  							Name:  "Node2Prioritizer",
  2760  							Score: 10,
  2761  						},
  2762  						{
  2763  							Name:  "NodeResourcesBalancedAllocation",
  2764  							Score: 100,
  2765  						},
  2766  					},
  2767  					TotalScore: 110,
  2768  				},
  2769  				{
  2770  					Name: "node2",
  2771  					Scores: []framework.PluginScore{
  2772  						{
  2773  							Name:  "Node2Prioritizer",
  2774  							Score: 100,
  2775  						},
  2776  						{
  2777  							Name:  "NodeResourcesBalancedAllocation",
  2778  							Score: 100,
  2779  						},
  2780  					},
  2781  					TotalScore: 200,
  2782  				},
  2783  			},
  2784  		},
  2785  		{
  2786  			name:  "the score from extender should also be recorded in PluginToNodeScores with plugin scores",
  2787  			pod:   &v1.Pod{},
  2788  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2789  			pluginRegistrations: []tf.RegisterPluginFunc{
  2790  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2791  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1),
  2792  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2793  			},
  2794  			extenders: []tf.FakeExtender{
  2795  				{
  2796  					ExtenderName: "FakeExtender1",
  2797  					Weight:       1,
  2798  					Prioritizers: []tf.PriorityConfig{
  2799  						{
  2800  							Weight:   3,
  2801  							Function: tf.Node1PrioritizerExtender,
  2802  						},
  2803  					},
  2804  				},
  2805  				{
  2806  					ExtenderName: "FakeExtender2",
  2807  					Weight:       1,
  2808  					Prioritizers: []tf.PriorityConfig{
  2809  						{
  2810  							Weight:   2,
  2811  							Function: tf.Node2PrioritizerExtender,
  2812  						},
  2813  					},
  2814  				},
  2815  			},
  2816  			want: []framework.NodePluginScores{
  2817  				{
  2818  					Name: "node1",
  2819  					Scores: []framework.PluginScore{
  2820  
  2821  						{
  2822  							Name:  "FakeExtender1",
  2823  							Score: 300,
  2824  						},
  2825  						{
  2826  							Name:  "FakeExtender2",
  2827  							Score: 20,
  2828  						},
  2829  						{
  2830  							Name:  "NodeResourcesBalancedAllocation",
  2831  							Score: 100,
  2832  						},
  2833  					},
  2834  					TotalScore: 420,
  2835  				},
  2836  				{
  2837  					Name: "node2",
  2838  					Scores: []framework.PluginScore{
  2839  						{
  2840  							Name:  "FakeExtender1",
  2841  							Score: 30,
  2842  						},
  2843  						{
  2844  							Name:  "FakeExtender2",
  2845  							Score: 200,
  2846  						},
  2847  						{
  2848  							Name:  "NodeResourcesBalancedAllocation",
  2849  							Score: 100,
  2850  						},
  2851  					},
  2852  					TotalScore: 330,
  2853  				},
  2854  			},
  2855  		},
  2856  		{
  2857  			name:  "plugin which returned skip in preScore shouldn't be executed in the score phase",
  2858  			pod:   &v1.Pod{},
  2859  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2860  			pluginRegistrations: []tf.RegisterPluginFunc{
  2861  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2862  				tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewBalancedAllocation), 1),
  2863  				tf.RegisterScorePlugin("Node2Prioritizer", tf.NewNode2PrioritizerPlugin(), 1),
  2864  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2865  				tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0,
  2866  					framework.NewStatus(framework.Skip, "fake skip"),
  2867  					framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"),
  2868  				), "PreScore", "Score"),
  2869  			},
  2870  			extenders: nil,
  2871  			want: []framework.NodePluginScores{
  2872  				{
  2873  					Name: "node1",
  2874  					Scores: []framework.PluginScore{
  2875  						{
  2876  							Name:  "Node2Prioritizer",
  2877  							Score: 10,
  2878  						},
  2879  						{
  2880  							Name:  "NodeResourcesBalancedAllocation",
  2881  							Score: 100,
  2882  						},
  2883  					},
  2884  					TotalScore: 110,
  2885  				},
  2886  				{
  2887  					Name: "node2",
  2888  					Scores: []framework.PluginScore{
  2889  						{
  2890  							Name:  "Node2Prioritizer",
  2891  							Score: 100,
  2892  						},
  2893  						{
  2894  							Name:  "NodeResourcesBalancedAllocation",
  2895  							Score: 100,
  2896  						},
  2897  					},
  2898  					TotalScore: 200,
  2899  				},
  2900  			},
  2901  		},
  2902  		{
  2903  			name:  "all score plugins are skipped",
  2904  			pod:   &v1.Pod{},
  2905  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10)},
  2906  			pluginRegistrations: []tf.RegisterPluginFunc{
  2907  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2908  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2909  				tf.RegisterPluginAsExtensions("FakePreScoreAndScorePlugin", tf.NewFakePreScoreAndScorePlugin("FakePreScoreAndScorePlugin", 0,
  2910  					framework.NewStatus(framework.Skip, "fake skip"),
  2911  					framework.NewStatus(framework.Error, "this score function shouldn't be executed because this plugin returned Skip in the PreScore"),
  2912  				), "PreScore", "Score"),
  2913  			},
  2914  			extenders: nil,
  2915  			want: []framework.NodePluginScores{
  2916  				{Name: "node1", Scores: []framework.PluginScore{}},
  2917  				{Name: "node2", Scores: []framework.PluginScore{}},
  2918  			},
  2919  		},
  2920  		{
  2921  			name: "the score from Image Locality plugin with image in all nodes",
  2922  			pod: &v1.Pod{
  2923  				Spec: v1.PodSpec{
  2924  					Containers: []v1.Container{
  2925  						{
  2926  							Image: "gcr.io/40",
  2927  						},
  2928  					},
  2929  				},
  2930  			},
  2931  			nodes: []*v1.Node{
  2932  				makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10, imageStatus1...),
  2933  				makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10, imageStatus2...),
  2934  				makeNode("node3", 1000, schedutil.DefaultMemoryRequest*10, imageStatus3...),
  2935  			},
  2936  			pluginRegistrations: []tf.RegisterPluginFunc{
  2937  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2938  				tf.RegisterScorePlugin(imagelocality.Name, imagelocality.New, 1),
  2939  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2940  			},
  2941  			extenders: nil,
  2942  			want: []framework.NodePluginScores{
  2943  				{
  2944  					Name: "node1",
  2945  					Scores: []framework.PluginScore{
  2946  						{
  2947  							Name:  "ImageLocality",
  2948  							Score: 5,
  2949  						},
  2950  					},
  2951  					TotalScore: 5,
  2952  				},
  2953  				{
  2954  					Name: "node2",
  2955  					Scores: []framework.PluginScore{
  2956  						{
  2957  							Name:  "ImageLocality",
  2958  							Score: 5,
  2959  						},
  2960  					},
  2961  					TotalScore: 5,
  2962  				},
  2963  				{
  2964  					Name: "node3",
  2965  					Scores: []framework.PluginScore{
  2966  						{
  2967  							Name:  "ImageLocality",
  2968  							Score: 5,
  2969  						},
  2970  					},
  2971  					TotalScore: 5,
  2972  				},
  2973  			},
  2974  		},
  2975  		{
  2976  			name: "the score from Image Locality plugin with image in partial nodes",
  2977  			pod: &v1.Pod{
  2978  				Spec: v1.PodSpec{
  2979  					Containers: []v1.Container{
  2980  						{
  2981  							Image: "gcr.io/300",
  2982  						},
  2983  					},
  2984  				},
  2985  			},
  2986  			nodes: []*v1.Node{makeNode("node1", 1000, schedutil.DefaultMemoryRequest*10, imageStatus1...),
  2987  				makeNode("node2", 1000, schedutil.DefaultMemoryRequest*10, imageStatus2...),
  2988  				makeNode("node3", 1000, schedutil.DefaultMemoryRequest*10, imageStatus3...),
  2989  			},
  2990  			pluginRegistrations: []tf.RegisterPluginFunc{
  2991  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  2992  				tf.RegisterScorePlugin(imagelocality.Name, imagelocality.New, 1),
  2993  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  2994  			},
  2995  			extenders: nil,
  2996  			want: []framework.NodePluginScores{
  2997  				{
  2998  					Name: "node1",
  2999  					Scores: []framework.PluginScore{
  3000  						{
  3001  							Name:  "ImageLocality",
  3002  							Score: 18,
  3003  						},
  3004  					},
  3005  					TotalScore: 18,
  3006  				},
  3007  				{
  3008  					Name: "node2",
  3009  					Scores: []framework.PluginScore{
  3010  						{
  3011  							Name:  "ImageLocality",
  3012  							Score: 18,
  3013  						},
  3014  					},
  3015  					TotalScore: 18,
  3016  				},
  3017  				{
  3018  					Name: "node3",
  3019  					Scores: []framework.PluginScore{
  3020  						{
  3021  							Name:  "ImageLocality",
  3022  							Score: 0,
  3023  						},
  3024  					},
  3025  					TotalScore: 0,
  3026  				},
  3027  			},
  3028  		},
  3029  	}
  3030  
  3031  	for _, test := range tests {
  3032  		t.Run(test.name, func(t *testing.T) {
  3033  			client := clientsetfake.NewSimpleClientset()
  3034  			informerFactory := informers.NewSharedInformerFactory(client, 0)
  3035  
  3036  			ctx, cancel := context.WithCancel(context.Background())
  3037  			defer cancel()
  3038  			cache := internalcache.New(ctx, time.Duration(0))
  3039  			for _, node := range test.nodes {
  3040  				cache.AddNode(klog.FromContext(ctx), node)
  3041  			}
  3042  			snapshot := internalcache.NewEmptySnapshot()
  3043  			if err := cache.UpdateSnapshot(klog.FromContext(ctx), snapshot); err != nil {
  3044  				t.Fatal(err)
  3045  			}
  3046  			fwk, err := tf.NewFramework(
  3047  				ctx,
  3048  				test.pluginRegistrations, "",
  3049  				frameworkruntime.WithInformerFactory(informerFactory),
  3050  				frameworkruntime.WithSnapshotSharedLister(snapshot),
  3051  				frameworkruntime.WithClientSet(client),
  3052  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  3053  			)
  3054  			if err != nil {
  3055  				t.Fatalf("error creating framework: %+v", err)
  3056  			}
  3057  
  3058  			state := framework.NewCycleState()
  3059  			var extenders []framework.Extender
  3060  			for ii := range test.extenders {
  3061  				extenders = append(extenders, &test.extenders[ii])
  3062  			}
  3063  			nodesscores, err := prioritizeNodes(ctx, extenders, fwk, state, test.pod, test.nodes)
  3064  			if err != nil {
  3065  				t.Errorf("unexpected error: %v", err)
  3066  			}
  3067  			for i := range nodesscores {
  3068  				sort.Slice(nodesscores[i].Scores, func(j, k int) bool {
  3069  					return nodesscores[i].Scores[j].Name < nodesscores[i].Scores[k].Name
  3070  				})
  3071  			}
  3072  
  3073  			if diff := cmp.Diff(test.want, nodesscores); diff != "" {
  3074  				t.Errorf("returned nodes scores (-want,+got):\n%s", diff)
  3075  			}
  3076  		})
  3077  	}
  3078  }
  3079  
  3080  var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
  3081  
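        // TestNumFeasibleNodesToFind verifies that numFeasibleNodesToFind prefers the per-profile
        // percentageOfNodesToScore over the global setting and falls back to the adaptive default
        // when neither is set.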
  3082  func TestNumFeasibleNodesToFind(t *testing.T) {
  3083  	tests := []struct {
  3084  		name              string
  3085  		globalPercentage  int32
  3086  		profilePercentage *int32
  3087  		numAllNodes       int32
  3088  		wantNumNodes      int32
  3089  	}{
  3090  		{
  3091  			name:         "not set percentageOfNodesToScore and nodes number not more than 50",
  3092  			numAllNodes:  10,
  3093  			wantNumNodes: 10,
  3094  		},
  3095  		{
  3096  			name:              "set profile percentageOfNodesToScore and nodes number not more than 50",
  3097  			profilePercentage: ptr.To[int32](40),
  3098  			numAllNodes:       10,
  3099  			wantNumNodes:      10,
  3100  		},
  3101  		{
  3102  			name:         "not set percentageOfNodesToScore and nodes number more than 50",
  3103  			numAllNodes:  1000,
  3104  			wantNumNodes: 420,
  3105  		},
  3106  		{
  3107  			name:              "set profile percentageOfNodesToScore and nodes number more than 50",
  3108  			profilePercentage: ptr.To[int32](40),
  3109  			numAllNodes:       1000,
  3110  			wantNumNodes:      400,
  3111  		},
  3112  		{
  3113  			name:              "set global and profile percentageOfNodesToScore and nodes number more than 50",
  3114  			globalPercentage:  100,
  3115  			profilePercentage: ptr.To[int32](40),
  3116  			numAllNodes:       1000,
  3117  			wantNumNodes:      400,
  3118  		},
  3119  		{
  3120  			name:             "set global percentageOfNodesToScore and nodes number more than 50",
  3121  			globalPercentage: 40,
  3122  			numAllNodes:      1000,
  3123  			wantNumNodes:     400,
  3124  		},
  3125  		{
  3126  			name:         "not set profile percentageOfNodesToScore and nodes number more than 50*125",
  3127  			numAllNodes:  6000,
  3128  			wantNumNodes: 300,
  3129  		},
  3130  		{
  3131  			name:              "set profile percentageOfNodesToScore and nodes number more than 50*125",
  3132  			profilePercentage: ptr.To[int32](40),
  3133  			numAllNodes:       6000,
  3134  			wantNumNodes:      2400,
  3135  		},
  3136  	}
  3137  
  3138  	for _, tt := range tests {
  3139  		t.Run(tt.name, func(t *testing.T) {
  3140  			sched := &Scheduler{
  3141  				percentageOfNodesToScore: tt.globalPercentage,
  3142  			}
  3143  			if gotNumNodes := sched.numFeasibleNodesToFind(tt.profilePercentage, tt.numAllNodes); gotNumNodes != tt.wantNumNodes {
  3144  				t.Errorf("Scheduler.numFeasibleNodesToFind() = %v, want %v", gotNumNodes, tt.wantNumNodes)
  3145  			}
  3146  		})
  3147  	}
  3148  }
  3149  
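        // TestFairEvaluationForNodes verifies that findNodesThatFitPod resumes each search from
        // nextStartNodeIndex so that all nodes are evaluated evenly across scheduling cycles.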
  3150  func TestFairEvaluationForNodes(t *testing.T) {
  3151  	numAllNodes := 500
  3152  	nodeNames := make([]string, 0, numAllNodes)
  3153  	for i := 0; i < numAllNodes; i++ {
  3154  		nodeNames = append(nodeNames, strconv.Itoa(i))
  3155  	}
  3156  	nodes := makeNodeList(nodeNames)
  3157  	ctx, cancel := context.WithCancel(context.Background())
  3158  	defer cancel()
  3159  	sched := makeScheduler(ctx, nodes)
  3160  
  3161  	fwk, err := tf.NewFramework(
  3162  		ctx,
  3163  		[]tf.RegisterPluginFunc{
  3164  			tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3165  			tf.RegisterFilterPlugin("TrueFilter", tf.NewTrueFilterPlugin),
  3166  			tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3167  		},
  3168  		"",
  3169  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(nil)),
  3170  	)
  3171  	if err != nil {
  3172  		t.Fatal(err)
  3173  	}
  3174  
  3175  	// Choose percentageOfNodesToScore so that numAllNodes % nodesToFind != 0.
  3176  	sched.percentageOfNodesToScore = 30
  3177  	nodesToFind := int(sched.numFeasibleNodesToFind(fwk.PercentageOfNodesToScore(), int32(numAllNodes)))
  3178  
  3179  	// Iterating over all nodes more than twice
  3180  	for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
  3181  		nodesThatFit, _, err := sched.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), &v1.Pod{})
  3182  		if err != nil {
  3183  			t.Errorf("unexpected error: %v", err)
  3184  		}
  3185  		if len(nodesThatFit) != nodesToFind {
  3186  			t.Errorf("got %d nodes filtered, want %d", len(nodesThatFit), nodesToFind)
  3187  		}
  3188  		if sched.nextStartNodeIndex != (i+1)*nodesToFind%numAllNodes {
  3189  			t.Errorf("got %d lastProcessedNodeIndex, want %d", sched.nextStartNodeIndex, (i+1)*nodesToFind%numAllNodes)
  3190  		}
  3191  	}
  3192  }
  3193  
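        // TestPreferNominatedNodeFilterCallCounts verifies that when a pod's nominated node passes
        // the Filter plugin only that node is evaluated, while an absent or failing nominated node
        // causes every node to be filtered.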
  3194  func TestPreferNominatedNodeFilterCallCounts(t *testing.T) {
  3195  	tests := []struct {
  3196  		name                  string
  3197  		pod                   *v1.Pod
  3198  		nodeReturnCodeMap     map[string]framework.Code
  3199  		expectedCount         int32
  3200  		expectedPatchRequests int
  3201  	}{
  3202  		{
  3203  			name:          "pod has the nominated node set, filter is called only once",
  3204  			pod:           st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
  3205  			expectedCount: 1,
  3206  		},
  3207  		{
  3208  			name:          "pod without the nominated node, filter is called for each node",
  3209  			pod:           st.MakePod().Name("p_without_nominated_node").UID("p").Priority(highPriority).Obj(),
  3210  			expectedCount: 3,
  3211  		},
  3212  		{
  3213  			name:              "nominated pod cannot pass the filter, filter is called for each node",
  3214  			pod:               st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
  3215  			nodeReturnCodeMap: map[string]framework.Code{"node1": framework.Unschedulable},
  3216  			expectedCount:     4,
  3217  		},
  3218  	}
  3219  
  3220  	for _, test := range tests {
  3221  		t.Run(test.name, func(t *testing.T) {
  3222  			logger, ctx := ktesting.NewTestContext(t)
  3223  			ctx, cancel := context.WithCancel(ctx)
  3224  			defer cancel()
  3225  
  3226  			// create three nodes in the cluster.
  3227  			nodes := makeNodeList([]string{"node1", "node2", "node3"})
  3228  			client := clientsetfake.NewSimpleClientset(test.pod)
  3229  			informerFactory := informers.NewSharedInformerFactory(client, 0)
  3230  			cache := internalcache.New(ctx, time.Duration(0))
  3231  			for _, n := range nodes {
  3232  				cache.AddNode(logger, n)
  3233  			}
  3234  			plugin := tf.FakeFilterPlugin{FailedNodeReturnCodeMap: test.nodeReturnCodeMap}
  3235  			registerFakeFilterFunc := tf.RegisterFilterPlugin(
  3236  				"FakeFilter",
  3237  				func(_ context.Context, _ runtime.Object, fh framework.Handle) (framework.Plugin, error) {
  3238  					return &plugin, nil
  3239  				},
  3240  			)
  3241  			registerPlugins := []tf.RegisterPluginFunc{
  3242  				tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3243  				registerFakeFilterFunc,
  3244  				tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3245  			}
  3246  			fwk, err := tf.NewFramework(
  3247  				ctx,
  3248  				registerPlugins, "",
  3249  				frameworkruntime.WithClientSet(client),
  3250  				frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  3251  			)
  3252  			if err != nil {
  3253  				t.Fatal(err)
  3254  			}
  3255  			snapshot := internalcache.NewSnapshot(nil, nodes)
  3256  
  3257  			sched := &Scheduler{
  3258  				Cache:                    cache,
  3259  				nodeInfoSnapshot:         snapshot,
  3260  				percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  3261  			}
  3262  			sched.applyDefaultHandlers()
  3263  
  3264  			_, _, err = sched.findNodesThatFitPod(ctx, fwk, framework.NewCycleState(), test.pod)
  3265  			if err != nil {
  3266  				t.Errorf("unexpected error: %v", err)
  3267  			}
  3268  			if test.expectedCount != plugin.NumFilterCalled {
  3269  				t.Errorf("predicate was called %d times, want %d", plugin.NumFilterCalled, test.expectedCount)
  3270  			}
  3271  		})
  3272  	}
  3273  }
  3274  
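        // podWithID returns a pod with the given name and UID, assigned to desiredHost and handled
        // by the test scheduler.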
  3275  func podWithID(id, desiredHost string) *v1.Pod {
  3276  	return st.MakePod().Name(id).UID(id).Node(desiredHost).SchedulerName(testSchedulerName).Obj()
  3277  }
  3278  
  3279  func deletingPod(id string) *v1.Pod {
  3280  	return st.MakePod().Name(id).UID(id).Terminating().Node("").SchedulerName(testSchedulerName).Obj()
  3281  }
  3282  
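        // podWithPort returns a pod whose single container requests the given host port.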
  3283  func podWithPort(id, desiredHost string, port int) *v1.Pod {
  3284  	pod := podWithID(id, desiredHost)
  3285  	pod.Spec.Containers = []v1.Container{
  3286  		{Name: "ctr", Ports: []v1.ContainerPort{{HostPort: int32(port)}}},
  3287  	}
  3288  	return pod
  3289  }
  3290  
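        // podWithResources returns a pod whose single container carries the given resource limits
        // and requests.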
  3291  func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v1.ResourceList) *v1.Pod {
  3292  	pod := podWithID(id, desiredHost)
  3293  	pod.Spec.Containers = []v1.Container{
  3294  		{Name: "ctr", Resources: v1.ResourceRequirements{Limits: limits, Requests: requests}},
  3295  	}
  3296  	return pod
  3297  }
  3298  
  3299  func makeNodeList(nodeNames []string) []*v1.Node {
  3300  	result := make([]*v1.Node, 0, len(nodeNames))
  3301  	for _, nodeName := range nodeNames {
  3302  		result = append(result, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
  3303  	}
  3304  	return result
  3305  }
  3306  
  3307  // makeScheduler makes a simple Scheduler for testing.
  3308  func makeScheduler(ctx context.Context, nodes []*v1.Node) *Scheduler {
  3309  	logger := klog.FromContext(ctx)
  3310  	cache := internalcache.New(ctx, time.Duration(0))
  3311  	for _, n := range nodes {
  3312  		cache.AddNode(logger, n)
  3313  	}
  3314  
  3315  	sched := &Scheduler{
  3316  		Cache:                    cache,
  3317  		nodeInfoSnapshot:         emptySnapshot,
  3318  		percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  3319  	}
  3320  	sched.applyDefaultHandlers()
  3321  	cache.UpdateSnapshot(logger, sched.nodeInfoSnapshot)
  3322  	return sched
  3323  }
  3324  
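        // makeNode returns a Node with the given name whose capacity and allocatable are set to the
        // given milliCPU, memory and 100 pods, and whose status lists the optional container images.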
  3325  func makeNode(node string, milliCPU, memory int64, images ...v1.ContainerImage) *v1.Node {
  3326  	return &v1.Node{
  3327  		ObjectMeta: metav1.ObjectMeta{Name: node},
  3328  		Status: v1.NodeStatus{
  3329  			Capacity: v1.ResourceList{
  3330  				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
  3331  				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
  3332  				"pods":            *resource.NewQuantity(100, resource.DecimalSI),
  3333  			},
  3334  			Allocatable: v1.ResourceList{
  3335  
  3336  				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
  3337  				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
  3338  				"pods":            *resource.NewQuantity(100, resource.DecimalSI),
  3339  			},
  3340  			Images: images,
  3341  		},
  3342  	}
  3343  }
  3344  
  3345  // queuedPodStore: pods queued before processing.
  3346  // cache: scheduler cache that might contain assumed pods.
  3347  func setupTestSchedulerWithOnePodOnNode(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, scache internalcache.Cache,
  3348  	pod *v1.Pod, node *v1.Node, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
  3349  	scheduler, bindingChan, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, nil, nil, fns...)
  3350  
  3351  	queuedPodStore.Add(pod)
  3352  	// queuedPodStore: [foo:8080]
  3353  	// cache: []
  3354  
  3355  	scheduler.scheduleOne(ctx)
  3356  	// queuedPodStore: []
  3357  	// cache: [(assumed)foo:8080]
  3358  
  3359  	select {
  3360  	case b := <-bindingChan:
  3361  		expectBinding := &v1.Binding{
  3362  			ObjectMeta: metav1.ObjectMeta{Name: pod.Name, UID: types.UID(pod.Name)},
  3363  			Target:     v1.ObjectReference{Kind: "Node", Name: node.Name},
  3364  		}
  3365  		if !reflect.DeepEqual(expectBinding, b) {
  3366  			t.Errorf("binding want=%v, got=%v", expectBinding, b)
  3367  		}
  3368  	case <-time.After(wait.ForeverTestTimeout):
  3369  		t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
  3370  	}
  3371  	return scheduler, bindingChan, errChan
  3372  }
  3373  
  3374  // queuedPodStore: pods queued before processing.
  3375  // scache: scheduler cache that might contain assumed pods.
  3376  func setupTestScheduler(ctx context.Context, t *testing.T, queuedPodStore *clientcache.FIFO, cache internalcache.Cache, informerFactory informers.SharedInformerFactory, broadcaster events.EventBroadcaster, fns ...tf.RegisterPluginFunc) (*Scheduler, chan *v1.Binding, chan error) {
  3377  	bindingChan := make(chan *v1.Binding, 1)
  3378  	client := clientsetfake.NewSimpleClientset()
  3379  	client.PrependReactor("create", "pods", func(action clienttesting.Action) (bool, runtime.Object, error) {
  3380  		var b *v1.Binding
  3381  		if action.GetSubresource() == "binding" {
  3382  			b := action.(clienttesting.CreateAction).GetObject().(*v1.Binding)
  3383  			bindingChan <- b
  3384  		}
  3385  		return true, b, nil
  3386  	})
  3387  
  3388  	var recorder events.EventRecorder
  3389  	if broadcaster != nil {
  3390  		recorder = broadcaster.NewRecorder(scheme.Scheme, testSchedulerName)
  3391  	} else {
  3392  		recorder = &events.FakeRecorder{}
  3393  	}
  3394  
  3395  	if informerFactory == nil {
  3396  		informerFactory = informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0)
  3397  	}
  3398  	schedulingQueue := internalqueue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)
  3399  
  3400  	fwk, _ := tf.NewFramework(
  3401  		ctx,
  3402  		fns,
  3403  		testSchedulerName,
  3404  		frameworkruntime.WithClientSet(client),
  3405  		frameworkruntime.WithEventRecorder(recorder),
  3406  		frameworkruntime.WithInformerFactory(informerFactory),
  3407  		frameworkruntime.WithPodNominator(internalqueue.NewPodNominator(informerFactory.Core().V1().Pods().Lister())),
  3408  	)
  3409  
  3410  	errChan := make(chan error, 1)
  3411  	sched := &Scheduler{
  3412  		Cache:                    cache,
  3413  		client:                   client,
  3414  		nodeInfoSnapshot:         internalcache.NewEmptySnapshot(),
  3415  		percentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
  3416  		NextPod: func(logger klog.Logger) (*framework.QueuedPodInfo, error) {
  3417  			return &framework.QueuedPodInfo{PodInfo: mustNewPodInfo(t, clientcache.Pop(queuedPodStore).(*v1.Pod))}, nil
  3418  		},
  3419  		SchedulingQueue: schedulingQueue,
  3420  		Profiles:        profile.Map{testSchedulerName: fwk},
  3421  	}
  3422  
  3423  	sched.SchedulePod = sched.schedulePod
  3424  	sched.FailureHandler = func(_ context.Context, _ framework.Framework, p *framework.QueuedPodInfo, status *framework.Status, _ *framework.NominatingInfo, _ time.Time) {
  3425  		err := status.AsError()
  3426  		errChan <- err
  3427  
  3428  		msg := truncateMessage(err.Error())
  3429  		fwk.EventRecorder().Eventf(p.Pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", msg)
  3430  	}
  3431  	return sched, bindingChan, errChan
  3432  }
  3433  
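        // setupTestSchedulerWithVolumeBinding builds a scheduler with a single node, a queued pod
        // that references a PVC, and the VolumeBinding plugin backed by the provided volume binder.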
  3434  func setupTestSchedulerWithVolumeBinding(ctx context.Context, t *testing.T, volumeBinder volumebinding.SchedulerVolumeBinder, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
  3435  	logger := klog.FromContext(ctx)
  3436  	testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", UID: types.UID("node1")}}
  3437  	queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
  3438  	pod := podWithID("foo", "")
  3439  	pod.Namespace = "foo-ns"
  3440  	pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{Name: "testVol",
  3441  		VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "testPVC"}}})
  3442  	queuedPodStore.Add(pod)
  3443  	scache := internalcache.New(ctx, 10*time.Minute)
  3444  	scache.AddNode(logger, &testNode)
  3445  	testPVC := v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "testPVC", Namespace: pod.Namespace, UID: types.UID("testPVC")}}
  3446  	client := clientsetfake.NewSimpleClientset(&testNode, &testPVC)
  3447  	informerFactory := informers.NewSharedInformerFactory(client, 0)
  3448  	pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
  3449  	pvcInformer.Informer().GetStore().Add(&testPVC)
  3450  
  3451  	fns := []tf.RegisterPluginFunc{
  3452  		tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
  3453  		tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
  3454  		tf.RegisterPluginAsExtensions(volumebinding.Name, func(ctx context.Context, plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) {
  3455  			return &volumebinding.VolumeBinding{Binder: volumeBinder, PVCLister: pvcInformer.Lister()}, nil
  3456  		}, "PreFilter", "Filter", "Reserve", "PreBind"),
  3457  	}
  3458  	s, bindingChan, errChan := setupTestScheduler(ctx, t, queuedPodStore, scache, informerFactory, broadcaster, fns...)
  3459  	return s, bindingChan, errChan
  3460  }
  3461  
  3462  // This is a workaround because golint complains that errors cannot
  3463  // end with punctuation.  However, the real predicate error message does
  3464  // end with a period.
  3465  func makePredicateError(failReason string) error {
  3466  	s := fmt.Sprintf("0/1 nodes are available: %v.", failReason)
  3467  	return errors.New(s)
  3468  }
  3469  
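        // mustNewPodInfo converts a pod into a framework.PodInfo and fails the test on error.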
  3470  func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
  3471  	podInfo, err := framework.NewPodInfo(pod)
  3472  	if err != nil {
  3473  		t.Fatal(err)
  3474  	}
  3475  	return podInfo
  3476  }