k8s.io/kubernetes@v1.29.3/pkg/kubelet/kubelet_test.go

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"reflect"
	goruntime "runtime"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	oteltrace "go.opentelemetry.io/otel/trace"

	"github.com/golang/mock/gomock"
	cadvisorapi "github.com/google/cadvisor/info/v1"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	core "k8s.io/client-go/testing"
	"k8s.io/mount-utils"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/flowcontrol"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	internalapi "k8s.io/cri-api/pkg/apis"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	"k8s.io/klog/v2/ktesting"
	"k8s.io/kubernetes/pkg/features"
	kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
	"k8s.io/kubernetes/pkg/kubelet/clustertrustbundle"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/config"
	"k8s.io/kubernetes/pkg/kubelet/configmap"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	"k8s.io/kubernetes/pkg/kubelet/cri/remote"
	fakeremote "k8s.io/kubernetes/pkg/kubelet/cri/remote/fake"
	"k8s.io/kubernetes/pkg/kubelet/eviction"
	"k8s.io/kubernetes/pkg/kubelet/images"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	"k8s.io/kubernetes/pkg/kubelet/logs"
	"k8s.io/kubernetes/pkg/kubelet/network/dns"
	"k8s.io/kubernetes/pkg/kubelet/nodeshutdown"
	"k8s.io/kubernetes/pkg/kubelet/pleg"
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
	probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
	"k8s.io/kubernetes/pkg/kubelet/secret"
	"k8s.io/kubernetes/pkg/kubelet/server"
	serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
	"k8s.io/kubernetes/pkg/kubelet/stats"
	"k8s.io/kubernetes/pkg/kubelet/status"
	"k8s.io/kubernetes/pkg/kubelet/status/state"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	"k8s.io/kubernetes/pkg/kubelet/token"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
	"k8s.io/kubernetes/pkg/kubelet/util/queue"
	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/util/oom"
	"k8s.io/kubernetes/pkg/volume"
	_ "k8s.io/kubernetes/pkg/volume/hostpath"
	volumesecret "k8s.io/kubernetes/pkg/volume/secret"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/hostutil"
	"k8s.io/kubernetes/pkg/volume/util/subpath"
	"k8s.io/utils/clock"
	testingclock "k8s.io/utils/clock/testing"
	utilpointer "k8s.io/utils/pointer"
)

func init() {
}

const (
	testKubeletHostname = "127.0.0.1"
	testKubeletHostIP   = "127.0.0.1"
	testKubeletHostIPv6 = "::1"

	// TODO(harry) any global place for these two?
	// Reasonable size range for container images; the 90th percentile of images on Docker Hub falls into this range.
	minImgSize int64 = 23 * 1024 * 1024
	maxImgSize int64 = 1000 * 1024 * 1024
)

// fakeImageGCManager is a fake image GC manager for testing. It returns the
// image list from the fake runtime directly instead of caching it.
type fakeImageGCManager struct {
	fakeImageService kubecontainer.ImageService
	images.ImageGCManager
}

func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) {
	return f.fakeImageService.ListImages(context.Background())
}

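// TestKubelet bundles the Kubelet under test with the fakes it is wired to,
// so tests can reach into the fake runtime, container manager, clients,
// clock, and volume plugin directly. Typical usage in the tests below:
//
//	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
//	defer testKubelet.Cleanup()
//	kubelet := testKubelet.kubelet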
type TestKubelet struct {
	kubelet              *Kubelet
	fakeRuntime          *containertest.FakeRuntime
	fakeContainerManager *cm.FakeContainerManager
	fakeKubeClient       *fake.Clientset
	fakeMirrorClient     *podtest.FakeMirrorClient
	fakeClock            *testingclock.FakeClock
	mounter              mount.Interface
	volumePlugin         *volumetest.FakeVolumePlugin
}

func (tk *TestKubelet) Cleanup() {
	if tk.kubelet != nil {
		os.RemoveAll(tk.kubelet.rootDirectory)
		tk.kubelet = nil
	}
}

// newTestKubelet returns a test kubelet preloaded with two fake images.
func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubelet {
	imageList := []kubecontainer.Image{
		{
			ID:       "abc",
			RepoTags: []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
			Size:     123,
		},
		{
			ID:       "efg",
			RepoTags: []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
			Size:     456,
		},
	}
	return newTestKubeletWithImageList(t, imageList, controllerAttachDetachEnabled, true /*initFakeVolumePlugin*/, true /*localStorageCapacityIsolation*/)
}

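// newTestKubeletWithImageList builds a Kubelet wired entirely to fakes: the
// container runtime, cadvisor, API clients, managers, and (optionally) a fake
// volume plugin. The returned TestKubelet owns a temporary root directory, so
// callers should defer Cleanup.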
func newTestKubeletWithImageList(
	t *testing.T,
	imageList []kubecontainer.Image,
	controllerAttachDetachEnabled bool,
	initFakeVolumePlugin bool,
	localStorageCapacityIsolation bool,
) *TestKubelet {
	logger, _ := ktesting.NewTestContext(t)

	fakeRuntime := &containertest.FakeRuntime{
		ImageList: imageList,
		// Set ready conditions by default.
		RuntimeStatus: &kubecontainer.RuntimeStatus{
			Conditions: []kubecontainer.RuntimeCondition{
				{Type: "RuntimeReady", Status: true},
				{Type: "NetworkReady", Status: true},
			},
		},
		VersionInfo: "1.5.0",
		RuntimeType: "test",
		T:           t,
	}

	fakeRecorder := &record.FakeRecorder{}
	fakeKubeClient := &fake.Clientset{}
	kubelet := &Kubelet{}
	kubelet.recorder = fakeRecorder
	kubelet.kubeClient = fakeKubeClient
	kubelet.heartbeatClient = fakeKubeClient
	kubelet.os = &containertest.FakeOS{}
	kubelet.mounter = mount.NewFakeMounter(nil)
	kubelet.hostutil = hostutil.NewFakeHostUtil(nil)
	kubelet.subpather = &subpath.FakeSubpath{}

	kubelet.hostname = testKubeletHostname
	kubelet.nodeName = types.NodeName(testKubeletHostname)
	kubelet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
	kubelet.runtimeState.setNetworkState(nil)
	if tempDir, err := os.MkdirTemp("", "kubelet_test."); err != nil {
		t.Fatalf("can't make a temp rootdir: %v", err)
	} else {
		kubelet.rootDirectory = tempDir
	}
	if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
		t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
	}
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
	kubelet.serviceLister = testServiceLister{}
	kubelet.serviceHasSynced = func() bool { return true }
	kubelet.nodeHasSynced = func() bool { return true }
	kubelet.nodeLister = testNodeLister{
		nodes: []*v1.Node{
			{
				ObjectMeta: metav1.ObjectMeta{
					Name: string(kubelet.nodeName),
				},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:    v1.NodeReady,
							Status:  v1.ConditionTrue,
							Reason:  "Ready",
							Message: "Node ready",
						},
					},
					Addresses: []v1.NodeAddress{
						{
							Type:    v1.NodeInternalIP,
							Address: testKubeletHostIP,
						},
						{
							Type:    v1.NodeInternalIP,
							Address: testKubeletHostIPv6,
						},
					},
					VolumesAttached: []v1.AttachedVolume{
						{
							Name:       "fake/fake-device",
							DevicePath: "fake/path",
						},
					},
				},
			},
		},
	}
	kubelet.recorder = fakeRecorder
	if err := kubelet.setupDataDirs(); err != nil {
		t.Fatalf("can't initialize kubelet data dirs: %v", err)
	}
	kubelet.daemonEndpoints = &v1.NodeDaemonEndpoints{}

	kubelet.cadvisor = &cadvisortest.Fake{}
	machineInfo, _ := kubelet.cadvisor.MachineInfo()
	kubelet.setCachedMachineInfo(machineInfo)
	kubelet.tracer = oteltrace.NewNoopTracerProvider().Tracer("")

	fakeMirrorClient := podtest.NewFakeMirrorClient()
	secretManager := secret.NewSimpleSecretManager(kubelet.kubeClient)
	kubelet.secretManager = secretManager
	configMapManager := configmap.NewSimpleConfigMapManager(kubelet.kubeClient)
	kubelet.configMapManager = configMapManager
	kubelet.mirrorPodClient = fakeMirrorClient
	kubelet.podManager = kubepod.NewBasicPodManager()
	podStartupLatencyTracker := kubeletutil.NewPodStartupLatencyTracker()
	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{}, podStartupLatencyTracker, kubelet.getRootDir())
	kubelet.nodeStartupLatencyTracker = kubeletutil.NewNodeStartupLatencyTracker()

	kubelet.containerRuntime = fakeRuntime
	kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)
	kubelet.reasonCache = NewReasonCache()
	kubelet.podCache = containertest.NewFakeCache(kubelet.containerRuntime)
	kubelet.podWorkers = &fakePodWorkers{
		syncPodFn: kubelet.SyncPod,
		cache:     kubelet.podCache,
		t:         t,
	}

	kubelet.probeManager = probetest.FakeManager{}
	kubelet.livenessManager = proberesults.NewManager()
	kubelet.readinessManager = proberesults.NewManager()
	kubelet.startupManager = proberesults.NewManager()

	fakeContainerManager := cm.NewFakeContainerManager()
	kubelet.containerManager = fakeContainerManager
	fakeNodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      testKubeletHostname,
		UID:       types.UID(testKubeletHostname),
		Namespace: "",
	}

	volumeStatsAggPeriod := time.Second * 10
	kubelet.resourceAnalyzer = serverstats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.recorder)

	fakeHostStatsProvider := stats.NewFakeHostStatsProvider()

	kubelet.StatsProvider = stats.NewCadvisorStatsProvider(
		kubelet.cadvisor,
		kubelet.resourceAnalyzer,
		kubelet.podManager,
		kubelet.runtimeCache,
		fakeRuntime,
		kubelet.statusManager,
		fakeHostStatsProvider,
	)
	fakeImageGCPolicy := images.ImageGCPolicy{
		HighThresholdPercent: 90,
		LowThresholdPercent:  80,
	}
	imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, oteltrace.NewNoopTracerProvider())
	assert.NoError(t, err)
	kubelet.imageManager = &fakeImageGCManager{
		fakeImageService: fakeRuntime,
		ImageGCManager:   imageGCManager,
	}
	kubelet.containerLogManager = logs.NewStubContainerLogManager()
	containerGCPolicy := kubecontainer.GCPolicy{
		MinAge:             time.Duration(0),
		MaxPerPodContainer: 1,
		MaxContainers:      -1,
	}
	containerGC, err := kubecontainer.NewContainerGC(fakeRuntime, containerGCPolicy, kubelet.sourcesReady)
	assert.NoError(t, err)
	kubelet.containerGC = containerGC

	fakeClock := testingclock.NewFakeClock(time.Now())
	kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
	kubelet.backOff.Clock = fakeClock
	kubelet.resyncInterval = 10 * time.Second
	kubelet.workQueue = queue.NewBasicWorkQueue(fakeClock)
	// Relist period does not affect the tests.
	kubelet.pleg = pleg.NewGenericPLEG(fakeRuntime, make(chan *pleg.PodLifecycleEvent, 100), &pleg.RelistDuration{RelistPeriod: time.Hour, RelistThreshold: genericPlegRelistThreshold}, kubelet.podCache, clock.RealClock{})
	kubelet.clock = fakeClock

	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string(kubelet.nodeName),
		UID:       types.UID(kubelet.nodeName),
		Namespace: "",
	}
	// set up the eviction manager
	evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{},
		killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock, kubelet.supportLocalStorageCapacityIsolation())

	kubelet.evictionManager = evictionManager
	kubelet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)

	// set up the shutdown manager
	shutdownManager, shutdownAdmitHandler := nodeshutdown.NewManager(&nodeshutdown.Config{
		Logger:                          logger,
		ProbeManager:                    kubelet.probeManager,
		Recorder:                        fakeRecorder,
		NodeRef:                         nodeRef,
		GetPodsFunc:                     kubelet.podManager.GetPods,
		KillPodFunc:                     killPodNow(kubelet.podWorkers, fakeRecorder),
		SyncNodeStatusFunc:              func() {},
		ShutdownGracePeriodRequested:    0,
		ShutdownGracePeriodCriticalPods: 0,
	})
	kubelet.shutdownManager = shutdownManager
	kubelet.admitHandlers.AddPodAdmitHandler(shutdownAdmitHandler)

	// Add the predicate handler as the final pod admit handler.
	kubelet.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kubelet.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), kubelet.containerManager.UpdatePluginResources))

	allPlugins := []volume.VolumePlugin{}
	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	if initFakeVolumePlugin {
		allPlugins = append(allPlugins, plug)
	} else {
		allPlugins = append(allPlugins, volumesecret.ProbeVolumePlugins()...)
	}

	var prober volume.DynamicPluginProber // TODO (#51147) inject mock
	kubelet.volumePluginMgr, err =
		NewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), &clustertrustbundle.NoopManager{}, allPlugins, prober)
	require.NoError(t, err, "Failed to initialize VolumePluginMgr")

	kubelet.volumeManager = kubeletvolume.NewVolumeManager(
		controllerAttachDetachEnabled,
		kubelet.nodeName,
		kubelet.podManager,
		kubelet.podWorkers,
		fakeKubeClient,
		kubelet.volumePluginMgr,
		fakeRuntime,
		kubelet.mounter,
		kubelet.hostutil,
		kubelet.getPodsDir(),
		kubelet.recorder,
		false, /* keepTerminatedPodVolumes */
		volumetest.NewBlockVolumePathHandler())

	kubelet.pluginManager = pluginmanager.NewPluginManager(
		kubelet.getPluginsRegistrationDir(), /* sockDir */
		kubelet.recorder,
	)
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	// enable the active deadline handler
	activeDeadlineHandler, err := newActiveDeadlineHandler(kubelet.statusManager, kubelet.recorder, kubelet.clock)
	require.NoError(t, err, "Can't initialize active deadline handler")

	kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
	kubelet.AddPodSyncHandler(activeDeadlineHandler)
	kubelet.kubeletConfiguration.LocalStorageCapacityIsolation = localStorageCapacityIsolation
	return &TestKubelet{kubelet, fakeRuntime, fakeContainerManager, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
}

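// newTestPods returns count pods with host networking enabled, sequential
// UIDs, and names of the form pod0, pod1, ...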
func newTestPods(count int) []*v1.Pod {
	pods := make([]*v1.Pod, count)
	for i := 0; i < count; i++ {
		pods[i] = &v1.Pod{
			Spec: v1.PodSpec{
				HostNetwork: true,
			},
			ObjectMeta: metav1.ObjectMeta{
				UID:  types.UID(strconv.Itoa(10000 + i)),
				Name: fmt.Sprintf("pod%d", i),
			},
		}
	}
	return pods
}

func TestSyncLoopAbort(t *testing.T) {
	ctx := context.Background()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.runtimeState.setRuntimeSync(time.Now())
	// The syncLoop waits on time.After(resyncInterval); set it large enough
	// that we don't race with the channel close.
	kubelet.resyncInterval = time.Second * 30

	ch := make(chan kubetypes.PodUpdate)
	close(ch)

	// sanity check (also prevents this test from hanging in the next step)
	ok := kubelet.syncLoopIteration(ctx, ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1))
	require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed")

	// this should terminate immediately; if it hangs then syncLoopIteration isn't aborting properly
	kubelet.syncLoop(ctx, ch, kubelet)
}

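// TestSyncPodsStartPod verifies that syncing a newly added pod starts it in
// the fake runtime.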
func TestSyncPodsStartPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	fakeRuntime := testKubelet.fakeRuntime
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("12345678", "foo", "new", v1.PodSpec{
			Containers: []v1.Container{
				{Name: "bar"},
			},
		}),
	}
	kubelet.podManager.SetPods(pods)
	kubelet.HandlePodSyncs(pods)
	fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}

func TestHandlePodCleanupsPerQOS(t *testing.T) {
	ctx := context.Background()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()

	pod := &kubecontainer.Pod{
		ID:        "12345678",
		Name:      "foo",
		Namespace: "new",
		Containers: []*kubecontainer.Container{
			{Name: "bar"},
		},
	}

	fakeRuntime := testKubelet.fakeRuntime
	fakeContainerManager := testKubelet.fakeContainerManager
	fakeContainerManager.PodContainerManager.AddPodFromCgroups(pod) // add pod to mock cgroup
	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: pod},
	}
	kubelet := testKubelet.kubelet
	kubelet.cgroupsPerQOS = true // enable cgroupsPerQOS to turn on the cgroups cleanup

	// HandlePodCleanups gets called every 2 seconds within the Kubelet's
	// housekeeping routine. This test registers the pod, removes the unwanted
	// pod, then calls HandlePodCleanups a few more times. We should see only
	// one Destroy() event. podKiller runs in a goroutine, so a two-second
	// delay should be enough time to mark the pod as killed (within this test case).

	kubelet.HandlePodCleanups(ctx)

	// assert that unwanted pods were killed
	if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
		t.Fatalf("expected %v to be deleted, got %v", expected, actual)
	}
	fakeRuntime.AssertKilledPods([]string(nil))

	// simulate Runtime.KillPod
	fakeRuntime.PodList = nil

	kubelet.HandlePodCleanups(ctx)
	kubelet.HandlePodCleanups(ctx)
	kubelet.HandlePodCleanups(ctx)

	destroyCount := 0
	err := wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
		fakeContainerManager.PodContainerManager.Lock()
		defer fakeContainerManager.PodContainerManager.Unlock()
		destroyCount = 0
		for _, functionName := range fakeContainerManager.PodContainerManager.CalledFunctions {
			if functionName == "Destroy" {
				destroyCount++
			}
		}
		return destroyCount >= 1, nil
	})

	assert.NoError(t, err, "wait should not return error")
	// Housekeeping can be called multiple times, and the cgroup Destroy() is
	// done within a goroutine and can also run multiple times, so the exact
	// Destroy() count is not deterministic.
	// https://github.com/kubernetes/kubernetes/blob/29fdbb065b5e0d195299eb2d260b975cbc554673/pkg/kubelet/kubelet_pods.go#L2006
	assert.True(t, destroyCount >= 1, "Expect 1 or more destroys")
}

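// TestDispatchWorkOfCompletedPod verifies that updates for pods in a terminal
// phase are still dispatched to the pod workers rather than skipped.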
func TestDispatchWorkOfCompletedPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	var got bool
	kubelet.podWorkers = &fakePodWorkers{
		syncPodFn: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
			got = true
			return false, nil
		},
		cache: kubelet.podCache,
		t:     t,
	}
	now := metav1.NewTime(time.Now())
	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:         "1",
				Name:        "completed-pod1",
				Namespace:   "ns",
				Annotations: make(map[string]string),
			},
			Status: v1.PodStatus{
				Phase: v1.PodFailed,
				ContainerStatuses: []v1.ContainerStatus{
					{
						State: v1.ContainerState{
							Terminated: &v1.ContainerStateTerminated{},
						},
					},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:         "2",
				Name:        "completed-pod2",
				Namespace:   "ns",
				Annotations: make(map[string]string),
			},
			Status: v1.PodStatus{
				Phase: v1.PodSucceeded,
				ContainerStatuses: []v1.ContainerStatus{
					{
						State: v1.ContainerState{
							Terminated: &v1.ContainerStateTerminated{},
						},
					},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:               "3",
				Name:              "completed-pod3",
				Namespace:         "ns",
				Annotations:       make(map[string]string),
				DeletionTimestamp: &now,
			},
			Status: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						State: v1.ContainerState{
							Terminated: &v1.ContainerStateTerminated{},
						},
					},
				},
			},
		},
	}
	for _, pod := range pods {
		kubelet.podWorkers.UpdatePod(UpdatePodOptions{
			Pod:        pod,
			UpdateType: kubetypes.SyncPodSync,
			StartTime:  time.Now(),
		})
		if !got {
			t.Errorf("Should not skip completed pod %q", pod.Name)
		}
		got = false
	}
}

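// TestDispatchWorkOfActivePod verifies that updates for active pods are
// dispatched to the pod workers.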
func TestDispatchWorkOfActivePod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	var got bool
	kubelet.podWorkers = &fakePodWorkers{
		syncPodFn: func(ctx context.Context, updateType kubetypes.SyncPodType, pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
			got = true
			return false, nil
		},
		cache: kubelet.podCache,
		t:     t,
	}
	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:         "1",
				Name:        "active-pod1",
				Namespace:   "ns",
				Annotations: make(map[string]string),
			},
			Status: v1.PodStatus{
				Phase: v1.PodRunning,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:         "2",
				Name:        "active-pod2",
				Namespace:   "ns",
				Annotations: make(map[string]string),
			},
			Status: v1.PodStatus{
				Phase: v1.PodFailed,
				ContainerStatuses: []v1.ContainerStatus{
					{
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
		},
	}

	for _, pod := range pods {
		kubelet.podWorkers.UpdatePod(UpdatePodOptions{
			Pod:        pod,
			UpdateType: kubetypes.SyncPodSync,
			StartTime:  time.Now(),
		})
		if !got {
			t.Errorf("Should not skip active pod %q", pod.Name)
		}
		got = false
	}
}

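// TestHandlePodCleanups verifies that a runtime pod with no matching config
// pod is queued for termination via the pod workers, not killed directly in
// the runtime.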
func TestHandlePodCleanups(t *testing.T) {
	ctx := context.Background()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()

	pod := &kubecontainer.Pod{
		ID:        "12345678",
		Name:      "foo",
		Namespace: "new",
		Containers: []*kubecontainer.Container{
			{Name: "bar"},
		},
	}

	fakeRuntime := testKubelet.fakeRuntime
	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: pod},
	}
	kubelet := testKubelet.kubelet

	kubelet.HandlePodCleanups(ctx)

	// assert that unwanted pods were queued to kill
	if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) {
		t.Fatalf("expected %v to be deleted, got %v", expected, actual)
	}
	fakeRuntime.AssertKilledPods([]string(nil))
}

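// TestHandlePodRemovesWhenSourcesAreReady verifies that pod removals are only
// acted upon once all configuration sources report ready.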
func TestHandlePodRemovesWhenSourcesAreReady(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	ready := false

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()

	fakePod := &kubecontainer.Pod{
		ID:        "1",
		Name:      "foo",
		Namespace: "new",
		Containers: []*kubecontainer.Container{
			{Name: "bar"},
		},
	}

	pods := []*v1.Pod{
		podWithUIDNameNs("1", "foo", "new"),
	}

	fakeRuntime := testKubelet.fakeRuntime
	fakeRuntime.PodList = []*containertest.FakePod{
		{Pod: fakePod},
	}
	kubelet := testKubelet.kubelet
	kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })

	kubelet.HandlePodRemoves(pods)
	time.Sleep(2 * time.Second)

	// Sources are not ready yet. Don't remove any pods.
	if expect, actual := []types.UID(nil), kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion; !reflect.DeepEqual(expect, actual) {
		t.Fatalf("expected %v kills, got %v", expect, actual)
	}

	ready = true
	kubelet.HandlePodRemoves(pods)
	time.Sleep(2 * time.Second)

	// Sources are ready. Remove unwanted pods.
	if expect, actual := []types.UID{"1"}, kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion; !reflect.DeepEqual(expect, actual) {
		t.Fatalf("expected %v kills, got %v", expect, actual)
	}
}

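// testNodeLister is a NodeLister stub backed by a fixed slice of nodes.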
type testNodeLister struct {
	nodes []*v1.Node
}

func (nl testNodeLister) Get(name string) (*v1.Node, error) {
	for _, node := range nl.nodes {
		if node.Name == name {
			return node, nil
		}
	}
	return nil, fmt.Errorf("node with name %q does not exist", name)
}

func (nl testNodeLister) List(_ labels.Selector) (ret []*v1.Node, err error) {
	return nl.nodes, nil
}

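// checkPodStatus asserts that the status manager has a status cached for the
// given pod and that its phase matches the expected one.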
func checkPodStatus(t *testing.T, kl *Kubelet, pod *v1.Pod, phase v1.PodPhase) {
	t.Helper()
	status, found := kl.statusManager.GetPodStatus(pod.UID)
	require.True(t, found, "Status of pod %q is not found in the status map", pod.UID)
	require.Equal(t, phase, status.Phase)
}

// Tests that we handle port conflicts correctly by setting the failed status in the status map.
func TestHandlePortConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet

	kl.nodeLister = testNodeLister{nodes: []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      "testNode",
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = metav1.NewTime(time.Now())
	pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]
	kl.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
		pods[0].UID: true,
		pods[1].UID: true,
	}

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle hostname conflicts correctly by setting the failed status in the status map.
func TestHandleHostNameConflicts(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet

	kl.nodeLister = testNodeLister{nodes: []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "127.0.0.1"},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      "testNode",
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	// The default NodeName in these tests is 127.0.0.1.
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
		podWithUIDNameNsSpec("987654321", "fittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.1"}),
	}

	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle a non-matching node selector correctly by setting the failed status in the status map.
func TestHandleNodeSelector(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	nodes := []*v1.Node{
		{
			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
			Status: v1.NodeStatus{
				Allocatable: v1.ResourceList{
					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
				},
			},
		},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      "testNode",
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
		podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
	}
	// The first pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle node selectors on the OS and arch labels correctly by setting the corresponding status in the status map.
func TestHandleNodeSelectorBasedOnOS(t *testing.T) {
	tests := []struct {
		name        string
		nodeLabels  map[string]string
		podSelector map[string]string
		podStatus   v1.PodPhase
	}{
		{
			name:        "correct OS label, wrong pod selector, admission denied",
			nodeLabels:  map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
			podSelector: map[string]string{v1.LabelOSStable: "dummyOS"},
			podStatus:   v1.PodFailed,
		},
		{
			name:        "correct OS label, correct pod selector, admitted",
			nodeLabels:  map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH},
			podSelector: map[string]string{v1.LabelOSStable: goruntime.GOOS},
			podStatus:   v1.PodPending,
		},
		{
			// Expect no patching to happen; label B should be preserved and can be used for nodeAffinity.
			name:        "new node label, correct pod selector, admitted",
			nodeLabels:  map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH, "key": "B"},
			podSelector: map[string]string{"key": "B"},
			podStatus:   v1.PodPending,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
			defer testKubelet.Cleanup()
			kl := testKubelet.kubelet
			nodes := []*v1.Node{
				{
					ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: test.nodeLabels},
					Status: v1.NodeStatus{
						Allocatable: v1.ResourceList{
							v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
						},
					},
				},
			}
			kl.nodeLister = testNodeLister{nodes: nodes}

			recorder := record.NewFakeRecorder(20)
			nodeRef := &v1.ObjectReference{
				Kind:      "Node",
				Name:      "testNode",
				UID:       types.UID("testNode"),
				Namespace: "",
			}
			testClusterDNSDomain := "TEST"
			kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

			pod := podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: test.podSelector})

			kl.HandlePodAdditions([]*v1.Pod{pod})

			// Check pod status stored in the status map.
			checkPodStatus(t, kl, pod, test.podStatus)
		})
	}
}

// Tests that we handle exceeded resources correctly by setting the failed status in the status map.
func TestHandleMemExceeded(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
			Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
				v1.ResourcePods:   *resource.NewQuantity(40, resource.DecimalSI),
			}}},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      "testNode",
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	spec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceMemory: resource.MustParse("90"),
			},
		}}},
	}
	pods := []*v1.Pod{
		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
		podWithUIDNameNsSpec("987654321", "oldpod", "foo", spec),
	}
	// Make sure the pods are in the reverse order of creation time.
	pods[1].CreationTimestamp = metav1.NewTime(time.Now())
	pods[0].CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Second))
	// The newer pod should be rejected.
	notfittingPod := pods[0]
	fittingPod := pods[1]
	kl.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
		pods[0].UID: true,
		pods[1].UID: true,
	}

	kl.HandlePodAdditions(pods)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, notfittingPod, v1.PodFailed)
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
}

// Tests that we handle the results of the UpdatePluginResources interface
// correctly by setting the corresponding status in the status map.
func TestHandlePluginResources(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet

	adjustedResource := v1.ResourceName("domain1.com/adjustedResource")
	emptyResource := v1.ResourceName("domain2.com/emptyResource")
	missingResource := v1.ResourceName("domain2.com/missingResource")
	failedResource := v1.ResourceName("domain2.com/failedResource")
	resourceQuantity0 := *resource.NewQuantity(int64(0), resource.DecimalSI)
	resourceQuantity1 := *resource.NewQuantity(int64(1), resource.DecimalSI)
	resourceQuantity2 := *resource.NewQuantity(int64(2), resource.DecimalSI)
	resourceQuantityInvalid := *resource.NewQuantity(int64(-1), resource.DecimalSI)
	allowedPodQuantity := *resource.NewQuantity(int64(10), resource.DecimalSI)
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
			Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
				adjustedResource: resourceQuantity1,
				emptyResource:    resourceQuantity0,
				v1.ResourcePods:  allowedPodQuantity,
			}}},
	}
	kl.nodeLister = testNodeLister{nodes: nodes}

	updatePluginResourcesFunc := func(node *schedulerframework.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
		// Maps from a resource name to the value we use to set
		// node.allocatableResource[resourceName]. A resource with an invalid
		// value (< 0) causes the function to return an error, to emulate a
		// resource allocation failure. Resources not contained in this map
		// keep their node.allocatableResource quantity unchanged.
		updateResourceMap := map[v1.ResourceName]resource.Quantity{
			adjustedResource: resourceQuantity2,
			emptyResource:    resourceQuantity0,
			failedResource:   resourceQuantityInvalid,
		}
		pod := attrs.Pod
		newAllocatableResource := node.Allocatable.Clone()
		for _, container := range pod.Spec.Containers {
			for resource := range container.Resources.Requests {
				newQuantity, exist := updateResourceMap[resource]
				if !exist {
					continue
				}
				if newQuantity.Value() < 0 {
					return fmt.Errorf("allocation failed")
				}
				newAllocatableResource.ScalarResources[resource] = newQuantity.Value()
			}
		}
		node.Allocatable = newAllocatableResource
		return nil
	}

	// Add updatePluginResourcesFunc to the admission handlers to test its behavior.
	kl.admitHandlers = lifecycle.PodAdmitHandlers{}
	kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      "testNode",
		UID:       types.UID("testNode"),
		Namespace: "",
	}
	testClusterDNSDomain := "TEST"
	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")

	// A pod requiring adjustedResource can be successfully allocated because
	// updatePluginResourcesFunc adjusts node.allocatableResource for this
	// resource to a sufficient value.
	fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				adjustedResource: resourceQuantity2,
			},
			Requests: v1.ResourceList{
				adjustedResource: resourceQuantity2,
			},
		}}},
	}
	// A pod requiring emptyResource (an extended resource with 0 allocatable)
	// will not pass PredicateAdmit.
	emptyPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				emptyResource: resourceQuantity2,
			},
			Requests: v1.ResourceList{
				emptyResource: resourceQuantity2,
			},
		}}},
	}
	// A pod requiring missingResource will pass PredicateAdmit.
	//
	// Extended resources missing in node status are ignored in PredicateAdmit.
	// This is required to support extended resources that are not managed by
	// a device plugin, such as cluster-level resources.
	missingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				missingResource: resourceQuantity2,
			},
			Requests: v1.ResourceList{
				missingResource: resourceQuantity2,
			},
		}}},
	}
	// A pod requiring failedResource will be rejected because allocating the
	// resource fails.
	failedPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
			Limits: v1.ResourceList{
				failedResource: resourceQuantity1,
			},
			Requests: v1.ResourceList{
				failedResource: resourceQuantity1,
			},
		}}},
	}

	fittingPod := podWithUIDNameNsSpec("1", "fittingpod", "foo", fittingPodSpec)
	emptyPod := podWithUIDNameNsSpec("2", "emptypod", "foo", emptyPodSpec)
	missingPod := podWithUIDNameNsSpec("3", "missingpod", "foo", missingPodSpec)
	failedPod := podWithUIDNameNsSpec("4", "failedpod", "foo", failedPodSpec)

	kl.HandlePodAdditions([]*v1.Pod{fittingPod, emptyPod, missingPod, failedPod})

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, fittingPod, v1.PodPending)
	checkPodStatus(t, kl, emptyPod, v1.PodFailed)
	checkPodStatus(t, kl, missingPod, v1.PodPending)
	checkPodStatus(t, kl, failedPod, v1.PodFailed)
}

// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without an external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
	ctx := context.Background()
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()

	kl := testKubelet.kubelet
	pods := []*v1.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
		{ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: v1.PodSpec{Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}},
	}
	podToTest := pods[1]
	// Run once to populate the status map.
	kl.HandlePodAdditions(pods)
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
		t.Fatalf("expected to have status cached for pod2")
	}
	// Sync with an empty pod list so that the entry in the status map is removed.
	kl.podManager.SetPods([]*v1.Pod{})
	kl.HandlePodCleanups(ctx)
	if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
		t.Fatalf("expected to not have status cached for pod2")
	}
}

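// TestValidateContainerLogStatus exercises validateContainerLogStatus across
// running, terminated, and waiting container states, for both the current and
// the previous container instance.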
func TestValidateContainerLogStatus(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	containerName := "x"
	testCases := []struct {
		statuses []v1.ContainerStatus
		success  bool // whether getting logs for the container should succeed.
		pSuccess bool // whether getting logs for the previous container should succeed.
	}{
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Running: &v1.ContainerStateRunning{},
					},
					LastTerminationState: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
					},
				},
			},
			success:  true,
			pSuccess: true,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Running: &v1.ContainerStateRunning{},
					},
				},
			},
			success:  true,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
					},
				},
			},
			success:  true,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
					LastTerminationState: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{},
					},
					LastTerminationState: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
					},
				},
			},
			success:  true,
			pSuccess: true,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name: containerName,
					State: v1.ContainerState{
						Waiting: &v1.ContainerStateWaiting{},
					},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name:  containerName,
					State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"}},
				},
			},
			success:  false,
			pSuccess: false,
		},
		{
			statuses: []v1.ContainerStatus{
				{
					Name:  containerName,
					State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
				},
			},
			success:  false,
			pSuccess: false,
		},
	}

	for i, tc := range testCases {
		// Access the log of the most recent container
		previous := false
		podStatus := &v1.PodStatus{ContainerStatuses: tc.statuses}
		_, err := kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
		if !tc.success {
			assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
		} else {
			assert.NoError(t, err, "[case %d] error", i)
		}
		// Access the log of the previous, terminated container
		previous = true
		_, err = kubelet.validateContainerLogStatus("podName", podStatus, containerName, previous)
		if !tc.pSuccess {
			assert.Error(t, err, fmt.Sprintf("[case %d] error", i))
		} else {
			assert.NoError(t, err, "[case %d] error", i)
		}
		// Access the log of a container that's not in the pod
		_, err = kubelet.validateContainerLogStatus("podName", podStatus, "blah", false)
		assert.Error(t, err, fmt.Sprintf("[case %d] invalid container name should cause an error", i))
	}
}

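// TestCreateMirrorPod verifies that syncing a static pod creates exactly one
// mirror pod for it.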
  1340  func TestCreateMirrorPod(t *testing.T) {
  1341  	tests := []struct {
  1342  		name       string
  1343  		updateType kubetypes.SyncPodType
  1344  	}{
  1345  		{
  1346  			name:       "SyncPodCreate",
  1347  			updateType: kubetypes.SyncPodCreate,
  1348  		},
  1349  		{
  1350  			name:       "SyncPodUpdate",
  1351  			updateType: kubetypes.SyncPodUpdate,
  1352  		},
  1353  	}
  1354  	for _, tt := range tests {
  1355  		tt := tt
  1356  		t.Run(tt.name, func(t *testing.T) {
  1357  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1358  			defer testKubelet.Cleanup()
  1359  
  1360  			kl := testKubelet.kubelet
  1361  			manager := testKubelet.fakeMirrorClient
  1362  			pod := podWithUIDNameNs("12345678", "bar", "foo")
  1363  			pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
  1364  			pods := []*v1.Pod{pod}
  1365  			kl.podManager.SetPods(pods)
  1366  			isTerminal, err := kl.SyncPod(context.Background(), tt.updateType, pod, nil, &kubecontainer.PodStatus{})
  1367  			assert.NoError(t, err)
  1368  			if isTerminal {
  1369  				t.Fatalf("pod should not be terminal: %#v", pod)
  1370  			}
  1371  			podFullName := kubecontainer.GetPodFullName(pod)
  1372  			assert.True(t, manager.HasPod(podFullName), "Expected mirror pod %q to be created", podFullName)
  1373  			assert.Equal(t, 1, manager.NumOfPods(), "Expected only 1 mirror pod %q, got %+v", podFullName, manager.GetPods())
  1374  		})
  1375  	}
  1376  }
  1377  
  1378  func TestDeleteOutdatedMirrorPod(t *testing.T) {
  1379  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1380  	defer testKubelet.Cleanup()
  1381  
  1382  	kl := testKubelet.kubelet
  1383  	manager := testKubelet.fakeMirrorClient
  1384  	pod := podWithUIDNameNsSpec("12345678", "foo", "ns", v1.PodSpec{
  1385  		Containers: []v1.Container{
  1386  			{Name: "1234", Image: "foo"},
  1387  		},
  1388  	})
  1389  	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
  1390  
  1391  	// Mirror pod has an outdated spec.
  1392  	mirrorPod := podWithUIDNameNsSpec("11111111", "foo", "ns", v1.PodSpec{
  1393  		Containers: []v1.Container{
  1394  			{Name: "1234", Image: "bar"},
  1395  		},
  1396  	})
  1397  	mirrorPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "api"
  1398  	mirrorPod.Annotations[kubetypes.ConfigMirrorAnnotationKey] = "mirror"
  1399  
  1400  	pods := []*v1.Pod{pod, mirrorPod}
  1401  	kl.podManager.SetPods(pods)
  1402  	isTerminal, err := kl.SyncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, &kubecontainer.PodStatus{})
  1403  	assert.NoError(t, err)
  1404  	if isTerminal {
  1405  		t.Fatalf("pod should not be terminal: %#v", pod)
  1406  	}
  1407  	name := kubecontainer.GetPodFullName(pod)
  1408  	creates, deletes := manager.GetCounts(name)
  1409  	if creates != 1 || deletes != 1 {
  1410  		t.Errorf("expected 1 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
  1411  	}
  1412  }
  1413  
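        // TestDeleteOrphanedMirrorPods verifies that mirror pods with no matching
        // static pod are deleted during cleanup, except when the corresponding
        // static pod is still terminating.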
  1414  func TestDeleteOrphanedMirrorPods(t *testing.T) {
  1415  	ctx := context.Background()
  1416  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1417  	defer testKubelet.Cleanup()
  1418  
  1419  	kl := testKubelet.kubelet
  1420  	manager := testKubelet.fakeMirrorClient
  1421  	orphanPods := []*v1.Pod{
  1422  		{
  1423  			ObjectMeta: metav1.ObjectMeta{
  1424  				UID:       "12345678",
  1425  				Name:      "pod1",
  1426  				Namespace: "ns",
  1427  				Annotations: map[string]string{
  1428  					kubetypes.ConfigSourceAnnotationKey: "api",
  1429  					kubetypes.ConfigMirrorAnnotationKey: "mirror",
  1430  				},
  1431  			},
  1432  		},
  1433  		{
  1434  			ObjectMeta: metav1.ObjectMeta{
  1435  				UID:       "12345679",
  1436  				Name:      "pod2",
  1437  				Namespace: "ns",
  1438  				Annotations: map[string]string{
  1439  					kubetypes.ConfigSourceAnnotationKey: "api",
  1440  					kubetypes.ConfigMirrorAnnotationKey: "mirror",
  1441  				},
  1442  			},
  1443  		},
  1444  		{
  1445  			ObjectMeta: metav1.ObjectMeta{
  1446  				UID:       "12345670",
  1447  				Name:      "pod3",
  1448  				Namespace: "ns",
  1449  				Annotations: map[string]string{
  1450  					kubetypes.ConfigSourceAnnotationKey: "api",
  1451  					kubetypes.ConfigMirrorAnnotationKey: "mirror",
  1452  				},
  1453  			},
  1454  		},
  1455  	}
  1456  
  1457  	kl.podManager.SetPods(orphanPods)
  1458  
  1459  	// a static pod that is terminating will not be deleted
  1460  	kl.podWorkers.(*fakePodWorkers).terminatingStaticPods = map[string]bool{
  1461  		kubecontainer.GetPodFullName(orphanPods[2]): true,
  1462  	}
  1463  
  1464  	// Sync with an empty pod list to delete all mirror pods.
  1465  	kl.HandlePodCleanups(ctx)
  1466  	assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
  1467  	for i, pod := range orphanPods {
  1468  		name := kubecontainer.GetPodFullName(pod)
  1469  		creates, deletes := manager.GetCounts(name)
  1470  		switch i {
  1471  		case 2:
  1472  			if creates != 0 || deletes != 0 {
  1473  				t.Errorf("expected 0 creations and 0 deletions of %q, got %d, %d", name, creates, deletes)
  1474  			}
  1475  		default:
  1476  			if creates != 0 || deletes != 1 {
  1477  				t.Errorf("expected 0 creations and 1 deletion of %q, got %d, %d", name, creates, deletes)
  1478  			}
  1479  		}
  1480  	}
  1481  }
  1482  
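        // TestGetContainerInfoForMirrorPods verifies that container stats can be
        // retrieved using the mirror pod's UID, which is resolved to the static
        // pod actually running on the node.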
  1483  func TestGetContainerInfoForMirrorPods(t *testing.T) {
  1484  	ctx := context.Background()
  1485  	// pods contain one static and one mirror pod with the same name but
  1486  	// different UIDs.
  1487  	pods := []*v1.Pod{
  1488  		{
  1489  			ObjectMeta: metav1.ObjectMeta{
  1490  				UID:       "1234",
  1491  				Name:      "qux",
  1492  				Namespace: "ns",
  1493  				Annotations: map[string]string{
  1494  					kubetypes.ConfigSourceAnnotationKey: "file",
  1495  				},
  1496  			},
  1497  			Spec: v1.PodSpec{
  1498  				Containers: []v1.Container{
  1499  					{Name: "foo"},
  1500  				},
  1501  			},
  1502  		},
  1503  		{
  1504  			ObjectMeta: metav1.ObjectMeta{
  1505  				UID:       "5678",
  1506  				Name:      "qux",
  1507  				Namespace: "ns",
  1508  				Annotations: map[string]string{
  1509  					kubetypes.ConfigSourceAnnotationKey: "api",
  1510  					kubetypes.ConfigMirrorAnnotationKey: "mirror",
  1511  				},
  1512  			},
  1513  			Spec: v1.PodSpec{
  1514  				Containers: []v1.Container{
  1515  					{Name: "foo"},
  1516  				},
  1517  			},
  1518  		},
  1519  	}
  1520  
  1521  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1522  	defer testKubelet.Cleanup()
  1523  	fakeRuntime := testKubelet.fakeRuntime
  1524  	cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
  1525  	kubelet := testKubelet.kubelet
  1526  
  1527  	fakeRuntime.PodList = []*containertest.FakePod{
  1528  		{Pod: &kubecontainer.Pod{
  1529  			ID:        "1234",
  1530  			Name:      "qux",
  1531  			Namespace: "ns",
  1532  			Containers: []*kubecontainer.Container{
  1533  				{
  1534  					Name: "foo",
  1535  					ID:   kubecontainer.ContainerID{Type: "test", ID: "ab2cdf"},
  1536  				},
  1537  			},
  1538  		}},
  1539  	}
  1540  
  1541  	kubelet.podManager.SetPods(pods)
  1542  	// Use the mirror pod UID to retrieve the stats.
  1543  	stats, err := kubelet.GetContainerInfo(ctx, "qux_ns", "5678", "foo", cadvisorReq)
  1544  	assert.NoError(t, err)
  1545  	require.NotNil(t, stats)
  1546  }
  1547  
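        // TestNetworkErrorsWithoutHostNetwork verifies that when the runtime
        // network is in an error state, only host-network pods from a trusted
        // (file) source can still sync successfully.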
  1548  func TestNetworkErrorsWithoutHostNetwork(t *testing.T) {
  1549  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1550  	defer testKubelet.Cleanup()
  1551  	kubelet := testKubelet.kubelet
  1552  
  1553  	kubelet.runtimeState.setNetworkState(fmt.Errorf("simulated network error"))
  1554  
  1555  	pod := podWithUIDNameNsSpec("12345678", "hostnetwork", "new", v1.PodSpec{
  1556  		HostNetwork: false,
  1557  
  1558  		Containers: []v1.Container{
  1559  			{Name: "foo"},
  1560  		},
  1561  	})
  1562  
  1563  	kubelet.podManager.SetPods([]*v1.Pod{pod})
  1564  	isTerminal, err := kubelet.SyncPod(context.Background(), kubetypes.SyncPodUpdate, pod, nil, &kubecontainer.PodStatus{})
  1565  	assert.Error(t, err, "expected pod with hostNetwork=false to fail when network in error")
  1566  	if isTerminal {
  1567  		t.Fatalf("pod should not be terminal: %#v", pod)
  1568  	}
  1569  
  1570  	pod.Annotations[kubetypes.ConfigSourceAnnotationKey] = kubetypes.FileSource
  1571  	pod.Spec.HostNetwork = true
  1572  	isTerminal, err = kubelet.SyncPod(context.Background(), kubetypes.SyncPodUpdate, pod, nil, &kubecontainer.PodStatus{})
  1573  	assert.NoError(t, err, "expected pod with hostNetwork=true to succeed when network in error")
  1574  	if isTerminal {
  1575  		t.Fatalf("pod should not be terminal: %#v", pod)
  1576  	}
  1577  }
  1578  
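        // TestFilterOutInactivePods verifies that terminal and known-terminated
        // pods are filtered out, while pending, running, deleted-but-running, and
        // still-terminating pods are kept.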
  1579  func TestFilterOutInactivePods(t *testing.T) {
  1580  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1581  	defer testKubelet.Cleanup()
  1582  	kubelet := testKubelet.kubelet
  1583  	pods := newTestPods(8)
  1584  	now := metav1.NewTime(time.Now())
  1585  
  1586  	// terminal pods are excluded
  1587  	pods[0].Status.Phase = v1.PodFailed
  1588  	pods[1].Status.Phase = v1.PodSucceeded
  1589  
  1590  	// deleted pod is included unless it's known to be terminated
  1591  	pods[2].Status.Phase = v1.PodRunning
  1592  	pods[2].DeletionTimestamp = &now
  1593  	pods[2].Status.ContainerStatuses = []v1.ContainerStatus{
  1594  		{State: v1.ContainerState{
  1595  			Running: &v1.ContainerStateRunning{
  1596  				StartedAt: now,
  1597  			},
  1598  		}},
  1599  	}
  1600  
  1601  	// pending and running pods are included
  1602  	pods[3].Status.Phase = v1.PodPending
  1603  	pods[4].Status.Phase = v1.PodRunning
  1604  
  1605  	// pod that is running but has been rejected by admission is excluded
  1606  	pods[5].Status.Phase = v1.PodRunning
  1607  	kubelet.statusManager.SetPodStatus(pods[5], v1.PodStatus{Phase: v1.PodFailed})
  1608  
  1609  	// pod that is running according to the API but is known to be terminated is excluded
  1610  	pods[6].Status.Phase = v1.PodRunning
  1611  	kubelet.podWorkers.(*fakePodWorkers).terminated = map[types.UID]bool{
  1612  		pods[6].UID: true,
  1613  	}
  1614  
  1615  	// pod that is failed but still terminating is included (it may still be consuming
  1616  	// resources)
  1617  	pods[7].Status.Phase = v1.PodFailed
  1618  	kubelet.podWorkers.(*fakePodWorkers).terminationRequested = map[types.UID]bool{
  1619  		pods[7].UID: true,
  1620  	}
  1621  
  1622  	expected := []*v1.Pod{pods[2], pods[3], pods[4], pods[7]}
  1623  	kubelet.podManager.SetPods(pods)
  1624  	actual := kubelet.filterOutInactivePods(pods)
  1625  	assert.Equal(t, expected, actual)
  1626  }
  1627  
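        // TestCheckpointContainer verifies CheckpointContainer's handling of an
        // unknown container name, and that any caller-supplied checkpoint location
        // is ignored in favor of a path under the kubelet's checkpoints directory.
        // Only the path prefix is asserted, since the final archive name presumably
        // carries a timestamp suffix.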
  1628  func TestCheckpointContainer(t *testing.T) {
  1629  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1630  	defer testKubelet.Cleanup()
  1631  	kubelet := testKubelet.kubelet
  1632  
  1633  	fakeRuntime := testKubelet.fakeRuntime
  1634  	containerID := kubecontainer.ContainerID{
  1635  		Type: "test",
  1636  		ID:   "abc1234",
  1637  	}
  1638  
  1639  	fakePod := &containertest.FakePod{
  1640  		Pod: &kubecontainer.Pod{
  1641  			ID:        "12345678",
  1642  			Name:      "podFoo",
  1643  			Namespace: "nsFoo",
  1644  			Containers: []*kubecontainer.Container{
  1645  				{
  1646  					Name: "containerFoo",
  1647  					ID:   containerID,
  1648  				},
  1649  			},
  1650  		},
  1651  	}
  1652  
  1653  	fakeRuntime.PodList = []*containertest.FakePod{fakePod}
  1654  	wrongContainerName := "wrongContainerName"
  1655  
  1656  	tests := []struct {
  1657  		name               string
  1658  		containerName      string
  1659  		checkpointLocation string
  1660  		expectedStatus     error
  1661  		expectedLocation   string
  1662  	}{
  1663  		{
  1664  			name:               "Checkpoint with wrong container name",
  1665  			containerName:      wrongContainerName,
  1666  			checkpointLocation: "",
  1667  			expectedStatus:     fmt.Errorf("container %s not found", wrongContainerName),
  1668  			expectedLocation:   "",
  1669  		},
  1670  		{
  1671  			name:               "Checkpoint with default checkpoint location",
  1672  			containerName:      fakePod.Pod.Containers[0].Name,
  1673  			checkpointLocation: "",
  1674  			expectedStatus:     nil,
  1675  			expectedLocation: filepath.Join(
  1676  				kubelet.getCheckpointsDir(),
  1677  				fmt.Sprintf(
  1678  					"checkpoint-%s_%s-%s",
  1679  					fakePod.Pod.Name,
  1680  					fakePod.Pod.Namespace,
  1681  					fakePod.Pod.Containers[0].Name,
  1682  				),
  1683  			),
  1684  		},
  1685  		{
  1686  			name:               "Checkpoint with ignored location",
  1687  			containerName:      fakePod.Pod.Containers[0].Name,
  1688  			checkpointLocation: "somethingThatWillBeIgnored",
  1689  			expectedStatus:     nil,
  1690  			expectedLocation: filepath.Join(
  1691  				kubelet.getCheckpointsDir(),
  1692  				fmt.Sprintf(
  1693  					"checkpoint-%s_%s-%s",
  1694  					fakePod.Pod.Name,
  1695  					fakePod.Pod.Namespace,
  1696  					fakePod.Pod.Containers[0].Name,
  1697  				),
  1698  			),
  1699  		},
  1700  	}
  1701  
  1702  	for _, test := range tests {
  1703  		t.Run(test.name, func(t *testing.T) {
  1704  			ctx := context.Background()
  1705  			options := &runtimeapi.CheckpointContainerRequest{}
  1706  			if test.checkpointLocation != "" {
  1707  				options.Location = test.checkpointLocation
  1708  			}
  1709  			status := kubelet.CheckpointContainer(
  1710  				ctx,
  1711  				fakePod.Pod.ID,
  1712  				fmt.Sprintf(
  1713  					"%s_%s",
  1714  					fakePod.Pod.Name,
  1715  					fakePod.Pod.Namespace,
  1716  				),
  1717  				test.containerName,
  1718  				options,
  1719  			)
  1720  			require.Equal(t, test.expectedStatus, status)
  1721  
  1722  			if status != nil {
  1723  				return
  1724  			}
  1725  
  1726  			require.True(
  1727  				t,
  1728  				strings.HasPrefix(
  1729  					options.Location,
  1730  					test.expectedLocation,
  1731  				),
  1732  			)
  1733  			require.Equal(
  1734  				t,
  1735  				containerID.ID,
  1736  				options.ContainerId,
  1737  			)
  1738  
  1739  		})
  1740  	}
  1741  }
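        // TestSyncPodsSetStatusToFailedForPodsThatRunTooLong verifies that a pod
        // whose ActiveDeadlineSeconds has already been exceeded is set to PodFailed.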
  1742  
  1743  func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
  1744  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1745  	defer testKubelet.Cleanup()
  1746  	fakeRuntime := testKubelet.fakeRuntime
  1747  	kubelet := testKubelet.kubelet
  1748  
  1749  	now := metav1.Now()
  1750  	startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
  1751  	exceededActiveDeadlineSeconds := int64(30)
  1752  
  1753  	pods := []*v1.Pod{
  1754  		{
  1755  			ObjectMeta: metav1.ObjectMeta{
  1756  				UID:       "12345678",
  1757  				Name:      "bar",
  1758  				Namespace: "new",
  1759  			},
  1760  			Spec: v1.PodSpec{
  1761  				Containers: []v1.Container{
  1762  					{Name: "foo"},
  1763  				},
  1764  				ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
  1765  			},
  1766  			Status: v1.PodStatus{
  1767  				StartTime: &startTime,
  1768  			},
  1769  		},
  1770  	}
  1771  
  1772  	fakeRuntime.PodList = []*containertest.FakePod{
  1773  		{Pod: &kubecontainer.Pod{
  1774  			ID:        "12345678",
  1775  			Name:      "bar",
  1776  			Namespace: "new",
  1777  			Containers: []*kubecontainer.Container{
  1778  				{Name: "foo"},
  1779  			},
  1780  		}},
  1781  	}
  1782  
  1783  	// Let the pod worker set the status to failed after this sync.
  1784  	kubelet.HandlePodUpdates(pods)
  1785  	status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
  1786  	assert.True(t, found, "expected to find status for pod %q", pods[0].UID)
  1787  	assert.Equal(t, v1.PodFailed, status.Phase)
  1788  	// check pod status contains ContainerStatuses, etc.
  1789  	assert.NotNil(t, status.ContainerStatuses)
  1790  }
  1791  
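        // TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed verifies that a
        // pod still within its ActiveDeadlineSeconds is not set to PodFailed.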
  1792  func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
  1793  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1794  	defer testKubelet.Cleanup()
  1795  	fakeRuntime := testKubelet.fakeRuntime
  1796  
  1797  	kubelet := testKubelet.kubelet
  1798  
  1799  	now := metav1.Now()
  1800  	startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
  1801  	exceededActiveDeadlineSeconds := int64(300)
  1802  
  1803  	pods := []*v1.Pod{
  1804  		{
  1805  			ObjectMeta: metav1.ObjectMeta{
  1806  				UID:       "12345678",
  1807  				Name:      "bar",
  1808  				Namespace: "new",
  1809  			},
  1810  			Spec: v1.PodSpec{
  1811  				Containers: []v1.Container{
  1812  					{Name: "foo"},
  1813  				},
  1814  				ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
  1815  			},
  1816  			Status: v1.PodStatus{
  1817  				StartTime: &startTime,
  1818  			},
  1819  		},
  1820  	}
  1821  
  1822  	fakeRuntime.PodList = []*containertest.FakePod{
  1823  		{Pod: &kubecontainer.Pod{
  1824  			ID:        "12345678",
  1825  			Name:      "bar",
  1826  			Namespace: "new",
  1827  			Containers: []*kubecontainer.Container{
  1828  				{Name: "foo"},
  1829  			},
  1830  		}},
  1831  	}
  1832  
  1833  	kubelet.podManager.SetPods(pods)
  1834  	kubelet.HandlePodUpdates(pods)
  1835  	status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
  1836  	assert.True(t, found, "expected to find status for pod %q", pods[0].UID)
  1837  	assert.NotEqual(t, v1.PodFailed, status.Phase)
  1838  }
  1839  
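        // podWithUIDNameNs returns a bare pod with the given UID, name, and
        // namespace, and an empty (non-nil) annotations map.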
  1840  func podWithUIDNameNs(uid types.UID, name, namespace string) *v1.Pod {
  1841  	return &v1.Pod{
  1842  		ObjectMeta: metav1.ObjectMeta{
  1843  			UID:         uid,
  1844  			Name:        name,
  1845  			Namespace:   namespace,
  1846  			Annotations: map[string]string{},
  1847  		},
  1848  	}
  1849  }
  1850  
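        // podWithUIDNameNsSpec is podWithUIDNameNs with the given spec attached.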
  1851  func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec) *v1.Pod {
  1852  	pod := podWithUIDNameNs(uid, name, namespace)
  1853  	pod.Spec = spec
  1854  	return pod
  1855  }
  1856  
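        // TestDeletePodDirsForDeletedPods verifies that the directory of a pod
        // removed from the pod manager is deleted during cleanup, while the
        // directories of known pods are kept.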
  1857  func TestDeletePodDirsForDeletedPods(t *testing.T) {
  1858  	ctx := context.Background()
  1859  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1860  	defer testKubelet.Cleanup()
  1861  	kl := testKubelet.kubelet
  1862  	pods := []*v1.Pod{
  1863  		podWithUIDNameNs("12345678", "pod1", "ns"),
  1864  		podWithUIDNameNs("12345679", "pod2", "ns"),
  1865  	}
  1866  
  1867  	kl.podManager.SetPods(pods)
  1868  	// Sync to create pod directories.
  1869  	kl.HandlePodSyncs(kl.podManager.GetPods())
  1870  	for i := range pods {
  1871  		assert.True(t, dirExists(kl.getPodDir(pods[i].UID)), "Expected directory to exist for pod %d", i)
  1872  	}
  1873  
  1874  	// Pod 1 (pods[1]) has been deleted and no longer exists.
  1875  	kl.podManager.SetPods([]*v1.Pod{pods[0]})
  1876  	kl.HandlePodCleanups(ctx)
  1877  	assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0")
  1878  	assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1")
  1879  }
  1880  
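        // syncAndVerifyPodDir sets the given pods on the kubelet, runs a sync and
        // a cleanup pass, and asserts whether the directories of podsToCheck exist.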
  1881  func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) {
  1882  	ctx := context.Background()
  1883  	t.Helper()
  1884  	kl := testKubelet.kubelet
  1885  
  1886  	kl.podManager.SetPods(pods)
  1887  	kl.HandlePodSyncs(pods)
  1888  	kl.HandlePodCleanups(ctx)
  1889  	for i, pod := range podsToCheck {
  1890  		exist := dirExists(kl.getPodDir(pod.UID))
  1891  		assert.Equal(t, shouldExist, exist, "directory of pod %d", i)
  1892  	}
  1893  }
  1894  
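        // TestDoesNotDeletePodDirsForTerminatedPods verifies that the directories
        // of failed and succeeded pods are kept as long as the pods are still
        // known to the pod manager.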
  1895  func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
  1896  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1897  	defer testKubelet.Cleanup()
  1898  	kl := testKubelet.kubelet
  1899  	pods := []*v1.Pod{
  1900  		podWithUIDNameNs("12345678", "pod1", "ns"),
  1901  		podWithUIDNameNs("12345679", "pod2", "ns"),
  1902  		podWithUIDNameNs("12345680", "pod3", "ns"),
  1903  	}
  1904  	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
  1905  	// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
  1906  	// deleted.
  1907  	kl.statusManager.SetPodStatus(pods[1], v1.PodStatus{Phase: v1.PodFailed})
  1908  	kl.statusManager.SetPodStatus(pods[2], v1.PodStatus{Phase: v1.PodSucceeded})
  1909  	syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
  1910  }
  1911  
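        // TestDoesNotDeletePodDirsIfContainerIsRunning verifies that the directory
        // of a pod deleted from the apiserver is kept while the pod is still
        // active on the node, and removed once it no longer is.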
  1912  func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
  1913  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1914  	defer testKubelet.Cleanup()
  1915  	runningPod := &kubecontainer.Pod{
  1916  		ID:        "12345678",
  1917  		Name:      "pod1",
  1918  		Namespace: "ns",
  1919  	}
  1920  	apiPod := podWithUIDNameNs(runningPod.ID, runningPod.Name, runningPod.Namespace)
  1921  
  1922  	// Sync once to create pod directory; confirm that the pod directory has
  1923  	// already been created.
  1924  	pods := []*v1.Pod{apiPod}
  1925  	testKubelet.kubelet.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{apiPod.UID: true}
  1926  	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)
  1927  
  1928  	// Pretend the pod is deleted from apiserver, but is still active on the node.
  1929  	// The pod directory should not be removed.
  1930  	pods = []*v1.Pod{}
  1931  	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{{Pod: runningPod, NetnsPath: ""}}
  1932  	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, true)
  1933  
  1934  	// The pod is deleted and also not active on the node. The pod directory
  1935  	// should be removed.
  1936  	pods = []*v1.Pod{}
  1937  	testKubelet.fakeRuntime.PodList = []*containertest.FakePod{}
  1938  	testKubelet.kubelet.podWorkers.(*fakePodWorkers).running = nil
  1939  	syncAndVerifyPodDir(t, testKubelet, pods, []*v1.Pod{apiPod}, false)
  1940  }
  1941  
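        // TestGetPodsToSync verifies that getPodsToSync returns the pods whose
        // work-queue entries have come due, plus any pods that have exceeded
        // their ActiveDeadlineSeconds.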
  1942  func TestGetPodsToSync(t *testing.T) {
  1943  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1944  	defer testKubelet.Cleanup()
  1945  	kubelet := testKubelet.kubelet
  1946  	clock := testKubelet.fakeClock
  1947  	pods := newTestPods(5)
  1948  
  1949  	exceededActiveDeadlineSeconds := int64(30)
  1950  	notYetActiveDeadlineSeconds := int64(120)
  1951  	startTime := metav1.NewTime(clock.Now())
  1952  	pods[0].Status.StartTime = &startTime
  1953  	pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
  1954  	pods[1].Status.StartTime = &startTime
  1955  	pods[1].Spec.ActiveDeadlineSeconds = &notYetActiveDeadlineSeconds
  1956  	pods[2].Status.StartTime = &startTime
  1957  	pods[2].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
  1958  
  1959  	kubelet.podManager.SetPods(pods)
  1960  	kubelet.workQueue.Enqueue(pods[2].UID, 0)
  1961  	kubelet.workQueue.Enqueue(pods[3].UID, 30*time.Second)
  1962  	kubelet.workQueue.Enqueue(pods[4].UID, 2*time.Minute)
  1963  
  1964  	clock.Step(1 * time.Minute)
  1965  
  1966  	expected := []*v1.Pod{pods[2], pods[3], pods[0]}
  1967  	podsToSync := kubelet.getPodsToSync()
  1968  	sort.Sort(podsByUID(expected))
  1969  	sort.Sort(podsByUID(podsToSync))
  1970  	assert.Equal(t, expected, podsToSync)
  1971  }
  1972  
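        // TestGenerateAPIPodStatusWithSortedContainers verifies that the generated
        // container statuses come back in the expected sorted order even when the
        // runtime reports them out of order.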
  1973  func TestGenerateAPIPodStatusWithSortedContainers(t *testing.T) {
  1974  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1975  	defer testKubelet.Cleanup()
  1976  	kubelet := testKubelet.kubelet
  1977  	numContainers := 10
  1978  	expectedOrder := []string{}
  1979  	cStatuses := []*kubecontainer.Status{}
  1980  	specContainerList := []v1.Container{}
  1981  	for i := 0; i < numContainers; i++ {
  1982  		id := fmt.Sprintf("%v", i)
  1983  		containerName := fmt.Sprintf("%vcontainer", id)
  1984  		expectedOrder = append(expectedOrder, containerName)
  1985  		cStatus := &kubecontainer.Status{
  1986  			ID:   kubecontainer.BuildContainerID("test", id),
  1987  			Name: containerName,
  1988  		}
  1989  		// Rearrange container statuses so they are out of the spec order.
  1990  		if i%2 == 0 {
  1991  			cStatuses = append(cStatuses, cStatus)
  1992  		} else {
  1993  			cStatuses = append([]*kubecontainer.Status{cStatus}, cStatuses...)
  1994  		}
  1995  		specContainerList = append(specContainerList, v1.Container{Name: containerName})
  1996  	}
  1997  	pod := podWithUIDNameNs("uid1", "foo", "test")
  1998  	pod.Spec = v1.PodSpec{
  1999  		Containers: specContainerList,
  2000  	}
  2001  
  2002  	status := &kubecontainer.PodStatus{
  2003  		ID:                pod.UID,
  2004  		Name:              pod.Name,
  2005  		Namespace:         pod.Namespace,
  2006  		ContainerStatuses: cStatuses,
  2007  	}
  2008  	for run := 0; run < 5; run++ {
  2009  		apiStatus := kubelet.generateAPIPodStatus(pod, status, false)
  2010  		for i, c := range apiStatus.ContainerStatuses {
  2011  			if expectedOrder[i] != c.Name {
  2012  				t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
  2013  			}
  2014  		}
  2015  	}
  2016  }
  2017  
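        // verifyContainerStatuses asserts that each container status matches the
        // expected State and LastTerminationState, keyed by container name.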
  2018  func verifyContainerStatuses(t *testing.T, statuses []v1.ContainerStatus, expectedState, expectedLastTerminationState map[string]v1.ContainerState, message string) {
  2019  	for _, s := range statuses {
  2020  		assert.Equal(t, expectedState[s.Name], s.State, "%s: state", message)
  2021  		assert.Equal(t, expectedLastTerminationState[s.Name], s.LastTerminationState, "%s: last terminated state", message)
  2022  	}
  2023  }
  2024  
  2025  // Test generateAPIPodStatus with different reason cache contents and old API pod statuses.
  2026  func TestGenerateAPIPodStatusWithReasonCache(t *testing.T) {
  2027  	// The following waiting reason and message are generated in convertStatusToAPIStatus().
  2028  	testTimestamp := time.Unix(123456789, 987654321)
  2029  	testErrorReason := fmt.Errorf("test-error")
  2030  	emptyContainerID := (&kubecontainer.ContainerID{}).String()
  2031  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2032  	defer testKubelet.Cleanup()
  2033  	kubelet := testKubelet.kubelet
  2034  	pod := podWithUIDNameNs("12345678", "foo", "new")
  2035  	pod.Spec = v1.PodSpec{RestartPolicy: v1.RestartPolicyOnFailure}
  2036  
  2037  	podStatus := &kubecontainer.PodStatus{
  2038  		ID:        pod.UID,
  2039  		Name:      pod.Name,
  2040  		Namespace: pod.Namespace,
  2041  	}
  2042  	tests := []struct {
  2043  		containers    []v1.Container
  2044  		statuses      []*kubecontainer.Status
  2045  		reasons       map[string]error
  2046  		oldStatuses   []v1.ContainerStatus
  2047  		expectedState map[string]v1.ContainerState
  2048  		// Only set expectedInitState when it is different from expectedState
  2049  		expectedInitState            map[string]v1.ContainerState
  2050  		expectedLastTerminationState map[string]v1.ContainerState
  2051  	}{
  2052  		// For a container with no historical record, State should be Waiting and LastTerminationState should be
  2053  		// retrieved from the old status reported by the apiserver.
  2054  		{
  2055  			containers: []v1.Container{{Name: "without-old-record"}, {Name: "with-old-record"}},
  2056  			statuses:   []*kubecontainer.Status{},
  2057  			reasons:    map[string]error{},
  2058  			oldStatuses: []v1.ContainerStatus{{
  2059  				Name:                 "with-old-record",
  2060  				LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{}},
  2061  			}},
  2062  			expectedState: map[string]v1.ContainerState{
  2063  				"without-old-record": {Waiting: &v1.ContainerStateWaiting{
  2064  					Reason: ContainerCreating,
  2065  				}},
  2066  				"with-old-record": {Waiting: &v1.ContainerStateWaiting{
  2067  					Reason: ContainerCreating,
  2068  				}},
  2069  			},
  2070  			expectedInitState: map[string]v1.ContainerState{
  2071  				"without-old-record": {Waiting: &v1.ContainerStateWaiting{
  2072  					Reason: PodInitializing,
  2073  				}},
  2074  				"with-old-record": {Waiting: &v1.ContainerStateWaiting{
  2075  					Reason: PodInitializing,
  2076  				}},
  2077  			},
  2078  			expectedLastTerminationState: map[string]v1.ContainerState{
  2079  				"with-old-record": {Terminated: &v1.ContainerStateTerminated{}},
  2080  			},
  2081  		},
  2082  		// For a running container, State should be Running and LastTerminationState should be retrieved from the latest terminated status.
  2083  		{
  2084  			containers: []v1.Container{{Name: "running"}},
  2085  			statuses: []*kubecontainer.Status{
  2086  				{
  2087  					Name:      "running",
  2088  					State:     kubecontainer.ContainerStateRunning,
  2089  					StartedAt: testTimestamp,
  2090  				},
  2091  				{
  2092  					Name:     "running",
  2093  					State:    kubecontainer.ContainerStateExited,
  2094  					ExitCode: 1,
  2095  				},
  2096  			},
  2097  			reasons:     map[string]error{},
  2098  			oldStatuses: []v1.ContainerStatus{},
  2099  			expectedState: map[string]v1.ContainerState{
  2100  				"running": {Running: &v1.ContainerStateRunning{
  2101  					StartedAt: metav1.NewTime(testTimestamp),
  2102  				}},
  2103  			},
  2104  			expectedLastTerminationState: map[string]v1.ContainerState{
  2105  				"running": {Terminated: &v1.ContainerStateTerminated{
  2106  					ExitCode:    1,
  2107  					ContainerID: emptyContainerID,
  2108  				}},
  2109  			},
  2110  		},
  2111  		// For a terminated container:
  2112  		// * If there is no recent start error record, State should be Terminated and LastTerminationState should be
  2113  		// retrieved from the second latest terminated status;
  2114  		// * If there is a recent start error record, State should be Waiting and LastTerminationState should be
  2115  		// retrieved from the latest terminated status;
  2116  		// * If ExitCode = 0 and the restart policy is RestartPolicyOnFailure, the container shouldn't be restarted.
  2117  		// Regardless of whether there is a recent start error, State should be Terminated and LastTerminationState
  2118  		// should be retrieved from the second latest terminated status.
  2119  		{
  2120  			containers: []v1.Container{{Name: "without-reason"}, {Name: "with-reason"}},
  2121  			statuses: []*kubecontainer.Status{
  2122  				{
  2123  					Name:     "without-reason",
  2124  					State:    kubecontainer.ContainerStateExited,
  2125  					ExitCode: 1,
  2126  				},
  2127  				{
  2128  					Name:     "with-reason",
  2129  					State:    kubecontainer.ContainerStateExited,
  2130  					ExitCode: 2,
  2131  				},
  2132  				{
  2133  					Name:     "without-reason",
  2134  					State:    kubecontainer.ContainerStateExited,
  2135  					ExitCode: 3,
  2136  				},
  2137  				{
  2138  					Name:     "with-reason",
  2139  					State:    kubecontainer.ContainerStateExited,
  2140  					ExitCode: 4,
  2141  				},
  2142  				{
  2143  					Name:     "succeed",
  2144  					State:    kubecontainer.ContainerStateExited,
  2145  					ExitCode: 0,
  2146  				},
  2147  				{
  2148  					Name:     "succeed",
  2149  					State:    kubecontainer.ContainerStateExited,
  2150  					ExitCode: 5,
  2151  				},
  2152  			},
  2153  			reasons:     map[string]error{"with-reason": testErrorReason, "succeed": testErrorReason},
  2154  			oldStatuses: []v1.ContainerStatus{},
  2155  			expectedState: map[string]v1.ContainerState{
  2156  				"without-reason": {Terminated: &v1.ContainerStateTerminated{
  2157  					ExitCode:    1,
  2158  					ContainerID: emptyContainerID,
  2159  				}},
  2160  				"with-reason": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
  2161  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2162  					ExitCode:    0,
  2163  					ContainerID: emptyContainerID,
  2164  				}},
  2165  			},
  2166  			expectedLastTerminationState: map[string]v1.ContainerState{
  2167  				"without-reason": {Terminated: &v1.ContainerStateTerminated{
  2168  					ExitCode:    3,
  2169  					ContainerID: emptyContainerID,
  2170  				}},
  2171  				"with-reason": {Terminated: &v1.ContainerStateTerminated{
  2172  					ExitCode:    2,
  2173  					ContainerID: emptyContainerID,
  2174  				}},
  2175  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2176  					ExitCode:    5,
  2177  					ContainerID: emptyContainerID,
  2178  				}},
  2179  			},
  2180  		},
  2181  		// For an Unknown container status:
  2182  		// In certain situations a container can be running while the kubelet fails to retrieve its status, which
  2183  		// results in a transition to the Unknown state. Prior to the fix, such a container made an invalid
  2184  		// transition from Running->Waiting; this case validates the correct transition from Running->Terminated.
  2185  		{
  2186  			containers: []v1.Container{{Name: "unknown"}},
  2187  			statuses: []*kubecontainer.Status{
  2188  				{
  2189  					Name:  "unknown",
  2190  					State: kubecontainer.ContainerStateUnknown,
  2191  				},
  2192  				{
  2193  					Name:  "unknown",
  2194  					State: kubecontainer.ContainerStateRunning,
  2195  				},
  2196  			},
  2197  			reasons: map[string]error{},
  2198  			oldStatuses: []v1.ContainerStatus{{
  2199  				Name:  "unknown",
  2200  				State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
  2201  			}},
  2202  			expectedState: map[string]v1.ContainerState{
  2203  				"unknown": {Terminated: &v1.ContainerStateTerminated{
  2204  					ExitCode: 137,
  2205  					Message:  "The container could not be located when the pod was terminated",
  2206  					Reason:   "ContainerStatusUnknown",
  2207  				}},
  2208  			},
  2209  			expectedLastTerminationState: map[string]v1.ContainerState{
  2210  				"unknown": {Running: &v1.ContainerStateRunning{}},
  2211  			},
  2212  		},
  2213  	}
  2214  
  2215  	for i, test := range tests {
  2216  		kubelet.reasonCache = NewReasonCache()
  2217  		for n, e := range test.reasons {
  2218  			kubelet.reasonCache.add(pod.UID, n, e, "")
  2219  		}
  2220  		pod.Spec.Containers = test.containers
  2221  		pod.Status.ContainerStatuses = test.oldStatuses
  2222  		podStatus.ContainerStatuses = test.statuses
  2223  		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
  2224  		verifyContainerStatuses(t, apiStatus.ContainerStatuses, test.expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
  2225  	}
  2226  
  2227  	// Everything should be the same for init containers
  2228  	for i, test := range tests {
  2229  		kubelet.reasonCache = NewReasonCache()
  2230  		for n, e := range test.reasons {
  2231  			kubelet.reasonCache.add(pod.UID, n, e, "")
  2232  		}
  2233  		pod.Spec.InitContainers = test.containers
  2234  		pod.Status.InitContainerStatuses = test.oldStatuses
  2235  		podStatus.ContainerStatuses = test.statuses
  2236  		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
  2237  		expectedState := test.expectedState
  2238  		if test.expectedInitState != nil {
  2239  			expectedState = test.expectedInitState
  2240  		}
  2241  		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, test.expectedLastTerminationState, fmt.Sprintf("case %d", i))
  2242  	}
  2243  }
  2244  
  2245  // Test generateAPIPodStatus with different restart policies.
  2246  func TestGenerateAPIPodStatusWithDifferentRestartPolicies(t *testing.T) {
  2247  	testErrorReason := fmt.Errorf("test-error")
  2248  	emptyContainerID := (&kubecontainer.ContainerID{}).String()
  2249  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2250  	defer testKubelet.Cleanup()
  2251  	kubelet := testKubelet.kubelet
  2252  	pod := podWithUIDNameNs("12345678", "foo", "new")
  2253  	containers := []v1.Container{{Name: "succeed"}, {Name: "failed"}}
  2254  	podStatus := &kubecontainer.PodStatus{
  2255  		ID:        pod.UID,
  2256  		Name:      pod.Name,
  2257  		Namespace: pod.Namespace,
  2258  		ContainerStatuses: []*kubecontainer.Status{
  2259  			{
  2260  				Name:     "succeed",
  2261  				State:    kubecontainer.ContainerStateExited,
  2262  				ExitCode: 0,
  2263  			},
  2264  			{
  2265  				Name:     "failed",
  2266  				State:    kubecontainer.ContainerStateExited,
  2267  				ExitCode: 1,
  2268  			},
  2269  			{
  2270  				Name:     "succeed",
  2271  				State:    kubecontainer.ContainerStateExited,
  2272  				ExitCode: 2,
  2273  			},
  2274  			{
  2275  				Name:     "failed",
  2276  				State:    kubecontainer.ContainerStateExited,
  2277  				ExitCode: 3,
  2278  			},
  2279  		},
  2280  	}
  2281  	kubelet.reasonCache.add(pod.UID, "succeed", testErrorReason, "")
  2282  	kubelet.reasonCache.add(pod.UID, "failed", testErrorReason, "")
  2283  	for c, test := range []struct {
  2284  		restartPolicy                v1.RestartPolicy
  2285  		expectedState                map[string]v1.ContainerState
  2286  		expectedLastTerminationState map[string]v1.ContainerState
  2287  		// Only set expectedInitState when it is different from expectedState
  2288  		expectedInitState map[string]v1.ContainerState
  2289  		// Only set expectedInitLastTerminationState when it is different from expectedLastTerminationState
  2290  		expectedInitLastTerminationState map[string]v1.ContainerState
  2291  	}{
  2292  		{
  2293  			restartPolicy: v1.RestartPolicyNever,
  2294  			expectedState: map[string]v1.ContainerState{
  2295  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2296  					ExitCode:    0,
  2297  					ContainerID: emptyContainerID,
  2298  				}},
  2299  				"failed": {Terminated: &v1.ContainerStateTerminated{
  2300  					ExitCode:    1,
  2301  					ContainerID: emptyContainerID,
  2302  				}},
  2303  			},
  2304  			expectedLastTerminationState: map[string]v1.ContainerState{
  2305  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2306  					ExitCode:    2,
  2307  					ContainerID: emptyContainerID,
  2308  				}},
  2309  				"failed": {Terminated: &v1.ContainerStateTerminated{
  2310  					ExitCode:    3,
  2311  					ContainerID: emptyContainerID,
  2312  				}},
  2313  			},
  2314  		},
  2315  		{
  2316  			restartPolicy: v1.RestartPolicyOnFailure,
  2317  			expectedState: map[string]v1.ContainerState{
  2318  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2319  					ExitCode:    0,
  2320  					ContainerID: emptyContainerID,
  2321  				}},
  2322  				"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
  2323  			},
  2324  			expectedLastTerminationState: map[string]v1.ContainerState{
  2325  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2326  					ExitCode:    2,
  2327  					ContainerID: emptyContainerID,
  2328  				}},
  2329  				"failed": {Terminated: &v1.ContainerStateTerminated{
  2330  					ExitCode:    1,
  2331  					ContainerID: emptyContainerID,
  2332  				}},
  2333  			},
  2334  		},
  2335  		{
  2336  			restartPolicy: v1.RestartPolicyAlways,
  2337  			expectedState: map[string]v1.ContainerState{
  2338  				"succeed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
  2339  				"failed":  {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
  2340  			},
  2341  			expectedLastTerminationState: map[string]v1.ContainerState{
  2342  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2343  					ExitCode:    0,
  2344  					ContainerID: emptyContainerID,
  2345  				}},
  2346  				"failed": {Terminated: &v1.ContainerStateTerminated{
  2347  					ExitCode:    1,
  2348  					ContainerID: emptyContainerID,
  2349  				}},
  2350  			},
  2351  			// If the init container is terminated with exit code 0, it won't be restarted even when the
  2352  			// restart policy is RestartPolicyAlways.
  2353  			expectedInitState: map[string]v1.ContainerState{
  2354  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2355  					ExitCode:    0,
  2356  					ContainerID: emptyContainerID,
  2357  				}},
  2358  				"failed": {Waiting: &v1.ContainerStateWaiting{Reason: testErrorReason.Error()}},
  2359  			},
  2360  			expectedInitLastTerminationState: map[string]v1.ContainerState{
  2361  				"succeed": {Terminated: &v1.ContainerStateTerminated{
  2362  					ExitCode:    2,
  2363  					ContainerID: emptyContainerID,
  2364  				}},
  2365  				"failed": {Terminated: &v1.ContainerStateTerminated{
  2366  					ExitCode:    1,
  2367  					ContainerID: emptyContainerID,
  2368  				}},
  2369  			},
  2370  		},
  2371  	} {
  2372  		pod.Spec.RestartPolicy = test.restartPolicy
  2373  		// Test normal containers
  2374  		pod.Spec.Containers = containers
  2375  		apiStatus := kubelet.generateAPIPodStatus(pod, podStatus, false)
  2376  		expectedState, expectedLastTerminationState := test.expectedState, test.expectedLastTerminationState
  2377  		verifyContainerStatuses(t, apiStatus.ContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
  2378  		pod.Spec.Containers = nil
  2379  
  2380  		// Test init containers
  2381  		pod.Spec.InitContainers = containers
  2382  		apiStatus = kubelet.generateAPIPodStatus(pod, podStatus, false)
  2383  		if test.expectedInitState != nil {
  2384  			expectedState = test.expectedInitState
  2385  		}
  2386  		if test.expectedInitLastTerminationState != nil {
  2387  			expectedLastTerminationState = test.expectedInitLastTerminationState
  2388  		}
  2389  		verifyContainerStatuses(t, apiStatus.InitContainerStatuses, expectedState, expectedLastTerminationState, fmt.Sprintf("case %d", c))
  2390  		pod.Spec.InitContainers = nil
  2391  	}
  2392  }
  2393  
  2394  // testPodAdmitHandler is a lifecycle.PodAdmitHandler for testing.
  2395  type testPodAdmitHandler struct {
  2396  	// list of pods to reject.
  2397  	podsToReject []*v1.Pod
  2398  }
  2399  
  2400  // Admit rejects all pods in the podsToReject list with a matching UID.
  2401  func (a *testPodAdmitHandler) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
  2402  	for _, podToReject := range a.podsToReject {
  2403  		if podToReject.UID == attrs.Pod.UID {
  2404  			return lifecycle.PodAdmitResult{Admit: false, Reason: "Rejected", Message: "Pod is rejected"}
  2405  		}
  2406  	}
  2407  	return lifecycle.PodAdmitResult{Admit: true}
  2408  }
  2409  
  2410  // Test verifies that the kubelet invokes an admission handler during HandlePodAdditions.
  2411  func TestHandlePodAdditionsInvokesPodAdmitHandlers(t *testing.T) {
  2412  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2413  	defer testKubelet.Cleanup()
  2414  	kl := testKubelet.kubelet
  2415  	kl.nodeLister = testNodeLister{nodes: []*v1.Node{
  2416  		{
  2417  			ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
  2418  			Status: v1.NodeStatus{
  2419  				Allocatable: v1.ResourceList{
  2420  					v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
  2421  				},
  2422  			},
  2423  		},
  2424  	}}
  2425  
  2426  	pods := []*v1.Pod{
  2427  		{
  2428  			ObjectMeta: metav1.ObjectMeta{
  2429  				UID:       "123456789",
  2430  				Name:      "podA",
  2431  				Namespace: "foo",
  2432  			},
  2433  		},
  2434  		{
  2435  			ObjectMeta: metav1.ObjectMeta{
  2436  				UID:       "987654321",
  2437  				Name:      "podB",
  2438  				Namespace: "foo",
  2439  			},
  2440  		},
  2441  	}
  2442  	podToReject := pods[0]
  2443  	podToAdmit := pods[1]
  2444  	podsToReject := []*v1.Pod{podToReject}
  2445  
  2446  	kl.admitHandlers.AddPodAdmitHandler(&testPodAdmitHandler{podsToReject: podsToReject})
  2447  
  2448  	kl.HandlePodAdditions(pods)
  2449  
  2450  	// Check pod status stored in the status map.
  2451  	checkPodStatus(t, kl, podToReject, v1.PodFailed)
  2452  	checkPodStatus(t, kl, podToAdmit, v1.PodPending)
  2453  }
  2454  
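        // TestPodResourceAllocationReset verifies that on pod addition the
        // checkpointed allocation takes precedence when one exists (as after a
        // kubelet restart), and is otherwise initialized from the spec's requests.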
  2455  func TestPodResourceAllocationReset(t *testing.T) {
  2456  	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
  2457  	testKubelet := newTestKubelet(t, false)
  2458  	defer testKubelet.Cleanup()
  2459  	kubelet := testKubelet.kubelet
  2460  	kubelet.statusManager = status.NewFakeManager()
  2461  
  2462  	nodes := []*v1.Node{
  2463  		{
  2464  			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  2465  			Status: v1.NodeStatus{
  2466  				Capacity: v1.ResourceList{
  2467  					v1.ResourceCPU:    resource.MustParse("8"),
  2468  					v1.ResourceMemory: resource.MustParse("8Gi"),
  2469  				},
  2470  				Allocatable: v1.ResourceList{
  2471  					v1.ResourceCPU:    resource.MustParse("4"),
  2472  					v1.ResourceMemory: resource.MustParse("4Gi"),
  2473  					v1.ResourcePods:   *resource.NewQuantity(40, resource.DecimalSI),
  2474  				},
  2475  			},
  2476  		},
  2477  	}
  2478  	kubelet.nodeLister = testNodeLister{nodes: nodes}
  2479  
  2480  	cpu500m := resource.MustParse("500m")
  2481  	cpu800m := resource.MustParse("800m")
  2482  	mem500M := resource.MustParse("500Mi")
  2483  	mem800M := resource.MustParse("800Mi")
  2484  	cpu500mMem500MPodSpec := &v1.PodSpec{
  2485  		Containers: []v1.Container{
  2486  			{
  2487  				Name: "c1",
  2488  				Resources: v1.ResourceRequirements{
  2489  					Requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
  2490  				},
  2491  			},
  2492  		},
  2493  	}
  2494  	cpu800mMem800MPodSpec := cpu500mMem500MPodSpec.DeepCopy()
  2495  	cpu800mMem800MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu800m, v1.ResourceMemory: mem800M}
  2496  	cpu800mPodSpec := cpu500mMem500MPodSpec.DeepCopy()
  2497  	cpu800mPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu800m}
  2498  	mem800MPodSpec := cpu500mMem500MPodSpec.DeepCopy()
  2499  	mem800MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceMemory: mem800M}
  2500  
  2501  	cpu500mPodSpec := cpu500mMem500MPodSpec.DeepCopy()
  2502  	cpu500mPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceCPU: cpu500m}
  2503  	mem500MPodSpec := cpu500mMem500MPodSpec.DeepCopy()
  2504  	mem500MPodSpec.Containers[0].Resources.Requests = v1.ResourceList{v1.ResourceMemory: mem500M}
  2505  	emptyPodSpec := cpu500mMem500MPodSpec.DeepCopy()
  2506  	emptyPodSpec.Containers[0].Resources.Requests = v1.ResourceList{}
  2507  
  2508  	tests := []struct {
  2509  		name                          string
  2510  		pod                           *v1.Pod
  2511  		existingPodAllocation         *v1.Pod
  2512  		expectedPodResourceAllocation state.PodResourceAllocation
  2513  	}{
  2514  		{
  2515  			name: "Having both memory and cpu, resource allocation not exists",
  2516  			pod:  podWithUIDNameNsSpec("1", "pod1", "foo", *cpu500mMem500MPodSpec),
  2517  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2518  				"1": map[string]v1.ResourceList{
  2519  					cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources.Requests,
  2520  				},
  2521  			},
  2522  		},
  2523  		{
  2524  			name:                  "Having both memory and cpu, resource allocation exists",
  2525  			pod:                   podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
  2526  			existingPodAllocation: podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
  2527  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2528  				"2": map[string]v1.ResourceList{
  2529  					cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources.Requests,
  2530  				},
  2531  			},
  2532  		},
  2533  		{
  2534  			name:                  "Having both memory and cpu, resource allocation exists (with different value)",
  2535  			pod:                   podWithUIDNameNsSpec("3", "pod3", "foo", *cpu500mMem500MPodSpec),
  2536  			existingPodAllocation: podWithUIDNameNsSpec("3", "pod3", "foo", *cpu800mMem800MPodSpec),
  2537  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2538  				"3": map[string]v1.ResourceList{
  2539  					cpu800mMem800MPodSpec.Containers[0].Name: cpu800mMem800MPodSpec.Containers[0].Resources.Requests,
  2540  				},
  2541  			},
  2542  		},
  2543  		{
  2544  			name: "Only has cpu, resource allocation not exists",
  2545  			pod:  podWithUIDNameNsSpec("4", "pod5", "foo", *cpu500mPodSpec),
  2546  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2547  				"4": map[string]v1.ResourceList{
  2548  					cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources.Requests,
  2549  				},
  2550  			},
  2551  		},
  2552  		{
  2553  			name:                  "Only has cpu, resource allocation exists",
  2554  			pod:                   podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
  2555  			existingPodAllocation: podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
  2556  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2557  				"5": map[string]v1.ResourceList{
  2558  					cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources.Requests,
  2559  				},
  2560  			},
  2561  		},
  2562  		{
  2563  			name:                  "Only has cpu, resource allocation exists (with different value)",
  2564  			pod:                   podWithUIDNameNsSpec("6", "pod6", "foo", *cpu500mPodSpec),
  2565  			existingPodAllocation: podWithUIDNameNsSpec("6", "pod6", "foo", *cpu800mPodSpec),
  2566  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2567  				"6": map[string]v1.ResourceList{
  2568  					cpu800mPodSpec.Containers[0].Name: cpu800mPodSpec.Containers[0].Resources.Requests,
  2569  				},
  2570  			},
  2571  		},
  2572  		{
  2573  			name: "Only has memory, resource allocation not exists",
  2574  			pod:  podWithUIDNameNsSpec("7", "pod7", "foo", *mem500MPodSpec),
  2575  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2576  				"7": map[string]v1.ResourceList{
  2577  					mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources.Requests,
  2578  				},
  2579  			},
  2580  		},
  2581  		{
  2582  			name:                  "Only has memory, resource allocation exists",
  2583  			pod:                   podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
  2584  			existingPodAllocation: podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
  2585  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2586  				"8": map[string]v1.ResourceList{
  2587  					mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources.Requests,
  2588  				},
  2589  			},
  2590  		},
  2591  		{
  2592  			name:                  "Only has memory, resource allocation exists (with different value)",
  2593  			pod:                   podWithUIDNameNsSpec("9", "pod9", "foo", *mem500MPodSpec),
  2594  			existingPodAllocation: podWithUIDNameNsSpec("9", "pod9", "foo", *mem800MPodSpec),
  2595  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2596  				"9": map[string]v1.ResourceList{
  2597  					mem800MPodSpec.Containers[0].Name: mem800MPodSpec.Containers[0].Resources.Requests,
  2598  				},
  2599  			},
  2600  		},
  2601  		{
  2602  			name: "No CPU and memory, resource allocation not exists",
  2603  			pod:  podWithUIDNameNsSpec("10", "pod10", "foo", *emptyPodSpec),
  2604  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2605  				"10": map[string]v1.ResourceList{
  2606  					emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources.Requests,
  2607  				},
  2608  			},
  2609  		},
  2610  		{
  2611  			name:                  "No CPU and memory, resource allocation exists",
  2612  			pod:                   podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
  2613  			existingPodAllocation: podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
  2614  			expectedPodResourceAllocation: state.PodResourceAllocation{
  2615  				"11": map[string]v1.ResourceList{
  2616  					emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources.Requests,
  2617  				},
  2618  			},
  2619  		},
  2620  	}
  2621  	for _, tc := range tests {
  2622  		if tc.existingPodAllocation != nil {
  2623  			// When the kubelet restarts, the pod's resource allocation already exists before the pod is re-added.
  2624  			err := kubelet.statusManager.SetPodAllocation(tc.existingPodAllocation)
  2625  			if err != nil {
  2626  				t.Fatalf("failed to set pod allocation: %v", err)
  2627  			}
  2628  		}
  2629  		kubelet.HandlePodAdditions([]*v1.Pod{tc.pod})
  2630  
  2631  		allocatedResources, found := kubelet.statusManager.GetContainerResourceAllocation(string(tc.pod.UID), tc.pod.Spec.Containers[0].Name)
  2632  		if !found {
  2633  			t.Fatalf("resource allocation should exist: (pod: %#v, container: %s)", tc.pod, tc.pod.Spec.Containers[0].Name)
  2634  		}
  2635  		assert.Equal(t, tc.expectedPodResourceAllocation[string(tc.pod.UID)][tc.pod.Spec.Containers[0].Name], allocatedResources, tc.name)
  2636  	}
  2637  }
  2638  
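        // TestHandlePodResourcesResize verifies the resize decision: resizes that
        // fit the node's free capacity are admitted (InProgress), resizes that
        // exceed currently available capacity are Deferred, and resizes beyond
        // node allocatable are Infeasible.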
  2639  func TestHandlePodResourcesResize(t *testing.T) {
  2640  	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
  2641  	testKubelet := newTestKubelet(t, false)
  2642  	defer testKubelet.Cleanup()
  2643  	kubelet := testKubelet.kubelet
  2644  	kubelet.statusManager = status.NewFakeManager()
  2645  
  2646  	cpu500m := resource.MustParse("500m")
  2647  	cpu1000m := resource.MustParse("1")
  2648  	cpu1500m := resource.MustParse("1500m")
  2649  	cpu2500m := resource.MustParse("2500m")
  2650  	cpu5000m := resource.MustParse("5000m")
  2651  	mem500M := resource.MustParse("500Mi")
  2652  	mem1000M := resource.MustParse("1Gi")
  2653  	mem1500M := resource.MustParse("1500Mi")
  2654  	mem2500M := resource.MustParse("2500Mi")
  2655  	mem4500M := resource.MustParse("4500Mi")
  2656  
  2657  	nodes := []*v1.Node{
  2658  		{
  2659  			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  2660  			Status: v1.NodeStatus{
  2661  				Capacity: v1.ResourceList{
  2662  					v1.ResourceCPU:    resource.MustParse("8"),
  2663  					v1.ResourceMemory: resource.MustParse("8Gi"),
  2664  				},
  2665  				Allocatable: v1.ResourceList{
  2666  					v1.ResourceCPU:    resource.MustParse("4"),
					v1.ResourceMemory: resource.MustParse("4Gi"),
					v1.ResourcePods:   *resource.NewQuantity(40, resource.DecimalSI),
				},
			},
		},
	}
	kubelet.nodeLister = testNodeLister{nodes: nodes}

	testPod1 := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "1111",
			Name:      "pod1",
			Namespace: "ns1",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "c1",
					Image: "i1",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
					},
				},
			},
		},
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
			ContainerStatuses: []v1.ContainerStatus{
				{
					Name:               "c1",
					AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
					Resources:          &v1.ResourceRequirements{},
				},
			},
		},
	}
	testPod2 := testPod1.DeepCopy()
	testPod2.UID = "2222"
	testPod2.Name = "pod2"
	testPod2.Namespace = "ns2"
	testPod3 := testPod1.DeepCopy()
	testPod3.UID = "3333"
	testPod3.Name = "pod3"
	testPod3.Namespace = "ns2"

	testKubelet.fakeKubeClient = fake.NewSimpleClientset(testPod1, testPod2, testPod3)
	kubelet.kubeClient = testKubelet.fakeKubeClient
	defer testKubelet.fakeKubeClient.ClearActions()
	kubelet.podManager.AddPod(testPod1)
	kubelet.podManager.AddPod(testPod2)
	kubelet.podManager.AddPod(testPod3)
	kubelet.podWorkers.(*fakePodWorkers).running = map[types.UID]bool{
		testPod1.UID: true,
		testPod2.UID: true,
		testPod3.UID: true,
	}
	defer kubelet.podManager.RemovePod(testPod3)
	defer kubelet.podManager.RemovePod(testPod2)
	defer kubelet.podManager.RemovePod(testPod1)

	tests := []struct {
		name                string
		pod                 *v1.Pod
		newRequests         v1.ResourceList
		expectedAllocations v1.ResourceList
		expectedResize      v1.PodResizeStatus
	}{
		{
			name:                "Request CPU and memory decrease - expect InProgress",
			pod:                 testPod2,
			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
			expectedResize:      v1.PodResizeStatusInProgress,
		},
		{
			name:                "Request CPU increase, memory decrease - expect InProgress",
			pod:                 testPod2,
			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1500m, v1.ResourceMemory: mem500M},
			expectedResize:      v1.PodResizeStatusInProgress,
		},
		{
			name:                "Request CPU decrease, memory increase - expect InProgress",
			pod:                 testPod2,
			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem1500M},
			expectedResize:      v1.PodResizeStatusInProgress,
		},
		{
			name:                "Request CPU and memory increase beyond current capacity - expect Deferred",
			pod:                 testPod2,
			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu2500m, v1.ResourceMemory: mem2500M},
			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
			expectedResize:      v1.PodResizeStatusDeferred,
		},
		{
			name:                "Request CPU decrease and memory increase beyond current capacity - expect Deferred",
			pod:                 testPod2,
			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem2500M},
			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
			expectedResize:      v1.PodResizeStatusDeferred,
		},
		{
			name:                "Request memory increase beyond node capacity - expect Infeasible",
			pod:                 testPod2,
			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem4500M},
			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
			expectedResize:      v1.PodResizeStatusInfeasible,
		},
		{
			name:                "Request CPU increase beyond node capacity - expect Infeasible",
			pod:                 testPod2,
			newRequests:         v1.ResourceList{v1.ResourceCPU: cpu5000m, v1.ResourceMemory: mem1000M},
			expectedAllocations: v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M},
			expectedResize:      v1.PodResizeStatusInfeasible,
		},
	}

	for _, tt := range tests {
		tt.pod.Spec.Containers[0].Resources.Requests = tt.newRequests
		tt.pod.Status.ContainerStatuses[0].AllocatedResources = v1.ResourceList{v1.ResourceCPU: cpu1000m, v1.ResourceMemory: mem1000M}
		kubelet.handlePodResourcesResize(tt.pod)
		updatedPod, found := kubelet.podManager.GetPodByName(tt.pod.Namespace, tt.pod.Name)
		assert.True(t, found, "expected to find pod %s", tt.pod.Name)
		assert.Equal(t, tt.expectedAllocations, updatedPod.Status.ContainerStatuses[0].AllocatedResources, tt.name)
		assert.Equal(t, tt.expectedResize, updatedPod.Status.Resize, tt.name)
		testKubelet.fakeKubeClient.ClearActions()
	}
}

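// The table above encodes the triage performed by handlePodResourcesResize:
// requests that can never fit on the node are Infeasible, requests that fit
// the node but not what is currently available are Deferred, and anything
// else is re-allocated and marked InProgress. A minimal sketch of that
// decision for a single resource quantity (hypothetical helper, not the
// kubelet implementation):
//
//	func resizeDecision(requested, nodeCapacity, available int64) v1.PodResizeStatus {
//		switch {
//		case requested > nodeCapacity:
//			return v1.PodResizeStatusInfeasible // can never fit on this node
//		case requested > available:
//			return v1.PodResizeStatusDeferred // may fit later; retry
//		default:
//			return v1.PodResizeStatusInProgress // admit and actuate the resize
//		}
//	}
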
// testPodSyncLoopHandler is a lifecycle.PodSyncLoopHandler that is used for testing.
type testPodSyncLoopHandler struct {
	// list of pods to sync
	podsToSync []*v1.Pod
}

// ShouldSync reports whether the pod should be synced by the kubelet.
func (a *testPodSyncLoopHandler) ShouldSync(pod *v1.Pod) bool {
	for _, podToSync := range a.podsToSync {
		if podToSync.UID == pod.UID {
			return true
		}
	}
	return false
}

// TestGetPodsToSyncInvokesPodSyncLoopHandlers ensures that getPodsToSync invokes the registered handlers.
func TestGetPodsToSyncInvokesPodSyncLoopHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pods := newTestPods(5)
	expected := []*v1.Pod{pods[0]}
	kubelet.AddPodSyncLoopHandler(&testPodSyncLoopHandler{expected})
	kubelet.podManager.SetPods(pods)

	podsToSync := kubelet.getPodsToSync()
	sort.Sort(podsByUID(expected))
	sort.Sort(podsByUID(podsToSync))
	assert.Equal(t, expected, podsToSync)
}

// testPodSyncHandler is a lifecycle.PodSyncHandler that is used for testing.
type testPodSyncHandler struct {
	// list of pods to evict
	podsToEvict []*v1.Pod
	// the reason for the eviction
	reason string
	// the message for the eviction
	message string
}

// ShouldEvict reports whether the pod should be evicted by the kubelet.
func (a *testPodSyncHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
	for _, podToEvict := range a.podsToEvict {
		if podToEvict.UID == pod.UID {
			return lifecycle.ShouldEvictResponse{Evict: true, Reason: a.reason, Message: a.message}
		}
	}
	return lifecycle.ShouldEvictResponse{Evict: false}
}

// TestGenerateAPIPodStatusInvokesPodSyncHandlers verifies that the registered handlers are invoked and the resulting status is reported.
func TestGenerateAPIPodStatusInvokesPodSyncHandlers(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	pod := newTestPods(1)[0]
	podsToEvict := []*v1.Pod{pod}
	kubelet.AddPodSyncHandler(&testPodSyncHandler{podsToEvict, "Evicted", "because"})
	status := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
	}
	apiStatus := kubelet.generateAPIPodStatus(pod, status, false)
	require.Equal(t, v1.PodFailed, apiStatus.Phase)
	require.Equal(t, "Evicted", apiStatus.Reason)
	require.Equal(t, "because", apiStatus.Message)
}

func TestSyncTerminatingPodKillPod(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kl := testKubelet.kubelet
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "foo",
		},
	}
	pods := []*v1.Pod{pod}
	kl.podManager.SetPods(pods)
	podStatus := &kubecontainer.PodStatus{ID: pod.UID}
	gracePeriodOverride := int64(0)
	// The callback mutates the generated API status; name its parameter
	// distinctly so it does not shadow the runtime podStatus above.
	err := kl.SyncTerminatingPod(context.Background(), pod, podStatus, &gracePeriodOverride, func(apiPodStatus *v1.PodStatus) {
		apiPodStatus.Phase = v1.PodFailed
		apiPodStatus.Reason = "reason"
		apiPodStatus.Message = "message"
	})
	require.NoError(t, err)

	// Check pod status stored in the status map.
	checkPodStatus(t, kl, pod, v1.PodFailed)
}

func TestSyncLabels(t *testing.T) {
	tests := []struct {
		name             string
		existingNode     *v1.Node
		isPatchingNeeded bool
	}{
		{
			name:             "no labels",
			existingNode:     &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{}}},
			isPatchingNeeded: true,
		},
		{
			name:             "wrong labels",
			existingNode:     &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS", v1.LabelArchStable: "dummyArch"}}},
			isPatchingNeeded: true,
		},
		{
			name:             "correct labels",
			existingNode:     &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
			isPatchingNeeded: false,
		},
		{
			name:             "partially correct labels",
			existingNode:     &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: "dummyArch"}}},
			isPatchingNeeded: true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			testKubelet := newTestKubelet(t, false)
			defer testKubelet.Cleanup()
			kl := testKubelet.kubelet
			kubeClient := testKubelet.fakeKubeClient

			test.existingNode.Name = string(kl.nodeName)

			kl.nodeLister = testNodeLister{nodes: []*v1.Node{test.existingNode}}
			go kl.syncNodeStatus()

			err := retryWithExponentialBackOff(
				100*time.Millisecond,
				func() (bool, error) {
					var savedNode *v1.Node
					if test.isPatchingNeeded {
						actions := kubeClient.Actions()
						if len(actions) == 0 {
							t.Logf("No action yet")
							return false, nil
						}
						for _, action := range actions {
							if action.GetVerb() == "patch" {
								var (
									err          error
									patchAction  = action.(core.PatchActionImpl)
									patchContent = patchAction.GetPatch()
								)
								savedNode, err = applyNodeStatusPatch(test.existingNode, patchContent)
								if err != nil {
									t.Logf("node patching failed, %v", err)
									return false, nil
								}
							}
						}
					} else {
						savedNode = test.existingNode
					}
					if savedNode == nil || savedNode.Labels == nil {
						t.Logf("savedNode.Labels should not be nil")
						return false, nil
					}
					val, ok := savedNode.Labels[v1.LabelOSStable]
					if !ok {
						t.Logf("expected kubernetes.io/os label to be present")
						return false, nil
					}
					if val != goruntime.GOOS {
						t.Logf("expected kubernetes.io/os to match runtime.GOOS but got %v", val)
						return false, nil
					}
					val, ok = savedNode.Labels[v1.LabelArchStable]
					if !ok {
						t.Logf("expected kubernetes.io/arch label to be present")
						return false, nil
					}
					if val != goruntime.GOARCH {
						t.Logf("expected kubernetes.io/arch to match runtime.GOARCH but got %v", val)
						return false, nil
					}
					return true, nil
				},
			)
			if err != nil {
				t.Fatalf("expected labels to be reconciled but it failed with %v", err)
			}
		})
	}
}

func waitForVolumeUnmount(
	volumeManager kubeletvolume.VolumeManager,
	pod *v1.Pod) error {
	var podVolumes kubecontainer.VolumeMap
	err := retryWithExponentialBackOff(
		50*time.Millisecond,
		func() (bool, error) {
			// Verify that the pod's volumes have been unmounted.
			podVolumes = volumeManager.GetMountedVolumesForPod(
				util.GetUniquePodName(pod))
			return len(podVolumes) == 0, nil
		},
	)

	if err != nil {
		return fmt.Errorf(
			"expected volumes to be unmounted, but some are still mounted: %#v: %w", podVolumes, err)
	}

	return nil
}

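// Sketch of how a test would use the helper above: remove all pods so the
// volume manager's reconciler tears the mounts down, then wait (the pod
// variable and surrounding test setup are assumed):
//
//	kubelet.podManager.SetPods([]*v1.Pod{})
//	if err := waitForVolumeUnmount(kubelet.volumeManager, pod); err != nil {
//		t.Fatal(err)
//	}
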
func waitForVolumeDetach(
	volumeName v1.UniqueVolumeName,
	volumeManager kubeletvolume.VolumeManager) error {
	err := retryWithExponentialBackOff(
		50*time.Millisecond,
		func() (bool, error) {
			// Verify that the volume has been detached.
			return !volumeManager.VolumeIsAttached(volumeName), nil
		},
	)

	if err != nil {
		return fmt.Errorf(
			"expected volume %q to be detached, but it is still attached: %w", volumeName, err)
	}

	return nil
}

func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration,
		Factor:   3,
		Jitter:   0,
		Steps:    6,
	}
	return wait.ExponentialBackoff(backoff, fn)
}

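// Usage sketch for the helper above: poll a condition until it holds, with
// waits growing by the configured factor of 3 (50ms, 150ms, 450ms, ... for an
// initial duration of 50ms) and giving up after the six configured steps. The
// condition function here is illustrative only:
//
//	err := retryWithExponentialBackOff(50*time.Millisecond, func() (bool, error) {
//		return conditionMet(), nil // conditionMet is a hypothetical helper
//	})
//	// err is non-nil if the condition never held within the backoff budget.
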
func simulateVolumeInUseUpdate(
	volumeName v1.UniqueVolumeName,
	stopCh <-chan struct{},
	volumeManager kubeletvolume.VolumeManager) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			volumeManager.MarkVolumesAsReportedInUse(
				[]v1.UniqueVolumeName{volumeName})
		case <-stopCh:
			return
		}
	}
}

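// Typical use in a test (sketch): run the updater in the background and close
// the stop channel when the test is done:
//
//	stopCh := make(chan struct{})
//	go simulateVolumeInUseUpdate(volumeName, stopCh, kubelet.volumeManager)
//	defer close(stopCh)
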
func runVolumeManager(kubelet *Kubelet) chan struct{} {
	stopCh := make(chan struct{})
	go kubelet.volumeManager.Run(kubelet.sourcesReady, stopCh)
	return stopCh
}

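// Callers stop the volume manager by closing the returned channel (sketch):
//
//	stopCh := runVolumeManager(kubelet)
//	defer close(stopCh)
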
// dirExists returns true if the path exists and represents a directory.
func dirExists(path string) bool {
	s, err := os.Stat(path)
	if err != nil {
		return false
	}
	return s.IsDir()
}

// Sort pods by UID.
type podsByUID []*v1.Pod

func (p podsByUID) Len() int           { return len(p) }
func (p podsByUID) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p podsByUID) Less(i, j int) bool { return p[i].UID < p[j].UID }

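// The same ordering could be expressed inline with sort.Slice instead of a
// named sort.Interface implementation, e.g.:
//
//	sort.Slice(pods, func(i, j int) bool { return pods[i].UID < pods[j].UID })
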
// createAndStartFakeRemoteRuntime creates and starts a fakeremote.RemoteRuntime.
// It returns the RemoteRuntime and its endpoint on success.
// Callers should invoke fakeRuntime.Stop() to clean up the server.
func createAndStartFakeRemoteRuntime(t *testing.T) (*fakeremote.RemoteRuntime, string) {
	endpoint, err := fakeremote.GenerateEndpoint()
	require.NoError(t, err)

	fakeRuntime := fakeremote.NewFakeRemoteRuntime()
	fakeRuntime.Start(endpoint)

	return fakeRuntime, endpoint
}

func createRemoteRuntimeService(endpoint string, t *testing.T) internalapi.RuntimeService {
	runtimeService, err := remote.NewRemoteRuntimeService(endpoint, 15*time.Second, oteltrace.NewNoopTracerProvider())
	require.NoError(t, err)
	return runtimeService
}

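// The two helpers above are meant to be used together, as in
// TestNewMainKubeletStandAlone below (sketch):
//
//	fakeRuntime, endpoint := createAndStartFakeRemoteRuntime(t)
//	defer fakeRuntime.Stop()
//	runtimeService := createRemoteRuntimeService(endpoint, t)
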
func TestNewMainKubeletStandAlone(t *testing.T) {
	tempDir, err := os.MkdirTemp("", "logs")
	assert.NoError(t, err)
	ContainerLogsDir = tempDir
	defer os.RemoveAll(ContainerLogsDir)
	kubeCfg := &kubeletconfiginternal.KubeletConfiguration{
		SyncFrequency: metav1.Duration{Duration: time.Minute},
		ConfigMapAndSecretChangeDetectionStrategy: kubeletconfiginternal.WatchChangeDetectionStrategy,
		ContainerLogMaxSize:                       "10Mi",
		ContainerLogMaxFiles:                      5,
		MemoryThrottlingFactor:                    utilpointer.Float64(0),
	}
	var prober volume.DynamicPluginProber
	tp := oteltrace.NewNoopTracerProvider()
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	cadvisor := cadvisortest.NewMockInterface(mockCtrl)
	cadvisor.EXPECT().MachineInfo().Return(&cadvisorapi.MachineInfo{}, nil).AnyTimes()
	cadvisor.EXPECT().ImagesFsInfo().Return(cadvisorapiv2.FsInfo{
		Usage:     400,
		Capacity:  1000,
		Available: 600,
	}, nil).AnyTimes()
	tlsOptions := &server.TLSOptions{
		Config: &tls.Config{
			MinVersion: 0,
		},
	}
	fakeRuntime, endpoint := createAndStartFakeRemoteRuntime(t)
	defer fakeRuntime.Stop()
	fakeRecorder := &record.FakeRecorder{}
	rtSvc := createRemoteRuntimeService(endpoint, t)
	kubeDep := &Dependencies{
		Auth:                 nil,
		CAdvisorInterface:    cadvisor,
		Cloud:                nil,
		ContainerManager:     cm.NewStubContainerManager(),
		KubeClient:           nil, // standalone mode
		HeartbeatClient:      nil,
		EventClient:          nil,
		TracerProvider:       tp,
		HostUtil:             hostutil.NewFakeHostUtil(nil),
		Mounter:              mount.NewFakeMounter(nil),
		Recorder:             fakeRecorder,
		RemoteRuntimeService: rtSvc,
		RemoteImageService:   fakeRuntime.ImageService,
		Subpather:            &subpath.FakeSubpath{},
		OOMAdjuster:          oom.NewOOMAdjuster(),
		OSInterface:          kubecontainer.RealOS{},
		DynamicPluginProber:  prober,
		TLSOptions:           tlsOptions,
	}
	crOptions := &config.ContainerRuntimeOptions{}

	testMainKubelet, err := NewMainKubelet(
		kubeCfg,
		kubeDep,
		crOptions,
		"hostname",
		false,
		"hostname",
		[]net.IP{},
		"",
		"external",
		"/tmp/cert",
		"/tmp/rootdir",
		"",
		"",
		false,
		[]v1.Taint{},
		[]string{},
		"",
		false,
		false,
		metav1.Duration{Duration: time.Minute},
		1024,
		110,
		true,
		true,
		map[string]string{},
		1024,
		false,
	)
	assert.NoError(t, err, "NewMainKubelet should succeed")
	assert.NotNil(t, testMainKubelet, "testMainKubelet should not be nil")

	testMainKubelet.BirthCry()
	testMainKubelet.StartGarbageCollection()
	// Uncommenting the code below reproduces a nil-pointer panic, because the
	// configmap and secret managers are nil in standalone mode.
	// See https://github.com/kubernetes/kubernetes/issues/113492
	// pod := &v1.Pod{
	// 	ObjectMeta: metav1.ObjectMeta{
	// 		UID:       "12345678",
	// 		Name:      "bar",
	// 		Namespace: "foo",
	// 	},
	// 	Spec: v1.PodSpec{
	// 		Containers: []v1.Container{{
	// 			EnvFrom: []v1.EnvFromSource{{
	// 				ConfigMapRef: &v1.ConfigMapEnvSource{
	// 					LocalObjectReference: v1.LocalObjectReference{Name: "config-map"}}},
	// 			}}},
	// 		Volumes: []v1.Volume{{
	// 			VolumeSource: v1.VolumeSource{
	// 				ConfigMap: &v1.ConfigMapVolumeSource{
	// 					LocalObjectReference: v1.LocalObjectReference{
	// 						Name: "config-map"}}}}},
	// 	},
	// }
	// testMainKubelet.configMapManager.RegisterPod(pod)
	// testMainKubelet.secretManager.RegisterPod(pod)
	assert.Nil(t, testMainKubelet.configMapManager, "configmap manager should be nil if kubelet is in standalone mode")
	assert.Nil(t, testMainKubelet.secretManager, "secret manager should be nil if kubelet is in standalone mode")
}