k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/kubelet/kubelet_node_status_test.go

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package kubelet
    18  
    19  import (
    20  	"context"
    21  	"encoding/json"
    22  	"fmt"
    23  	"net"
    24  	goruntime "runtime"
    25  	"sort"
    26  	"strconv"
    27  	"strings"
    28  	"sync/atomic"
    29  	"testing"
    30  	"time"
    31  
    32  	"github.com/stretchr/testify/assert"
    33  	"github.com/stretchr/testify/require"
    34  
    35  	cadvisorapi "github.com/google/cadvisor/info/v1"
    36  	"github.com/google/go-cmp/cmp"
    37  	v1 "k8s.io/api/core/v1"
    38  	apiequality "k8s.io/apimachinery/pkg/api/equality"
    39  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    40  	"k8s.io/apimachinery/pkg/api/resource"
    41  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    42  	"k8s.io/apimachinery/pkg/labels"
    43  	"k8s.io/apimachinery/pkg/runtime"
    44  	"k8s.io/apimachinery/pkg/util/rand"
    45  	"k8s.io/apimachinery/pkg/util/strategicpatch"
    46  	"k8s.io/apimachinery/pkg/util/uuid"
    47  	"k8s.io/apimachinery/pkg/util/wait"
    48  	clientset "k8s.io/client-go/kubernetes"
    49  	"k8s.io/client-go/kubernetes/fake"
    50  	"k8s.io/client-go/rest"
    51  	core "k8s.io/client-go/testing"
    52  	"k8s.io/component-base/version"
    53  	kubeletapis "k8s.io/kubelet/pkg/apis"
    54  	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
    55  	"k8s.io/kubernetes/pkg/kubelet/cm"
    56  	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    57  	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
    58  	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
    59  	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
    60  	taintutil "k8s.io/kubernetes/pkg/util/taints"
    61  	"k8s.io/kubernetes/pkg/volume/util"
    62  	netutils "k8s.io/utils/net"
    63  )
    64  
    65  const (
    66  	maxImageTagsForTest = 20
    67  )
    68  
     69  // generateTestingImageLists generates a randomly generated image list and the corresponding expectedImageList.
    70  func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
    71  	// imageList is randomly generated image list
    72  	var imageList []kubecontainer.Image
    73  	for ; count > 0; count-- {
    74  		imageItem := kubecontainer.Image{
    75  			ID:       string(uuid.NewUUID()),
    76  			RepoTags: generateImageTags(),
    77  			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
    78  		}
    79  		imageList = append(imageList, imageItem)
    80  	}
    81  
    82  	expectedImageList := makeExpectedImageList(imageList, maxImages)
    83  	return imageList, expectedImageList
    84  }
    85  
    86  func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
     87  	// expectedImageList is derived from imageList according to image size and maxImages:
    88  	// 1. sort the imageList by size
    89  	sort.Sort(sliceutils.ByImageSize(imageList))
    90  	// 2. convert sorted imageList to v1.ContainerImage list
    91  	var expectedImageList []v1.ContainerImage
    92  	for _, kubeImage := range imageList {
    93  		apiImage := v1.ContainerImage{
    94  			Names:     kubeImage.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
    95  			SizeBytes: kubeImage.Size,
    96  		}
    97  
    98  		expectedImageList = append(expectedImageList, apiImage)
    99  	}
    100  	// 3. return only the top maxImages images in expectedImageList
   101  	if maxImages == -1 { // -1 means no limit
   102  		return expectedImageList
   103  	}
   104  	return expectedImageList[0:maxImages]
   105  }
   106  
   107  func generateImageTags() []string {
   108  	var tagList []string
    109  	// Generate more than MaxNamesPerImageInNodeStatus tags so that the test can verify
    110  	// that the kubelet reports up to MaxNamesPerImageInNodeStatus tags.
   111  	count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
   112  	for ; count > 0; count-- {
   113  		tagList = append(tagList, "registry.k8s.io:v"+strconv.Itoa(count))
   114  	}
   115  	return tagList
   116  }
   117  
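         // applyNodeStatusPatch applies a strategic merge patch produced by a status update
         // to a copy of the original node, mirroring what the API server would persist.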
   118  func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
   119  	original, err := json.Marshal(originalNode)
   120  	if err != nil {
   121  		return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
   122  	}
   123  	updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
   124  	if err != nil {
   125  		return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
   126  			patch, originalNode, err)
   127  	}
   128  	updatedNode := &v1.Node{}
   129  	if err := json.Unmarshal(updated, updatedNode); err != nil {
   130  		return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
   131  	}
   132  	return updatedNode, nil
   133  }
   134  
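         // notImplemented is a catch-all reactor that fails whatever action it receives.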
   135  func notImplemented(action core.Action) (bool, runtime.Object, error) {
   136  	return true, nil, fmt.Errorf("no reaction implemented for %s", action)
   137  }
   138  
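         // addNotImplatedReaction registers notImplemented as a wildcard reactor so that any
         // unexpected API call surfaces as a test failure.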
   139  func addNotImplatedReaction(kubeClient *fake.Clientset) {
   140  	if kubeClient == nil {
   141  		return
   142  	}
   143  
   144  	kubeClient.AddReactor("*", "*", notImplemented)
   145  }
   146  
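         // localCM wraps a ContainerManager and overrides the capacity and allocatable
         // reservation it reports, so tests can control the resources seen by the node
         // status setters.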
   147  type localCM struct {
   148  	cm.ContainerManager
   149  	allocatableReservation v1.ResourceList
   150  	capacity               v1.ResourceList
   151  }
   152  
   153  func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
   154  	return lcm.allocatableReservation
   155  }
   156  
   157  func (lcm *localCM) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
   158  	if !localStorageCapacityIsolation {
   159  		delete(lcm.capacity, v1.ResourceEphemeralStorage)
   160  	}
   161  	return lcm.capacity
   162  }
   163  
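         // delegatingNodeLister satisfies the kubelet's node lister dependency by delegating
         // Get and List to a clientset, which in these tests is a fake client seeded with the
         // node under test.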
   164  type delegatingNodeLister struct {
   165  	client clientset.Interface
   166  }
   167  
   168  func (l delegatingNodeLister) Get(name string) (*v1.Node, error) {
   169  	return l.client.CoreV1().Nodes().Get(context.Background(), name, metav1.GetOptions{})
   170  }
   171  
   172  func (l delegatingNodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) {
   173  	opts := metav1.ListOptions{}
   174  	if selector != nil {
   175  		opts.LabelSelector = selector.String()
   176  	}
   177  	nodeList, err := l.client.CoreV1().Nodes().List(context.Background(), opts)
   178  	if err != nil {
   179  		return nil, err
   180  	}
    181  	nodes := make([]*v1.Node, len(nodeList.Items))
         	for i := range nodeList.Items {
         		nodes[i] = &nodeList.Items[i]
         	}
    182  	return nodes, nil
   183  }
   184  
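         // TestUpdateNewNodeStatus verifies that the first status update for a freshly
         // registered node patches the expected conditions, node info, capacity, allocatable
         // resources, addresses, and image list, honoring nodeStatusMaxImages.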
   185  func TestUpdateNewNodeStatus(t *testing.T) {
   186  	cases := []struct {
   187  		desc                string
   188  		nodeStatusMaxImages int32
   189  	}{
   190  		{
   191  			desc:                "5 image limit",
   192  			nodeStatusMaxImages: 5,
   193  		},
   194  		{
   195  			desc:                "no image limit",
   196  			nodeStatusMaxImages: -1,
   197  		},
   198  	}
   199  
   200  	for _, tc := range cases {
   201  		t.Run(tc.desc, func(t *testing.T) {
   202  			ctx := context.Background()
    203  			// generate one more image in inputImageList than we configure the Kubelet to report,
   204  			// or 5 images if unlimited
   205  			numTestImages := int(tc.nodeStatusMaxImages) + 1
   206  			if tc.nodeStatusMaxImages == -1 {
   207  				numTestImages = 5
   208  			}
   209  			inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
   210  			testKubelet := newTestKubeletWithImageList(
   211  				t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/, true /* localStorageCapacityIsolation */)
   212  			defer testKubelet.Cleanup()
   213  			kubelet := testKubelet.kubelet
   214  			kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
   215  			kubelet.kubeClient = nil // ensure only the heartbeat client is used
   216  			kubelet.containerManager = &localCM{
   217  				ContainerManager: cm.NewStubContainerManager(),
   218  				allocatableReservation: v1.ResourceList{
   219  					v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
   220  					v1.ResourceMemory:           *resource.NewQuantity(100e6, resource.BinarySI),
   221  					v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
   222  				},
   223  				capacity: v1.ResourceList{
   224  					v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   225  					v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   226  					v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   227  				},
   228  			}
   229  			// Since this test retroactively overrides the stub container manager,
   230  			// we have to regenerate default status setters.
   231  			kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   232  
   233  			kubeClient := testKubelet.fakeKubeClient
   234  			existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
   235  			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
   236  			kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
   237  			machineInfo := &cadvisorapi.MachineInfo{
   238  				MachineID:      "123",
   239  				SystemUUID:     "abc",
   240  				BootID:         "1b3",
   241  				NumCores:       2,
   242  				MemoryCapacity: 10e9, // 10G
   243  			}
   244  			kubelet.setCachedMachineInfo(machineInfo)
   245  
   246  			expectedNode := &v1.Node{
   247  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   248  				Spec:       v1.NodeSpec{},
   249  				Status: v1.NodeStatus{
   250  					Conditions: []v1.NodeCondition{
   251  						{
   252  							Type:               v1.NodeMemoryPressure,
   253  							Status:             v1.ConditionFalse,
   254  							Reason:             "KubeletHasSufficientMemory",
   255  							Message:            "kubelet has sufficient memory available",
   256  							LastHeartbeatTime:  metav1.Time{},
   257  							LastTransitionTime: metav1.Time{},
   258  						},
   259  						{
   260  							Type:               v1.NodeDiskPressure,
   261  							Status:             v1.ConditionFalse,
   262  							Reason:             "KubeletHasNoDiskPressure",
   263  							Message:            "kubelet has no disk pressure",
   264  							LastHeartbeatTime:  metav1.Time{},
   265  							LastTransitionTime: metav1.Time{},
   266  						},
   267  						{
   268  							Type:               v1.NodePIDPressure,
   269  							Status:             v1.ConditionFalse,
   270  							Reason:             "KubeletHasSufficientPID",
   271  							Message:            "kubelet has sufficient PID available",
   272  							LastHeartbeatTime:  metav1.Time{},
   273  							LastTransitionTime: metav1.Time{},
   274  						},
   275  						{
   276  							Type:               v1.NodeReady,
   277  							Status:             v1.ConditionTrue,
   278  							Reason:             "KubeletReady",
   279  							Message:            "kubelet is posting ready status",
   280  							LastHeartbeatTime:  metav1.Time{},
   281  							LastTransitionTime: metav1.Time{},
   282  						},
   283  					},
   284  					NodeInfo: v1.NodeSystemInfo{
   285  						MachineID:               "123",
   286  						SystemUUID:              "abc",
   287  						BootID:                  "1b3",
   288  						KernelVersion:           cadvisortest.FakeKernelVersion,
   289  						OSImage:                 cadvisortest.FakeContainerOSVersion,
   290  						OperatingSystem:         goruntime.GOOS,
   291  						Architecture:            goruntime.GOARCH,
   292  						ContainerRuntimeVersion: "test://1.5.0",
   293  						KubeletVersion:          version.Get().String(),
   294  						KubeProxyVersion:        "",
   295  					},
   296  					Capacity: v1.ResourceList{
   297  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   298  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   299  						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   300  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   301  					},
   302  					Allocatable: v1.ResourceList{
   303  						v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   304  						v1.ResourceMemory:           *resource.NewQuantity(9900e6, resource.BinarySI),
   305  						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   306  						v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
   307  					},
   308  					Addresses: []v1.NodeAddress{
   309  						{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   310  						{Type: v1.NodeHostName, Address: testKubeletHostname},
   311  					},
   312  					Images: expectedImageList,
   313  				},
   314  			}
   315  
   316  			kubelet.updateRuntimeUp()
   317  			assert.NoError(t, kubelet.updateNodeStatus(ctx))
   318  			actions := kubeClient.Actions()
   319  			require.Len(t, actions, 2)
   320  			require.True(t, actions[1].Matches("patch", "nodes"))
   321  			require.Equal(t, actions[1].GetSubresource(), "status")
   322  
   323  			updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
   324  			assert.NoError(t, err)
   325  			for i, cond := range updatedNode.Status.Conditions {
   326  				assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
   327  				assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
   328  				updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
   329  				updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
   330  			}
   331  
   332  			// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   333  			assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
    334  				"NodeReady should be the last condition")
   335  			assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
   336  			assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   337  		})
   338  	}
   339  }
   340  
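         // TestUpdateExistingNodeStatus verifies that updating a node that already has a
         // status refreshes LastHeartbeatTime on every condition while leaving
         // LastTransitionTime untouched for conditions that did not change.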
   341  func TestUpdateExistingNodeStatus(t *testing.T) {
   342  	ctx := context.Background()
   343  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   344  	defer testKubelet.Cleanup()
   345  	kubelet := testKubelet.kubelet
   346  	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
   347  	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
   348  	kubelet.containerManager = &localCM{
   349  		ContainerManager: cm.NewStubContainerManager(),
   350  		allocatableReservation: v1.ResourceList{
   351  			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
   352  			v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
   353  		},
   354  		capacity: v1.ResourceList{
   355  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   356  			v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   357  			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   358  		},
   359  	}
   360  	// Since this test retroactively overrides the stub container manager,
   361  	// we have to regenerate default status setters.
   362  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   363  
   364  	kubeClient := testKubelet.fakeKubeClient
   365  	existingNode := v1.Node{
   366  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
   367  		Spec:       v1.NodeSpec{},
   368  		Status: v1.NodeStatus{
   369  			Conditions: []v1.NodeCondition{
   370  				{
   371  					Type:               v1.NodeMemoryPressure,
   372  					Status:             v1.ConditionFalse,
   373  					Reason:             "KubeletHasSufficientMemory",
   374  					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
   375  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   376  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   377  				},
   378  				{
   379  					Type:               v1.NodeDiskPressure,
   380  					Status:             v1.ConditionFalse,
   381  					Reason:             "KubeletHasSufficientDisk",
   382  					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
   383  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   384  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   385  				},
   386  				{
   387  					Type:               v1.NodePIDPressure,
   388  					Status:             v1.ConditionFalse,
   389  					Reason:             "KubeletHasSufficientPID",
   390  					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
   391  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   392  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   393  				},
   394  				{
   395  					Type:               v1.NodeReady,
   396  					Status:             v1.ConditionTrue,
   397  					Reason:             "KubeletReady",
   398  					Message:            fmt.Sprintf("kubelet is posting ready status"),
   399  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   400  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   401  				},
   402  			},
   403  			Capacity: v1.ResourceList{
   404  				v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
   405  				v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
   406  				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
   407  			},
   408  			Allocatable: v1.ResourceList{
   409  				v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
   410  				v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
   411  				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
   412  			},
   413  		},
   414  	}
   415  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
   416  	kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
   417  	machineInfo := &cadvisorapi.MachineInfo{
   418  		MachineID:      "123",
   419  		SystemUUID:     "abc",
   420  		BootID:         "1b3",
   421  		NumCores:       2,
   422  		MemoryCapacity: 20e9,
   423  	}
   424  	kubelet.setCachedMachineInfo(machineInfo)
   425  
   426  	expectedNode := &v1.Node{
   427  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   428  		Spec:       v1.NodeSpec{},
   429  		Status: v1.NodeStatus{
   430  			Conditions: []v1.NodeCondition{
   431  				{
   432  					Type:               v1.NodeMemoryPressure,
   433  					Status:             v1.ConditionFalse,
   434  					Reason:             "KubeletHasSufficientMemory",
   435  					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
   436  					LastHeartbeatTime:  metav1.Time{},
   437  					LastTransitionTime: metav1.Time{},
   438  				},
   439  				{
   440  					Type:               v1.NodeDiskPressure,
   441  					Status:             v1.ConditionFalse,
   442  					Reason:             "KubeletHasSufficientDisk",
   443  					Message:            fmt.Sprintf("kubelet has sufficient disk space available"),
   444  					LastHeartbeatTime:  metav1.Time{},
   445  					LastTransitionTime: metav1.Time{},
   446  				},
   447  				{
   448  					Type:               v1.NodePIDPressure,
   449  					Status:             v1.ConditionFalse,
   450  					Reason:             "KubeletHasSufficientPID",
   451  					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
   452  					LastHeartbeatTime:  metav1.Time{},
   453  					LastTransitionTime: metav1.Time{},
   454  				},
   455  				{
   456  					Type:               v1.NodeReady,
   457  					Status:             v1.ConditionTrue,
   458  					Reason:             "KubeletReady",
   459  					Message:            fmt.Sprintf("kubelet is posting ready status"),
   460  					LastHeartbeatTime:  metav1.Time{}, // placeholder
   461  					LastTransitionTime: metav1.Time{}, // placeholder
   462  				},
   463  			},
   464  			NodeInfo: v1.NodeSystemInfo{
   465  				MachineID:               "123",
   466  				SystemUUID:              "abc",
   467  				BootID:                  "1b3",
   468  				KernelVersion:           cadvisortest.FakeKernelVersion,
   469  				OSImage:                 cadvisortest.FakeContainerOSVersion,
   470  				OperatingSystem:         goruntime.GOOS,
   471  				Architecture:            goruntime.GOARCH,
   472  				ContainerRuntimeVersion: "test://1.5.0",
   473  				KubeletVersion:          version.Get().String(),
   474  				KubeProxyVersion:        "",
   475  			},
   476  			Capacity: v1.ResourceList{
   477  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   478  				v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   479  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   480  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   481  			},
   482  			Allocatable: v1.ResourceList{
   483  				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   484  				v1.ResourceMemory:           *resource.NewQuantity(19900e6, resource.BinarySI),
   485  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   486  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   487  			},
   488  			Addresses: []v1.NodeAddress{
   489  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   490  				{Type: v1.NodeHostName, Address: testKubeletHostname},
   491  			},
   492  			// images will be sorted from max to min in node status.
   493  			Images: []v1.ContainerImage{
   494  				{
   495  					Names:     []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
   496  					SizeBytes: 123,
   497  				},
   498  				{
   499  					Names:     []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
   500  					SizeBytes: 456,
   501  				},
   502  			},
   503  		},
   504  	}
   505  
   506  	kubelet.updateRuntimeUp()
   507  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   508  
   509  	actions := kubeClient.Actions()
   510  	assert.Len(t, actions, 2)
   511  
   512  	assert.IsType(t, core.PatchActionImpl{}, actions[1])
   513  	patchAction := actions[1].(core.PatchActionImpl)
   514  
   515  	updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
   516  	require.NoError(t, err)
   517  
   518  	for i, cond := range updatedNode.Status.Conditions {
   519  		old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
    520  		// Expect LastHeartbeatTime to be updated to Now, while LastTransitionTime stays the same.
   521  		assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
   522  		assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
   523  
   524  		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
   525  		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
   526  	}
   527  
   528  	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   529  	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
   530  		"NodeReady should be the last condition")
   531  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   532  }
   533  
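         // TestUpdateExistingNodeStatusTimeout verifies that updateNodeStatus returns an
         // error instead of hanging when the API server never responds, retrying the update
         // and invoking onRepeatedHeartbeatFailure along the way.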
   534  func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
   535  	ctx := context.Background()
   536  	if testing.Short() {
   537  		t.Skip("skipping test in short mode.")
   538  	}
   539  
   540  	attempts := int64(0)
   541  	failureCallbacks := int64(0)
   542  
   543  	// set up a listener that hangs connections
   544  	ln, err := net.Listen("tcp", "127.0.0.1:0")
   545  	assert.NoError(t, err)
   546  	defer ln.Close()
   547  	go func() {
   548  		// accept connections and just let them hang
   549  		for {
   550  			_, err := ln.Accept()
   551  			if err != nil {
   552  				t.Log(err)
   553  				return
   554  			}
   555  			t.Log("accepted connection")
   556  			atomic.AddInt64(&attempts, 1)
   557  		}
   558  	}()
   559  
   560  	config := &rest.Config{
   561  		Host:    "http://" + ln.Addr().String(),
   562  		QPS:     -1,
   563  		Timeout: time.Second,
   564  	}
   565  	assert.NoError(t, err)
   566  
   567  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   568  	defer testKubelet.Cleanup()
   569  	kubelet := testKubelet.kubelet
   570  	kubelet.kubeClient = nil // ensure only the heartbeat client is used
   571  	kubelet.heartbeatClient, err = clientset.NewForConfig(config)
   572  	require.NoError(t, err)
   573  	kubelet.onRepeatedHeartbeatFailure = func() {
   574  		atomic.AddInt64(&failureCallbacks, 1)
   575  	}
   576  	kubelet.containerManager = &localCM{
   577  		ContainerManager: cm.NewStubContainerManager(),
   578  		allocatableReservation: v1.ResourceList{
   579  			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
   580  			v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
   581  		},
   582  		capacity: v1.ResourceList{
   583  			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
   584  			v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
   585  		},
   586  	}
   587  
   588  	// should return an error, but not hang
   589  	assert.Error(t, kubelet.updateNodeStatus(ctx))
   590  
   591  	// should have attempted multiple times
   592  	if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
   593  		t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
   594  	}
   595  	// should have gotten multiple failure callbacks
   596  	if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
    597  		t.Errorf("Expected at least %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
   598  	}
   599  }
   600  
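         // TestUpdateNodeStatusWithRuntimeStateError verifies that the NodeReady condition
         // flips between true and false as the container runtime status check goes stale,
         // fails, or reports RuntimeReady/NetworkReady as false.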
   601  func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
   602  	ctx := context.Background()
   603  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   604  	defer testKubelet.Cleanup()
   605  	kubelet := testKubelet.kubelet
   606  	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
   607  	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
   608  	kubelet.containerManager = &localCM{
   609  		ContainerManager: cm.NewStubContainerManager(),
   610  		allocatableReservation: v1.ResourceList{
   611  			v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
   612  			v1.ResourceMemory:           *resource.NewQuantity(100e6, resource.BinarySI),
   613  			v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
   614  		},
   615  		capacity: v1.ResourceList{
   616  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   617  			v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   618  			v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
   619  		},
   620  	}
   621  	// Since this test retroactively overrides the stub container manager,
   622  	// we have to regenerate default status setters.
   623  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   624  
   625  	clock := testKubelet.fakeClock
   626  	kubeClient := testKubelet.fakeKubeClient
   627  	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
   628  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
   629  	kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
   630  	machineInfo := &cadvisorapi.MachineInfo{
   631  		MachineID:      "123",
   632  		SystemUUID:     "abc",
   633  		BootID:         "1b3",
   634  		NumCores:       2,
   635  		MemoryCapacity: 10e9,
   636  	}
   637  	kubelet.setCachedMachineInfo(machineInfo)
   638  
   639  	expectedNode := &v1.Node{
   640  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   641  		Spec:       v1.NodeSpec{},
   642  		Status: v1.NodeStatus{
   643  			Conditions: []v1.NodeCondition{
   644  				{
   645  					Type:               v1.NodeMemoryPressure,
   646  					Status:             v1.ConditionFalse,
   647  					Reason:             "KubeletHasSufficientMemory",
   648  					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
   649  					LastHeartbeatTime:  metav1.Time{},
   650  					LastTransitionTime: metav1.Time{},
   651  				},
   652  				{
   653  					Type:               v1.NodeDiskPressure,
   654  					Status:             v1.ConditionFalse,
   655  					Reason:             "KubeletHasNoDiskPressure",
   656  					Message:            fmt.Sprintf("kubelet has no disk pressure"),
   657  					LastHeartbeatTime:  metav1.Time{},
   658  					LastTransitionTime: metav1.Time{},
   659  				},
   660  				{
   661  					Type:               v1.NodePIDPressure,
   662  					Status:             v1.ConditionFalse,
   663  					Reason:             "KubeletHasSufficientPID",
   664  					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
   665  					LastHeartbeatTime:  metav1.Time{},
   666  					LastTransitionTime: metav1.Time{},
   667  				},
    668  				{}, // placeholder
   669  			},
   670  			NodeInfo: v1.NodeSystemInfo{
   671  				MachineID:               "123",
   672  				SystemUUID:              "abc",
   673  				BootID:                  "1b3",
   674  				KernelVersion:           cadvisortest.FakeKernelVersion,
   675  				OSImage:                 cadvisortest.FakeContainerOSVersion,
   676  				OperatingSystem:         goruntime.GOOS,
   677  				Architecture:            goruntime.GOARCH,
   678  				ContainerRuntimeVersion: "test://1.5.0",
   679  				KubeletVersion:          version.Get().String(),
   680  				KubeProxyVersion:        "",
   681  			},
   682  			Capacity: v1.ResourceList{
   683  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   684  				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   685  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   686  				v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
   687  			},
   688  			Allocatable: v1.ResourceList{
   689  				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   690  				v1.ResourceMemory:           *resource.NewQuantity(9900e6, resource.BinarySI),
   691  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   692  				v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
   693  			},
   694  			Addresses: []v1.NodeAddress{
   695  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   696  				{Type: v1.NodeHostName, Address: testKubeletHostname},
   697  			},
   698  			Images: []v1.ContainerImage{
   699  				{
   700  					Names:     []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
   701  					SizeBytes: 123,
   702  				},
   703  				{
   704  					Names:     []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
   705  					SizeBytes: 456,
   706  				},
   707  			},
   708  		},
   709  	}
   710  
   711  	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
   712  		kubeClient.ClearActions()
   713  		assert.NoError(t, kubelet.updateNodeStatus(ctx))
   714  		actions := kubeClient.Actions()
   715  		require.Len(t, actions, 2)
   716  		require.True(t, actions[1].Matches("patch", "nodes"))
   717  		require.Equal(t, actions[1].GetSubresource(), "status")
   718  
   719  		updatedNode, err := kubeClient.CoreV1().Nodes().Get(ctx, testKubeletHostname, metav1.GetOptions{})
   720  		require.NoError(t, err, "can't apply node status patch")
   721  
   722  		for i, cond := range updatedNode.Status.Conditions {
   723  			assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
    724  			assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
   725  			updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
   726  			updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
   727  		}
   728  
   729  		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   730  		lastIndex := len(updatedNode.Status.Conditions) - 1
   731  		assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
   732  		assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
   733  
   734  		updatedNode.Status.Conditions[lastIndex].Message = ""
   735  		expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
   736  			Type:               v1.NodeReady,
   737  			Status:             status,
   738  			Reason:             reason,
   739  			LastHeartbeatTime:  metav1.Time{},
   740  			LastTransitionTime: metav1.Time{},
   741  		}
   742  		assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   743  	}
   744  
    745  	// TODO(random-liu): Refactor this unit test to be a table-driven test.
   746  	// Should report kubelet not ready if the runtime check is out of date
   747  	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
   748  	kubelet.updateRuntimeUp()
   749  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   750  
   751  	// Should report kubelet ready if the runtime check is updated
   752  	clock.SetTime(time.Now())
   753  	kubelet.updateRuntimeUp()
   754  	checkNodeStatus(v1.ConditionTrue, "KubeletReady")
   755  
   756  	// Should report kubelet not ready if the runtime check is out of date
   757  	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
   758  	kubelet.updateRuntimeUp()
   759  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   760  
   761  	// Should report kubelet not ready if the runtime check failed
   762  	fakeRuntime := testKubelet.fakeRuntime
   763  	// Inject error into fake runtime status check, node should be NotReady
   764  	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
   765  	clock.SetTime(time.Now())
   766  	kubelet.updateRuntimeUp()
   767  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   768  
   769  	fakeRuntime.StatusErr = nil
   770  
   771  	// Should report node not ready if runtime status is nil.
   772  	fakeRuntime.RuntimeStatus = nil
   773  	kubelet.updateRuntimeUp()
   774  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   775  
   776  	// Should report node not ready if runtime status is empty.
   777  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
   778  	kubelet.updateRuntimeUp()
   779  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   780  
   781  	// Should report node not ready if RuntimeReady is false.
   782  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
   783  		Conditions: []kubecontainer.RuntimeCondition{
   784  			{Type: kubecontainer.RuntimeReady, Status: false},
   785  			{Type: kubecontainer.NetworkReady, Status: true},
   786  		},
   787  	}
   788  	kubelet.updateRuntimeUp()
   789  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   790  
   791  	// Should report node ready if RuntimeReady is true.
   792  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
   793  		Conditions: []kubecontainer.RuntimeCondition{
   794  			{Type: kubecontainer.RuntimeReady, Status: true},
   795  			{Type: kubecontainer.NetworkReady, Status: true},
   796  		},
   797  	}
   798  	kubelet.updateRuntimeUp()
   799  	checkNodeStatus(v1.ConditionTrue, "KubeletReady")
   800  
   801  	// Should report node not ready if NetworkReady is false.
   802  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
   803  		Conditions: []kubecontainer.RuntimeCondition{
   804  			{Type: kubecontainer.RuntimeReady, Status: true},
   805  			{Type: kubecontainer.NetworkReady, Status: false},
   806  		},
   807  	}
   808  	kubelet.updateRuntimeUp()
   809  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   810  }
   811  
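         // TestUpdateNodeStatusError verifies that updateNodeStatus fails and retries
         // nodeStatusUpdateRetry times when no matching Node object exists on the API server.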
   812  func TestUpdateNodeStatusError(t *testing.T) {
   813  	ctx := context.Background()
   814  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   815  	defer testKubelet.Cleanup()
   816  	kubelet := testKubelet.kubelet
   817  	kubelet.kubeClient = nil // ensure only the heartbeat client is used
   818  	// No matching node for the kubelet
   819  	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
   820  	assert.Error(t, kubelet.updateNodeStatus(ctx))
   821  	assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
   822  }
   823  
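         // TestUpdateNodeStatusWithLease verifies that, with lease-based heartbeats, the
         // kubelet only patches the node status when something has changed or when
         // nodeStatusReportFrequency has elapsed; otherwise it only reads the node.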
   824  func TestUpdateNodeStatusWithLease(t *testing.T) {
   825  	ctx := context.Background()
   826  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   827  	defer testKubelet.Cleanup()
   828  	clock := testKubelet.fakeClock
   829  	kubelet := testKubelet.kubelet
   830  	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
   831  	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
   832  	kubelet.containerManager = &localCM{
   833  		ContainerManager: cm.NewStubContainerManager(),
   834  		allocatableReservation: v1.ResourceList{
   835  			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
   836  			v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
   837  		},
   838  		capacity: v1.ResourceList{
   839  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   840  			v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   841  			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   842  		},
   843  	}
   844  	// Since this test retroactively overrides the stub container manager,
   845  	// we have to regenerate default status setters.
   846  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   847  	kubelet.nodeStatusReportFrequency = time.Minute
   848  
   849  	kubeClient := testKubelet.fakeKubeClient
   850  	existingNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
   851  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*existingNode}}).ReactionChain
   852  	kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
   853  	machineInfo := &cadvisorapi.MachineInfo{
   854  		MachineID:      "123",
   855  		SystemUUID:     "abc",
   856  		BootID:         "1b3",
   857  		NumCores:       2,
   858  		MemoryCapacity: 20e9,
   859  	}
   860  	kubelet.setCachedMachineInfo(machineInfo)
   861  
   862  	now := metav1.NewTime(clock.Now()).Rfc3339Copy()
   863  	expectedNode := &v1.Node{
   864  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   865  		Spec:       v1.NodeSpec{},
   866  		Status: v1.NodeStatus{
   867  			Conditions: []v1.NodeCondition{
   868  				{
   869  					Type:               v1.NodeMemoryPressure,
   870  					Status:             v1.ConditionFalse,
   871  					Reason:             "KubeletHasSufficientMemory",
   872  					Message:            fmt.Sprintf("kubelet has sufficient memory available"),
   873  					LastHeartbeatTime:  now,
   874  					LastTransitionTime: now,
   875  				},
   876  				{
   877  					Type:               v1.NodeDiskPressure,
   878  					Status:             v1.ConditionFalse,
   879  					Reason:             "KubeletHasNoDiskPressure",
   880  					Message:            fmt.Sprintf("kubelet has no disk pressure"),
   881  					LastHeartbeatTime:  now,
   882  					LastTransitionTime: now,
   883  				},
   884  				{
   885  					Type:               v1.NodePIDPressure,
   886  					Status:             v1.ConditionFalse,
   887  					Reason:             "KubeletHasSufficientPID",
   888  					Message:            fmt.Sprintf("kubelet has sufficient PID available"),
   889  					LastHeartbeatTime:  now,
   890  					LastTransitionTime: now,
   891  				},
   892  				{
   893  					Type:               v1.NodeReady,
   894  					Status:             v1.ConditionTrue,
   895  					Reason:             "KubeletReady",
   896  					Message:            fmt.Sprintf("kubelet is posting ready status"),
   897  					LastHeartbeatTime:  now,
   898  					LastTransitionTime: now,
   899  				},
   900  			},
   901  			NodeInfo: v1.NodeSystemInfo{
   902  				MachineID:               "123",
   903  				SystemUUID:              "abc",
   904  				BootID:                  "1b3",
   905  				KernelVersion:           cadvisortest.FakeKernelVersion,
   906  				OSImage:                 cadvisortest.FakeContainerOSVersion,
   907  				OperatingSystem:         goruntime.GOOS,
   908  				Architecture:            goruntime.GOARCH,
   909  				ContainerRuntimeVersion: "test://1.5.0",
   910  				KubeletVersion:          version.Get().String(),
   911  				KubeProxyVersion:        "",
   912  			},
   913  			Capacity: v1.ResourceList{
   914  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   915  				v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   916  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   917  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   918  			},
   919  			Allocatable: v1.ResourceList{
   920  				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   921  				v1.ResourceMemory:           *resource.NewQuantity(19900e6, resource.BinarySI),
   922  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   923  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   924  			},
   925  			Addresses: []v1.NodeAddress{
   926  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   927  				{Type: v1.NodeHostName, Address: testKubeletHostname},
   928  			},
   929  			// images will be sorted from max to min in node status.
   930  			Images: []v1.ContainerImage{
   931  				{
   932  					Names:     []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
   933  					SizeBytes: 123,
   934  				},
   935  				{
   936  					Names:     []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
   937  					SizeBytes: 456,
   938  				},
   939  			},
   940  		},
   941  	}
   942  
   943  	// Update node status when node status is created.
   944  	// Report node status.
   945  	kubelet.updateRuntimeUp()
   946  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   947  
   948  	actions := kubeClient.Actions()
   949  	assert.Len(t, actions, 2)
   950  	assert.IsType(t, core.GetActionImpl{}, actions[0])
   951  	assert.IsType(t, core.PatchActionImpl{}, actions[1])
   952  	patchAction := actions[1].(core.PatchActionImpl)
   953  
   954  	updatedNode, err := applyNodeStatusPatch(existingNode, patchAction.GetPatch())
   955  	require.NoError(t, err)
    956  	for i, cond := range updatedNode.Status.Conditions {
    957  		updatedNode.Status.Conditions[i].LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
    958  		updatedNode.Status.Conditions[i].LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
    959  	}
   960  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   961  
   962  	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   963  	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
   964  		"NodeReady should be the last condition")
   965  
   966  	// Update node status again when nothing is changed (except heartbeat time).
   967  	// Report node status if it has exceeded the duration of nodeStatusReportFrequency.
   968  	clock.Step(time.Minute)
   969  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   970  
    971  	// 2 more actions (There were 2 actions before).
   972  	actions = kubeClient.Actions()
   973  	assert.Len(t, actions, 4)
   974  	assert.IsType(t, core.GetActionImpl{}, actions[2])
   975  	assert.IsType(t, core.PatchActionImpl{}, actions[3])
   976  	patchAction = actions[3].(core.PatchActionImpl)
   977  
   978  	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
   979  	require.NoError(t, err)
    980  	for i, cond := range updatedNode.Status.Conditions {
    981  		updatedNode.Status.Conditions[i].LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
    982  		updatedNode.Status.Conditions[i].LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
    983  	}
   984  
   985  	// Expect LastHeartbeat updated, other things unchanged.
   986  	for i, cond := range expectedNode.Status.Conditions {
   987  		expectedNode.Status.Conditions[i].LastHeartbeatTime = metav1.NewTime(cond.LastHeartbeatTime.Time.Add(time.Minute)).Rfc3339Copy()
   988  	}
   989  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   990  
   991  	// Update node status again when nothing is changed (except heartbeat time).
   992  	// Do not report node status if it is within the duration of nodeStatusReportFrequency.
   993  	clock.Step(10 * time.Second)
   994  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   995  
   996  	// Only 1 more action (There were 4 actions before).
   997  	actions = kubeClient.Actions()
   998  	assert.Len(t, actions, 5)
   999  	assert.IsType(t, core.GetActionImpl{}, actions[4])
  1000  
  1001  	// Update node status again when something is changed.
  1002  	// Report node status even if it is still within the duration of nodeStatusReportFrequency.
  1003  	clock.Step(10 * time.Second)
  1004  	var newMemoryCapacity int64 = 40e9
  1005  	oldMachineInfo, err := kubelet.GetCachedMachineInfo()
  1006  	if err != nil {
  1007  		t.Fatal(err)
  1008  	}
  1009  	newMachineInfo := oldMachineInfo.Clone()
  1010  	newMachineInfo.MemoryCapacity = uint64(newMemoryCapacity)
  1011  	kubelet.setCachedMachineInfo(newMachineInfo)
  1012  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1013  
   1014  	// 2 more actions (There were 5 actions before).
  1015  	actions = kubeClient.Actions()
  1016  	assert.Len(t, actions, 7)
  1017  	assert.IsType(t, core.GetActionImpl{}, actions[5])
  1018  	assert.IsType(t, core.PatchActionImpl{}, actions[6])
  1019  	patchAction = actions[6].(core.PatchActionImpl)
  1020  
  1021  	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
  1022  	require.NoError(t, err)
  1023  	memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
  1024  	updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
  1025  	assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
  1026  
  1027  	now = metav1.NewTime(clock.Now()).Rfc3339Copy()
  1028  	for _, cond := range updatedNode.Status.Conditions {
   1029  		// Expect LastHeartbeatTime updated, while LastTransitionTime unchanged.
  1030  		assert.Equal(t, now, cond.LastHeartbeatTime.Rfc3339Copy(),
  1031  			"LastHeartbeatTime for condition %v", cond.Type)
  1032  		assert.Equal(t, now, metav1.NewTime(cond.LastTransitionTime.Time.Add(time.Minute+20*time.Second)).Rfc3339Copy(),
  1033  			"LastTransitionTime for condition %v", cond.Type)
  1034  	}
  1035  
  1036  	// Update node status when changing pod CIDR.
  1037  	// Report node status if it is still within the duration of nodeStatusReportFrequency.
  1038  	clock.Step(10 * time.Second)
  1039  	assert.Equal(t, "", kubelet.runtimeState.podCIDR(), "Pod CIDR should be empty")
  1040  	podCIDRs := []string{"10.0.0.0/24", "2000::/10"}
  1041  	updatedNode.Spec.PodCIDR = podCIDRs[0]
  1042  	updatedNode.Spec.PodCIDRs = podCIDRs
  1043  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
  1044  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1045  	assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
   1046  	// 2 more actions (There were 7 actions before).
  1047  	actions = kubeClient.Actions()
  1048  	assert.Len(t, actions, 9)
  1049  	assert.IsType(t, core.GetActionImpl{}, actions[7])
  1050  	assert.IsType(t, core.PatchActionImpl{}, actions[8])
  1051  
  1052  	// Update node status when keeping the pod CIDR.
  1053  	// Do not report node status if it is within the duration of nodeStatusReportFrequency.
  1054  	clock.Step(10 * time.Second)
  1055  	assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
  1056  
  1057  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1058  	// Only 1 more action (There were 9 actions before).
  1059  	actions = kubeClient.Actions()
  1060  	assert.Len(t, actions, 10)
  1061  	assert.IsType(t, core.GetActionImpl{}, actions[9])
  1062  }
  1063  
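         // TestUpdateNodeStatusAndVolumesInUseWithNodeLease verifies that the VolumesInUse
         // status setter reconciles the volumes reported on the node with the volumes the
         // volume manager knows about, and only patches the node when that set changes.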
  1064  func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
  1065  	cases := []struct {
  1066  		desc                  string
  1067  		existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
  1068  		existingNode          *v1.Node              // existing node object
  1069  		expectedNode          *v1.Node              // new node object after patch
  1070  		expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
  1071  	}{
  1072  		{
  1073  			desc:         "no volumes and no update",
  1074  			existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
  1075  		},
  1076  		{
  1077  			desc:            "volumes inuse on node and volumeManager",
  1078  			existingVolumes: []v1.UniqueVolumeName{"vol1"},
  1079  			existingNode: &v1.Node{
  1080  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
  1081  				Status: v1.NodeStatus{
  1082  					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  1083  				},
  1084  			},
  1085  			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  1086  		},
  1087  		{
  1088  			desc: "volumes inuse on node but not in volumeManager",
  1089  			existingNode: &v1.Node{
  1090  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  1091  				Status: v1.NodeStatus{
  1092  					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  1093  				},
  1094  			},
  1095  			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
  1096  		},
  1097  		{
  1098  			desc:            "volumes inuse in volumeManager but not on node",
  1099  			existingVolumes: []v1.UniqueVolumeName{"vol1"},
  1100  			existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  1101  			expectedNode: &v1.Node{
  1102  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
  1103  				Status: v1.NodeStatus{
  1104  					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  1105  				},
  1106  			},
  1107  			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  1108  		},
  1109  	}
  1110  
  1111  	for _, tc := range cases {
  1112  		t.Run(tc.desc, func(t *testing.T) {
  1113  			ctx := context.Background()
  1114  			// Setup
  1115  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1116  			defer testKubelet.Cleanup()
  1117  
  1118  			kubelet := testKubelet.kubelet
  1119  			kubelet.kubeClient = nil // ensure only the heartbeat client is used
  1120  			kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
  1121  			kubelet.lastStatusReportTime = kubelet.clock.Now()
  1122  			kubelet.nodeStatusReportFrequency = time.Hour
  1123  			kubelet.setCachedMachineInfo(&cadvisorapi.MachineInfo{})
  1124  
  1125  			// override test volumeManager
  1126  			fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
  1127  			kubelet.volumeManager = fakeVolumeManager
  1128  
  1129  			// Only test VolumesInUse setter
  1130  			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
  1131  				nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
  1132  					kubelet.volumeManager.GetVolumesInUse),
  1133  			}
  1134  
  1135  			kubeClient := testKubelet.fakeKubeClient
  1136  			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
  1137  			kubelet.nodeLister = delegatingNodeLister{client: kubeClient}
  1138  
  1139  			// Execute
  1140  			assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1141  
  1142  			// Validate
  1143  			actions := kubeClient.Actions()
  1144  			if tc.expectedNode != nil {
  1145  				assert.Len(t, actions, 2)
  1146  				assert.IsType(t, core.GetActionImpl{}, actions[0])
  1147  				assert.IsType(t, core.PatchActionImpl{}, actions[1])
  1148  				patchAction := actions[1].(core.PatchActionImpl)
  1149  
  1150  				updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
  1151  				require.NoError(t, err)
  1152  				assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", cmp.Diff(tc.expectedNode, updatedNode))
  1153  			} else {
  1154  				assert.Len(t, actions, 1)
  1155  				assert.IsType(t, core.GetActionImpl{}, actions[0])
  1156  			}
  1157  
  1158  			reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
  1159  			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", cmp.Diff(tc.expectedReportedInUse, reportedInUse))
  1160  		})
  1161  	}
  1162  }
  1163  
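         // TestFastStatusUpdateOnce drives the kubelet's fast status update loop through the
         // scenarios in the table below, checking how many status setter invocations and node
         // patches happen before the node becomes Ready or the ready grace period expires.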
  1164  func TestFastStatusUpdateOnce(t *testing.T) {
  1165  	tests := []struct {
  1166  		name            string
  1167  		beforeMarkReady int
  1168  		beforeNextReady int
  1169  		beforeTimeout   int
  1170  		wantCalls       int
  1171  		patchFailures   int
  1172  		wantPatches     int
  1173  	}{
  1174  		{
  1175  			name:            "timeout after third loop",
  1176  			beforeMarkReady: 9,
  1177  			beforeNextReady: 9,
  1178  			beforeTimeout:   2,
  1179  			wantCalls:       3,
  1180  		},
  1181  		{
  1182  			name:            "already ready on third loop",
  1183  			beforeMarkReady: 9,
  1184  			beforeNextReady: 1,
  1185  			beforeTimeout:   9,
  1186  			wantCalls:       2,
  1187  		},
  1188  		{
  1189  			name:            "turns ready on third loop",
  1190  			beforeMarkReady: 2,
  1191  			beforeNextReady: 9,
  1192  			beforeTimeout:   9,
  1193  			wantCalls:       3,
  1194  			wantPatches:     1,
  1195  		},
  1196  		{
  1197  			name:            "turns ready on second loop then first patch fails",
  1198  			beforeMarkReady: 1,
  1199  			beforeNextReady: 9,
  1200  			beforeTimeout:   9,
  1201  			wantCalls:       3,
  1202  			patchFailures:   1,
  1203  			wantPatches:     2,
  1204  		},
  1205  		{
  1206  			name:            "turns ready on second loop then all patches fail",
  1207  			beforeMarkReady: 1,
  1208  			beforeNextReady: 9,
  1209  			beforeTimeout:   9,
  1210  			wantCalls:       nodeStatusUpdateRetry + 2,
  1211  			patchFailures:   nodeStatusUpdateRetry + 2,
  1212  			wantPatches:     nodeStatusUpdateRetry + 1,
  1213  		},
  1214  	}
  1215  
  1216  	for _, tc := range tests {
  1217  		t.Run(tc.name, func(t *testing.T) {
  1218  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1219  			defer testKubelet.Cleanup()
  1220  			kubelet := testKubelet.kubelet
  1221  			// Ensure we capture actions on the heartbeat client only.
  1222  			// We don't set it to nil, because then GetNode() would not read from the nodeLister.
  1223  			kubelet.kubeClient = &fake.Clientset{}
  1224  			kubeClient := testKubelet.fakeKubeClient
  1225  
  1226  			node := &v1.Node{
  1227  				ObjectMeta: metav1.ObjectMeta{
  1228  					Name: string(kubelet.nodeName),
  1229  				},
  1230  				Status: v1.NodeStatus{
  1231  					Conditions: []v1.NodeCondition{
  1232  						{
  1233  							Type:    v1.NodeReady,
  1234  							Status:  v1.ConditionFalse,
  1235  							Reason:  "NotReady",
  1236  							Message: "Node not ready",
  1237  						},
  1238  					},
  1239  				},
  1240  			}
  1241  
  1242  			nodeLister := testNodeLister{[]*v1.Node{node.DeepCopy()}}
  1243  			kubelet.nodeLister = nodeLister
  1244  
  1245  			callCount := 0
  1246  			// The original node status functions turn the node ready.
  1247  			nodeStatusFuncs := kubelet.setNodeStatusFuncs
  1248  			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{func(ctx context.Context, node *v1.Node) error {
  1249  				assert.False(t, kubelet.containerRuntimeReadyExpected)
  1250  				callCount++
  1251  				var lastErr error
  1252  				if callCount > tc.beforeMarkReady {
  1253  					for _, f := range nodeStatusFuncs {
  1254  						if err := f(ctx, node); err != nil {
  1255  							lastErr = err
  1256  						}
  1257  					}
  1258  				}
  1259  				if callCount > tc.beforeNextReady {
  1260  					nodeLister.nodes[0].Status.Conditions[0].Status = v1.ConditionTrue
  1261  				}
  1262  				if callCount > tc.beforeTimeout {
  1263  					testKubelet.fakeClock.Step(nodeReadyGracePeriod)
  1264  				}
  1265  				return lastErr
  1266  			}}
  1267  
  1268  			patchCount := 0
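        			// The first tc.patchFailures patch attempts are answered with a retryable error;
        			// any later attempts are left to the fake clientset's remaining reactors.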
  1269  			kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1270  				assert.False(t, kubelet.containerRuntimeReadyExpected)
  1271  				patchCount++
  1272  				if patchCount > tc.patchFailures {
  1273  					return false, nil, nil
  1274  				}
  1275  				return true, nil, fmt.Errorf("try again")
  1276  			})
  1277  
  1278  			kubelet.fastStatusUpdateOnce()
  1279  
  1280  			assert.True(t, kubelet.containerRuntimeReadyExpected)
  1281  			assert.Equal(t, tc.wantCalls, callCount)
  1282  			assert.Equal(t, tc.wantPatches, patchCount)
  1283  
  1284  			actions := kubeClient.Actions()
  1285  			if tc.wantPatches == 0 {
  1286  				require.Len(t, actions, 0)
  1287  				return
  1288  			}
  1289  
  1290  			// Expected pattern: patch, then patch, get, patch, get, patch, ... up to the initial patch plus nodeStatusUpdateRetry additional patches.
  1291  			expectedActions := 2*tc.wantPatches - 2
  1292  			if tc.wantPatches == 1 {
  1293  				expectedActions = 1
  1294  			}
  1295  			require.Len(t, actions, expectedActions)
  1296  
  1297  			for i, action := range actions {
  1298  				if i%2 == 0 && i > 0 {
  1299  					require.IsType(t, core.GetActionImpl{}, action)
  1300  					continue
  1301  				}
  1302  
  1303  				require.IsType(t, core.PatchActionImpl{}, action)
  1304  				patchAction := action.(core.PatchActionImpl)
  1305  
  1306  				updatedNode, err := applyNodeStatusPatch(node, patchAction.GetPatch())
  1307  				require.NoError(t, err)
  1308  				seenNodeReady := false
  1309  				for _, c := range updatedNode.Status.Conditions {
  1310  					if c.Type == v1.NodeReady {
  1311  						assert.Equal(t, v1.ConditionTrue, c.Status)
  1312  						seenNodeReady = true
  1313  					}
  1314  				}
  1315  				assert.True(t, seenNodeReady)
  1316  			}
  1317  		})
  1318  	}
  1319  }
  1320  
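        // TestRegisterWithApiServer verifies that registerWithAPIServer completes within
        // wait.ForeverTestTimeout when the create call reports that the node already exists
        // and the subsequent get returns a matching node.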
  1321  func TestRegisterWithApiServer(t *testing.T) {
  1322  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1323  	defer testKubelet.Cleanup()
  1324  	kubelet := testKubelet.kubelet
  1325  	kubeClient := testKubelet.fakeKubeClient
  1326  	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1327  		// Return an "already exists" error on create.
  1328  		return true, &v1.Node{}, &apierrors.StatusError{
  1329  			ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
  1330  		}
  1331  	})
  1332  	kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1333  		// Return an existing (matching) node on get.
  1334  		return true, &v1.Node{
  1335  			ObjectMeta: metav1.ObjectMeta{
  1336  				Name: testKubeletHostname,
  1337  				Labels: map[string]string{
  1338  					v1.LabelHostname:      testKubeletHostname,
  1339  					v1.LabelOSStable:      goruntime.GOOS,
  1340  					v1.LabelArchStable:    goruntime.GOARCH,
  1341  					kubeletapis.LabelOS:   goruntime.GOOS,
  1342  					kubeletapis.LabelArch: goruntime.GOARCH,
  1343  				},
  1344  			},
  1345  		}, nil
  1346  	})
  1347  
  1348  	kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1349  		if action.GetSubresource() == "status" {
  1350  			return true, nil, nil
  1351  		}
  1352  		return notImplemented(action)
  1353  	})
  1354  
  1355  	addNotImplatedReaction(kubeClient)
  1356  
  1357  	machineInfo := &cadvisorapi.MachineInfo{
  1358  		MachineID:      "123",
  1359  		SystemUUID:     "abc",
  1360  		BootID:         "1b3",
  1361  		NumCores:       2,
  1362  		MemoryCapacity: 1024,
  1363  	}
  1364  	kubelet.setCachedMachineInfo(machineInfo)
  1365  
  1366  	done := make(chan struct{})
  1367  	go func() {
  1368  		kubelet.registerWithAPIServer()
  1369  		done <- struct{}{}
  1370  	}()
  1371  	select {
  1372  	case <-time.After(wait.ForeverTestTimeout):
  1373  		assert.Fail(t, "timed out waiting for registration")
  1374  	case <-done:
  1375  		return
  1376  	}
  1377  }
  1378  
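        // TestTryRegisterWithApiServer covers the create/get/patch error paths of
        // tryRegisterWithAPIServer, including reconciliation of the controller-managed
        // attach-detach (CMAD) annotation on an already existing node.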
  1379  func TestTryRegisterWithApiServer(t *testing.T) {
  1380  	alreadyExists := &apierrors.StatusError{
  1381  		ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
  1382  	}
  1383  
  1384  	conflict := &apierrors.StatusError{
  1385  		ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
  1386  	}
  1387  
  1388  	newNode := func(cmad bool) *v1.Node {
  1389  		node := &v1.Node{
  1390  			ObjectMeta: metav1.ObjectMeta{
  1391  				Labels: map[string]string{
  1392  					v1.LabelHostname:      testKubeletHostname,
  1393  					v1.LabelOSStable:      goruntime.GOOS,
  1394  					v1.LabelArchStable:    goruntime.GOARCH,
  1395  					kubeletapis.LabelOS:   goruntime.GOOS,
  1396  					kubeletapis.LabelArch: goruntime.GOARCH,
  1397  				},
  1398  			},
  1399  		}
  1400  
  1401  		if cmad {
  1402  			node.Annotations = make(map[string]string)
  1403  			node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
  1404  		}
  1405  
  1406  		return node
  1407  	}
  1408  
  1409  	cases := []struct {
  1410  		name            string
  1411  		newNode         *v1.Node
  1412  		existingNode    *v1.Node
  1413  		createError     error
  1414  		getError        error
  1415  		patchError      error
  1416  		deleteError     error
  1417  		expectedResult  bool
  1418  		expectedActions int
  1419  		testSavedNode   bool
  1420  		savedNodeIndex  int
  1421  		savedNodeCMAD   bool
  1422  	}{
  1423  		{
  1424  			name:            "success case - new node",
  1425  			newNode:         &v1.Node{},
  1426  			expectedResult:  true,
  1427  			expectedActions: 1,
  1428  		},
  1429  		{
  1430  			name:            "success case - existing node - no change in CMAD",
  1431  			newNode:         newNode(true),
  1432  			createError:     alreadyExists,
  1433  			existingNode:    newNode(true),
  1434  			expectedResult:  true,
  1435  			expectedActions: 2,
  1436  		},
  1437  		{
  1438  			name:            "success case - existing node - CMAD disabled",
  1439  			newNode:         newNode(false),
  1440  			createError:     alreadyExists,
  1441  			existingNode:    newNode(true),
  1442  			expectedResult:  true,
  1443  			expectedActions: 3,
  1444  			testSavedNode:   true,
  1445  			savedNodeIndex:  2,
  1446  			savedNodeCMAD:   false,
  1447  		},
  1448  		{
  1449  			name:            "success case - existing node - CMAD enabled",
  1450  			newNode:         newNode(true),
  1451  			createError:     alreadyExists,
  1452  			existingNode:    newNode(false),
  1453  			expectedResult:  true,
  1454  			expectedActions: 3,
  1455  			testSavedNode:   true,
  1456  			savedNodeIndex:  2,
  1457  			savedNodeCMAD:   true,
  1458  		},
  1459  		{
  1460  			name:            "create failed",
  1461  			newNode:         newNode(false),
  1462  			createError:     conflict,
  1463  			expectedResult:  false,
  1464  			expectedActions: 1,
  1465  		},
  1466  		{
  1467  			name:            "get existing node failed",
  1468  			newNode:         newNode(false),
  1469  			createError:     alreadyExists,
  1470  			getError:        conflict,
  1471  			expectedResult:  false,
  1472  			expectedActions: 2,
  1473  		},
  1474  		{
  1475  			name:            "update existing node failed",
  1476  			newNode:         newNode(false),
  1477  			createError:     alreadyExists,
  1478  			existingNode:    newNode(true),
  1479  			patchError:      conflict,
  1480  			expectedResult:  false,
  1481  			expectedActions: 3,
  1482  		},
  1483  	}
  1484  
  1485  	for _, tc := range cases {
  1486  		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
  1487  		defer testKubelet.Cleanup()
  1488  		kubelet := testKubelet.kubelet
  1489  		kubeClient := testKubelet.fakeKubeClient
  1490  
  1491  		kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1492  			return true, nil, tc.createError
  1493  		})
  1494  		kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1495  			// Return the test case's existing node (and get error, if any) on get.
  1496  			return true, tc.existingNode, tc.getError
  1497  		})
  1498  		kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1499  			if action.GetSubresource() == "status" {
  1500  				return true, nil, tc.patchError
  1501  			}
  1502  			return notImplemented(action)
  1503  		})
  1504  		kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1505  			return true, nil, tc.deleteError
  1506  		})
  1507  		addNotImplatedReaction(kubeClient)
  1508  
  1509  		result := kubelet.tryRegisterWithAPIServer(tc.newNode)
  1510  		require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
  1511  
  1512  		actions := kubeClient.Actions()
  1513  		assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)
  1514  
  1515  		if tc.testSavedNode {
  1516  			var savedNode *v1.Node
  1517  
  1518  			t.Logf("actions: %v: %+v", len(actions), actions)
  1519  			action := actions[tc.savedNodeIndex]
  1520  			if action.GetVerb() == "create" {
  1521  				createAction := action.(core.CreateAction)
  1522  				obj := createAction.GetObject()
  1523  				require.IsType(t, &v1.Node{}, obj)
  1524  				savedNode = obj.(*v1.Node)
  1525  			} else if action.GetVerb() == "patch" {
  1526  				patchAction := action.(core.PatchActionImpl)
  1527  				var err error
  1528  				savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
  1529  				require.NoError(t, err)
  1530  			}
  1531  
  1532  			actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
  1533  			assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
  1534  		}
  1535  	}
  1536  }
  1537  
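        // TestUpdateNewNodeStatusTooLargeReservation checks the node status reported when the
        // allocatable reservation exceeds the node's capacity: the expectation below is that
        // allocatable is floored at zero rather than reported as a negative quantity.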
  1538  func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
  1539  	ctx := context.Background()
  1540  	const nodeStatusMaxImages = 5
  1541  
  1542  	// Generate one more image in inputImageList than the Kubelet is configured to report.
  1543  	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
  1544  	testKubelet := newTestKubeletWithImageList(
  1545  		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */, true)
  1546  	defer testKubelet.Cleanup()
  1547  	kubelet := testKubelet.kubelet
  1548  	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
  1549  	kubelet.kubeClient = nil // ensure only the heartbeat client is used
  1550  	kubelet.containerManager = &localCM{
  1551  		ContainerManager: cm.NewStubContainerManager(),
  1552  		allocatableReservation: v1.ResourceList{
  1553  			v1.ResourceCPU:              *resource.NewMilliQuantity(40000, resource.DecimalSI),
  1554  			v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
  1555  		},
  1556  		capacity: v1.ResourceList{
  1557  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1558  			v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1559  			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
  1560  		},
  1561  	}
  1562  	// Since this test retroactively overrides the stub container manager,
  1563  	// we have to regenerate default status setters.
  1564  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  1565  
  1566  	kubeClient := testKubelet.fakeKubeClient
  1567  	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  1568  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  1569  	machineInfo := &cadvisorapi.MachineInfo{
  1570  		MachineID:      "123",
  1571  		SystemUUID:     "abc",
  1572  		BootID:         "1b3",
  1573  		NumCores:       2,
  1574  		MemoryCapacity: 10e9, // 10G
  1575  	}
  1576  	kubelet.setCachedMachineInfo(machineInfo)
  1577  
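        	// CPU allocatable is expected to be zero because the 40-CPU reservation exceeds the
        	// 2-CPU capacity; ephemeral-storage allocatable is capacity (3000) minus the
        	// 1000 reservation.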
  1578  	expectedNode := &v1.Node{
  1579  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  1580  		Spec:       v1.NodeSpec{},
  1581  		Status: v1.NodeStatus{
  1582  			Capacity: v1.ResourceList{
  1583  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1584  				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1585  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
  1586  				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
  1587  			},
  1588  			Allocatable: v1.ResourceList{
  1589  				v1.ResourceCPU:              *resource.NewMilliQuantity(0, resource.DecimalSI),
  1590  				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1591  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
  1592  				v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
  1593  			},
  1594  		},
  1595  	}
  1596  
  1597  	kubelet.updateRuntimeUp()
  1598  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1599  	actions := kubeClient.Actions()
  1600  	require.Len(t, actions, 1)
  1601  	require.True(t, actions[0].Matches("patch", "nodes"))
  1602  	require.Equal(t, "status", actions[0].GetSubresource())
  1603  
  1604  	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[0].(core.PatchActionImpl).GetPatch())
  1605  	assert.NoError(t, err)
  1606  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", cmp.Diff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
  1607  }
  1608  
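        // TestUpdateDefaultLabels verifies updateDefaultLabels: default labels from the initial
        // node are created or refreshed on the existing node, labels the kubelet does not own
        // are left untouched, and missing stable OS/arch/zone/region/instance-type labels are
        // backfilled.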
  1609  func TestUpdateDefaultLabels(t *testing.T) {
  1610  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1611  	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
  1612  
  1613  	cases := []struct {
  1614  		name         string
  1615  		initialNode  *v1.Node
  1616  		existingNode *v1.Node
  1617  		needsUpdate  bool
  1618  		finalLabels  map[string]string
  1619  	}{
  1620  		{
  1621  			name: "make sure default labels exist",
  1622  			initialNode: &v1.Node{
  1623  				ObjectMeta: metav1.ObjectMeta{
  1624  					Labels: map[string]string{
  1625  						v1.LabelHostname:                "new-hostname",
  1626  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1627  						v1.LabelTopologyRegion:          "new-zone-region",
  1628  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1629  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1630  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1631  						v1.LabelInstanceType:            "new-instance-type",
  1632  						v1.LabelOSStable:                "new-os",
  1633  						v1.LabelArchStable:              "new-arch",
  1634  					},
  1635  				},
  1636  			},
  1637  			existingNode: &v1.Node{
  1638  				ObjectMeta: metav1.ObjectMeta{
  1639  					Labels: map[string]string{},
  1640  				},
  1641  			},
  1642  			needsUpdate: true,
  1643  			finalLabels: map[string]string{
  1644  				v1.LabelHostname:                "new-hostname",
  1645  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1646  				v1.LabelTopologyRegion:          "new-zone-region",
  1647  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1648  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1649  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1650  				v1.LabelInstanceType:            "new-instance-type",
  1651  				v1.LabelOSStable:                "new-os",
  1652  				v1.LabelArchStable:              "new-arch",
  1653  			},
  1654  		},
  1655  		{
  1656  			name: "make sure default labels are up to date",
  1657  			initialNode: &v1.Node{
  1658  				ObjectMeta: metav1.ObjectMeta{
  1659  					Labels: map[string]string{
  1660  						v1.LabelHostname:                "new-hostname",
  1661  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1662  						v1.LabelTopologyRegion:          "new-zone-region",
  1663  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1664  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1665  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1666  						v1.LabelInstanceType:            "new-instance-type",
  1667  						v1.LabelOSStable:                "new-os",
  1668  						v1.LabelArchStable:              "new-arch",
  1669  					},
  1670  				},
  1671  			},
  1672  			existingNode: &v1.Node{
  1673  				ObjectMeta: metav1.ObjectMeta{
  1674  					Labels: map[string]string{
  1675  						v1.LabelHostname:                "old-hostname",
  1676  						v1.LabelTopologyZone:            "old-zone-failure-domain",
  1677  						v1.LabelTopologyRegion:          "old-zone-region",
  1678  						v1.LabelFailureDomainBetaZone:   "old-zone-failure-domain",
  1679  						v1.LabelFailureDomainBetaRegion: "old-zone-region",
  1680  						v1.LabelInstanceTypeStable:      "old-instance-type",
  1681  						v1.LabelInstanceType:            "old-instance-type",
  1682  						v1.LabelOSStable:                "old-os",
  1683  						v1.LabelArchStable:              "old-arch",
  1684  					},
  1685  				},
  1686  			},
  1687  			needsUpdate: true,
  1688  			finalLabels: map[string]string{
  1689  				v1.LabelHostname:                "new-hostname",
  1690  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1691  				v1.LabelTopologyRegion:          "new-zone-region",
  1692  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1693  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1694  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1695  				v1.LabelInstanceType:            "new-instance-type",
  1696  				v1.LabelOSStable:                "new-os",
  1697  				v1.LabelArchStable:              "new-arch",
  1698  			},
  1699  		},
  1700  		{
  1701  			name: "make sure existing labels do not get deleted",
  1702  			initialNode: &v1.Node{
  1703  				ObjectMeta: metav1.ObjectMeta{
  1704  					Labels: map[string]string{
  1705  						v1.LabelHostname:                "new-hostname",
  1706  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1707  						v1.LabelTopologyRegion:          "new-zone-region",
  1708  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1709  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1710  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1711  						v1.LabelInstanceType:            "new-instance-type",
  1712  						v1.LabelOSStable:                "new-os",
  1713  						v1.LabelArchStable:              "new-arch",
  1714  					},
  1715  				},
  1716  			},
  1717  			existingNode: &v1.Node{
  1718  				ObjectMeta: metav1.ObjectMeta{
  1719  					Labels: map[string]string{
  1720  						v1.LabelHostname:                "new-hostname",
  1721  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1722  						v1.LabelTopologyRegion:          "new-zone-region",
  1723  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1724  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1725  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1726  						v1.LabelInstanceType:            "new-instance-type",
  1727  						v1.LabelOSStable:                "new-os",
  1728  						v1.LabelArchStable:              "new-arch",
  1729  						"please-persist":                "foo",
  1730  					},
  1731  				},
  1732  			},
  1733  			needsUpdate: false,
  1734  			finalLabels: map[string]string{
  1735  				v1.LabelHostname:                "new-hostname",
  1736  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1737  				v1.LabelTopologyRegion:          "new-zone-region",
  1738  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1739  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1740  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1741  				v1.LabelInstanceType:            "new-instance-type",
  1742  				v1.LabelOSStable:                "new-os",
  1743  				v1.LabelArchStable:              "new-arch",
  1744  				"please-persist":                "foo",
  1745  			},
  1746  		},
  1747  		{
  1748  			name: "make sure existing labels do not get deleted when initial node has no opinion",
  1749  			initialNode: &v1.Node{
  1750  				ObjectMeta: metav1.ObjectMeta{
  1751  					Labels: map[string]string{},
  1752  				},
  1753  			},
  1754  			existingNode: &v1.Node{
  1755  				ObjectMeta: metav1.ObjectMeta{
  1756  					Labels: map[string]string{
  1757  						v1.LabelHostname:                "new-hostname",
  1758  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1759  						v1.LabelTopologyRegion:          "new-zone-region",
  1760  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1761  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1762  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1763  						v1.LabelInstanceType:            "new-instance-type",
  1764  						v1.LabelOSStable:                "new-os",
  1765  						v1.LabelArchStable:              "new-arch",
  1766  						"please-persist":                "foo",
  1767  					},
  1768  				},
  1769  			},
  1770  			needsUpdate: false,
  1771  			finalLabels: map[string]string{
  1772  				v1.LabelHostname:                "new-hostname",
  1773  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1774  				v1.LabelTopologyRegion:          "new-zone-region",
  1775  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1776  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1777  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1778  				v1.LabelInstanceType:            "new-instance-type",
  1779  				v1.LabelOSStable:                "new-os",
  1780  				v1.LabelArchStable:              "new-arch",
  1781  				"please-persist":                "foo",
  1782  			},
  1783  		},
  1784  		{
  1785  			name: "no update needed",
  1786  			initialNode: &v1.Node{
  1787  				ObjectMeta: metav1.ObjectMeta{
  1788  					Labels: map[string]string{
  1789  						v1.LabelHostname:                "new-hostname",
  1790  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1791  						v1.LabelTopologyRegion:          "new-zone-region",
  1792  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1793  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1794  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1795  						v1.LabelInstanceType:            "new-instance-type",
  1796  						v1.LabelOSStable:                "new-os",
  1797  						v1.LabelArchStable:              "new-arch",
  1798  					},
  1799  				},
  1800  			},
  1801  			existingNode: &v1.Node{
  1802  				ObjectMeta: metav1.ObjectMeta{
  1803  					Labels: map[string]string{
  1804  						v1.LabelHostname:                "new-hostname",
  1805  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1806  						v1.LabelTopologyRegion:          "new-zone-region",
  1807  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1808  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1809  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1810  						v1.LabelInstanceType:            "new-instance-type",
  1811  						v1.LabelOSStable:                "new-os",
  1812  						v1.LabelArchStable:              "new-arch",
  1813  					},
  1814  				},
  1815  			},
  1816  			needsUpdate: false,
  1817  			finalLabels: map[string]string{
  1818  				v1.LabelHostname:                "new-hostname",
  1819  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1820  				v1.LabelTopologyRegion:          "new-zone-region",
  1821  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1822  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1823  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1824  				v1.LabelInstanceType:            "new-instance-type",
  1825  				v1.LabelOSStable:                "new-os",
  1826  				v1.LabelArchStable:              "new-arch",
  1827  			},
  1828  		},
  1829  		{
  1830  			name: "not panic when existing node has nil labels",
  1831  			initialNode: &v1.Node{
  1832  				ObjectMeta: metav1.ObjectMeta{
  1833  					Labels: map[string]string{
  1834  						v1.LabelHostname:                "new-hostname",
  1835  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1836  						v1.LabelTopologyRegion:          "new-zone-region",
  1837  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1838  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1839  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1840  						v1.LabelInstanceType:            "new-instance-type",
  1841  						v1.LabelOSStable:                "new-os",
  1842  						v1.LabelArchStable:              "new-arch",
  1843  					},
  1844  				},
  1845  			},
  1846  			existingNode: &v1.Node{
  1847  				ObjectMeta: metav1.ObjectMeta{},
  1848  			},
  1849  			needsUpdate: true,
  1850  			finalLabels: map[string]string{
  1851  				v1.LabelHostname:                "new-hostname",
  1852  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1853  				v1.LabelTopologyRegion:          "new-zone-region",
  1854  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1855  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1856  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1857  				v1.LabelInstanceType:            "new-instance-type",
  1858  				v1.LabelOSStable:                "new-os",
  1859  				v1.LabelArchStable:              "new-arch",
  1860  			},
  1861  		},
  1862  		{
  1863  			name: "backfill required for new stable labels for os/arch/zones/regions/instance-type",
  1864  			initialNode: &v1.Node{
  1865  				ObjectMeta: metav1.ObjectMeta{
  1866  					Labels: map[string]string{
  1867  						v1.LabelHostname:                "new-hostname",
  1868  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1869  						v1.LabelTopologyRegion:          "new-zone-region",
  1870  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1871  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1872  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1873  						v1.LabelInstanceType:            "new-instance-type",
  1874  						v1.LabelOSStable:                "new-os",
  1875  						v1.LabelArchStable:              "new-arch",
  1876  					},
  1877  				},
  1878  			},
  1879  			existingNode: &v1.Node{
  1880  				ObjectMeta: metav1.ObjectMeta{
  1881  					Labels: map[string]string{
  1882  						v1.LabelHostname:                "new-hostname",
  1883  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1884  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1885  						v1.LabelInstanceType:            "new-instance-type",
  1886  					},
  1887  				},
  1888  			},
  1889  			needsUpdate: true,
  1890  			finalLabels: map[string]string{
  1891  				v1.LabelHostname:                "new-hostname",
  1892  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1893  				v1.LabelTopologyRegion:          "new-zone-region",
  1894  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1895  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1896  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1897  				v1.LabelInstanceType:            "new-instance-type",
  1898  				v1.LabelOSStable:                "new-os",
  1899  				v1.LabelArchStable:              "new-arch",
  1900  			},
  1901  		},
  1902  	}
  1903  
  1904  	for _, tc := range cases {
  1905  		defer testKubelet.Cleanup()
  1906  		kubelet := testKubelet.kubelet
  1907  
  1908  		needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
  1909  		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  1910  		assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
  1911  	}
  1912  }
  1913  
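        // TestUpdateDefaultResources verifies that updateDefaultResources copies capacity and
        // allocatable from the initial node only when the existing node leaves them unset, and
        // that it reports whether an update is needed.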
  1914  func TestUpdateDefaultResources(t *testing.T) {
  1915  	cases := []struct {
  1916  		name         string
  1917  		initialNode  *v1.Node
  1918  		existingNode *v1.Node
  1919  		expectedNode *v1.Node
  1920  		needsUpdate  bool
  1921  	}{
  1922  		{
  1923  			name: "no update needed when capacity and allocatable of the existing node are not nil",
  1924  			initialNode: &v1.Node{
  1925  				Status: v1.NodeStatus{
  1926  					Capacity: v1.ResourceList{
  1927  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1928  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1929  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1930  					},
  1931  					Allocatable: v1.ResourceList{
  1932  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1933  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1934  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1935  					},
  1936  				},
  1937  			},
  1938  			existingNode: &v1.Node{
  1939  				Status: v1.NodeStatus{
  1940  					Capacity: v1.ResourceList{
  1941  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1942  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1943  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1944  					},
  1945  					Allocatable: v1.ResourceList{
  1946  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1947  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1948  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1949  					},
  1950  				},
  1951  			},
  1952  			expectedNode: &v1.Node{
  1953  				Status: v1.NodeStatus{
  1954  					Capacity: v1.ResourceList{
  1955  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1956  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1957  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1958  					},
  1959  					Allocatable: v1.ResourceList{
  1960  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1961  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1962  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1963  					},
  1964  				},
  1965  			},
  1966  			needsUpdate: false,
  1967  		}, {
  1968  			name:        "no update needed when capacity and allocatable of the initial node are nil",
  1969  			initialNode: &v1.Node{},
  1970  			existingNode: &v1.Node{
  1971  				Status: v1.NodeStatus{
  1972  					Capacity: v1.ResourceList{
  1973  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1974  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1975  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1976  					},
  1977  					Allocatable: v1.ResourceList{
  1978  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1979  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1980  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1981  					},
  1982  				},
  1983  			},
  1984  			expectedNode: &v1.Node{
  1985  				Status: v1.NodeStatus{
  1986  					Capacity: v1.ResourceList{
  1987  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1988  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1989  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1990  					},
  1991  					Allocatable: v1.ResourceList{
  1992  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1993  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1994  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1995  					},
  1996  				},
  1997  			},
  1998  			needsUpdate: false,
  1999  		}, {
  2000  			name: "update needed when capacity and allocatable of the existing node are nil and capacity and allocatable of the initial node are not nil",
  2001  			initialNode: &v1.Node{
  2002  				Status: v1.NodeStatus{
  2003  					Capacity: v1.ResourceList{
  2004  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2005  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2006  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2007  					},
  2008  					Allocatable: v1.ResourceList{
  2009  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2010  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2011  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2012  					},
  2013  				},
  2014  			},
  2015  			existingNode: &v1.Node{},
  2016  			expectedNode: &v1.Node{
  2017  				Status: v1.NodeStatus{
  2018  					Capacity: v1.ResourceList{
  2019  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2020  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2021  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2022  					},
  2023  					Allocatable: v1.ResourceList{
  2024  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2025  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2026  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2027  					},
  2028  				},
  2029  			},
  2030  			needsUpdate: true,
  2031  		}, {
  2032  			name: "update needed when capacity of the existing node is nil and capacity of the initial node is not nil",
  2033  			initialNode: &v1.Node{
  2034  				Status: v1.NodeStatus{
  2035  					Capacity: v1.ResourceList{
  2036  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2037  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2038  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2039  					},
  2040  				},
  2041  			},
  2042  			existingNode: &v1.Node{
  2043  				Status: v1.NodeStatus{
  2044  					Allocatable: v1.ResourceList{
  2045  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2046  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2047  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2048  					},
  2049  				},
  2050  			},
  2051  			expectedNode: &v1.Node{
  2052  				Status: v1.NodeStatus{
  2053  					Capacity: v1.ResourceList{
  2054  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2055  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2056  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2057  					},
  2058  					Allocatable: v1.ResourceList{
  2059  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2060  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2061  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2062  					},
  2063  				},
  2064  			},
  2065  			needsUpdate: true,
  2066  		}, {
  2067  			name: "update needed when allocatable of the existing node is nil and allocatable of the initial node is not nil",
  2068  			initialNode: &v1.Node{
  2069  				Status: v1.NodeStatus{
  2070  					Allocatable: v1.ResourceList{
  2071  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2072  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2073  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2074  					},
  2075  				},
  2076  			},
  2077  			existingNode: &v1.Node{
  2078  				Status: v1.NodeStatus{
  2079  					Capacity: v1.ResourceList{
  2080  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2081  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2082  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2083  					},
  2084  				},
  2085  			},
  2086  			expectedNode: &v1.Node{
  2087  				Status: v1.NodeStatus{
  2088  					Capacity: v1.ResourceList{
  2089  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2090  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2091  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2092  					},
  2093  					Allocatable: v1.ResourceList{
  2094  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2095  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2096  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2097  					},
  2098  				},
  2099  			},
  2100  			needsUpdate: true,
  2101  		}, {
  2102  			name:         "no update needed but capacity and allocatable of existing node should be initialized",
  2103  			initialNode:  &v1.Node{},
  2104  			existingNode: &v1.Node{},
  2105  			expectedNode: &v1.Node{
  2106  				Status: v1.NodeStatus{
  2107  					Capacity:    v1.ResourceList{},
  2108  					Allocatable: v1.ResourceList{},
  2109  				},
  2110  			},
  2111  			needsUpdate: false,
  2112  		},
  2113  	}
  2114  
  2115  	for _, tc := range cases {
  2116  		t.Run(tc.name, func(T *testing.T) {
  2117  			needsUpdate := updateDefaultResources(tc.initialNode, tc.existingNode)
  2118  			assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  2119  			assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
  2120  		})
  2121  	}
  2122  }
  2123  
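        // TestReconcileHugePageResource verifies that reconcileHugePageResource adds newly
        // supported hugepages-* resources, updates changed quantities, and drops resources the
        // initial node no longer reports.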
  2124  func TestReconcileHugePageResource(t *testing.T) {
  2125  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2126  	hugePageResourceName64Ki := v1.ResourceName("hugepages-64Ki")
  2127  	hugePageResourceName2Mi := v1.ResourceName("hugepages-2Mi")
  2128  	hugePageResourceName1Gi := v1.ResourceName("hugepages-1Gi")
  2129  
  2130  	cases := []struct {
  2131  		name         string
  2132  		testKubelet  *TestKubelet
  2133  		initialNode  *v1.Node
  2134  		existingNode *v1.Node
  2135  		expectedNode *v1.Node
  2136  		needsUpdate  bool
  2137  	}{
  2138  		{
  2139  			name:        "no update needed when all huge page resources are similar",
  2140  			testKubelet: testKubelet,
  2141  			needsUpdate: false,
  2142  			initialNode: &v1.Node{
  2143  				Status: v1.NodeStatus{
  2144  					Capacity: v1.ResourceList{
  2145  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2146  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2147  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2148  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2149  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2150  					},
  2151  					Allocatable: v1.ResourceList{
  2152  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2153  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2154  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2155  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2156  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2157  					},
  2158  				},
  2159  			},
  2160  			existingNode: &v1.Node{
  2161  				Status: v1.NodeStatus{
  2162  					Capacity: v1.ResourceList{
  2163  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2164  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2165  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2166  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2167  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2168  					},
  2169  					Allocatable: v1.ResourceList{
  2170  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2171  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2172  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2173  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2174  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2175  					},
  2176  				},
  2177  			},
  2178  			expectedNode: &v1.Node{
  2179  				Status: v1.NodeStatus{
  2180  					Capacity: v1.ResourceList{
  2181  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2182  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2183  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2184  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2185  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2186  					},
  2187  					Allocatable: v1.ResourceList{
  2188  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2189  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2190  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2191  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2192  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2193  					},
  2194  				},
  2195  			},
  2196  		}, {
  2197  			name:        "update needed when a new huge page resource is supported",
  2198  			testKubelet: testKubelet,
  2199  			needsUpdate: true,
  2200  			initialNode: &v1.Node{
  2201  				Status: v1.NodeStatus{
  2202  					Capacity: v1.ResourceList{
  2203  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2204  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2205  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2206  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2207  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2208  					},
  2209  					Allocatable: v1.ResourceList{
  2210  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2211  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2212  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2213  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2214  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2215  					},
  2216  				},
  2217  			},
  2218  			existingNode: &v1.Node{
  2219  				Status: v1.NodeStatus{
  2220  					Capacity: v1.ResourceList{
  2221  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2222  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2223  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2224  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2225  					},
  2226  					Allocatable: v1.ResourceList{
  2227  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2228  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2229  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2230  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2231  					},
  2232  				},
  2233  			},
  2234  			expectedNode: &v1.Node{
  2235  				Status: v1.NodeStatus{
  2236  					Capacity: v1.ResourceList{
  2237  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2238  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2239  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2240  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2241  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2242  					},
  2243  					Allocatable: v1.ResourceList{
  2244  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2245  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2246  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2247  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2248  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2249  					},
  2250  				},
  2251  			},
  2252  		}, {
  2253  			name:        "update needed when huge page resource quantity has changed",
  2254  			testKubelet: testKubelet,
  2255  			needsUpdate: true,
  2256  			initialNode: &v1.Node{
  2257  				Status: v1.NodeStatus{
  2258  					Capacity: v1.ResourceList{
  2259  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2260  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2261  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2262  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2263  					},
  2264  					Allocatable: v1.ResourceList{
  2265  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2266  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2267  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2268  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2269  					},
  2270  				},
  2271  			},
  2272  			existingNode: &v1.Node{
  2273  				Status: v1.NodeStatus{
  2274  					Capacity: v1.ResourceList{
  2275  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2276  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2277  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2278  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2279  					},
  2280  					Allocatable: v1.ResourceList{
  2281  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2282  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2283  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2284  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2285  					},
  2286  				},
  2287  			},
  2288  			expectedNode: &v1.Node{
  2289  				Status: v1.NodeStatus{
  2290  					Capacity: v1.ResourceList{
  2291  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2292  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2293  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2294  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2295  					},
  2296  					Allocatable: v1.ResourceList{
  2297  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2298  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2299  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2300  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2301  					},
  2302  				},
  2303  			},
  2304  		}, {
  2305  			name:        "update needed when a huge page resource is no longer supported",
  2306  			testKubelet: testKubelet,
  2307  			needsUpdate: true,
  2308  			initialNode: &v1.Node{
  2309  				Status: v1.NodeStatus{
  2310  					Capacity: v1.ResourceList{
  2311  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2312  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2313  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2314  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2315  					},
  2316  					Allocatable: v1.ResourceList{
  2317  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2318  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2319  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2320  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2321  					},
  2322  				},
  2323  			},
  2324  			existingNode: &v1.Node{
  2325  				Status: v1.NodeStatus{
  2326  					Capacity: v1.ResourceList{
  2327  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2328  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2329  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2330  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2331  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2332  					},
  2333  					Allocatable: v1.ResourceList{
  2334  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2335  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2336  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2337  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2338  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2339  					},
  2340  				},
  2341  			},
  2342  			expectedNode: &v1.Node{
  2343  				Status: v1.NodeStatus{
  2344  					Capacity: v1.ResourceList{
  2345  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2346  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2347  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2348  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2349  					},
  2350  					Allocatable: v1.ResourceList{
  2351  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2352  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2353  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2354  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2355  					},
  2356  				},
  2357  			},
  2358  		}, {
  2359  			name:        "not panic when capacity or allocatable of existing node is nil",
  2360  			testKubelet: testKubelet,
  2361  			needsUpdate: true,
  2362  			initialNode: &v1.Node{
  2363  				Status: v1.NodeStatus{
  2364  					Capacity: v1.ResourceList{
  2365  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2366  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2367  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2368  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2369  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2370  					},
  2371  					Allocatable: v1.ResourceList{
  2372  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2373  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2374  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2375  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2376  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2377  					},
  2378  				},
  2379  			},
  2380  			existingNode: &v1.Node{
  2381  				Status: v1.NodeStatus{},
  2382  			},
  2383  			expectedNode: &v1.Node{
  2384  				Status: v1.NodeStatus{
  2385  					Capacity: v1.ResourceList{
  2386  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2387  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2388  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2389  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2390  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2391  					},
  2392  					Allocatable: v1.ResourceList{
  2393  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2394  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2395  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2396  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2397  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2398  					},
  2399  				},
  2400  			},
  2401  		},
  2402  	}
  2403  
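        	// Each case runs as a subtest; reconcileHugePageResource is expected to mutate
        	// existingNode in place, so the result is compared directly against expectedNode.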
  2404  	for _, tc := range cases {
  2405  		t.Run(tc.name, func(t *testing.T) {
  2406  			defer testKubelet.Cleanup()
  2407  			kubelet := testKubelet.kubelet
  2408  
  2409  			needsUpdate := kubelet.reconcileHugePageResource(tc.initialNode, tc.existingNode)
  2410  			assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  2411  			assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
  2412  		})
  2413  	}
  2414  }
  2415  
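        // TestReconcileExtendedResource verifies that reconcileExtendedResource zeroes out the
        // capacity and allocatable of extended resources reported on the existing node (and
        // reports that an update is needed), leaves nodes without extended resources untouched,
        // and does not panic when the existing node's allocatable list is nil.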
  2416  func TestReconcileExtendedResource(t *testing.T) {
  2417  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2418  	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
  2419  	testKubelet.kubelet.containerManager = cm.NewStubContainerManagerWithExtendedResource(true /* shouldResetExtendedResourceCapacity*/)
  2420  	testKubeletNoReset := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2421  	defer testKubeletNoReset.Cleanup()
  2422  	extendedResourceName1 := v1.ResourceName("test.com/resource1")
  2423  	extendedResourceName2 := v1.ResourceName("test.com/resource2")
  2424  
  2425  	cases := []struct {
  2426  		name         string
  2427  		testKubelet  *TestKubelet
  2428  		initialNode  *v1.Node
  2429  		existingNode *v1.Node
  2430  		expectedNode *v1.Node
  2431  		needsUpdate  bool
  2432  	}{
  2433  		{
  2434  			name:        "no update needed without extended resource",
  2435  			testKubelet: testKubelet,
  2436  			initialNode: &v1.Node{
  2437  				Status: v1.NodeStatus{
  2438  					Capacity: v1.ResourceList{
  2439  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2440  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2441  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2442  					},
  2443  					Allocatable: v1.ResourceList{
  2444  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2445  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2446  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2447  					},
  2448  				},
  2449  			},
  2450  			existingNode: &v1.Node{
  2451  				Status: v1.NodeStatus{
  2452  					Capacity: v1.ResourceList{
  2453  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2454  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2455  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2456  					},
  2457  					Allocatable: v1.ResourceList{
  2458  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2459  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2460  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2461  					},
  2462  				},
  2463  			},
  2464  			expectedNode: &v1.Node{
  2465  				Status: v1.NodeStatus{
  2466  					Capacity: v1.ResourceList{
  2467  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2468  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2469  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2470  					},
  2471  					Allocatable: v1.ResourceList{
  2472  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2473  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2474  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2475  					},
  2476  				},
  2477  			},
  2478  			needsUpdate: false,
  2479  		},
  2480  		{
  2481  			name:        "extended resource capacity is zeroed",
  2482  			testKubelet: testKubeletNoReset,
  2483  			initialNode: &v1.Node{
  2484  				Status: v1.NodeStatus{
  2485  					Capacity: v1.ResourceList{
  2486  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2487  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2488  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2489  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2490  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2491  					},
  2492  					Allocatable: v1.ResourceList{
  2493  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2494  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2495  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2496  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2497  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2498  					},
  2499  				},
  2500  			},
  2501  			existingNode: &v1.Node{
  2502  				Status: v1.NodeStatus{
  2503  					Capacity: v1.ResourceList{
  2504  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2505  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2506  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2507  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2508  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2509  					},
  2510  					Allocatable: v1.ResourceList{
  2511  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2512  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2513  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2514  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2515  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2516  					},
  2517  				},
  2518  			},
  2519  			expectedNode: &v1.Node{
  2520  				Status: v1.NodeStatus{
  2521  					Capacity: v1.ResourceList{
  2522  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2523  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2524  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2525  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2526  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2527  					},
  2528  					Allocatable: v1.ResourceList{
  2529  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2530  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2531  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2532  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2533  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2534  					},
  2535  				},
  2536  			},
  2537  			needsUpdate: true,
  2538  		},
  2539  		{
  2540  			name:        "not panic when allocatable of existing node is nil",
  2541  			testKubelet: testKubelet,
  2542  			initialNode: &v1.Node{
  2543  				Status: v1.NodeStatus{
  2544  					Capacity: v1.ResourceList{
  2545  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2546  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2547  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2548  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2549  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2550  					},
  2551  					Allocatable: v1.ResourceList{
  2552  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2553  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2554  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2555  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2556  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2557  					},
  2558  				},
  2559  			},
  2560  			existingNode: &v1.Node{
  2561  				Status: v1.NodeStatus{
  2562  					Capacity: v1.ResourceList{
  2563  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2564  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2565  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2566  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2567  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2568  					},
  2569  				},
  2570  			},
  2571  			expectedNode: &v1.Node{
  2572  				Status: v1.NodeStatus{
  2573  					Capacity: v1.ResourceList{
  2574  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2575  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2576  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2577  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2578  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2579  					},
  2580  					Allocatable: v1.ResourceList{
  2581  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2582  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2583  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2584  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2585  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2586  					},
  2587  				},
  2588  			},
  2589  			needsUpdate: true,
  2590  		},
  2591  	}
  2592  
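        	// reconcileExtendedResource mutates existingNode in place; needsUpdate reports
        	// whether anything changed.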
  2593  	for _, tc := range cases {
  2594  		t.Run(tc.name, func(t *testing.T) {
  2595  			defer testKubelet.Cleanup()
  2596  			kubelet := testKubelet.kubelet
  2597  			needsUpdate := kubelet.reconcileExtendedResource(tc.initialNode, tc.existingNode)
  2598  			assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  2599  			assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
  2600  		})
  2601  	}
  2602  }
  2603  
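        // TestValidateNodeIPParam checks that validateNodeIP rejects empty, loopback, link-local,
        // multicast, and unspecified addresses, as well as addresses not assigned to this host,
        // while accepting the host's own usable interface addresses.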
  2604  func TestValidateNodeIPParam(t *testing.T) {
  2605  	type test struct {
  2606  		nodeIP   string
  2607  		success  bool
  2608  		testName string
  2609  	}
  2610  	tests := []test{
  2611  		{
  2612  			nodeIP:   "",
  2613  			success:  false,
  2614  			testName: "IP not set",
  2615  		},
  2616  		{
  2617  			nodeIP:   "127.0.0.1",
  2618  			success:  false,
  2619  			testName: "IPv4 loopback address",
  2620  		},
  2621  		{
  2622  			nodeIP:   "::1",
  2623  			success:  false,
  2624  			testName: "IPv6 loopback address",
  2625  		},
  2626  		{
  2627  			nodeIP:   "224.0.0.1",
  2628  			success:  false,
  2629  			testName: "multicast IPv4 address",
  2630  		},
  2631  		{
  2632  			nodeIP:   "ff00::1",
  2633  			success:  false,
  2634  			testName: "multicast IPv6 address",
  2635  		},
  2636  		{
  2637  			nodeIP:   "169.254.0.1",
  2638  			success:  false,
  2639  			testName: "IPv4 link-local unicast address",
  2640  		},
  2641  		{
  2642  			nodeIP:   "fe80::0202:b3ff:fe1e:8329",
  2643  			success:  false,
  2644  			testName: "IPv6 link-local unicast address",
  2645  		},
  2646  		{
  2647  			nodeIP:   "0.0.0.0",
  2648  			success:  false,
  2649  			testName: "Unspecified IPv4 address",
  2650  		},
  2651  		{
  2652  			nodeIP:   "::",
  2653  			success:  false,
  2654  			testName: "Unspecified IPv6 address",
  2655  		},
  2656  		{
  2657  			nodeIP:   "1.2.3.4",
  2658  			success:  false,
  2659  			testName: "IPv4 address that doesn't belong to host",
  2660  		},
  2661  	}
  2662  	addrs, err := net.InterfaceAddrs()
  2663  	if err != nil {
  2664  		t.Fatalf("Unable to obtain a list of the node's unicast interface addresses: %v", err)
  2666  	}
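        	// Every address assigned to this host, other than loopback and link-local ones,
        	// is added as a case that is expected to validate successfully.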
  2667  	for _, addr := range addrs {
  2668  		var ip net.IP
  2669  		switch v := addr.(type) {
  2670  		case *net.IPNet:
  2671  			ip = v.IP
  2672  		case *net.IPAddr:
  2673  			ip = v.IP
  2674  		}
  2675  		if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
  2676  			continue
  2677  		}
  2678  		successTest := test{
  2679  			nodeIP:   ip.String(),
  2680  			success:  true,
  2681  			testName: fmt.Sprintf("Success test case for address %s", ip.String()),
  2682  		}
  2683  		tests = append(tests, successTest)
  2684  	}
  2685  	for _, test := range tests {
  2686  		err := validateNodeIP(netutils.ParseIPSloppy(test.nodeIP))
  2687  		if test.success {
  2688  			assert.NoError(t, err, "test %s", test.testName)
  2689  		} else {
  2690  			assert.Error(t, err, "test %s", test.testName)
  2691  		}
  2692  	}
  2693  }
  2694  
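        // TestRegisterWithApiServerWithTaint verifies that a kubelet registering with
        // registerSchedulable=false creates its Node object with the
        // v1.TaintNodeUnschedulable taint (effect NoSchedule).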
  2695  func TestRegisterWithApiServerWithTaint(t *testing.T) {
  2696  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2697  	defer testKubelet.Cleanup()
  2698  	kubelet := testKubelet.kubelet
  2699  	kubeClient := testKubelet.fakeKubeClient
  2700  
  2701  	machineInfo := &cadvisorapi.MachineInfo{
  2702  		MachineID:      "123",
  2703  		SystemUUID:     "abc",
  2704  		BootID:         "1b3",
  2705  		NumCores:       2,
  2706  		MemoryCapacity: 1024,
  2707  	}
  2708  	kubelet.setCachedMachineInfo(machineInfo)
  2709  
  2710  	var gotNode runtime.Object
  2711  	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  2712  		createAction := action.(core.CreateAction)
  2713  		gotNode = createAction.GetObject()
  2714  		return true, gotNode, nil
  2715  	})
  2716  
  2717  	addNotImplatedReaction(kubeClient)
  2718  
  2719  	// Make the node unschedulable.
  2720  	kubelet.registerSchedulable = false
  2721  
  2722  	// Reset the kubelet's registration status before registering.
  2723  	kubelet.registrationCompleted = false
  2724  
  2725  	// Register node to apiserver.
  2726  	kubelet.registerWithAPIServer()
  2727  
  2728  	// Check the unschedulable taint.
  2729  	got := gotNode.(*v1.Node)
  2730  	unschedulableTaint := &v1.Taint{
  2731  		Key:    v1.TaintNodeUnschedulable,
  2732  		Effect: v1.TaintEffectNoSchedule,
  2733  	}
  2734  
  2735  	require.True(t,
  2736  		taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
  2737  		"test unschedulable taint for TaintNodesByCondition")
  2739  }
  2740  
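        // TestNodeStatusHasChanged verifies that nodeStatusHasChanged ignores differences in
        // condition heartbeat times and condition ordering, but reports a change when the
        // condition status, transition time, number of conditions, or node phase differs,
        // and that it never mutates its inputs.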
  2741  func TestNodeStatusHasChanged(t *testing.T) {
  2742  	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
  2743  	fakeFuture := metav1.Time{Time: fakeNow.Time.Add(time.Minute)}
  2744  	readyCondition := v1.NodeCondition{
  2745  		Type:               v1.NodeReady,
  2746  		Status:             v1.ConditionTrue,
  2747  		LastHeartbeatTime:  fakeNow,
  2748  		LastTransitionTime: fakeNow,
  2749  	}
  2750  	readyConditionAtDiffHeartbeatTime := v1.NodeCondition{
  2751  		Type:               v1.NodeReady,
  2752  		Status:             v1.ConditionTrue,
  2753  		LastHeartbeatTime:  fakeFuture,
  2754  		LastTransitionTime: fakeNow,
  2755  	}
  2756  	readyConditionAtDiffTransitionTime := v1.NodeCondition{
  2757  		Type:               v1.NodeReady,
  2758  		Status:             v1.ConditionTrue,
  2759  		LastHeartbeatTime:  fakeFuture,
  2760  		LastTransitionTime: fakeFuture,
  2761  	}
  2762  	notReadyCondition := v1.NodeCondition{
  2763  		Type:               v1.NodeReady,
  2764  		Status:             v1.ConditionFalse,
  2765  		LastHeartbeatTime:  fakeNow,
  2766  		LastTransitionTime: fakeNow,
  2767  	}
  2768  	memoryPressureCondition := v1.NodeCondition{
  2769  		Type:               v1.NodeMemoryPressure,
  2770  		Status:             v1.ConditionFalse,
  2771  		LastHeartbeatTime:  fakeNow,
  2772  		LastTransitionTime: fakeNow,
  2773  	}
  2774  	testcases := []struct {
  2775  		name           string
  2776  		originalStatus *v1.NodeStatus
  2777  		status         *v1.NodeStatus
  2778  		expectChange   bool
  2779  	}{
  2780  		{
  2781  			name:           "Node status does not change with nil status.",
  2782  			originalStatus: nil,
  2783  			status:         nil,
  2784  			expectChange:   false,
  2785  		},
  2786  		{
  2787  			name:           "Node status does not change with default status.",
  2788  			originalStatus: &v1.NodeStatus{},
  2789  			status:         &v1.NodeStatus{},
  2790  			expectChange:   false,
  2791  		},
  2792  		{
  2793  			name:           "Node status changes with nil and default status.",
  2794  			originalStatus: nil,
  2795  			status:         &v1.NodeStatus{},
  2796  			expectChange:   true,
  2797  		},
  2798  		{
  2799  			name:           "Node status changes with nil and status.",
  2800  			originalStatus: nil,
  2801  			status: &v1.NodeStatus{
  2802  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2803  			},
  2804  			expectChange: true,
  2805  		},
  2806  		{
  2807  			name:           "Node status does not change with empty conditions.",
  2808  			originalStatus: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
  2809  			status:         &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
  2810  			expectChange:   false,
  2811  		},
  2812  		{
  2813  			name: "Node status does not change",
  2814  			originalStatus: &v1.NodeStatus{
  2815  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2816  			},
  2817  			status: &v1.NodeStatus{
  2818  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2819  			},
  2820  			expectChange: false,
  2821  		},
  2822  		{
  2823  			name: "Node status does not change even if heartbeat time changes.",
  2824  			originalStatus: &v1.NodeStatus{
  2825  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2826  			},
  2827  			status: &v1.NodeStatus{
  2828  				Conditions: []v1.NodeCondition{readyConditionAtDiffHeartbeatTime, memoryPressureCondition},
  2829  			},
  2830  			expectChange: false,
  2831  		},
  2832  		{
  2833  			name: "Node status does not change even if the orders of conditions are different.",
  2834  			originalStatus: &v1.NodeStatus{
  2835  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2836  			},
  2837  			status: &v1.NodeStatus{
  2838  				Conditions: []v1.NodeCondition{memoryPressureCondition, readyConditionAtDiffHeartbeatTime},
  2839  			},
  2840  			expectChange: false,
  2841  		},
  2842  		{
  2843  			name: "Node status changes if condition status differs.",
  2844  			originalStatus: &v1.NodeStatus{
  2845  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2846  			},
  2847  			status: &v1.NodeStatus{
  2848  				Conditions: []v1.NodeCondition{notReadyCondition, memoryPressureCondition},
  2849  			},
  2850  			expectChange: true,
  2851  		},
  2852  		{
  2853  			name: "Node status changes if transition time changes.",
  2854  			originalStatus: &v1.NodeStatus{
  2855  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2856  			},
  2857  			status: &v1.NodeStatus{
  2858  				Conditions: []v1.NodeCondition{readyConditionAtDiffTransitionTime, memoryPressureCondition},
  2859  			},
  2860  			expectChange: true,
  2861  		},
  2862  		{
  2863  			name: "Node status changes with different number of conditions.",
  2864  			originalStatus: &v1.NodeStatus{
  2865  				Conditions: []v1.NodeCondition{readyCondition},
  2866  			},
  2867  			status: &v1.NodeStatus{
  2868  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2869  			},
  2870  			expectChange: true,
  2871  		},
  2872  		{
  2873  			name: "Node status changes with different phase.",
  2874  			originalStatus: &v1.NodeStatus{
  2875  				Phase:      v1.NodePending,
  2876  				Conditions: []v1.NodeCondition{readyCondition},
  2877  			},
  2878  			status: &v1.NodeStatus{
  2879  				Phase:      v1.NodeRunning,
  2880  				Conditions: []v1.NodeCondition{readyCondition},
  2881  			},
  2882  			expectChange: true,
  2883  		},
  2884  	}
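        	// Each case deep-copies its inputs so the assertions below can verify that
        	// nodeStatusHasChanged does not modify either status.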
  2885  	for _, tc := range testcases {
  2886  		t.Run(tc.name, func(t *testing.T) {
  2887  			originalStatusCopy := tc.originalStatus.DeepCopy()
  2888  			statusCopy := tc.status.DeepCopy()
  2889  			changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
  2890  			assert.Equal(t, tc.expectChange, changed, "Expected node status change to be %t, but got %t.", tc.expectChange, changed)
  2891  			assert.True(t, apiequality.Semantic.DeepEqual(originalStatusCopy, tc.originalStatus), "%s", cmp.Diff(originalStatusCopy, tc.originalStatus))
  2892  			assert.True(t, apiequality.Semantic.DeepEqual(statusCopy, tc.status), "%s", cmp.Diff(statusCopy, tc.status))
  2893  		})
  2894  	}
  2895  }
  2896  
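        // TestUpdateNodeAddresses verifies that updateNodeStatus patches the node's
        // status.addresses correctly for transitions between nil, empty, and populated address
        // lists, including adding, removing, replacing, and reordering InternalIP entries.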
  2897  func TestUpdateNodeAddresses(t *testing.T) {
  2898  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2899  	defer testKubelet.Cleanup()
  2900  	kubelet := testKubelet.kubelet
  2901  	kubeClient := testKubelet.fakeKubeClient
  2902  
  2903  	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  2904  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  2905  
  2906  	tests := []struct {
  2907  		Name   string
  2908  		Before []v1.NodeAddress
  2909  		After  []v1.NodeAddress
  2910  	}{
  2911  		{
  2912  			Name:   "nil to populated",
  2913  			Before: nil,
  2914  			After: []v1.NodeAddress{
  2915  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2916  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2917  			},
  2918  		},
  2919  		{
  2920  			Name:   "empty to populated",
  2921  			Before: []v1.NodeAddress{},
  2922  			After: []v1.NodeAddress{
  2923  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2924  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2925  			},
  2926  		},
  2927  		{
  2928  			Name: "populated to nil",
  2929  			Before: []v1.NodeAddress{
  2930  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2931  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2932  			},
  2933  			After: nil,
  2934  		},
  2935  		{
  2936  			Name: "populated to empty",
  2937  			Before: []v1.NodeAddress{
  2938  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2939  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2940  			},
  2941  			After: []v1.NodeAddress{},
  2942  		},
  2943  		{
  2944  			Name: "multiple addresses of same type, no change",
  2945  			Before: []v1.NodeAddress{
  2946  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2947  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2948  				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
  2949  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2950  			},
  2951  			After: []v1.NodeAddress{
  2952  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2953  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2954  				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
  2955  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2956  			},
  2957  		},
  2958  		{
  2959  			Name: "1 InternalIP to 2 InternalIP",
  2960  			Before: []v1.NodeAddress{
  2961  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2962  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2963  			},
  2964  			After: []v1.NodeAddress{
  2965  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2966  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2967  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2968  			},
  2969  		},
  2970  		{
  2971  			Name: "2 InternalIP to 1 InternalIP",
  2972  			Before: []v1.NodeAddress{
  2973  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2974  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2975  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2976  			},
  2977  			After: []v1.NodeAddress{
  2978  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2979  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2980  			},
  2981  		},
  2982  		{
  2983  			Name: "2 InternalIP to 2 different InternalIP",
  2984  			Before: []v1.NodeAddress{
  2985  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2986  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2987  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2988  			},
  2989  			After: []v1.NodeAddress{
  2990  				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
  2991  				{Type: v1.NodeInternalIP, Address: "127.0.0.4"},
  2992  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2993  			},
  2994  		},
  2995  		{
  2996  			Name: "2 InternalIP to reversed order",
  2997  			Before: []v1.NodeAddress{
  2998  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2999  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  3000  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  3001  			},
  3002  			After: []v1.NodeAddress{
  3003  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  3004  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  3005  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  3006  			},
  3007  		},
  3008  	}
  3009  
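        	// Each case seeds the API server with the Before addresses, stubs the node status
        	// funcs to report the After addresses, and then checks that applying the resulting
        	// patch yields the expected node.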
  3010  	for _, test := range tests {
  3011  		t.Run(test.Name, func(t *testing.T) {
  3012  			ctx := context.Background()
  3013  			oldNode := &v1.Node{
  3014  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  3015  				Spec:       v1.NodeSpec{},
  3016  				Status: v1.NodeStatus{
  3017  					Addresses: test.Before,
  3018  				},
  3019  			}
  3020  			expectedNode := &v1.Node{
  3021  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
  3022  				Spec:       v1.NodeSpec{},
  3023  				Status: v1.NodeStatus{
  3024  					Addresses: test.After,
  3025  				},
  3026  			}
  3027  
  3028  			_, err := kubeClient.CoreV1().Nodes().Update(ctx, oldNode, metav1.UpdateOptions{})
  3029  			assert.NoError(t, err)
  3030  			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
  3031  				func(_ context.Context, node *v1.Node) error {
  3032  					node.Status.Addresses = expectedNode.Status.Addresses
  3033  					return nil
  3034  				},
  3035  			}
  3036  			assert.NoError(t, kubelet.updateNodeStatus(ctx))
  3037  
  3038  			actions := kubeClient.Actions()
  3039  			lastAction := actions[len(actions)-1]
  3040  			assert.IsType(t, core.PatchActionImpl{}, lastAction)
  3041  			patchAction := lastAction.(core.PatchActionImpl)
  3042  
  3043  			updatedNode, err := applyNodeStatusPatch(oldNode, patchAction.GetPatch())
  3044  			require.NoError(t, err)
  3045  
  3046  			assert.True(t, apiequality.Semantic.DeepEqual(updatedNode, expectedNode), "%s", cmp.Diff(expectedNode, updatedNode))
  3047  		})
  3048  	}
  3049  }