k8s.io/kubernetes@v1.29.3/pkg/kubelet/nodestatus/setters.go

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package nodestatus
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math"
    23  	"net"
    24  	goruntime "runtime"
    25  	"strings"
    26  	"time"
    27  
    28  	cadvisorapiv1 "github.com/google/cadvisor/info/v1"
    29  
    30  	v1 "k8s.io/api/core/v1"
    31  	"k8s.io/apimachinery/pkg/api/resource"
    32  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    33  	"k8s.io/apimachinery/pkg/util/errors"
    34  	utilnet "k8s.io/apimachinery/pkg/util/net"
    35  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    36  	cloudprovider "k8s.io/cloud-provider"
    37  	cloudproviderapi "k8s.io/cloud-provider/api"
    38  	cloudprovidernodeutil "k8s.io/cloud-provider/node/helpers"
    39  	"k8s.io/component-base/version"
    40  	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    41  	"k8s.io/kubernetes/pkg/features"
    42  	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
    43  	"k8s.io/kubernetes/pkg/kubelet/cm"
    44  	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    45  	"k8s.io/kubernetes/pkg/kubelet/events"
    46  	"k8s.io/kubernetes/pkg/volume"
    47  	netutils "k8s.io/utils/net"
    48  
    49  	"k8s.io/klog/v2"
    50  )
    51  
    52  const (
     53  	// MaxNamesPerImageInNodeStatus is the maximum number of names
    54  	// per image stored in the node status.
    55  	MaxNamesPerImageInNodeStatus = 5
    56  )
    57  
    58  // Setter modifies the node in-place, and returns an error if the modification failed.
    59  // Setters may partially mutate the node before returning an error.
    60  type Setter func(ctx context.Context, node *v1.Node) error
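// Illustrative sketch, not part of the upstream file: each constructor below returns a
// Setter, and the kubelet applies them in sequence while building the node status. Given
// a ctx and an existing *v1.Node named node, a minimal (hypothetical) caller could be:
//
//	setters := []Setter{
//		GoRuntime(),
//		DaemonEndpoints(&v1.NodeDaemonEndpoints{}),
//	}
//	for _, setter := range setters {
//		if err := setter(ctx, node); err != nil {
//			klog.ErrorS(err, "Failed to set node status field", "node", klog.KObj(node))
//		}
//	}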
    61  
    62  // NodeAddress returns a Setter that updates address-related information on the node.
    63  func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs
    64  	validateNodeIPFunc func(net.IP) error, // typically Kubelet.nodeIPValidator
    65  	hostname string, // typically Kubelet.hostname
    66  	hostnameOverridden bool, // was the hostname force set?
    67  	externalCloudProvider bool, // typically Kubelet.externalCloudProvider
    68  	cloud cloudprovider.Interface, // typically Kubelet.cloud
    69  	nodeAddressesFunc func() ([]v1.NodeAddress, error), // typically Kubelet.cloudResourceSyncManager.NodeAddresses
    70  ) Setter {
    71  	var nodeIP, secondaryNodeIP net.IP
    72  	if len(nodeIPs) > 0 {
    73  		nodeIP = nodeIPs[0]
    74  	}
    75  	preferIPv4 := nodeIP == nil || nodeIP.To4() != nil
    76  	isPreferredIPFamily := func(ip net.IP) bool { return (ip.To4() != nil) == preferIPv4 }
    77  	nodeIPSpecified := nodeIP != nil && !nodeIP.IsUnspecified()
    78  
    79  	if len(nodeIPs) > 1 {
    80  		secondaryNodeIP = nodeIPs[1]
    81  	}
    82  	secondaryNodeIPSpecified := secondaryNodeIP != nil && !secondaryNodeIP.IsUnspecified()
    83  
    84  	return func(ctx context.Context, node *v1.Node) error {
    85  		if nodeIPSpecified {
    86  			if err := validateNodeIPFunc(nodeIP); err != nil {
    87  				return fmt.Errorf("failed to validate nodeIP: %v", err)
    88  			}
    89  			klog.V(4).InfoS("Using node IP", "IP", nodeIP.String())
    90  		}
    91  		if secondaryNodeIPSpecified {
    92  			if err := validateNodeIPFunc(secondaryNodeIP); err != nil {
    93  				return fmt.Errorf("failed to validate secondaryNodeIP: %v", err)
    94  			}
    95  			klog.V(4).InfoS("Using secondary node IP", "IP", secondaryNodeIP.String())
    96  		}
    97  
    98  		if (externalCloudProvider || cloud != nil) && nodeIPSpecified {
    99  			// Annotate the Node object with nodeIP for external cloud provider.
   100  			//
   101  			// We do this even when external CCM is not configured to cover a situation
   102  			// during migration from legacy to external CCM: when CCM is running the
   103  			// node controller in the cluster but kubelet is still running the in-tree
   104  			// provider. Adding this annotation in all cases ensures that while
   105  			// Addresses flap between the competing controllers, they at least flap
   106  			// consistently.
   107  			//
   108  			// We do not add the annotation in the case where there is no cloud
   109  			// controller at all, as we don't expect to migrate these clusters to use an
   110  			// external CCM.
   111  			if node.ObjectMeta.Annotations == nil {
   112  				node.ObjectMeta.Annotations = make(map[string]string)
   113  			}
   114  			annotation := nodeIP.String()
   115  			if secondaryNodeIPSpecified {
   116  				annotation += "," + secondaryNodeIP.String()
   117  			}
   118  			node.ObjectMeta.Annotations[cloudproviderapi.AnnotationAlphaProvidedIPAddr] = annotation
   119  		} else if node.ObjectMeta.Annotations != nil {
   120  			// Clean up stale annotations if no longer using a cloud provider or
   121  			// no longer overriding node IP.
   122  			delete(node.ObjectMeta.Annotations, cloudproviderapi.AnnotationAlphaProvidedIPAddr)
   123  		}
   124  
   125  		if externalCloudProvider {
    126  			// If --cloud-provider=external and the node addresses are already set,
    127  			// return early, because provider-set addresses should take precedence.
    128  			// Otherwise, try to use the node IP defined via flags and let the cloud provider override it later.
    129  			// This should alleviate a lot of the bootstrapping issues with out-of-tree providers.
   130  			if len(node.Status.Addresses) > 0 {
   131  				return nil
   132  			}
    133  			// If nodeIPs are not specified, wait for the external cloud provider to set the node addresses.
    134  			// Otherwise, use them on the assumption that the installer/administrator has the knowledge
    135  			// required to ensure the external cloud provider will use the same addresses, avoiding the issues
    136  			// explained in https://github.com/kubernetes/kubernetes/issues/120720.
    137  			// We already hint the external cloud provider via the AnnotationAlphaProvidedIPAddr annotation.
   138  			if !nodeIPSpecified {
   139  				return nil
   140  			}
   141  		}
   142  		if cloud != nil {
   143  			cloudNodeAddresses, err := nodeAddressesFunc()
   144  			if err != nil {
   145  				return err
   146  			}
   147  
   148  			nodeAddresses, err := cloudprovidernodeutil.GetNodeAddressesFromNodeIPLegacy(nodeIP, cloudNodeAddresses)
   149  			if err != nil {
   150  				return err
   151  			}
   152  
   153  			switch {
   154  			case len(cloudNodeAddresses) == 0:
   155  				// the cloud provider didn't specify any addresses
   156  				nodeAddresses = append(nodeAddresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname})
   157  
   158  			case !hasAddressType(cloudNodeAddresses, v1.NodeHostName) && hasAddressValue(cloudNodeAddresses, hostname):
   159  				// the cloud provider didn't specify an address of type Hostname,
   160  				// but the auto-detected hostname matched an address reported by the cloud provider,
   161  				// so we can add it and count on the value being verifiable via cloud provider metadata
   162  				nodeAddresses = append(nodeAddresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname})
   163  
   164  			case hostnameOverridden:
   165  				// the hostname was force-set via flag/config.
    166  				// this means the hostname might not be verifiable via cloud provider metadata,
    167  				// but it was a deliberate choice by the kubelet deployer that we should honor
   168  				var existingHostnameAddress *v1.NodeAddress
   169  				for i := range nodeAddresses {
   170  					if nodeAddresses[i].Type == v1.NodeHostName {
   171  						existingHostnameAddress = &nodeAddresses[i]
   172  						break
   173  					}
   174  				}
   175  
   176  				if existingHostnameAddress == nil {
   177  					// no existing Hostname address found, add it
   178  					klog.InfoS("Adding overridden hostname to cloudprovider-reported addresses", "hostname", hostname)
   179  					nodeAddresses = append(nodeAddresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname})
   180  				} else if existingHostnameAddress.Address != hostname {
   181  					// override the Hostname address reported by the cloud provider
   182  					klog.InfoS("Replacing cloudprovider-reported hostname with overridden hostname", "cloudProviderHostname", existingHostnameAddress.Address, "overriddenHostname", hostname)
   183  					existingHostnameAddress.Address = hostname
   184  				}
   185  			}
   186  			node.Status.Addresses = nodeAddresses
   187  		} else if nodeIPSpecified && secondaryNodeIPSpecified {
   188  			node.Status.Addresses = []v1.NodeAddress{
   189  				{Type: v1.NodeInternalIP, Address: nodeIP.String()},
   190  				{Type: v1.NodeInternalIP, Address: secondaryNodeIP.String()},
   191  				{Type: v1.NodeHostName, Address: hostname},
   192  			}
   193  		} else {
   194  			var ipAddr net.IP
   195  			var err error
   196  
   197  			// 1) Use nodeIP if set (and not "0.0.0.0"/"::")
   198  			// 2) If the user has specified an IP to HostnameOverride, use it
   199  			// 3) Lookup the IP from node name by DNS
   200  			// 4) Try to get the IP from the network interface used as default gateway
   201  			//
   202  			// For steps 3 and 4, IPv4 addresses are preferred to IPv6 addresses
   203  			// unless nodeIP is "::", in which case it is reversed.
   204  			if nodeIPSpecified {
   205  				ipAddr = nodeIP
   206  			} else if addr := netutils.ParseIPSloppy(hostname); addr != nil {
   207  				ipAddr = addr
   208  			} else {
   209  				var addrs []net.IP
   210  				addrs, _ = net.LookupIP(node.Name)
   211  				for _, addr := range addrs {
   212  					if err = validateNodeIPFunc(addr); err == nil {
   213  						if isPreferredIPFamily(addr) {
   214  							ipAddr = addr
   215  							break
   216  						} else if ipAddr == nil {
   217  							ipAddr = addr
   218  						}
   219  					}
   220  				}
   221  
   222  				if ipAddr == nil {
   223  					ipAddr, err = utilnet.ResolveBindAddress(nodeIP)
   224  				}
   225  			}
   226  
   227  			if ipAddr == nil {
   228  				// We tried everything we could, but the IP address wasn't fetchable; error out
   229  				return fmt.Errorf("can't get ip address of node %s. error: %v", node.Name, err)
   230  			}
   231  			node.Status.Addresses = []v1.NodeAddress{
   232  				{Type: v1.NodeInternalIP, Address: ipAddr.String()},
   233  				{Type: v1.NodeHostName, Address: hostname},
   234  			}
   235  		}
   236  		return nil
   237  	}
   238  }
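// Illustrative sketch, not part of the upstream file: with no cloud provider and a single
// --node-ip, NodeAddress reduces to the simple fallback branch above. Assuming a
// hypothetical no-op validator and an existing *v1.Node named node:
//
//	setter := NodeAddress(
//		[]net.IP{netutils.ParseIPSloppy("192.168.1.10")}, // nodeIPs
//		func(net.IP) error { return nil },                // validateNodeIPFunc (assumed no-op)
//		"node-1", false,                                  // hostname, hostnameOverridden
//		false, nil,                                       // externalCloudProvider, cloud (none)
//		nil,                                              // nodeAddressesFunc (unused without a cloud)
//	)
//	if err := setter(context.TODO(), node); err == nil {
//		// node.Status.Addresses == [{InternalIP 192.168.1.10} {Hostname node-1}]
//	}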
   239  
   240  func hasAddressType(addresses []v1.NodeAddress, addressType v1.NodeAddressType) bool {
   241  	for _, address := range addresses {
   242  		if address.Type == addressType {
   243  			return true
   244  		}
   245  	}
   246  	return false
   247  }
   248  func hasAddressValue(addresses []v1.NodeAddress, addressValue string) bool {
   249  	for _, address := range addresses {
   250  		if address.Address == addressValue {
   251  			return true
   252  		}
   253  	}
   254  	return false
   255  }
   256  
   257  // MachineInfo returns a Setter that updates machine-related information on the node.
   258  func MachineInfo(nodeName string,
   259  	maxPods int,
   260  	podsPerCore int,
   261  	machineInfoFunc func() (*cadvisorapiv1.MachineInfo, error), // typically Kubelet.GetCachedMachineInfo
   262  	capacityFunc func(localStorageCapacityIsolation bool) v1.ResourceList, // typically Kubelet.containerManager.GetCapacity
   263  	devicePluginResourceCapacityFunc func() (v1.ResourceList, v1.ResourceList, []string), // typically Kubelet.containerManager.GetDevicePluginResourceCapacity
   264  	nodeAllocatableReservationFunc func() v1.ResourceList, // typically Kubelet.containerManager.GetNodeAllocatableReservation
   265  	recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent
   266  	localStorageCapacityIsolation bool,
   267  ) Setter {
   268  	return func(ctx context.Context, node *v1.Node) error {
   269  		// Note: avoid blindly overwriting the capacity in case opaque
   270  		//       resources are being advertised.
   271  		if node.Status.Capacity == nil {
   272  			node.Status.Capacity = v1.ResourceList{}
   273  		}
   274  
   275  		var devicePluginAllocatable v1.ResourceList
   276  		var devicePluginCapacity v1.ResourceList
   277  		var removedDevicePlugins []string
   278  
   279  		// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
   280  		// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
   281  		info, err := machineInfoFunc()
   282  		if err != nil {
   283  			// TODO(roberthbailey): This is required for test-cmd.sh to pass.
   284  			// See if the test should be updated instead.
   285  			node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
   286  			node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi")
   287  			node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(maxPods), resource.DecimalSI)
   288  			klog.ErrorS(err, "Error getting machine info")
   289  		} else {
   290  			node.Status.NodeInfo.MachineID = info.MachineID
   291  			node.Status.NodeInfo.SystemUUID = info.SystemUUID
   292  
   293  			for rName, rCap := range cadvisor.CapacityFromMachineInfo(info) {
   294  				node.Status.Capacity[rName] = rCap
   295  			}
   296  
   297  			if podsPerCore > 0 {
   298  				node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
   299  					int64(math.Min(float64(info.NumCores*podsPerCore), float64(maxPods))), resource.DecimalSI)
   300  			} else {
   301  				node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
   302  					int64(maxPods), resource.DecimalSI)
   303  			}
   304  
   305  			if node.Status.NodeInfo.BootID != "" &&
   306  				node.Status.NodeInfo.BootID != info.BootID {
    307  				// TODO: This requires a transaction: either both the node status update
    308  				// and the event happen, or neither does; see issue #6055.
   309  				recordEventFunc(v1.EventTypeWarning, events.NodeRebooted,
   310  					fmt.Sprintf("Node %s has been rebooted, boot id: %s", nodeName, info.BootID))
   311  			}
   312  			node.Status.NodeInfo.BootID = info.BootID
   313  
   314  			// TODO: all the node resources should use ContainerManager.GetCapacity instead of deriving the
   315  			// capacity for every node status request
   316  			initialCapacity := capacityFunc(localStorageCapacityIsolation)
   317  			if initialCapacity != nil {
   318  				if v, exists := initialCapacity[v1.ResourceEphemeralStorage]; exists {
   319  					node.Status.Capacity[v1.ResourceEphemeralStorage] = v
   320  				}
   321  			}
   323  
   324  			devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = devicePluginResourceCapacityFunc()
   325  			for k, v := range devicePluginCapacity {
   326  				if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() {
   327  					klog.V(2).InfoS("Updated capacity for device plugin", "plugin", k, "capacity", v.Value())
   328  				}
   329  				node.Status.Capacity[k] = v
   330  			}
   331  
   332  			for _, removedResource := range removedDevicePlugins {
   333  				klog.V(2).InfoS("Set capacity for removed resource to 0 on device removal", "device", removedResource)
   334  				// Set the capacity of the removed resource to 0 instead of
   335  				// removing the resource from the node status. This is to indicate
   336  				// that the resource is managed by device plugin and had been
   337  				// registered before.
   338  				//
   339  				// This is required to differentiate the device plugin managed
   340  				// resources and the cluster-level resources, which are absent in
   341  				// node status.
   342  				node.Status.Capacity[v1.ResourceName(removedResource)] = *resource.NewQuantity(int64(0), resource.DecimalSI)
   343  			}
   344  		}
   345  
   346  		// Set Allocatable.
   347  		if node.Status.Allocatable == nil {
   348  			node.Status.Allocatable = make(v1.ResourceList)
   349  		}
   350  		// Remove extended resources from allocatable that are no longer
   351  		// present in capacity.
   352  		for k := range node.Status.Allocatable {
   353  			_, found := node.Status.Capacity[k]
   354  			if !found && v1helper.IsExtendedResourceName(k) {
   355  				delete(node.Status.Allocatable, k)
   356  			}
   357  		}
   358  		allocatableReservation := nodeAllocatableReservationFunc()
   359  		for k, v := range node.Status.Capacity {
   360  			value := v.DeepCopy()
   361  			if res, exists := allocatableReservation[k]; exists {
   362  				value.Sub(res)
   363  			}
   364  			if value.Sign() < 0 {
   365  				// Negative Allocatable resources don't make sense.
   366  				value.Set(0)
   367  			}
   368  			node.Status.Allocatable[k] = value
   369  		}
   370  
   371  		for k, v := range devicePluginAllocatable {
   372  			if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() {
   373  				klog.V(2).InfoS("Updated allocatable", "device", k, "allocatable", v.Value())
   374  			}
   375  			node.Status.Allocatable[k] = v
   376  		}
    377  		// For every huge page resource, subtract its capacity from allocatable memory.
   378  		for k, v := range node.Status.Capacity {
   379  			if v1helper.IsHugePageResourceName(k) {
   380  				allocatableMemory := node.Status.Allocatable[v1.ResourceMemory]
   381  				value := v.DeepCopy()
   382  				allocatableMemory.Sub(value)
   383  				if allocatableMemory.Sign() < 0 {
   384  					// Negative Allocatable resources don't make sense.
   385  					allocatableMemory.Set(0)
   386  				}
   387  				node.Status.Allocatable[v1.ResourceMemory] = allocatableMemory
   388  			}
   389  		}
   390  		return nil
   391  	}
   392  }
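// Illustrative sketch, not part of the upstream file: the Allocatable math above is plain
// resource.Quantity arithmetic. For example, with an assumed 8Gi of memory capacity, a 1Gi
// node-allocatable reservation and a 2Gi hugepages resource:
//
//	allocatable := resource.MustParse("8Gi")   // capacity
//	allocatable.Sub(resource.MustParse("1Gi")) // minus reservation -> 7Gi
//	allocatable.Sub(resource.MustParse("2Gi")) // minus huge pages  -> 5Gi
//	if allocatable.Sign() < 0 {
//		allocatable.Set(0) // negative allocatable never makes sense
//	}
//	// node.Status.Allocatable[v1.ResourceMemory] would end up as 5Gi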
   393  
   394  // VersionInfo returns a Setter that updates version-related information on the node.
   395  func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // typically Kubelet.cadvisor.VersionInfo
   396  	runtimeTypeFunc func() string, // typically Kubelet.containerRuntime.Type
   397  	runtimeVersionFunc func(ctx context.Context) (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version
   398  ) Setter {
   399  	return func(ctx context.Context, node *v1.Node) error {
   400  		verinfo, err := versionInfoFunc()
   401  		if err != nil {
   402  			return fmt.Errorf("error getting version info: %v", err)
   403  		}
   404  
   405  		node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
   406  		node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion
   407  
   408  		runtimeVersion := "Unknown"
   409  		if runtimeVer, err := runtimeVersionFunc(ctx); err == nil {
   410  			runtimeVersion = runtimeVer.String()
   411  		}
   412  		node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", runtimeTypeFunc(), runtimeVersion)
   413  
   414  		node.Status.NodeInfo.KubeletVersion = version.Get().String()
   415  
   416  		if utilfeature.DefaultFeatureGate.Enabled(features.DisableNodeKubeProxyVersion) {
   417  			// This field is deprecated and should be cleared if it was previously set.
   418  			node.Status.NodeInfo.KubeProxyVersion = ""
   419  		} else {
   420  			node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
   421  		}
   422  
   423  		return nil
   424  	}
   425  }
   426  
   427  // DaemonEndpoints returns a Setter that updates the daemon endpoints on the node.
   428  func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter {
   429  	return func(ctx context.Context, node *v1.Node) error {
   430  		node.Status.DaemonEndpoints = *daemonEndpoints
   431  		return nil
   432  	}
   433  }
   434  
   435  // Images returns a Setter that updates the images on the node.
   436  // imageListFunc is expected to return a list of images sorted in descending order by image size.
   437  // nodeStatusMaxImages is ignored if set to -1.
   438  func Images(nodeStatusMaxImages int32,
   439  	imageListFunc func() ([]kubecontainer.Image, error), // typically Kubelet.imageManager.GetImageList
   440  ) Setter {
   441  	return func(ctx context.Context, node *v1.Node) error {
   442  		// Update image list of this node
   443  		var imagesOnNode []v1.ContainerImage
   444  		containerImages, err := imageListFunc()
   445  		if err != nil {
   446  			node.Status.Images = imagesOnNode
   447  			return fmt.Errorf("error getting image list: %v", err)
   448  		}
   449  		// we expect imageListFunc to return a sorted list, so we just need to truncate
   450  		if int(nodeStatusMaxImages) > -1 &&
   451  			int(nodeStatusMaxImages) < len(containerImages) {
   452  			containerImages = containerImages[0:nodeStatusMaxImages]
   453  		}
   454  
   455  		for _, image := range containerImages {
   456  			// make a copy to avoid modifying slice members of the image items in the list
   457  			names := append([]string{}, image.RepoDigests...)
   458  			names = append(names, image.RepoTags...)
   459  			// Report up to MaxNamesPerImageInNodeStatus names per image.
   460  			if len(names) > MaxNamesPerImageInNodeStatus {
   461  				names = names[0:MaxNamesPerImageInNodeStatus]
   462  			}
   463  			imagesOnNode = append(imagesOnNode, v1.ContainerImage{
   464  				Names:     names,
   465  				SizeBytes: image.Size,
   466  			})
   467  		}
   468  
   469  		node.Status.Images = imagesOnNode
   470  		return nil
   471  	}
   472  }
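// Illustrative sketch, not part of the upstream file: names are reported digests-first and
// truncated to MaxNamesPerImageInNodeStatus. For an assumed image known by two digests and
// four tags:
//
//	img := kubecontainer.Image{
//		RepoDigests: []string{"registry.example/app@sha256:aaa", "mirror.example/app@sha256:aaa"},
//		RepoTags:    []string{"app:v1", "app:latest", "mirror/app:v1", "mirror/app:latest"},
//	}
//	// 2 digests + 4 tags = 6 names; the node status keeps the first 5:
//	// both digests followed by the first 3 tags.
//	_ = img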
   473  
   474  // GoRuntime returns a Setter that sets GOOS and GOARCH on the node.
   475  func GoRuntime() Setter {
   476  	return func(ctx context.Context, node *v1.Node) error {
   477  		node.Status.NodeInfo.OperatingSystem = goruntime.GOOS
   478  		node.Status.NodeInfo.Architecture = goruntime.GOARCH
   479  		return nil
   480  	}
   481  }
   482  
   483  // ReadyCondition returns a Setter that updates the v1.NodeReady condition on the node.
   484  func ReadyCondition(
   485  	nowFunc func() time.Time, // typically Kubelet.clock.Now
   486  	runtimeErrorsFunc func() error, // typically Kubelet.runtimeState.runtimeErrors
   487  	networkErrorsFunc func() error, // typically Kubelet.runtimeState.networkErrors
   488  	storageErrorsFunc func() error, // typically Kubelet.runtimeState.storageErrors
   489  	appArmorValidateHostFunc func() error, // typically Kubelet.appArmorValidator.ValidateHost, might be nil depending on whether there was an appArmorValidator
   490  	cmStatusFunc func() cm.Status, // typically Kubelet.containerManager.Status
   491  	nodeShutdownManagerErrorsFunc func() error, // typically kubelet.shutdownManager.errors.
   492  	recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
   493  	localStorageCapacityIsolation bool,
   494  ) Setter {
   495  	return func(ctx context.Context, node *v1.Node) error {
   496  		// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
   497  		// This is due to an issue with version skewed kubelet and master components.
   498  		// ref: https://github.com/kubernetes/kubernetes/issues/16961
   499  		currentTime := metav1.NewTime(nowFunc())
   500  		newNodeReadyCondition := v1.NodeCondition{
   501  			Type:              v1.NodeReady,
   502  			Status:            v1.ConditionTrue,
   503  			Reason:            "KubeletReady",
   504  			Message:           "kubelet is posting ready status",
   505  			LastHeartbeatTime: currentTime,
   506  		}
   507  		errs := []error{runtimeErrorsFunc(), networkErrorsFunc(), storageErrorsFunc(), nodeShutdownManagerErrorsFunc()}
   508  		requiredCapacities := []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods}
   509  		if localStorageCapacityIsolation {
   510  			requiredCapacities = append(requiredCapacities, v1.ResourceEphemeralStorage)
   511  		}
   512  		missingCapacities := []string{}
   513  		for _, resource := range requiredCapacities {
   514  			if _, found := node.Status.Capacity[resource]; !found {
   515  				missingCapacities = append(missingCapacities, string(resource))
   516  			}
   517  		}
   518  		if len(missingCapacities) > 0 {
   519  			errs = append(errs, fmt.Errorf("missing node capacity for resources: %s", strings.Join(missingCapacities, ", ")))
   520  		}
   521  		if aggregatedErr := errors.NewAggregate(errs); aggregatedErr != nil {
   522  			newNodeReadyCondition = v1.NodeCondition{
   523  				Type:              v1.NodeReady,
   524  				Status:            v1.ConditionFalse,
   525  				Reason:            "KubeletNotReady",
   526  				Message:           aggregatedErr.Error(),
   527  				LastHeartbeatTime: currentTime,
   528  			}
   529  		}
   530  		// Append AppArmor status if it's enabled.
   531  		// TODO(tallclair): This is a temporary message until node feature reporting is added.
   532  		if appArmorValidateHostFunc != nil && newNodeReadyCondition.Status == v1.ConditionTrue {
   533  			if err := appArmorValidateHostFunc(); err == nil {
   534  				newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message)
   535  			}
   536  		}
   537  
   538  		// Record any soft requirements that were not met in the container manager.
   539  		status := cmStatusFunc()
   540  		if status.SoftRequirements != nil {
   541  			newNodeReadyCondition.Message = fmt.Sprintf("%s. WARNING: %s", newNodeReadyCondition.Message, status.SoftRequirements.Error())
   542  		}
   543  
   544  		readyConditionUpdated := false
   545  		needToRecordEvent := false
   546  		for i := range node.Status.Conditions {
   547  			if node.Status.Conditions[i].Type == v1.NodeReady {
   548  				if node.Status.Conditions[i].Status == newNodeReadyCondition.Status {
   549  					newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
   550  				} else {
   551  					newNodeReadyCondition.LastTransitionTime = currentTime
   552  					needToRecordEvent = true
   553  				}
   554  				node.Status.Conditions[i] = newNodeReadyCondition
   555  				readyConditionUpdated = true
   556  				break
   557  			}
   558  		}
   559  		if !readyConditionUpdated {
   560  			newNodeReadyCondition.LastTransitionTime = currentTime
   561  			node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
   562  		}
   563  		if needToRecordEvent {
   564  			if newNodeReadyCondition.Status == v1.ConditionTrue {
   565  				recordEventFunc(v1.EventTypeNormal, events.NodeReady)
   566  			} else {
   567  				recordEventFunc(v1.EventTypeNormal, events.NodeNotReady)
   568  				klog.InfoS("Node became not ready", "node", klog.KObj(node), "condition", newNodeReadyCondition)
   569  			}
   570  		}
   571  		return nil
   572  	}
   573  }
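// Illustrative sketch, not part of the upstream file: a minimal ReadyCondition wiring with
// assumed no-op dependencies (node is an assumed *v1.Node). With CPU, memory and pods
// capacity already present on the node it reports KubeletReady; any runtime, network,
// storage or shutdown error (or missing capacity) flips the condition to False and bumps
// LastTransitionTime.
//
//	setter := ReadyCondition(
//		time.Now,
//		func() error { return nil },      // runtime errors
//		func() error { return nil },      // network errors
//		func() error { return nil },      // storage errors
//		nil,                              // no AppArmor validator
//		func() cm.Status { return cm.Status{} },
//		func() error { return nil },      // shutdown manager errors
//		func(eventType, event string) {}, // event recorder (assumed no-op)
//		false,                            // localStorageCapacityIsolation
//	)
//	_ = setter(context.TODO(), node)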
   574  
   575  // MemoryPressureCondition returns a Setter that updates the v1.NodeMemoryPressure condition on the node.
   576  func MemoryPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
   577  	pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderMemoryPressure
   578  	recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
   579  ) Setter {
   580  	return func(ctx context.Context, node *v1.Node) error {
   581  		currentTime := metav1.NewTime(nowFunc())
   582  		var condition *v1.NodeCondition
   583  
   584  		// Check if NodeMemoryPressure condition already exists and if it does, just pick it up for update.
   585  		for i := range node.Status.Conditions {
   586  			if node.Status.Conditions[i].Type == v1.NodeMemoryPressure {
   587  				condition = &node.Status.Conditions[i]
   588  			}
   589  		}
   590  
   591  		newCondition := false
   592  		// If the NodeMemoryPressure condition doesn't exist, create one
   593  		if condition == nil {
   594  			condition = &v1.NodeCondition{
   595  				Type:   v1.NodeMemoryPressure,
   596  				Status: v1.ConditionUnknown,
   597  			}
    598  			// The condition cannot be appended to node.Status.Conditions here because
    599  			// append copies the value into the slice, so none of the updates made
    600  			// below through the pointer would be reflected in the slice.
   601  			newCondition = true
   602  		}
   603  
   604  		// Update the heartbeat time
   605  		condition.LastHeartbeatTime = currentTime
   606  
   607  		// Note: The conditions below take care of the case when a new NodeMemoryPressure condition is
    608  		// created, as well as the case when the condition already exists. When a new condition
   609  		// is created its status is set to v1.ConditionUnknown which matches either
   610  		// condition.Status != v1.ConditionTrue or
   611  		// condition.Status != v1.ConditionFalse in the conditions below depending on whether
   612  		// the kubelet is under memory pressure or not.
   613  		if pressureFunc() {
   614  			if condition.Status != v1.ConditionTrue {
   615  				condition.Status = v1.ConditionTrue
   616  				condition.Reason = "KubeletHasInsufficientMemory"
   617  				condition.Message = "kubelet has insufficient memory available"
   618  				condition.LastTransitionTime = currentTime
   619  				recordEventFunc(v1.EventTypeNormal, "NodeHasInsufficientMemory")
   620  			}
   621  		} else if condition.Status != v1.ConditionFalse {
   622  			condition.Status = v1.ConditionFalse
   623  			condition.Reason = "KubeletHasSufficientMemory"
   624  			condition.Message = "kubelet has sufficient memory available"
   625  			condition.LastTransitionTime = currentTime
   626  			recordEventFunc(v1.EventTypeNormal, "NodeHasSufficientMemory")
   627  		}
   628  
   629  		if newCondition {
   630  			node.Status.Conditions = append(node.Status.Conditions, *condition)
   631  		}
   632  		return nil
   633  	}
   634  }
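// Illustrative sketch, not part of the upstream file: the "append last" pattern used by the
// pressure-condition setters exists because append copies the condition value into the
// slice, so later writes through the local pointer would not be visible there:
//
//	cond := &v1.NodeCondition{Type: v1.NodeMemoryPressure, Status: v1.ConditionUnknown}
//	conditions := append([]v1.NodeCondition{}, *cond) // the value is copied into the slice
//	cond.Status = v1.ConditionFalse                   // mutates only the local copy
//	// conditions[0].Status is still v1.ConditionUnknown
//	_ = conditions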
   635  
   636  // PIDPressureCondition returns a Setter that updates the v1.NodePIDPressure condition on the node.
   637  func PIDPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
   638  	pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderPIDPressure
   639  	recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
   640  ) Setter {
   641  	return func(ctx context.Context, node *v1.Node) error {
   642  		currentTime := metav1.NewTime(nowFunc())
   643  		var condition *v1.NodeCondition
   644  
   645  		// Check if NodePIDPressure condition already exists and if it does, just pick it up for update.
   646  		for i := range node.Status.Conditions {
   647  			if node.Status.Conditions[i].Type == v1.NodePIDPressure {
   648  				condition = &node.Status.Conditions[i]
   649  			}
   650  		}
   651  
   652  		newCondition := false
   653  		// If the NodePIDPressure condition doesn't exist, create one
   654  		if condition == nil {
   655  			condition = &v1.NodeCondition{
   656  				Type:   v1.NodePIDPressure,
   657  				Status: v1.ConditionUnknown,
   658  			}
    659  			// The condition cannot be appended to node.Status.Conditions here because
    660  			// append copies the value into the slice, so none of the updates made
    661  			// below through the pointer would be reflected in the slice.
   662  			newCondition = true
   663  		}
   664  
   665  		// Update the heartbeat time
   666  		condition.LastHeartbeatTime = currentTime
   667  
   668  		// Note: The conditions below take care of the case when a new NodePIDPressure condition is
    669  		// created, as well as the case when the condition already exists. When a new condition
   670  		// is created its status is set to v1.ConditionUnknown which matches either
   671  		// condition.Status != v1.ConditionTrue or
   672  		// condition.Status != v1.ConditionFalse in the conditions below depending on whether
   673  		// the kubelet is under PID pressure or not.
   674  		if pressureFunc() {
   675  			if condition.Status != v1.ConditionTrue {
   676  				condition.Status = v1.ConditionTrue
   677  				condition.Reason = "KubeletHasInsufficientPID"
   678  				condition.Message = "kubelet has insufficient PID available"
   679  				condition.LastTransitionTime = currentTime
   680  				recordEventFunc(v1.EventTypeNormal, "NodeHasInsufficientPID")
   681  			}
   682  		} else if condition.Status != v1.ConditionFalse {
   683  			condition.Status = v1.ConditionFalse
   684  			condition.Reason = "KubeletHasSufficientPID"
   685  			condition.Message = "kubelet has sufficient PID available"
   686  			condition.LastTransitionTime = currentTime
   687  			recordEventFunc(v1.EventTypeNormal, "NodeHasSufficientPID")
   688  		}
   689  
   690  		if newCondition {
   691  			node.Status.Conditions = append(node.Status.Conditions, *condition)
   692  		}
   693  		return nil
   694  	}
   695  }
   696  
   697  // DiskPressureCondition returns a Setter that updates the v1.NodeDiskPressure condition on the node.
   698  func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.Now
   699  	pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderDiskPressure
   700  	recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent
   701  ) Setter {
   702  	return func(ctx context.Context, node *v1.Node) error {
   703  		currentTime := metav1.NewTime(nowFunc())
   704  		var condition *v1.NodeCondition
   705  
   706  		// Check if NodeDiskPressure condition already exists and if it does, just pick it up for update.
   707  		for i := range node.Status.Conditions {
   708  			if node.Status.Conditions[i].Type == v1.NodeDiskPressure {
   709  				condition = &node.Status.Conditions[i]
   710  			}
   711  		}
   712  
   713  		newCondition := false
   714  		// If the NodeDiskPressure condition doesn't exist, create one
   715  		if condition == nil {
   716  			condition = &v1.NodeCondition{
   717  				Type:   v1.NodeDiskPressure,
   718  				Status: v1.ConditionUnknown,
   719  			}
    720  			// The condition cannot be appended to node.Status.Conditions here because
    721  			// append copies the value into the slice, so none of the updates made
    722  			// below through the pointer would be reflected in the slice.
   723  			newCondition = true
   724  		}
   725  
   726  		// Update the heartbeat time
   727  		condition.LastHeartbeatTime = currentTime
   728  
   729  		// Note: The conditions below take care of the case when a new NodeDiskPressure condition is
    730  		// created, as well as the case when the condition already exists. When a new condition
   731  		// is created its status is set to v1.ConditionUnknown which matches either
   732  		// condition.Status != v1.ConditionTrue or
   733  		// condition.Status != v1.ConditionFalse in the conditions below depending on whether
   734  		// the kubelet is under disk pressure or not.
   735  		if pressureFunc() {
   736  			if condition.Status != v1.ConditionTrue {
   737  				condition.Status = v1.ConditionTrue
   738  				condition.Reason = "KubeletHasDiskPressure"
   739  				condition.Message = "kubelet has disk pressure"
   740  				condition.LastTransitionTime = currentTime
   741  				recordEventFunc(v1.EventTypeNormal, "NodeHasDiskPressure")
   742  			}
   743  		} else if condition.Status != v1.ConditionFalse {
   744  			condition.Status = v1.ConditionFalse
   745  			condition.Reason = "KubeletHasNoDiskPressure"
   746  			condition.Message = "kubelet has no disk pressure"
   747  			condition.LastTransitionTime = currentTime
   748  			recordEventFunc(v1.EventTypeNormal, "NodeHasNoDiskPressure")
   749  		}
   750  
   751  		if newCondition {
   752  			node.Status.Conditions = append(node.Status.Conditions, *condition)
   753  		}
   754  		return nil
   755  	}
   756  }
   757  
   758  // VolumesInUse returns a Setter that updates the volumes in use on the node.
   759  func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced
   760  	volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse
   761  ) Setter {
   762  	return func(ctx context.Context, node *v1.Node) error {
    763  		// Only update the node status after the reconciler has started syncing its states.
   764  		if syncedFunc() {
   765  			node.Status.VolumesInUse = volumesInUseFunc()
   766  		}
   767  		return nil
   768  	}
   769  }
   770  
   771  // VolumeLimits returns a Setter that updates the volume limits on the node.
   772  func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLimits, // typically Kubelet.volumePluginMgr.ListVolumePluginWithLimits
   773  ) Setter {
   774  	return func(ctx context.Context, node *v1.Node) error {
   775  		if node.Status.Capacity == nil {
   776  			node.Status.Capacity = v1.ResourceList{}
   777  		}
   778  		if node.Status.Allocatable == nil {
   779  			node.Status.Allocatable = v1.ResourceList{}
   780  		}
   781  
   782  		pluginWithLimits := volumePluginListFunc()
   783  		for _, volumePlugin := range pluginWithLimits {
   784  			attachLimits, err := volumePlugin.GetVolumeLimits()
   785  			if err != nil {
   786  				klog.V(4).InfoS("Skipping volume limits for volume plugin", "plugin", volumePlugin.GetPluginName())
   787  				continue
   788  			}
   789  			for limitKey, value := range attachLimits {
   790  				node.Status.Capacity[v1.ResourceName(limitKey)] = *resource.NewQuantity(value, resource.DecimalSI)
   791  				node.Status.Allocatable[v1.ResourceName(limitKey)] = *resource.NewQuantity(value, resource.DecimalSI)
   792  			}
   793  		}
   794  		return nil
   795  	}
   796  }
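// Illustrative sketch, not part of the upstream file: each plugin's attach-limit map is
// copied verbatim into both Capacity and Allocatable, keyed by the plugin's limit resource
// name. For an assumed CSI driver reporting 25 attachable volumes:
//
//	// attachLimits returned by the plugin (key format assumed for illustration):
//	//   map[string]int64{"attachable-volumes-csi-example.driver.io": 25}
//	// after the setter runs:
//	//   node.Status.Capacity["attachable-volumes-csi-example.driver.io"]    == 25
//	//   node.Status.Allocatable["attachable-volumes-csi-example.driver.io"] == 25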