github.com/jingruilea/kubeedge@v1.2.0-beta.0.0.20200410162146-4bb8902b3879/edge/pkg/edged/edged.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@CHANGELOG
KubeEdge Authors: To create mini-kubelet for edge deployment scenario,
This file is derived from K8S Kubelet code with reduced set of methods
Changes done are
1. Package edged got some functions from "k8s.io/kubernetes/pkg/kubelet/kubelet.go"
and made some variant
*/

package edged

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/golang/protobuf/jsonpb"
	cadvisorapi "github.com/google/cadvisor/info/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/sets"
	utilwait "k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	recordtools "k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/flowcontrol"
	"k8s.io/client-go/util/workqueue"
	internalapi "k8s.io/cri-api/pkg/apis"
	"k8s.io/klog"
	pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
	kubeletinternalconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
	klconfigmap "k8s.io/kubernetes/pkg/kubelet/configmap"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/dockershim"
	dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote"
	"k8s.io/kubernetes/pkg/kubelet/images"
	"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	kubedns "k8s.io/kubernetes/pkg/kubelet/network/dns"
	"k8s.io/kubernetes/pkg/kubelet/pleg"
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
	plugincache "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
	"k8s.io/kubernetes/pkg/kubelet/prober"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
	"k8s.io/kubernetes/pkg/kubelet/remote"
	"k8s.io/kubernetes/pkg/kubelet/server/streaming"
	kubestatus "k8s.io/kubernetes/pkg/kubelet/status"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	"k8s.io/kubernetes/pkg/kubelet/util/queue"
	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/configmap"
	"k8s.io/kubernetes/pkg/volume/downwardapi"
	"k8s.io/kubernetes/pkg/volume/emptydir"
	"k8s.io/kubernetes/pkg/volume/hostpath"
	"k8s.io/kubernetes/pkg/volume/projected"
	secretvolume "k8s.io/kubernetes/pkg/volume/secret"
	"k8s.io/kubernetes/pkg/volume/util/hostutil"
	"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
	"k8s.io/utils/mount"

	"github.com/kubeedge/beehive/pkg/common/util"
	"github.com/kubeedge/beehive/pkg/core"
	beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
	"github.com/kubeedge/beehive/pkg/core/model"
	"github.com/kubeedge/kubeedge/common/constants"
	"github.com/kubeedge/kubeedge/edge/pkg/common/modules"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/apis"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/cadvisor"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/clcm"
	edgedconfig "github.com/kubeedge/kubeedge/edge/pkg/edged/config"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/containers"
	fakekube "github.com/kubeedge/kubeedge/edge/pkg/edged/fake"
	edgeimages "github.com/kubeedge/kubeedge/edge/pkg/edged/images"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/podmanager"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/server"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/status"
	edgedutil "github.com/kubeedge/kubeedge/edge/pkg/edged/util"
	utilpod "github.com/kubeedge/kubeedge/edge/pkg/edged/util/pod"
	"github.com/kubeedge/kubeedge/edge/pkg/edged/util/record"
	csiplugin "github.com/kubeedge/kubeedge/edge/pkg/edged/volume/csi"
	"github.com/kubeedge/kubeedge/edge/pkg/metamanager"
	"github.com/kubeedge/kubeedge/edge/pkg/metamanager/client"
	"github.com/kubeedge/kubeedge/pkg/apis/componentconfig/edgecore/v1alpha1"
	"github.com/kubeedge/kubeedge/pkg/version"
)

const (
	plegChannelCapacity = 1000
	plegRelistPeriod    = time.Second * 1
	backOffPeriod       = 10 * time.Second
	// MaxContainerBackOff is the max backoff period, exported for the e2e test
	MaxContainerBackOff = 300 * time.Second
	enqueueDuration     = 10 * time.Second
	// ImageGCPeriod is the period for performing image garbage collection.
	ImageGCPeriod = 5 * time.Second
	// ContainerGCPeriod is the period for performing container garbage collection.
	ContainerGCPeriod = 60 * time.Second
	// Period for performing global cleanup tasks.
	housekeepingPeriod   = time.Second * 2
	syncWorkQueuePeriod  = time.Second * 2
	minAge               = 60 * time.Second
	imageGcHighThreshold = "edged.image-gc-high-threshold"
	syncMsgRespTimeout   = 1 * time.Minute
	// DefaultRootDir gives the default root directory
	DefaultRootDir                   = "/var/lib/edged"
	workerResyncIntervalJitterFactor = 0.5
	// EdgeController gives the controller name
	EdgeController = "edgecontroller"

	// DockerShimEndpoint gives the default endpoint for the Docker shim runtime
	DockerShimEndpoint = "unix:///var/run/dockershim.sock"
	// DockerShimEndpointDeprecated is the deprecated dockershim endpoint
	DockerShimEndpointDeprecated = "/var/run/dockershim.sock"
	// DockershimRootDir gives the default path to the dockershim root directory
	DockershimRootDir = "/var/lib/dockershim"
	// HairpinMode is only used for the kubenet network plugin. Currently not working
	HairpinMode = kubeletinternalconfig.HairpinVeth
	// NonMasqueradeCIDR is only used for the kubenet network plugin. Currently not working
	NonMasqueradeCIDR = "10.0.0.1/8"
	// cgroupName is used to check whether the cgroup is mounted (default "")
	cgroupName = ""
	// redirectContainerStream decides whether to redirect the container stream
	redirectContainerStream = false
	// ResolvConfDefault gives the default dns resolv configuration file
	ResolvConfDefault = "/etc/resolv.conf"
)

var (
	zeroDuration = metav1.Duration{}
)

// podReady holds the initPodReady flag and its lock
type podReady struct {
	// initPodReady is flag to check Pod ready status
	initPodReady bool
	// podReadyLock is used to guard initPodReady flag
	podReadyLock sync.RWMutex
}

// edged defines the edged module
type edged struct {
	// dns config
	dnsConfigurer             *kubedns.Configurer
	hostname                  string
	namespace                 string
	nodeName                  string
	interfaceName             string
	uid                       types.UID
	nodeStatusUpdateFrequency time.Duration
	registrationCompleted     bool
	containerManager          cm.ContainerManager
	containerRuntimeName      string
	concurrentConsumers       int
	// container runtime
	containerRuntime   kubecontainer.Runtime
	podCache           kubecontainer.Cache
	os                 kubecontainer.OSInterface
	runtimeService     internalapi.RuntimeService
	podManager         podmanager.Manager
	pleg               pleg.PodLifecycleEventGenerator
	statusManager      kubestatus.Manager
	kubeClient         clientset.Interface
	probeManager       prober.Manager
	livenessManager    proberesults.Manager
	startupManager     proberesults.Manager
	server             *server.Server
	podAdditionQueue   *workqueue.Type
	podAdditionBackoff *flowcontrol.Backoff
	podDeletionQueue   *workqueue.Type
	podDeletionBackoff *flowcontrol.Backoff
	imageGCManager     images.ImageGCManager
	containerGCManager kubecontainer.ContainerGC
	metaClient         client.CoreInterface
	volumePluginMgr    *volume.VolumePluginMgr
	mounter            mount.Interface
	hostUtil           hostutil.HostUtils
	volumeManager      volumemanager.VolumeManager
	rootDirectory      string
	gpuPluginEnabled   bool
	version            string
	// podReady is structure with initPodReady flag and its lock
	podReady
	// cache for secret
	secretStore    cache.Store
	configMapStore cache.Store
	workQueue      queue.WorkQueue
	clcm           clcm.ContainerLifecycleManager
	// edged cgroup driver for container runtime
	cgroupDriver string
	// clusterDNS is the list of cluster DNS server IPs
	clusterDNS []net.IP
	// edge node IP
	nodeIP net.IP

	// pluginManager runs a set of asynchronous loops that figure out which
	// plugins need to be registered/unregistered based on this node and makes it so.
	pluginManager pluginmanager.PluginManager

	recorder recordtools.EventRecorder
	enable   bool

	configMapManager klconfigmap.Manager
}

// Register registers edged
func Register(e *v1alpha1.Edged) {
	edgedconfig.InitConfigure(e)
	edged, err := newEdged(e.Enable)
	if err != nil {
		klog.Errorf("init new edged error, %v", err)
		os.Exit(1)
		return
	}
	core.Register(edged)
}

func (e *edged) Name() string {
	return modules.EdgedModuleName
}

func (e *edged) Group() string {
	return modules.EdgedGroup
}

// Enable indicates whether this module is enabled
func (e *edged) Enable() bool {
	return e.enable
}

func (e *edged) Start() {
	e.volumePluginMgr = NewInitializedVolumePluginMgr(e, ProbeVolumePlugins(""))

	e.statusManager = status.NewManager(e.kubeClient, e.podManager, utilpod.NewPodDeleteSafety(), e.metaClient)
	if err := e.initializeModules(); err != nil {
		klog.Errorf("initialize module error: %v", err)
		os.Exit(1)
	}
	e.hostUtil = hostutil.NewHostUtil()

	e.configMapManager = klconfigmap.NewSimpleConfigMapManager(e.kubeClient)

	e.volumeManager = volumemanager.NewVolumeManager(
		true,
		types.NodeName(e.nodeName),
		e.podManager,
		e.statusManager,
		e.kubeClient,
		e.volumePluginMgr,
		e.containerRuntime,
		e.mounter,
		e.hostUtil,
		e.getPodsDir(),
		record.NewEventRecorder(),
		false,
		false,
		volumepathhandler.NewBlockVolumePathHandler(),
	)
	go e.volumeManager.Run(edgedutil.NewSourcesReady(), utilwait.NeverStop)
	go utilwait.Until(e.syncNodeStatus, e.nodeStatusUpdateFrequency, utilwait.NeverStop)

	e.probeManager = prober.NewManager(e.statusManager, e.livenessManager, e.startupManager, containers.NewContainerRunner(), kubecontainer.NewRefManager(), record.NewEventRecorder())
	e.pleg = pleg.NewGenericPLEG(e.containerRuntime, plegChannelCapacity, plegRelistPeriod, e.podCache, clock.RealClock{})
	e.statusManager.Start()
	e.pleg.Start()

	e.podAddWorkerRun(e.concurrentConsumers)
	e.podRemoveWorkerRun(e.concurrentConsumers)

	housekeepingTicker := time.NewTicker(housekeepingPeriod)
	syncWorkQueueCh := time.NewTicker(syncWorkQueuePeriod)
	e.probeManager.Start()
	go e.syncLoopIteration(e.pleg.Watch(), housekeepingTicker.C, syncWorkQueueCh.C)
	go e.server.ListenAndServe()

	e.imageGCManager.Start()
	e.StartGarbageCollection()

	e.pluginManager = pluginmanager.NewPluginManager(
		e.getPluginsRegistrationDir(), /* sockDir */
		nil,
	)

	// Adding Registration Callback function for CSI Driver
	e.pluginManager.AddHandler(pluginwatcherapi.CSIPlugin, plugincache.PluginHandler(csiplugin.PluginHandler))
	// Start the plugin manager
	klog.Infof("starting plugin manager")
	go e.pluginManager.Run(edgedutil.NewSourcesReady(), utilwait.NeverStop)

	klog.Infof("starting syncPod")
	e.syncPod()
}

// isInitPodReady is used to safely return initPodReady flag
func (e *edged) isInitPodReady() bool {
	e.podReadyLock.RLock()
	defer e.podReadyLock.RUnlock()
	return e.initPodReady
}

// setInitPodReady is used to safely set initPodReady flag
func (e *edged) setInitPodReady(readyStatus bool) {
	e.podReadyLock.Lock()
	defer e.podReadyLock.Unlock()
	e.initPodReady = readyStatus
}
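
// getRuntimeAndImageServices dials the configured CRI runtime and image service
// endpoints and returns the gRPC-backed clients used by the runtime manager in
// newEdged. Both endpoints may point at the same socket, as they do when the
// dockershim started below is used.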
func getRuntimeAndImageServices(remoteRuntimeEndpoint string, remoteImageEndpoint string, runtimeRequestTimeout metav1.Duration) (internalapi.RuntimeService, internalapi.ImageManagerService, error) {
	rs, err := remote.NewRemoteRuntimeService(remoteRuntimeEndpoint, runtimeRequestTimeout.Duration)
	if err != nil {
		return nil, nil, err
	}
	is, err := remote.NewRemoteImageService(remoteImageEndpoint, runtimeRequestTimeout.Duration)
	if err != nil {
		return nil, nil, err
	}
	return rs, is, err
}

// newEdged creates a new edged object and initialises it
func newEdged(enable bool) (*edged, error) {
	backoff := flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)

	podManager := podmanager.NewPodManager()
	policy := images.ImageGCPolicy{
		HighThresholdPercent: int(edgedconfig.Config.ImageGCHighThreshold),
		LowThresholdPercent:  int(edgedconfig.Config.ImageGCLowThreshold),
		MinAge:               minAge,
	}
	// build new object to match interface
	recorder := record.NewEventRecorder()

	metaClient := client.New()

	ed := &edged{
		nodeName:                  edgedconfig.Config.HostnameOverride,
		interfaceName:             edgedconfig.Config.InterfaceName,
		namespace:                 edgedconfig.Config.RegisterNodeNamespace,
		gpuPluginEnabled:          edgedconfig.Config.GPUPluginEnabled,
		cgroupDriver:              edgedconfig.Config.CGroupDriver,
		concurrentConsumers:       edgedconfig.Config.ConcurrentConsumers,
		podManager:                podManager,
		podAdditionQueue:          workqueue.New(),
		podCache:                  kubecontainer.NewCache(),
		podAdditionBackoff:        backoff,
		podDeletionQueue:          workqueue.New(),
		podDeletionBackoff:        backoff,
		metaClient:                metaClient,
		kubeClient:                fakekube.NewSimpleClientset(metaClient),
		nodeStatusUpdateFrequency: time.Duration(edgedconfig.Config.NodeStatusUpdateFrequency) * time.Second,
		mounter:                   mount.New(""),
		uid:                       types.UID("38796d14-1df3-11e8-8e5a-286ed488f209"),
		version:                   fmt.Sprintf("%s-kubeedge-%s", constants.CurrentSupportK8sVersion, version.Get()),
		rootDirectory:             DefaultRootDir,
		secretStore:               cache.NewStore(cache.MetaNamespaceKeyFunc),
		configMapStore:            cache.NewStore(cache.MetaNamespaceKeyFunc),
		workQueue:                 queue.NewBasicWorkQueue(clock.RealClock{}),
		nodeIP:                    net.ParseIP(edgedconfig.Config.NodeIP),
		recorder:                  recorder,
		enable:                    enable,
	}

	err := ed.makePodDir()
	if err != nil {
		klog.Errorf("create pod dir [%s] failed: %v", ed.getPodsDir(), err)
		os.Exit(1)
	}

	ed.livenessManager = proberesults.NewManager()
	ed.startupManager = proberesults.NewManager()

	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string(ed.nodeName),
		UID:       types.UID(ed.nodeName),
		Namespace: "",
	}
	statsProvider := edgeimages.NewStatsProvider()
	containerGCPolicy := kubecontainer.ContainerGCPolicy{
		MinAge:             minAge,
		MaxContainers:      -1,
		MaxPerPodContainer: int(edgedconfig.Config.MaximumDeadContainersPerPod),
	}

	// create and start the docker shim running as a grpc server
	if edgedconfig.Config.RemoteRuntimeEndpoint == DockerShimEndpoint ||
		edgedconfig.Config.RemoteRuntimeEndpoint == DockerShimEndpointDeprecated {
		streamingConfig := &streaming.Config{}
		DockerClientConfig := &dockershim.ClientConfig{
			DockerEndpoint:            edgedconfig.Config.DockerAddress,
			ImagePullProgressDeadline: time.Duration(edgedconfig.Config.ImagePullProgressDeadline) * time.Second,
			EnableSleep:               true,
			WithTraceDisabled:         true,
		}

		pluginConfigs := dockershim.NetworkPluginSettings{
			HairpinMode:        kubeletinternalconfig.HairpinMode(HairpinMode),
			NonMasqueradeCIDR:  NonMasqueradeCIDR,
			PluginName:         edgedconfig.Config.NetworkPluginName,
			PluginBinDirString: edgedconfig.Config.CNIBinDir,
			PluginConfDir:      edgedconfig.Config.CNIConfDir,
			PluginCacheDir:     edgedconfig.Config.CNICacheDir,
			MTU:                int(edgedconfig.Config.NetworkPluginMTU),
		}

		redirectContainerStream := redirectContainerStream
		cgroupDriver := ed.cgroupDriver

		ds, err := dockershim.NewDockerService(DockerClientConfig,
			edgedconfig.Config.PodSandboxImage,
			streamingConfig,
			&pluginConfigs,
			cgroupName,
			cgroupDriver,
			DockershimRootDir,
			redirectContainerStream)

		if err != nil {
			return nil, err
		}

		klog.Infof("RemoteRuntimeEndpoint: %q, remoteImageEndpoint: %q",
			edgedconfig.Config.RemoteRuntimeEndpoint, edgedconfig.Config.RemoteImageEndpoint)

		klog.Info("Starting the GRPC server for the docker CRI shim.")
		server := dockerremote.NewDockerServer(edgedconfig.Config.RemoteRuntimeEndpoint, ds)
		if err := server.Start(); err != nil {
			return nil, err
		}
	}
	ed.clusterDNS = convertStrToIP(edgedconfig.Config.ClusterDNS)
	ed.dnsConfigurer = kubedns.NewConfigurer(recorder,
		nodeRef,
		ed.nodeIP,
		ed.clusterDNS,
		edgedconfig.Config.ClusterDomain,
		ResolvConfDefault)

	containerRefManager := kubecontainer.NewRefManager()
	httpClient := &http.Client{}
	runtimeService, imageService, err := getRuntimeAndImageServices(
		edgedconfig.Config.RemoteRuntimeEndpoint,
		edgedconfig.Config.RemoteImageEndpoint,
		metav1.Duration{
			Duration: time.Duration(edgedconfig.Config.RuntimeRequestTimeout) * time.Minute,
		})
	if err != nil {
		return nil, err
	}
	if ed.os == nil {
		ed.os = kubecontainer.RealOS{}
	}

	ed.clcm, err = clcm.NewContainerLifecycleManager(DefaultRootDir)
	if err != nil {
		return nil, err
	}

	var machineInfo cadvisorapi.MachineInfo
	machineInfo.MemoryCapacity = uint64(edgedconfig.Config.EdgedMemoryCapacity)
	containerRuntime, err := kuberuntime.NewKubeGenericRuntimeManager(
		recorder,
		ed.livenessManager,
		ed.startupManager,
		"",
		containerRefManager,
		&machineInfo,
		ed,
		ed.os,
		ed,
		httpClient,
		backoff,
		false,
		0,
		0,
		false,
		metav1.Duration{Duration: 100 * time.Millisecond},
		runtimeService,
		imageService,
		ed.clcm.InternalContainerLifecycle(),
		nil,
		nil,
	)
	if err != nil {
		return nil, fmt.Errorf("New generic runtime manager failed, err: %s", err.Error())
	}

	cadvisorInterface, err := cadvisor.New("")
	if err != nil {
		return nil, err
	}
	containerManager, err := cm.NewContainerManager(mount.New(""),
		cadvisorInterface,
		cm.NodeConfig{
			CgroupDriver:                 edgedconfig.Config.CGroupDriver,
			SystemCgroupsName:            edgedconfig.Config.CGroupDriver,
			KubeletCgroupsName:           edgedconfig.Config.CGroupDriver,
			ContainerRuntime:             edgedconfig.Config.RuntimeType,
			KubeletRootDir:               DefaultRootDir,
			ExperimentalCPUManagerPolicy: string(cpumanager.PolicyNone),
		},
		false,
		edgedconfig.Config.DevicePluginEnabled,
		recorder)
	if err != nil {
		return nil, fmt.Errorf("init container manager failed with error: %v", err)
	}
	ed.containerRuntime = containerRuntime
	ed.containerRuntimeName = edgedconfig.RemoteContainerRuntime
	ed.containerManager = containerManager
	ed.runtimeService = runtimeService
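	// Image GC removes unused images once disk usage crosses the configured high
	// threshold and works back down toward the low threshold; container GC (below)
	// prunes dead containers older than minAge. Both are driven by the periodic
	// loops started in StartGarbageCollection.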
	imageGCManager, err := images.NewImageGCManager(
		ed.containerRuntime,
		statsProvider,
		recorder,
		nodeRef,
		policy,
		edgedconfig.Config.PodSandboxImage,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize image manager: %v", err)
	}
	ed.imageGCManager = imageGCManager

	containerGCManager, err := kubecontainer.NewContainerGC(
		containerRuntime,
		containerGCPolicy,
		&containers.KubeSourcesReady{})
	if err != nil {
		return nil, fmt.Errorf("init Container GC Manager failed with error %s", err.Error())
	}
	ed.containerGCManager = containerGCManager
	ed.server = server.NewServer(ed.podManager)

	return ed, nil
}

func (e *edged) initializeModules() error {
	node, err := e.initialNode()
	if err != nil {
		klog.Errorf("Failed to initialNode %v", err)
		return err
	}

	err = e.containerManager.Start(node, e.GetActivePods, edgedutil.NewSourcesReady(), e.statusManager, e.runtimeService)
	if err != nil {
		klog.Errorf("Failed to start device plugin manager %v", err)
		return err
	}
	return nil
}

func (e *edged) StartGarbageCollection() {
	go utilwait.Until(func() {
		err := e.imageGCManager.GarbageCollect()
		if err != nil {
			klog.Errorf("Image garbage collection failed: %v", err)
		}
	}, ImageGCPeriod, utilwait.NeverStop)

	go utilwait.Until(func() {
		if e.isInitPodReady() {
			err := e.containerGCManager.GarbageCollect()
			if err != nil {
				klog.Errorf("Container garbage collection failed: %v", err)
			}
		}
	}, ContainerGCPeriod, utilwait.NeverStop)
}
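
// syncLoopIteration is edged's main event loop, started as a single goroutine from
// Start(). It multiplexes four sources: liveness-probe failures (re-enqueue the pod
// for sync unless its restart policy forbids a restart), PLEG events (re-enqueue pods
// whose containers died), the housekeeping ticker (orphaned pod cleanup), and the
// work-queue ticker (periodic resync of pods whose work is due).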
func (e *edged) syncLoopIteration(plegCh <-chan *pleg.PodLifecycleEvent, housekeepingCh <-chan time.Time, syncWorkQueueCh <-chan time.Time) {
	for {
		select {
		case update := <-e.livenessManager.Updates():
			if update.Result == proberesults.Failure {
				pod, ok := e.podManager.GetPodByUID(update.PodUID)
				if !ok {
					klog.Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update)
					break
				}
				klog.Infof("SyncLoop (container unhealthy): %q", format.Pod(pod))
				if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
					break
				}
				var containerCompleted bool
				if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
					for _, containerStatus := range pod.Status.ContainerStatuses {
						if containerStatus.State.Terminated != nil && containerStatus.State.Terminated.ExitCode == 0 {
							containerCompleted = true
							break
						}
					}
					if containerCompleted {
						break
					}
				}
				klog.Infof("Will restart pod [%s]", pod.Name)
				key := types.NamespacedName{
					Namespace: pod.Namespace,
					Name:      pod.Name,
				}
				e.podAdditionQueue.Add(key.String())
			}
		case plegEvent := <-plegCh:
			if pod, ok := e.podManager.GetPodByUID(plegEvent.ID); ok {
				if err := e.updatePodStatus(pod); err != nil {
					klog.Errorf("update pod %s status error", pod.Name)
					break
				}
				if plegEvent.Type == pleg.ContainerDied {
					if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
						break
					}
					var containerCompleted bool
					if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
						for _, containerStatus := range pod.Status.ContainerStatuses {
							if containerStatus.State.Terminated != nil && containerStatus.State.Terminated.ExitCode == 0 {
								containerCompleted = true
								break
							}
						}
						if containerCompleted {
							break
						}
					}
					klog.Errorf("sync loop get event container died, restart pod [%s]", pod.Name)
					key := types.NamespacedName{
						Namespace: pod.Namespace,
						Name:      pod.Name,
					}
					e.podAdditionQueue.Add(key.String())
				} else {
					klog.Infof("sync loop get event [%s], ignore it now.", plegEvent.Type)
				}
			} else {
				klog.Infof("sync loop ignore event: [%s], with pod [%s] not found", plegEvent.Type, plegEvent.ID)
			}
		case <-housekeepingCh:
			err := e.HandlePodCleanups()
			if err != nil {
				klog.Errorf("Handle Pod Cleanup Failed: %v", err)
			}
		case <-syncWorkQueueCh:
			podsToSync := e.getPodsToSync()
			if len(podsToSync) == 0 {
				break
			}
			for _, pod := range podsToSync {
				if !e.podIsTerminated(pod) {
					key := types.NamespacedName{
						Namespace: pod.Namespace,
						Name:      pod.Name,
					}
					e.podAdditionQueue.Add(key.String())
				}
			}
		}
	}
}

// NewNamespacedNameFromString parses the provided string and returns a NamespacedName
func NewNamespacedNameFromString(s string) types.NamespacedName {
	Separator := '/'
	nn := types.NamespacedName{}
	result := strings.Split(s, string(Separator))
	if len(result) == 2 {
		nn.Namespace = result[0]
		nn.Name = result[1]
	}
	return nn
}

func (e *edged) podAddWorkerRun(consumers int) {
	for i := 0; i < consumers; i++ {
		klog.Infof("start pod addition queue work %d", i)
		go func(i int) {
			for {
				item, quit := e.podAdditionQueue.Get()
				if quit {
					klog.Errorf("consumer: [%d], worker addition queue is shutting down!", i)
					return
				}
				namespacedName := NewNamespacedNameFromString(item.(string))
				podName := namespacedName.Name
				klog.Infof("worker [%d] get pod addition item [%s]", i, podName)
				backOffKey := fmt.Sprintf("pod_addition_worker_%s", podName)
				if e.podAdditionBackoff.IsInBackOffSinceUpdate(backOffKey, e.podAdditionBackoff.Clock.Now()) {
					klog.Errorf("consume pod addition backoff: Back-off consume pod [%s] addition error, backoff: [%v]", podName, e.podAdditionBackoff.Get(backOffKey))
					go func() {
						klog.Infof("worker [%d] backoff pod addition item [%s] failed, re-add to queue", i, podName)
						time.Sleep(e.podAdditionBackoff.Get(backOffKey))
						e.podAdditionQueue.Add(item)
					}()
					e.podAdditionQueue.Done(item)
					continue
				}
				err := e.consumePodAddition(&namespacedName)
				if err != nil {
					if err == apis.ErrPodNotFound {
						klog.Infof("worker [%d] handle pod addition item [%s] failed with not found error.", i, podName)
						e.podAdditionBackoff.Reset(backOffKey)
					} else {
						go func() {
							klog.Errorf("worker [%d] handle pod addition item [%s] failed: %v, re-add to queue", i, podName, err)
							e.podAdditionBackoff.Next(backOffKey, e.podAdditionBackoff.Clock.Now())
							time.Sleep(enqueueDuration)
							e.podAdditionQueue.Add(item)
						}()
					}
				} else {
					e.podAdditionBackoff.Reset(backOffKey)
				}
				e.podAdditionQueue.Done(item)
			}
		}(i)
	}
}
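
// podRemoveWorkerRun starts the given number of consumers that drain the pod deletion
// queue. Queue items are "namespace/name" keys (produced by types.NamespacedName.String()
// and parsed back with NewNamespacedNameFromString); deletions that fail for reasons other
// than a missing pod or container are re-queued after a short delay.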
func (e *edged) podRemoveWorkerRun(consumers int) {
	for i := 0; i < consumers; i++ {
		go func(i int) {
			for {
				item, quit := e.podDeletionQueue.Get()
				if quit {
					klog.Errorf("consumer: [%d], worker deletion queue is shutting down!", i)
					return
				}
				namespacedName := NewNamespacedNameFromString(item.(string))
				podName := namespacedName.Name
				klog.Infof("consumer: [%d], worker get removed pod [%s]\n", i, podName)
				err := e.consumePodDeletion(&namespacedName)
				if err != nil {
					if err == apis.ErrContainerNotFound {
						klog.Infof("pod [%s] does not exist, with container not found error", podName)
					} else if err == apis.ErrPodNotFound {
						klog.Infof("pod [%s] is not found", podName)
					} else {
						go func(item interface{}) {
							klog.Errorf("worker remove pod [%s] failed: %v", podName, err)
							time.Sleep(2 * time.Second)
							e.podDeletionQueue.Add(item)
						}(item)
					}
				}
				e.podDeletionQueue.Done(item)
			}
		}(i)
	}
}

func (e *edged) consumePodAddition(namespacedName *types.NamespacedName) error {
	podName := namespacedName.Name
	klog.Infof("start to consume added pod [%s]", podName)
	pod, ok := e.podManager.GetPodByName(namespacedName.Namespace, podName)
	if !ok || pod.DeletionTimestamp != nil {
		return apis.ErrPodNotFound
	}

	if err := e.makePodDataDirs(pod); err != nil {
		klog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err)
		return err
	}

	if err := e.volumeManager.WaitForAttachAndMount(pod); err != nil {
		klog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
		return err
	}

	secrets, err := e.getSecretsFromMetaManager(pod)
	if err != nil {
		return err
	}

	curPodStatus, err := e.podCache.Get(pod.GetUID())
	if err != nil {
		klog.Errorf("Pod status for %s from cache failed: %v", podName, err)
		return err
	}

	result := e.containerRuntime.SyncPod(pod, curPodStatus, secrets, e.podAdditionBackoff)
	if err := result.Error(); err != nil {
		// Do not return error if the only failures were pods in backoff
		for _, r := range result.SyncResults {
			if r.Error != kubecontainer.ErrCrashLoopBackOff && r.Error != images.ErrImagePullBackOff {
				// Do not record an event here, as we keep all event logging for sync pod failures
				// local to container runtime so we get better errors
				return err
			}
		}

		return nil
	}

	e.workQueue.Enqueue(pod.UID, utilwait.Jitter(time.Minute, workerResyncIntervalJitterFactor))
	klog.Infof("consume added pod [%s] successfully\n", podName)
	return nil
}

func (e *edged) consumePodDeletion(namespacedName *types.NamespacedName) error {
	podName := namespacedName.Name
	klog.Infof("start to consume removed pod [%s]", podName)
	pod, ok := e.podManager.GetPodByName(namespacedName.Namespace, podName)
	if !ok {
		return apis.ErrPodNotFound
	}

	podStatus, err := e.podCache.Get(pod.GetUID())
	if err != nil {
		klog.Errorf("Pod status for %s from cache failed: %v", podName, err)
		return err
	}

	err = e.containerRuntime.KillPod(pod, kubecontainer.ConvertPodStatusToRunningPod(e.containerRuntimeName, podStatus), nil)
	if err != nil {
		if err == apis.ErrContainerNotFound {
			return err
		}
		return fmt.Errorf("consume removed pod [%s] failed, %v", podName, err)
	}
	klog.Infof("consume removed pod [%s] successfully\n", podName)
	return nil
}
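
// syncPod is the message loop that keeps edged in sync with the cloud. After a short
// startup delay it queries the metamanager for the existing pod list, then consumes
// messages addressed to this module: pod inserts/updates/deletes, configmap and secret
// updates for the local caches, and CSI volume operations, whose results are sent back
// as response messages.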
func (e *edged) syncPod() {
	time.Sleep(10 * time.Second)

	// send msg to metamanager to get existing pods
	info := model.NewMessage("").BuildRouter(e.Name(), e.Group(), e.namespace+"/"+model.ResourceTypePod,
		model.QueryOperation)
	beehiveContext.Send(metamanager.MetaManagerModuleName, *info)
	for {
		select {
		case <-beehiveContext.Done():
			klog.Warning("Sync pod stop")
			return
		default:
		}
		result, err := beehiveContext.Receive(e.Name())
		if err != nil {
			klog.Errorf("failed to get pod: %v", err)
			continue
		}

		_, resType, resID, err := util.ParseResourceEdge(result.GetResource(), result.GetOperation())
		if err != nil {
			klog.Errorf("failed to parse the Resource: %v", err)
			continue
		}
		op := result.GetOperation()

		var content []byte

		switch result.Content.(type) {
		case []byte:
			content = result.GetContent().([]byte)
		default:
			content, err = json.Marshal(result.Content)
			if err != nil {
				klog.Errorf("marshal message content failed: %v", err)
				continue
			}
		}
		klog.Infof("result content is %s", result.Content)
		switch resType {
		case model.ResourceTypePod:
			if op == model.ResponseOperation && resID == "" && result.GetSource() == metamanager.MetaManagerModuleName {
				err := e.handlePodListFromMetaManager(content)
				if err != nil {
					klog.Errorf("handle podList failed: %v", err)
					continue
				}
				e.setInitPodReady(true)
			} else if op == model.ResponseOperation && resID == "" && result.GetSource() == EdgeController {
				err := e.handlePodListFromEdgeController(content)
				if err != nil {
					klog.Errorf("handle controllerPodList failed: %v", err)
					continue
				}
				e.setInitPodReady(true)
			} else {
				err := e.handlePod(op, content)
				if err != nil {
					klog.Errorf("handle pod failed: %v", err)
					continue
				}
			}
		case model.ResourceTypeConfigmap:
			if op != model.ResponseOperation {
				err := e.handleConfigMap(op, content)
				if err != nil {
					klog.Errorf("handle configMap failed: %v", err)
				}
			} else {
				klog.Infof("skip to handle configMap with type response")
				continue
			}
		case model.ResourceTypeSecret:
			if op != model.ResponseOperation {
				err := e.handleSecret(op, content)
				if err != nil {
					klog.Errorf("handle secret failed: %v", err)
				}
			} else {
				klog.Infof("skip to handle secret with type response")
				continue
			}
		case constants.CSIResourceTypeVolume:
			klog.Infof("volume operation type: %s", op)
			res, err := e.handleVolume(op, content)
			if err != nil {
				klog.Errorf("handle volume failed: %v", err)
			} else {
				resp := result.NewRespByMessage(&result, res)
				beehiveContext.SendResp(*resp)
			}
		default:
			klog.Errorf("resType is not pod or configmap or secret or volume: resType is %s", resType)
			continue
		}
	}
}

func (e *edged) handleVolume(op string, content []byte) (interface{}, error) {
	switch op {
	case constants.CSIOperationTypeCreateVolume:
		return e.createVolume(content)
	case constants.CSIOperationTypeDeleteVolume:
		return e.deleteVolume(content)
	case constants.CSIOperationTypeControllerPublishVolume:
		return e.controllerPublishVolume(content)
	case constants.CSIOperationTypeControllerUnpublishVolume:
		return e.controllerUnpublishVolume(content)
	}
	return nil, nil
}

func (e *edged) createVolume(content []byte) (interface{}, error) {
	req := &csi.CreateVolumeRequest{}
	err := jsonpb.Unmarshal(bytes.NewReader(content), req)
	if err != nil {
		klog.Errorf("unmarshal create volume req error: %v", err)
		return nil, err
	}

	klog.V(4).Infof("start create volume: %s", req.Name)
	ctl := csiplugin.NewController()
	res, err := ctl.CreateVolume(req)
	if err != nil {
		klog.Errorf("create volume error: %v", err)
		return nil, err
	}
	klog.V(4).Infof("end create volume: %s result: %v", req.Name, res)
	return res, nil
}

func (e *edged) deleteVolume(content []byte) (interface{}, error) {
	req := &csi.DeleteVolumeRequest{}
	err := jsonpb.Unmarshal(bytes.NewReader(content), req)
	if err != nil {
		klog.Errorf("unmarshal delete volume req error: %v", err)
		return nil, err
	}
	klog.V(4).Infof("start delete volume: %s", req.VolumeId)
	ctl := csiplugin.NewController()
	res, err := ctl.DeleteVolume(req)
	if err != nil {
		klog.Errorf("delete volume error: %v", err)
		return nil, err
	}
	klog.V(4).Infof("end delete volume: %s result: %v", req.VolumeId, res)
	return res, nil
}

func (e *edged) controllerPublishVolume(content []byte) (interface{}, error) {
	req := &csi.ControllerPublishVolumeRequest{}
	err := jsonpb.Unmarshal(bytes.NewReader(content), req)
	if err != nil {
		klog.Errorf("unmarshal controller publish volume req error: %v", err)
		return nil, err
	}
	klog.V(4).Infof("start controller publish volume: %s", req.VolumeId)
	ctl := csiplugin.NewController()
	res, err := ctl.ControllerPublishVolume(req)
	if err != nil {
		klog.Errorf("controller publish volume error: %v", err)
		return nil, err
	}
	klog.V(4).Infof("end controller publish volume: %s result: %v", req.VolumeId, res)
	return res, nil
}

func (e *edged) controllerUnpublishVolume(content []byte) (interface{}, error) {
	req := &csi.ControllerUnpublishVolumeRequest{}
	err := jsonpb.Unmarshal(bytes.NewReader(content), req)
	if err != nil {
		klog.Errorf("unmarshal controller unpublish volume req error: %v", err)
		return nil, err
	}
	klog.V(4).Infof("start controller unpublish volume: %s", req.VolumeId)
	ctl := csiplugin.NewController()
	res, err := ctl.ControllerUnpublishVolume(req)
	if err != nil {
		klog.Errorf("controller unpublish volume error: %v", err)
		return nil, err
	}
	klog.V(4).Infof("end controller unpublish volume: %s result: %v", req.VolumeId, res)
	return res, nil
}

func (e *edged) handlePod(op string, content []byte) (err error) {
	var pod v1.Pod
	err = json.Unmarshal(content, &pod)
	if err != nil {
		return err
	}

	switch op {
	case model.InsertOperation:
		e.addPod(&pod)
	case model.UpdateOperation:
		e.updatePod(&pod)
	case model.DeleteOperation:
		if delPod, ok := e.podManager.GetPodByName(pod.Namespace, pod.Name); ok {
			e.deletePod(delPod)
		}
	}
	return nil
}

func (e *edged) handlePodListFromMetaManager(content []byte) (err error) {
	var lists []string
	err = json.Unmarshal([]byte(content), &lists)
	if err != nil {
		return err
	}

	for _, list := range lists {
		var pod v1.Pod
		err = json.Unmarshal([]byte(list), &pod)
		if err != nil {
			return err
		}
		e.addPod(&pod)
	}

	return nil
}

func (e *edged) handlePodListFromEdgeController(content []byte) (err error) {
	var lists []v1.Pod
	if err := json.Unmarshal(content, &lists); err != nil {
		return err
	}

	// index into the slice rather than taking the address of the loop variable,
	// so each stored pod pointer refers to a distinct element
	for i := range lists {
		e.addPod(&lists[i])
	}

	return nil
}
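
// addPod registers a pod received from the cloud or the metamanager: it lets the
// container manager account for plugin resources, adds the pod to the pod and probe
// managers, and enqueues its "namespace/name" key so an addition worker performs the
// actual sync.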
func (e *edged) addPod(obj interface{}) {
	pod := obj.(*v1.Pod)
	klog.Infof("start sync addition for pod [%s]", pod.Name)
	attrs := &lifecycle.PodAdmitAttributes{}
	attrs.Pod = pod
	otherpods := e.podManager.GetPods()
	attrs.OtherPods = otherpods
	nodeInfo := schedulercache.NewNodeInfo(pod)
	e.containerManager.UpdatePluginResources(nodeInfo, attrs)
	key := types.NamespacedName{
		Namespace: pod.Namespace,
		Name:      pod.Name,
	}
	e.podManager.AddPod(pod)
	e.probeManager.AddPod(pod)
	e.podAdditionQueue.Add(key.String())
	klog.Infof("success sync addition for pod [%s]", pod.Name)
}

func (e *edged) updatePod(obj interface{}) {
	newPod := obj.(*v1.Pod)
	klog.Infof("start update pod [%s]", newPod.Name)
	key := types.NamespacedName{
		Namespace: newPod.Namespace,
		Name:      newPod.Name,
	}
	e.podManager.UpdatePod(newPod)
	e.probeManager.AddPod(newPod)
	if newPod.DeletionTimestamp == nil {
		e.podAdditionQueue.Add(key.String())
	} else {
		e.podDeletionQueue.Add(key.String())
	}
	klog.Infof("success update pod is %+v\n", newPod)
}

func (e *edged) deletePod(obj interface{}) {
	pod := obj.(*v1.Pod)
	klog.Infof("start remove pod [%s]", pod.Name)
	e.podManager.DeletePod(pod)
	e.statusManager.TerminatePod(pod)
	e.probeManager.RemovePod(pod)
	klog.Infof("success remove pod [%s]", pod.Name)
}

func (e *edged) getSecretsFromMetaManager(pod *v1.Pod) ([]v1.Secret, error) {
	var secrets []v1.Secret
	for _, imagePullSecret := range pod.Spec.ImagePullSecrets {
		secret, err := e.metaClient.Secrets(e.namespace).Get(imagePullSecret.Name)
		if err != nil {
			return nil, err
		}
		secrets = append(secrets, *secret)
	}

	return secrets, nil
}

// Get pods which should be resynchronized. Currently, the following pods should be resynchronized:
//   * pods whose work is ready.
//   * pods whose sync is requested by internal modules.
func (e *edged) getPodsToSync() []*v1.Pod {
	allPods := e.podManager.GetPods()
	podUIDs := e.workQueue.GetWork()
	podUIDSet := sets.NewString()
	for _, podUID := range podUIDs {
		podUIDSet.Insert(string(podUID))
	}
	var podsToSync []*v1.Pod
	for _, pod := range allPods {
		if podUIDSet.Has(string(pod.UID)) {
			// The work of the pod is ready
			podsToSync = append(podsToSync, pod)
		}
	}
	return podsToSync
}

func (e *edged) handleConfigMap(op string, content []byte) (err error) {
	var configMap v1.ConfigMap
	err = json.Unmarshal(content, &configMap)
	if err != nil {
		return
	}
	_, exists, _ := e.configMapStore.Get(&configMap)
	switch op {
	case model.InsertOperation:
		err = e.configMapStore.Add(&configMap)
	case model.UpdateOperation:
		if exists {
			err = e.configMapStore.Update(&configMap)
		}
	case model.DeleteOperation:
		if exists {
			err = e.configMapStore.Delete(&configMap)
		}
	}
	if err == nil {
		klog.Infof("%s configMap [%s] for cache success.", op, configMap.Name)
	}
	return
}

func (e *edged) handleSecret(op string, content []byte) (err error) {
	var podSecret v1.Secret
	err = json.Unmarshal(content, &podSecret)
	if err != nil {
		return
	}
	_, exists, _ := e.secretStore.Get(&podSecret)
	switch op {
	case model.InsertOperation:
		err = e.secretStore.Add(&podSecret)
	case model.UpdateOperation:
		if exists {
			err = e.secretStore.Update(&podSecret)
		}
	case model.DeleteOperation:
		if exists {
			err = e.secretStore.Delete(&podSecret)
		}
	}
	if err == nil {
		klog.Infof("%s secret [%s] for cache success.", op, podSecret.Name)
	}
	return
}

// ProbeVolumePlugins collects all volume plugins into an easy to use list.
// PluginDir specifies the directory to search for additional third party
// volume plugins.
func ProbeVolumePlugins(pluginDir string) []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}
	hostPathConfig := volume.VolumeConfig{}
	allPlugins = append(allPlugins, configmap.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, emptydir.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, secretvolume.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, hostpath.ProbeVolumePlugins(hostPathConfig)...)
	allPlugins = append(allPlugins, csiplugin.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
	allPlugins = append(allPlugins, projected.ProbeVolumePlugins()...)
	return allPlugins
}

func (e *edged) HandlePodCleanups() error {
	if !e.isInitPodReady() {
		return nil
	}
	pods := e.podManager.GetPods()
	containerRunningPods, err := e.containerRuntime.GetPods(false)
	if err != nil {
		return err
	}
	e.removeOrphanedPodStatuses(pods)
	err = e.cleanupOrphanedPodDirs(pods, containerRunningPods)
	if err != nil {
		return fmt.Errorf("failed cleaning up orphaned pod directories: %s", err.Error())
	}

	return nil
}

func convertStrToIP(s string) []net.IP {
	substrs := strings.Split(s, ",")
	ips := make([]net.IP, 0)
	for _, substr := range substrs {
		if ip := net.ParseIP(substr); ip != nil {
			ips = append(ips, ip)
		}
	}
	return ips
}