github.com/verrazzano/verrazzano@v1.7.0/pkg/k8s/ready/daemonset_ready.go

// Copyright (c) 2022, Oracle and/or its affiliates.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

package ready

import (
	"context"
	"fmt"

	"github.com/verrazzano/verrazzano/pkg/log/vzlog"
	"github.com/verrazzano/verrazzano/platform-operator/constants"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// DaemonSetsAreReady checks that the named daemonsets have at least the expected number of nodes updated, available, and ready
func DaemonSetsAreReady(log vzlog.VerrazzanoLogger, client client.Client, namespacedNames []types.NamespacedName, expectedNodes int32, prefix string) bool {
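	// Velero's restic daemonset pods are selected by their "name" label rather than the
	// daemonset's own selector; see the selector adjustment for the Velero namespace below.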
	resticPodLabel := map[string]string{
		"name": constants.ResticDaemonSetName,
	}
	resticPodSelector := &metav1.LabelSelector{
		MatchLabels: resticPodLabel,
	}
	for _, namespacedName := range namespacedNames {
		daemonset := appsv1.DaemonSet{}
		if err := client.Get(context.TODO(), namespacedName, &daemonset); err != nil {
			if errors.IsNotFound(err) {
				log.Progressf("%s is waiting for daemonset %v to exist", prefix, namespacedName)
				return false
			}
			log.Errorf("Failed getting daemonset %v: %v", namespacedName, err)
			return false
		}
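		// UpdatedNumberScheduled is the number of nodes that are running the updated daemonset pod spec.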
		if daemonset.Status.UpdatedNumberScheduled < expectedNodes {
			log.Progressf("%s is waiting for daemonset %s to have %v updated nodes. Current updated node count is %v", prefix, namespacedName,
				expectedNodes, daemonset.Status.UpdatedNumberScheduled)
			return false
		}

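		// NumberAvailable is the number of nodes that have the daemonset pod running and available
		// (ready for at least minReadySeconds).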
		if daemonset.Status.NumberAvailable < expectedNodes {
			log.Progressf("%s is waiting for daemonset %s to have %v available nodes. Current available node count is %v", prefix, namespacedName,
				expectedNodes, daemonset.Status.NumberAvailable)
			return false
		}

		// The Velero install deploys a daemonset and a deployment with common labels, so the selector
		// must be adjusted for the pod fetch logic to work as expected.
		podSelector := daemonset.Spec.Selector
		if namespacedName.Namespace == constants.VeleroNameSpace {
			podSelector = resticPodSelector
		}

		if !podsReadyDaemonSet(log, client, namespacedName, podSelector, expectedNodes, prefix) {
			return false
		}
		log.Oncef("%s has enough nodes for daemonset %v", prefix, namespacedName)
	}
	return true
}

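// Example (illustrative sketch only): a caller might wait for a single daemonset across all
// schedulable nodes roughly like this; the namespace, name, nodeCount, and prefix values here
// are hypothetical placeholders, not taken from this package:
//
//	ready := DaemonSetsAreReady(log, cli,
//		[]types.NamespacedName{{Namespace: "monitoring", Name: "node-exporter"}},
//		nodeCount, "example component")
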
// podsReadyDaemonSet checks that the expected number of pods are using the latest controllerRevision resource
// and are running and ready
func podsReadyDaemonSet(log vzlog.VerrazzanoLogger, client client.Client, namespacedName types.NamespacedName, selector *metav1.LabelSelector, expectedNodes int32, prefix string) bool {
	// Get a list of pods for the given namespace and label selector
	pods := GetPodsList(log, client, namespacedName, selector)
	if pods == nil {
		return false
	}

	// If no pods are found, log a progress message and return
	if len(pods.Items) == 0 {
		log.Progressf("Found no pods with matching label selector %v for namespace %s", selector, namespacedName.Namespace)
		return false
	}

	// Loop through the pods, identifying those that are using the latest controllerRevision resource
	var savedPods []corev1.Pod
	var savedRevision int64
	var savedControllerRevisionHash string
	for _, pod := range pods.Items {
		// Log an error and return if the controller-revision-hash label is not found. This should never happen.
		if _, ok := pod.Labels[controllerRevisionHashLabel]; !ok {
			log.Errorf("Failed to find pod label [controller-revision-hash] for pod %s/%s", pod.Namespace, pod.Name)
			return false
		}

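		// Pods whose controller-revision-hash matches the latest hash seen so far can be collected
		// without fetching the controllerRevision resource again.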
		if pod.Labels[controllerRevisionHashLabel] == savedControllerRevisionHash {
			savedPods = append(savedPods, pod)
			continue
		}

		// Get the controllerRevision resource for the pod given the controller-revision-hash label
		var cr appsv1.ControllerRevision
		crName := fmt.Sprintf("%s-%s", namespacedName.Name, pod.Labels[controllerRevisionHashLabel])
		err := client.Get(context.TODO(), types.NamespacedName{Namespace: namespacedName.Namespace, Name: crName}, &cr)
		if err != nil {
			log.Errorf("Failed to get controllerRevision %s/%s: %v", namespacedName.Namespace, crName, err)
			return false
		}

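		// A newer revision supersedes the saved one: reset the saved pod list and start
		// collecting pods for this revision.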
		if cr.Revision > savedRevision {
			savedRevision = cr.Revision
			savedControllerRevisionHash = pod.Labels[controllerRevisionHashLabel]
			savedPods = []corev1.Pod{}
			savedPods = append(savedPods, pod)
		}
	}

	// Make sure pods using the latest controllerRevision resource are ready.
	podsReady, success := EnsurePodsAreReady(log, savedPods, expectedNodes, prefix)
	if !success {
		return false
	}

	if podsReady < expectedNodes {
		log.Progressf("%s is waiting for daemonset %s to have %v ready pods. Current ready pod count is %v", prefix, namespacedName,
			expectedNodes, podsReady)
		return false
	}

	return true
}