github.com/verrazzano/verrazzano@v1.7.1/tests/e2e/multicluster/multicluster_helper.go

// Copyright (c) 2022, 2023, Oracle and/or its affiliates.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

package multicluster

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	errs "errors"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strings"
	"time"

	mcapi "github.com/verrazzano/verrazzano/cluster-operator/apis/clusters/v1alpha1"
	"github.com/verrazzano/verrazzano/pkg/k8s/resource"

	oamcore "github.com/crossplane/oam-kubernetes-runtime/apis/core/v1alpha2"
	"github.com/google/uuid"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	clustersv1alpha1 "github.com/verrazzano/verrazzano/application-operator/apis/clusters/v1alpha1"
	mcClient "github.com/verrazzano/verrazzano/cluster-operator/clientset/versioned"
	"github.com/verrazzano/verrazzano/pkg/constants"
	"github.com/verrazzano/verrazzano/pkg/k8sutil"
	vzapi "github.com/verrazzano/verrazzano/platform-operator/apis/verrazzano/v1alpha1"
	"github.com/verrazzano/verrazzano/tests/e2e/pkg"
	yv3 "gopkg.in/yaml.v3"
	corev1 "k8s.io/api/core/v1"
	rbac "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
	cmdapi "k8s.io/client-go/tools/clientcmd/api/v1"
	yml "sigs.k8s.io/yaml"
)

const (
	comps        = "components"
	mcAppConfigs = "multiclusterapplicationconfigurations"
	mcNamespace  = "verrazzano-mc"
	projects     = "verrazzanoprojects"
)

// DeployVerrazzanoProject deploys the VerrazzanoProject to the cluster with the given kubeConfig
func DeployVerrazzanoProject(projectConfiguration, kubeConfig string) error {
	file, err := pkg.FindTestDataFile(projectConfiguration)
	if err != nil {
		return err
	}
	if err := resource.CreateOrUpdateResourceFromFileInCluster(file, kubeConfig); err != nil {
		return fmt.Errorf("failed to create project resource: %v", err)
	}
	return nil
}

// TestNamespaceExists returns true if the test namespace exists in the given cluster
func TestNamespaceExists(kubeConfig string, namespace string) bool {
	_, err := pkg.GetNamespaceInCluster(namespace, kubeConfig)
	return err == nil
}

// DeployCompResource deploys the OAM Component resource to the cluster with the given kubeConfig
func DeployCompResource(compConfiguration, testNamespace, kubeConfig string) error {
	file, err := pkg.FindTestDataFile(compConfiguration)
	if err != nil {
		return err
	}
	if err := resource.CreateOrUpdateResourceFromFileInClusterInGeneratedNamespace(file, kubeConfig, testNamespace); err != nil {
		return fmt.Errorf("failed to create multi-cluster component resources: %v", err)
	}
	return nil
}

// DeployAppResource deploys the OAM Application resource to the cluster with the given kubeConfig
func DeployAppResource(appConfiguration, testNamespace, kubeConfig string) error {
	file, err := pkg.FindTestDataFile(appConfiguration)
	if err != nil {
		return err
	}
	if err := resource.CreateOrUpdateResourceFromFileInClusterInGeneratedNamespace(file, kubeConfig, testNamespace); err != nil {
		return fmt.Errorf("failed to create multi-cluster application resource: %v", err)
	}
	return nil
}
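
// A minimal sketch of the usual deploy sequence built from the three helpers
// above; the test data paths, namespace, and kubeconfig variable are
// hypothetical:
//
//	if err := DeployVerrazzanoProject("testdata/mc-project.yaml", adminKubeConfig); err != nil {
//		ginkgo.Fail(err.Error())
//	}
//	if err := DeployCompResource("testdata/mc-comp.yaml", "hello-helidon", adminKubeConfig); err != nil {
//		ginkgo.Fail(err.Error())
//	}
//	if err := DeployAppResource("testdata/mc-app.yaml", "hello-helidon", adminKubeConfig); err != nil {
//		ginkgo.Fail(err.Error())
//	}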

// VerifyMCResources verifies that the MC resources are present or absent depending on whether this is an admin
// cluster and whether the resources are placed in the given cluster
func VerifyMCResources(kubeConfig string, isAdminCluster bool, placedInThisCluster bool, namespace string, appConfigName string, expectedComps []string) bool {
	// call both appConfExists and componentExists and store the results, to avoid short-circuiting
	// since we should check both in all cases
	mcAppConfExists := appConfExists(kubeConfig, namespace, appConfigName)

	compExists := true
	// check each component in expectedComps
	for _, comp := range expectedComps {
		compExists = componentExists(kubeConfig, namespace, comp) && compExists
	}

	if isAdminCluster || placedInThisCluster {
		// always expect MC resources on admin cluster - otherwise expect them only if placed here
		return mcAppConfExists && compExists
	}
	// don't expect either
	return !mcAppConfExists && !compExists
}

// VerifyAppResourcesInCluster verifies that the app resources are either present or absent
// depending on whether the app is placed in this cluster
func VerifyAppResourcesInCluster(kubeConfig string, isAdminCluster bool, placedInThisCluster bool, projectName string, namespace string, appPods []string) (bool, error) {
	projectExists := projectExists(kubeConfig, projectName)
	podsRunning, err := checkPodsRunning(kubeConfig, namespace, appPods)
	if err != nil {
		return false, err
	}

	if placedInThisCluster {
		return projectExists && podsRunning, nil
	}
	if isAdminCluster {
		return projectExists && !podsRunning, nil
	}
	return !podsRunning && !projectExists, nil
}
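
// Because placement decisions propagate asynchronously, callers typically poll
// these verifiers with gomega. A sketch, with the cluster flags, project,
// namespace, and resource names as assumptions:
//
//	gomega.Eventually(func() bool {
//		return VerifyMCResources(kubeConfig, isAdminCluster, placed, "hello-helidon",
//			"hello-helidon-appconf", []string{"hello-helidon-component"})
//	}, mediumWait, pollingInterval).Should(gomega.BeTrue())
//
//	gomega.Eventually(func() (bool, error) {
//		return VerifyAppResourcesInCluster(kubeConfig, isAdminCluster, placed,
//			"hello-helidon-project", "hello-helidon", []string{"hello-helidon-deployment"})
//	}, mediumWait, pollingInterval).Should(gomega.BeTrue())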

// VerifyDeleteOnAdminCluster verifies that the app resources have been deleted from the admin
// cluster after the application has been deleted
func VerifyDeleteOnAdminCluster(kubeConfig string, placedInCluster bool, namespace string, projectName string, appConfigName string, appPods []string) bool {
	mcResDeleted := verifyMCResourcesDeleted(kubeConfig, namespace, projectName, appConfigName, appPods)
	if !placedInCluster {
		return mcResDeleted
	}
	appDeleted := verifyAppDeleted(kubeConfig, namespace, appPods)
	return mcResDeleted && appDeleted
}

// VerifyDeleteOnManagedCluster verifies that the app resources have been deleted from the managed
// cluster after the application has been deleted
func VerifyDeleteOnManagedCluster(kubeConfig string, namespace string, projectName string, appConfigName string, appPods []string) bool {
	mcResDeleted := verifyMCResourcesDeleted(kubeConfig, namespace, projectName, appConfigName, appPods)
	appDeleted := verifyAppDeleted(kubeConfig, namespace, appPods)

	return mcResDeleted && appDeleted
}
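
// After an app is deleted the same checks run in reverse. A sketch, reusing
// the assumed names from the example above:
//
//	gomega.Eventually(func() bool {
//		return VerifyDeleteOnAdminCluster(adminKubeConfig, placed, "hello-helidon",
//			"hello-helidon-project", "hello-helidon-appconf", []string{"hello-helidon-deployment"})
//	}, mediumWait, pollingInterval).Should(gomega.BeTrue())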

// appConfExists checks if the app config exists
func appConfExists(kubeConfig string, namespace string, appConfigName string) bool {
	gvr := schema.GroupVersionResource{
		Group:    clustersv1alpha1.SchemeGroupVersion.Group,
		Version:  clustersv1alpha1.SchemeGroupVersion.Version,
		Resource: mcAppConfigs,
	}
	return resourceExists(gvr, namespace, appConfigName, kubeConfig)
}

// resourceExists checks if the given resource exists
func resourceExists(gvr schema.GroupVersionResource, ns string, name string, kubeConfig string) bool {
	config, err := k8sutil.GetKubeConfigGivenPath(kubeConfig)
	if err != nil {
		pkg.Log(pkg.Error, fmt.Sprintf("Could not get kube config: %v\n", err))
		return false
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		pkg.Log(pkg.Error, fmt.Sprintf("Could not create dynamic client: %v\n", err))
		return false
	}

	u, err := client.Resource(gvr).Namespace(ns).Get(context.TODO(), name, metav1.GetOptions{})

	if err != nil {
		if errors.IsNotFound(err) {
			return false
		}
		pkg.Log(pkg.Error, fmt.Sprintf("Could not retrieve resource %s: %v\n", gvr.String(), err))
		return false
	}
	return u != nil
}

// componentExists checks if an individual component exists
func componentExists(kubeConfig string, namespace string, component string) bool {
	gvr := schema.GroupVersionResource{
		Group:    oamcore.Group,
		Version:  oamcore.Version,
		Resource: comps,
	}
	return resourceExists(gvr, namespace, component, kubeConfig)
}

// projectExists checks if the project with name projectName exists
func projectExists(kubeConfig string, projectName string) bool {
	gvr := schema.GroupVersionResource{
		Group:    clustersv1alpha1.SchemeGroupVersion.Group,
		Version:  clustersv1alpha1.SchemeGroupVersion.Version,
		Resource: projects,
	}
	return resourceExists(gvr, mcNamespace, projectName, kubeConfig)
}

// checkPodsRunning checks if the expected pods are running on a given cluster
func checkPodsRunning(kubeConfig string, namespace string, appPods []string) (bool, error) {
	result, err := pkg.PodsRunningInCluster(namespace, appPods, kubeConfig)
	if err != nil {
		pkg.Log(pkg.Error, fmt.Sprintf("One or more pods are not running in the namespace: %v, error: %v", namespace, err))
		return false, err
	}
	return result, nil
}

// verifyAppDeleted verifies that the workload and pods are deleted on the specified cluster
func verifyAppDeleted(kubeConfig string, namespace string, appPods []string) bool {
	podsDeleted := true
	// check that each pod is deleted
	for _, pod := range appPods {
		podsDeleted = checkPodDeleted(namespace, pod, kubeConfig) && podsDeleted
	}
	return podsDeleted
}

// checkPodDeleted checks if the given pod has been deleted from the cluster
func checkPodDeleted(namespace, pod, kubeConfig string) bool {
	deletedPod := []string{pod}
	result, _ := pkg.PodsRunningInCluster(namespace, deletedPod, kubeConfig)
	return !result
}

// verifyMCResourcesDeleted verifies that any resources created by the deployment are deleted on the specified cluster
func verifyMCResourcesDeleted(kubeConfig string, namespace string, projectName string, appConfigName string, appPods []string) bool {
	appConfExists := appConfExists(kubeConfig, namespace, appConfigName)
	projExists := projectExists(kubeConfig, projectName)

	compExists := true
	// check each component in appPods
	for _, comp := range appPods {
		compExists = componentExists(kubeConfig, namespace, comp) && compExists
	}

	return !appConfExists && !compExists && !projExists
}

const (
	shortWait       = 1 * time.Minute
	mediumWait      = 5 * time.Minute
	pollingInterval = 5 * time.Second
	manifestKey     = "yaml"
)

// Cluster holds the connection details and clients for one cluster under test.
type Cluster struct {
	Name           string
	KubeConfigPath string
	restConfig     *rest.Config
	kubeClient     *kubernetes.Clientset
	server         string
}

func getCluster(name, kcfgDir string, count int) *Cluster {
	kcfgPath := fmt.Sprintf("%s/%v/kube_config", kcfgDir, count)
	if _, err := os.Stat(kcfgPath); errs.Is(err, os.ErrNotExist) {
		return nil
	}

	return newCluster(name, kcfgPath)
}

// ManagedClusters returns the managed clusters found under KUBECONFIG_DIR.
// The kubeconfig directories are 1-indexed and index 1 belongs to the admin
// cluster, so the kubeconfig of managedN is read from index N+1.
func ManagedClusters() []*Cluster {
	kcfgDir := os.Getenv("KUBECONFIG_DIR")
	if kcfgDir == "" {
		ginkgo.Fail("KUBECONFIG_DIR is required")
	}
	var clusters []*Cluster
	count := 1
	for {
		name := fmt.Sprintf("managed%v", count)
		count++
		cluster := getCluster(name, kcfgDir, count)
		if cluster == nil {
			return clusters
		}
		clusters = append(clusters, cluster)
	}
}
   297  
   298  func AdminCluster() *Cluster {
   299  	admKubeCfg := os.Getenv("ADMIN_KUBECONFIG")
   300  	if admKubeCfg == "" {
   301  		admKubeCfg = os.Getenv("KUBECONFIG")
   302  	}
   303  	if admKubeCfg != "" {
   304  		return newCluster("admin", admKubeCfg)
   305  	}
   306  	return getCluster("admin", os.Getenv("KUBECONFIG_DIR"), 1)
   307  }
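
// The KUBECONFIG_DIR layout implied by getCluster and the index conventions
// above (the directory names here are illustrative):
//
//	$KUBECONFIG_DIR/1/kube_config   admin cluster
//	$KUBECONFIG_DIR/2/kube_config   managed1
//	$KUBECONFIG_DIR/3/kube_config   managed2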

// CreateNamespace creates the namespace if it does not already exist.
func (c *Cluster) CreateNamespace(ns string) error {
	_, err := c.kubeClient.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{})
	if err != nil && errors.IsNotFound(err) {
		n := &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name: ns,
			},
		}
		_, err = c.kubeClient.CoreV1().Namespaces().Create(context.TODO(), n, metav1.CreateOptions{})
		if err != nil {
			pkg.Log(pkg.Error, fmt.Sprintf("CreateNS %v error: %v", n, err))
		}
	}
	return err
}

// UpsertCaSec creates or updates the CA secret for the named managed cluster.
func (c *Cluster) UpsertCaSec(managedClusterName string, caCert []byte) error {
	c.CreateNamespace(constants.VerrazzanoMultiClusterNamespace)
	casecName := fmt.Sprintf("ca-secret-%s", managedClusterName)
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      casecName,
			Namespace: constants.VerrazzanoMultiClusterNamespace,
		},
		Type: corev1.SecretTypeOpaque,
		Data: map[string][]byte{"cacrt": caCert},
	}
	_, err := c.kubeClient.CoreV1().Secrets(constants.VerrazzanoMultiClusterNamespace).Get(context.TODO(), casecName, metav1.GetOptions{})
	if err != nil && errors.IsNotFound(err) {
		_, err = c.kubeClient.CoreV1().Secrets(constants.VerrazzanoMultiClusterNamespace).Create(context.TODO(), secret, metav1.CreateOptions{})
	} else {
		_, err = c.kubeClient.CoreV1().Secrets(constants.VerrazzanoMultiClusterNamespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
	}
	if err != nil {
		pkg.Log(pkg.Error, fmt.Sprintf("UpsertCaSec %v error: %v", casecName, err))
	}
	return err
}

// CreateCaSecOf copies the managed cluster's CA certificate into a CA secret on this cluster.
func (c *Cluster) CreateCaSecOf(managed *Cluster) error {
	c.CreateNamespace(constants.VerrazzanoMultiClusterNamespace)
	caCert, err := managed.getCacrt()
	if err != nil {
		pkg.Log(pkg.Error, fmt.Sprintf("Error getting %v cacrt: %v", managed.Name, err))
		return err
	}
	return c.UpsertCaSec(managed.Name, caCert)
}

// ConfigAdminCluster creates or updates the verrazzano-admin-cluster ConfigMap
// with this cluster's API server address.
func (c *Cluster) ConfigAdminCluster() error {
	name := "verrazzano-admin-cluster"
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: constants.VerrazzanoMultiClusterNamespace,
		},
		Data: map[string]string{"server": c.server},
	}
	_, err := c.kubeClient.CoreV1().ConfigMaps(constants.VerrazzanoMultiClusterNamespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil && errors.IsNotFound(err) {
		_, err = c.kubeClient.CoreV1().ConfigMaps(constants.VerrazzanoMultiClusterNamespace).Create(context.TODO(), cm, metav1.CreateOptions{})
	} else {
		_, err = c.kubeClient.CoreV1().ConfigMaps(constants.VerrazzanoMultiClusterNamespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
	}
	return err
}

// GetSecret gets the named secret in the given namespace.
func (c *Cluster) GetSecret(ns, name string) (*corev1.Secret, error) {
	return c.kubeClient.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
}

// GetSecretData returns the data stored under the given key of the named secret.
func (c *Cluster) GetSecretData(ns, name, key string) ([]byte, error) {
	secret, err := c.GetSecret(ns, name)
	if secret == nil || err != nil {
		return []byte{}, err
	}
	data, ok := secret.Data[key]
	if !ok {
		return []byte{}, fmt.Errorf("%s not found in %s", key, name)
	}
	return data, nil
}

// GetSecretDataAsString returns the secret data under key as a string, or "" if it is unavailable.
func (c *Cluster) GetSecretDataAsString(ns, name, key string) string {
	data, _ := c.GetSecretData(ns, name, key)
	if len(data) > 0 {
		return string(data)
	}
	return ""
}

// getCacrt returns the cluster CA certificate, preferring the Rancher tls-ca
// secret in cattle-system and falling back to the Verrazzano system CA.
func (c *Cluster) getCacrt() ([]byte, error) {
	data, err := c.GetSecretData(constants.RancherSystemNamespace, constants.RancherTLSCA, constants.RancherTLSCAKey)
	if len(data) != 0 {
		return data, err
	}
	return c.GetSecretData(constants.VerrazzanoSystemNamespace, "verrazzano-tls", "ca.crt")
}

// Apply applies the given manifest to the cluster, retrying until it succeeds
// or the wait times out.
func (c *Cluster) Apply(data []byte) {
	gomega.Eventually(func() bool {
		err := apply(data, c.restConfig)
		if err != nil {
			pkg.Log(pkg.Error, fmt.Sprintf("Error applying changes on %s: %v", c.Name, err))
		}
		return err == nil
	}, mediumWait, pollingInterval).Should(gomega.BeTrue(), fmt.Sprintf("failed to apply changes on %s", c.Name))
}

// UpsertManagedCluster creates or updates the VerrazzanoManagedCluster resource
// for the named cluster and waits for it to report a Ready condition.
func (c *Cluster) UpsertManagedCluster(name string) error {
	casec := fmt.Sprintf("ca-secret-%s", name)
	vmc := &mcapi.VerrazzanoManagedCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: constants.VerrazzanoMultiClusterNamespace,
		},
		Spec: mcapi.VerrazzanoManagedClusterSpec{
			Description: "VerrazzanoManagedCluster object",
			CASecret:    casec,
		},
	}
	mcCli, err := mcClient.NewForConfig(c.restConfig)
	if err != nil {
		return err
	}
	_, err = mcCli.ClustersV1alpha1().
		VerrazzanoManagedClusters(constants.VerrazzanoMultiClusterNamespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil && errors.IsNotFound(err) {
		_, err = mcCli.ClustersV1alpha1().
			VerrazzanoManagedClusters(constants.VerrazzanoMultiClusterNamespace).
			Create(context.TODO(), vmc, metav1.CreateOptions{})
	} else {
		_, err = mcCli.ClustersV1alpha1().
			VerrazzanoManagedClusters(constants.VerrazzanoMultiClusterNamespace).
			Update(context.TODO(), vmc, metav1.UpdateOptions{})
	}
	if err != nil {
		return fmt.Errorf("failed to create or update VerrazzanoManagedCluster %v: %w", name, err)
	}
	gomega.Eventually(func() bool {
		vmcCreated, err := mcCli.ClustersV1alpha1().
			VerrazzanoManagedClusters(constants.VerrazzanoMultiClusterNamespace).
			Get(context.TODO(), vmc.Name, metav1.GetOptions{})
		if err != nil {
			pkg.Log(pkg.Error, fmt.Sprintf("Error getting vmc %s: %v", vmc.Name, err))
		}
		// guard against a nil result before touching Status, to avoid a panic
		if vmcCreated == nil || len(vmcCreated.Status.Conditions) == 0 {
			return false
		}
		size := len(vmcCreated.Status.Conditions)
		return vmcCreated.Status.Conditions[size-1].Type == mcapi.ConditionReady
	}, mediumWait, pollingInterval).Should(gomega.BeTrue(), fmt.Sprintf("VerrazzanoManagedCluster %s is not ready", vmc.Name))
	return nil
}

// Register registers the managed cluster with this (admin) cluster: it copies
// the managed cluster's CA secret, records the admin server address, creates
// the VerrazzanoManagedCluster resource, and applies the generated
// registration manifest on the managed cluster.
func (c *Cluster) Register(managed *Cluster) error {
	err := c.CreateCaSecOf(managed)
	if err != nil {
		return err
	}
	err = c.ConfigAdminCluster()
	if err != nil {
		return err
	}
	err = c.UpsertManagedCluster(managed.Name)
	if err != nil {
		pkg.Log(pkg.Error, fmt.Sprintf("CreateManagedCluster %v error: %v", managed.Name, err))
	}
	reg, err := c.GetManifest(managed.Name)
	if err != nil {
		pkg.Log(pkg.Error, fmt.Sprintf("manifest %v error: %v", managed.Name, err))
	}
	managed.Apply(reg)
	return nil
}
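
// A sketch of registering every managed cluster with the admin cluster; the
// GetVMC call at the end is just one way to confirm the VMC exists:
//
//	admin := AdminCluster()
//	for _, managed := range ManagedClusters() {
//		if err := admin.Register(managed); err != nil {
//			ginkgo.Fail(fmt.Sprintf("failed to register %s: %v", managed.Name, err))
//		}
//		if _, err := admin.GetVMC(managed.Name); err != nil {
//			ginkgo.Fail(fmt.Sprintf("VMC %s not found: %v", managed.Name, err))
//		}
//	}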

// GetVMC gets the named VerrazzanoManagedCluster resource.
func (c *Cluster) GetVMC(name string) (*mcapi.VerrazzanoManagedCluster, error) {
	mcCli, err := mcClient.NewForConfig(c.restConfig)
	if err != nil {
		return nil, err
	}
	return mcCli.ClustersV1alpha1().
		VerrazzanoManagedClusters(constants.VerrazzanoMultiClusterNamespace).Get(context.TODO(), name, metav1.GetOptions{})
}

// GetManifest waits for the registration manifest secret of the named managed
// cluster to be populated, then returns its contents.
func (c *Cluster) GetManifest(name string) ([]byte, error) {
	manifest := fmt.Sprintf("verrazzano-cluster-%s-manifest", name)
	gomega.Eventually(func() bool {
		data, _ := c.GetSecretData(constants.VerrazzanoMultiClusterNamespace, manifest, manifestKey)
		return len(data) > 0
	}, shortWait, pollingInterval).Should(gomega.BeTrue(), fmt.Sprintf("manifest %s is not ready", manifest))
	return c.GetSecretData(constants.VerrazzanoMultiClusterNamespace, manifest, manifestKey)
}

// GetRegistration gets the registration secret of the named managed cluster.
func (c *Cluster) GetRegistration(name string) (*corev1.Secret, error) {
	reg := fmt.Sprintf("verrazzano-cluster-%s-registration", name)
	r, err := c.GetSecret(constants.VerrazzanoMultiClusterNamespace, reg)
	if err != nil && errors.IsNotFound(err) {
		return nil, err
	}
	return r, err
}

// GetCR gets the Verrazzano CR. If waitForReady is true, wait for up to 10 minutes for it to reach the Ready state.
func (c *Cluster) GetCR(waitForReady bool) *vzapi.Verrazzano {
	if waitForReady {
		gomega.Eventually(func() error {
			cr, err := pkg.GetVerrazzanoInstallResourceInCluster(c.KubeConfigPath)
			if err != nil {
				return err
			}
			if cr.Status.State != vzapi.VzStateReady {
				return fmt.Errorf("CR in state %s, not Ready yet", cr.Status.State)
			}
			return nil
		}, 10*time.Minute, pollingInterval).Should(gomega.BeNil(), "Expected to get Verrazzano CR with Ready state")
	}
	// Get the CR
	cr, err := pkg.GetVerrazzanoInstallResourceInCluster(c.KubeConfigPath)
	if err != nil {
		ginkgo.Fail(err.Error())
	}
	if cr == nil {
		ginkgo.Fail("CR is nil")
	}
	return cr
}

// GenerateCA generates a custom CA certificate via cert-manager and returns
// the name of the CA, which is also the name of its secret.
func (c *Cluster) GenerateCA() string {
	caCertTemp := `
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: %s
  namespace: cert-manager
spec:
  commonName: %s
  isCA: true
  issuerRef:
    name: verrazzano-selfsigned-issuer
  secretName: %s
`
	caname := fmt.Sprintf("gen-ca-%v", uuid.NewString()[:7])
	cacert := fmt.Sprintf(caCertTemp, caname, caname, caname)
	c.Apply([]byte(cacert))
	gomega.Eventually(func() bool {
		casec, err := c.GetSecret(constants.CertManagerNamespace, caname)
		if err != nil || casec == nil {
			pkg.Log(pkg.Error, fmt.Sprintf("Error getting %s: %v", caname, err))
			return false
		}
		return true
	}, mediumWait, pollingInterval).Should(gomega.BeTrue(), fmt.Sprintf("Failed creating CA %v", caname))
	return caname
}
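
// Usage sketch: the returned name identifies both the Certificate and the
// secret cert-manager issues for it, so the CA material can be read back
// directly (the variable names are illustrative):
//
//	caname := admin.GenerateCA()
//	caData, err := admin.GetSecretData(constants.CertManagerNamespace, caname, "ca.crt")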

// FindFluentdPod returns the first fluentd pod in the verrazzano-system namespace, or nil if none exists.
func (c *Cluster) FindFluentdPod() *corev1.Pod {
	list, _ := c.kubeClient.CoreV1().Pods(constants.VerrazzanoSystemNamespace).List(context.TODO(), metav1.ListOptions{})
	if list != nil {
		for i := range list.Items {
			if strings.HasPrefix(list.Items[i].Name, "fluentd-") {
				return &list.Items[i]
			}
		}
	}
	return nil
}

const errMsg = "Error: %v"

// FluentdLogs returns the last lines of the fluentd container logs; the
// restartedAfter argument is currently unused.
func (c *Cluster) FluentdLogs(lines int64, restartedAfter time.Time) string {
	pod := c.FindFluentdPod()
	if pod == nil {
		return fmt.Sprintf(errMsg, "cannot find fluentd pod")
	}
	return c.PodLogs(constants.VerrazzanoSystemNamespace, pod.Name, "fluentd", lines)
}

// PodLogs returns the last lines of the given container's logs.
func (c *Cluster) PodLogs(ns, podName, container string, lines int64) string {
	logsReq := c.kubeClient.CoreV1().Pods(ns).GetLogs(podName, &corev1.PodLogOptions{
		Container: container,
		Follow:    false,
		TailLines: &lines,
	})
	podLogs, err := logsReq.Stream(context.TODO())
	if err != nil {
		return fmt.Sprintf(errMsg, err)
	}
	if podLogs != nil {
		defer podLogs.Close()
		buf := new(bytes.Buffer)
		_, err = io.Copy(buf, podLogs)
		if err != nil {
			return fmt.Sprintf(errMsg, err)
		}
		return buf.String()
	}
	return ""
}
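
// Log retrieval sketch: tail the last 100 lines of fluentd, or of any other
// container by name (the pod and container names here are hypothetical):
//
//	logs := c.FluentdLogs(100, time.Now())
//	apiLogs := c.PodLogs("verrazzano-system", "verrazzano-authproxy-12345", "verrazzano-authproxy", 100)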

// GetPrometheusIngress returns the Prometheus ingress host of the cluster.
func (c *Cluster) GetPrometheusIngress() string {
	return pkg.GetPrometheusIngressHost(c.KubeConfigPath)
}

// GetThanosIngress returns the Thanos query ingress host of the cluster.
func (c *Cluster) GetThanosIngress() string {
	return pkg.GetThanosQueryIngressHost(c.KubeConfigPath)
}

// GetQueryIngress returns the Thanos query store ingress host of the cluster.
func (c *Cluster) GetQueryIngress() string {
	return pkg.GetQueryStoreIngressHost(c.KubeConfigPath)
}

// newCluster builds a Cluster for the given kubeconfig, resolving the API
// server address from docker or from the kubeconfig itself.
func newCluster(name, kubeCfgPath string) *Cluster {
	server := serverFromDockerInspect(name)
	if server == "" {
		server = serverFromKubeConfig(kubeCfgPath, name)
	}
	cnf, err := clientcmd.BuildConfigFromFlags("", kubeCfgPath)
	failOnErr := func(err error) {
		if err != nil {
			ginkgo.Fail(fmt.Sprintf("Error getting Cluster %v: %v", name, err))
		}
	}
	failOnErr(err)
	cli, err := kubernetes.NewForConfig(cnf)
	failOnErr(err)
	return &Cluster{Name: name, KubeConfigPath: kubeCfgPath, kubeClient: cli, server: server, restConfig: cnf}
}

// serverFromDockerInspect derives the API server address of a kind cluster
// from the IP address of its control-plane container.
func serverFromDockerInspect(name string) string {
	cmd := exec.Command("docker", "inspect", fmt.Sprintf("%s-control-plane", name)) //nolint:gosec
	out, err := cmd.Output()
	if err == nil {
		var info []map[string]interface{}
		if err := json.Unmarshal(out, &info); err == nil && len(info) > 0 {
			ipa := yq(info[0], "NetworkSettings", "Networks", "kind", "IPAddress")
			if ipa != nil {
				if addr, ok := ipa.(string); ok && addr != "" {
					return fmt.Sprintf("https://%s:6443", addr)
				}
			}
		}
	}
	return ""
}

// serverFromKubeConfig reads the API server address from the kind-internal
// kubeconfig, falling back to the kubeconfig file on disk.
func serverFromKubeConfig(kubeCfgPath, name string) string {
	kubeServerConf := cmdapi.Config{}
	cmd := exec.Command("kind", "get", "kubeconfig", "--internal", "--name", name) //nolint:gosec
	out, err := cmd.Output()
	if err != nil {
		out, _ = os.ReadFile(kubeCfgPath)
	}
	if err := yv3.Unmarshal(out, &kubeServerConf); err != nil {
		return ""
	}
	for _, c := range kubeServerConf.Clusters {
		return c.Cluster.Server
	}
	return ""
}

// apply creates or updates every YAML document in data on the cluster
// reachable through config.
func apply(data []byte, config *rest.Config) error {
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		return fmt.Errorf("failed to create dynamic client: %w", err)
	}
	disco, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		return fmt.Errorf("failed to create discovery client: %w", err)
	}
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(disco))
	reader := utilyaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(data)))
	for {
		uns := &unstructured.Unstructured{Object: map[string]interface{}{}}
		unsMap, err := readYaml(reader, mapper, uns)
		if err != nil {
			return fmt.Errorf("failed to read resource from bytes: %w", err)
		}
		if unsMap == nil {
			// no more YAML documents
			return nil
		}
		if err = upsert(client, config, uns, unsMap); err != nil {
			pkg.Log(pkg.Error, fmt.Sprintf("Error upsert %s: %v \n", uns.GetName(), err))
			return err
		}
	}
}
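
// apply handles multi-document manifests: the YAMLReader splits data on "---"
// and each document is mapped and upserted in turn. A sketch (the namespace
// and ConfigMap here are arbitrary examples):
//
//	manifest := []byte(`apiVersion: v1
//	kind: Namespace
//	metadata:
//	  name: demo
//	---
//	apiVersion: v1
//	kind: ConfigMap
//	metadata:
//	  name: demo-cm
//	  namespace: demo`)
//	if err := apply(manifest, c.restConfig); err != nil {
//		pkg.Log(pkg.Error, fmt.Sprintf("apply failed: %v", err))
//	}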

// upsert creates the resource, updating it if it already exists;
// ClusterRoleBindings that fail to create for other reasons are retried
// through the typed RBAC client.
func upsert(client dynamic.Interface, config *rest.Config, uns *unstructured.Unstructured, unsMap *meta.RESTMapping) error {
	var err error
	if uns.GetNamespace() == "" {
		_, err = client.Resource(unsMap.Resource).Create(context.TODO(), uns, metav1.CreateOptions{})
	} else {
		_, err = client.Resource(unsMap.Resource).Namespace(uns.GetNamespace()).Create(context.TODO(), uns, metav1.CreateOptions{})
	}
	if err != nil && errors.IsAlreadyExists(err) {
		if err = update(client, uns, unsMap); err != nil {
			return err
		}
	} else if err != nil {
		if uns.GetKind() == "ClusterRoleBinding" {
			if err = upsertCRB(config, uns); err != nil {
				return err
			}
		} else {
			pkg.Log(pkg.Error, fmt.Sprintf("failed to create resource: %v", err))
			return fmt.Errorf("failed to create resource: %w", err)
		}
	}
	return nil
}

// update updates the resource in place; the cattle-cluster-agent Service is
// deleted and recreated if the in-place update fails.
func update(client dynamic.Interface, uns *unstructured.Unstructured, unsMap *meta.RESTMapping) error {
	resource, err := client.Resource(unsMap.Resource).Namespace(uns.GetNamespace()).Get(context.TODO(), uns.GetName(), metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to get resource for update: %w", err)
	}
	uns.SetResourceVersion(resource.GetResourceVersion())
	_, err = client.Resource(unsMap.Resource).Namespace(uns.GetNamespace()).Update(context.TODO(), uns, metav1.UpdateOptions{})
	if err != nil && uns.GetKind() == "Service" && uns.GetName() == "cattle-cluster-agent" {
		_ = client.Resource(unsMap.Resource).Namespace(uns.GetNamespace()).Delete(context.TODO(), uns.GetName(), metav1.DeleteOptions{})
		uns.SetResourceVersion("")
		_, err = client.Resource(unsMap.Resource).Namespace(uns.GetNamespace()).Create(context.TODO(), uns, metav1.CreateOptions{})
	}
	if err != nil {
		return fmt.Errorf("failed to update resource: %w", err)
	}
	return nil
}

// upsertCRB creates or updates a ClusterRoleBinding using the typed RBAC client.
func upsertCRB(config *rest.Config, uns *unstructured.Unstructured) error {
	cli, err := kubernetes.NewForConfig(config)
	if err != nil {
		return fmt.Errorf("failed to create kubernetes client: %w", err)
	}
	crb := clusterRoleBinding(uns)
	_, err = cli.RbacV1().ClusterRoleBindings().Get(context.TODO(), crb.Name, metav1.GetOptions{})
	if err != nil && errors.IsNotFound(err) {
		_, err = cli.RbacV1().ClusterRoleBindings().Create(context.TODO(), crb, metav1.CreateOptions{})
	} else {
		_, err = cli.RbacV1().ClusterRoleBindings().Update(context.TODO(), crb, metav1.UpdateOptions{})
	}
	if err != nil {
		return fmt.Errorf("failed to create ClusterRoleBinding: %w", err)
	}
	return nil
}

// clusterRoleBinding converts the unstructured object into a typed ClusterRoleBinding.
func clusterRoleBinding(uns *unstructured.Unstructured) *rbac.ClusterRoleBinding {
	rb := &rbac.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:      uns.GetName(),
			Namespace: uns.GetNamespace(),
			Labels:    uns.GetLabels(),
		},
		Subjects: []rbac.Subject{},
		RoleRef: rbac.RoleRef{
			Kind:     yqString(uns.Object, "roleRef", "kind"),
			Name:     yqString(uns.Object, "roleRef", "name"),
			APIGroup: yqString(uns.Object, "roleRef", "apiGroup"),
		},
	}
	if rb.Name == "" {
		rb.Name = yqString(uns.Object, "metadata", "name")
	}
	if rb.Namespace == "" {
		rb.Namespace = yqString(uns.Object, "metadata", "namespace")
	}
	return crbSubjects(crbLabels(rb, uns), uns)
}

// crbLabels copies metadata.labels from the unstructured object when the typed
// conversion produced no labels.
func crbLabels(rb *rbac.ClusterRoleBinding, uns *unstructured.Unstructured) *rbac.ClusterRoleBinding {
	if len(rb.Labels) == 0 {
		rb.Labels = map[string]string{}
		labels := yq(uns.Object, "metadata", "labels")
		// use a checked type assertion so malformed labels cannot panic
		if labelMap, ok := labels.(map[interface{}]interface{}); ok {
			for k, v := range labelMap {
				rb.Labels[fmt.Sprintf("%v", k)] = fmt.Sprintf("%v", v)
			}
		}
	}
	return rb
}

// crbSubjects copies the subjects from the unstructured object.
func crbSubjects(rb *rbac.ClusterRoleBinding, uns *unstructured.Unstructured) *rbac.ClusterRoleBinding {
	if sbj := yq(uns.Object, "subjects"); sbj != nil {
		arr, ok := sbj.([]interface{})
		if ok && len(arr) > 0 {
			for _, i := range arr {
				rb.Subjects = append(rb.Subjects, rbac.Subject{
					Kind:      yqString(i, "kind"),
					Name:      yqString(i, "name"),
					Namespace: yqString(i, "namespace"),
				})
			}
		}
	}
	return rb
}

// readYaml reads the next YAML document from the reader into uns and returns
// its REST mapping; a nil mapping with a nil error signals EOF.
func readYaml(reader *utilyaml.YAMLReader, mapper *restmapper.DeferredDiscoveryRESTMapper, uns *unstructured.Unstructured) (*meta.RESTMapping, error) {
	buf, err := reader.Read()
	if err == io.EOF {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("failed to read resource section: %w", err)
	}
	if err = yml.Unmarshal(buf, &uns.Object); err != nil {
		return nil, fmt.Errorf("failed to unmarshal resource: %w", err)
	}
	unsGvk := schema.FromAPIVersionAndKind(uns.GetAPIVersion(), uns.GetKind())
	unsMap, err := mapper.RESTMapping(unsGvk.GroupKind(), unsGvk.Version)
	if err != nil {
		return unsMap, fmt.Errorf("failed to map resource kind: %w", err)
	}
	return unsMap, nil
}

// yq walks the given path through nested maps, returning nil if any step is
// missing; both string-keyed and interface-keyed maps are handled.
func yq(node interface{}, path ...string) interface{} {
	for _, p := range path {
		if node == nil {
			return nil
		}
		if nodeMap, ok := node.(map[string]interface{}); ok {
			node = nodeMap[p]
		} else if n, ok := node.(map[interface{}]interface{}); ok {
			node = n[p]
		} else {
			return nil
		}
	}
	return node
}

// yqString is yq for string values; non-string values are formatted with %v.
func yqString(node interface{}, path ...string) string {
	val := yq(node, path...)
	if val == nil {
		return ""
	}
	if s, ok := val.(string); ok {
		return s
	}
	return fmt.Sprintf("%v", val)
}
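
// yq and yqString walk paths through the maps produced by YAML or JSON
// decoding. For example, given obj decoded from `{"roleRef": {"kind": "ClusterRole"}}`:
//
//	yq(obj, "roleRef", "kind")       // "ClusterRole" (as interface{})
//	yqString(obj, "roleRef", "kind") // "ClusterRole"
//	yqString(obj, "roleRef", "nope") // "" (missing path)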