open-cluster-management.io/governance-policy-propagator@v0.13.0/test/e2e/case2_aggregation_test.go

// Copyright (c) 2020 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	policiesv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
	"open-cluster-management.io/governance-policy-propagator/controllers/common"
	"open-cluster-management.io/governance-policy-propagator/test/utils"
)

var _ = Describe("Test policy status aggregation", func() {
	const (
		case2PolicyName string = "case2-test-policy"
		case2PolicyYaml string = "../resources/case2_aggregation/case2-test-policy.yaml"
		faultyPBName    string = "case2-faulty-placementbinding"
		faultyPBYaml    string = "../resources/case2_aggregation/faulty-placementbinding.yaml"
	)

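	// The cases below drive the root policy's status.placement aggregation by creating,
	// patching, and deleting PlacementRules and PlacementBindings, then compare the root
	// policy status against the expected fixtures under ../resources/case2_aggregation.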
	Describe("Root status from different placements", Ordered, func() {
		AfterAll(func() {
			utils.Kubectl("delete",
				"-f", faultyPBYaml,
				"-n", testNamespace,
				"--kubeconfig="+kubeconfigHub)
			utils.Kubectl("delete",
				"-f", case2PolicyYaml,
				"-n", testNamespace,
				"--kubeconfig="+kubeconfigHub)
			opt := metav1.ListOptions{}
			utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 0, false, 10)
		})

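		// What makes the PlacementBinding "faulty" is defined entirely in the
		// faulty-placementbinding.yaml fixture; this case only verifies that the
		// object can be created in the user namespace.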
		It("should create the faulty PlacementBinding in user ns", func() {
			By("Creating " + faultyPBName)
			utils.Kubectl("apply",
				"-f", faultyPBYaml,
				"-n", testNamespace,
				"--kubeconfig="+kubeconfigHub)
			pb := utils.GetWithTimeout(
				clientHubDynamic, gvrPlacementBinding, faultyPBName, testNamespace, true, defaultTimeoutSeconds,
			)
			Expect(pb).NotTo(BeNil())
		})
		It("should create the policy in user ns", func() {
			By("Creating " + case2PolicyYaml)
			utils.Kubectl("apply",
				"-f", case2PolicyYaml,
				"-n", testNamespace,
				"--kubeconfig="+kubeconfigHub)
			plc := utils.GetWithTimeout(
				clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
			)
			Expect(plc).NotTo(BeNil())
		})

		It("should contain status.placement with managed1", func() {
			By("Patching " + case2PolicyName + "-plr with the decision of cluster managed1")
			plr := utils.GetWithTimeout(
				clientHubDynamic, gvrPlacementRule, case2PolicyName+"-plr", testNamespace, true, defaultTimeoutSeconds,
			)
			plr.Object["status"] = utils.GeneratePlrStatus("managed1")
			_, err := clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus(
				context.TODO(), plr, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			// The replicated policy is named <root namespace>.<root name> in the cluster namespace.
			plc := utils.GetWithTimeout(
				clientHubDynamic, gvrPolicy, testNamespace+"."+case2PolicyName, "managed1", true, defaultTimeoutSeconds,
			)
			Expect(plc).ToNot(BeNil())
			opt := metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case2PolicyName,
			}
			utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 1, true, defaultTimeoutSeconds)
			By("Checking the status.placement of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed1-status.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should contain status.placement with both managed1 and managed2", func() {
			By("Patching " + case2PolicyName + "-plr with the decision of clusters managed1 and managed2")
			plr := utils.GetWithTimeout(
				clientHubDynamic, gvrPlacementRule, case2PolicyName+"-plr", testNamespace, true, defaultTimeoutSeconds,
			)
			plr.Object["status"] = utils.GeneratePlrStatus("managed1", "managed2")
			_, err := clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus(
				context.TODO(), plr, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			plc := utils.GetWithTimeout(
				clientHubDynamic, gvrPolicy, testNamespace+"."+case2PolicyName, "managed2", true, defaultTimeoutSeconds,
			)
			Expect(plc).ToNot(BeNil())
			opt := metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case2PolicyName,
			}
			utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 2, true, defaultTimeoutSeconds)
			By("Checking the status.placement of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-both-status.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should contain status.placement with managed2", func() {
			By("Patching " + case2PolicyName + "-plr with the decision of cluster managed2")
			plr := utils.GetWithTimeout(
				clientHubDynamic, gvrPlacementRule, case2PolicyName+"-plr", testNamespace, true, defaultTimeoutSeconds,
			)
			plr.Object["status"] = utils.GeneratePlrStatus("managed2")
			_, err := clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus(
				context.TODO(), plr, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			plc := utils.GetWithTimeout(
				clientHubDynamic, gvrPolicy, testNamespace+"."+case2PolicyName, "managed2", true, defaultTimeoutSeconds,
			)
			Expect(plc).ToNot(BeNil())
			opt := metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case2PolicyName,
			}
			utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 1, true, defaultTimeoutSeconds)
			By("Checking the status.placement of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed2-status.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should contain status.placement with two pb/plr", func() {
			By("Creating pb-plr-2 to bind a second placement")
			utils.Kubectl("apply",
				"-f", "../resources/case2_aggregation/pb-plr-2.yaml",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-both-placement-single-status.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should contain status.placement with two pb/plr and both statuses", func() {
			By("Patching " + case2PolicyName + "-plr2 with the decision of cluster managed1")
			plr := utils.GetWithTimeout(
				clientHubDynamic, gvrPlacementRule, case2PolicyName+"-plr2", testNamespace, true, defaultTimeoutSeconds,
			)
			plr.Object["status"] = utils.GeneratePlrStatus("managed1")
			_, err := clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus(
				context.TODO(), plr, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-both-placement-status.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should still contain status.placement with two pb/plr and both statuses", func() {
			By("Patching " + case2PolicyName + "-plr2 with both managed1 and managed2")
			plr := utils.GetWithTimeout(
				clientHubDynamic, gvrPlacementRule, case2PolicyName+"-plr2", testNamespace, true, defaultTimeoutSeconds,
			)
			plr.Object["status"] = utils.GeneratePlrStatus("managed1", "managed2")
			_, err := clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus(
				context.TODO(), plr, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-both-placement-status.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should still contain status.placement with two pb, one plr, and both statuses", func() {
			By("Removing " + case2PolicyName + "-plr")
			utils.Kubectl("delete",
				"placementrule", case2PolicyName+"-plr",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-both-placement-status-missing-plr.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should clear out status.status", func() {
			By("Removing " + case2PolicyName + "-plr2")
			utils.Kubectl("delete",
				"placementrule", case2PolicyName+"-plr2",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-both-placementbinding.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
		It("should clear out status", func() {
			By("Removing " + case2PolicyName + "-pb and " + case2PolicyName + "-pb2")
			utils.Kubectl("delete",
				"placementbinding", case2PolicyName+"-pb",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			utils.Kubectl("delete",
				"placementbinding", case2PolicyName+"-pb2",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			By("Checking the status of root policy")
			emptyStatus := map[string]interface{}{}
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(emptyStatus))
		})
	})
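	// The cases below patch the compliance state of the replicated policies directly and
	// verify how the root policy's overall compliance is aggregated: a NonCompliant cluster
	// outweighs Pending, and Pending outweighs Compliant.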
	Describe("Root compliance from managed statuses", Ordered, func() {
		// To get around `testNamespace` not being initialized during Ginkgo's Tree Construction phase
		listOpts := func() metav1.ListOptions {
			return metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case2PolicyName,
			}
		}

		BeforeAll(func() {
			By("Creating " + case2PolicyYaml)
			utils.Kubectl("apply",
				"-f", case2PolicyYaml,
				"-n", testNamespace,
				"--kubeconfig="+kubeconfigHub)
			plc := utils.GetWithTimeout(
				clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
			)
			Expect(plc).NotTo(BeNil())

			By("Patching " + case2PolicyName + "-plr with the decision of clusters managed1 and managed2")
			plr := utils.GetWithTimeout(
				clientHubDynamic, gvrPlacementRule, case2PolicyName+"-plr", testNamespace, true, defaultTimeoutSeconds,
			)
			plr.Object["status"] = utils.GeneratePlrStatus("managed1", "managed2")
			_, err := clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus(
				context.TODO(), plr, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			plc = utils.GetWithTimeout(
				clientHubDynamic, gvrPolicy, testNamespace+"."+case2PolicyName, "managed2", true, defaultTimeoutSeconds,
			)
			Expect(plc).ToNot(BeNil())
			utils.ListWithTimeout(clientHubDynamic, gvrPolicy, listOpts(), 2, true, defaultTimeoutSeconds)
		})

		AfterAll(func() {
			By("Cleaning up")
			utils.Kubectl("delete", "-f", case2PolicyYaml, "-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			utils.ListWithTimeout(clientHubDynamic, gvrPolicy, metav1.ListOptions{}, 0, false, 10)
		})

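		// Each case lists both replicated policies (one per managed cluster namespace),
		// updates the status subresource on the hub, then checks the aggregated root status
		// against the corresponding fixture YAML.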
		It("should be compliant when both managed clusters are compliant", func() {
			By("Patching both replicated policy statuses to compliant")
			replicatedPlcList := utils.ListWithTimeout(
				clientHubDynamic, gvrPolicy, listOpts(), 2, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.Compliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}

			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-both-status-compliant.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})

		It("should be noncompliant when one managed cluster is noncompliant", func() {
			By("Patching one replicated policy status to noncompliant")
			replicatedPlcList := utils.ListWithTimeout(
				clientHubDynamic, gvrPolicy, listOpts(), 2, true, defaultTimeoutSeconds)
			// Select the replicated policy that is not in the managed2 namespace (i.e. managed1).
			replicatedPlc := replicatedPlcList.Items[0]
			if replicatedPlc.GetNamespace() == "managed2" {
				replicatedPlc = replicatedPlcList.Items[1]
			}

			replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
				ComplianceState: policiesv1.NonCompliant,
			}
			_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
				context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-one-status-noncompliant.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})

		It("should be noncompliant when one is pending, and one is noncompliant", func() {
			By("Patching one replicated policy status to pending")
			replicatedPlcList := utils.ListWithTimeout(
				clientHubDynamic, gvrPolicy, listOpts(), 2, true, defaultTimeoutSeconds)
			// Select the replicated policy that is not in the managed1 namespace (i.e. managed2).
			replicatedPlc := replicatedPlcList.Items[0]
			if replicatedPlc.GetNamespace() == "managed1" {
				replicatedPlc = replicatedPlcList.Items[1]
			}

			replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
				ComplianceState: policiesv1.Pending,
			}
			_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
				context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-mixed-pending-noncompliant.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})

		It("should be pending when one is pending, and one is compliant", func() {
			By("Patching one replicated policy status to compliant")
			replicatedPlcList := utils.ListWithTimeout(
				clientHubDynamic, gvrPolicy, listOpts(), 2, true, defaultTimeoutSeconds)
			// Select the replicated policy that is not in the managed2 namespace (i.e. managed1).
			replicatedPlc := replicatedPlcList.Items[0]
			if replicatedPlc.GetNamespace() == "managed2" {
				replicatedPlc = replicatedPlcList.Items[1]
			}

			replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
				ComplianceState: policiesv1.Compliant,
			}
			_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
				context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

			By("Checking the status of root policy")
			yamlPlc := utils.ParseYaml("../resources/case2_aggregation/managed-mixed-pending-compliant.yaml")
			Eventually(func() interface{} {
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case2PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)

				return rootPlc.Object["status"]
			}, defaultTimeoutSeconds, 1).Should(utils.SemanticEqual(yamlPlc.Object["status"]))
		})
	})
})