open-cluster-management.io/governance-policy-propagator@v0.13.0/test/e2e/case5_policy_automation_test.go

// Copyright (c) 2021 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project

package e2e

import (
	"context"
	"sort"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	policiesv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
	policyv1beta1 "open-cluster-management.io/governance-policy-propagator/api/v1beta1"
	"open-cluster-management.io/governance-policy-propagator/controllers/common"
	"open-cluster-management.io/governance-policy-propagator/test/utils"
)

var _ = Describe("Test policy automation", Label("policyautomation"), Ordered, func() {
	const (
		case5PolicyName string = "case5-test-policy"
		case5PolicyYaml string = "../resources/case5_policy_automation/case5-test-policy.yaml"
		automationName  string = "create-service.now-ticket"
	)

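	// ansiblelistlen tracks the AnsibleJob count across steps so that later
	// assertions can tell whether new jobs were created after a compliance change.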
	ansiblelistlen := 0
	// Use this only when target_clusters includes managed1, managed2, and managed3
	getLastAnsiblejob := func() *unstructured.Unstructured {
		ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
			context.TODO(), metav1.ListOptions{},
		)
		ExpectWithOffset(1, err).ToNot(HaveOccurred())
		for _, ansiblejob := range ansiblejobList.Items {
			targetClusters, _, err := unstructured.NestedSlice(ansiblejob.Object,
				"spec", "extra_vars", "target_clusters")
			ExpectWithOffset(1, err).ToNot(HaveOccurred())
			for _, clusterName := range targetClusters {
				if clusterName == "managed3" {
					return &ansiblejob
				}
			}
		}

		return nil
	}
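	// getLastAnsiblejobByTime returns the most recently created AnsibleJob by
	// sorting the list in descending order of creationTimestamp.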
	getLastAnsiblejobByTime := func() *unstructured.Unstructured {
		ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
			context.TODO(), metav1.ListOptions{},
		)
		ExpectWithOffset(1, err).ToNot(HaveOccurred())
		sort.Slice(ansiblejobList.Items, func(i, j int) bool {
			p1 := ansiblejobList.Items[i].GetCreationTimestamp()
			p2 := ansiblejobList.Items[j].GetCreationTimestamp()

			return !p1.Before(&p2)
		})

		return &ansiblejobList.Items[0]
	}
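	// getTargetListlen returns the number of clusters in spec.extra_vars.target_clusters
	// of the newest AnsibleJob in the list, or 0 when the list is empty.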
	getTargetListlen := func(ansiblejobList *unstructured.UnstructuredList) int {
		if len(ansiblejobList.Items) == 0 {
			return 0
		}
		index := len(ansiblejobList.Items) - 1
		spec := ansiblejobList.Items[index].Object["spec"]
		extraVars := spec.(map[string]interface{})["extra_vars"].(map[string]interface{})

		return len(extraVars["target_clusters"].([]interface{}))
	}
	// Use this only when target_clusters includes managed1, managed2, and managed3
	getLastJobCompliant := func() string {
		ansiblejob := getLastAnsiblejob()
		lastCompliant, _, err := unstructured.NestedString(ansiblejob.Object,
			"spec", "extra_vars", "policy_violations", "managed3", "compliant")

		Expect(err).ToNot(HaveOccurred())

		return lastCompliant
	}

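	// The setup creates the root policy and then stubs the PlacementRule status
	// directly, so the propagator replicates the policy to the managed1, managed2,
	// and managed3 cluster namespaces without requiring real managed clusters.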
	BeforeAll(func() {
		ansiblelistlen = 0
		By("Create policy/pb/plc in ns:" + testNamespace + " and then update pb/plc")
		By("Creating " + case5PolicyName + " in user ns")
		_, err := utils.KubectlWithOutput("apply",
			"-f", case5PolicyYaml,
			"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
		Expect(err).ShouldNot(HaveOccurred())
		plc := utils.GetWithTimeout(
			clientHubDynamic, gvrPolicy, case5PolicyName, testNamespace, true, defaultTimeoutSeconds,
		)
		Expect(plc).NotTo(BeNil())
		plr := utils.GetWithTimeout(
			clientHubDynamic,
			gvrPlacementRule,
			case5PolicyName+"set-plr",
			testNamespace,
			true,
			defaultTimeoutSeconds,
		)
		plr.Object["status"] = utils.GeneratePlrStatus("managed1", "managed2", "managed3")
		_, err = clientHubDynamic.Resource(gvrPlacementRule).Namespace(testNamespace).UpdateStatus(
			context.TODO(), plr, metav1.UpdateOptions{},
		)
		Expect(err).ToNot(HaveOccurred())
		opt := metav1.ListOptions{
			LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
		}
		utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
	})

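	// cleanupPolicyAutomation deletes the PolicyAutomation (which should also
	// remove its AnsibleJobs) and resets every replicated policy to Compliant so
	// the next test starts from a clean state.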
	cleanupPolicyAutomation := func() {
		By("Removing the policyAutomation")
		_, err := utils.KubectlWithOutput(
			"delete", "policyautomation", "-n", testNamespace, automationName, "--kubeconfig="+kubeconfigHub,
			"--ignore-not-found",
		)
		Expect(err).ShouldNot(HaveOccurred())
		By("Ansiblejob should also be removed")
		Eventually(func() interface{} {
			ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
				context.TODO(), metav1.ListOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

			return len(ansiblejobList.Items)
		}, 30, 1).Should(Equal(0))

		By("Patching policy to make all clusters back to Compliant")
		opt := metav1.ListOptions{
			LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
		}
		replicatedPlcList := utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
		for _, replicatedPlc := range replicatedPlcList.Items {
			replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
				ComplianceState: policiesv1.Compliant,
			}
			_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
				context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
		}
		Eventually(func() interface{} {
			replicatedPlcList = utils.ListWithTimeout(
				clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			allUpdated := true
			for _, replicatedPlc := range replicatedPlcList.Items {
				compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
				if compliantStatusStr != string(policiesv1.Compliant) {
					allUpdated = false

					break
				}
			}

			return allUpdated
		}, 30, 1).Should(BeTrue())
	}

	Describe("Test PolicyAutomation spec.mode", Ordered, func() {
		It("Test mode = disable", func() {
			By("Creating a policyAutomation with mode=disable")
			_, err := utils.KubectlWithOutput("apply",
				"-f", "../resources/case5_policy_automation/case5-policy-automation-disable.yaml",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			Expect(err).ShouldNot(HaveOccurred())
			By("Should not create any ansiblejob when mode = disable")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(0))
		})

		It("Test mode = once", func() {
			By("Patching policyAutomation with mode=once")
			policyAutomation, err := clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Get(
				context.TODO(), automationName, metav1.GetOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			policyAutomation.Object["spec"].(map[string]interface{})["mode"] = string(policyv1beta1.Once)
			policyAutomation, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Update(
				context.TODO(), policyAutomation, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

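			// The controller is expected to set the root policy as the owner of the
			// PolicyAutomation, so deleting the policy cascades to the automation.
			// The AfterAll block at the end of this suite relies on this behavior.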
			By("Verifying the added owner reference")
			Expect(policyAutomation.GetOwnerReferences()).To(HaveLen(1))
			Expect(policyAutomation.GetOwnerReferences()[0].Name).To(Equal(case5PolicyName))

			By("Should still not create any ansiblejob when mode = once and policy is pending")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(0))
			By("Should still not create any ansiblejob when mode = once and policy is Compliant")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(0))
			By("Patching policy to make all clusters NonCompliant")
			opt := metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList := utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				// mock replicated policy PolicyStatus.Details for violationContext testing
				mockDetails := []*policiesv1.DetailsPerTemplate{
					{
						ComplianceState: policiesv1.NonCompliant,
						History: []policiesv1.ComplianceHistory{
							{
								Message:       "testing-ViolationMessage",
								LastTimestamp: metav1.NewTime(time.Now()),
								EventName:     "default.test-policy.164415c7210a573c",
							},
						},
					},
				}

				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.NonCompliant,
					Details:         mockDetails,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			By("Should only create one ansiblejob when mode = once and policy is NonCompliant")
			var ansiblejobList *unstructured.UnstructuredList
			Eventually(func() interface{} {
				ansiblejobList, err = clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(1))
			Consistently(func() interface{} {
				ansiblejobList, err = clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())
				index := len(ansiblejobList.Items) - 1
				spec := ansiblejobList.Items[index].Object["spec"]
				extraVars := spec.(map[string]interface{})["extra_vars"].(map[string]interface{})

				return len(extraVars) > 0
			}, 30, 1).Should(BeTrue())

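			// The assertions below spot-check the violation context the controller
			// injects into spec.extra_vars. Roughly (an illustrative sketch, not the
			// full payload):
			//
			//	extra_vars:
			//	  policy_name: case5-test-policy
			//	  policy_namespace: <testNamespace>
			//	  hub_cluster: millienium-falcon.tatooine.local
			//	  target_clusters: [managed1]
			//	  policy_sets: [case5-test-policyset]
			//	  policy_violations:
			//	    managed1:
			//	      compliant: NonCompliant
			//	      violation_message: testing-ViolationMessage
			//	      details: [{compliant: NonCompliant, history: [...]}]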
			By("Check each violation context field in extra_vars")
			Expect(err).ToNot(HaveOccurred())
			lastAnsiblejob := ansiblejobList.Items[0]
			spec := lastAnsiblejob.Object["spec"]
			extraVars := spec.(map[string]interface{})["extra_vars"].(map[string]interface{})
			Expect(extraVars["policy_name"]).To(Equal("case5-test-policy"))
			Expect(extraVars["policy_namespace"]).To(Equal(testNamespace))
			Expect(extraVars["hub_cluster"]).To(Equal("millienium-falcon.tatooine.local"))
			Expect(extraVars["target_clusters"].([]interface{})).To(HaveLen(1))
			Expect(extraVars["target_clusters"].([]interface{})[0]).To(Equal("managed1"))
			Expect(extraVars["policy_sets"].([]interface{})).To(HaveLen(1))
			Expect(extraVars["policy_sets"].([]interface{})[0]).To(Equal("case5-test-policyset"))
			managed1 := extraVars["policy_violations"].(map[string]interface{})["managed1"]
			compliant := managed1.(map[string]interface{})["compliant"]
			Expect(compliant).To(Equal(string(policiesv1.NonCompliant)))
			violationMessage := managed1.(map[string]interface{})["violation_message"]
			Expect(violationMessage).To(Equal("testing-ViolationMessage"))
			detail := managed1.(map[string]interface{})["details"].([]interface{})[0]
			Expect(detail.(map[string]interface{})["compliant"]).To(Equal(string(policiesv1.NonCompliant)))
			Expect(detail.(map[string]interface{})["history"].([]interface{})).To(HaveLen(1))

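			// When the PolicyAutomation does not specify a job TTL, the controller is
			// expected to default the AnsibleJob's spec.job_ttl to 86400 seconds (1 day).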
			By("Job TTL should match default (1 day)")
			Eventually(func(g Gomega) interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				g.Expect(err).ToNot(HaveOccurred())

				return ansiblejobList.Items[0].Object["spec"].(map[string]interface{})["job_ttl"]
			}, 10, 1).Should(Equal(int64(86400)))

			By("Mode should be set to disabled after ansiblejob is created")
			policyAutomation, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Get(
				context.TODO(), automationName, metav1.GetOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			Expect(
				policyAutomation.Object["spec"].(map[string]interface{})["mode"],
			).To(Equal(string(policyv1beta1.Disabled)))

			By("Changing mode back to once; should create one more ansiblejob")
			Eventually(func(g Gomega) {
				policyAutomation, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Get(
					context.TODO(), automationName, metav1.GetOptions{},
				)
				g.Expect(err).ToNot(HaveOccurred())

				policyAutomation.Object["spec"].(map[string]interface{})["mode"] = string(policyv1beta1.Once)
				_, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Update(
					context.TODO(), policyAutomation, metav1.UpdateOptions{},
				)
				g.Expect(err).ToNot(HaveOccurred())
			}).Should(Succeed())

			Eventually(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(2))

			By("Patching policy to make all clusters back to Compliant")
			opt = metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList = utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.Compliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.Compliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

			By("Removing the policyAutomation")
			_, err = utils.KubectlWithOutput(
				"delete", "policyautomation", "-n", testNamespace, automationName, "--kubeconfig="+kubeconfigHub,
			)
			Expect(err).ShouldNot(HaveOccurred())
			By("Ansiblejob should also be removed")
			Eventually(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(0))
		})

		// Create two NonCompliant events and expect ansiblejobs to be created for each event
		It("Test mode = everyEvent without delayAfterRunSeconds", func() {
			By("Creating a policyAutomation with mode=everyEvent")
			_, err := utils.KubectlWithOutput("apply",
				"-f", "../resources/case5_policy_automation/case5-policy-automation-everyEvent.yaml",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			Expect(err).ShouldNot(HaveOccurred())

			By("Should not create any new ansiblejob when Compliant")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 15, 1).Should(Equal(0))

			By("Patching policy to make all clusters NonCompliant")
			opt := metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList := utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.NonCompliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.NonCompliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

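			// In everyEvent mode, each new NonCompliant event should trigger its own
			// AnsibleJob, so at least one job is expected once the replicated
			// policies report NonCompliant.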
			By("Should create at least one ansiblejob")
			Eventually(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(BeNumerically(">", 0))

			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(BeNumerically(">", 0))

			By("The last ansiblejob should include managed3")
			Eventually(func() interface{} {
				return getLastJobCompliant()
			}, 30, 1).Should(Equal("NonCompliant"))

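			// The everyEvent automation YAML presumably sets a job TTL of 3600, so the
			// created AnsibleJob should carry spec.job_ttl of 1 hour instead of the default.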
			By("Job TTL should match the patched value (1 hour)")
			Eventually(func(g Gomega) interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				g.Expect(err).ToNot(HaveOccurred())

				// Save ansiblelistlen for the next step
				ansiblelistlen = len(ansiblejobList.Items)
				index := len(ansiblejobList.Items) - 1

				return ansiblejobList.Items[index].Object["spec"].(map[string]interface{})["job_ttl"]
			}, 10, 1).Should(Equal(int64(3600)))

			By("Patching policy to make all clusters back to Compliant")
			opt = metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList = utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.Compliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.Compliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

			By("Should not create any new ansiblejob when policies become Compliant")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 15, 1).Should(Equal(ansiblelistlen))

			By("Patching policy to make all clusters back to NonCompliant")
			opt = metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList = utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.NonCompliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.NonCompliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

			By("The last ansiblejob should include managed3, which is NonCompliant")
			Eventually(func() interface{} {
				return getLastJobCompliant()
			}, 30, 1).Should(Equal("NonCompliant"))

			By("Should create more ansiblejobs after changing from Compliant to NonCompliant")
			Eventually(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				// ansiblelistlen is the number of jobs that existed before the change to NonCompliant
				return len(ansiblejobList.Items) > ansiblelistlen
			}, 30, 1).Should(BeTrue())
			cleanupPolicyAutomation()
		})

		// Create three events during the delayAfterRunSeconds period.
		// The first event creates an ansiblejob within the delayAfterRunSeconds period.
		// The last two events only produce a second ansiblejob after the delayAfterRunSeconds period expires.
		It("Test mode = everyEvent with delayAfterRunSeconds", func() {
			By("Creating a policyAutomation with mode=everyEvent")
			_, err := utils.KubectlWithOutput("apply",
				"-f", "../resources/case5_policy_automation/case5-policy-automation-everyEvent.yaml",
				"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
			Expect(err).ShouldNot(HaveOccurred())

			By("Patching everyEvent mode policyAutomation with delayAfterRunSeconds = 240")
			var policyAutomation *unstructured.Unstructured
			// Use Eventually since there can be a race condition for when the owner reference is added by the
			// controller.
			Eventually(func(g Gomega) {
				policyAutomation, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Get(
					context.TODO(), automationName, metav1.GetOptions{},
				)
				g.Expect(err).ToNot(HaveOccurred())

				policyAutomation.Object["spec"].(map[string]interface{})["delayAfterRunSeconds"] = 240
				_, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Update(
					context.TODO(), policyAutomation, metav1.UpdateOptions{},
				)
				g.Expect(err).ToNot(HaveOccurred())
			}).Should(Succeed())

			By("Should not create any new ansiblejob when Compliant")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 15, 1).Should(Equal(0))

			By("Patching policy to make all clusters NonCompliant")
			opt := metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList := utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.NonCompliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.NonCompliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

			By("Checking the last ansiblejob has managed3 in target_clusters for " +
				"the first event during the delayAfterRunSeconds period")
			Eventually(func() interface{} {
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				return getLastJobCompliant()
			}, 30, 1).Should(Equal("NonCompliant"))

			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ShouldNot(HaveOccurred())

				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())
				// Save ansiblelistlen for the next step
				ansiblelistlen = len(ansiblejobList.Items)

				return getLastJobCompliant()
			}, 30, 1).Should(Equal("NonCompliant"))

			By("Patching policy to make all clusters back to Compliant")
			opt = metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList = utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.Compliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.Compliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())
			By("Should not create any new ansiblejob when policies become Compliant")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 15, 1).Should(Equal(ansiblelistlen))

			// Save ansiblelistlen to capture the count before the compliance change
			ansiblejobList, _ := clientHubDynamic.Resource(gvrAnsibleJob).List(
				context.TODO(), metav1.ListOptions{},
			)
			ansiblelistlen = len(ansiblejobList.Items)

			By("Patching policy to make all clusters back to NonCompliant")
			opt = metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList = utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.NonCompliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.NonCompliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

			By("Should not create any new ansiblejob for the second NonCompliant event" +
				" within the delayAfterRunSeconds period")
			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(ansiblelistlen))

			By("Patching policy to make all clusters back to Compliant")
			opt = metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList = utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.Compliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.Compliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

			// Save the ansiblejob list length for the next step
			ansiblejobList, err = clientHubDynamic.Resource(gvrAnsibleJob).List(
				context.TODO(), metav1.ListOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

			ansiblelistlen = len(ansiblejobList.Items)

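			// Back-dating automationStartTime by 241 seconds (just past the 240-second
			// delayAfterRunSeconds) makes the delay window expire immediately, so the
			// next NonCompliant event can trigger a new AnsibleJob without waiting.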
			By("Patching automationStartTime to an earlier time to let delayAfterRunSeconds expire immediately")
			policyAutomation, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Get(
				context.TODO(), automationName, metav1.GetOptions{},
			)
			Expect(err).ToNot(HaveOccurred())
			status := policyAutomation.Object["status"].(map[string]interface{})
			clustersWithEvent := status["clustersWithEvent"].(map[string]interface{})
			for _, clusterEvent := range clustersWithEvent {
				updateStartTime := time.Now().UTC().Add(-241 * time.Second).Format(time.RFC3339)
				clusterEvent.(map[string]interface{})["automationStartTime"] = updateStartTime
			}
			_, err = clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).UpdateStatus(
				context.TODO(), policyAutomation, metav1.UpdateOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

			By("Patching policy to make all clusters back to NonCompliant")
			opt = metav1.ListOptions{
				LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
			}
			replicatedPlcList = utils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
			for _, replicatedPlc := range replicatedPlcList.Items {
				replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
					ComplianceState: policiesv1.NonCompliant,
				}
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
					context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
			}
			Eventually(func() interface{} {
				replicatedPlcList = utils.ListWithTimeout(
					clientHubDynamic, gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				allUpdated := true
				for _, replicatedPlc := range replicatedPlcList.Items {
					compliantStatusStr := replicatedPlc.Object["status"].(map[string]interface{})["compliant"]
					if compliantStatusStr != string(policiesv1.NonCompliant) {
						allUpdated = false

						break
					}
				}

				return allUpdated
			}, 30, 1).Should(BeTrue())

			By("After delayAfterRunSeconds has expired, should create only the second ansiblejob")
			Eventually(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(BeNumerically(">", ansiblelistlen))

			Consistently(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())
				_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
					testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(BeNumerically(">", ansiblelistlen))

			By("Removing the policyAutomation")
			_, err = utils.KubectlWithOutput(
				"delete", "policyautomation", "-n", testNamespace, automationName, "--kubeconfig="+kubeconfigHub,
			)
			Expect(err).ShouldNot(HaveOccurred())
			By("Ansiblejob should also be removed")
			Eventually(func() interface{} {
				ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
					context.TODO(), metav1.ListOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				return len(ansiblejobList.Items)
			}, 30, 1).Should(Equal(0))

			cleanupPolicyAutomation()
		})
	})

	Describe("Test PolicyAutomation Manual run", func() {
		Describe("Test manual run", func() {
			It("Should be no issue when initializing a policyAutomation with disabled mode and a manual run", func() {
				By("Patching policy to make all clusters NonCompliant")
				opt := metav1.ListOptions{
					LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
				}
				replicatedPlcList := utils.ListWithTimeout(clientHubDynamic, gvrPolicy,
					opt, 3, true, defaultTimeoutSeconds)
				for _, replicatedPlc := range replicatedPlcList.Items {
					// mock replicated policy PolicyStatus.Details for violationContext testing
					mockDetails := []*policiesv1.DetailsPerTemplate{
						{
							ComplianceState: policiesv1.NonCompliant,
							History: []policiesv1.ComplianceHistory{
								{
									Message:       "testing-ViolationMessage",
									LastTimestamp: metav1.NewTime(time.Now()),
									EventName:     "default.test-policy.164415c7210a573c",
								},
							},
						},
					}

					replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
						ComplianceState: policiesv1.NonCompliant,
						Details:         mockDetails,
					}
					_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
						context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
					)
					Expect(err).ToNot(HaveOccurred())
				}

				By("Creating a policyAutomation with mode=disable")
				_, err := utils.KubectlWithOutput("apply",
					"-f", "../resources/case5_policy_automation/case5-policy-automation-disable.yaml",
					"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())
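				// A manual run is requested by setting the
				// "policy.open-cluster-management.io/rerun=true" annotation; it should
				// work even while spec.mode is disabled.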
				By("Applying manual run annotation")
				_, err = utils.KubectlWithOutput(
					"annotate",
					"policyautomation",
					"-n",
					testNamespace,
					automationName,
					"--overwrite",
					"policy.open-cluster-management.io/rerun=true",
					"--kubeconfig="+kubeconfigHub,
				)
				Expect(err).ShouldNot(HaveOccurred())
			})
			It("Should only create one ansiblejob, which includes 3 noncompliant target_clusters", func() {
				Eventually(func() interface{} {
					ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
						context.TODO(), metav1.ListOptions{},
					)
					Expect(err).ToNot(HaveOccurred())
					_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
						testNamespace, "--kubeconfig="+kubeconfigHub)
					Expect(err).ShouldNot(HaveOccurred())

					return len(ansiblejobList.Items)
				}, 30, 1).Should(Equal(1))
				Consistently(func() interface{} {
					ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
						context.TODO(), metav1.ListOptions{},
					)
					Expect(err).ShouldNot(HaveOccurred())
					_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
						testNamespace, "--kubeconfig="+kubeconfigHub)
					Expect(err).ShouldNot(HaveOccurred())

					return getTargetListlen(ansiblejobList)
				}, 30, 1).Should(Equal(3))
			})

			It("Check each violation context field is not empty in "+
				"extra_vars for the violated manual run case", func() {
				lastAnsiblejob := getLastAnsiblejob()
				spec := lastAnsiblejob.Object["spec"]
				extraVars := spec.(map[string]interface{})["extra_vars"].(map[string]interface{})
				Expect(extraVars["policy_name"]).To(Equal("case5-test-policy"))
				Expect(extraVars["policy_namespace"]).To(Equal(testNamespace))
				Expect(extraVars["hub_cluster"]).To(Equal("millienium-falcon.tatooine.local"))
				Expect(extraVars["target_clusters"].([]interface{})).To(HaveLen(3))
				Expect(extraVars["policy_sets"].([]interface{})).To(HaveLen(1))
				Expect(extraVars["policy_sets"].([]interface{})[0]).To(Equal("case5-test-policyset"))
				managed1 := extraVars["policy_violations"].(map[string]interface{})["managed1"]
				compliant := managed1.(map[string]interface{})["compliant"]
				Expect(compliant).To(Equal(string(policiesv1.NonCompliant)))
				managed2 := extraVars["policy_violations"].(map[string]interface{})["managed2"]
				compliant = managed2.(map[string]interface{})["compliant"]
				Expect(compliant).To(Equal(string(policiesv1.NonCompliant)))
			})

			It("Patching policy to make all clusters back to Compliant", func() {
				opt := metav1.ListOptions{
					LabelSelector: common.RootPolicyLabel + "=" + testNamespace + "." + case5PolicyName,
				}
				replicatedPlcList := utils.ListWithTimeout(clientHubDynamic,
					gvrPolicy, opt, 3, true, defaultTimeoutSeconds)
				for _, replicatedPlc := range replicatedPlcList.Items {
					replicatedPlc.Object["status"] = &policiesv1.PolicyStatus{
						ComplianceState: policiesv1.Compliant,
					}
					_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(replicatedPlc.GetNamespace()).UpdateStatus(
						context.TODO(), &replicatedPlc, metav1.UpdateOptions{},
					)
					Expect(err).ToNot(HaveOccurred())
				}
			})

			It("Should create one ansiblejob when the policy is compliant", func() {
				By("Applying manual run annotation again")
				_, err := utils.KubectlWithOutput(
					"annotate",
					"policyautomation",
					"-n",
					testNamespace,
					automationName,
					"--overwrite",
					"policy.open-cluster-management.io/rerun=true",
					"--kubeconfig="+kubeconfigHub,
				)
				Expect(err).ShouldNot(HaveOccurred())
				By("Should still create one more ansiblejob when policy is Compliant")
				Eventually(func() interface{} {
					ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
						context.TODO(), metav1.ListOptions{},
					)
					Expect(err).ToNot(HaveOccurred())
					_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
						testNamespace, "--kubeconfig="+kubeconfigHub)
					Expect(err).ShouldNot(HaveOccurred())

					return len(ansiblejobList.Items)
				}, 30, 1).Should(Equal(2))
				Consistently(func() interface{} {
					ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
						context.TODO(), metav1.ListOptions{},
					)
					Expect(err).ToNot(HaveOccurred())
					_, err = utils.KubectlWithOutput("get", "ansiblejobs", "-n",
						testNamespace, "--kubeconfig="+kubeconfigHub)
					Expect(err).ShouldNot(HaveOccurred())

					return len(ansiblejobList.Items)
				}, 30, 1).Should(Equal(2))
			})
			It("Check policy_violations is mostly empty for the compliant manual run case", func() {
				lastAnsibleJob := getLastAnsiblejobByTime()
				spec := lastAnsibleJob.Object["spec"]

				extraVars := spec.(map[string]interface{})["extra_vars"].(map[string]interface{})
				Expect(extraVars["policy_name"]).To(Equal("case5-test-policy"))
				Expect(extraVars["policy_namespace"]).To(Equal(testNamespace))
				Expect(extraVars["hub_cluster"]).To(Equal("millienium-falcon.tatooine.local"))
				Expect(extraVars["target_clusters"].([]interface{})).To(BeEmpty())
				Expect(extraVars["policy_sets"].([]interface{})).To(HaveLen(1))
				Expect(extraVars["policy_sets"].([]interface{})[0]).To(Equal("case5-test-policyset"))
				Expect(extraVars["policy_violations"]).To(BeNil())
				cleanupPolicyAutomation()
			})
		})
		Describe("Test manual run and disable", func() {
			It("Change policy to disabled and create policyAutomation with disabled", func() {
				By("Changing policy disabled to true")
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case5PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)
				Expect(rootPlc).NotTo(BeNil())
				rootPlc.Object["spec"].(map[string]interface{})["disabled"] = true
				_, err := clientHubDynamic.Resource(gvrPolicy).Namespace(testNamespace).Update(
					context.TODO(), rootPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				By("Creating a policyAutomation with mode=disable")
				_, err = utils.KubectlWithOutput("apply",
					"-f", "../resources/case5_policy_automation/case5-policy-automation-disable.yaml",
					"-n", testNamespace, "--kubeconfig="+kubeconfigHub)
				Expect(err).ShouldNot(HaveOccurred())
			})
			It("Should be no issue when the policy is set to disabled = true", func() {
				By("Applying manual run annotation")
				_, err := utils.KubectlWithOutput(
					"annotate",
					"policyautomation",
					"-n",
					testNamespace,
					automationName,
					"--overwrite",
					"policy.open-cluster-management.io/rerun=true",
					"--kubeconfig="+kubeconfigHub,
				)
				Expect(err).ShouldNot(HaveOccurred())

				By("Changing policy disabled to false")
				rootPlc := utils.GetWithTimeout(
					clientHubDynamic, gvrPolicy, case5PolicyName, testNamespace, true, defaultTimeoutSeconds,
				)
				Expect(rootPlc).NotTo(BeNil())
				Expect(rootPlc.Object["spec"].(map[string]interface{})["disabled"]).To(BeTrue())
				rootPlc.Object["spec"].(map[string]interface{})["disabled"] = false
				_, err = clientHubDynamic.Resource(gvrPolicy).Namespace(testNamespace).Update(
					context.TODO(), rootPlc, metav1.UpdateOptions{},
				)
				Expect(err).ToNot(HaveOccurred())

				By("There should be only one ansiblejob")
				Eventually(func() interface{} {
					ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
						context.TODO(), metav1.ListOptions{},
					)
					Expect(err).ToNot(HaveOccurred())

					return len(ansiblejobList.Items)
				}, 30, 1).Should(Equal(1))
			})
			It("Should create one more ansiblejob when the manual run is set again", func() {
				By("Applying manual run annotation")
				_, err := utils.KubectlWithOutput(
					"annotate",
					"policyautomation",
					"-n",
					testNamespace,
					automationName,
					"--overwrite",
					"policy.open-cluster-management.io/rerun=true",
					"--kubeconfig="+kubeconfigHub,
				)
				Expect(err).ShouldNot(HaveOccurred())

				By("There should be two ansiblejobs")
				Eventually(func() interface{} {
					ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
						context.TODO(), metav1.ListOptions{},
					)
					Expect(err).ToNot(HaveOccurred())

					return len(ansiblejobList.Items)
				}, 30, 1).Should(Equal(2))

				cleanupPolicyAutomation()
			})
		})
	})

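	// Deleting the root policy should cascade to the PolicyAutomation through the
	// owner reference verified earlier, and removing the automation should in turn
	// clean up any remaining AnsibleJobs.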
	AfterAll(func() {
		By("Removing policy")
		_, err := utils.KubectlWithOutput("delete", "policy", "-n",
			testNamespace, case5PolicyName, "--kubeconfig="+kubeconfigHub)
		Expect(err).ToNot(HaveOccurred())
		By("PolicyAutomation should also be removed")
		Eventually(func() *unstructured.Unstructured {
			policyAutomation, err := clientHubDynamic.Resource(gvrPolicyAutomation).Namespace(testNamespace).Get(
				context.TODO(), automationName, metav1.GetOptions{},
			)
			if !k8serrors.IsNotFound(err) {
				Expect(err).ToNot(HaveOccurred())
			}

			return policyAutomation
		}).Should(BeNil())
		By("Ansiblejob should also be removed")
		Eventually(func() interface{} {
			ansiblejobList, err := clientHubDynamic.Resource(gvrAnsibleJob).Namespace(testNamespace).List(
				context.TODO(), metav1.ListOptions{},
			)
			Expect(err).ToNot(HaveOccurred())

			return len(ansiblejobList.Items)
		}, 30, 1).Should(Equal(0))
	})
})