github.com/redhat-appstudio/e2e-tests@v0.0.0-20230619105049-9a422b2094d7/tests/build/chains.go (about)

     1  package build
     2  
     3  import (
     4  	"fmt"
     5  	"time"
     6  
     7  	"github.com/devfile/library/pkg/util"
     8  	ecp "github.com/enterprise-contract/enterprise-contract-controller/api/v1alpha1"
     9  	. "github.com/onsi/ginkgo/v2"
    10  	. "github.com/onsi/gomega"
    11  	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
    12  	corev1 "k8s.io/api/core/v1"
    13  	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
    14  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    15  	"sigs.k8s.io/yaml"
    16  
    17  	"github.com/redhat-appstudio/e2e-tests/pkg/constants"
    18  	"github.com/redhat-appstudio/e2e-tests/pkg/framework"
    19  	"github.com/redhat-appstudio/e2e-tests/pkg/utils"
    20  	"github.com/redhat-appstudio/e2e-tests/pkg/utils/common"
    21  	"github.com/redhat-appstudio/e2e-tests/pkg/utils/tekton"
    22  )
    23  
// Tekton Chains E2E suite. It verifies, in order:
//  1. that the Chains controller infrastructure (controller pod, roles,
//     rolebindings, service account) exists in the expected namespaces;
//  2. that an image built by a demo buildah PipelineRun gets a signature and
//     an attestation from Tekton Chains;
//  3. that the verify-enterprise-contract task passes/fails as expected under
//     different policy configurations and signing keys.
var _ = framework.ChainsSuiteDescribe("Tekton Chains E2E tests", Label("ec", "HACBS"), func() {
	defer GinkgoRecover()
	var fwk *framework.Framework
	var err error
	// namespace is the generated sandbox user namespace shared by all specs.
	var namespace string

	BeforeAll(func() {
		// Provision a framework bound to a generated sandbox user namespace
		// dedicated to this suite.
		fwk, err = framework.NewFramework(utils.GetGeneratedNamespace(constants.TEKTON_CHAINS_E2E_USER))
		Expect(err).NotTo(HaveOccurred())
		Expect(fwk.UserNamespace).NotTo(BeNil(), "failed to create sandbox user")
		namespace = fwk.UserNamespace
	})

	// Sanity checks on the cluster-side Chains installation; these do not
	// depend on any image having been built.
	Context("infrastructure is running", Label("pipeline"), func() {
		It("verifies if the chains controller is running", func() {
			// Poll (60 attempts x 100 units per WaitForPodSelector's contract)
			// for a running pod labeled app=tekton-chains-controller.
			err := fwk.AsKubeAdmin.CommonController.WaitForPodSelector(fwk.AsKubeAdmin.CommonController.IsPodRunning, constants.TEKTON_CHAINS_NS, "app", "tekton-chains-controller", 60, 100)
			Expect(err).NotTo(HaveOccurred())
		})
		It("verifies if the correct roles are created", func() {
			_, csaErr := fwk.AsKubeAdmin.CommonController.GetRole("chains-secret-admin", constants.TEKTON_CHAINS_NS)
			Expect(csaErr).NotTo(HaveOccurred())
			_, srErr := fwk.AsKubeAdmin.CommonController.GetRole("secret-reader", "openshift-ingress-operator")
			Expect(srErr).NotTo(HaveOccurred())
		})
		It("verifies if the correct rolebindings are created", func() {
			_, csaErr := fwk.AsKubeAdmin.CommonController.GetRoleBinding("chains-secret-admin", constants.TEKTON_CHAINS_NS)
			Expect(csaErr).NotTo(HaveOccurred())
			_, csrErr := fwk.AsKubeAdmin.CommonController.GetRoleBinding("chains-secret-reader", "openshift-ingress-operator")
			Expect(csrErr).NotTo(HaveOccurred())
		})
		It("verifies if the correct service account is created", func() {
			_, err := fwk.AsKubeAdmin.CommonController.GetServiceAccount("chains-secrets-admin", constants.TEKTON_CHAINS_NS)
			Expect(err).NotTo(HaveOccurred())
		})
	})

	Context("test creating and signing an image and task", Label("pipeline"), func() {
		// Make the PipelineRun name and namespace predictable. For convenience, the name of the
		// PipelineRun that builds an image, is the same as the repository where the image is
		// pushed to.
		var buildPipelineRunName, image, imageWithDigest string
		var pipelineRunTimeout int
		var attestationTimeout time.Duration
		var kubeController tekton.KubeController
		// policySource holds the EC policy sources copied from the cluster's
		// default EnterpriseContractPolicy; reused as the baseline in specs.
		var policySource []ecp.Source

		BeforeAll(func() {
			kubeController = tekton.KubeController{
				Commonctrl: *fwk.AsKubeAdmin.CommonController,
				Tektonctrl: *fwk.AsKubeAdmin.TektonController,
				Namespace:  namespace,
			}

			// Randomized run name doubles as the image tag (see comment above).
			buildPipelineRunName = fmt.Sprintf("buildah-demo-%s", util.GenerateRandomString(10))
			image = fmt.Sprintf("quay.io/%s/test-images:%s", utils.GetQuayIOOrganization(), buildPipelineRunName)
			sharedSecret, err := kubeController.Commonctrl.GetSecret(constants.QuayRepositorySecretNamespace, constants.QuayRepositorySecretName)
			Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("error when getting shared secret - make sure the secret %s in %s namespace is created", constants.QuayRepositorySecretName, constants.QuayRepositorySecretNamespace))

			// Recreate the quay push secret in the test namespace: delete a
			// pre-existing copy if present; any error other than NotFound is fatal.
			_, err = kubeController.Commonctrl.GetSecret(namespace, constants.QuayRepositorySecretName)
			if err == nil {
				err = kubeController.Commonctrl.DeleteSecret(namespace, constants.QuayRepositorySecretName)
				Expect(err).ToNot(HaveOccurred())
			} else if !k8sErrors.IsNotFound(err) {
				Expect(err).ToNot(HaveOccurred())
			}

			// Copy the docker config from the shared secret into a fresh
			// dockerconfigjson secret and link it to the pipeline SA so the
			// build can push to quay.io.
			repositorySecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: constants.QuayRepositorySecretName, Namespace: namespace},
				Type: corev1.SecretTypeDockerConfigJson,
				Data: map[string][]byte{corev1.DockerConfigJsonKey: sharedSecret.Data[".dockerconfigjson"]}}
			_, err = kubeController.Commonctrl.CreateSecret(namespace, repositorySecret)
			Expect(err).ShouldNot(HaveOccurred())
			err = kubeController.Commonctrl.LinkSecretToServiceAccount(namespace, constants.QuayRepositorySecretName, constants.DefaultPipelineServiceAccount, true)
			Expect(err).ToNot(HaveOccurred())

			// NOTE(review): pipelineRunTimeout is a time.Duration (nanoseconds)
			// truncated to int — presumably RunPipeline/WatchPipelineRun expect
			// nanoseconds as an int; confirm against the tekton package.
			pipelineRunTimeout = int(time.Duration(20) * time.Minute)
			attestationTimeout = time.Duration(60) * time.Second

			defaultEcp, err := kubeController.GetEnterpriseContractPolicy("default", "enterprise-contract-service")
			Expect(err).NotTo(HaveOccurred())
			policySource = defaultEcp.Spec.Sources

			// if there is a ConfigMap e2e-tests/ec-config with keys `revision` and
			// `repository` values from those will replace the default policy source
			// this gives us a way to set the tests to use a different policy if we
			// break the tests in the default policy source
			// NOTE(review): if this snippet is re-enabled, the condition should be
			// `err == nil` — as written it would read the ConfigMap only when Get fails.
			// if config, err := fwk.CommonController.K8sClient.KubeInterface().CoreV1().ConfigMaps("e2e-tests").Get(context.TODO(), "ec-config", v1.GetOptions{}); err != nil {
			// 	if v, ok := config.Data["revision"]; ok {
			// 		policySource.Revision = &v
			// 	}
			// 	if v, ok := config.Data["repository"]; ok {
			// 		policySource.Repository = v
			// 	}
			// }

			// At a bare minimum, each spec within this context relies on the existence of
			// an image that has been signed by Tekton Chains. Trigger a demo task to fulfill
			// this purpose.

			bundles, err := fwk.AsKubeAdmin.TektonController.NewBundles()
			Expect(err).ShouldNot(HaveOccurred())
			dockerBuildBundle := bundles.DockerBuildBundle
			Expect(dockerBuildBundle).NotTo(Equal(""), "Can't continue without a docker-build pipeline got from selector config")
			pr, err := kubeController.RunPipeline(tekton.BuildahDemo{Image: image, Bundle: dockerBuildBundle, Namespace: namespace, Name: buildPipelineRunName}, pipelineRunTimeout)
			Expect(err).NotTo(HaveOccurred())
			// Verify that the build task was created as expected.
			Expect(pr.ObjectMeta.Name).To(Equal(buildPipelineRunName))
			Expect(pr.ObjectMeta.Namespace).To(Equal(namespace))
			Expect(kubeController.WatchPipelineRun(pr.Name, pipelineRunTimeout)).To(Succeed())
			GinkgoWriter.Printf("The pipeline named %q in namespace %q succeeded\n", pr.ObjectMeta.Name, pr.ObjectMeta.Namespace)

			// The PipelineRun resource has been updated, refresh our reference.
			pr, err = kubeController.Tektonctrl.GetPipelineRun(pr.ObjectMeta.Name, pr.ObjectMeta.Namespace)
			Expect(err).NotTo(HaveOccurred())

			// Verify TaskRun has the type hinting required by Tekton Chains
			digest, err := kubeController.GetTaskRunResult(fwk.AsKubeAdmin.CommonController.KubeRest(), pr, "build-container", "IMAGE_DIGEST")
			Expect(err).NotTo(HaveOccurred())
			i, err := kubeController.GetTaskRunResult(fwk.AsKubeAdmin.CommonController.KubeRest(), pr, "build-container", "IMAGE_URL")
			Expect(err).NotTo(HaveOccurred())
			Expect(i).To(Equal(image))

			// Specs now have a deterministic image reference for validation \o/
			imageWithDigest = fmt.Sprintf("%s@%s", image, digest)

			GinkgoWriter.Printf("The image signed by Tekton Chains is %s\n", imageWithDigest)
		})

		It("creates signature and attestation", func() {
			// Chains signs asynchronously; wait up to attestationTimeout for
			// the .att and .sig tags to appear for the built image.
			err := kubeController.AwaitAttestationAndSignature(imageWithDigest, attestationTimeout)
			Expect(err).NotTo(
				HaveOccurred(),
				"Could not find .att or .sig ImageStreamTags within the %s timeout. "+
					"Most likely the chains-controller did not create those in time. "+
					"Look at the chains-controller logs.",
				attestationTimeout.String(),
			)
			GinkgoWriter.Printf("Cosign verify pass with .att and .sig ImageStreamTags found for %s\n", imageWithDigest)
		})

		Context("verify-enterprise-contract task", func() {
			var generator tekton.VerifyEnterpriseContract
			var rekorHost string
			var verifyECTaskBundle string
			publicSecretName := "cosign-public-key"

			BeforeAll(func() {
				// Copy the public key from tekton-chains/signing-secrets to a new
				// secret that contains just the public key to ensure that access
				// to password and private key are not needed.
				publicKey, err := kubeController.GetTektonChainsPublicKey()
				Expect(err).ToNot(HaveOccurred())
				GinkgoWriter.Printf("Copy public key from %s/signing-secrets to a new secret\n", constants.TEKTON_CHAINS_NS)
				Expect(kubeController.CreateOrUpdateSigningSecret(
					publicKey, publicSecretName, namespace)).To(Succeed())

				rekorHost, err = kubeController.GetRekorHost()
				Expect(err).ToNot(HaveOccurred())
				GinkgoWriter.Printf("Configured Rekor host: %s\n", rekorHost)

				// The EC task bundle reference is published by the service in
				// the ec-defaults ConfigMap.
				cm, err := kubeController.Commonctrl.GetConfigMap("ec-defaults", "enterprise-contract-service")
				Expect(err).ToNot(HaveOccurred())
				verifyECTaskBundle = cm.Data["verify_ec_task_bundle"]
				Expect(verifyECTaskBundle).ToNot(BeEmpty())
				GinkgoWriter.Printf("Using verify EC task bundle: %s\n", verifyECTaskBundle)
			})

			BeforeEach(func() {
				// Fresh generator per spec; individual specs mutate fields
				// (Strict, PublicKey) without affecting each other.
				generator = tekton.VerifyEnterpriseContract{
					Bundle:              verifyECTaskBundle,
					Image:               imageWithDigest,
					Name:                "verify-enterprise-contract",
					Namespace:           namespace,
					PolicyConfiguration: "ec-policy",
					PublicKey:           fmt.Sprintf("k8s://%s/%s", namespace, publicSecretName),
					SSLCertDir:          "/var/run/secrets/kubernetes.io/serviceaccount",
					Strict:              true,
					EffectiveTime:       "now",
				}

				// Since specs could update the config policy, make sure it has a consistent
				// baseline at the start of each spec.
				baselinePolicies := ecp.EnterpriseContractPolicySpec{
					Configuration: &ecp.EnterpriseContractPolicyConfiguration{
						// A simple policy that should always succeed in a cluster where
						// Tekton Chains is properly setup.
						Include: []string{"slsa_provenance_available"},
					},
					Sources: policySource,
				}
				Expect(kubeController.CreateOrUpdatePolicyConfiguration(namespace, baselinePolicies)).To(Succeed())
				// printPolicyConfiguration(baselinePolicies)
			})

			It("succeeds when policy is met", func() {
				Skip("Skip until RHTAP bug is solved: https://issues.redhat.com/browse/RHTAPBUGS-352")
				pr, err := kubeController.RunPipeline(generator, pipelineRunTimeout)
				Expect(err).NotTo(HaveOccurred())
				Expect(kubeController.WatchPipelineRun(pr.Name, pipelineRunTimeout)).To(Succeed())

				// Refresh our copy of the PipelineRun for latest results
				pr, err = kubeController.Tektonctrl.GetPipelineRun(pr.Name, pr.Namespace)
				Expect(err).NotTo(HaveOccurred())

				tr, err := kubeController.GetTaskRunStatus(fwk.AsKubeAdmin.CommonController.KubeRest(), pr, "verify-enterprise-contract")
				Expect(err).NotTo(HaveOccurred())
				printTaskRunStatus(tr, namespace, *fwk.AsKubeAdmin.CommonController)
				GinkgoWriter.Printf("Make sure TaskRun %s of PipelineRun %s succeeded\n", tr.PipelineTaskName, pr.Name)
				Expect(tekton.DidTaskSucceed(tr)).To(BeTrue())
				GinkgoWriter.Printf("Make sure result for TaskRun %q succeeded\n", tr.PipelineTaskName)
				Expect(tr.Status.TaskRunResults).Should(Or(
					// TODO: delete the first option after https://issues.redhat.com/browse/RHTAP-810 is completed
					ContainElements(tekton.MatchTaskRunResultWithJSONPathValue(constants.OldTektonTaskTestOutputName, "{$.result}", `["SUCCESS"]`)),
					ContainElements(tekton.MatchTaskRunResultWithJSONPathValue(constants.TektonTaskTestOutputName, "{$.result}", `["SUCCESS"]`)),
				))
			})

			It("does not pass when tests are not satisfied on non-strict mode", func() {
				policy := ecp.EnterpriseContractPolicySpec{
					Sources: policySource,
					Configuration: &ecp.EnterpriseContractPolicyConfiguration{
						// The BuildahDemo pipeline used to generate the test data does not
						// include the required test tasks, so this policy should always fail.
						Include: []string{"test"},
					},
				}
				Expect(kubeController.CreateOrUpdatePolicyConfiguration(namespace, policy)).To(Succeed())
				// printPolicyConfiguration(policy)
				// Non-strict: the task itself succeeds, but reports FAILURE in
				// its result output.
				generator.Strict = false
				pr, err := kubeController.RunPipeline(generator, pipelineRunTimeout)
				Expect(err).NotTo(HaveOccurred())
				Expect(kubeController.WatchPipelineRun(pr.Name, pipelineRunTimeout)).To(Succeed())

				// Refresh our copy of the PipelineRun for latest results
				pr, err = kubeController.Tektonctrl.GetPipelineRun(pr.Name, pr.Namespace)
				Expect(err).NotTo(HaveOccurred())

				tr, err := kubeController.GetTaskRunStatus(fwk.AsKubeAdmin.CommonController.KubeRest(), pr, "verify-enterprise-contract")
				Expect(err).NotTo(HaveOccurred())

				printTaskRunStatus(tr, namespace, *fwk.AsKubeAdmin.CommonController)
				GinkgoWriter.Printf("Make sure TaskRun %s of PipelineRun %s succeeded\n", tr.PipelineTaskName, pr.Name)
				Expect(tekton.DidTaskSucceed(tr)).To(BeTrue())
				GinkgoWriter.Printf("Make sure result for TaskRun %q succeeded\n", tr.PipelineTaskName)
				Expect(tr.Status.TaskRunResults).Should(Or(
					// TODO: delete the first option after https://issues.redhat.com/browse/RHTAP-810 is completed
					ContainElements(tekton.MatchTaskRunResultWithJSONPathValue(constants.OldTektonTaskTestOutputName, "{$.result}", `["FAILURE"]`)),
					ContainElements(tekton.MatchTaskRunResultWithJSONPathValue(constants.TektonTaskTestOutputName, "{$.result}", `["FAILURE"]`)),
				))
			})

			It("fails when tests are not satisfied on strict mode", func() {
				policy := ecp.EnterpriseContractPolicySpec{
					Sources: policySource,
					Configuration: &ecp.EnterpriseContractPolicyConfiguration{
						// The BuildahDemo pipeline used to generate the test data does not
						// include the required test tasks, so this policy should always fail.
						Include: []string{"test"},
					},
				}
				Expect(kubeController.CreateOrUpdatePolicyConfiguration(namespace, policy)).To(Succeed())
				// printPolicyConfiguration(policy)

				// Strict: an unsatisfied policy makes the task itself fail.
				generator.Strict = true
				pr, err := kubeController.RunPipeline(generator, pipelineRunTimeout)
				Expect(err).NotTo(HaveOccurred())
				Expect(kubeController.WatchPipelineRun(pr.Name, pipelineRunTimeout)).To(Succeed())

				// Refresh our copy of the PipelineRun for latest results
				pr, err = kubeController.Tektonctrl.GetPipelineRun(pr.Name, pr.Namespace)
				Expect(err).NotTo(HaveOccurred())

				tr, err := kubeController.GetTaskRunStatus(fwk.AsKubeAdmin.CommonController.KubeRest(), pr, "verify-enterprise-contract")
				Expect(err).NotTo(HaveOccurred())

				printTaskRunStatus(tr, namespace, *fwk.AsKubeAdmin.CommonController)
				GinkgoWriter.Printf("Make sure TaskRun %s of PipelineRun %s failed\n", tr.PipelineTaskName, pr.Name)
				Expect(tekton.DidTaskSucceed(tr)).To(BeFalse())
				// Because the task fails, no results are created
			})

			It("fails when unexpected signature is used", func() {
				// A syntactically valid but unrelated public key: verification
				// of the image signature must fail against it.
				secretName := fmt.Sprintf("dummy-public-key-%s", util.GenerateRandomString(10))
				publicKey := []byte("-----BEGIN PUBLIC KEY-----\n" +
					"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAENZxkE/d0fKvJ51dXHQmxXaRMTtVz\n" +
					"BQWcmJD/7pcMDEmBcmk8O1yUPIiFj5TMZqabjS9CQQN+jKHG+Bfi0BYlHg==\n" +
					"-----END PUBLIC KEY-----")
				GinkgoWriter.Println("Create an unexpected public signing key")
				Expect(kubeController.CreateOrUpdateSigningSecret(publicKey, secretName, namespace)).To(Succeed())
				generator.PublicKey = fmt.Sprintf("k8s://%s/%s", namespace, secretName)

				pr, err := kubeController.RunPipeline(generator, pipelineRunTimeout)
				Expect(err).NotTo(HaveOccurred())
				Expect(kubeController.WatchPipelineRun(pr.Name, pipelineRunTimeout)).To(Succeed())

				// Refresh our copy of the PipelineRun for latest results
				pr, err = kubeController.Tektonctrl.GetPipelineRun(pr.Name, pr.Namespace)
				Expect(err).NotTo(HaveOccurred())

				tr, err := kubeController.GetTaskRunStatus(fwk.AsKubeAdmin.CommonController.KubeRest(), pr, "verify-enterprise-contract")
				Expect(err).NotTo(HaveOccurred())

				printTaskRunStatus(tr, namespace, *fwk.AsKubeAdmin.CommonController)
				GinkgoWriter.Printf("Make sure TaskRun %s of PipelineRun %s failed\n", tr.PipelineTaskName, pr.Name)
				Expect(tekton.DidTaskSucceed(tr)).To(BeFalse())
				// Because the task fails, no results are created
			})
		})
	})
})
   333  
   334  // func printPolicyConfiguration(policy ecp.EnterpriseContractPolicySpec) {
   335  // 	sources := ""
   336  // 	for i, s := range policy.Sources {
   337  // 		if i != 0 {
   338  // 			sources += "\n"
   339  // 		}
   340  // 		if s.GitRepository != nil {
   341  // 			if s.GitRepository.Revision != nil {
   342  // 				sources += fmt.Sprintf("[%d] repository: '%s', revision: '%s'", i, s.GitRepository.Repository, *s.GitRepository.Revision)
   343  // 			} else {
   344  // 				sources += fmt.Sprintf("[%d] repository: '%s'", i, s.GitRepository.Repository)
   345  // 			}
   346  // 		}
   347  // 	}
   348  // 	exceptions := "[]"
   349  // 	if policy.Exceptions != nil {
   350  // 		exceptions = fmt.Sprintf("%v", policy.Exceptions.NonBlocking)
   351  // 	}
   352  // 	GinkgoWriter.Printf("Configured sources: %s\nand non-blocking policies: %v\n", sources, exceptions)
   353  // }
   354  
   355  func printTaskRunStatus(tr *v1beta1.PipelineRunTaskRunStatus, namespace string, sc common.SuiteController) {
   356  	if tr.Status == nil {
   357  		GinkgoWriter.Println("*** TaskRun status: nil")
   358  		return
   359  	}
   360  
   361  	if y, err := yaml.Marshal(tr.Status); err == nil {
   362  		GinkgoWriter.Printf("*** TaskRun status:\n%s\n", string(y))
   363  	} else {
   364  		GinkgoWriter.Printf("*** Unable to serialize TaskRunStatus to YAML: %#v; error: %s\n", tr.Status, err)
   365  	}
   366  
   367  	for _, s := range tr.Status.TaskRunStatusFields.Steps {
   368  		if logs, err := utils.GetContainerLogs(sc.KubeInterface(), tr.Status.PodName, s.ContainerName, namespace); err == nil {
   369  			GinkgoWriter.Printf("*** Logs from pod '%s', container '%s':\n----- START -----%s----- END -----\n", tr.Status.PodName, s.ContainerName, logs)
   370  		} else {
   371  			GinkgoWriter.Printf("*** Can't fetch logs from pod '%s', container '%s': %s\n", tr.Status.PodName, s.ContainerName, err)
   372  		}
   373  	}
   374  }