github.com/operator-framework/operator-lifecycle-manager@v0.30.0/test/e2e/util.go (about) 1 package e2e 2 3 import ( 4 "bytes" 5 "context" 6 "fmt" 7 "os" 8 "regexp" 9 "strings" 10 "time" 11 12 "github.com/ghodss/yaml" 13 . "github.com/onsi/ginkgo/v2" 14 . "github.com/onsi/gomega" 15 gtypes "github.com/onsi/gomega/types" 16 operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" 17 operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" 18 "github.com/stretchr/testify/require" 19 "google.golang.org/grpc" 20 grpcinsecure "google.golang.org/grpc/credentials/insecure" 21 "google.golang.org/grpc/health/grpc_health_v1" 22 appsv1 "k8s.io/api/apps/v1" 23 corev1 "k8s.io/api/core/v1" 24 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 25 extScheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" 26 apierrors "k8s.io/apimachinery/pkg/api/errors" 27 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 28 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 29 "k8s.io/apimachinery/pkg/fields" 30 "k8s.io/apimachinery/pkg/runtime" 31 "k8s.io/apimachinery/pkg/runtime/schema" 32 k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json" 33 "k8s.io/apimachinery/pkg/types" 34 "k8s.io/apimachinery/pkg/util/wait" 35 "k8s.io/apimachinery/pkg/watch" 36 "k8s.io/apiserver/pkg/storage/names" 37 "k8s.io/client-go/dynamic" 38 k8sscheme "k8s.io/client-go/kubernetes/scheme" 39 "k8s.io/client-go/rest" 40 k8scontrollerclient "sigs.k8s.io/controller-runtime/pkg/client" 41 42 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned" 43 "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle" 44 "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/install" 45 "github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry" 46 controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" 
47 "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient" 48 pmversioned "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/client/clientset/versioned" 49 "github.com/operator-framework/operator-lifecycle-manager/test/e2e/ctx" 50 ) 51 52 const ( 53 pollInterval = 100 * time.Millisecond 54 pollDuration = 5 * time.Minute 55 56 olmConfigMap = "olm-operators" // No-longer used, how long do we keep this around? 57 58 packageServerCSV = "packageserver.v1.0.0" 59 ) 60 61 var ( 62 genName = names.SimpleNameGenerator.GenerateName 63 nonAlphaNumericRegexp = regexp.MustCompile(`[^a-zA-Z0-9]`) 64 ) 65 66 // newKubeClient configures a client to talk to the cluster defined by KUBECONFIG 67 func newKubeClient() operatorclient.ClientInterface { 68 return ctx.Ctx().KubeClient() 69 } 70 71 func newCRClient() versioned.Interface { 72 return ctx.Ctx().OperatorClient() 73 } 74 75 func newDynamicClient(t GinkgoTInterface, config *rest.Config) dynamic.Interface { 76 return ctx.Ctx().DynamicClient() 77 } 78 79 func newPMClient() pmversioned.Interface { 80 return ctx.Ctx().PackageClient() 81 } 82 83 // objectRefToNamespacedName is a helper function that's responsible for translating 84 // a *corev1.ObjectReference into a types.NamespacedName. 85 func objectRefToNamespacedName(ip *corev1.ObjectReference) types.NamespacedName { 86 return types.NamespacedName{ 87 Name: ip.Name, 88 Namespace: ip.Namespace, 89 } 90 } 91 92 // addBundleUnpackTimeoutOGAnnotation is a helper function that's responsible for 93 // adding the "operatorframework.io/bundle-unpack-timeout" annotation to an OperatorGroup 94 // resource. 
95 func addBundleUnpackTimeoutOGAnnotation(ctx context.Context, c k8scontrollerclient.Client, ogNN types.NamespacedName, timeout string) { 96 setOGAnnotation(ctx, c, ogNN, bundle.BundleUnpackTimeoutAnnotationKey, timeout) 97 } 98 99 func setBundleUnpackRetryMinimumIntervalAnnotation(ctx context.Context, c k8scontrollerclient.Client, ogNN types.NamespacedName, interval string) { 100 setOGAnnotation(ctx, c, ogNN, bundle.BundleUnpackRetryMinimumIntervalAnnotationKey, interval) 101 } 102 103 func setOGAnnotation(ctx context.Context, c k8scontrollerclient.Client, ogNN types.NamespacedName, key, value string) { 104 Eventually(func() error { 105 og := &operatorsv1.OperatorGroup{} 106 if err := c.Get(ctx, ogNN, og); err != nil { 107 return err 108 } 109 annotations := og.GetAnnotations() 110 if len(value) == 0 { 111 delete(annotations, key) 112 } else { 113 annotations[key] = value 114 } 115 og.SetAnnotations(annotations) 116 117 return c.Update(ctx, og) 118 }).Should(Succeed()) 119 } 120 121 type cleanupFunc func() 122 123 // waitFor wraps wait.Pool with default polling parameters 124 func waitFor(fn func() (bool, error)) error { 125 return wait.Poll(pollInterval, pollDuration, func() (bool, error) { 126 return fn() 127 }) 128 } 129 130 // awaitPods waits for a set of pods to exist in the cluster 131 func awaitPods(t GinkgoTInterface, c operatorclient.ClientInterface, namespace, selector string, checkPods podsCheckFunc) (*corev1.PodList, error) { 132 var fetchedPodList *corev1.PodList 133 var err error 134 135 err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { 136 fetchedPodList, err = c.KubernetesInterface().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ 137 LabelSelector: selector, 138 }) 139 140 if err != nil { 141 return false, err 142 } 143 144 t.Logf("Waiting for pods matching selector %s to match given conditions", selector) 145 146 return checkPods(fetchedPodList), nil 147 }) 148 149 require.NoError(t, err) 150 return 
fetchedPodList, err 151 } 152 153 func awaitPodsWithInterval(t GinkgoTInterface, c operatorclient.ClientInterface, namespace, selector string, interval time.Duration, 154 duration time.Duration, checkPods podsCheckFunc) (*corev1.PodList, error) { 155 var fetchedPodList *corev1.PodList 156 var err error 157 158 err = wait.Poll(interval, duration, func() (bool, error) { 159 fetchedPodList, err = c.KubernetesInterface().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ 160 LabelSelector: selector, 161 }) 162 163 if err != nil { 164 return false, err 165 } 166 167 t.Logf("Waiting for pods matching selector %s to match given conditions", selector) 168 169 return checkPods(fetchedPodList), nil 170 }) 171 172 require.NoError(t, err) 173 return fetchedPodList, err 174 } 175 176 // podsCheckFunc describes a function that true if the given PodList meets some criteria; false otherwise. 177 type podsCheckFunc func(pods *corev1.PodList) bool 178 179 // unionPodsCheck returns a podsCheckFunc that represents the union of the given podsCheckFuncs. 180 func unionPodsCheck(checks ...podsCheckFunc) podsCheckFunc { 181 return func(pods *corev1.PodList) bool { 182 for _, check := range checks { 183 if !check(pods) { 184 return false 185 } 186 } 187 188 return true 189 } 190 } 191 192 // podCount returns a podsCheckFunc that returns true if a PodList is of length count; false otherwise. 193 func podCount(count int) podsCheckFunc { 194 return func(pods *corev1.PodList) bool { 195 return len(pods.Items) == count 196 } 197 } 198 199 // podsReady returns true if all of the pods in the given PodList have a ready condition with ConditionStatus "True"; false otherwise. 200 func podsReady(pods *corev1.PodList) bool { 201 for _, pod := range pods.Items { 202 if !podReady(&pod) { 203 return false 204 } 205 } 206 207 return true 208 } 209 210 // podCheckFunc describes a function that returns true if the given Pod meets some criteria; false otherwise. 
211 type podCheckFunc func(pod *corev1.Pod) bool 212 213 // hasPodIP returns true if the given Pod has a PodIP. 214 func hasPodIP(pod *corev1.Pod) bool { 215 return pod.Status.PodIP != "" 216 } 217 218 // podReady returns true if the given Pod has a ready condition with ConditionStatus "True"; false otherwise. 219 func podReady(pod *corev1.Pod) bool { 220 var status corev1.ConditionStatus 221 for _, condition := range pod.Status.Conditions { 222 if condition.Type != corev1.PodReady { 223 // Ignore all condition other than PodReady 224 continue 225 } 226 227 // Found PodReady condition 228 status = condition.Status 229 break 230 } 231 232 return status == corev1.ConditionTrue 233 } 234 235 func awaitPod(t GinkgoTInterface, c operatorclient.ClientInterface, namespace, name string, checkPod podCheckFunc) *corev1.Pod { 236 var pod *corev1.Pod 237 err := wait.Poll(pollInterval, pollDuration, func() (bool, error) { 238 p, err := c.KubernetesInterface().CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) 239 if err != nil { 240 return false, err 241 } 242 pod = p 243 return checkPod(pod), nil 244 }) 245 require.NoError(t, err) 246 247 return pod 248 } 249 250 func awaitAnnotations(t GinkgoTInterface, query func() (metav1.ObjectMeta, error), expected map[string]string) error { 251 err := wait.Poll(pollInterval, pollDuration, func() (bool, error) { 252 t.Logf("Waiting for annotations to match %v", expected) 253 obj, err := query() 254 if err != nil && !apierrors.IsNotFound(err) { 255 return false, err 256 } 257 t.Logf("current annotations: %v", obj.GetAnnotations()) 258 259 if len(obj.GetAnnotations()) != len(expected) { 260 return false, nil 261 } 262 263 for key, value := range expected { 264 if v, ok := obj.GetAnnotations()[key]; !ok || v != value { 265 return false, nil 266 } 267 } 268 269 t.Logf("Annotations match") 270 return true, nil 271 }) 272 273 return err 274 } 275 276 type checkResourceFunc func() error 277 278 func 
waitForDelete(checkResource checkResourceFunc) error { 279 err := wait.Poll(pollInterval, pollDuration, func() (bool, error) { 280 err := checkResource() 281 if apierrors.IsNotFound(err) { 282 return true, nil 283 } 284 if err != nil { 285 return false, err 286 } 287 return false, nil 288 }) 289 290 return err 291 } 292 293 func waitForEmptyList(checkList func() (int, error)) error { 294 err := wait.Poll(pollInterval, pollDuration, func() (bool, error) { 295 count, err := checkList() 296 if err != nil { 297 return false, err 298 } 299 if count == 0 { 300 return true, nil 301 } 302 return false, nil 303 }) 304 305 return err 306 } 307 308 func waitForGVR(dynamicClient dynamic.Interface, gvr schema.GroupVersionResource, name, namespace string) error { 309 return wait.Poll(pollInterval, pollDuration, func() (bool, error) { 310 _, err := dynamicClient.Resource(gvr).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{}) 311 if err != nil { 312 if apierrors.IsNotFound(err) { 313 return false, nil 314 } 315 return false, err 316 } 317 return true, nil 318 }) 319 } 320 321 type catalogSourceCheckFunc func(*operatorsv1alpha1.CatalogSource) bool 322 323 // This check is disabled for most test runs, but can be enabled for verifying pod health if the e2e tests are running 324 // in the same kubernetes cluster as the registry pods (currently this only happens with e2e-local-docker) 325 var checkPodHealth = false 326 327 func registryPodHealthy(address string) bool { 328 if !checkPodHealth { 329 fmt.Println("skipping health check") 330 return true 331 } 332 333 conn, err := grpc.Dial(address, grpc.WithTransportCredentials(grpcinsecure.NewCredentials())) 334 if err != nil { 335 fmt.Printf("error connecting: %s\n", err.Error()) 336 return false 337 } 338 health := grpc_health_v1.NewHealthClient(conn) 339 res, err := health.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{Service: "Registry"}) 340 if err != nil { 341 fmt.Printf("error connecting: 
%s\n", err.Error()) 342 return false 343 } 344 if res.Status != grpc_health_v1.HealthCheckResponse_SERVING { 345 fmt.Printf("not healthy: %s\n", res.Status.String()) 346 return false 347 } 348 return true 349 } 350 351 func catalogSourceRegistryPodSynced() func(catalog *operatorsv1alpha1.CatalogSource) bool { 352 var lastState string 353 lastTime := time.Now() 354 return func(catalog *operatorsv1alpha1.CatalogSource) bool { 355 registry := catalog.Status.RegistryServiceStatus 356 connState := catalog.Status.GRPCConnectionState 357 state := "NO_CONNECTION" 358 if connState != nil { 359 state = connState.LastObservedState 360 } 361 if state != lastState { 362 fmt.Printf("waiting %s for catalog pod %s/%s to be available (for sync) - %s\n", time.Since(lastTime), catalog.GetNamespace(), catalog.GetName(), state) 363 lastState = state 364 lastTime = time.Now() 365 } 366 if registry != nil && connState != nil && !connState.LastConnectTime.IsZero() && connState.LastObservedState == "READY" { 367 fmt.Printf("probing catalog %s pod with address %s\n", catalog.GetName(), registry.Address()) 368 return registryPodHealthy(registry.Address()) 369 } 370 return false 371 } 372 } 373 374 func catalogSourceInvalidSpec(catalog *operatorsv1alpha1.CatalogSource) bool { 375 return catalog.Status.Reason == operatorsv1alpha1.CatalogSourceSpecInvalidError 376 } 377 378 func fetchCatalogSourceOnStatus(crc versioned.Interface, name, namespace string, check catalogSourceCheckFunc) (*operatorsv1alpha1.CatalogSource, error) { 379 var fetched *operatorsv1alpha1.CatalogSource 380 var err error 381 382 err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { 383 fetched, err = crc.OperatorsV1alpha1().CatalogSources(namespace).Get(context.Background(), name, metav1.GetOptions{}) 384 if err != nil || fetched == nil { 385 err = fmt.Errorf("failed to fetch catalogSource %s/%s: %v\n", namespace, name, err) 386 fmt.Println(err.Error()) 387 return false, err 388 } 389 return check(fetched), nil 
390 }) 391 392 if err != nil { 393 err = fmt.Errorf("failed to wati for catalog source to reach intended state: %w", err) 394 } 395 return fetched, err 396 } 397 398 // createFieldNotEqualSelector generates a field selector that matches resources that have a field value that DOES NOT match any of a set of values. 399 // This function panics if the generated selector cannot be parsed. 400 func createFieldNotEqualSelector(field string, values ...string) fields.Selector { 401 var builder strings.Builder 402 for i, value := range values { 403 builder.WriteString(field) 404 builder.WriteString("!=") 405 builder.WriteString(value) 406 if i < len(values)-1 { 407 builder.WriteString(",") 408 } 409 } 410 411 selector, err := fields.ParseSelector(builder.String()) 412 if err != nil { 413 panic(fmt.Errorf("failed to build fields-not-equal selector: %s", err)) 414 } 415 416 return selector 417 } 418 419 // MaskNotFound "masks" an given error by returning nil when it refers to a "NotFound" API status response, otherwise returns the error unaltered. 420 func MaskNotFound(err error) error { 421 if apierrors.IsNotFound(err) { 422 return nil 423 } 424 425 return err 426 } 427 428 var ( 429 persistentCatalogNames = []string{olmConfigMap} 430 ephemeralCatalogFieldSelector = k8scontrollerclient.MatchingFieldsSelector{ 431 Selector: createFieldNotEqualSelector("metadata.name", persistentCatalogNames...), 432 } 433 persistentConfigMapNames = []string{olmConfigMap} 434 ephemeralConfigMapsFieldSelector = k8scontrollerclient.MatchingFieldsSelector{ 435 Selector: createFieldNotEqualSelector("metadata.name", persistentConfigMapNames...), 436 } 437 persistentCSVNames = []string{packageServerCSV} 438 ephemeralCSVFieldSelector = k8scontrollerclient.MatchingFieldsSelector{ 439 Selector: createFieldNotEqualSelector("metadata.name", persistentCSVNames...), 440 } 441 ) 442 443 // TearDown deletes all OLM resources in the corresponding namespace and at the cluster scope. 
444 func TearDown(namespace string) { 445 if env := os.Getenv("SKIP_CLEANUP"); env != "" { 446 fmt.Printf("Skipping cleanup of namespace %s...\n", namespace) 447 return 448 } 449 var ( 450 clientCtx = context.Background() 451 client = ctx.Ctx().Client() 452 dynamic = ctx.Ctx().DynamicClient() 453 inNamespace = k8scontrollerclient.InNamespace(namespace) 454 logf = ctx.Ctx().Logf 455 ) 456 457 // Cleanup non persistent OLM CRs 458 logf("cleaning up ephemeral test resources...") 459 460 logf("deleting test subscriptions...") 461 Eventually(func() error { 462 return client.DeleteAllOf(clientCtx, &operatorsv1alpha1.Subscription{}, inNamespace) 463 }).Should(Succeed(), "failed to delete test subscriptions") 464 465 var subscriptiongvr = schema.GroupVersionResource{Group: "operators.coreos.com", Version: "v1alpha1", Resource: "subscriptions"} 466 Eventually(func() ([]unstructured.Unstructured, error) { 467 list, err := dynamic.Resource(subscriptiongvr).Namespace(namespace).List(context.Background(), metav1.ListOptions{}) 468 if err != nil { 469 return nil, err 470 } 471 return list.Items, nil 472 }).Should(BeEmpty(), "failed to await deletion of test subscriptions") 473 474 logf("deleting test installplans...") 475 Eventually(func() error { 476 return client.DeleteAllOf(clientCtx, &operatorsv1alpha1.InstallPlan{}, inNamespace) 477 }).Should(Succeed(), "failed to delete test installplans") 478 479 Eventually(func() (remaining []operatorsv1alpha1.InstallPlan, err error) { 480 list := &operatorsv1alpha1.InstallPlanList{} 481 err = client.List(clientCtx, list, inNamespace) 482 if list != nil { 483 remaining = list.Items 484 } 485 486 return 487 }).Should(BeEmpty(), "failed to await deletion of test installplans") 488 489 logf("deleting test catalogsources...") 490 Eventually(func() error { 491 return client.DeleteAllOf(clientCtx, &operatorsv1alpha1.CatalogSource{}, inNamespace, ephemeralCatalogFieldSelector) 492 }).Should(Succeed(), "failed to delete test catalogsources") 493 
494 Eventually(func() (remaining []operatorsv1alpha1.CatalogSource, err error) { 495 list := &operatorsv1alpha1.CatalogSourceList{} 496 err = client.List(clientCtx, list, inNamespace, ephemeralCatalogFieldSelector) 497 if list != nil { 498 remaining = list.Items 499 } 500 501 return 502 }).Should(BeEmpty(), "failed to await deletion of test catalogsources") 503 504 logf("deleting test crds...") 505 remainingCSVs := func() (csvs []operatorsv1alpha1.ClusterServiceVersion, err error) { 506 list := &operatorsv1alpha1.ClusterServiceVersionList{} 507 err = client.List(clientCtx, list, inNamespace, ephemeralCSVFieldSelector) 508 if list != nil { 509 csvs = list.Items 510 } 511 512 return 513 } 514 515 var crds []apiextensionsv1.CustomResourceDefinition 516 Eventually(func() error { 517 csvs, err := remainingCSVs() 518 if err != nil { 519 return err 520 } 521 522 for _, csv := range csvs { 523 for _, desc := range csv.Spec.CustomResourceDefinitions.Owned { 524 crd := &apiextensionsv1.CustomResourceDefinition{} 525 err := client.Get(clientCtx, types.NamespacedName{Name: desc.Name}, crd) 526 if apierrors.IsNotFound(err) { 527 continue 528 } 529 if err != nil { 530 return err 531 } 532 crds = append(crds, *crd) 533 } 534 } 535 536 return nil 537 }).Should(Succeed(), "failed to aggregate test crds for deletion") 538 539 Eventually(func() error { 540 for _, crd := range crds { 541 // Note: NotFound errors will be masked, so we can simply iterate until no other errors are returned. 542 // This is pretty inefficient, so if we're concerned about the number of API calls, we should replace this with something more sparing. 
543 if err := client.Delete(clientCtx, &crd); MaskNotFound(err) != nil { 544 return err 545 } 546 } 547 548 return nil 549 }).Should(Succeed(), "failed to delete test crds") 550 551 logf("deleting test csvs...") 552 Eventually(func() error { 553 return client.DeleteAllOf(clientCtx, &operatorsv1alpha1.ClusterServiceVersion{}, inNamespace, ephemeralCSVFieldSelector) 554 }).Should(Succeed(), "failed to delete test csvs") 555 556 Eventually(remainingCSVs).Should(BeEmpty(), "failed to await deletion of test csvs") 557 558 logf("test resources deleted") 559 } 560 561 func buildCatalogSourceCleanupFunc(c operatorclient.ClientInterface, crc versioned.Interface, namespace string, catalogSource *operatorsv1alpha1.CatalogSource) cleanupFunc { 562 return func() { 563 if env := os.Getenv("SKIP_CLEANUP"); env != "" { 564 fmt.Printf("Skipping cleanup of CatalogSource %s/%s...\n", namespace, catalogSource.GetName()) 565 return 566 } 567 ctx.Ctx().Logf("Deleting catalog source %s...", catalogSource.GetName()) 568 err := crc.OperatorsV1alpha1().CatalogSources(namespace).Delete(context.Background(), catalogSource.GetName(), metav1.DeleteOptions{}) 569 if err != nil && !apierrors.IsNotFound(err) { 570 Expect(err).ToNot(HaveOccurred()) 571 } 572 573 Eventually(func() (bool, error) { 574 listOpts := metav1.ListOptions{ 575 LabelSelector: "olm.catalogSource=" + catalogSource.GetName(), 576 FieldSelector: "status.phase=Running", 577 } 578 fetched, err := c.KubernetesInterface().CoreV1().Pods(catalogSource.GetNamespace()).List(context.Background(), listOpts) 579 if err != nil { 580 return false, err 581 } 582 if len(fetched.Items) == 0 { 583 return true, nil 584 } 585 ctx.Ctx().Logf("waiting for the catalog source %s pod to be deleted...", fetched.Items[0].GetName()) 586 return false, nil 587 }).Should(BeTrue()) 588 } 589 } 590 591 func buildConfigMapCleanupFunc(c operatorclient.ClientInterface, namespace string, configMap *corev1.ConfigMap) cleanupFunc { 592 return func() { 593 if env := 
os.Getenv("SKIP_CLEANUP"); env != "" { 594 fmt.Printf("Skipping cleanup of ConfigMap %s/%s...\n", namespace, configMap.GetName()) 595 return 596 } 597 ctx.Ctx().Logf("Deleting config map %s...", configMap.GetName()) 598 err := c.KubernetesInterface().CoreV1().ConfigMaps(namespace).Delete(context.Background(), configMap.GetName(), metav1.DeleteOptions{}) 599 if err != nil && !apierrors.IsNotFound(err) { 600 Expect(err).ToNot(HaveOccurred()) 601 } 602 } 603 } 604 605 func buildServiceAccountCleanupFunc(t GinkgoTInterface, c operatorclient.ClientInterface, namespace string, serviceAccount *corev1.ServiceAccount) cleanupFunc { 606 return func() { 607 if env := os.Getenv("SKIP_CLEANUP"); env != "" { 608 fmt.Printf("Skipping cleanup of ServiceAccount %s/%s...\n", namespace, serviceAccount.GetName()) 609 return 610 } 611 t.Logf("Deleting service account %s...", serviceAccount.GetName()) 612 require.NoError(t, c.KubernetesInterface().CoreV1().ServiceAccounts(namespace).Delete(context.Background(), serviceAccount.GetName(), metav1.DeleteOptions{})) 613 } 614 } 615 616 func createInvalidGRPCCatalogSource(c operatorclient.ClientInterface, crc versioned.Interface, name, namespace string) (*operatorsv1alpha1.CatalogSource, cleanupFunc) { 617 catalogSource := &operatorsv1alpha1.CatalogSource{ 618 TypeMeta: metav1.TypeMeta{ 619 Kind: operatorsv1alpha1.CatalogSourceKind, 620 APIVersion: operatorsv1alpha1.CatalogSourceCRDAPIVersion, 621 }, 622 ObjectMeta: metav1.ObjectMeta{ 623 Name: name, 624 Namespace: namespace, 625 }, 626 Spec: operatorsv1alpha1.CatalogSourceSpec{ 627 SourceType: "grpc", 628 Image: "localhost:0/not/exists:catsrc", 629 GrpcPodConfig: &operatorsv1alpha1.GrpcPodConfig{ 630 SecurityContextConfig: operatorsv1alpha1.Restricted, 631 }, 632 }, 633 } 634 635 ctx.Ctx().Logf("Creating catalog source %s in namespace %s...", name, namespace) 636 catalogSource, err := crc.OperatorsV1alpha1().CatalogSources(namespace).Create(context.Background(), catalogSource, 
metav1.CreateOptions{}) 637 Expect(err).ToNot(HaveOccurred()) 638 ctx.Ctx().Logf("Catalog source %s created", name) 639 return catalogSource, buildCatalogSourceCleanupFunc(c, crc, namespace, catalogSource) 640 } 641 642 func createInternalCatalogSource( 643 c operatorclient.ClientInterface, 644 crc versioned.Interface, 645 name, 646 namespace string, 647 manifests []registry.PackageManifest, 648 crds []apiextensionsv1.CustomResourceDefinition, 649 csvs []operatorsv1alpha1.ClusterServiceVersion, 650 ) (*operatorsv1alpha1.CatalogSource, cleanupFunc) { 651 configMap, configMapCleanup := createConfigMapForCatalogData(c, name, namespace, manifests, crds, csvs) 652 653 // Create an internal CatalogSource custom resource pointing to the ConfigMap 654 catalogSource := &operatorsv1alpha1.CatalogSource{ 655 TypeMeta: metav1.TypeMeta{ 656 Kind: operatorsv1alpha1.CatalogSourceKind, 657 APIVersion: operatorsv1alpha1.CatalogSourceCRDAPIVersion, 658 }, 659 ObjectMeta: metav1.ObjectMeta{ 660 Name: name, 661 Namespace: namespace, 662 }, 663 Spec: operatorsv1alpha1.CatalogSourceSpec{ 664 SourceType: "internal", 665 ConfigMap: configMap.GetName(), 666 GrpcPodConfig: &operatorsv1alpha1.GrpcPodConfig{ 667 SecurityContextConfig: operatorsv1alpha1.Restricted, 668 }, 669 }, 670 } 671 672 ctx.Ctx().Logf("Creating catalog source %s in namespace %s...", name, namespace) 673 catalogSource, err := crc.OperatorsV1alpha1().CatalogSources(namespace).Create(context.Background(), catalogSource, metav1.CreateOptions{}) 674 if err != nil && !apierrors.IsAlreadyExists(err) { 675 Expect(err).ToNot(HaveOccurred()) 676 } 677 ctx.Ctx().Logf("Catalog source %s created", name) 678 679 cleanupInternalCatalogSource := func() { 680 ctx.Ctx().Logf("Cleaning catalog source %s", name) 681 configMapCleanup() 682 buildCatalogSourceCleanupFunc(c, crc, namespace, catalogSource)() 683 ctx.Ctx().Logf("Done cleaning catalog source %s", name) 684 } 685 return catalogSource, cleanupInternalCatalogSource 686 } 687 688 func 
createInternalCatalogSourceWithPriority(c operatorclient.ClientInterface, 689 crc versioned.Interface, 690 name, 691 namespace string, 692 manifests []registry.PackageManifest, 693 crds []apiextensionsv1.CustomResourceDefinition, 694 csvs []operatorsv1alpha1.ClusterServiceVersion, 695 priority int, 696 ) (*operatorsv1alpha1.CatalogSource, cleanupFunc) { 697 configMap, configMapCleanup := createConfigMapForCatalogData(c, name, namespace, manifests, crds, csvs) 698 // Create an internal CatalogSource custom resource pointing to the ConfigMap 699 catalogSource := &operatorsv1alpha1.CatalogSource{ 700 TypeMeta: metav1.TypeMeta{ 701 Kind: operatorsv1alpha1.CatalogSourceKind, 702 APIVersion: operatorsv1alpha1.CatalogSourceCRDAPIVersion, 703 }, 704 ObjectMeta: metav1.ObjectMeta{ 705 Name: name, 706 Namespace: namespace, 707 }, 708 Spec: operatorsv1alpha1.CatalogSourceSpec{ 709 SourceType: "internal", 710 ConfigMap: configMap.GetName(), 711 Priority: priority, 712 GrpcPodConfig: &operatorsv1alpha1.GrpcPodConfig{ 713 SecurityContextConfig: operatorsv1alpha1.Restricted, 714 }, 715 }, 716 } 717 catalogSource.SetNamespace(namespace) 718 719 ctx.Ctx().Logf("Creating catalog source %s in namespace %s...", name, namespace) 720 catalogSource, err := crc.OperatorsV1alpha1().CatalogSources(namespace).Create(context.Background(), catalogSource, metav1.CreateOptions{}) 721 if err != nil && !apierrors.IsAlreadyExists(err) { 722 Expect(err).ToNot(HaveOccurred()) 723 } 724 ctx.Ctx().Logf("Catalog source %s created", name) 725 726 cleanupInternalCatalogSource := func() { 727 configMapCleanup() 728 buildCatalogSourceCleanupFunc(c, crc, namespace, catalogSource)() 729 } 730 return catalogSource, cleanupInternalCatalogSource 731 } 732 733 func createV1CRDInternalCatalogSource( 734 t GinkgoTInterface, 735 c operatorclient.ClientInterface, 736 crc versioned.Interface, 737 name, 738 namespace string, 739 manifests []registry.PackageManifest, 740 crds []apiextensionsv1.CustomResourceDefinition, 
741 csvs []operatorsv1alpha1.ClusterServiceVersion, 742 ) (*operatorsv1alpha1.CatalogSource, cleanupFunc) { 743 configMap, configMapCleanup := createV1CRDConfigMapForCatalogData(t, c, name, namespace, manifests, crds, csvs) 744 745 // Create an internal CatalogSource custom resource pointing to the ConfigMap 746 catalogSource := &operatorsv1alpha1.CatalogSource{ 747 TypeMeta: metav1.TypeMeta{ 748 Kind: operatorsv1alpha1.CatalogSourceKind, 749 APIVersion: operatorsv1alpha1.CatalogSourceCRDAPIVersion, 750 }, 751 ObjectMeta: metav1.ObjectMeta{ 752 Name: name, 753 Namespace: namespace, 754 }, 755 Spec: operatorsv1alpha1.CatalogSourceSpec{ 756 SourceType: "internal", 757 ConfigMap: configMap.GetName(), 758 GrpcPodConfig: &operatorsv1alpha1.GrpcPodConfig{ 759 SecurityContextConfig: operatorsv1alpha1.Restricted, 760 }, 761 }, 762 } 763 catalogSource.SetNamespace(namespace) 764 765 ctx.Ctx().Logf("Creating catalog source %s in namespace %s...", name, namespace) 766 catalogSource, err := crc.OperatorsV1alpha1().CatalogSources(namespace).Create(context.Background(), catalogSource, metav1.CreateOptions{}) 767 if err != nil && !apierrors.IsAlreadyExists(err) { 768 require.NoError(t, err) 769 } 770 ctx.Ctx().Logf("Catalog source %s created", name) 771 772 cleanupInternalCatalogSource := func() { 773 configMapCleanup() 774 buildCatalogSourceCleanupFunc(c, crc, namespace, catalogSource)() 775 } 776 return catalogSource, cleanupInternalCatalogSource 777 } 778 779 func createConfigMapForCatalogData( 780 c operatorclient.ClientInterface, 781 name, 782 namespace string, 783 manifests []registry.PackageManifest, 784 crds []apiextensionsv1.CustomResourceDefinition, 785 csvs []operatorsv1alpha1.ClusterServiceVersion, 786 ) (*corev1.ConfigMap, cleanupFunc) { 787 // Create a config map containing the PackageManifests and CSVs 788 configMapName := fmt.Sprintf("%s-configmap", name) 789 catalogConfigMap := &corev1.ConfigMap{ 790 ObjectMeta: metav1.ObjectMeta{ 791 Name: configMapName, 792 
Namespace: namespace, 793 Labels: map[string]string{install.OLMManagedLabelKey: install.OLMManagedLabelValue}, 794 }, 795 Data: map[string]string{}, 796 } 797 catalogConfigMap.SetNamespace(namespace) 798 799 // Add raw manifests 800 if manifests != nil { 801 manifestsRaw, err := yaml.Marshal(manifests) 802 Expect(err).ToNot(HaveOccurred()) 803 catalogConfigMap.Data[registry.ConfigMapPackageName] = string(manifestsRaw) 804 } 805 806 // Add raw CRDs 807 var crdsRaw []byte 808 if crds != nil { 809 crdStrings := []string{} 810 for _, crd := range crds { 811 crdStrings = append(crdStrings, serializeCRD(crd)) 812 } 813 var err error 814 crdsRaw, err = yaml.Marshal(crdStrings) 815 Expect(err).ToNot(HaveOccurred()) 816 } 817 catalogConfigMap.Data[registry.ConfigMapCRDName] = strings.Replace(string(crdsRaw), "- |\n ", "- ", -1) 818 819 // Add raw CSVs 820 if csvs != nil { 821 csvsRaw, err := yaml.Marshal(csvs) 822 Expect(err).ToNot(HaveOccurred()) 823 catalogConfigMap.Data[registry.ConfigMapCSVName] = string(csvsRaw) 824 } 825 826 createdConfigMap, err := c.KubernetesInterface().CoreV1().ConfigMaps(namespace).Create(context.Background(), catalogConfigMap, metav1.CreateOptions{}) 827 if err != nil && !apierrors.IsAlreadyExists(err) { 828 Expect(err).ToNot(HaveOccurred()) 829 } 830 return createdConfigMap, buildConfigMapCleanupFunc(c, namespace, createdConfigMap) 831 } 832 833 func createV1CRDConfigMapForCatalogData( 834 t GinkgoTInterface, 835 c operatorclient.ClientInterface, 836 name, 837 namespace string, 838 manifests []registry.PackageManifest, 839 crds []apiextensionsv1.CustomResourceDefinition, 840 csvs []operatorsv1alpha1.ClusterServiceVersion, 841 ) (*corev1.ConfigMap, cleanupFunc) { 842 // Create a config map containing the PackageManifests and CSVs 843 configMapName := fmt.Sprintf("%s-configmap", name) 844 catalogConfigMap := &corev1.ConfigMap{ 845 ObjectMeta: metav1.ObjectMeta{ 846 Name: configMapName, 847 Namespace: namespace, 848 Labels: 
			// Mark the ConfigMap as OLM-managed; presumably so OLM's ownership/
			// garbage-collection logic treats it like other managed resources —
			// TODO(review): confirm against the install package.
			map[string]string{install.OLMManagedLabelKey: install.OLMManagedLabelValue},
		},
		Data: map[string]string{},
	}
	catalogConfigMap.SetNamespace(namespace)

	// Add raw manifests under the well-known package key.
	if manifests != nil {
		manifestsRaw, err := yaml.Marshal(manifests)
		require.NoError(t, err)
		catalogConfigMap.Data[registry.ConfigMapPackageName] = string(manifestsRaw)
	}

	// Add raw CRDs: serialize each CRD to YAML individually, then marshal the
	// resulting strings as a YAML list.
	var crdsRaw []byte
	if crds != nil {
		crdStrings := []string{}
		for _, crd := range crds {
			// Taking &crd is safe here: serializeV1CRD encodes the CRD
			// immediately and does not retain the pointer.
			crdStrings = append(crdStrings, serializeV1CRD(t, &crd))
		}
		var err error
		crdsRaw, err = yaml.Marshal(crdStrings)
		require.NoError(t, err)
	}
	// Flatten YAML block-scalar list entries ("- |" + indented body) into
	// plain inline list items before storing under the CRD key.
	catalogConfigMap.Data[registry.ConfigMapCRDName] = strings.Replace(string(crdsRaw), "- |\n  ", "- ", -1)

	// Add raw CSVs under the CSV key.
	if csvs != nil {
		csvsRaw, err := yaml.Marshal(csvs)
		require.NoError(t, err)
		catalogConfigMap.Data[registry.ConfigMapCSVName] = string(csvsRaw)
	}

	// Create the ConfigMap. AlreadyExists is tolerated silently; note that in
	// that case createdConfigMap is nil (the pre-existing object is NOT
	// fetched), so callers racing on creation receive a nil map.
	createdConfigMap, err := c.KubernetesInterface().CoreV1().ConfigMaps(namespace).Create(context.Background(), catalogConfigMap, metav1.CreateOptions{})
	if err != nil && !apierrors.IsAlreadyExists(err) {
		require.NoError(t, err)
	}
	return createdConfigMap, buildConfigMapCleanupFunc(c, namespace, createdConfigMap)
}

// serializeCRD converts a v1 CustomResourceDefinition through a freshly built
// runtime scheme and returns its YAML manifest. Any registration, conversion,
// or encoding failure fails the current Ginkgo spec via Expect.
func serializeCRD(crd apiextensionsv1.CustomResourceDefinition) string {
	scheme := runtime.NewScheme()

	// Register all schemes used for conversion and serialization; failures
	// here indicate programmer error, so abort the spec immediately.
	Expect(extScheme.AddToScheme(scheme)).Should(Succeed())
	Expect(k8sscheme.AddToScheme(scheme)).Should(Succeed())
	Expect(apiextensionsv1.AddToScheme(scheme)).Should(Succeed())

	out := &apiextensionsv1.CustomResourceDefinition{}
	Expect(scheme.Convert(&crd, out, nil)).To(Succeed())
	// Set TypeMeta explicitly so the encoded manifest carries apiVersion/kind.
	out.TypeMeta = metav1.TypeMeta{
		Kind:       "CustomResourceDefinition",
		APIVersion: "apiextensions.k8s.io/v1",
	}

	// set up object serializer
	serializer := k8sjson.NewYAMLSerializer(k8sjson.DefaultMetaFactory, scheme, scheme)

	// create an object manifest
	var manifest bytes.Buffer
	Expect(serializer.Encode(out, &manifest)).To(Succeed())
	return manifest.String()
}

// serializeV1CRD returns the YAML manifest for the given v1
// CustomResourceDefinition, failing the test (via require) on any scheme
// registration or encoding error.
func serializeV1CRD(t GinkgoTInterface, crd *apiextensionsv1.CustomResourceDefinition) string {
	scheme := runtime.NewScheme()
	require.NoError(t, apiextensionsv1.AddToScheme(scheme))

	// set up object serializer
	serializer := k8sjson.NewYAMLSerializer(k8sjson.DefaultMetaFactory, scheme, scheme)

	// create an object manifest
	var manifest bytes.Buffer
	require.NoError(t, serializer.Encode(crd, &manifest))
	return manifest.String()
}

// createCR creates the given custom resource on the cluster and, on success,
// returns a cleanup function that deletes it and waits for the deletion to
// complete.
func createCR(c operatorclient.ClientInterface, item *unstructured.Unstructured, apiGroup, version, namespace, resourceKind, resourceName string) (cleanupFunc, error) {
	err := c.CreateCustomResource(item)
	if err != nil {
		return nil, err
	}
	return buildCRCleanupFunc(c, apiGroup, version, namespace, resourceKind, resourceName), nil
}

// buildCRCleanupFunc returns a cleanup function that deletes the identified
// custom resource and blocks until it is gone. Cleanup is skipped entirely
// when the SKIP_CLEANUP environment variable is set (useful for debugging
// failed runs). Deletion errors are printed, not fatal, so cleanup is
// best-effort.
func buildCRCleanupFunc(c operatorclient.ClientInterface, apiGroup, version, namespace, resourceKind, resourceName string) cleanupFunc {
	return func() {
		if env := os.Getenv("SKIP_CLEANUP"); env != "" {
			fmt.Printf("Skipping cleanup of custom resource %s.%s/%s %s/%s...\n", apiGroup, resourceKind, version, namespace, resourceName)
			return
		}
		err := c.DeleteCustomResource(apiGroup, version, namespace, resourceKind, resourceName)
		if err != nil {
			fmt.Println(err)
		}

		// Poll until the resource can no longer be fetched, i.e. deletion has
		// actually completed.
		waitForDelete(func() error {
			_, err := c.GetCustomResource(apiGroup, version, namespace, resourceKind, resourceName)
			return err
		})
	}
}

// Local determines whether test is running locally or in a container on openshift-CI.
// Queries for a clusterversion object specific to OpenShift.
952 func Local(client operatorclient.ClientInterface) (bool, error) { 953 const ClusterVersionGroup = "config.openshift.io" 954 const ClusterVersionVersion = "v1" 955 const ClusterVersionKind = "ClusterVersion" 956 gv := metav1.GroupVersion{Group: ClusterVersionGroup, Version: ClusterVersionVersion}.String() 957 958 groups, err := client.KubernetesInterface().Discovery().ServerResourcesForGroupVersion(gv) 959 if err != nil { 960 if apierrors.IsNotFound(err) { 961 return true, nil 962 } 963 return true, fmt.Errorf("checking if cluster is local: checking server groups: %s", err) 964 } 965 966 for _, group := range groups.APIResources { 967 if group.Kind == ClusterVersionKind { 968 return false, nil 969 } 970 } 971 972 return true, nil 973 } 974 975 // predicateFunc is a predicate for watch events. 976 type predicateFunc func(event watch.Event) (met bool) 977 978 // awaitPredicates waits for all predicates to be met by events of a watch in the order given. 979 func awaitPredicates(ctx context.Context, w watch.Interface, fns ...predicateFunc) { 980 if len(fns) < 1 { 981 panic("no predicates given to await") 982 } 983 984 i := 0 985 for i < len(fns) { 986 select { 987 case <-ctx.Done(): 988 Expect(ctx.Err()).ToNot(HaveOccurred()) 989 return 990 case event, ok := <-w.ResultChan(): 991 if !ok { 992 return 993 } 994 995 if fns[i](event) { 996 i++ 997 } 998 } 999 } 1000 } 1001 1002 // filteredPredicate filters events to the given predicate by event type to the given types. 1003 // When no event types are given as arguments, all event types are passed through. 
1004 func filteredPredicate(fn predicateFunc, eventTypes ...watch.EventType) predicateFunc { 1005 return func(event watch.Event) bool { 1006 valid := true 1007 for _, eventType := range eventTypes { 1008 if valid = eventType == event.Type; valid { 1009 break 1010 } 1011 } 1012 1013 if !valid { 1014 return false 1015 } 1016 1017 return fn(event) 1018 } 1019 } 1020 1021 func deploymentPredicate(fn func(*appsv1.Deployment) bool) predicateFunc { 1022 return func(event watch.Event) bool { 1023 deployment, ok := event.Object.(*appsv1.Deployment) 1024 Expect(ok).To(BeTrue(), "unexpected event object type %T in deployment", event.Object) 1025 1026 return fn(deployment) 1027 } 1028 } 1029 1030 var deploymentAvailable = filteredPredicate(deploymentPredicate(func(deployment *appsv1.Deployment) bool { 1031 for _, condition := range deployment.Status.Conditions { 1032 if condition.Type == appsv1.DeploymentAvailable && condition.Status == corev1.ConditionTrue { 1033 return true 1034 } 1035 } 1036 1037 return false 1038 }), watch.Added, watch.Modified) 1039 1040 func deploymentReplicas(replicas int32) predicateFunc { 1041 return filteredPredicate(deploymentPredicate(func(deployment *appsv1.Deployment) bool { 1042 return deployment.Status.Replicas == replicas 1043 }), watch.Added, watch.Modified) 1044 } 1045 1046 func Apply(obj controllerclient.Object, changeFunc interface{}) func() error { 1047 return ctx.Ctx().SSAClient().Apply(context.Background(), obj, changeFunc) 1048 } 1049 1050 func HavePhase(goal operatorsv1alpha1.InstallPlanPhase) gtypes.GomegaMatcher { 1051 return WithTransform(func(plan *operatorsv1alpha1.InstallPlan) operatorsv1alpha1.InstallPlanPhase { 1052 return plan.Status.Phase 1053 }, Equal(goal)) 1054 } 1055 1056 func CSVHasPhase(goal operatorsv1alpha1.ClusterServiceVersionPhase) gtypes.GomegaMatcher { 1057 return WithTransform(func(csv *operatorsv1alpha1.ClusterServiceVersion) operatorsv1alpha1.ClusterServiceVersionPhase { 1058 return csv.Status.Phase 1059 }, 
Equal(goal)) 1060 } 1061 1062 func HaveMessage(goal string) gtypes.GomegaMatcher { 1063 return WithTransform(func(plan *operatorsv1alpha1.InstallPlan) string { 1064 return plan.Status.Message 1065 }, ContainSubstring(goal)) 1066 } 1067 1068 func SetupGeneratedTestNamespaceWithOperatorGroup(name string, og operatorsv1.OperatorGroup) corev1.Namespace { 1069 ns := corev1.Namespace{ 1070 ObjectMeta: metav1.ObjectMeta{ 1071 Name: name, 1072 }, 1073 } 1074 Eventually(func() error { 1075 return ctx.Ctx().E2EClient().Create(context.Background(), &ns) 1076 }).Should(Succeed()) 1077 1078 ctx.Ctx().Logf("created the %s testing namespace", ns.GetName()) 1079 1080 Eventually(func() error { 1081 return ctx.Ctx().E2EClient().Create(context.Background(), &og) 1082 }).Should(Succeed()) 1083 1084 ctx.Ctx().Logf("created the %s/%s operator group", og.Namespace, og.Name) 1085 1086 return ns 1087 } 1088 1089 func SetupGeneratedTestNamespace(name string, targetNamespaces ...string) corev1.Namespace { 1090 og := operatorsv1.OperatorGroup{ 1091 ObjectMeta: metav1.ObjectMeta{ 1092 Name: fmt.Sprintf("%s-operatorgroup", name), 1093 Namespace: name, 1094 }, 1095 Spec: operatorsv1.OperatorGroupSpec{ 1096 TargetNamespaces: targetNamespaces, 1097 }, 1098 } 1099 1100 return SetupGeneratedTestNamespaceWithOperatorGroup(name, og) 1101 } 1102 1103 func TeardownNamespace(ns string) { 1104 if env := os.Getenv("SKIP_CLEANUP"); env != "" { 1105 fmt.Printf("Skipping cleanup of namespace %s...\n", ns) 1106 return 1107 } 1108 log := ctx.Ctx().Logf 1109 1110 currentTest := CurrentSpecReport() 1111 if currentTest.Failed() { 1112 log("collecting the %s namespace artifacts as the '%s' test case failed", ns, currentTest.LeafNodeText) 1113 if err := ctx.Ctx().DumpNamespaceArtifacts(ns); err != nil { 1114 log("failed to collect namespace artifacts: %v", err) 1115 } 1116 } 1117 1118 log("tearing down the %s namespace", ns) 1119 Eventually(func() error { 1120 return ctx.Ctx().E2EClient().Reset() 1121 
}).Should(Succeed()) 1122 } 1123 1124 func inKind(client operatorclient.ClientInterface) (bool, error) { 1125 nodes, err := client.KubernetesInterface().CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) 1126 if err != nil { 1127 // error finding nodes 1128 return false, err 1129 } 1130 for _, node := range nodes.Items { 1131 if !strings.HasPrefix(node.GetName(), "kind-") { 1132 continue 1133 } 1134 if !strings.HasSuffix(node.GetName(), "-control-plane") { 1135 continue 1136 } 1137 return true, nil 1138 } 1139 return false, nil 1140 } 1141 1142 func K8sSafeCurrentTestDescription() string { 1143 return nonAlphaNumericRegexp.ReplaceAllString(CurrentSpecReport().LeafNodeText, "") 1144 } 1145 1146 func newTokenSecret(client operatorclient.ClientInterface, namespace, saName string) (se *corev1.Secret, cleanup cleanupFunc) { 1147 seName := saName + "-token" 1148 secret := &corev1.Secret{ 1149 ObjectMeta: metav1.ObjectMeta{ 1150 Name: seName, 1151 Namespace: namespace, 1152 Annotations: map[string]string{corev1.ServiceAccountNameKey: saName}, 1153 }, 1154 Type: corev1.SecretTypeServiceAccountToken, 1155 } 1156 1157 se, err := client.KubernetesInterface().CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) 1158 Expect(err).ToNot(HaveOccurred()) 1159 Expect(se).ToNot(BeNil()) 1160 1161 cleanup = func() { 1162 if env := os.Getenv("SKIP_CLEANUP"); env != "" { 1163 fmt.Printf("Skipping cleanup of secret %s/%s...\n", namespace, se.GetName()) 1164 return 1165 } 1166 err := client.KubernetesInterface().CoreV1().Secrets(namespace).Delete(context.TODO(), se.GetName(), metav1.DeleteOptions{}) 1167 Expect(err).ToNot(HaveOccurred()) 1168 } 1169 1170 return se, cleanup 1171 }