github.com/verrazzano/verrazzano@v1.7.0/ci/multicluster/Jenkinsfile (about) 1 // Copyright (c) 2021, 2023, Oracle and/or its affiliates. 2 // Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 3 4 def DOCKER_IMAGE_TAG 5 def SKIP_ACCEPTANCE_TESTS = false 6 def EFFECTIVE_DUMP_K8S_CLUSTER_ON_SUCCESS = false 7 def availableRegions = [ "us-ashburn-1", "ca-montreal-1", "ca-toronto-1", "eu-amsterdam-1", "eu-frankfurt-1", "eu-zurich-1", "uk-london-1" ] 8 def acmeEnvironments = [ "staging", "production" ] 9 Collections.shuffle(availableRegions) 10 def zoneId = UUID.randomUUID().toString().substring(0,6).replace('-','') 11 def dns_zone_ocid = 'dummy' 12 def OKE_CLUSTER_PREFIX = "" 13 def agentLabel = env.JOB_NAME.contains('master') ? "2.0-large-phx" : "2.0-large" 14 15 installerFileName = "install-verrazzano.yaml" 16 minimalFileName = "minimal-verrazzano.yaml" 17 18 pipeline { 19 options { 20 timeout(time: 2, unit: 'HOURS') 21 skipDefaultCheckout true 22 timestamps () 23 } 24 25 agent { 26 docker { 27 image "${RUNNER_DOCKER_IMAGE}" 28 args "${RUNNER_DOCKER_ARGS}" 29 registryUrl "${RUNNER_DOCKER_REGISTRY_URL}" 30 registryCredentialsId 'ocir-pull-and-push-account' 31 label "${agentLabel}" 32 } 33 } 34 35 parameters { 36 booleanParam (description: 'Whether to use External Elasticsearch', name: 'EXTERNAL_ELASTICSEARCH', defaultValue: false) 37 choice (description: 'Number of Cluster', name: 'TOTAL_CLUSTERS', choices: ["2", "1", "3"]) 38 choice (description: 'Verrazzano Test Environment', name: 'TEST_ENV', 39 choices: ["KIND", "magicdns_oke", "ocidns_oke"]) 40 choice (description: 'ACME Certificate Environment (Staging or Production)', name: 'ACME_ENVIRONMENT', 41 choices: acmeEnvironments) 42 choice (description: 'OCI region to launch OKE clusters', name: 'OKE_CLUSTER_REGION', 43 // 1st choice is the default value 44 choices: availableRegions ) 45 choice (description: 'OKE node pool configuration', name: 'OKE_NODE_POOL', 46 // 1st choice is the default value 47 choices: [ "VM.Standard.E3.Flex-4-2", "VM.Standard2.4-2", "VM.Standard.E3.Flex-8-2" ]) 48 choice (name: 'OKE_CLUSTER_VERSION', 49 description: 'Kubernetes Version for OKE Cluster', 50 // 1st choice is the default value 51 choices: [ "v1.27.2", "v1.26.2", "v1.25.4", "v1.24.1"]) 52 choice (name: 'KIND_CLUSTER_VERSION', 53 description: 'Kubernetes Version for KIND Cluster', 54 // 1st choice is the default value 55 choices: [ "1.27", "1.26", "1.25", "1.24" ]) 56 string (name: 'GIT_COMMIT_TO_USE', 57 defaultValue: 'NONE', 58 description: 'This is the full git commit hash from the source build to be used for all jobs', 59 trim: true) 60 string (name: 'VERRAZZANO_OPERATOR_IMAGE', 61 defaultValue: 'NONE', 62 description: 'Verrazzano platform operator image name (in ghcr.io repo). 
If not specified, the latest operator.yaml published to the Verrazzano Object Store will be used', 63 trim: true) 64 choice (name: 'ADMIN_CLUSTER_PROFILE', 65 description: 'Verrazzano Admin Cluster install profile name', 66 // 1st choice is the default value 67 choices: [ "prod", "dev" ]) 68 choice (name: 'MANAGED_CLUSTER_PROFILE', 69 description: 'Verrazzano Managed Cluster install profile name', 70 // 1st choice is the default value 71 choices: [ "managed-cluster", "prod", "dev" ]) 72 choice (name: 'WILDCARD_DNS_DOMAIN', 73 description: 'This is the wildcard DNS domain', 74 // 1st choice is the default value 75 choices: [ "nip.io", "sslip.io"]) 76 choice (name: 'CRD_API_VERSION', 77 description: 'This is the API crd version.', 78 // 1st choice is the default value 79 choices: [ "v1beta1", "v1alpha1"]) 80 booleanParam (description: 'Whether to create the cluster with Calico for AT testing', name: 'CREATE_CLUSTER_USE_CALICO', defaultValue: true) 81 booleanParam (name: 'DUMP_K8S_CLUSTER_ON_SUCCESS', description: 'Whether to dump k8s cluster on success (off by default, can be useful to capture for comparing to failed cluster)', defaultValue: false) 82 string (name: 'CONSOLE_REPO_BRANCH', 83 defaultValue: '', 84 description: 'The branch to check out after cloning the console repository.', 85 trim: true) 86 string (name: 'TAGGED_TESTS', 87 defaultValue: '', 88 description: 'A comma separated list of build tags for tests that should be executed (e.g. unstable_test). Default:', 89 trim: true) 90 string (name: 'INCLUDED_TESTS', 91 defaultValue: '.*', 92 description: 'A regex matching any fully qualified test file that should be executed (e.g. examples/helidon/). Default: .*', 93 trim: true) 94 string (name: 'EXCLUDED_TESTS', 95 defaultValue: '_excluded_test', 96 description: 'A regex matching any fully qualified test file that should not be executed (e.g. multicluster/|_excluded_test). Default: _excluded_test', 97 trim: true) 98 booleanParam (description: 'Whether to capture full cluster snapshot on test failure', name: 'CAPTURE_FULL_CLUSTER', defaultValue: false) 99 } 100 101 environment { 102 DOCKER_PLATFORM_CI_IMAGE_NAME = 'verrazzano-platform-operator-jenkins' 103 DOCKER_PLATFORM_PUBLISH_IMAGE_NAME = 'verrazzano-platform-operator' 104 DOCKER_OAM_CI_IMAGE_NAME = 'verrazzano-application-operator-jenkins' 105 DOCKER_OAM_PUBLISH_IMAGE_NAME = 'verrazzano-application-operator' 106 GOPATH = '/home/opc/go' 107 GO_REPO_PATH = "${GOPATH}/src/github.com/verrazzano" 108 DOCKER_CREDS = credentials('github-packages-credentials-rw') 109 DOCKER_EMAIL = credentials('github-packages-email') 110 DOCKER_REPO = 'ghcr.io' 111 DOCKER_NAMESPACE = 'verrazzano' 112 NETRC_FILE = credentials('netrc') 113 CLUSTER_NAME_PREFIX = 'verrazzano' 114 TESTS_EXECUTED_FILE = "${WORKSPACE}/tests_executed_file.tmp" 115 POST_DUMP_FAILED_FILE = "${WORKSPACE}/post_dump_failed_file.tmp" 116 KUBECONFIG_DIR = "${WORKSPACE}/kubeconfig" 117 118 OCR_CREDS = credentials('ocr-pull-and-push-account') 119 OCR_REPO = 'container-registry.oracle.com' 120 IMAGE_PULL_SECRET = 'verrazzano-container-registry' 121 122 TEST_ENV = "${params.TEST_ENV}" 123 MANAGED_CLUSTER_PROFILE = "${params.MANAGED_CLUSTER_PROFILE}" 124 ADMIN_CLUSTER_PROFILE = "${params.ADMIN_CLUSTER_PROFILE}" 125 126 // Find a better way to handle this 127 // OKE_CLUSTER_VERSION = "${params.KUBERNETES_VERSION == '1.17' ? 
'v1.17.13' : 'v1.18.10'}" 128 TF_VAR_compartment_id = credentials('oci-tiburon-dev-compartment-ocid') 129 TF_VAR_tenancy_id = credentials('oci-tenancy') 130 TF_VAR_tenancy_name = credentials('oci-tenancy-name') 131 TF_VAR_user_id = credentials('oci-user-ocid') 132 TF_VAR_region = "${params.OKE_CLUSTER_REGION}" 133 TF_VAR_kubernetes_version = "${params.OKE_CLUSTER_VERSION}" 134 TF_VAR_nodepool_config = "${params.OKE_NODE_POOL}" 135 TF_VAR_api_fingerprint = credentials('oci-api-key-fingerprint') 136 TF_VAR_api_private_key_path = credentials('oci-api-key') 137 TF_VAR_s3_bucket_access_key = credentials('oci-s3-bucket-access-key') 138 TF_VAR_s3_bucket_secret_key = credentials('oci-s3-bucket-secret-key') 139 TF_VAR_ssh_public_key_path = credentials('oci-tf-pub-ssh-key') 140 141 OCI_CLI_TENANCY = credentials('oci-tenancy') 142 OCI_CLI_USER = credentials('oci-user-ocid') 143 OCI_CLI_FINGERPRINT = credentials('oci-api-key-fingerprint') 144 OCI_CLI_KEY_FILE = credentials('oci-api-key') 145 OCI_CLI_REGION = "${params.OKE_CLUSTER_REGION}" 146 OCI_CLI_SUPPRESS_FILE_PERMISSIONS_WARNING = 'True' 147 148 OPERATOR_YAML_FILE = "${WORKSPACE}/acceptance-test-operator.yaml" 149 150 INSTALL_CONFIG_FILE_KIND = "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts/v1beta1/install-vz-prod-kind-multicluster.yaml" 151 INSTALL_CONFIG_FILE_OCIDNS = "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts/v1beta1/install-verrazzano-ocidns.yaml" 152 INSTALL_CONFIG_FILE_NIPIO = "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts/v1beta1/install-verrazzano-nipio.yaml" 153 INSTALL_CONFIG_FILE_MINIMAL = "${WORKSPACE}/install-verrazzano-minimal.yaml" 154 OCI_DNS_ZONE_NAME="z${zoneId}.v8o.io" 155 ACME_ENVIRONMENT="${params.ACME_ENVIRONMENT}" 156 157 TIMESTAMP = sh(returnStdout: true, script: "date +%Y%m%d%H%M%S").trim() 158 SHORT_TIME_STAMP = sh(returnStdout: true, script: "date +%m%d%H%M%S").trim() 159 TEST_SCRIPTS_DIR = "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts" 160 LOOPING_TEST_SCRIPTS_DIR = "${TEST_SCRIPTS_DIR}/looping-test" 161 162 ADMIN_KUBECONFIG="${KUBECONFIG_DIR}/1/kube_config" 163 164 // Environment variables required to capture cluster snapshot and bug report on test failure 165 DUMP_COMMAND="${GO_REPO_PATH}/verrazzano/tools/scripts/k8s-dump-cluster.sh" 166 TEST_DUMP_ROOT="${WORKSPACE}/test-cluster-snapshots" 167 CAPTURE_FULL_CLUSTER="${params.CAPTURE_FULL_CLUSTER}" 168 169 // Environment variable for Verrazzano CLI executable 170 VZ_COMMAND="${GO_REPO_PATH}/vz" 171 172 VERRAZZANO_INSTALL_LOGS_DIR="${WORKSPACE}/verrazzano/platform-operator/scripts/install/build/logs" 173 VERRAZZANO_INSTALL_LOG="verrazzano-install.log" 174 175 EXTERNAL_ELASTICSEARCH = "${params.EXTERNAL_ELASTICSEARCH}" 176 177 // used for console artifact capture on failure 178 JENKINS_READ = credentials('jenkins-auditor') 179 OCI_CLI_AUTH="instance_principal" 180 OCI_OS_NAMESPACE = credentials('oci-os-namespace') 181 OCI_OS_ARTIFACT_BUCKET="build-failure-artifacts" 182 OCI_OS_COMMIT_BUCKET="verrazzano-builds-by-commit" 183 VZ_CLI_TARGZ="vz-linux-amd64.tar.gz" 184 185 // used to emit metrics 186 PROMETHEUS_CREDENTIALS = credentials('prometheus-credentials') 187 TEST_ENV_LABEL = "${params.TEST_ENV}" 188 SEARCH_HTTP_ENDPOINT = credentials('search-gw-url') 189 SEARCH_PASSWORD = "${PROMETHEUS_CREDENTIALS_PSW}" 190 SEARCH_USERNAME = "${PROMETHEUS_CREDENTIALS_USR}" 191 192 // sample app deployed before upgrade and UI console tests 193 SAMPLE_APP_NAME="hello-helidon" 194 SAMPLE_APP_NAMESPACE="hello-helidon-sample" 195 
SAMPLE_APP_PROJECT="hello-helidon-sample-proj" 196 SAMPLE_APP_COMPONENT="hello-helidon-component" 197 198 // used by ToDoList example test 199 WEBLOGIC_PSW = credentials('weblogic-example-domain-password') 200 DATABASE_PSW = credentials('todo-mysql-password') 201 202 // used to generate Ginkgo test reports 203 TEST_REPORT = "test-report.xml" 204 GINKGO_REPORT_ARGS = "--junit-report=${TEST_REPORT} --keep-separate-reports=true" 205 TEST_REPORT_DIR = "${WORKSPACE}/tests/e2e" 206 } 207 208 stages { 209 stage('Clean workspace and checkout') { 210 steps { 211 sh """ 212 echo "${NODE_LABELS}" 213 """ 214 215 script { 216 EFFECTIVE_DUMP_K8S_CLUSTER_ON_SUCCESS = getEffectiveDumpOnSuccess() 217 if (params.GIT_COMMIT_TO_USE == "NONE") { 218 echo "Specific GIT commit was not specified, using the current HEAD" 219 def scmInfo = checkout scm 220 env.GIT_COMMIT = scmInfo.GIT_COMMIT 221 env.GIT_BRANCH = scmInfo.GIT_BRANCH 222 } else { 223 echo "SCM checkout of ${params.GIT_COMMIT_TO_USE}" 224 def scmInfo = checkout([ 225 $class: 'GitSCM', 226 branches: [[name: params.GIT_COMMIT_TO_USE]], 227 doGenerateSubmoduleConfigurations: false, 228 extensions: [], 229 submoduleCfg: [], 230 userRemoteConfigs: [[url: env.SCM_VERRAZZANO_GIT_URL]]]) 231 env.GIT_COMMIT = scmInfo.GIT_COMMIT 232 env.GIT_BRANCH = scmInfo.GIT_BRANCH 233 // If the commit we were handed is not what the SCM says we are using, fail 234 if (!env.GIT_COMMIT.equals(params.GIT_COMMIT_TO_USE)) { 235 echo "SCM didn't checkout the commit we expected. Expected: ${params.GIT_COMMIT_TO_USE}, Found: ${scmInfo.GIT_COMMIT}" 236 error "SCM checkout commit mismatch" 237 } 238 } 239 echo "SCM checkout of ${env.GIT_BRANCH} at ${env.GIT_COMMIT}" 240 } 241 242 sh """ 243 cp -f "${NETRC_FILE}" $HOME/.netrc 244 chmod 600 $HOME/.netrc 245 """ 246 247 script { 248 try { 249 sh """ 250 echo "${DOCKER_CREDS_PSW}" | docker login ${env.DOCKER_REPO} -u ${DOCKER_CREDS_USR} --password-stdin 251 """ 252 } catch(error) { 253 echo "docker login failed, retrying after sleep" 254 retry(4) { 255 sleep(30) 256 sh """ 257 echo "${DOCKER_CREDS_PSW}" | docker login ${env.DOCKER_REPO} -u ${DOCKER_CREDS_USR} --password-stdin 258 """ 259 } 260 } 261 } 262 sh """ 263 rm -rf ${GO_REPO_PATH}/verrazzano 264 mkdir -p ${GO_REPO_PATH}/verrazzano 265 tar cf - . 
| (cd ${GO_REPO_PATH}/verrazzano/ ; tar xf -) 266 """ 267 script { 268 setVZCRDVersionForInstallation() 269 def props = readProperties file: '.verrazzano-development-version' 270 VERRAZZANO_DEV_VERSION = props['verrazzano-development-version'] 271 TIMESTAMP = sh(returnStdout: true, script: "date +%Y%m%d%H%M%S").trim() 272 SHORT_COMMIT_HASH = sh(returnStdout: true, script: "git rev-parse --short=8 HEAD").trim() 273 DOCKER_IMAGE_TAG = "${VERRAZZANO_DEV_VERSION}-${TIMESTAMP}-${SHORT_COMMIT_HASH}" 274 // update the description with some meaningful info 275 setDisplayName() 276 currentBuild.description = SHORT_COMMIT_HASH + " : " + env.GIT_COMMIT + " : " + params.GIT_COMMIT_TO_USE 277 278 if (params.TEST_ENV != "KIND") { 279 // derive the prefix for the OKE cluster 280 OKE_CLUSTER_PREFIX = sh(returnStdout: true, script: "${GO_REPO_PATH}/verrazzano/ci/scripts/derive_oke_cluster_name.sh").trim() 281 } 282 } 283 script { 284 sh """ 285 echo "Downloading VZ CLI from object storage" 286 oci --region us-phoenix-1 os object get --namespace ${OCI_OS_NAMESPACE} -bn ${OCI_OS_COMMIT_BUCKET} --name ephemeral/${env.BRANCH_NAME}/${SHORT_COMMIT_HASH}/${VZ_CLI_TARGZ} --file ${VZ_CLI_TARGZ} 287 tar xzf ${VZ_CLI_TARGZ} -C ${GO_REPO_PATH} 288 ${VZ_COMMAND} version 289 """ 290 } 291 } 292 } 293 294 stage('Install and Configure') { 295 when { 296 allOf { 297 not { buildingTag() } 298 anyOf { 299 branch 'master'; 300 expression {SKIP_ACCEPTANCE_TESTS == false}; 301 } 302 } 303 } 304 stages { 305 stage('Prepare AT environment') { 306 parallel { 307 stage('Create Kind Clusters') { 308 when { expression { return params.TEST_ENV == 'KIND' } } 309 steps { 310 createKindClusters() 311 } 312 } 313 stage('Create OKE Clusters') { 314 when { expression { return params.TEST_ENV == 'ocidns_oke' || params.TEST_ENV == 'magicdns_oke'} } 315 steps { 316 echo "OKE Cluster Prefix: ${OKE_CLUSTER_PREFIX}" 317 createOKEClusters("${OKE_CLUSTER_PREFIX}") 318 } 319 } 320 } 321 } 322 stage("Configure Clusters") { 323 parallel { 324 stage("Configure OKE/OCI DNS") { 325 when { expression { return params.TEST_ENV == 'ocidns_oke' } } 326 stages { 327 stage('Create OCI DNS zone') { 328 steps { 329 script { 330 dns_zone_ocid = sh(script: "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts/oci_dns_ops.sh -o create -c ${TF_VAR_compartment_id} -s z${zoneId}", returnStdout: true) 331 } 332 } 333 } 334 stage('Configure OCI DNS Resources') { 335 environment { 336 OCI_DNS_COMPARTMENT_OCID = credentials('oci-dns-compartment') 337 OCI_PRIVATE_KEY_FILE = credentials('oci-api-key') 338 OCI_DNS_ZONE_OCID = "${dns_zone_ocid}" 339 } 340 steps { 341 script { 342 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 343 for(int count=1; count<=clusterCount; count++) { 344 sh """ 345 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 346 cd ${GO_REPO_PATH}/verrazzano 347 ./tests/e2e/config/scripts/create-test-oci-config-secret.sh 348 """ 349 } 350 } 351 } 352 } 353 stage('Configure OCI DNS Installers') { 354 environment { 355 OCI_DNS_COMPARTMENT_OCID = credentials('oci-dns-compartment') 356 OCI_PRIVATE_KEY_FILE = credentials('oci-api-key') 357 OCI_DNS_ZONE_OCID = "${dns_zone_ocid}" 358 } 359 steps { 360 script { 361 configureVerrazzanoInstallers(env.INSTALL_CONFIG_FILE_OCIDNS, "./tests/e2e/config/scripts/process_oci_dns_install_yaml.sh", "acme", params.ACME_ENVIRONMENT) 362 downloadMinimalVerrazzanoYaml() 363 configureManagedInstallers(env.INSTALL_CONFIG_FILE_MINIMAL, "./tests/e2e/config/scripts/process_oci_dns_install_yaml.sh", minimalFileName, 
params.ACME_ENVIRONMENT) 364 } 365 } 366 } 367 } 368 } 369 stage("Configure KinD") { 370 when { expression { return params.TEST_ENV == 'KIND' } } 371 steps { 372 configureVerrazzanoInstallers(env.INSTALL_CONFIG_FILE_KIND,"./tests/e2e/config/scripts/process_kind_install_yaml.sh", params.WILDCARD_DNS_DOMAIN) 373 downloadMinimalVerrazzanoYaml() 374 configureManagedInstallers(env.INSTALL_CONFIG_FILE_MINIMAL, "./tests/e2e/config/scripts/process_kind_install_yaml.sh", minimalFileName, params.WILDCARD_DNS_DOMAIN) 375 } 376 } 377 stage("Configure OKE/MagicDNS") { 378 when { expression { return params.TEST_ENV == 'magicdns_oke' } } 379 steps { 380 configureVerrazzanoInstallers(env.INSTALL_CONFIG_FILE_NIPIO, "./tests/e2e/config/scripts/process_nipio_install_yaml.sh", params.WILDCARD_DNS_DOMAIN) 381 downloadMinimalVerrazzanoYaml() 382 configureManagedInstallers(env.INSTALL_CONFIG_FILE_MINIMAL, "./tests/e2e/config/scripts/process_nipio_install_yaml.sh", minimalFileName, params.WILDCARD_DNS_DOMAIN) 383 } 384 } 385 } 386 } 387 stage("Minimal Managed Cluster Tests") { 388 stages { 389 stage('Install Verrazzano') { 390 steps { 391 script { 392 getVerrazzanoOperatorYaml() 393 } 394 installMinimalVerrazzano() 395 } 396 post { 397 always { 398 script { 399 dumpInstallLogs() 400 } 401 } 402 failure { 403 script { 404 dumpK8sCluster("${WORKSPACE}/install-failure-cluster-snapshot") 405 } 406 } 407 } 408 } 409 stage('Register min managed cluster') { 410 steps { 411 registerManagedClusters() 412 } 413 } 414 stage('Verify min managed cluster') { 415 parallel { 416 stage('Verify Install Cluster Agent') { 417 steps { 418 runGinkgoRandomize('verify-install/clusteragent') 419 } 420 } 421 stage('Verify Register Min Managed Cluster') { 422 steps { 423 verifyRegisterManagedClusters(true) 424 } 425 } 426 } 427 } 428 stage('Verify deregister min managed cluster') { 429 steps { 430 verifyDeregisterManagedClusters() 431 } 432 } 433 stage('Delete min managed cluster') { 434 steps { 435 deleteClusters(2) 436 } 437 } 438 } 439 } 440 stage('Prepare full managed cluster(s)') { 441 stages { 442 stage('Create KinD managed cluster') { 443 steps { 444 // create managed clusters, pass in false for cleanup existing kind clusters, since we want 445 // the admin cluster to be left as-is 446 createKindClusters(2, false) 447 } 448 } 449 stage('Configure KinD managed cluster') { 450 steps { 451 configureManagedInstallers(env.INSTALL_CONFIG_FILE_KIND,"./tests/e2e/config/scripts/process_kind_install_yaml.sh", installerFileName, params.WILDCARD_DNS_DOMAIN) 452 } 453 } 454 stage ('Install managed clusters') { 455 steps { 456 installManagedClusters() 457 } 458 } 459 } 460 } 461 462 stage ('Register managed clusters') { 463 steps { 464 registerManagedClusters() 465 } 466 } 467 stage ('Deploy Sample Application') { 468 steps { 469 deploySampleApp() 470 } 471 } 472 stage ('verify-register') { 473 steps { 474 verifyRegisterManagedClusters(false) 475 } 476 } 477 stage ('system component metrics') { 478 steps { 479 runGinkgoRandomize('metrics/syscomponents') 480 } 481 } 482 } 483 post { 484 failure { 485 script { 486 dumpK8sCluster("${WORKSPACE}/multicluster-install-cluster-snapshot") 487 } 488 } 489 } 490 } 491 492 stage ('Verify Install') { 493 stages { 494 stage("verify-fluentd-update") { 495 steps { 496 runGinkgoAdmin('update/fluentdextes') 497 } 498 } 499 stage("verify-dns-update") { 500 steps { 501 script { 502 parallel verifyDNSUpdate() 503 } 504 } 505 } 506 // cert-manager update suites related to admin and managed clusters should be run 
sequentially to 507 // avoid intermittent test failures. 508 stage("verify-admin-cluster-cert-manager-update") { 509 steps { 510 script { 511 runGinkgoAdmin('update/certac') 512 } 513 } 514 } 515 stage("verify-managed-clusters-cert-manager-update") { 516 steps { 517 script { 518 runGinkgoAdmin('update/certmc') 519 } 520 } 521 } 522 stage('verify-install') { 523 steps { 524 runGinkgoRandomize('verify-install') 525 } 526 } 527 stage('verify-argocd') { 528 steps { 529 runGinkgo('multicluster/verify-argocd', '${TEST_DUMP_ROOT}/verify-argocd') 530 } 531 } 532 stage('verify-rancher') { 533 steps { 534 runGinkgo('multicluster/verify-rancher', '${TEST_DUMP_ROOT}/verify-rancher') 535 } 536 } 537 stage('verify-cluster-sync') { 538 steps { 539 runGinkgoAdmin('multicluster/verify-cluster-sync') 540 } 541 } 542 stage ('mc verify-install') { 543 steps { 544 runGinkgoRandomize('multicluster/verify-install') 545 } 546 } 547 } 548 post { 549 failure { 550 script { 551 dumpK8sCluster("${WORKSPACE}/multicluster-verify-cluster-snapshot") 552 } 553 } 554 aborted { 555 script { 556 dumpK8sCluster("${WORKSPACE}/multicluster-verify-cluster-snapshot") 557 } 558 } 559 success { 560 script { 561 if (EFFECTIVE_DUMP_K8S_CLUSTER_ON_SUCCESS == true) { 562 dumpK8sCluster("${WORKSPACE}/multicluster-verify-cluster-snapshot") 563 } 564 } 565 } 566 } 567 } 568 569 stage ('Verify Infra') { 570 steps { 571 script { 572 parallel verifyInfra() 573 } 574 } 575 post { 576 always { 577 archiveArtifacts artifacts: '**/coverage.html,**/logs/*', allowEmptyArchive: true 578 junit testResults: '**/*test-result.xml', allowEmptyResults: true 579 } 580 } 581 } 582 583 stage('Acceptance Tests') { 584 stages { 585 stage ('Example apps') { 586 steps { 587 script { 588 parallel verifyExamples() 589 } 590 } 591 } 592 stage ('Console') { 593 steps { 594 runConsoleTests() 595 } 596 post { 597 always { 598 sh "${GO_REPO_PATH}/verrazzano/ci/scripts/save_console_test_artifacts.sh" 599 } 600 } 601 } 602 stage ('Undeploy Sample Application') { 603 steps { 604 undeploySampleApp() 605 } 606 } 607 stage ('mc verify-api') { 608 steps { 609 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { 610 script { 611 runMulticlusterVerifyApi() 612 } 613 } 614 } 615 post { 616 failure { 617 script { 618 dumpK8sCluster("${WORKSPACE}/multicluster-acceptance-tests-cluster-snapshot-pre-uninstall") 619 } 620 } 621 } 622 } 623 } 624 post { 625 failure { 626 script { 627 dumpK8sCluster("${WORKSPACE}/multicluster-acceptance-tests-cluster-snapshot") 628 } 629 } 630 aborted { 631 script { 632 dumpK8sCluster("${WORKSPACE}/multicluster-acceptance-tests-cluster-snapshot") 633 } 634 } 635 success { 636 script { 637 if (EFFECTIVE_DUMP_K8S_CLUSTER_ON_SUCCESS == true) { 638 dumpK8sCluster("${WORKSPACE}/multicluster-acceptance-tests-cluster-snapshot") 639 } 640 } 641 } 642 } 643 } 644 stage('Cleanup Tests') { 645 stages { 646 stage('verify deregister') { 647 steps { 648 verifyDeregisterManagedClusters() 649 } 650 } 651 } 652 post { 653 failure { 654 script { 655 dumpK8sCluster("${WORKSPACE}/multicluster-cleanup-tests-cluster-snapshot") 656 } 657 } 658 } 659 } 660 stage('Uninstall Verrazzano') { 661 stages { 662 stage('Uninstall') { 663 steps { 664 uninstallVerrazzano() 665 } 666 } 667 stage('Verify Uninstall') { 668 steps { 669 verifyUninstall() 670 } 671 } 672 } 673 post { 674 failure { 675 script { 676 dumpK8sCluster("${WORKSPACE}/multicluster-uninstall-dump") 677 } 678 } 679 aborted { 680 script { 681 dumpK8sCluster("${WORKSPACE}/multicluster-uninstall-dump") 682 } 683 } 
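                // The success dump below only fires when EFFECTIVE_DUMP_K8S_CLUSTER_ON_SUCCESS is true, i.e. when
                // DUMP_K8S_CLUSTER_ON_SUCCESS was requested or the global FORCE_DUMP_K8S_CLUSTER_ON_SUCCESS
                // override applies on master (see getEffectiveDumpOnSuccess()).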
684 success { 685 script { 686 if (EFFECTIVE_DUMP_K8S_CLUSTER_ON_SUCCESS == true) { 687 dumpK8sCluster("${WORKSPACE}/multicluster-uninstall-dump") 688 } 689 } 690 } 691 always { 692 archiveArtifacts artifacts: "**/multicluster-uninstall-dump/**,**/*full-cluster*/**,**/*bug-report*/**,**/test-cluster-snapshots/**", allowEmptyArchive: true 693 } 694 } 695 } 696 } 697 post { 698 failure { 699 sh """ 700 curl -k -u ${JENKINS_READ_USR}:${JENKINS_READ_PSW} -o ${WORKSPACE}/build-console-output.log ${BUILD_URL}consoleText 701 """ 702 archiveArtifacts artifacts: '**/build-console-output.log,**/Screenshot*.png,**/ConsoleLog*.log', allowEmptyArchive: true 703 // Ignore failures in any of the following actions so that the "always" post step that cleans up clusters is executed 704 sh """ 705 curl -k -u ${JENKINS_READ_USR}:${JENKINS_READ_PSW} -o archive.zip ${BUILD_URL}artifact/*zip*/archive.zip || true 706 oci --region us-phoenix-1 os object put --force --namespace ${OCI_OS_NAMESPACE} -bn ${OCI_OS_ARTIFACT_BUCKET} --name ${env.JOB_NAME}/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/archive.zip --file archive.zip || true 707 rm archive.zip || true 708 """ 709 } 710 always { 711 script { 712 if ( fileExists(env.TESTS_EXECUTED_FILE) ) { 713 dumpVerrazzanoSystemPods() 714 dumpCattleSystemPods() 715 dumpNginxIngressControllerLogs() 716 dumpVerrazzanoPlatformOperatorLogs() 717 dumpVerrazzanoApplicationOperatorLogs() 718 dumpOamKubernetesRuntimeLogs() 719 dumpVerrazzanoApiLogs() 720 } 721 } 722 sh """ 723 # Copy the generated test reports to WORKSPACE to archive them 724 mkdir -p ${TEST_REPORT_DIR} 725 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 726 find . -name "${TEST_REPORT}" | cpio -pdm ${TEST_REPORT_DIR} 727 """ 728 archiveArtifacts artifacts: "**/*-operator.yaml,**/install-verrazzano.yaml,**/kube_config,**/coverage.html,**/logs/**,**/build/resources/**,**/verrazzano_images.txt,**/*full-cluster*/**,**/*bug-report*/**,**/test-cluster-snapshots/**,**/${TEST_REPORT},**/Screenshot*.png", allowEmptyArchive: true 729 junit testResults: "**/${TEST_REPORT}", allowEmptyResults: true 730 731 script { 732 sh """ 733 if [ -f ${POST_DUMP_FAILED_FILE} ]; then 734 echo "Failures seen during dumping of artifacts, treat post as failed" 735 exit 1 736 fi 737 """ 738 } 739 } 740 cleanup { 741 // Delete clusters as the very first thing in cleanup to reclaim cluster resources especially OKE resources 742 deleteClusters() 743 deleteDir() 744 } 745 } 746 } 747 748 def deleteOkeClusters() { 749 script { 750 sh """ 751 mkdir -p ${KUBECONFIG_DIR} 752 if [ "${TEST_ENV}" == "ocidns_oke" ]; then 753 cd ${GO_REPO_PATH}/verrazzano 754 ./tests/e2e/config/scripts/oci_dns_ops.sh -o delete -s z${zoneId} || echo "Failed to delete DNS zone z${zoneId}" 755 fi 756 cd ${TEST_SCRIPTS_DIR} 757 TF_VAR_label_prefix=${OKE_CLUSTER_PREFIX} TF_VAR_state_name=multicluster-${env.BUILD_NUMBER}-${env.BRANCH_NAME} ./delete_oke_cluster.sh "$clusterCount" "${KUBECONFIG_DIR}" || true 758 """ 759 } 760 } 761 762 def deleteClusters(start = 1) { 763 script { 764 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 765 if (env.TEST_ENV == "KIND") { 766 for(int count=start; count<=clusterCount; count++) { 767 sh """ 768 if [ "${env.TEST_ENV}" == "KIND" ] 769 then 770 kind delete cluster --name ${CLUSTER_NAME_PREFIX}-$count 771 fi 772 """ 773 } 774 } else { 775 deleteOkeClusters() 776 } 777 } 778 } 779 780 // Create a KinD cluster instance 781 // - count - the cluster index into $KUBECONFIG_DIR 782 // - metallbAddressRange - the address range to provide the Metallb install 
within the KinD Docker bridge network address range 783 // - cleanupKindContainers - indicates to the script whether or not to remove any existing clusters with the same name before creating the new one 784 // - connectJenkinsRunnerToNetwork - indicates whether or not to connect the KinD Docker bridge network to the Jenkins local docker network 785 def installKindCluster(count, metallbAddressRange, cleanupKindContainers, connectJenkinsRunnerToNetwork) { 786 // For parallel execution, wrap this in a Groovy enclosure {} 787 return script { 788 sh """ 789 echo ${CLUSTER_NAME_PREFIX}-$count 790 echo ${KUBECONFIG_DIR}/$count/kube_config 791 mkdir -p ${KUBECONFIG_DIR}/$count 792 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 793 echo "Create Kind cluster \$1" 794 cd ${TEST_SCRIPTS_DIR} 795 # As a stop gap, for now we are using the api/vpo caches here to see if it helps with rate limiting issues, we will need to add specific caches so for now 796 # specify the cache name based on the count value, this is assuming 1 or 2 clusters 797 case "${count}" in 798 1) 799 ./create_kind_cluster.sh "${CLUSTER_NAME_PREFIX}-$count" "${GO_REPO_PATH}/verrazzano/platform-operator" "${KUBECONFIG_DIR}/$count/kube_config" "${params.KIND_CLUSTER_VERSION}" "$cleanupKindContainers" "$connectJenkinsRunnerToNetwork" true ${params.CREATE_CLUSTER_USE_CALICO} "vpo_integ" 800 ;; 801 2) 802 ./create_kind_cluster.sh "${CLUSTER_NAME_PREFIX}-$count" "${GO_REPO_PATH}/verrazzano/platform-operator" "${KUBECONFIG_DIR}/$count/kube_config" "${params.KIND_CLUSTER_VERSION}" "$cleanupKindContainers" "$connectJenkinsRunnerToNetwork" true ${params.CREATE_CLUSTER_USE_CALICO} "apo_integ" 803 ;; 804 *) 805 ./create_kind_cluster.sh "${CLUSTER_NAME_PREFIX}-$count" "${GO_REPO_PATH}/verrazzano/platform-operator" "${KUBECONFIG_DIR}/$count/kube_config" "${params.KIND_CLUSTER_VERSION}" "$cleanupKindContainers" "$connectJenkinsRunnerToNetwork" false ${params.CREATE_CLUSTER_USE_CALICO} "NONE" 806 ;; 807 esac 808 if [ ${params.CREATE_CLUSTER_USE_CALICO} == true ]; then 809 echo "Install Calico" 810 cd ${GO_REPO_PATH}/verrazzano 811 ./ci/scripts/install_calico.sh "${CLUSTER_NAME_PREFIX}-$count" 812 fi 813 kubectl wait --for=condition=ready nodes/${CLUSTER_NAME_PREFIX}-$count-control-plane --timeout=5m 814 kubectl wait --for=condition=ready pods/kube-controller-manager-${CLUSTER_NAME_PREFIX}-$count-control-plane -n kube-system --timeout=5m 815 echo "Listing pods in kube-system namespace ..." 816 kubectl get pods -n kube-system 817 echo "Install metallb" 818 cd ${GO_REPO_PATH}/verrazzano 819 ./tests/e2e/config/scripts/install-metallb.sh $metallbAddressRange 820 echo "Deploy external es and create its secret on the admin cluster if EXTERNAL_ELASTICSEARCH is true" 821 CLUSTER_NUMBER=${count} ./tests/e2e/config/scripts/create-external-os.sh 822 """ 823 } 824 } 825 826 // Either download the specified release of the platform operator YAML, or create one 827 // using the specific operator image provided by the user. 
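// In either case the resulting manifest is written to OPERATOR_YAML_FILE and later handed to
// 'vz install --manifests' by installVerrazzanoOnCluster().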
828 def getVerrazzanoOperatorYaml() { 829 script { 830 sh """ 831 echo "Platform Operator Configuration" 832 cd ${GO_REPO_PATH}/verrazzano 833 if [ "NONE" == "${params.VERRAZZANO_OPERATOR_IMAGE}" ]; then 834 echo "Downloading operator.yaml from branch ${env.BRANCH_NAME} for commit ${SHORT_COMMIT_HASH}" 835 oci --region us-phoenix-1 os object get --namespace ${OCI_OS_NAMESPACE} -bn ${OCI_OS_COMMIT_BUCKET} --name ephemeral/${env.BRANCH_NAME}/${SHORT_COMMIT_HASH}/operator.yaml --file ${OPERATOR_YAML_FILE} 836 else 837 echo "Generating operator.yaml based on image name provided: ${params.VERRAZZANO_OPERATOR_IMAGE}" 838 env IMAGE_PULL_SECRETS=verrazzano-container-registry DOCKER_IMAGE=${params.VERRAZZANO_OPERATOR_IMAGE} ./tools/scripts/generate_operator_yaml.sh > ${OPERATOR_YAML_FILE} 839 fi 840 """ 841 } 842 } 843 844 // download the file for the minimal verrazzano install 845 def downloadMinimalVerrazzanoYaml() { 846 script { 847 sh """ 848 echo "Downloading minimal-thanos.yaml from branch ${env.BRANCH_NAME} to ${env.INSTALL_CONFIG_FILE_MINIMAL}" 849 wget -O ${env.INSTALL_CONFIG_FILE_MINIMAL} https://raw.githubusercontent.com/verrazzano/verrazzano/${env.BRANCH_NAME}/examples/multicluster/managed-clusters/minimal-thanos.yaml 850 """ 851 } 852 } 853 854 // Update Verrazzano on each of the managed clusters 855 def installManagedClusters() { 856 script { 857 def verrazzanoInstallStages = [:] 858 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 859 for(int count=2; count<=clusterCount; count++) { 860 def key = "vz-mgd-${count-1}" 861 def installerPath="${KUBECONFIG_DIR}/${count}/${installerFileName}" 862 verrazzanoInstallStages["${key}"] = installVerrazzanoOnCluster(count, installerPath, false) 863 } 864 parallel verrazzanoInstallStages 865 } 866 } 867 868 def installMinimalVerrazzano() { 869 script { 870 // Create a dictionary of Verrazzano install steps to be executed in parallel 871 // - the first one will always be the Admin cluster 872 // - clusters 2-max are managed clusters 873 def verrazzanoInstallStages = [:] 874 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 875 for(int count=1; count<=clusterCount; count++) { 876 def installerPath="${KUBECONFIG_DIR}/${count}/${minimalFileName}" 877 def key = "vz-mgd-${count-1}" 878 def minimalInstall = true 879 if (count == 1) { 880 key = "vz-admin" 881 installerPath="${KUBECONFIG_DIR}/${count}/${installerFileName}" 882 minimalInstall = false 883 } 884 verrazzanoInstallStages["${key}"] = installVerrazzanoOnCluster(count, installerPath, minimalInstall) 885 } 886 parallel verrazzanoInstallStages 887 } 888 } 889 890 // Install Verrazzano on a target cluster 891 // - count is the cluster index into the $KUBECONFIG_DIR 892 // - verrazzanoConfig is the Verrazzano CR to use to install VZ on the cluster 893 def installVerrazzanoOnCluster(count, verrazzanoConfig, minimalInstall) { 894 // For parallel execution, wrap this in a Groovy enclosure {} 895 return { 896 script { 897 sh """ 898 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 899 cd ${GO_REPO_PATH}/verrazzano 900 901 # Display the kubectl and cluster versions 902 kubectl version 903 # Display the VZ CLI version 904 ${VZ_COMMAND} version 905 906 echo "Create Image Pull Secrets" 907 ./tests/e2e/config/scripts/create-image-pull-secret.sh "${IMAGE_PULL_SECRET}" "${DOCKER_REPO}" "${DOCKER_CREDS_USR}" "${DOCKER_CREDS_PSW}" 908 ./tests/e2e/config/scripts/create-image-pull-secret.sh github-packages "${DOCKER_REPO}" "${DOCKER_CREDS_USR}" "${DOCKER_CREDS_PSW}" 909 
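            # The ocr secret enables pulls from the Oracle Container Registry; a copy of the main pull secret is also created in the verrazzano-install namespace below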
./tests/e2e/config/scripts/create-image-pull-secret.sh ocr "${OCR_REPO}" "${OCR_CREDS_USR}" "${OCR_CREDS_PSW}" 910 911 # make sure ns exists and create secret in verrazzano-install ns 912 kubectl create namespace verrazzano-install || true 913 ./tests/e2e/config/scripts/check_verrazzano_ns_exists.sh verrazzano-install 914 ./tests/e2e/config/scripts/create-image-pull-secret.sh "${IMAGE_PULL_SECRET}" "${DOCKER_REPO}" "${DOCKER_CREDS_USR}" "${DOCKER_CREDS_PSW}" "verrazzano-install" 915 916 ${LOOPING_TEST_SCRIPTS_DIR}/dump_cluster.sh ${WORKSPACE}/verrazzano/build/resources/cluster${count}/pre-install-resources 917 918 # Update VZ CR to enable required components 919 ./tests/e2e/config/scripts/multicluster_edit_vz.sh ${count} ${verrazzanoConfig} ${minimalInstall} 920 921 echo "Installing the Verrazzano Platform Operator" 922 time ${VZ_COMMAND} install --manifests ${OPERATOR_YAML_FILE} -f ${verrazzanoConfig} 923 """ 924 } 925 } 926 } 927 928 def uninstallVerrazzano() { 929 script { 930 // Create a dictionary of Verrazzano uninstall steps to be executed in parallel 931 // - the first one will always be the Admin cluster 932 // - clusters 2-max are managed clusters 933 def verrazzanoUninstallStages = [:] 934 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 935 for (int count = 1; count <= clusterCount; count++) { 936 def installerPath = "${KUBECONFIG_DIR}/${count}/${installerFileName}" 937 def key = "vz-mgd-${count - 1}" 938 if (count == 1) { 939 key = "vz-admin" 940 } 941 verrazzanoUninstallStages["${key}"] = uninstallVerrazzanoOnCluster(count, installerPath) 942 } 943 parallel verrazzanoUninstallStages 944 } 945 } 946 947 // Uninstall Verrazzano 948 // - count is the cluster index into the $KUBECONFIG_DIR 949 // - verrazzanoConfig is the Verrazzano CR to use to install VZ on the cluster 950 def uninstallVerrazzanoOnCluster(count, verrazzanoConfig) { 951 // For parallel execution, wrap this in a Groovy enclosure {} 952 return { 953 script { 954 sh """ 955 export KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 956 echo "Deleting \\$verrazzanoConfig" 957 time ${VZ_COMMAND} uninstall -y --timeout 45m 958 """ 959 } 960 } 961 } 962 963 // Verify uninstall on all clusters 964 def verifyUninstall() { 965 script { 966 // Create a dictionary of Verrazzano verify uninstall steps to be executed in parallel 967 // - the first one will always be the Admin cluster 968 // - clusters 2-max are managed clusters 969 def verifyUninstallStages = [:] 970 // disabling for managed cluster since complete removal of resources on managed cluster interferes with cluster 971 // management facilities in Rancher. 
See VZ-10055 972 //int clusterCount = params.TOTAL_CLUSTERS.toInteger() 973 int clusterCount = 1 974 for (int count = 1; count <= clusterCount; count++) { 975 def key = "vz-mgd-${count - 1}" 976 if (count == 1) { 977 key = "vz-admin" 978 } 979 verifyUninstallStages["${key}"] = verifyUninstallOnCluster(count) 980 } 981 parallel verifyUninstallStages 982 } 983 } 984 985 // Verify uninstall Verrazzano on a single cluster 986 // - count is the cluster index into the $KUBECONFIG_DIR 987 def verifyUninstallOnCluster(count) { 988 // For parallel execution, wrap this in a Groovy enclosure {} 989 return { 990 script { 991 sh """ 992 export KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 993 ${LOOPING_TEST_SCRIPTS_DIR}/dump_cluster.sh ${WORKSPACE}/verrazzano/build/resources/cluster${count}/post-uninstall-resources false 994 ${LOOPING_TEST_SCRIPTS_DIR}/verify_uninstall.sh ${WORKSPACE}/verrazzano/build/resources/cluster${count} 995 """ 996 } 997 } 998 } 999 1000 // register all managed clusters 1001 def registerManagedClusters() { 1002 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { 1003 script { 1004 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1005 for(int count=2; count<=clusterCount; count++) { 1006 sh """ 1007 export MANAGED_CLUSTER_DIR="${KUBECONFIG_DIR}/${count}" 1008 export MANAGED_CLUSTER_NAME="managed${count-1}" 1009 export MANAGED_KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 1010 export MANAGED_CLUSTER_ENV="mgd${count-1}" 1011 cd ${GO_REPO_PATH}/verrazzano 1012 ./tests/e2e/config/scripts/register_managed_cluster.sh 1013 """ 1014 } 1015 // ADMIN_VZ_VERSION_AT_REGISTRATION is used by verify register test 1016 env.ADMIN_VZ_VERSION_AT_REGISTRATION = sh(returnStdout: true, 1017 script:"KUBECONFIG=${ADMIN_KUBECONFIG} kubectl get verrazzano -o jsonpath='{.items[0].status.version}'").trim() 1018 print "Admin VZ version at registration is ${env.ADMIN_VZ_VERSION_AT_REGISTRATION}" 1019 } 1020 } 1021 } 1022 1023 // Verify the register of the managed clusters 1024 def verifyRegisterManagedClusters(skipLogging) { 1025 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { 1026 script { 1027 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1028 for(int count=2; count<=clusterCount; count++) { 1029 sh """ 1030 export MANAGED_CLUSTER_NAME="managed${count-1}" 1031 export MANAGED_KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 1032 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1033 ginkgo -p --randomize-all -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" multicluster/verify-register/... 
-- --skipLogging=${skipLogging} 1034 """ 1035 } 1036 } 1037 } 1038 } 1039 1040 // Utility method to create a map that will be used later to perform parallel operations against multiple clusters 1041 Map createClusterExecutionsMap() { 1042 script { 1043 // Create a dictionary of steps to be executed in parallel 1044 // - the first one will always be the Admin cluster 1045 // - clusters 2-max are managed clusters 1046 def clusterExecutionsMap = [:] 1047 return clusterExecutionsMap 1048 } 1049 } 1050 1051 // Verify the deregister of the managed clusters 1052 def verifyDeregisterManagedClusters() { 1053 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { 1054 script { 1055 def verrazzanoDeregisterManagedClusterStages = createClusterExecutionsMap() 1056 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1057 for(int count=2; count<=clusterCount; count++) { 1058 verrazzanoDeregisterManagedClusterStages["${count} - Verify Deregister Managed Cluster"] = verifyDeregisterManagedCluster(count) 1059 } 1060 parallel verrazzanoDeregisterManagedClusterStages 1061 } 1062 } 1063 } 1064 1065 def verifyDeregisterManagedCluster(count) { 1066 // For parallel execution, wrap this in a Groovy enclosure {} 1067 return { 1068 script { 1069 sh """ 1070 export MANAGED_CLUSTER_NAME="managed${count-1}" 1071 export MANAGED_KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 1072 export ADMIN_KUBECONFIG="${KUBECONFIG_DIR}/1/kube_config" 1073 ./tests/e2e/config/scripts/deregister_managed_cluster.sh 1074 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1075 ginkgo build multicluster/verify-deregister/ 1076 ginkgo -p --randomize-all -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" multicluster/verify-deregister/*.test 1077 """ 1078 } 1079 } 1080 } 1081 1082 // Verify the managed cluster permissions 1083 def verifyManagedClusterPermissions() { 1084 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { 1085 script { 1086 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1087 for(int count=2; count<=clusterCount; count++) { 1088 sh """ 1089 export MANAGED_CLUSTER_NAME="managed${count-1}" 1090 export MANAGED_KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 1091 export MANAGED_ACCESS_KUBECONFIG="${KUBECONFIG_DIR}/${count}/managed_kube_config" 1092 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1093 ginkgo -v -stream --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" multicluster/verify-permissions/... 1094 """ 1095 } 1096 } 1097 } 1098 } 1099 1100 // Run ginkgo test suites 1101 def runGinkgo(testSuitePath, clusterDumpDirectory) { 1102 script { 1103 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1104 sh """ 1105 export KUBECONFIG="${KUBECONFIG_DIR}/1/kube_config" 1106 export MANAGED_CLUSTER_NAME="managed1" 1107 export MANAGED_KUBECONFIG="${KUBECONFIG_DIR}/2/kube_config" 1108 export CLUSTER_COUNT=$clusterCount 1109 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1110 export DUMP_KUBECONFIG="${KUBECONFIG_DIR}/2/kube_config" 1111 export DUMP_DIRECTORY="${clusterDumpDirectory}" 1112 ginkgo -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" ${testSuitePath}/... 
1113 """ 1114 } 1115 } 1116 1117 // Run a test suite against all clusters 1118 def runGinkgoRandomize(testSuitePath) { 1119 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { 1120 script { 1121 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1122 def clusterName = "" 1123 for(int count=1; count<=clusterCount; count++) { 1124 // The first cluster created by this script is named as admin, and the subsequent clusters are named as 1125 // managed1, managed2, etc. 1126 if (count == 1) { 1127 clusterName="admin" 1128 } else { 1129 clusterName="managed${count-1}" 1130 } 1131 sh """ 1132 export KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 1133 export CLUSTER_NAME="${clusterName}" 1134 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1135 ginkgo -p --randomize-all -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" ${testSuitePath}/... 1136 """ 1137 } 1138 } 1139 } 1140 } 1141 1142 // Run a test suite against just the admin cluster 1143 def runGinkgoRandomizeAdmin(testSuitePath) { 1144 sh """ 1145 export KUBECONFIG="${KUBECONFIG_DIR}/1/kube_config" 1146 export CLUSTER_NAME="admin" 1147 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1148 ginkgo -p --randomize-all -v --keep-going --no-color -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" ${testSuitePath}/... 1149 """ 1150 } 1151 1152 // Run a test suite against just the admin cluster 1153 def runGinkgoAdmin(testSuitePath) { 1154 sh """ 1155 export KUBECONFIG="${KUBECONFIG_DIR}/1/kube_config" 1156 export CLUSTER_NAME="admin" 1157 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1158 ginkgo -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" ${testSuitePath}/... 1159 """ 1160 } 1161 1162 // Configure the Admin and Managed cluster installer custom resources 1163 def configureVerrazzanoInstallers(installResourceTemplate, configProcessorScript, String... extraArgs) { 1164 script { 1165 // Concatenate the variable args into a single string 1166 String allArgs = "" 1167 extraArgs.each { allArgs += it + " " } 1168 1169 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1170 for(int count=1; count<=clusterCount; count++) { 1171 def destinationPath = "${env.KUBECONFIG_DIR}/${count}/${installerFileName}" 1172 def installProfile = env.MANAGED_CLUSTER_PROFILE 1173 // Installs using OCI DNS require a unique domain per cluster 1174 // - also, the env name must be <= 10 chars for some reason. 1175 def envName = "mgd${count-1}" 1176 if (count == 1) { 1177 // Cluster "1" is always the admin cluster, use the chosen profile for the VZ install 1178 // with the env name "admin" 1179 installProfile = env.ADMIN_CLUSTER_PROFILE 1180 envName = "admin" 1181 } 1182 sh """ 1183 mkdir -p "${KUBECONFIG_DIR}/${count}" 1184 export PATH=${HOME}/go/bin:${PATH} 1185 cd ${GO_REPO_PATH}/verrazzano 1186 # Copy the template config over for the mgd cluster profile configuration 1187 cp $installResourceTemplate $destinationPath 1188 VZ_ENVIRONMENT_NAME="${envName}" INSTALL_PROFILE=$installProfile $configProcessorScript $destinationPath $allArgs 1189 """ 1190 } 1191 } 1192 } 1193 1194 def configureManagedInstallers(installResourceTemplate, configProcessorScript, filename, String... 
extraArgs) { 1195 script { 1196 // Concatenate the variable args into a single string 1197 String allArgs = "" 1198 extraArgs.each { allArgs += it + " " } 1199 1200 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1201 1202 // start at 2 to skip the admin cluster 1203 for(int count=2; count<=clusterCount; count++) { 1204 def destinationPath = "${env.KUBECONFIG_DIR}/${count}/${filename}" 1205 def installProfile = env.MANAGED_CLUSTER_PROFILE 1206 def envName = "mgd${count-1}" 1207 sh """ 1208 cd ${GO_REPO_PATH}/verrazzano 1209 # Copy the template config over for the mgd cluster profile configuration 1210 cp $installResourceTemplate $destinationPath 1211 VZ_ENVIRONMENT_NAME="${envName}" INSTALL_PROFILE=$installProfile $configProcessorScript $destinationPath $allArgs 1212 """ 1213 } 1214 } 1215 } 1216 1217 // Create the required KinD clusters 1218 def createKindClusters(start = 1, cleanupKindContainers = true) { 1219 script { 1220 sh """ 1221 echo "tests will execute" > ${TESTS_EXECUTED_FILE} 1222 """ 1223 // NOTE: Eventually we should be able to parallelize the cluster creation, however 1224 // we seem to be getting some kind of timing issue on cluster create; the 2nd 1225 // cluster always seems to get a connect/timeout issue, so for now we are keeping 1226 // the KinD cluster creation serial 1227 1228 // These address ranges are hard-coded for now, but eventually we should be able to build them by 1229 // inspecting the Docker bridge network CIDR and splitting up a range based on 1230 // the cluster count. 1231 1232 def addressRanges = [ "172.18.0.231-172.18.0.238", "172.18.0.239-172.18.0.246", "172.18.0.247-172.18.0.254"] 1233 def clusterInstallStages = [:] 1234 boolean connectJenkinsRunnerToNetwork = true 1235 if(start > 1) { 1236 connectJenkinsRunnerToNetwork = false 1237 } 1238 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1239 for(int count=start; count<=clusterCount; count++) { 1240 def metallbAddressRange = addressRanges.get(count-1) 1241 def deployStep = "cluster-${count}" 1242 // Create dictionary of steps for parallel execution 1243 //clusterInstallStages[deployStep] = installKindCluster(count, metallbAddressRange, cleanupKindContainers, connectJenkinsRunnerToNetwork) 1244 // For sequential execution of steps 1245 installKindCluster(count, metallbAddressRange, cleanupKindContainers, connectJenkinsRunnerToNetwork) 1246 cleanupKindContainers = false 1247 connectJenkinsRunnerToNetwork = false 1248 } 1249 // Execute steps in parallel 1250 //parallel clusterInstallStages 1251 } 1252 } 1253 1254 // Invoke the OKE cluster creation script for the desired number of clusters 1255 def createOKEClusters(clusterPrefix) { 1256 script { 1257 sh """ 1258 echo "tests will execute" > ${TESTS_EXECUTED_FILE} 1259 """ 1260 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1261 sh """ 1262 mkdir -p ${KUBECONFIG_DIR} 1263 echo "Create OKE cluster" 1264 cd ${TEST_SCRIPTS_DIR} 1265 TF_VAR_label_prefix=${clusterPrefix} TF_VAR_state_name=multicluster-${env.BUILD_NUMBER}-${env.BRANCH_NAME} ./create_oke_multi_cluster.sh "$clusterCount" "${KUBECONFIG_DIR}" ${params.CREATE_CLUSTER_USE_CALICO} 1266 """ 1267 } 1268 } 1269 1270 def dumpK8sCluster(dumpDirectory) { 1271 script { 1272 if ( fileExists(env.TESTS_EXECUTED_FILE) ) { 1273 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1274 for (int count = 1; count <= clusterCount; count++) { 1275 sh """ 1276 export KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 1277 ${GO_REPO_PATH}/verrazzano/ci/scripts/capture_cluster_snapshot.sh 
${dumpDirectory}/cluster-snapshot-${count} 1278 """ 1279 } 1280 } 1281 } 1282 } 1283 1284 def dumpVerrazzanoSystemPods() { 1285 script { 1286 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1287 for(int count=1; count<=clusterCount; count++) { 1288 LOG_DIR="${VERRAZZANO_INSTALL_LOGS_DIR}/cluster-$count" 1289 sh """ 1290 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1291 mkdir -p ${LOG_DIR} 1292 export DIAGNOSTIC_LOG="${LOG_DIR}/verrazzano-system-pods.log" 1293 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o pods -n verrazzano-system -m "verrazzano system pods" || echo "failed" > ${POST_DUMP_FAILED_FILE} 1294 export DIAGNOSTIC_LOG="${LOG_DIR}/verrazzano-system-certs.log" 1295 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o cert -n verrazzano-system -m "verrazzano system certs" || echo "failed" > ${POST_DUMP_FAILED_FILE} 1296 export DIAGNOSTIC_LOG="${LOG_DIR}/verrazzano-system-osd.log" 1297 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o pods -n verrazzano-system -r "vmi-system-osd-*" -m "verrazzano system opensearchdashboards log" -l -c osd || echo "failed" > ${POST_DUMP_FAILED_FILE} 1298 export DIAGNOSTIC_LOG="${LOG_DIR}/verrazzano-system-es-master.log" 1299 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o pods -n verrazzano-system -r "vmi-system-es-master-*" -m "verrazzano system opensearchdashboards log" -l -c es-master || echo "failed" > ${POST_DUMP_FAILED_FILE} 1300 """ 1301 } 1302 } 1303 } 1304 1305 def dumpCattleSystemPods() { 1306 script { 1307 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1308 for(int count=1; count<=clusterCount; count++) { 1309 LOG_DIR="${VERRAZZANO_INSTALL_LOGS_DIR}/cluster-$count" 1310 sh """ 1311 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1312 mkdir -p ${LOG_DIR} 1313 export DIAGNOSTIC_LOG="${LOG_DIR}/cattle-system-pods.log" 1314 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o pods -n cattle-system -m "cattle system pods" || echo "failed" > ${POST_DUMP_FAILED_FILE} 1315 export DIAGNOSTIC_LOG="${LOG_DIR}/rancher.log" 1316 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o pods -n cattle-system -r "rancher-*" -m "Rancher logs" -c rancher -l || echo "failed" > ${POST_DUMP_FAILED_FILE} 1317 """ 1318 } 1319 } 1320 } 1321 1322 def dumpNginxIngressControllerLogs() { 1323 script { 1324 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1325 for(int count=1; count<=clusterCount; count++) { 1326 LOG_DIR="${VERRAZZANO_INSTALL_LOGS_DIR}/cluster-$count" 1327 sh """ 1328 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1329 mkdir -p ${LOG_DIR} 1330 export DIAGNOSTIC_LOG="${LOG_DIR}/nginx-ingress-controller.log" 1331 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o pods -n ingress-nginx -r "nginx-ingress-controller-*" -m "Nginx Ingress Controller" -c controller -l || echo "failed" > ${POST_DUMP_FAILED_FILE} 1332 """ 1333 } 1334 } 1335 } 1336 1337 def dumpVerrazzanoPlatformOperatorLogs() { 1338 script { 1339 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1340 for(int count=1; count<=clusterCount; count++) { 1341 LOG_DIR="${WORKSPACE}/verrazzano-platform-operator/logs/cluster-$count" 1342 sh """ 1343 ## dump out verrazzano-platform-operator logs 1344 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1345 mkdir -p ${LOG_DIR} 1346 kubectl -n verrazzano-install logs 
--selector=app=verrazzano-platform-operator > ${LOG_DIR}/verrazzano-platform-operator-pod.log --tail -1 || echo "failed" > ${POST_DUMP_FAILED_FILE} 1347 kubectl -n verrazzano-install describe pod --selector=app=verrazzano-platform-operator > ${LOG_DIR}/verrazzano-platform-operator-pod.out || echo "failed" > ${POST_DUMP_FAILED_FILE} 1348 echo "verrazzano-platform-operator logs dumped to verrazzano-platform-operator-pod.log" 1349 echo "verrazzano-platform-operator pod description dumped to verrazzano-platform-operator-pod.out" 1350 echo "------------------------------------------" 1351 """ 1352 } 1353 } 1354 } 1355 1356 def dumpVerrazzanoApplicationOperatorLogs() { 1357 script { 1358 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1359 for(int count=1; count<=clusterCount; count++) { 1360 LOG_DIR="${WORKSPACE}/verrazzano-application-operator/logs/cluster-$count" 1361 sh """ 1362 ## dump out verrazzano-application-operator logs 1363 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1364 mkdir -p ${LOG_DIR} 1365 kubectl -n verrazzano-system logs --selector=app=verrazzano-application-operator > ${LOG_DIR}/verrazzano-application-operator-pod.log --tail -1 || echo "failed" > ${POST_DUMP_FAILED_FILE} 1366 kubectl -n verrazzano-system describe pod --selector=app=verrazzano-application-operator > ${LOG_DIR}/verrazzano-application-operator-pod.out || echo "failed" > ${POST_DUMP_FAILED_FILE} 1367 echo "verrazzano-application-operator logs dumped to verrazzano-application-operator-pod.log" 1368 echo "verrazzano-application-operator pod description dumped to verrazzano-application-operator-pod.out" 1369 echo "------------------------------------------" 1370 """ 1371 } 1372 } 1373 } 1374 1375 def dumpOamKubernetesRuntimeLogs() { 1376 script { 1377 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1378 for(int count=1; count<=clusterCount; count++) { 1379 LOG_DIR="${WORKSPACE}/oam-kubernetes-runtime/logs/cluster-$count" 1380 sh """ 1381 ## dump out oam-kubernetes-runtime logs 1382 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1383 mkdir -p ${LOG_DIR} 1384 kubectl -n verrazzano-system logs --selector=app.kubernetes.io/instance=oam-kubernetes-runtime > ${LOG_DIR}/oam-kubernetes-runtime-pod.log --tail -1 || echo "failed" > ${POST_DUMP_FAILED_FILE} 1385 kubectl -n verrazzano-system describe pod --selector=app.kubernetes.io/instance=oam-kubernetes-runtime > ${LOG_DIR}/oam-kubernetes-runtime-pod.out || echo "failed" > ${POST_DUMP_FAILED_FILE} 1386 echo "oam-kubernetes-runtime logs dumped to oam-kubernetes-runtime-pod.log" 1387 echo "oam-kubernetes-runtime pod description dumped to oam-kubernetes-runtime-pod.out" 1388 echo "------------------------------------------" 1389 """ 1390 } 1391 } 1392 } 1393 1394 def dumpVerrazzanoApiLogs() { 1395 script { 1396 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1397 for(int count=1; count<=clusterCount; count++) { 1398 LOG_DIR="${VERRAZZANO_INSTALL_LOGS_DIR}/cluster-$count" 1399 sh """ 1400 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1401 mkdir -p ${LOG_DIR} 1402 export DIAGNOSTIC_LOG="${LOG_DIR}/verrazzano-authproxy.log" 1403 ${GO_REPO_PATH}/verrazzano/platform-operator/scripts/install/k8s-dump-objects.sh -o pods -n verrazzano-system -r "verrazzano-authproxy-*" -m "verrazzano api" -c verrazzano-authproxy -l || echo "failed" > ${POST_DUMP_FAILED_FILE} 1404 """ 1405 } 1406 } 1407 } 1408 1409 def dumpInstallLogs() { 1410 script { 1411 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1412 for(int count=1; 
count<=clusterCount; count++) { 1413 LOG_DIR="${VERRAZZANO_INSTALL_LOGS_DIR}/cluster-$count" 1414 1415 // This function may run on older versions of Verrazzano that have the logs stored in the default namespace 1416 def namespace = "verrazzano-install" 1417 sh """ 1418 ## dump Verrazzano install logs 1419 export KUBECONFIG=${KUBECONFIG_DIR}/$count/kube_config 1420 mkdir -p ${LOG_DIR} 1421 kubectl -n ${namespace} logs --selector=job-name=verrazzano-install-my-verrazzano > ${LOG_DIR}/${VERRAZZANO_INSTALL_LOG} --tail -1 1422 kubectl -n ${namespace} describe pod --selector=job-name=verrazzano-install-my-verrazzano > ${LOG_DIR}/verrazzano-install-job-pod.out 1423 echo "------------------------------------------" 1424 """ 1425 } 1426 } 1427 } 1428 1429 def getEffectiveDumpOnSuccess() { 1430 def effectiveValue = params.DUMP_K8S_CLUSTER_ON_SUCCESS 1431 if (FORCE_DUMP_K8S_CLUSTER_ON_SUCCESS.equals("true") && (env.BRANCH_NAME.equals("master"))) { 1432 effectiveValue = true 1433 echo "Forcing dump on success based on global override setting" 1434 } 1435 return effectiveValue 1436 } 1437 1438 def setDisplayName() { 1439 echo "Start setDisplayName" 1440 def causes = currentBuild.getBuildCauses() 1441 echo "causes: " + causes.toString() 1442 for (cause in causes) { 1443 def causeString = cause.toString() 1444 echo "current cause: " + causeString 1445 if (causeString.contains("UpstreamCause") && causeString.contains("Started by upstream project")) { 1446 echo "This job was caused by " + causeString 1447 if (causeString.contains("verrazzano-periodic-triggered-tests")) { 1448 currentBuild.displayName = env.BUILD_NUMBER + " : PERIODIC" 1449 } else if (causeString.contains("verrazzano-flaky-tests")) { 1450 currentBuild.displayName = env.BUILD_NUMBER + " : FLAKY" 1451 } 1452 } 1453 } 1454 echo "End setDisplayName" 1455 } 1456 1457 def modifyHelloHelidonApp(newNamespace, newProjName) { 1458 sh """ 1459 # create modified versions of the hello helidon MC example 1460 export MC_HH_DEST_DIR=${GO_REPO_PATH}/verrazzano/examples/multicluster/${newNamespace} 1461 export MC_HH_SOURCE_DIR=${GO_REPO_PATH}/verrazzano/examples/multicluster/hello-helidon 1462 export MC_APP_NAMESPACE="${newNamespace}" 1463 export MC_PROJ_NAME="${newProjName}" 1464 ${GO_REPO_PATH}/verrazzano/ci/scripts/generate_mc_hello_deployment_files.sh 1465 """ 1466 } 1467 1468 def deploySampleApp() { 1469 modifyHelloHelidonApp("${SAMPLE_APP_NAMESPACE}", "${SAMPLE_APP_PROJECT}") 1470 sh """ 1471 export KUBECONFIG=$ADMIN_KUBECONFIG 1472 kubectl apply -f ${GO_REPO_PATH}/verrazzano/examples/multicluster/hello-helidon-sample/verrazzano-project.yaml 1473 kubectl apply -f ${GO_REPO_PATH}/verrazzano/examples/multicluster/hello-helidon-sample/hello-helidon-comp.yaml 1474 kubectl apply -f ${GO_REPO_PATH}/verrazzano/examples/multicluster/hello-helidon-sample/mc-hello-helidon-app.yaml 1475 """ 1476 } 1477 1478 def undeploySampleApp() { 1479 sh """ 1480 export KUBECONFIG=$ADMIN_KUBECONFIG 1481 kubectl delete -f ${GO_REPO_PATH}/verrazzano/examples/multicluster/hello-helidon-sample/mc-hello-helidon-app.yaml 1482 kubectl delete -f ${GO_REPO_PATH}/verrazzano/examples/multicluster/hello-helidon-sample/hello-helidon-comp.yaml 1483 kubectl delete -f ${GO_REPO_PATH}/verrazzano/examples/multicluster/hello-helidon-sample/verrazzano-project.yaml 1484 """ 1485 } 1486 1487 def runHelidonNsOpsTest() { 1488 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1489 modifyHelloHelidonApp("hello-helidon-ns", "hello-helidon-ns") 1490 sh """ 1491 export MANAGED_CLUSTER_NAME="managed1" 
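        # The helidon-ns-ops suite targets managed1 (cluster 2); the same kubeconfig is exported as DUMP_KUBECONFIG so failures are captured from that cluster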
1492 export MANAGED_KUBECONFIG="${KUBECONFIG_DIR}/2/kube_config" 1493 export CLUSTER_COUNT=$clusterCount 1494 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1495 export DUMP_KUBECONFIG="${KUBECONFIG_DIR}/2/kube_config" 1496 export DUMP_DIRECTORY="${TEST_DUMP_ROOT}/examples-helidon-ns-ops" 1497 ginkgo -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" multicluster/examples/helidon-ns-ops/... 1498 """ 1499 } 1500 1501 def runMulticlusterVerifyApi() { 1502 int clusterCount = params.TOTAL_CLUSTERS.toInteger() 1503 for(int count=2; count<=clusterCount; count++) { 1504 sh """ 1505 export MANAGED_CLUSTER_NAME="managed${count-1}" 1506 export MANAGED_KUBECONFIG="${KUBECONFIG_DIR}/${count}/kube_config" 1507 cd ${GO_REPO_PATH}/verrazzano/tests/e2e 1508 ginkgo -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" multicluster/verify-api/... 1509 """ 1510 } 1511 } 1512 1513 def runConsoleTests() { 1514 // Set app information used by the application page UI tests to assert for app info 1515 // Console runs on admin cluster and the KUBECONFIG is pointed at it (which is cluster 1) 1516 // Make sure that application page tests are also run by setting RUN_APP_TESTS=true since we deployed 1517 // a sample app for that purpose 1518 catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { 1519 sh """ 1520 export DUMP_DIRECTORY="${TEST_DUMP_ROOT}/console" 1521 export CONSOLE_REPO_BRANCH="${params.CONSOLE_REPO_BRANCH}" 1522 export CONSOLE_APP_NAME="${SAMPLE_APP_NAME}" 1523 export CONSOLE_APP_NAMESPACE="${SAMPLE_APP_NAMESPACE}" 1524 export CONSOLE_APP_CLUSTER="managed1" 1525 export CONSOLE_APP_COMP="${SAMPLE_APP_COMPONENT}" 1526 KUBECONFIG=${ADMIN_KUBECONFIG} RUN_APP_TESTS=true ${GO_REPO_PATH}/verrazzano/ci/scripts/run_console_tests.sh 1527 """ 1528 } 1529 } 1530 1531 def verifyUpgrade() { 1532 return [ 1533 "Verify Register": { 1534 verifyRegisterManagedClusters() 1535 }, 1536 "verify-permissions": { 1537 verifyManagedClusterPermissions() 1538 }, 1539 "system component metrics": { 1540 runGinkgoRandomize('metrics/syscomponents') 1541 }, 1542 ] 1543 } 1544 1545 def verifyDNSUpdate() { 1546 return [ 1547 "verify-admin-cluster-dns-update": { 1548 runGinkgoAdmin('update/dnsac') 1549 }, 1550 "verify-managed-cluster-dns-update": { 1551 runGinkgoAdmin('update/dnsmc') 1552 }, 1553 ] 1554 } 1555 1556 // NOTE: The stages are executed in parallel, however the tests themselves are executed against 1557 // each cluster in sequence. If possible, we should parallelize that as well. 
1558 def verifyInfra() { 1559 return [ 1560 "verify-scripts": { 1561 runGinkgoRandomize('scripts') 1562 }, 1563 "verify-infra restapi": { 1564 runGinkgoRandomize('verify-infra/restapi') 1565 }, 1566 "verify-infra oam": { 1567 runGinkgoRandomize('verify-infra/oam') 1568 }, 1569 "verify-infra vmi": { 1570 runGinkgoRandomize('verify-infra/vmi') 1571 }, 1572 "mc verify-jaeger-install": { 1573 runGinkgoRandomize('multicluster/verify-jaeger/install') 1574 }, 1575 "mc verify-jaeger-system": { 1576 runGinkgoRandomize('multicluster/verify-jaeger/system') 1577 }, 1578 ] 1579 } 1580 1581 def verifyExamples() { 1582 return [ 1583 "mc examples helidon": { 1584 runGinkgo("multicluster/examples/helidon", "${TEST_DUMP_ROOT}/helidon-workload") 1585 }, 1586 "mc examples helidon deprecated": { 1587 runGinkgo("multicluster/examples/helidon-deprecated", "${TEST_DUMP_ROOT}/helidon-deprecated") 1588 }, 1589 "Delete Deployed App NS": { 1590 runHelidonNsOpsTest() 1591 }, 1592 "mc weblogic workload": { 1593 runGinkgo("multicluster/workloads/mcweblogic", "${TEST_DUMP_ROOT}/weblogic-workload") 1594 }, 1595 "mc coherence workload": { 1596 runGinkgo("multicluster/workloads/mccoherence", "${TEST_DUMP_ROOT}/coherence-workload") 1597 }, 1598 "jaeger helidon": { 1599 runGinkgo("multicluster/verify-jaeger/helidon", "${TEST_DUMP_ROOT}/jaeger-helidon") 1600 }, 1601 ] 1602 } 1603 1604 def setVZCRDVersionForInstallation(){ 1605 if(params.CRD_API_VERSION == "v1alpha1"){ 1606 INSTALL_CONFIG_FILE_KIND = "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts/v1alpha1/install-vz-prod-kind-multicluster.yaml" 1607 INSTALL_CONFIG_FILE_OCIDNS = "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts/v1alpha1/install-verrazzano-ocidns.yaml" 1608 INSTALL_CONFIG_FILE_NIPIO = "${GO_REPO_PATH}/verrazzano/tests/e2e/config/scripts/v1alpha1/install-verrazzano-nipio.yaml" 1609 } 1610 }
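
// ---------------------------------------------------------------------------
// Illustrative sketch only, not wired into the pipeline above: the NOTE on
// verifyInfra() points out that each suite is still run against the clusters
// in sequence. If that ever needs to be parallelized, one possible shape,
// reusing the createClusterExecutionsMap() pattern used elsewhere in this
// file, is sketched below. The helper name runGinkgoRandomizeParallel is
// hypothetical and has not been exercised by this job.
def runGinkgoRandomizeParallel(testSuitePath) {
    script {
        def clusterStages = createClusterExecutionsMap()
        int clusterCount = params.TOTAL_CLUSTERS.toInteger()
        for (int count = 1; count <= clusterCount; count++) {
            // Declare per-iteration locals so each closure captures its own values
            def idx = count
            def clusterName = (idx == 1) ? "admin" : "managed${idx - 1}"
            clusterStages["${clusterName} ${testSuitePath}"] = {
                sh """
                    export KUBECONFIG="${KUBECONFIG_DIR}/${idx}/kube_config"
                    export CLUSTER_NAME="${clusterName}"
                    cd ${GO_REPO_PATH}/verrazzano/tests/e2e
                    ginkgo -p --randomize-all -v --keep-going --no-color ${GINKGO_REPORT_ARGS} -tags="${params.TAGGED_TESTS}" --focus-file="${params.INCLUDED_TESTS}" --skip-file="${params.EXCLUDED_TESTS}" ${testSuitePath}/...
                """
            }
        }
        // Run the per-cluster suites concurrently instead of one cluster at a time
        parallel clusterStages
    }
}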