github.com/zppinho/prow@v0.0.0-20240510014325-1738badeb017/test/integration/lib.sh

#!/usr/bin/env bash
# Copyright 2022 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# shellcheck disable=SC2034

SCRIPT_ROOT="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "${SCRIPT_ROOT}/../.." && pwd -P)"

# Default variables. Note that these variables are not environment variables;
# they are local to this script and to the other scripts that source it (that
# is, even if you change them outside of this script, the changes are ignored
# because the variables are redeclared here). A minimal sourcing sketch follows
# these declarations.
#
readonly _KIND_CLUSTER_NAME="kind-prow-integration"
readonly _KIND_CONTEXT="kind-${_KIND_CLUSTER_NAME}"
readonly LOCAL_DOCKER_REGISTRY_NAME="${_KIND_CLUSTER_NAME}-registry"
readonly LOCAL_DOCKER_REGISTRY_PORT="5001"
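
# A minimal sourcing sketch (the caller shown here is hypothetical, for
# illustration only): other scripts in this directory pick up these variables
# and the helper functions below by sourcing this file, e.g.
#
#   SCRIPT_ROOT="$(cd "$(dirname "$0")" && pwd)"
#   source "${SCRIPT_ROOT}/lib.sh"
#   do_kubectl get pods --namespace=default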

# These are the components to test (by default): the services that must be
# deployed into the test cluster in order to run all of the integration tests.
# A readiness-sweep sketch follows this list.
#
# Note that some of these components use the same image. For example, deck and
# deck-tenanted both use the "deck" image in PROW_IMAGES.
declare -ra PROW_COMPONENTS=(
  crier
  deck
  deck-tenanted
  fakegcsserver
  fakegerritserver
  fakegitserver
  fakeghserver
  fakepubsub
  gerrit
  hook
  horologium
  moonraker
  prow-controller-manager
  sinker
  sub
  tide
  webhook-server
)
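
# Illustrative sketch only (not executed here): sweeping over this list to
# check that every component is up would look roughly like the following, using
# the wait_for_readiness helper defined further below.
#
#   for component in "${PROW_COMPONENTS[@]}"; do
#     wait_for_readiness "${component}"
#   done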

# These are the images to build. The keys are the short (unique) image names,
# and the values are the paths, relative to REPO_ROOT, of the corresponding
# source code. See the iteration sketch after this map.
declare -rA PROW_IMAGES=(
  # Actual Prow components.
  [crier]=cmd/crier
  [deck]=cmd/deck
  [gangway]=cmd/gangway
  [gerrit]=cmd/gerrit
  [hook]=cmd/hook
  [horologium]=cmd/horologium
  [moonraker]=cmd/moonraker
  [prow-controller-manager]=cmd/prow-controller-manager
  [sinker]=cmd/sinker
  [sub]=cmd/sub
  [tide]=cmd/tide
  # Fakes.
  [fakegcsserver]=test/integration/cmd/fakegcsserver
  [fakegerritserver]=test/integration/cmd/fakegerritserver
  [fakegitserver]=test/integration/cmd/fakegitserver
  [fakeghserver]=test/integration/cmd/fakeghserver
  [fakepubsub]=test/integration/cmd/fakepubsub
  # Utility images. These images are not Prow components per se, and so do not
  # have corresponding Kubernetes configurations.
  [clonerefs]=cmd/clonerefs
  [initupload]=cmd/initupload
  [entrypoint]=cmd/entrypoint
  [sidecar]=cmd/sidecar
  [webhook-server]=cmd/webhook-server
)
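
# Illustrative sketch only: iterating this map and resolving each short image
# name to the directory that holds its source code is a matter of joining the
# value with REPO_ROOT, e.g.
#
#   for image in "${!PROW_IMAGES[@]}"; do
#     echo "${image} -> ${REPO_ROOT}/${PROW_IMAGES[${image}]}"
#   done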

# Defines the one-to-many relationship between Prow images and components. This
# mapping tells us which Prow components need to be redeployed depending on
# which images are rebuilt. A lookup sketch follows this map.
declare -rA PROW_IMAGES_TO_COMPONENTS=(
  [crier]=crier
  [deck]="deck,deck-tenanted"
  [gangway]=gangway
  [gerrit]=gerrit
  [hook]=hook
  [horologium]=horologium
  [moonraker]=moonraker
  [prow-controller-manager]=prow-controller-manager
  [sinker]=sinker
  [sub]=sub
  [tide]=tide
  [fakegcsserver]=fakegcsserver
  [fakegerritserver]=fakegerritserver
  [fakegitserver]=fakegitserver
  [fakeghserver]=fakeghserver
  [fakepubsub]=fakepubsub
  [webhook-server]=webhook-server
)
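
# Illustrative sketch only (this helper is hypothetical and not called anywhere
# in this library): a consumer of the map above could expand the
# comma-separated component list for a rebuilt image like so. Utility images
# (clonerefs, initupload, entrypoint, sidecar) have no entry and print nothing.
function components_for_image_sketch() {
  local image="${1}"
  local csv="${PROW_IMAGES_TO_COMPONENTS[${image}]:-}"
  if [[ -n "${csv}" ]]; then
    # Print one component per line (e.g. "deck" expands to deck and deck-tenanted).
    echo "${csv}" | tr ',' '\n'
  fi
}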

# Defines the order in which we'll start and wait for components to be ready.
# Each element is deployed in order. If we encounter a WAIT_* value, we wait
# until the named component (or resource) is ready before proceeding with
# further deployments. A dispatch sketch follows this list.
declare -ra PROW_DEPLOYMENT_ORDER=(
  # Start up basic, dependency-free components (and non-components like secrets,
  # ingress, etc) first.
  50_crd.yaml
  WAIT_FOR_CRD_prowjobs.prow.k8s.io,default

  git-config-system.yaml
  WAIT_FOR_RESOURCE_configmaps,git-config-system,default

  100_starter.yaml
  WAIT_FOR_RESOURCE_namespaces,test-pods,default
  WAIT_FOR_RESOURCE_secrets,oauth-token,default
  WAIT_FOR_RESOURCE_secrets,kubeconfig,default

  101_secrets.yaml
  WAIT_FOR_RESOURCE_secrets,hmac-token,default
  WAIT_FOR_RESOURCE_secrets,http-cookiefile,default
  WAIT_FOR_RESOURCE_secrets,cookie,default
  WAIT_FOR_RESOURCE_secrets,github-oauth-config,default

  200_ingress.yaml
  WAIT_FOR_RESOURCE_ingresses,strip-path-prefix,default
  WAIT_FOR_RESOURCE_ingresses,no-strip-path-prefix,default

  # Create fakeghserver early, because other things depend on it. Otherwise we
  # end up logging a lot of errors about failing to connect to a fake service
  # (e.g., fakeghserver) because it is not running yet. Connection failures slow
  # down the startup time a bit because they can lead to exponential backoffs
  # until the connections succeed.
  fakeghserver.yaml
  WAIT_fakeghserver

  # Start fakepubsub early, but don't wait for it just yet. Its image is big,
  # and if the local registry is empty (we're running integration tests on a
  # cold machine), it takes a long time for the deployment to pull it from the
  # local registry.
  fakepubsub.yaml
  # Sub can't properly start its PullServer unless the subscriptions have
  # already been created. So wait for fakepubsub to be initialized with those
  # subscriptions first.
  WAIT_fakepubsub

  fakegcsserver.yaml
  WAIT_fakegcsserver

  fakegerritserver.yaml
  WAIT_fakegerritserver

  fakegitserver.yaml
  WAIT_fakegitserver

  gerrit.yaml
  WAIT_FOR_RESOURCE_roles,gerrit,default
  WAIT_FOR_RESOURCE_rolebindings,gerrit,default
  WAIT_FOR_RESOURCE_serviceaccounts,gerrit,default
  WAIT_gerrit

  horologium_rbac.yaml
  horologium_service.yaml
  horologium_deployment.yaml
  WAIT_FOR_RESOURCE_roles,horologium,default
  WAIT_FOR_RESOURCE_rolebindings,horologium,default
  WAIT_FOR_RESOURCE_serviceaccounts,horologium,default
  WAIT_horologium

  prow_controller_manager_rbac.yaml
  prow_controller_manager_service.yaml
  prow_controller_manager_deployment.yaml
  WAIT_FOR_RESOURCE_roles,prow-controller-manager,default
  WAIT_FOR_RESOURCE_roles,prow-controller-manager,test-pods
  WAIT_FOR_RESOURCE_rolebindings,prow-controller-manager,default
  WAIT_FOR_RESOURCE_rolebindings,prow-controller-manager,test-pods
  WAIT_FOR_RESOURCE_serviceaccounts,prow-controller-manager,default
  WAIT_prow-controller-manager

  sinker_rbac.yaml
  sinker_service.yaml
  sinker.yaml
  WAIT_FOR_RESOURCE_roles,sinker,default
  WAIT_FOR_RESOURCE_roles,sinker,test-pods
  WAIT_FOR_RESOURCE_rolebindings,sinker,default
  WAIT_FOR_RESOURCE_rolebindings,sinker,test-pods
  WAIT_FOR_RESOURCE_serviceaccounts,sinker,default
  WAIT_sinker

  # Deploy hook and tide early because crier, deck, etc. depend on them.
  hook_rbac.yaml
  hook_service.yaml
  hook_deployment.yaml
  WAIT_FOR_RESOURCE_roles,hook,default
  WAIT_FOR_RESOURCE_rolebindings,hook,default
  WAIT_FOR_RESOURCE_serviceaccounts,hook,default
  WAIT_hook

  tide_rbac.yaml
  tide_service.yaml
  tide_deployment.yaml
  WAIT_FOR_RESOURCE_roles,tide,default
  WAIT_FOR_RESOURCE_rolebindings,tide,default
  WAIT_FOR_RESOURCE_serviceaccounts,tide,default
  WAIT_tide

  crier_rbac.yaml
  crier_service.yaml
  crier_deployment.yaml
  WAIT_FOR_RESOURCE_roles,crier,default
  WAIT_FOR_RESOURCE_roles,crier,test-pods
  WAIT_FOR_RESOURCE_rolebindings,crier-namespaced,default
  WAIT_FOR_RESOURCE_rolebindings,crier-namespaced,test-pods
  WAIT_FOR_RESOURCE_serviceaccounts,crier,default
  WAIT_crier

  deck_rbac.yaml
  deck_service.yaml
  deck_deployment.yaml
  deck_tenant_deployment.yaml
  WAIT_FOR_RESOURCE_roles,deck,default
  WAIT_FOR_RESOURCE_roles,deck,test-pods
  WAIT_FOR_RESOURCE_rolebindings,deck,default
  WAIT_FOR_RESOURCE_rolebindings,deck,test-pods
  WAIT_FOR_RESOURCE_serviceaccounts,deck,default
  WAIT_deck
  WAIT_deck-tenanted

  webhook_server_rbac.yaml
  webhook_server_service.yaml
  webhook_server_deployment.yaml
  WAIT_FOR_RESOURCE_clusterroles,webhook-server,default
  WAIT_FOR_RESOURCE_clusterrolebindings,webhook-server,default
  WAIT_FOR_RESOURCE_serviceaccounts,webhook-server,default
  WAIT_webhook-server

  moonraker_rbac.yaml
  moonraker_service.yaml
  moonraker_deployment.yaml
  WAIT_FOR_RESOURCE_serviceaccounts,moonraker,default
  WAIT_moonraker

  gangway_rbac.yaml
  gangway_service.yaml
  gangway_deployment.yaml
  WAIT_FOR_RESOURCE_roles,gangway,default
  WAIT_FOR_RESOURCE_rolebindings,gangway,default
  WAIT_FOR_RESOURCE_serviceaccounts,gangway,default
  WAIT_gangway

  sub.yaml
  WAIT_FOR_RESOURCE_roles,sub,default
  WAIT_FOR_RESOURCE_rolebindings,sub,default
  WAIT_FOR_RESOURCE_serviceaccounts,sub,default
  WAIT_sub
)
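
# Illustrative sketch only (this helper is hypothetical and not called anywhere
# in this library): walking PROW_DEPLOYMENT_ORDER amounts to applying each YAML
# entry in order and dispatching on the WAIT_* markers via the helpers defined
# below. The "config_dir" argument (the directory holding the YAML files listed
# above) is an assumption.
function deploy_in_order_sketch() {
  local config_dir="${1}"
  local entry
  for entry in "${PROW_DEPLOYMENT_ORDER[@]}"; do
    case "${entry}" in
      # Match the most specific prefixes first; WAIT_* would also match them.
      WAIT_FOR_CRD_*) wait_for_crd "${entry#WAIT_FOR_CRD_}" ;;
      WAIT_FOR_RESOURCE_*) wait_for_resource "${entry#WAIT_FOR_RESOURCE_}" ;;
      WAIT_*) wait_for_readiness "${entry#WAIT_}" ;;
      *) do_kubectl apply -f "${config_dir}/${entry}" ;;
    esac
  done
}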

function do_kubectl() {
  kubectl --context="${_KIND_CONTEXT}" "$@"
}
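
# Example: "do_kubectl get pods --namespace=default" behaves like the plain
# kubectl command, but is always pinned to the kind test cluster's context.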

function log() {
  >&2 cat <<EOF

==> $@

EOF
}

function wait_for_readiness() {
  local component

  component="${1}"

  echo >&2 "Waiting for ${component}"
  for _ in $(seq 1 180); do
    if do_kubectl wait pod \
      --for=condition=ready \
      --selector=app="${component}" \
      --namespace=default \
      --timeout=5s >/dev/null 2>&1; then
      return
    else
      echo >&2 "waiting..."
      sleep 1
    fi
  done

  echo >&2 "${component} failed to get ready"
  return 1
}
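
# Example: "wait_for_readiness deck" blocks until pods labeled app=deck in the
# default namespace report Ready, retrying up to roughly 180 times before
# giving up.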

function wait_for_resource() {
  local arg
  local resource
  local name
  local namespace

  arg="${1:-}"

  declare -a args
  # shellcheck disable=SC2206
  args=(${arg//,/ })
  if ((${#args[@]} != 3)); then
    echo >&2 "wanted a CSV of exactly 3 values, with the syntax '<RESOURCE>,<NAME>,<NAMESPACE>'; got '${arg}'"
    return 1
  fi

  resource="${args[0]}"
  name="${args[1]}"
  namespace="${args[2]}"

  echo >&2 "waiting for ${resource}/${name} in namespace '${namespace}'..."

  # Check to see that the named resource exists in the given namespace. Time out
  # after ~10 seconds.
  for _ in $(seq 1 10); do
    if do_kubectl get -n "${namespace}" "${resource}" "${name}" >/dev/null; then
      return 0
    fi
    sleep 1
    echo >&2 "waiting for ${resource}/${name} in namespace '${namespace}'..."
  done

  return 1
}
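
# Example: "wait_for_resource secrets,hmac-token,default" returns once the
# hmac-token Secret exists in the default namespace; this matches the
# WAIT_FOR_RESOURCE_* entries in PROW_DEPLOYMENT_ORDER above.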

function wait_for_crd() {
  local arg
  local name
  local namespace

  arg="${1:-}"

  declare -a args
  # shellcheck disable=SC2206
  args=(${arg//,/ })
  if ((${#args[@]} != 2)); then
    echo >&2 "wanted a CSV of exactly 2 values, with the syntax '<NAME>,<NAMESPACE>'; got '${arg}'"
    return 1
  fi

  name="${args[0]}"
  namespace="${args[1]}"

  echo >&2 "waiting for CRD ${name} in namespace '${namespace}'..."

  for _ in $(seq 1 10); do
    if do_kubectl wait -n "${namespace}" --for condition=established --timeout=120s crd "${name}" >/dev/null; then
      return 0
    fi
    sleep 1
    echo >&2 "waiting for CRD ${name} in namespace '${namespace}'..."
  done

  return 1
}
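
# Example: "wait_for_crd prowjobs.prow.k8s.io,default" waits for the ProwJob
# CRD to report the Established condition; this matches the WAIT_FOR_CRD_*
# entry in PROW_DEPLOYMENT_ORDER above.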

function get_random_node_port() {
  # 30000-32767 is the default NodePort range. If "shuf" isn't available, use
  # 30303 as a default.
  shuf -i 30000-32767 -n 1 || echo 30303
}
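
# Example: "port=$(get_random_node_port)" picks a random port from the default
# NodePort range (or 30303 when shuf is unavailable).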