gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/.buildkite/hooks/pre-command

set -euo pipefail

# Use a per-day bazel remote cache.
export BAZEL_REMOTE_CACHE="--remote_cache=https://storage.googleapis.com/gvisor-buildkite-bazel-cache/$(date +%Y-%m-%d) --google_default_credentials"

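# For illustration only: later steps are expected to pass these flags to bazel
# themselves; a hypothetical invocation (not run by this hook) might be:
#   bazel test ${BAZEL_REMOTE_CACHE} //runsc/...
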
if POSIXLY_CORRECT=true df --local --output=pcent,ipcent,target | grep -vE '/snap/' | grep -qE '9[4-9]%|100%'; then
  echo "Disk usage has reached a critical level; this node is bad." >&2
  echo "Automated monitoring should recycle this node soon." >&2
  echo "If this made your build pipeline fail, sorry!" >&2
  echo "Try your luck again." >&2
  sudo df -h --local | sed -r 's/^/[df] /' >&2
  sleep 10
  killall buildkite-agent
  exit 1
fi
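
# For illustration, a line that would trip the check above (hypothetical
# values, as printed by `df --output=pcent,ipcent,target`):
#    95%   12% /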

if [[ "${BUILDKITE_PIPELINE_NAME:-}" == "Pipeline" ]]; then
  # Check that the target branch exists.
  git fetch origin "${BUILDKITE_BRANCH}" || {
    echo "Branch ${BUILDKITE_BRANCH} no longer exists; it was probably deleted by the time we got to it." >&2
    exit 1
  }
fi

# Download any build-specific configuration that has been uploaded.
# This allows top-level steps to override all subsequent steps. The download
# is allowed to fail, since no such configuration may have been uploaded.
buildkite-agent artifact download 'tools/bazeldefs/*' . || true

# Install packages we need. Docker must be installed and configured, as must
# Go itself. We just install some extra bits and pieces.
function install_pkgs() {
  export DEBIAN_FRONTEND=noninteractive
  # Retry until apt succeeds (e.g. transient mirror or dpkg-lock failures).
  while true; do
    if sudo -E apt-get update -q && sudo -E apt-get install -qy "$@"; then
      break
    fi
  done
}
install_pkgs make linux-libc-dev graphviz jq curl binutils gnupg gnupg-agent \
  gcc pkg-config apt-transport-https ca-certificates \
  software-properties-common rsync kmod systemd unzip

# Install headers, only if available.
if test -n "$(apt-cache search --names-only "^linux-headers-$(uname -r)$")"; then
  install_pkgs "linux-headers-$(uname -r)"
elif test -n "$(apt-cache search --names-only "^linux-gcp-headers-$(uname -r | cut -d- -f1-2)$")"; then
  install_pkgs "linux-gcp-headers-$(uname -r | cut -d- -f1-2)"
fi
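
# For illustration, with a hypothetical `uname -r` of 5.15.0-1030-gcp, the
# first branch searches for linux-headers-5.15.0-1030-gcp and the fallback
# for linux-gcp-headers-5.15.0-1030 (the first two dash-separated fields).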

set -x

# Setup for parallelization with PARTITION and TOTAL_PARTITIONS.
export PARTITION=${BUILDKITE_PARALLEL_JOB:-0}
PARTITION=$((PARTITION + 1)) # 1-indexed, but PARALLEL_JOB is 0-indexed.
export TOTAL_PARTITIONS=${BUILDKITE_PARALLEL_JOB_COUNT:-1}

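# For example, with BUILDKITE_PARALLEL_JOB_COUNT=3, the three parallel jobs
# see PARTITION=1, 2 and 3 with TOTAL_PARTITIONS=3, so each downstream test
# step can pick a disjoint shard.
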
if [[ "${BUILDKITE_BRANCH}" =~ ^test/ ]]; then
  # STABLE_VERSION depends on the most recent tag, so set the same tag for
  # all test changes to better utilize the bazel cache. We have to be sure
  # that binaries depend only on code changes, not on metadata such as the
  # commit id.

  # LINT.IfChange
  git tag -f buildkite-test-branch
  # LINT.ThenChange(../../tools/make_release.sh)
fi

# LINT.IfChange
export RUNTIME="buildkite_runtime"
# LINT.ThenChange(post-command)
if [ -d "/tmp/${RUNTIME}/" ]; then
  sudo rm -rf "/tmp/${RUNTIME}/"
fi

# If running in a container, set the reload command appropriately.
if [[ -x /tmp/buildkite-reload-host-docker/reload ]]; then
  export DOCKER_RELOAD_COMMAND='/tmp/buildkite-reload-host-docker/reload'
else
  export DOCKER_RELOAD_COMMAND='sudo systemctl reload docker'
fi
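# (The mounted helper is presumably provided when this hook itself runs inside
# a container, where `systemctl` cannot reach the host's Docker daemon; the
# chosen command is executed later via `bash -xc`.)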

if [[ "${BUILDKITE_PIPELINE_INSTALL_RUNTIME:-}" == "true" ]]; then
  # Ensure Docker has experimental enabled, install runtimes.
  echo 'Current Docker daemon configuration:' >&2
  cat /etc/docker/daemon.json >&2
  HAD_EXPERIMENTAL="$(jq '.experimental' /etc/docker/daemon.json)"
  if [[ -n "${STAGED_BINARIES:-}" ]]; then
    # Use `runsc` from STAGED_BINARIES instead of building it from scratch.
    export BUILDKITE_STAGED_BINARIES_DIRECTORY="$(mktemp -d)"
    gsutil cat "$STAGED_BINARIES" \
      | tar -C "$BUILDKITE_STAGED_BINARIES_DIRECTORY" -zxvf - runsc
    chmod +x "$BUILDKITE_STAGED_BINARIES_DIRECTORY/runsc"
    sudo "$BUILDKITE_STAGED_BINARIES_DIRECTORY/runsc" install \
      --experimental=true --runtime="${RUNTIME}" \
      -- "${RUNTIME_ARGS:-}"
  else
    make sudo TARGETS=//runsc:runsc \
      ARGS="install --experimental=true --runtime=${RUNTIME} -- ${RUNTIME_ARGS:-}"
  fi
  if [[ "$HAD_EXPERIMENTAL" != true ]]; then
    # WARNING: We may be running in a container when this command executes.
    # This only makes sense if Docker's `live-restore` feature is enabled.
    echo 'Restarting Docker daemon with this new configuration:' >&2
    cat /etc/docker/daemon.json >&2
    sudo systemctl restart docker
  else
    # If experimental was already enabled, we don't need to restart: the only
    # thing we modified is the list of runtimes, which can be reloaded with
    # just a SIGHUP.
    echo 'Reloading Docker daemon with this new configuration:' >&2
    cat /etc/docker/daemon.json >&2
    bash -xc "$DOCKER_RELOAD_COMMAND"
  fi
fi

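# One way to check the runtime registration by hand (illustrative, not run as
# part of this hook):
#   docker info --format '{{json .Runtimes}}' | jq 'keys'
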
# Helper for benchmarks, based on the branch.
if test "${BUILDKITE_BRANCH}" = "master"; then
  export BENCHMARKS_OFFICIAL=true
else
  export BENCHMARKS_OFFICIAL=false
fi

# Clear existing profiles.
sudo rm -rf /tmp/profile

# Allow all users to read dmesg; this is required for the syslog test.
sudo sysctl -w kernel.dmesg_restrict=0

# Download credentials, if a release agent.
if test "${BUILDKITE_AGENT_META_DATA_QUEUE:-}" = "release"; then
  # Pull down secrets. gcloud emits the payload with a URL-safe base64
  # alphabet ('_' and '-' in place of '/' and '+'), so translate it back
  # before decoding.
  gcloud secrets versions access --secret="repo-key" --format='get(payload.data)' latest | tr '_-' '/+' | base64 -d > repo.key

  # Configure the Docker credential helper (to push images).
  gcloud auth configure-docker -q
  gcloud auth configure-docker -q us-central1-docker.pkg.dev
fi

# Log information about fatal signals to the kernel log to help debug crashes.
sudo sysctl -w kernel.print-fatal-signals=1

pipeline_add_env() {
  sed -i "/^env:/a\  $1: '$2'" .buildkite/pipeline.yaml
}

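# For example, `pipeline_add_env SKIP_LOADING_IMAGES 1` appends the line
# "  SKIP_LOADING_IMAGES: '1'" immediately after the top-level "env:" key in
# .buildkite/pipeline.yaml.
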
# Try to skip unaffected tests on pre-submits.
# BUILDKITE_PIPELINE_ID is set only on the pipeline upload step.
if [[ -n "${BUILDKITE_PIPELINE_ID:-}" && "${BUILDKITE_BRANCH}" =~ ^test/ ]]; then
  # Skip load-all-test-images tests if the change doesn't affect images.
  if ./tools/buildkite-check-paths.sh images/ tools/images.mk Makefile; then
    echo "Skipping load-all-test-images tests."
    pipeline_add_env SKIP_LOADING_IMAGES 1
  fi
fi