#!/bin/bash
#
# get_ci_vm.sh - Create (or re-attach to) a GCE VM built from one of the
# CI cache-images named in .cirrus.yml, copy the local repository into it,
# run the CI environment setup, and drop into an interactive shell.
#
# Usage: hack/get_ci_vm.sh <image_name>
#
# Requires: local sudo access to run podman, prior authorization to use
# the storage GCP project, and possession of the proper ssh private key.
# Set GCLOUD_DEBUG to any value to keep the temp dir for postmortem.

set -e

RED="\e[1;36;41m"
YEL="\e[1;33;44m"
NOR="\e[0m"
USAGE_WARNING="
${YEL}WARNING: This will not work without local sudo access to run podman,${NOR}
${YEL}and prior authorization to use the storage GCP project. Also,${NOR}
${YEL}possession of the proper ssh private key is required.${NOR}
"
# TODO: Many/most of these values should come from .cirrus.yml
ZONE="us-central1-b"
CPUS="2"
MEMORY="4Gb"
DISK="200"
PROJECT="storage-240716"
GOSRC="/var/tmp/go/src/github.com/containers/storage"
GCLOUD_IMAGE=${GCLOUD_IMAGE:-quay.io/cevich/gcloud_centos:latest}
GCLOUD_SUDO=${GCLOUD_SUDO-sudo}
SSHUSER="root"

# Shared tmp directory between container and us
TMPDIR=$(mktemp -d --tmpdir "$(basename "$0")_tmpdir_XXXXXX")

STORAGEROOT=$(realpath "$(dirname "$0")/../")
# else: Assume $PWD is the root of the storage repository
[[ "$STORAGEROOT" != "/" ]] || STORAGEROOT=$PWD

# Command shortcuts save some typing (assumes $STORAGEROOT is subdir of $HOME).
# NOTE: $TMPDIR is mounted as $HOME inside the container, so container-side
# $HOME paths map to host-side $TMPDIR paths.
PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v $TMPDIR:$HOME -v $HOME/.config/gcloud:$HOME/.config/gcloud -v $HOME/.config/gcloud/ssh:$HOME/.ssh -v $STORAGEROOT:$STORAGEROOT $GCLOUD_IMAGE --configuration=storage --project=$PROJECT"
SCP_CMD="$PGCLOUD compute scp"


# Echo a copy-pastable, shell-quoted form of a command to stderr, then run
# it.  With --background as the first argument, run it as a background job.
showrun() {
    if [[ "$1" == "--background" ]]
    then
        shift
        # Properly escape any nested spaces, so command can be copy-pasted
        echo '+ '$(printf " %q" "$@")' &' >&2
        "$@" &
        echo -e "${RED}<backgrounded>${NOR}"
    else
        echo '+ '$(printf " %q" "$@") >&2
        "$@"
    fi
}

# Reap any background jobs and remove the shared tmpdir, preserving the
# exit status of whatever triggered the exit.
cleanup() {
    RET=$?
    set +e
    wait

    # set GCLOUD_DEBUG to leave tmpdir behind for postmortem
    test -z "$GCLOUD_DEBUG" && rm -rf "$TMPDIR"

    # Not always called from an exit handler, but should always exit when called
    exit $RET
}
trap cleanup EXIT

# Interactively offer to delete the VM (gcloud prompts Yes/No), then clean
# up and exit.
delvm() {
    echo -e "\n"
    echo -e "\n${YEL}Offering to Delete $VMNAME ${RED}(Might take a minute or two)${NOR}"
    echo -e "\n${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}"
    showrun $CLEANUP_CMD  # prompts for Yes/No
    cleanup
}

# List candidate image names: the quoted values of *_CACHE_IMAGE_NAME keys
# in .cirrus.yml, excluding any marked 'notready'.
image_hints() {
    # Note: original regex had an unclosed bracket expression
    # ('[[:space:]+"'); fixed to '[[:space:]]+"'.
    grep -E '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]]+"[[:print:]]+"' \
        "$STORAGEROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]' | \
        grep -v 'notready' | sort -u
}

# Print an error ($1), usage, and possible image names, then exit 1.
show_usage() {
    echo -e "\n${RED}ERROR: $1${NOR}"
    echo -e "${YEL}Usage: $(basename "$0") <image_name>${NOR}"
    echo ""
    if [[ -r ".cirrus.yml" ]]
    then
        echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}"
        image_hints
        echo ""
    fi
    exit 1
}

# Emit the non-encrypted entries of the top-level 'env' map in .cirrus.yml
# as space-separated KEY="value" pairs on a single line.
get_env_vars() {
    # sys.stdout.write is used so this works under both python2 and
    # python3 (the previous 'print "...",' form was python2-only).
    python -c '
import sys, yaml
env = yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"]
for k, v in env.items():
    v = str(v)
    if "ENCRYPTED" not in v:
        sys.stdout.write("{0}=\"{1}\" ".format(k, v))
'
}

# Validate arguments / environment and derive the gcloud command strings
# ($CREATE_CMD, $SSH_CMD, $CLEANUP_CMD) plus $SETUP_CMD and $VMNAME.
parse_args(){
    echo -e "$USAGE_WARNING"

    # Refuse to run as the superuser; the '=~' substring match used before
    # would also have rejected any username merely containing "root".
    if [[ "$USER" == "root" ]] || (( UID == 0 ))
    then
        show_usage "This script must be run as a regular user."
    fi

    ENVS="$(get_env_vars)"
    IMAGE_NAME="$1"
    if [[ -z "$IMAGE_NAME" ]]
    then
        show_usage "No image-name specified."
    fi

    ENVS="$ENVS SPECIALMODE=\"$SPECIALMODE\""
    SETUP_CMD="env $ENVS $GOSRC/contrib/cirrus/setup.sh"
    VMNAME="${VMNAME:-${USER}-${IMAGE_NAME}}"
    CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image-project=libpod-218412 --image=${IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $VMNAME"
    SSH_CMD="$PGCLOUD compute ssh $SSHUSER@$VMNAME"
    CLEANUP_CMD="$PGCLOUD compute instances delete --zone $ZONE --delete-disks=all $VMNAME"
}

##### main

# The container mounts assume the clone lives under $HOME.
[[ "$STORAGEROOT" == "$HOME" || "$STORAGEROOT" == "$HOME"/* ]] || \
    show_usage "Repo clone must be sub-dir of $HOME"

cd "$STORAGEROOT"

parse_args "$@"

# Ensure mount-points and data directories exist on host as $USER. Also prevents
# permission-denied errors during cleanup() b/c `sudo podman` created mount-points
# owned by root.
mkdir -p "$TMPDIR/${STORAGEROOT##$HOME}"
mkdir -p "$TMPDIR/.ssh"
mkdir -p "$HOME/.config/gcloud/ssh" "$TMPDIR/.config/gcloud/ssh"
chmod 700 "$HOME/.config/gcloud/ssh" "$TMPDIR/.config/gcloud/ssh" "$TMPDIR/.ssh"

# Attempt to determine if named 'storage' gcloud configuration exists
showrun $PGCLOUD info > "$TMPDIR/gcloud-info"
if grep -E -q "Account:.*None" "$TMPDIR/gcloud-info"
then
    echo -e "\n${YEL}WARNING: Can't find gcloud configuration for 'storage', running init.${NOR}"
    echo -e "         ${RED}Please choose '#1: Re-initialize' and 'login' if asked.${NOR}"
    echo -e "         ${RED}Please set Compute Region and Zone (if asked) to 'us-central1-b'.${NOR}"
    echo -e "         ${RED}DO NOT set any password for the generated ssh key.${NOR}"
    showrun $PGCLOUD init --project=$PROJECT --console-only --skip-diagnostics

    # Verify it worked (account name == someone@example.com)
    $PGCLOUD info > "$TMPDIR/gcloud-info-after-init"
    if grep -E -q "Account:.*None" "$TMPDIR/gcloud-info-after-init"
    then
        echo -e "${RED}ERROR: Could not initialize 'storage' configuration in gcloud.${NOR}"
        exit 5
    fi

    # If this is the only config, make it the default to avoid persistent warnings from gcloud
    [[ -r "$HOME/.config/gcloud/configurations/config_default" ]] || \
        ln "$HOME/.config/gcloud/configurations/config_storage" \
           "$HOME/.config/gcloud/configurations/config_default"
fi

# Couldn't make rsync work with gcloud's ssh wrapper: ssh-keys generated on the fly
TARBALL=$VMNAME.tar.bz2
echo -e "\n${YEL}Packing up local repository into a tarball.${NOR}"
showrun --background tar cjf "$TMPDIR/$TARBALL" --warning=no-file-changed --exclude-vcs-ignores -C "$STORAGEROOT" .

trap delvm INT  # Allow deleting VM if CTRL-C during create
# This fails if VM already exists: permit this usage to re-init
echo -e "\n${YEL}Trying to create a VM named $VMNAME\n${RED}(might take a minute/two.  Errors ignored).${NOR}"
showrun $CREATE_CMD || true  # allow re-running commands below when "delete: N"

# Any subsequent failure should prompt for VM deletion
trap delvm EXIT

echo -e "\n${YEL}Retrying for 30s for ssh port to open (may give some errors)${NOR}"
trap 'COUNT=9999' INT  # CTRL-C aborts the retry loop
ATTEMPTS=10
for (( COUNT=1 ; COUNT <= ATTEMPTS ; COUNT++ ))
do
    if $SSH_CMD --command "true"; then break; else sleep 3s; fi
done
if (( COUNT > ATTEMPTS ))
then
    echo -e "\n${RED}Failed${NOR}"
    exit 7
fi
echo -e "${YEL}Got it${NOR}"

echo -e "\n${YEL}Removing and re-creating $GOSRC on $VMNAME.${NOR}"
showrun $SSH_CMD --command "rm -rf $GOSRC"
showrun $SSH_CMD --command "mkdir -p $GOSRC"

echo -e "\n${YEL}Transferring tarball to $VMNAME.${NOR}"
wait  # ensure the backgrounded tar has finished
# $HOME inside the container is host-side $TMPDIR, where the tarball lives.
showrun $SCP_CMD "$HOME/$TARBALL" $SSHUSER@$VMNAME:/tmp/$TARBALL

echo -e "\n${YEL}Unpacking tarball into $GOSRC on $VMNAME.${NOR}"
showrun $SSH_CMD --command "tar xjf /tmp/$TARBALL -C $GOSRC"

echo -e "\n${YEL}Removing tarball on $VMNAME.${NOR}"
showrun $SSH_CMD --command "rm -f /tmp/$TARBALL"

echo -e "\n${YEL}Executing environment setup${NOR}"
showrun $SSH_CMD --command "$SETUP_CMD"

VMIP=$($PGCLOUD compute instances describe $VMNAME --format='get(networkInterfaces[0].accessConfigs[0].natIP)')

echo -e "\n${YEL}Connecting to $VMNAME${NOR}\nPublic IP Address: $VMIP\n${RED}(option to delete VM upon logout).${NOR}\n"
showrun $SSH_CMD -- -t "cd $GOSRC && exec env $ENVS bash -il"