github.com/sijibomii/docker@v0.0.0-20231230191044-5cf6ca554647/hack/release.sh

#!/usr/bin/env bash
set -e

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the version currently in the VERSION file.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.

set -o pipefail

# Print a usage message and exit.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
  repository;
- to be provided with the location of an S3 bucket and path, in
  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
- to be provided with AWS credentials for this S3 bucket, in environment
  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile, e.g.:

docker run -e AWS_S3_BUCKET=test.docker.com \
           -e AWS_ACCESS_KEY_ID     \
           -e AWS_SECRET_ACCESS_KEY \
           -e AWS_DEFAULT_REGION    \
           -it --privileged         \
           docker ./hack/release.sh
EOF
	exit 1
}

[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY_ID" ] || usage
[ "$AWS_SECRET_ACCESS_KEY" ] || usage
[ -d /go/src/github.com/docker/docker ] || usage
cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage

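# Pick a default region when AWS_DEFAULT_REGION is unset or empty; the region is
# also used below by s3_url to build the S3 website endpoint hostname.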
export AWS_DEFAULT_REGION
: "${AWS_DEFAULT_REGION:=us-west-1}"

RELEASE_BUNDLES=(
	binary
	cross
	tgz
)

if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration-cli
	)
fi
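# In the default case the bundle list expands to
#   test-unit binary cross tgz test-integration-cli
# so the unit tests run before the build bundles and the integration CLI suite
# runs after them; --release-regardless-of-test-failure drops both test bundles.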

VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
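# Illustrative example (hypothetical values): AWS_S3_BUCKET=test.docker.com with
# AWS_S3_BUCKET_PATH=prerelease gives BUCKET_PATH=test.docker.com/prerelease, so
# everything below is uploaded under s3://test.docker.com/prerelease/.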

if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi

# These are the 2 keys we've used to sign the debs:
#   release (get.docker.com)
#	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
#   test    (test.docker.com)
#	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"

setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	aws s3 mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	aws s3 ls "s3://$BUCKET" >/dev/null
	# Make the bucket accessible through website endpoints.
	aws s3 website --index-document index --error-document error "s3://$BUCKET"
}
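# Note: the website configuration above is what lets the
# http://<bucket>.s3-website-<region>.amazonaws.com URLs produced by s3_url
# below serve the uploaded files, with /index and /error as the index and
# error documents.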

# write_to_s3 uploads the contents of standard input to the specified S3 URL.
write_to_s3() {
	DEST=$1
	F="$(mktemp)"
	cat > "$F"
	aws s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
	rm -f "$F"
}
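# Example usage, as done further down for the "latest" version marker:
#   echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"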

s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET_PATH"
			;;
		*)
			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
			else
				echo "$BASE_URL"
			fi
			;;
	esac
}
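# Example outputs: for the well-known buckets matched above this is simply
# https://$BUCKET_PATH; for any other bucket, say a hypothetical "my-test-bucket"
# in the default us-west-1 region, it would be
# http://my-test-bucket.s3-website-us-west-1.amazonaws.com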

build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'
		echo >&2
		echo >&2 'You, as the release maintainer, now have a couple options:'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- did we mention how important this is?  issues need fixing :)'
		echo >&2
		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
		echo >&2 ' really knows all the hairy problems at hand with the current release'
		echo >&2 ' issues) may bypass this check by running this script again with the'
		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
		echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
		echo >&2 ' avoid using this if at all possible.'
		echo >&2
		echo >&2 'Regardless, we cannot stress enough how sparingly this bypass'
		echo >&2 ' should be used.  If there are release issues, we should always err on the'
		echo >&2 ' side of caution.'
		echo >&2
		exit 1
	fi
}

upload_release_build() {
	src="$1"
	dst="$2"
	latest="$3"

	echo
	echo "Uploading $src"
	echo "  to $dst"
	echo
	aws s3 cp --follow-symlinks --acl public-read "$src" "$dst"
	if [ "$latest" ]; then
		echo
		echo "Copying to $latest"
		echo
		aws s3 cp --acl public-read "$dst" "$latest"
	fi

	# get hash files too (see hash_files() in hack/make.sh)
	for hashAlgo in md5 sha256; do
		if [ -e "$src.$hashAlgo" ]; then
			echo
			echo "Uploading $src.$hashAlgo"
			echo "  to $dst.$hashAlgo"
			echo
			aws s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to $latest.$hashAlgo"
				echo
				aws s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
}
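# Example invocation (see release_build below): upload a tgz, refresh the
# "latest" copy, and pick up any .md5/.sha256 hash files sitting next to it:
#   upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"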

release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	zipExt=".tgz"
	binaryExt=""
	tgz=$binary$zipExt

	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# i.e., GOOS=linux -> "uname -s"=Linux

	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			# this is Windows: use the .zip and .exe extensions for the files.
			s3Os=Windows
			zipExt=".zip"
			binaryExt=".exe"
			tgz=$binary$zipExt
			binary+=$binaryExt
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac
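	# Example mapping: GOOS=linux GOARCH=amd64 becomes s3Os=Linux s3Arch=x86_64,
	# so this build's artifacts land under builds/Linux/x86_64/ (matching the
	# URLs echoed at the end of this script).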

	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
	# latest=
	latestTgz=
	if [ "$latestBase" ]; then
		# commented out since we aren't uploading binaries right now.
		# latest="$s3Dir/$latestBase$binaryExt"
		# we don't include the $binaryExt because we don't want docker.exe.zip
		latestTgz="$s3Dir/$latestBase$zipExt"
	fi

	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi
	# disable binary uploads for now. Only providing tgz downloads
	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}

# Upload binaries and tgz files to S3
release_binaries() {
	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386

	cat <<EOF | write_to_s3 "s3://$BUCKET_PATH/builds/index"
# To install, run the following command as root:
curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && sudo tar zxf docker-$VERSION.tgz -C /
# Then start docker in daemon mode:
sudo /usr/local/bin/docker daemon
EOF

	# Add redirect at /builds/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	aws s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}

# Upload the index script
release_index() {
	echo "Releasing index"
	url="$(s3_url)/" hack/make.sh install-script
	write_to_s3 "s3://$BUCKET_PATH/index" < "bundles/$VERSION/install-script/install.sh"
}
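# Because setup_s3 registers "index" as the website index document, the install
# script uploaded here is what a plain GET of "$(s3_url)/" returns, e.g. via a
# hypothetical "curl -sSL $(s3_url)/ | sh" one-liner.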

main() {
	build_all
	setup_s3
	release_binaries
	release_index
}

main

echo
echo
echo "Release complete; see $(s3_url)"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
echo
echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit client zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
echo