github.com/tompao/docker@v1.9.1/hack/release.sh (about)

     1  #!/usr/bin/env bash
     2  set -e
     3  
     4  # This script looks for bundles built by make.sh, and releases them on a
     5  # public S3 bucket.
     6  #
     7  # Bundles should be available for the VERSION string passed as argument.
     8  #
     9  # The correct way to call this script is inside a container built by the
    10  # official Dockerfile at the root of the Docker source code. The Dockerfile,
    11  # make.sh and release.sh should all be from the same source code revision.
    12  
    13  set -o pipefail
    14  
    15  # Print a usage message and exit.
    16  usage() {
    17  	cat >&2 <<'EOF'
    18  To run, I need:
    19  - to be in a container generated by the Dockerfile at the top of the Docker
    20    repository;
    21  - to be provided with the location of an S3 bucket and path, in
    22    environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
    23  - to be provided with AWS credentials for this S3 bucket, in environment
    24    variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
    25  - the passphrase to unlock the GPG key specified by the optional environment
    26    variable GPG_KEYID (default: releasedocker) which will sign the deb
    27    packages (passed as environment variable GPG_PASSPHRASE);
    28  - a generous amount of good will and nice manners.
    29  The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
    30  
    31  docker run -e AWS_S3_BUCKET=test.docker.com \
    32             -e AWS_ACCESS_KEY=... \
    33             -e AWS_SECRET_KEY=... \
    34             -e GPG_PASSPHRASE=... \
    35             -i -t --privileged \
    36             docker ./hack/release.sh
    37  EOF
    38  	exit 1
    39  }
    40  
    41  [ "$AWS_S3_BUCKET" ] || usage
    42  [ "$AWS_ACCESS_KEY" ] || usage
    43  [ "$AWS_SECRET_KEY" ] || usage
    44  [ "$GPG_PASSPHRASE" ] || usage
    45  : ${GPG_KEYID:=releasedocker}
    46  [ -d /go/src/github.com/docker/docker ] || usage
    47  cd /go/src/github.com/docker/docker
    48  [ -x hack/make.sh ] || usage
    49  
    50  RELEASE_BUNDLES=(
    51  	binary
    52  	cross
    53  	tgz
    54  	ubuntu
    55  )
    56  
    57  if [ "$1" != '--release-regardless-of-test-failure' ]; then
    58  	RELEASE_BUNDLES=(
    59  		test-unit
    60  		"${RELEASE_BUNDLES[@]}"
    61  		test-integration-cli
    62  	)
    63  fi
    64  
    65  VERSION=$(< VERSION)
    66  BUCKET=$AWS_S3_BUCKET
    67  BUCKET_PATH=$BUCKET
    68  [[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
    69  
    70  if command -v git &> /dev/null && git rev-parse &> /dev/null; then
    71  	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
    72  		echo "You cannot run the release script on a repo with uncommitted changes"
    73  		usage
    74  	fi
    75  fi
    76  
    77  # These are the 2 keys we've used to sign the deb's
    78  #   release (get.docker.com)
    79  #	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
    80  #   test    (test.docker.com)
    81  #	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
    82  
# Prepare the target S3 bucket: create it if needed, verify we can access
# it, and enable website hosting so index/error pages are served over HTTP.
# Reads globals: BUCKET.
setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	# s3cmd has no useful exit status, so we cannot check that.
	# Instead, we check if it outputs anything on standard output.
	# (When there are problems, it uses standard error instead.)
	s3cmd info "s3://$BUCKET" | grep -q .
	# Make the bucket accessible through website endpoints.
	s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
}
    95  
    96  # write_to_s3 uploads the contents of standard input to the specified S3 url.
    97  write_to_s3() {
    98  	DEST=$1
    99  	F=`mktemp`
   100  	cat > "$F"
   101  	s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST"
   102  	rm -f "$F"
   103  }
   104  
   105  s3_url() {
   106  	case "$BUCKET" in
   107  		get.docker.com|test.docker.com|experimental.docker.com)
   108  			echo "https://$BUCKET_PATH"
   109  			;;
   110  		*)
   111  			BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
   112  			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
   113  				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
   114  			else
   115  				echo "$BASE_URL"
   116  			fi
   117  			;;
   118  	esac
   119  }
   120  
   121  build_all() {
   122  	echo "Building release"
   123  	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
   124  		echo >&2
   125  		echo >&2 'The build or tests appear to have failed.'
   126  		echo >&2
   127  		echo >&2 'You, as the release  maintainer, now have a couple options:'
   128  		echo >&2 '- delay release and fix issues'
   129  		echo >&2 '- delay release and fix issues'
   130  		echo >&2 '- did we mention how important this is?  issues need fixing :)'
   131  		echo >&2
   132  		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
   133  		echo >&2 ' really knows all the hairy problems at hand with the current release'
   134  		echo >&2 ' issues) may bypass this checking by running this script again with the'
   135  		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
   136  		echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
   137  		echo >&2 ' avoid using this if at all possible.'
   138  		echo >&2
   139  		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
   140  		echo >&2 ' should be used.  If there are release issues, we should always err on the'
   141  		echo >&2 ' side of caution.'
   142  		echo >&2
   143  		exit 1
   144  	fi
   145  }
   146  
   147  upload_release_build() {
   148  	src="$1"
   149  	dst="$2"
   150  	latest="$3"
   151  
   152  	echo
   153  	echo "Uploading $src"
   154  	echo "  to $dst"
   155  	echo
   156  	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
   157  	if [ "$latest" ]; then
   158  		echo
   159  		echo "Copying to $latest"
   160  		echo
   161  		s3cmd --acl-public cp "$dst" "$latest"
   162  	fi
   163  
   164  	# get hash files too (see hash_files() in hack/make.sh)
   165  	for hashAlgo in md5 sha256; do
   166  		if [ -e "$src.$hashAlgo" ]; then
   167  			echo
   168  			echo "Uploading $src.$hashAlgo"
   169  			echo "  to $dst.$hashAlgo"
   170  			echo
   171  			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
   172  			if [ "$latest" ]; then
   173  				echo
   174  				echo "Copying to $latest.$hashAlgo"
   175  				echo
   176  				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
   177  			fi
   178  		fi
   179  	done
   180  }
   181  
# Upload the cross-compiled binary and tgz for one GOOS/GOARCH pair to
# s3://BUCKET_PATH/builds/<uname -s>/<uname -m>/, plus docker-latest copies
# unless NOLATEST is set.
#   $1 - GOOS   (e.g. linux, darwin, windows)
#   $2 - GOARCH (e.g. amd64, 386, arm)
# Reads globals: VERSION, BUCKET_PATH, NOLATEST. Exits non-zero if the
# expected bundle files are missing or the platform is unknown.
release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	# Paths produced by make.sh's 'cross' and 'tgz' bundles.
	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	tgz=docker-$VERSION.tgz

	# Empty latestBase suppresses the docker-latest copies below.
	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# ie, GOOS=linux -> "uname -s"=Linux

	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			s3Os=Windows
			# Windows artifacts carry an .exe suffix, including the
			# docker-latest copy.
			binary+='.exe'
			if [ "$latestBase" ]; then
				latestBase+='.exe'
			fi
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac

	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
	# Destination urls for the docker-latest copies (empty when disabled).
	latest=
	latestTgz=
	if [ "$latestBase" ]; then
		latest="$s3Dir/$latestBase"
		latestTgz="$s3Dir/$latestBase.tgz"
	fi

	# Sanity-check the bundle outputs before touching S3.
	if [ ! -x "$binDir/$binary" ]; then
		echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
		exit 1
	fi
	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi

	upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}
   263  
   264  # Upload the 'ubuntu' bundle to S3:
   265  # 1. A full APT repository is published at $BUCKET/ubuntu/
   266  # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
   267  release_ubuntu() {
   268  	echo "Releasing ubuntu"
   269  	[ -e "bundles/$VERSION/ubuntu" ] || {
   270  		echo >&2 './hack/make.sh must be run before release_ubuntu'
   271  		exit 1
   272  	}
   273  
   274  	local debfiles=( "bundles/$VERSION/ubuntu/"*.deb )
   275  
   276  	# Sign our packages
   277  	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k "$GPG_KEYID" --sign builder "${debfiles[@]}"
   278  
   279  	# Setup the APT repo
   280  	APTDIR=bundles/$VERSION/ubuntu/apt
   281  	mkdir -p "$APTDIR/conf" "$APTDIR/db"
   282  	s3cmd sync "s3://$BUCKET/ubuntu/db/" "$APTDIR/db/" || true
   283  	cat > "$APTDIR/conf/distributions" <<EOF
   284  Codename: docker
   285  Components: main
   286  Architectures: amd64 i386
   287  EOF
   288  
   289  	# Add the DEB package to the APT repo
   290  	reprepro -b "$APTDIR" includedeb docker "${debfiles[@]}"
   291  
   292  	# Sign
   293  	for F in $(find $APTDIR -name Release); do
   294  		gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
   295  			--armor --sign --detach-sign \
   296  			--output "$F.gpg" "$F"
   297  	done
   298  
   299  	# Upload keys
   300  	s3cmd sync "$HOME/.gnupg/" "s3://$BUCKET/ubuntu/.gnupg/"
   301  	gpg --armor --export "$GPG_KEYID" > "bundles/$VERSION/ubuntu/gpg"
   302  	s3cmd --acl-public put "bundles/$VERSION/ubuntu/gpg" "s3://$BUCKET/gpg"
   303  
   304  	local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9
   305  	local s3Headers=
   306  	if [[ $BUCKET == test* ]]; then
   307  		gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6
   308  	elif [[ $BUCKET == experimental* ]]; then
   309  		gpgFingerprint=E33FF7BF5C91D50A6F91FFFD4CC38D40F9A96B49
   310  		s3Headers='--add-header=Cache-Control:no-cache'
   311  	fi
   312  
   313  	# Upload repo
   314  	s3cmd --acl-public $s3Headers sync "$APTDIR/" "s3://$BUCKET/ubuntu/"
   315  	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
   316  echo "# WARNING! This script is deprecated. Please use the script"
   317  echo "# at https://get.docker.com/"
   318  EOF
   319  
   320  	# Add redirect at /ubuntu/info for URL-backwards-compatibility
   321  	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
   322  	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/ubuntu/info"
   323  
   324  	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
   325  }
   326  
   327  # Upload binaries and tgz files to S3
   328  release_binaries() {
   329  	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
   330  		echo >&2 './hack/make.sh must be run before release_binaries'
   331  		exit 1
   332  	}
   333  
   334  	for d in bundles/$VERSION/cross/*/*; do
   335  		GOARCH="$(basename "$d")"
   336  		GOOS="$(basename "$(dirname "$d")")"
   337  		release_build "$GOOS" "$GOARCH"
   338  	done
   339  
   340  	# TODO create redirect from builds/*/i686 to builds/*/i386
   341  
   342  	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
   343  # To install, run the following command as root:
   344  curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
   345  # Then start docker in daemon mode:
   346  sudo /usr/local/bin/docker daemon
   347  EOF
   348  
   349  	# Add redirect at /builds/info for URL-backwards-compatibility
   350  	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
   351  	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"
   352  
   353  	if [ -z "$NOLATEST" ]; then
   354  		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
   355  		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
   356  	fi
   357  }
   358  
   359  # Upload the index script
   360  release_index() {
   361  	echo "Releasing index"
   362  	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET_PATH/index"
   363  }
   364  
   365  release_test() {
   366  	echo "Releasing tests"
   367  	if [ -e "bundles/$VERSION/test" ]; then
   368  		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET_PATH/test/"
   369  	fi
   370  }
   371  
   372  setup_gpg() {
   373  	echo "Setting up GPG"
   374  	# Make sure that we have our keys
   375  	mkdir -p "$HOME/.gnupg/"
   376  	s3cmd sync "s3://$BUCKET/ubuntu/.gnupg/" "$HOME/.gnupg/" || true
   377  	gpg --list-keys "$GPG_KEYID" >/dev/null || {
   378  		gpg --gen-key --batch <<EOF
   379  Key-Type: RSA
   380  Key-Length: 4096
   381  Passphrase: $GPG_PASSPHRASE
   382  Name-Real: Docker Release Tool
   383  Name-Email: docker@docker.com
   384  Name-Comment: $GPG_KEYID
   385  Expire-Date: 0
   386  %commit
   387  EOF
   388  	}
   389  }
   390  
# Release pipeline. Build (and test) everything first so nothing is
# uploaded when the build fails; then prepare S3 and GPG, and finally
# publish binaries, the APT repo, the index script, and the test bundle.
main() {
	build_all
	setup_s3
	setup_gpg
	release_binaries
	release_ubuntu
	release_index
	release_test
}
   400  
   401  main
   402  
   403  echo
   404  echo
   405  echo "Release complete; see $(s3_url)"
   406  echo "Use the following text to announce the release:"
   407  echo
   408  echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
   409  echo
   410  echo "Ubuntu/Debian: curl -sSL $(s3_url) | sh"
   411  echo "Linux 64bit binary: $(s3_url)/builds/Linux/x86_64/docker-$VERSION"
   412  echo "Darwin/OSX 64bit client binary: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION"
   413  echo "Darwin/OSX 32bit client binary: $(s3_url)/builds/Darwin/i386/docker-$VERSION"
   414  echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
   415  echo "Windows 64bit client binary: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.exe"
   416  echo "Windows 32bit client binary: $(s3_url)/builds/Windows/i386/docker-$VERSION.exe"
   417  echo