github.com/mheon/docker@v0.11.2-0.20150922122814-44f47903a831/hack/release.sh

#!/usr/bin/env bash
set -e

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string passed as argument.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.

set -o pipefail

# Print a usage message and exit.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
  repository;
- to be provided with the name of an S3 bucket, in environment variable
  AWS_S3_BUCKET;
- to be provided with AWS credentials for this S3 bucket, in environment
  variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
- the passphrase to unlock the GPG key which will sign the deb packages
  (passed as environment variable GPG_PASSPHRASE);
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile, e.g.:

docker run -e AWS_S3_BUCKET=test.docker.com \
           -e AWS_ACCESS_KEY=... \
           -e AWS_SECRET_KEY=... \
           -e GPG_PASSPHRASE=... \
           -i -t --privileged \
           docker ./hack/release.sh
EOF
	exit 1
}

[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY" ] || usage
[ "$AWS_SECRET_KEY" ] || usage
[ "$GPG_PASSPHRASE" ] || usage
[ -d /go/src/github.com/docker/docker ] || usage
cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage

RELEASE_BUNDLES=(
	binary
	cross
	tgz
	ubuntu
)

if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration-cli
	)
fi
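
# Illustrative note (not part of the original script): with the default bundle
# list above and no '--release-regardless-of-test-failure' flag, build_all
# below effectively runs:
#
#   ./hack/make.sh test-unit binary cross tgz ubuntu test-integration-cli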

VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET

if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi

# These are the 2 keys we've used to sign the debs:
#   release (get.docker.com)
#	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
#   test    (test.docker.com)
#	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"

setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	# s3cmd has no useful exit status, so we cannot check that.
	# Instead, we check if it outputs anything on standard output.
	# (When there are problems, it uses standard error instead.)
	s3cmd info "s3://$BUCKET" | grep -q .
	# Make the bucket accessible through website endpoints.
	s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
}

# write_to_s3 uploads the contents of standard input to the specified S3 URL.
write_to_s3() {
	DEST=$1
	F=$(mktemp)
	cat > "$F"
	s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST"
	rm -f "$F"
}
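
# Illustrative usage (mirrors how the function is called further down), e.g.:
#
#   echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest"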

s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET"
			;;
		*)
			s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
			;;
	esac
}
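
# For the three well-known buckets this prints e.g. "https://get.docker.com";
# for any other bucket it falls back to the S3 website endpoint reported by
# 's3cmd ws-info' (an illustrative, unverified example would be something like
# "http://my-bucket.s3-website-us-east-1.amazonaws.com").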

build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'
		echo >&2
		echo >&2 'You, as the release maintainer, now have a couple of options:'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- did we mention how important this is?  issues need fixing :)'
		echo >&2
		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
		echo >&2 ' really know all the hairy problems at hand with the current release'
		echo >&2 ' issues) may bypass this checking by running this script again with the'
		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
		echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
		echo >&2 ' avoid using this if at all possible.'
		echo >&2
		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
		echo >&2 ' should be used.  If there are release issues, we should always err on the'
		echo >&2 ' side of caution.'
		echo >&2
		exit 1
	fi
}

upload_release_build() {
	src="$1"
	dst="$2"
	latest="$3"

	echo
	echo "Uploading $src"
	echo "  to $dst"
	echo
	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
	if [ "$latest" ]; then
		echo
		echo "Copying to $latest"
		echo
		s3cmd --acl-public cp "$dst" "$latest"
	fi

	# get the hash files too (see hash_files() in hack/make.sh)
	for hashAlgo in md5 sha256; do
		if [ -e "$src.$hashAlgo" ]; then
			echo
			echo "Uploading $src.$hashAlgo"
			echo "  to $dst.$hashAlgo"
			echo
			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to $latest.$hashAlgo"
				echo
				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
}
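
# Illustrative call (this is the pattern release_build below produces for
# GOOS=linux GOARCH=amd64):
#
#   upload_release_build "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" \
#                        "s3://$BUCKET/builds/Linux/x86_64/docker-$VERSION" \
#                        "s3://$BUCKET/builds/Linux/x86_64/docker-latest"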

release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	tgz=docker-$VERSION.tgz

	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# i.e., GOOS=linux -> "uname -s"=Linux

	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			s3Os=Windows
			binary+='.exe'
			if [ "$latestBase" ]; then
				latestBase+='.exe'
			fi
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac

	s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch
	latest=
	latestTgz=
	if [ "$latestBase" ]; then
		latest="$s3Dir/$latestBase"
		latestTgz="$s3Dir/$latestBase.tgz"
	fi

	if [ ! -x "$binDir/$binary" ]; then
		echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
		exit 1
	fi
	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi

	upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}
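
# Illustrative example of the mapping above: 'release_build windows 386' looks
# for bundles/$VERSION/cross/windows/386/docker-$VERSION.exe and uploads it to
# s3://$BUCKET/builds/Windows/i386/docker-$VERSION.exe.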

# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
release_ubuntu() {
	echo "Releasing ubuntu"
	[ -e "bundles/$VERSION/ubuntu" ] || {
		echo >&2 './hack/make.sh must be run before release_ubuntu'
		exit 1
	}

	local debfiles=( "bundles/$VERSION/ubuntu/"*.deb )

	# Sign our packages
	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker --sign builder "${debfiles[@]}"

	# Set up the APT repo
	APTDIR=bundles/$VERSION/ubuntu/apt
	mkdir -p "$APTDIR/conf" "$APTDIR/db"
	s3cmd sync "s3://$BUCKET/ubuntu/db/" "$APTDIR/db/" || true
	cat > "$APTDIR/conf/distributions" <<EOF
Codename: docker
Components: main
Architectures: amd64 i386
EOF

	# Add the DEB packages to the APT repo
	reprepro -b "$APTDIR" includedeb docker "${debfiles[@]}"

	# Sign the Release files
	for F in $(find "$APTDIR" -name Release); do
		gpg -u releasedocker --passphrase "$GPG_PASSPHRASE" \
			--armor --sign --detach-sign \
			--output "$F.gpg" "$F"
	done

	# Upload keys
	s3cmd sync "$HOME/.gnupg/" "s3://$BUCKET/ubuntu/.gnupg/"
	gpg --armor --export releasedocker > "bundles/$VERSION/ubuntu/gpg"
	s3cmd --acl-public put "bundles/$VERSION/ubuntu/gpg" "s3://$BUCKET/gpg"

	local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9
	local s3Headers=
	if [[ $BUCKET == test* ]]; then
		gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6
	elif [[ $BUCKET == experimental* ]]; then
		gpgFingerprint=E33FF7BF5C91D50A6F91FFFD4CC38D40F9A96B49
		s3Headers='--add-header=Cache-Control:no-cache'
	fi

	# Upload the repo ($s3Headers is intentionally unquoted: it is either empty
	# or a single extra flag for s3cmd, and passing an empty "" argument breaks s3cmd)
	s3cmd --acl-public $s3Headers sync "$APTDIR/" "s3://$BUCKET/ubuntu/"
	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
echo "# WARNING! This script is deprecated. Please use the script"
echo "# at https://get.docker.com/"
EOF

	# Add a redirect at /ubuntu/info for URL backwards compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/ubuntu/info"

	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}
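
# Sketch of what using the published repo boils down to for an end user,
# assumed from the layout above (the signing key is served at /gpg and the
# repository uses codename "docker", component "main"); this is illustrative,
# not the exact text uploaded to $BUCKET/ubuntu/index:
#
#   curl -sSL $(s3_url)/gpg | sudo apt-key add -
#   echo "deb $(s3_url)/ubuntu docker main" | sudo tee /etc/apt/sources.list.d/docker.list
#   sudo apt-get update    # then install the .deb package published above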

# Upload binaries and tgz files to S3
release_binaries() {
	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386

	cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
# To install, run the following command as root:
curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode:
sudo /usr/local/bin/docker daemon
EOF

	# Add a redirect at /builds/info for URL backwards compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest"
	fi
}

# Upload the index script
release_index() {
	echo "Releasing index"
	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET/index"
}

release_test() {
	echo "Releasing tests"
	if [ -e "bundles/$VERSION/test" ]; then
		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET/test/"
	fi
}

setup_gpg() {
	echo "Setting up GPG"
	# Make sure that we have our keys
	mkdir -p "$HOME/.gnupg/"
	s3cmd sync "s3://$BUCKET/ubuntu/.gnupg/" "$HOME/.gnupg/" || true
	gpg --list-keys releasedocker >/dev/null || {
		gpg --gen-key --batch <<EOF
Key-Type: RSA
Key-Length: 4096
Passphrase: $GPG_PASSPHRASE
Name-Real: Docker Release Tool
Name-Email: docker@docker.com
Name-Comment: releasedocker
Expire-Date: 0
%commit
EOF
	}
}

main() {
	build_all
	setup_s3
	setup_gpg
	release_binaries
	release_ubuntu
	release_index
	release_test
}

main

echo
echo
echo "Release complete; see $(s3_url)"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
echo
echo "Ubuntu/Debian: curl -sSL $(s3_url) | sh"
echo "Linux 64bit binary: $(s3_url)/builds/Linux/x86_64/docker-$VERSION"
echo "Darwin/OSX 64bit client binary: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION"
echo "Darwin/OSX 32bit client binary: $(s3_url)/builds/Darwin/i386/docker-$VERSION"
echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit client binary: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.exe"
echo "Windows 32bit client binary: $(s3_url)/builds/Windows/i386/docker-$VERSION.exe"
echo