github.com/clcy1243/docker@v1.6.0-rc3/hack/release.sh (about)

     1  #!/usr/bin/env bash
     2  set -e
     3  
     4  # This script looks for bundles built by make.sh, and releases them on a
     5  # public S3 bucket.
     6  #
     7  # Bundles should be available for the VERSION string passed as argument.
     8  #
     9  # The correct way to call this script is inside a container built by the
    10  # official Dockerfile at the root of the Docker source code. The Dockerfile,
    11  # make.sh and release.sh should all be from the same source code revision.
    12  
    13  set -o pipefail
    14  
    15  # Print a usage message and exit.
    16  usage() {
    17  	cat >&2 <<'EOF'
    18  To run, I need:
    19  - to be in a container generated by the Dockerfile at the top of the Docker
    20    repository;
    21  - to be provided with the name of an S3 bucket, in environment variable
    22    AWS_S3_BUCKET;
    23  - to be provided with AWS credentials for this S3 bucket, in environment
    24    variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
    25  - the passphrase to unlock the GPG key which will sign the deb packages
    26    (passed as environment variable GPG_PASSPHRASE);
    27  - a generous amount of good will and nice manners.
    28  The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
    29  
    30  docker run -e AWS_S3_BUCKET=test.docker.com \
    31             -e AWS_ACCESS_KEY=... \
    32             -e AWS_SECRET_KEY=... \
    33             -e GPG_PASSPHRASE=... \
    34             -i -t --privileged \
    35             docker ./hack/release.sh
    36  EOF
    37  	exit 1
    38  }
    39  
    40  [ "$AWS_S3_BUCKET" ] || usage
    41  [ "$AWS_ACCESS_KEY" ] || usage
    42  [ "$AWS_SECRET_KEY" ] || usage
    43  [ "$GPG_PASSPHRASE" ] || usage
    44  [ -d /go/src/github.com/docker/docker ] || usage
    45  cd /go/src/github.com/docker/docker
    46  [ -x hack/make.sh ] || usage
    47  
    48  RELEASE_BUNDLES=(
    49  	binary
    50  	cross
    51  	tgz
    52  	ubuntu
    53  )
    54  
    55  if [ "$1" != '--release-regardless-of-test-failure' ]; then
    56  	RELEASE_BUNDLES=(
    57  		test-unit test-integration
    58  		"${RELEASE_BUNDLES[@]}"
    59  		test-integration-cli
    60  	)
    61  fi
    62  
    63  VERSION=$(cat VERSION)
    64  BUCKET=$AWS_S3_BUCKET
    65  
    66  # These are the 2 keys we've used to sign the deb's
    67  #   release (get.docker.com)
    68  #	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
    69  #   test    (test.docker.com)
    70  #	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
    71  
    72  setup_s3() {
    73  	# Try creating the bucket. Ignore errors (it might already exist).
    74  	s3cmd mb s3://$BUCKET 2>/dev/null || true
    75  	# Check access to the bucket.
    76  	# s3cmd has no useful exit status, so we cannot check that.
    77  	# Instead, we check if it outputs anything on standard output.
    78  	# (When there are problems, it uses standard error instead.)
    79  	s3cmd info s3://$BUCKET | grep -q .
    80  	# Make the bucket accessible through website endpoints.
    81  	s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET
    82  }
    83  
    84  # write_to_s3 uploads the contents of standard input to the specified S3 url.
    85  write_to_s3() {
    86  	DEST=$1
    87  	F=`mktemp`
    88  	cat > $F
    89  	s3cmd --acl-public --mime-type='text/plain' put $F $DEST
    90  	rm -f $F
    91  }
    92  
    93  s3_url() {
    94  	case "$BUCKET" in
    95  		get.docker.com|test.docker.com)
    96  			echo "https://$BUCKET"
    97  			;;
    98  		*)
    99  			s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
   100  			;;
   101  	esac
   102  }
   103  
   104  build_all() {
   105  	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
   106  		echo >&2
   107  		echo >&2 'The build or tests appear to have failed.'
   108  		echo >&2
   109  		echo >&2 'You, as the release  maintainer, now have a couple options:'
   110  		echo >&2 '- delay release and fix issues'
   111  		echo >&2 '- delay release and fix issues'
   112  		echo >&2 '- did we mention how important this is?  issues need fixing :)'
   113  		echo >&2
   114  		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
   115  		echo >&2 ' really knows all the hairy problems at hand with the current release'
   116  		echo >&2 ' issues) may bypass this checking by running this script again with the'
   117  		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
   118  		echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
   119  		echo >&2 ' avoid using this if at all possible.'
   120  		echo >&2
   121  		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
   122  		echo >&2 ' should be used.  If there are release issues, we should always err on the'
   123  		echo >&2 ' side of caution.'
   124  		echo >&2
   125  		exit 1
   126  	fi
   127  }
   128  
   129  upload_release_build() {
   130  	src="$1"
   131  	dst="$2"
   132  	latest="$3"
   133  
   134  	echo
   135  	echo "Uploading $src"
   136  	echo "  to $dst"
   137  	echo
   138  	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
   139  	if [ "$latest" ]; then
   140  		echo
   141  		echo "Copying to $latest"
   142  		echo
   143  		s3cmd --acl-public cp "$dst" "$latest"
   144  	fi
   145  
   146  	# get hash files too (see hash_files() in hack/make.sh)
   147  	for hashAlgo in md5 sha256; do
   148  		if [ -e "$src.$hashAlgo" ]; then
   149  			echo
   150  			echo "Uploading $src.$hashAlgo"
   151  			echo "  to $dst.$hashAlgo"
   152  			echo
   153  			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
   154  			if [ "$latest" ]; then
   155  				echo
   156  				echo "Copying to $latest.$hashAlgo"
   157  				echo
   158  				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
   159  			fi
   160  		fi
   161  	done
   162  }
   163  
   164  release_build() {
   165  	GOOS=$1
   166  	GOARCH=$2
   167  
   168  	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
   169  	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
   170  	binary=docker-$VERSION
   171  	tgz=docker-$VERSION.tgz
   172  
   173  	latestBase=
   174  	if [ -z "$NOLATEST" ]; then
   175  		latestBase=docker-latest
   176  	fi
   177  
   178  	# we need to map our GOOS and GOARCH to uname values
   179  	# see https://en.wikipedia.org/wiki/Uname
   180  	# ie, GOOS=linux -> "uname -s"=Linux
   181  
   182  	s3Os=$GOOS
   183  	case "$s3Os" in
   184  		darwin)
   185  			s3Os=Darwin
   186  			;;
   187  		freebsd)
   188  			s3Os=FreeBSD
   189  			;;
   190  		linux)
   191  			s3Os=Linux
   192  			;;
   193  		windows)
   194  			s3Os=Windows
   195  			binary+='.exe'
   196  			if [ "$latestBase" ]; then
   197  				latestBase+='.exe'
   198  			fi
   199  			;;
   200  		*)
   201  			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
   202  			exit 1
   203  			;;
   204  	esac
   205  
   206  	s3Arch=$GOARCH
   207  	case "$s3Arch" in
   208  		amd64)
   209  			s3Arch=x86_64
   210  			;;
   211  		386)
   212  			s3Arch=i386
   213  			;;
   214  		arm)
   215  			s3Arch=armel
   216  			# someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too
   217  			;;
   218  		*)
   219  			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
   220  			exit 1
   221  			;;
   222  	esac
   223  
   224  	s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch
   225  	latest=
   226  	latestTgz=
   227  	if [ "$latestBase" ]; then
   228  		latest="$s3Dir/$latestBase"
   229  		latestTgz="$s3Dir/$latestBase.tgz"
   230  	fi
   231  
   232  	if [ ! -x "$binDir/$binary" ]; then
   233  		echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
   234  		exit 1
   235  	fi
   236  	if [ ! -f "$tgzDir/$tgz" ]; then
   237  		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
   238  		exit 1
   239  	fi
   240  
   241  	upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
   242  	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
   243  }
   244  
# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
#
# Requires the .deb bundles produced by `make.sh ubuntu`, the
# "releasedocker" key prepared by setup_gpg, and the dpkg-sig/reprepro/gpg
# tools available in the release container.
release_ubuntu() {
	[ -e bundles/$VERSION/ubuntu ] || {
		echo >&2 './hack/make.sh must be run before release_ubuntu'
		exit 1
	}

	# Sign our packages
	# NOTE(review): the passphrase is passed on the command line here (and to
	# gpg below), so it is briefly visible in `ps` output — presumably
	# acceptable only because this runs inside a dedicated release container.
	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
		--sign builder bundles/$VERSION/ubuntu/*.deb

	# Setup the APT repo
	APTDIR=bundles/$VERSION/ubuntu/apt
	mkdir -p $APTDIR/conf $APTDIR/db
	# Restore the previously-published reprepro database so this release is
	# appended to the existing repo (ignore failure: the bucket may be new).
	s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true
	cat > $APTDIR/conf/distributions <<EOF
Codename: docker
Components: main
Architectures: amd64 i386
EOF

	# Add the DEB package to the APT repo
	DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb
	reprepro -b $APTDIR includedeb docker $DEBFILE

	# Sign
	# Detach-sign every Release file reprepro generated; the resulting
	# Release.gpg files are what apt verifies on clients.
	for F in $(find $APTDIR -name Release); do
		gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
			--armor --sign --detach-sign \
			--output $F.gpg $F
	done

	# Upload keys
	# The full keyring is synced back to the bucket so future releases reuse
	# the same signing key; only the exported public key goes world-readable.
	s3cmd sync $HOME/.gnupg/ s3://$BUCKET/ubuntu/.gnupg/
	gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg
	s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg

	# Pick the fingerprint matching this bucket (see the key comments near
	# the top of this file): test.* buckets use the test key.
	local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9
	if [[ $BUCKET == test* ]]; then
		gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6
	fi

	# Upload repo
	s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
# Check that HTTPS transport is available to APT
if [ ! -e /usr/lib/apt/methods/https ]; then
	apt-get update
	apt-get install -y apt-transport-https
fi

# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list

# Then import the repository key
apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys $gpgFingerprint

# Install docker
apt-get update
apt-get install -y lxc-docker

#
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
#
EOF

	# Add redirect at /ubuntu/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info

	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}
   319  
   320  # Upload binaries and tgz files to S3
   321  release_binaries() {
   322  	[ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || {
   323  		echo >&2 './hack/make.sh must be run before release_binaries'
   324  		exit 1
   325  	}
   326  
   327  	for d in bundles/$VERSION/cross/*/*; do
   328  		GOARCH="$(basename "$d")"
   329  		GOOS="$(basename "$(dirname "$d")")"
   330  		release_build "$GOOS" "$GOARCH"
   331  	done
   332  
   333  	# TODO create redirect from builds/*/i686 to builds/*/i386
   334  
   335  	cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
   336  # To install, run the following command as root:
   337  curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
   338  # Then start docker in daemon mode:
   339  sudo /usr/local/bin/docker -d
   340  EOF
   341  
   342  	# Add redirect at /builds/info for URL-backwards-compatibility
   343  	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
   344  	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info
   345  
   346  	if [ -z "$NOLATEST" ]; then
   347  		echo "Advertising $VERSION on $BUCKET as most recent version"
   348  		echo $VERSION | write_to_s3 s3://$BUCKET/latest
   349  	fi
   350  }
   351  
   352  # Upload the index script
   353  release_index() {
   354  	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 s3://$BUCKET/index
   355  }
   356  
   357  release_test() {
   358  	if [ -e "bundles/$VERSION/test" ]; then
   359  		s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/
   360  	fi
   361  }
   362  
# Ensure the "releasedocker" GPG signing key is available locally: pull any
# previously-uploaded keyring from the bucket, and only if the key is still
# missing, batch-generate a fresh 4096-bit RSA key protected by
# GPG_PASSPHRASE (non-expiring, so existing published repos stay valid).
setup_gpg() {
	# Make sure that we have our keys
	mkdir -p $HOME/.gnupg/
	# Best-effort restore of an existing keyring (the bucket may be new).
	s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ $HOME/.gnupg/ || true
	# Generate a key only when none is present in the keyring.
	gpg --list-keys releasedocker >/dev/null || {
		gpg --gen-key --batch <<EOF
Key-Type: RSA
Key-Length: 4096
Passphrase: $GPG_PASSPHRASE
Name-Real: Docker Release Tool
Name-Email: docker@docker.com
Name-Comment: releasedocker
Expire-Date: 0
%commit
EOF
	}
}
   380  
# Run the full release pipeline in order: build + test everything, prepare
# the S3 bucket and GPG key, then publish binaries, the Ubuntu APT repo,
# the install script, and any test results.
main() {
	build_all
	setup_s3
	setup_gpg
	release_binaries
	release_ubuntu
	release_index
	release_test
}
   390  
   391  main
   392  
   393  echo
   394  echo
   395  echo "Release complete; see $(s3_url)"
   396  echo