github.com/cnaize/docker@v1.10.2/hack/release.sh (about)

     1  #!/usr/bin/env bash
     2  set -e
     3  
     4  # This script looks for bundles built by make.sh, and releases them on a
     5  # public S3 bucket.
     6  #
     7  # Bundles should be available for the VERSION string passed as argument.
     8  #
     9  # The correct way to call this script is inside a container built by the
    10  # official Dockerfile at the root of the Docker source code. The Dockerfile,
    11  # make.sh and release.sh should all be from the same source code revision.
    12  
    13  set -o pipefail
    14  
    15  # Print a usage message and exit.
    16  usage() {
    17  	cat >&2 <<'EOF'
    18  To run, I need:
    19  - to be in a container generated by the Dockerfile at the top of the Docker
    20    repository;
    21  - to be provided with the location of an S3 bucket and path, in
    22    environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
    23  - to be provided with AWS credentials for this S3 bucket, in environment
    24    variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
    25  - a generous amount of good will and nice manners.
    26  The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
    27  
    28  docker run -e AWS_S3_BUCKET=test.docker.com \
    29             -e AWS_ACCESS_KEY=... \
    30             -e AWS_SECRET_KEY=... \
    31             -i -t --privileged \
    32             docker ./hack/release.sh
    33  EOF
    34  	exit 1
    35  }
    36  
    37  [ "$AWS_S3_BUCKET" ] || usage
    38  [ "$AWS_ACCESS_KEY" ] || usage
    39  [ "$AWS_SECRET_KEY" ] || usage
    40  [ -d /go/src/github.com/docker/docker ] || usage
    41  cd /go/src/github.com/docker/docker
    42  [ -x hack/make.sh ] || usage
    43  
    44  RELEASE_BUNDLES=(
    45  	binary
    46  	cross
    47  	tgz
    48  )
    49  
    50  if [ "$1" != '--release-regardless-of-test-failure' ]; then
    51  	RELEASE_BUNDLES=(
    52  		test-unit
    53  		"${RELEASE_BUNDLES[@]}"
    54  		test-integration-cli
    55  	)
    56  fi
    57  
# Release metadata: VERSION comes from the VERSION file at the repo root.
VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
# BUCKET_PATH is the bucket plus an optional sub-path prefix for all uploads.
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH

# Refuse to release from a dirty working tree (untracked files are ignored),
# so published artifacts always correspond to a committed revision.
if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi
    69  
# These are the 2 keys we've used to sign the debs:
#   release (get.docker.com)
#	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
#   test    (test.docker.com)
#	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
    75  
    76  setup_s3() {
    77  	echo "Setting up S3"
    78  	# Try creating the bucket. Ignore errors (it might already exist).
    79  	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
    80  	# Check access to the bucket.
    81  	# s3cmd has no useful exit status, so we cannot check that.
    82  	# Instead, we check if it outputs anything on standard output.
    83  	# (When there are problems, it uses standard error instead.)
    84  	s3cmd info "s3://$BUCKET" | grep -q .
    85  	# Make the bucket accessible through website endpoints.
    86  	s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
    87  }
    88  
    89  # write_to_s3 uploads the contents of standard input to the specified S3 url.
    90  write_to_s3() {
    91  	DEST=$1
    92  	F=`mktemp`
    93  	cat > "$F"
    94  	s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST"
    95  	rm -f "$F"
    96  }
    97  
    98  s3_url() {
    99  	case "$BUCKET" in
   100  		get.docker.com|test.docker.com|experimental.docker.com)
   101  			echo "https://$BUCKET_PATH"
   102  			;;
   103  		*)
   104  			BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
   105  			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
   106  				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
   107  			else
   108  				echo "$BASE_URL"
   109  			fi
   110  			;;
   111  	esac
   112  }
   113  
   114  build_all() {
   115  	echo "Building release"
   116  	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
   117  		echo >&2
   118  		echo >&2 'The build or tests appear to have failed.'
   119  		echo >&2
   120  		echo >&2 'You, as the release  maintainer, now have a couple options:'
   121  		echo >&2 '- delay release and fix issues'
   122  		echo >&2 '- delay release and fix issues'
   123  		echo >&2 '- did we mention how important this is?  issues need fixing :)'
   124  		echo >&2
   125  		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
   126  		echo >&2 ' really knows all the hairy problems at hand with the current release'
   127  		echo >&2 ' issues) may bypass this checking by running this script again with the'
   128  		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
   129  		echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
   130  		echo >&2 ' avoid using this if at all possible.'
   131  		echo >&2
   132  		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
   133  		echo >&2 ' should be used.  If there are release issues, we should always err on the'
   134  		echo >&2 ' side of caution.'
   135  		echo >&2
   136  		exit 1
   137  	fi
   138  }
   139  
   140  upload_release_build() {
   141  	src="$1"
   142  	dst="$2"
   143  	latest="$3"
   144  
   145  	echo
   146  	echo "Uploading $src"
   147  	echo "  to $dst"
   148  	echo
   149  	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
   150  	if [ "$latest" ]; then
   151  		echo
   152  		echo "Copying to $latest"
   153  		echo
   154  		s3cmd --acl-public cp "$dst" "$latest"
   155  	fi
   156  
   157  	# get hash files too (see hash_files() in hack/make.sh)
   158  	for hashAlgo in md5 sha256; do
   159  		if [ -e "$src.$hashAlgo" ]; then
   160  			echo
   161  			echo "Uploading $src.$hashAlgo"
   162  			echo "  to $dst.$hashAlgo"
   163  			echo
   164  			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
   165  			if [ "$latest" ]; then
   166  				echo
   167  				echo "Copying to $latest.$hashAlgo"
   168  				echo
   169  				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
   170  			fi
   171  		fi
   172  	done
   173  }
   174  
   175  release_build() {
   176  	echo "Releasing binaries"
   177  	GOOS=$1
   178  	GOARCH=$2
   179  
   180  	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
   181  	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
   182  	binary=docker-$VERSION
   183  	tgz=docker-$VERSION.tgz
   184  
   185  	latestBase=
   186  	if [ -z "$NOLATEST" ]; then
   187  		latestBase=docker-latest
   188  	fi
   189  
   190  	# we need to map our GOOS and GOARCH to uname values
   191  	# see https://en.wikipedia.org/wiki/Uname
   192  	# ie, GOOS=linux -> "uname -s"=Linux
   193  
   194  	s3Os=$GOOS
   195  	case "$s3Os" in
   196  		darwin)
   197  			s3Os=Darwin
   198  			;;
   199  		freebsd)
   200  			s3Os=FreeBSD
   201  			;;
   202  		linux)
   203  			s3Os=Linux
   204  			;;
   205  		windows)
   206  			s3Os=Windows
   207  			binary+='.exe'
   208  			if [ "$latestBase" ]; then
   209  				latestBase+='.exe'
   210  			fi
   211  			;;
   212  		*)
   213  			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
   214  			exit 1
   215  			;;
   216  	esac
   217  
   218  	s3Arch=$GOARCH
   219  	case "$s3Arch" in
   220  		amd64)
   221  			s3Arch=x86_64
   222  			;;
   223  		386)
   224  			s3Arch=i386
   225  			;;
   226  		arm)
   227  			s3Arch=armel
   228  			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
   229  			;;
   230  		*)
   231  			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
   232  			exit 1
   233  			;;
   234  	esac
   235  
   236  	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
   237  	latest=
   238  	latestTgz=
   239  	if [ "$latestBase" ]; then
   240  		latest="$s3Dir/$latestBase"
   241  		latestTgz="$s3Dir/$latestBase.tgz"
   242  	fi
   243  
   244  	if [ ! -x "$binDir/$binary" ]; then
   245  		echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
   246  		exit 1
   247  	fi
   248  	if [ ! -f "$tgzDir/$tgz" ]; then
   249  		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
   250  		exit 1
   251  	fi
   252  
   253  	upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
   254  	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
   255  }
   256  
# Upload binaries and tgz files to S3
# Iterates every platform directory produced by the 'cross' bundle and hands
# each GOOS/GOARCH pair to release_build, then publishes the /builds/index
# install snippet, a /builds/info redirect, and (unless $NOLATEST) the
# version marker at /latest.
release_binaries() {
	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	# Layout is bundles/$VERSION/cross/$GOOS/$GOARCH, so the directory name
	# is the arch and its parent is the OS.
	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386

	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
# To install, run the following command as root:
curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode:
sudo /usr/local/bin/docker daemon
EOF

	# Add redirect at /builds/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}
   288  
   289  # Upload the index script
   290  release_index() {
   291  	echo "Releasing index"
   292  	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET_PATH/index"
   293  }
   294  
   295  release_test() {
   296  	echo "Releasing tests"
   297  	if [ -e "bundles/$VERSION/test" ]; then
   298  		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET_PATH/test/"
   299  	fi
   300  }
   301  
   302  main() {
   303  	build_all
   304  	setup_s3
   305  	release_binaries
   306  	release_index
   307  	release_test
   308  }
   309  
   310  main
   311  
   312  echo
   313  echo
   314  echo "Release complete; see $(s3_url)"
   315  echo "Use the following text to announce the release:"
   316  echo
   317  echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
   318  echo
   319  echo "Linux 64bit binary: $(s3_url)/builds/Linux/x86_64/docker-$VERSION"
   320  echo "Darwin/OSX 64bit client binary: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION"
   321  echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
   322  echo "Windows 64bit client binary: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.exe"
   323  echo "Windows 32bit client binary: $(s3_url)/builds/Windows/i386/docker-$VERSION.exe"
   324  echo