github.com/ctmnz/docker@v1.6.0-rc3/docs/release.sh (about)

     1  #!/bin/bash
     2  set -e
     3  
     4  set -o pipefail
     5  
# Print setup/usage instructions to stderr and exit non-zero.
# The here-doc delimiter is quoted ('EOF'), so $AWS_S3_BUCKET and the
# backticked examples below are emitted literally, never expanded.
usage() {
	cat >&2 <<'EOF'
To publish the Docker documentation you need to set your access_key and secret_key in the docs/awsconfig file 
(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
and set the AWS_S3_BUCKET env var to the name of your bucket.

If you're publishing the current release's documentation, also set `BUILD_ROOT=yes`

make AWS_S3_BUCKET=docs-stage.docker.com docs-release

will then push the documentation site to your s3 bucket.

 Note: you can add `OPTIONS=--dryrun` to see what will be done without sending to the server
 You can also add NOCACHE=1 to publish without a cache, which is what we do for the master docs.
EOF
	exit 1
}
    23  
    24  create_robots_txt() {
    25  	cat > ./sources/robots.txt <<'EOF'
    26  User-agent: *
    27  Disallow: /
    28  EOF
    29  }
    30  
    31  setup_s3() {
    32  	# Try creating the bucket. Ignore errors (it might already exist).
    33  	echo "create $BUCKET if it does not exist"
    34  	aws s3 mb --profile $BUCKET s3://$BUCKET 2>/dev/null || true
    35  
    36  	# Check access to the bucket.
    37  	echo "test $BUCKET exists"
    38  	aws s3 --profile $BUCKET ls s3://$BUCKET
    39  
    40  	# Make the bucket accessible through website endpoints.
    41  	echo "make $BUCKET accessible as a website"
    42  	#aws s3 website s3://$BUCKET --index-document index.html --error-document jsearch/index.html
    43  	local s3conf=$(cat s3_website.json | envsubst)
    44  	aws s3api --profile $BUCKET put-bucket-website --bucket $BUCKET --website-configuration "$s3conf"
    45  }
    46  
    47  build_current_documentation() {
    48  	mkdocs build
    49  	cd site/
    50  	gzip -9k -f search_content.json
    51  	cd ..
    52  }
    53  
# Upload the freshly built ./site/ tree to S3 with public-read ACLs.
# Arguments:
#   $1 - optional key prefix (e.g. "/v1.6/"); empty uploads to bucket root.
# Globals: BUCKET (bucket + awscli profile), OPTIONS (e.g. --dryrun),
#          NOCACHE (any non-empty value disables caching headers).
upload_current_documentation() {
	src=site/
	dst=s3://$BUCKET$1

	# Cache objects for an hour by default; NOCACHE switches to no-cache
	# (used for the master docs so edits show up immediately).
	cache=max-age=3600
	if [ "$NOCACHE" ]; then
		cache=no-cache
	fi

	printf "\nUploading $src to $dst\n"

	# a really complicated way to send only the files we want
	# if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go
	#  versions.html_fragment
	# NOTE(review): $i is never set in this function — it looks like a
	# leftover from a loop over file extensions. The filter expands to
	# the literal pattern "*." (quote characters included) and so is
	# effectively a no-op on top of --recursive. Confirm before "fixing":
	# the unquoted $run expansion below depends on word-splitting this
	# exact string, quotes and all.
	include="--recursive --include \"*.$i\" "
	run="aws s3 cp $src $dst $OPTIONS --profile $BUCKET --cache-control $cache --acl public-read $include"
	# Echo the exact command (with its word-splitting hack) before running it.
	printf "\n=====\n$run\n=====\n"
	$run

	# Make sure the search_content.json.gz file has the right content-encoding
	aws s3 cp --profile $BUCKET --cache-control $cache --content-encoding="gzip" --acl public-read "site/search_content.json.gz" "$dst"
}
    76  
# Invalidate the CloudFront cache for every uploaded file so the CDN
# serves fresh content. Silently skipped unless DISTRIBUTION_ID is set.
# Arguments:
#   $1 - path prefix on the distribution (e.g. "/v1.6"); may be empty.
# Globals: DISTRIBUTION_ID, AWS_S3_BUCKET (used as the awscli profile).
invalidate_cache() {
	if [[ -z "$DISTRIBUTION_ID" ]]; then
		echo "Skipping Cloudfront cache invalidation"
		return
	fi

	dst=$1

	# CloudFront support was a preview feature of awscli at the time;
	# this opt-in is required before the cloudfront subcommand works.
	aws configure set preview.cloudfront true

	# Get all the files
	# not .md~ files
	# replace spaces w %20 so urlencoded
	# NOTE(review): the unquoted $( ) relies on word-splitting; it only
	# holds together because spaces were already rewritten to %20 —
	# other whitespace or glob characters in filenames would still break.
	files=( $(find site/ -not -name "*.md*" -type f | sed 's/site//g' | sed 's/ /%20/g') )

	len=${#files[@]}
	last_file=${files[$((len-1))]}

	# Build a throwaway shell script ("batchfile") that invokes
	# create-invalidation with a JSON batch listing every path.
	echo "aws cloudfront  create-invalidation --profile $AWS_S3_BUCKET --distribution-id $DISTRIBUTION_ID --invalidation-batch '" > batchfile
	echo "{\"Paths\":{\"Quantity\":$len," >> batchfile
	echo "\"Items\": [" >> batchfile

	# JSON requires a comma after every item except the last.
	# NOTE(review): the unquoted RHS of == makes this a glob match, not a
	# string compare — a last file containing glob metacharacters could
	# match early. Harmless for these doc paths, but worth confirming.
	for file in "${files[@]}" ; do
		if [[ $file == $last_file ]]; then
			comma=""
		else
			comma=","
		fi
		echo "\"$dst$file\"$comma" >> batchfile
	done

	# CallerReference must be unique per invalidation request; the
	# current timestamp serves that purpose.
	echo "]}, \"CallerReference\":\"$(date)\"}'" >> batchfile

	sh batchfile
}
   112  
# Entry point: validate configuration, build the docs, push them to the
# S3 bucket named by AWS_S3_BUCKET, and invalidate the CloudFront cache.
# Globals read: AWS_S3_BUCKET (required), BUILD_ROOT, OPTIONS, NOCACHE.
main() {
	[ "$AWS_S3_BUCKET" ] || usage

	# Make sure there is an awsconfig file
	export AWS_CONFIG_FILE=$(pwd)/awsconfig
	[ -f "$AWS_CONFIG_FILE" ] || usage

	# Get the version
	VERSION=$(cat VERSION)

	# Disallow pushing dev docs to master
	# ${VERSION%-dev} differs from $VERSION only when VERSION ends in -dev.
	if [ "$AWS_S3_BUCKET" == "docs.docker.com" ] && [ "${VERSION%-dev}" != "$VERSION" ]; then
		echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
		exit 1
	fi

	# Clean version - 1.0.2-dev -> 1.0
	# %.* strips the shortest trailing ".suffix" (patch level plus any -dev tag).
	export MAJOR_MINOR="v${VERSION%.*}"

	# The awscli profile name is the bucket name (see docs/awsconfig);
	# exported so child aws invocations pick it up.
	export BUCKET=$AWS_S3_BUCKET
	export AWS_DEFAULT_PROFILE=$BUCKET

	# debug variables
	echo "bucket: $BUCKET, full version: $VERSION, major-minor: $MAJOR_MINOR"
	echo "cfg file: $AWS_CONFIG_FILE ; profile: $AWS_DEFAULT_PROFILE"

	# create the robots.txt
	create_robots_txt

	# Bucket creation/configuration mutates AWS state, so skip it on a
	# dry run.
	if [ "$OPTIONS" != "--dryrun" ]; then
		setup_s3
	fi

	# Default to only building the version specific docs
	# so we don't clobber the latest by accident with old versions
	if [ "$BUILD_ROOT" == "yes" ]; then
		echo "Building root documentation"
		build_current_documentation

		echo "Uploading root documentation"
		upload_current_documentation
		[ "$NOCACHE" ] || invalidate_cache
	fi

	#build again with /v1.0/ prefix
	# Rewrite site_url in mkdocs.yml in place (GNU sed -i) so generated
	# links resolve under the versioned prefix.
	sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
	echo "Building the /$MAJOR_MINOR/ documentation"
	build_current_documentation

	echo "Uploading the documentation"
	upload_current_documentation "/$MAJOR_MINOR/"

	# Invalidating cache
	[ "$NOCACHE" ] || invalidate_cache "/$MAJOR_MINOR"
}
   168  
   169  main