hack/release.sh

#!/usr/bin/env bash
set -e

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string passed as argument.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.
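#
# As a rough sketch of that flow (the "docker" image tag here is only an example;
# the full run command is shown in usage() below):
#
#   docker build -t docker .
#   docker run ... docker ./hack/release.sh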

set -o pipefail

# Print a usage message and exit.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
  repository;
- to be provided with the location of an S3 bucket and path, in
  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
- to be provided with AWS credentials for this S3 bucket, in environment
  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile, e.g.:

docker run -e AWS_S3_BUCKET=test.docker.com \
           -e AWS_ACCESS_KEY_ID     \
           -e AWS_SECRET_ACCESS_KEY \
           -e AWS_DEFAULT_REGION    \
           -it --privileged         \
           docker ./hack/release.sh
EOF
	exit 1
}
 
[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY_ID" ] || usage
[ "$AWS_SECRET_ACCESS_KEY" ] || usage
[ -d /go/src/github.com/docker/docker ] || usage
cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage

export AWS_DEFAULT_REGION
: ${AWS_DEFAULT_REGION:=us-west-1}

RELEASE_BUNDLES=(
	binary
	cross
	tgz
)

if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration-cli
	)
fi
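# By default (when the bypass flag above is not given), build_all below therefore
# runs: ./hack/make.sh test-unit binary cross tgz test-integration-cli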

VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH

if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi

# These are the 2 keys we've used to sign the debs
#   release (get.docker.com)
#	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
#   test    (test.docker.com)
#	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
 
setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	aws s3 mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	aws s3 ls "s3://$BUCKET" >/dev/null
	# Make the bucket accessible through website endpoints.
	aws s3 website --index-document index --error-document error "s3://$BUCKET"
}

# write_to_s3 uploads the contents of standard input to the specified S3 url.
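# Example (as done in release_binaries below):
#   echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"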
write_to_s3() {
	DEST=$1
	F=`mktemp`
	cat > "$F"
	aws s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
	rm -f "$F"
}
 
s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET_PATH"
			;;
		*)
			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
			else
				echo "$BASE_URL"
			fi
			;;
	esac
}
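# For illustration (bucket name and region below are just examples): s3_url prints
# "https://test.docker.com" for BUCKET=test.docker.com, and
# "http://my-bucket.s3-website-us-west-1.amazonaws.com" for a custom bucket
# "my-bucket" with AWS_DEFAULT_REGION=us-west-1 and no AWS_S3_BUCKET_PATH set.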
 
build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'
		echo >&2
		echo >&2 'You, as the release maintainer, now have a couple options:'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- did we mention how important this is?  issues need fixing :)'
		echo >&2
		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
		echo >&2 ' really knows all the hairy problems at hand with the current release'
		echo >&2 ' issues) may bypass this checking by running this script again with the'
		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
		echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
		echo >&2 ' avoid using this if at all possible.'
		echo >&2
		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
		echo >&2 ' should be used.  If there are release issues, we should always err on the'
		echo >&2 ' side of caution.'
		echo >&2
		exit 1
	fi
}
 
upload_release_build() {
	src="$1"
	dst="$2"
	latest="$3"

	echo
	echo "Uploading $src"
	echo "  to $dst"
	echo
	aws s3 cp --follow-symlinks --acl public-read "$src" "$dst"
	if [ "$latest" ]; then
		echo
		echo "Copying to $latest"
		echo
		aws s3 cp --acl public-read "$dst" "$latest"
	fi

	# get hash files too (see hash_files() in hack/make.sh)
	for hashAlgo in md5 sha256; do
		if [ -e "$src.$hashAlgo" ]; then
			echo
			echo "Uploading $src.$hashAlgo"
			echo "  to $dst.$hashAlgo"
			echo
			aws s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to $latest.$hashAlgo"
				echo
				aws s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
}
 
release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	zipExt=".tgz"
	binaryExt=""
	tgz=$binary$zipExt

	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# ie, GOOS=linux -> "uname -s"=Linux
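	# e.g. linux/amd64 is uploaded under builds/Linux/x86_64/ in the bucket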
 
	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			# this is Windows; use the .zip and .exe extensions for the files.
			s3Os=Windows
			zipExt=".zip"
			binaryExt=".exe"
			tgz=$binary$zipExt
			binary+=$binaryExt
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac

	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
	# latest=
	latestTgz=
	if [ "$latestBase" ]; then
		# commented out since we aren't uploading binaries right now.
		# latest="$s3Dir/$latestBase$binaryExt"
		# we don't include the $binaryExt because we don't want docker.exe.zip
		latestTgz="$s3Dir/$latestBase$zipExt"
	fi

	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi
	# disable binary uploads for now. Only providing tgz downloads
	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}

# Upload binaries and tgz files to S3
release_binaries() {
	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386
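	# (one possible approach, not implemented here: publish empty objects with a
	#  --website-redirect header, mirroring the /builds/info redirect below, e.g.
	#    aws s3 cp --acl public-read --website-redirect "/builds/Linux/i386/docker-$VERSION.tgz" \
	#        /tmp/emptyfile "s3://$BUCKET_PATH/builds/Linux/i686/docker-$VERSION.tgz")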

	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
# To install, run the following commands as root:
curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin

# Then start docker in daemon mode:
/usr/local/bin/dockerd
EOF

	# Add redirect at /builds/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	aws s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}

# Upload the index script
release_index() {
	echo "Releasing index"
	url="$(s3_url)/" hack/make.sh install-script
	write_to_s3 "s3://$BUCKET_PATH/index" < "bundles/$VERSION/install-script/install.sh"
}
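# (Because setup_s3 configures "index" as the website index document, end users can
#  then install with something along the lines of `curl -fsSL https://get.docker.com/ | sh`
#  when the release bucket is get.docker.com.)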
 
main() {
	build_all
	setup_s3
	release_binaries
	release_index
}

main

echo
echo
echo "Release complete; see $(s3_url)"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
echo
echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
echo