#!/usr/bin/env bash

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string passed as argument.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.

# Abort on the first failing command, including failures anywhere
# inside a pipeline.
set -e
set -o pipefail
# Print a usage message and exit.
#
# Lists every environment variable the script requires and shows the
# canonical containerized invocation. Always exits with status 1.
# Fix: dropped the stray trailing double-quote after "e.g.:" that leaked
# into the printed help text.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
  repository;
- to be provided with the location of an S3 bucket and path, in
  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
- to be provided with AWS credentials for this S3 bucket, in environment
  variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
- the passphrase to unlock the GPG key specified by the optional environment
  variable GPG_KEYID (default: releasedocker) which will sign the deb
  packages (passed as environment variable GPG_PASSPHRASE);
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile: e.g.:

	docker run -e AWS_S3_BUCKET=test.docker.com \
		-e AWS_ACCESS_KEY=... \
		-e AWS_SECRET_KEY=... \
		-e GPG_PASSPHRASE=... \
		-i -t --privileged \
		docker ./hack/release.sh
EOF
	exit 1
}
# Validate required configuration up front, before doing anything remote.
[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY" ] || usage
[ "$AWS_SECRET_KEY" ] || usage
[ "$GPG_PASSPHRASE" ] || usage

# Key used to sign the deb packages; overridable for test releases.
# (Quoted per SC2223 so the expansion can never be word-split or globbed.)
: "${GPG_KEYID:=releasedocker}"

# We must be running inside the official build container, at the expected
# source path, with make.sh available.
[ -d /go/src/github.com/docker/docker ] || usage
cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage

# Bundles to build and release. The test bundles are wrapped around the
# build bundles unless the maintainer explicitly bypasses them.
RELEASE_BUNDLES=(
	binary
	cross
	tgz
	ubuntu
)
if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration-cli
	)
fi

VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH

# Refuse to release from a dirty tree: published artifacts must correspond
# to a committed revision.
if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi

# These are the 2 keys we've used to sign the deb's
#   release (get.docker.com)
#	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
#   test (test.docker.com)
#	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
# setup_s3 prepares the release bucket: creates it if needed, verifies we
# can read it, and enables the S3 website endpoints.
setup_s3() {
	echo "Setting up S3"
	local bucketUri="s3://$BUCKET"
	# Creating the bucket may fail if it already exists; that is fine.
	s3cmd mb "$bucketUri" 2>/dev/null || true
	# s3cmd's exit status is not meaningful, so verify access by checking
	# that 'info' prints something on stdout (problems go to stderr).
	s3cmd info "$bucketUri" | grep -q .
	# Expose the bucket through website endpoints.
	s3cmd ws-create --ws-index index --ws-error error "$bucketUri"
}
# write_to_s3 uploads the contents of standard input to the specified S3 url.
#
# Arguments:
#   $1 - destination S3 url
#
# Fixes: modern $( ) instead of backticks; function-local variables so the
# helper no longer clobbers globals.
write_to_s3() {
	local dest=$1
	local tmpfile
	tmpfile=$(mktemp)
	cat > "$tmpfile"
	s3cmd --acl-public --mime-type='text/plain' put "$tmpfile" "$dest"
	rm -f "$tmpfile"
}
# s3_url prints the public base URL for the configured bucket/path.
#
# Known production buckets are served over HTTPS at their own domain name;
# any other bucket falls back to its S3 website endpoint (queried live via
# s3cmd). Fixes: quoted the s3:// argument, and passed $BUCKET into awk
# with -v instead of splicing it into the program text (which word-split
# and was injection-prone).
s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET_PATH"
			;;
		*)
			local BASE_URL
			BASE_URL=$(
				s3cmd ws-info "s3://$BUCKET" \
					| awk -F ': +' -v bucket="$BUCKET" \
						'$0 ~ "http://" bucket { gsub(/\/+$/, "", $2); print $2 }'
			)
			if [[ -n "$AWS_S3_BUCKET_PATH" ]]; then
				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
			else
				echo "$BASE_URL"
			fi
			;;
	esac
}

# build_all runs make.sh over every configured bundle (including the test
# bundles unless bypassed) and aborts the release on any failure, printing
# guidance for the release maintainer.
build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		cat >&2 <<'EOF'

The build or tests appear to have failed.

You, as the release maintainer, now have a couple options:
- delay release and fix issues
- delay release and fix issues
- did we mention how important this is? issues need fixing :)

As a final LAST RESORT, you (because only you, the release maintainer,
 really knows all the hairy problems at hand with the current release
 issues) may bypass this checking by running this script again with the
 single argument of "--release-regardless-of-test-failure", which will skip
 running the test suite, and will only build the binaries and packages. Please
 avoid using this if at all possible.

Regardless, we cannot stress enough the scarcity with which this bypass
 should be used. If there are release issues, we should always err on the
 side of caution.

EOF
		exit 1
	fi
}

# upload_release_build uploads one artifact (plus its .md5/.sha256
# companions, when present) to S3, optionally copying it to a "latest"
# alias as well.
#
# Arguments:
#   $1 - local source path
#   $2 - destination S3 url
#   $3 - optional "latest" S3 url to copy the uploaded object to
#
# Fix: all working variables are now function-local so repeated calls
# cannot leak state into the caller.
upload_release_build() {
	local src="$1"
	local dst="$2"
	local latest="$3"
	local hashAlgo

	echo
	echo "Uploading  $src"
	echo "  to $dst"
	echo
	s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
	if [ "$latest" ]; then
		echo
		echo "Copying to $latest"
		echo
		s3cmd --acl-public cp "$dst" "$latest"
	fi

	# get hash files too (see hash_files() in hack/make.sh)
	for hashAlgo in md5 sha256; do
		if [ -e "$src.$hashAlgo" ]; then
			echo
			echo "Uploading  $src.$hashAlgo"
			echo "  to $dst.$hashAlgo"
			echo
			s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to $latest.$hashAlgo"
				echo
				s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
}

# release_build uploads the cross-compiled binary and tgz for one platform.
#
# Arguments:
#   $1 - GOOS   (e.g. linux, darwin, windows, freebsd)
#   $2 - GOARCH (e.g. amd64, 386, arm)
#
# The GOOS/GOARCH pair is mapped to the corresponding `uname -s`/`uname -m`
# values used in the S3 layout. Exits 1 on an unknown platform or if the
# expected artifacts were not produced by make.sh.
#
# Fix: every working variable is declared local so this function no longer
# pollutes the caller's namespace.
release_build() {
	echo "Releasing binaries"
	local GOOS=$1
	local GOARCH=$2

	local binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	local tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	local binary=docker-$VERSION
	local tgz=docker-$VERSION.tgz

	local latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# ie, GOOS=linux -> "uname -s"=Linux

	local s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			s3Os=Windows
			binary+='.exe'
			if [ "$latestBase" ]; then
				latestBase+='.exe'
			fi
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	local s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac

	local s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
	local latest=
	local latestTgz=
	if [ "$latestBase" ]; then
		latest="$s3Dir/$latestBase"
		latestTgz="$s3Dir/$latestBase.tgz"
	fi

	if [ ! -x "$binDir/$binary" ]; then
		echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
		exit 1
	fi
	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi

	upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}

# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
#
# Fixes: the Release-file signing loop no longer word-splits `find` output
# (NUL-delimited read instead, SC2044); $APTDIR and the S3 urls are quoted;
# the optional extra s3cmd header is carried in an array instead of an
# unquoted word-split string.
release_ubuntu() {
	echo "Releasing ubuntu"
	[ -e "bundles/$VERSION/ubuntu" ] || {
		echo >&2 './hack/make.sh must be run before release_ubuntu'
		exit 1
	}

	local debfiles=( "bundles/$VERSION/ubuntu/"*.deb )

	# Sign our packages
	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k "$GPG_KEYID" --sign builder "${debfiles[@]}"

	# Setup the APT repo
	local APTDIR=bundles/$VERSION/ubuntu/apt
	mkdir -p "$APTDIR/conf" "$APTDIR/db"
	# Pull the existing repo database so this release appends to history.
	s3cmd sync "s3://$BUCKET/ubuntu/db/" "$APTDIR/db/" || true
	cat > "$APTDIR/conf/distributions" <<EOF
Codename: docker
Components: main
Architectures: amd64 i386
EOF

	# Add the DEB package to the APT repo
	reprepro -b "$APTDIR" includedeb docker "${debfiles[@]}"

	# Sign every Release file produced by reprepro.
	local F
	while IFS= read -r -d '' F; do
		gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
			--armor --sign --detach-sign \
			--output "$F.gpg" "$F"
	done < <(find "$APTDIR" -name Release -print0)

	# Upload keys
	s3cmd sync "$HOME/.gnupg/" "s3://$BUCKET/ubuntu/.gnupg/"
	gpg --armor --export "$GPG_KEYID" > "bundles/$VERSION/ubuntu/gpg"
	s3cmd --acl-public put "bundles/$VERSION/ubuntu/gpg" "s3://$BUCKET/gpg"

	# NOTE(review): gpgFingerprint is selected here but not used in the
	# visible code path — presumably consumed by the index instructions in
	# an earlier revision; confirm before removing.
	local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9
	local s3Headers=()
	if [[ $BUCKET == test* ]]; then
		gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6
	elif [[ $BUCKET == experimental* ]]; then
		gpgFingerprint=E33FF7BF5C91D50A6F91FFFD4CC38D40F9A96B49
		# Experimental moves fast; keep CDN/browser caches from going stale.
		s3Headers=( '--add-header=Cache-Control:no-cache' )
	fi

	# Upload repo
	s3cmd --acl-public "${s3Headers[@]}" sync "$APTDIR/" "s3://$BUCKET/ubuntu/"
	cat <<EOF | write_to_s3 "s3://$BUCKET/ubuntu/index"
echo "# WARNING! This script is deprecated. Please use the script"
echo "# at https://get.docker.com/"
EOF

	# Add redirect at /ubuntu/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/ubuntu/info"

	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}

# Upload binaries and tgz files to S3.
#
# Iterates every GOOS/GOARCH directory produced by the 'cross' bundle,
# uploads each platform via release_build, publishes install instructions
# at builds/index, and (unless NOLATEST is set) advertises this VERSION as
# the latest release. Fixes: quoted the glob prefix and the write_to_s3
# destination url (SC2086).
release_binaries() {
	[ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	local d
	for d in "bundles/$VERSION/cross/"*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386

	cat <<EOF | write_to_s3 "s3://$BUCKET_PATH/builds/index"
# To install, run the following command as root:
curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode:
sudo /usr/local/bin/docker daemon
EOF

	# Add redirect at /builds/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}

# Upload the index script: publish hack/install.sh with its hard-coded
# download URL rewritten to point at this release's bucket.
release_index() {
	echo "Releasing index"
	local replacement
	replacement="url='$(s3_url)/'"
	sed "s,url='https://get.docker.com/',$replacement," hack/install.sh | write_to_s3 "s3://$BUCKET_PATH/index"
}

# release_test publishes the test results bundle, if make.sh produced one;
# otherwise it is a quiet no-op.
release_test() {
	echo "Releasing tests"
	local testDir="bundles/$VERSION/test"
	if [ -e "$testDir" ]; then
		s3cmd --acl-public sync "$testDir/" "s3://$BUCKET_PATH/test/"
	fi
}

# setup_gpg ensures the signing key named by GPG_KEYID exists locally:
# it first restores any keyring previously synced to the bucket, and only
# generates a fresh key if the keyid is still unknown.
setup_gpg() {
	echo "Setting up GPG"
	# Restore a previously-uploaded keyring; tolerate a missing one.
	mkdir -p "$HOME/.gnupg/"
	s3cmd sync "s3://$BUCKET/ubuntu/.gnupg/" "$HOME/.gnupg/" || true

	# Generate the signing key on first use.
	if ! gpg --list-keys "$GPG_KEYID" >/dev/null; then
		gpg --gen-key --batch <<EOF
Key-Type: RSA
Key-Length: 4096
Passphrase: $GPG_PASSPHRASE
Name-Real: Docker Release Tool
Name-Email: docker@docker.com
Name-Comment: $GPG_KEYID
Expire-Date: 0
%commit
EOF
	fi
}

main() {
	# Release steps in dependency order: build everything first, then set
	# up the destinations, then push each artifact family.
	local step
	for step in \
		build_all \
		setup_s3 \
		setup_gpg \
		release_binaries \
		release_ubuntu \
		release_index \
		release_test; do
		"$step"
	done
}

main
# Final announcement: print the canonical download URLs for the release.
# Fix: s3_url may shell out to s3cmd (a remote call) on every invocation;
# the original re-ran it for each line. Call it once and reuse the result.
echo
echo
url=$(s3_url)
echo "Release complete; see $url"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $url. You can download it with the following:"
echo
echo "Ubuntu/Debian: curl -sSL $url | sh"
echo "Linux 64bit binary: $url/builds/Linux/x86_64/docker-$VERSION"
echo "Darwin/OSX 64bit client binary: $url/builds/Darwin/x86_64/docker-$VERSION"
echo "Darwin/OSX 32bit client binary: $url/builds/Darwin/i386/docker-$VERSION"
echo "Linux 64bit tgz: $url/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit client binary: $url/builds/Windows/x86_64/docker-$VERSION.exe"
echo "Windows 32bit client binary: $url/builds/Windows/i386/docker-$VERSION.exe"
echo