
Add support for longer S3 bucket paths

Signed-off-by: Mike Dougherty <mike.dougherty@docker.com>

Mike Dougherty authored on 2015/08/26 04:18:04
Showing 1 changed file
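
The usage text in the diff below now asks for the bucket in AWS_S3_BUCKET and an optional sub-path in AWS_S3_BUCKET_PATH (defaulting to ''). As a rough sketch, a caller might export something like the following before running the release script; the bucket name, the sub-path, and the hack/release.sh filename are illustrative assumptions, not values taken from this commit:

    # Illustrative environment only; all values are placeholders.
    export AWS_S3_BUCKET=my-release-bucket        # bucket name (placeholder)
    export AWS_S3_BUCKET_PATH=nightly/2015-08-26  # optional sub-path, new in this change
    export AWS_ACCESS_KEY='<access-key>'          # S3 credentials required by the usage text
    export AWS_SECRET_KEY='<secret-key>'
    hack/release.sh                               # assumed name of the changed script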
@@ -18,8 +18,8 @@ usage() {
 To run, I need:
 - to be in a container generated by the Dockerfile at the top of the Docker
   repository;
-- to be provided with the name of an S3 bucket, in environment variable
-  AWS_S3_BUCKET;
+- to be provided with the location of an S3 bucket and path, in
+  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
 - to be provided with AWS credentials for this S3 bucket, in environment
   variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
 - the passphrase to unlock the GPG key which will sign the deb packages
@@ -62,6 +62,8 @@ fi
 
 VERSION=$(< VERSION)
 BUCKET=$AWS_S3_BUCKET
+BUCKET_PATH=$BUCKET
+[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
 
 # These are the 2 keys we've used to sign the deb's
 #   release (get.docker.com)
@@ -94,10 +96,15 @@ write_to_s3() {
 s3_url() {
 	case "$BUCKET" in
 		get.docker.com|test.docker.com|experimental.docker.com)
-			echo "https://$BUCKET"
+			echo "https://$BUCKET_PATH"
 			;;
 		*)
-			s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
+			BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
+			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
+				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
+			else
+				echo "$BASE_URL"
+			fi
 			;;
 	esac
 }
@@ -224,7 +231,7 @@ release_build() {
 			;;
 	esac
 
-	s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch
+	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
 	latest=
 	latestTgz=
 	if [ "$latestBase" ]; then
@@ -323,7 +330,7 @@ release_binaries() {
 
 	# TODO create redirect from builds/*/i686 to builds/*/i386
 
-	cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
+	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
 # To install, run the following command as root:
 curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
 # Then start docker in daemon mode:
@@ -332,24 +339,24 @@ EOF
 
 	# Add redirect at /builds/info for URL-backwards-compatibility
 	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
-	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/builds/info"
+	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"
 
 	if [ -z "$NOLATEST" ]; then
-		echo "Advertising $VERSION on $BUCKET as most recent version"
-		echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest"
+		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
+		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
 	fi
 }
 
 # Upload the index script
 release_index() {
 	echo "Releasing index"
-	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET/index"
+	sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET_PATH/index"
 }
 
 release_test() {
 	echo "Releasing tests"
 	if [ -e "bundles/$VERSION/test" ]; then
-		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET/test/"
+		s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET_PATH/test/"
 	fi
 }
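
For illustration, the path composition this change introduces can be traced with made-up values. The standalone sketch below repeats the BUCKET_PATH logic from the diff and shows what the s3:// upload targets and the s3_url output become; nothing here is part of the commit, and the bucket/path values are placeholders:

    #!/bin/bash
    # Standalone sketch, not part of the commit: trace the BUCKET_PATH
    # composition with placeholder values.
    AWS_S3_BUCKET=test.docker.com
    AWS_S3_BUCKET_PATH=rc/1.8.0

    BUCKET=$AWS_S3_BUCKET
    BUCKET_PATH=$BUCKET
    [[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH

    echo "$BUCKET_PATH"               # test.docker.com/rc/1.8.0
    echo "s3://$BUCKET_PATH/builds/"  # s3://test.docker.com/rc/1.8.0/builds/ (upload target)
    echo "https://$BUCKET_PATH"       # https://test.docker.com/rc/1.8.0 (s3_url for the known buckets)
    # When AWS_S3_BUCKET_PATH is unset or empty, BUCKET_PATH stays equal to
    # $BUCKET and every URL matches the previous behaviour.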