This also removes the now-defunct `*maintainer*.sh` scripts, which don't work with the new TOML format, and moves a couple of scripts that are not build- or release-related into `contrib/` instead.
Signed-off-by: Andrew "Tianon" Page <admwiggin@gmail.com>
@@ -1,5 +1,5 @@
 # This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `project/generate-authors.sh`.
+# For how it is generated, see `hack/generate-authors.sh`.
 
 Aanand Prasad <aanand.prasad@gmail.com>
 Aaron Feng <aaron.feng@gmail.com>
new file mode 100755 |
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+## Run this script from the root of the docker repository
+## to query project stats useful to the maintainers.
+## You will need to install `pulls` and `issues` from
+## http://github.com/crosbymichael/pulls
+
+set -e
+
+echo -n "Open pulls: "
+PULLS=$(pulls | wc -l); let PULLS=$PULLS-1
+echo $PULLS
+
+echo -n "Pulls alru: "
+pulls alru
+
+echo -n "Open issues: "
+ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1
+echo $ISSUES
+
+echo -n "Issues alru: "
+issues alru
new file mode 100644 |
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+# This is a convenience script for reporting issues that include a base
+# template of information. See https://github.com/docker/docker/pull/8845
+
+set -e
+
+DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"}
+DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "}
+DOCKER=${DOCKER:-"docker"}
+DOCKER_COMMAND="${DOCKER}"
+export DOCKER_COMMAND
+
+# pulled from https://gist.github.com/cdown/1163649
+function urlencode() {
+	# urlencode <string>
+
+	local length="${#1}"
+	for (( i = 0; i < length; i++ )); do
+		local c="${1:i:1}"
+		case $c in
+			[a-zA-Z0-9.~_-]) printf "$c" ;;
+			*) printf '%%%02X' "'$c"
+		esac
+	done
+}
+
+function template() {
+# this should always match the template from CONTRIBUTING.md
+	cat <<- EOM
+		Description of problem:
+
+
+		\`docker version\`:
+		`${DOCKER_COMMAND} -D version`
+
+
+		\`docker info\`:
+		`${DOCKER_COMMAND} -D info`
+
+
+		\`uname -a\`:
+		`uname -a`
+
+
+		Environment details (AWS, VirtualBox, physical, etc.):
+
+
+		How reproducible:
+
+
+		Steps to Reproduce:
+		1.
+		2.
+		3.
+
+
+		Actual Results:
+
+
+		Expected Results:
+
+
+		Additional info:
+
+
+	EOM
+}
+
+function format_issue_url() {
+	if [ ${#@} -ne 2 ] ; then
+		return 1
+	fi
+	local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}")
+	local issue_body=$(urlencode "${2}")
+	echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}"
+}
+
+
+echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
+read -r -n 1 use_sudo
+echo ""
+
+if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
+	export DOCKER_COMMAND="sudo ${DOCKER}"
+fi
+
+echo -ne "Title of new issue?: "
+read -r issue_title
+echo ""
+
+issue_url=$(format_issue_url "${issue_title}" "$(template)")
+
+if which xdg-open 2>/dev/null >/dev/null ; then
+	echo -ne "Would you like to launch this report in your browser? [Y|n]: "
+	read -r -n 1 launch_now
+	echo ""
+
+	if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then
+		xdg-open "${issue_url}"
+	fi
+fi
+
+echo "If you would like to open the URL manually, open this link in your browser: ${issue_url}"
+
@@ -242,9 +242,9 @@ build and run a `docker` binary in your container.
 
 4. From the `/go/src/github.com/docker/docker` directory make a `docker` binary with the `make.sh` script.
 
-        root@5f8630b873fe:/go/src/github.com/docker/docker# project/make.sh binary
+        root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh binary
 
-    You only call `project/make.sh` to build a binary _inside_ a Docker
+    You only call `hack/make.sh` to build a binary _inside_ a Docker
     development container as you are now. On your host, you'll use `make`
     commands (more about this later).
 
@@ -133,7 +133,7 @@ Run the entire test suite on your current repository:
 ### Run test targets inside the development container
 
 If you are working inside a Docker development container, you use the
-`project/make.sh` script to run tests. The `project/make.sh` script doesn't
+`hack/make.sh` script to run tests. The `hack/make.sh` script doesn't
 have a single target that runs all the tests. Instead, you provide a single
 command line with multiple targets that does the same thing.
 
@@ -148,9 +148,9 @@ Try this now.
 
        $ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker dry-run-test /bin/bash
 
-3. Run the tests using the `project/make.sh` script.
+3. Run the tests using the `hack/make.sh` script.
 
-        root@5f8630b873fe:/go/src/github.com/docker/docker# project/make.sh dynbinary binary cross test-unit test-integration test-integration-cli test-docker-py
+        root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit test-integration test-integration-cli test-docker-py
 
 The tests run just as they did within your local host.
 
@@ -158,7 +158,7 @@ Try this now.
 Of course, you can also run a subset of these targets too. For example, to run
 just the unit tests:
 
-    root@5f8630b873fe:/go/src/github.com/docker/docker# project/make.sh dynbinary binary cross test-unit
+    root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit
 
 Most test targets require that you build these precursor targets first:
 `dynbinary binary cross`
@@ -174,7 +174,7 @@ your local host you can run the `TestBuild` test with this command:
 
 To run the same test inside your Docker development container, you do this:
 
-    root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-run ^TestBuild$' project/make.sh
+    root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-run ^TestBuild$' hack/make.sh
 
 ## If tests under Boot2Docker fail due to space errors
 
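As a side note on the workflow documented above: `TESTFLAGS` composes with the `TESTDIRS` variable that the `test-unit` bundlescript (added later in this diff) reads from the environment. A hedged sketch — the package path and test name here are purely illustrative:

```
# run only the tests matching ^TestParse$ in a single package;
# TESTDIRS limits which directories test-unit compiles and runs
root@5f8630b873fe:/go/src/github.com/docker/docker# TESTDIRS='./pkg/mflag' TESTFLAGS='-run ^TestParse$' hack/make.sh test-unit
```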
new file mode 100755
@@ -0,0 +1,88 @@
+#!/bin/bash
+set -e
+
+# DinD: a wrapper script which allows docker to be run inside a docker container.
+# Original version by Jerome Petazzoni <jerome@docker.com>
+# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/
+#
+# This script should be executed inside a docker container in privileged mode
+# ('docker run --privileged', introduced in docker 0.6).
+
+# Usage: dind CMD [ARG...]
+
+# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
+export container=docker
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/cgroup
+
+mkdir -p "$CGROUP"
+
+if ! mountpoint -q "$CGROUP"; then
+	mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
+		echo >&2 'Could not make a tmpfs mount. Did you use --privileged?'
+		exit 1
+	}
+fi
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
+	mount -t securityfs none /sys/kernel/security || {
+		echo >&2 'Could not mount /sys/kernel/security.'
+		echo >&2 'AppArmor detection and -privileged mode might break.'
+	}
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do
+	mkdir -p "$CGROUP/$SUBSYS"
+	if ! mountpoint -q $CGROUP/$SUBSYS; then
+		mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS"
+	fi
+
+	# The two following sections address a bug which manifests itself
+	# by a cryptic "lxc-start: no ns_cgroup option specified" when
+	# trying to start containers within a container.
+	# The bug seems to appear when the cgroup hierarchies are not
+	# mounted on the exact same directories in the host, and in the
+	# container.
+
+	# Named, control-less cgroups are mounted with "-o name=foo"
+	# (and appear as such under /proc/<pid>/cgroup) but are usually
+	# mounted on a directory named "foo" (without the "name=" prefix).
+	# Systemd and OpenRC (and possibly others) both create such a
+	# cgroup. To avoid the aforementioned bug, we symlink "foo" to
+	# "name=foo". This shouldn't have any adverse effect.
+	name="${SUBSYS#name=}"
+	if [ "$name" != "$SUBSYS" ]; then
+		ln -s "$SUBSYS" "$CGROUP/$name"
+	fi
+
+	# Likewise, on at least one system, it has been reported that
+	# systemd would mount the CPU and CPU accounting controllers
+	# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+	# but on a directory called "cpu,cpuacct" (note the inversion
+	# in the order of the groups). This tries to work around it.
+	if [ "$SUBSYS" = 'cpuacct,cpu' ]; then
+		ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct"
+	fi
+done
+
+# Note: as I write those lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+if ! grep -q :devices: /proc/1/cgroup; then
+	echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.'
+fi
+if ! grep -qw devices /proc/1/cgroup; then
+	echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.'
+fi
+
+# Mount /tmp
+mount -t tmpfs none /tmp
+
+if [ $# -gt 0 ]; then
+	exec "$@"
+fi
+
+echo >&2 'ERROR: No command specified.'
+echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
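For context on the `Usage: dind CMD [ARG...]` line above, a typical invocation wraps a build command in the dind entrypoint inside a privileged container. This is a hedged sketch: the `docker` image tag and the `hack/dind` path assume the image built from the repository's Dockerfile and the script's new location under `hack/`:

```
# illustrative: run an in-container build through the dind wrapper
$ docker run --privileged --rm -v `pwd`:/go/src/github.com/docker/docker docker hack/dind hack/make.sh binary
```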
new file mode 100755
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
+
+# see also ".mailmap" for how email addresses and names are deduplicated
+
+{
+	cat <<-'EOH'
+		# This file lists all individuals having contributed content to the repository.
+		# For how it is generated, see `hack/generate-authors.sh`.
+	EOH
+	echo
+	git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
+} > AUTHORS
new file mode 100755
@@ -0,0 +1,225 @@
+#!/bin/sh
+set -e
+#
+# This script is meant for quick & easy install via:
+#   'curl -sSL https://get.docker.com/ | sh'
+# or:
+#   'wget -qO- https://get.docker.com/ | sh'
+#
+#
+# Docker Maintainers:
+#   To update this script on https://get.docker.com,
+#   use hack/release.sh during a normal release,
+#   or the following one-liner for script hotfixes:
+#     s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index
+#
+
+url='https://get.docker.com/'
+
+command_exists() {
+	command -v "$@" > /dev/null 2>&1
+}
+
+case "$(uname -m)" in
+	*64)
+		;;
+	*)
+		echo >&2 'Error: you are not using a 64bit platform.'
+		echo >&2 'Docker currently only supports 64bit platforms.'
+		exit 1
+		;;
+esac
+
+if command_exists docker || command_exists lxc-docker; then
+	echo >&2 'Warning: "docker" or "lxc-docker" command appears to already exist.'
+	echo >&2 'Please ensure that you do not already have docker installed.'
+	echo >&2 'You may press Ctrl+C now to abort this process and rectify this situation.'
+	( set -x; sleep 20 )
+fi
+
+user="$(id -un 2>/dev/null || true)"
+
+sh_c='sh -c'
+if [ "$user" != 'root' ]; then
+	if command_exists sudo; then
+		sh_c='sudo -E sh -c'
+	elif command_exists su; then
+		sh_c='su -c'
+	else
+		echo >&2 'Error: this installer needs the ability to run commands as root.'
+		echo >&2 'We are unable to find either "sudo" or "su" available to make this happen.'
+		exit 1
+	fi
+fi
+
+curl=''
+if command_exists curl; then
+	curl='curl -sSL'
+elif command_exists wget; then
+	curl='wget -qO-'
+elif command_exists busybox && busybox --list-modules | grep -q wget; then
+	curl='busybox wget -qO-'
+fi
+
+# perform some very rudimentary platform detection
+lsb_dist=''
+if command_exists lsb_release; then
+	lsb_dist="$(lsb_release -si)"
+fi
+if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
+	lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
+fi
+if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
+	lsb_dist='debian'
+fi
+if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
+	lsb_dist='fedora'
+fi
+if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
+	lsb_dist="$(. /etc/os-release && echo "$ID")"
+fi
+
+lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
+case "$lsb_dist" in
+	amzn|fedora)
+		if [ "$lsb_dist" = 'amzn' ]; then
+			(
+				set -x
+				$sh_c 'sleep 3; yum -y -q install docker'
+			)
+		else
+			(
+				set -x
+				$sh_c 'sleep 3; yum -y -q install docker-io'
+			)
+		fi
+		if command_exists docker && [ -e /var/run/docker.sock ]; then
+			(
+				set -x
+				$sh_c 'docker version'
+			) || true
+		fi
+		your_user=your-user
+		[ "$user" != 'root' ] && your_user="$user"
+		echo
+		echo 'If you would like to use Docker as a non-root user, you should now consider'
+		echo 'adding your user to the "docker" group with something like:'
+		echo
+		echo '  sudo usermod -aG docker' $your_user
+		echo
+		echo 'Remember that you will have to log out and back in for this to take effect!'
+		echo
+		exit 0
+		;;
+
+	ubuntu|debian|linuxmint)
+		export DEBIAN_FRONTEND=noninteractive
+
+		did_apt_get_update=
+		apt_get_update() {
+			if [ -z "$did_apt_get_update" ]; then
+				( set -x; $sh_c 'sleep 3; apt-get update' )
+				did_apt_get_update=1
+			fi
+		}
+
+		# aufs is preferred over devicemapper; try to ensure the driver is available.
+		if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
+			kern_extras="linux-image-extra-$(uname -r)"
+
+			apt_get_update
+			( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
+
+			if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
+				echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
+				echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!'
+				( set -x; sleep 10 )
+			fi
+		fi
+
+		# install apparmor utils if they're missing and apparmor is enabled in the kernel
+		# otherwise Docker will fail to start
+		if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
+			if command -v apparmor_parser > /dev/null 2>&1; then
+				echo 'apparmor is enabled in the kernel and apparmor utils were already installed'
+			else
+				echo 'apparmor is enabled in the kernel, but apparmor_parser is missing'
+				apt_get_update
+				( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' )
+			fi
+		fi
+
+		if [ ! -e /usr/lib/apt/methods/https ]; then
+			apt_get_update
+			( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
+		fi
+		if [ -z "$curl" ]; then
+			apt_get_update
+			( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
+			curl='curl -sSL'
+		fi
+		(
+			set -x
+			if [ "https://get.docker.com/" = "$url" ]; then
+				$sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9"
+			elif [ "https://test.docker.com/" = "$url" ]; then
+				$sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6"
+			else
+				$sh_c "$curl ${url}gpg | apt-key add -"
+			fi
+			$sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list"
+			$sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker'
+		)
+		if command_exists docker && [ -e /var/run/docker.sock ]; then
+			(
+				set -x
+				$sh_c 'docker version'
+			) || true
+		fi
+		your_user=your-user
+		[ "$user" != 'root' ] && your_user="$user"
+		echo
+		echo 'If you would like to use Docker as a non-root user, you should now consider'
+		echo 'adding your user to the "docker" group with something like:'
+		echo
+		echo '  sudo usermod -aG docker' $your_user
+		echo
+		echo 'Remember that you will have to log out and back in for this to take effect!'
+		echo
+		exit 0
+		;;
+
+	gentoo)
+		if [ "$url" = "https://test.docker.com/" ]; then
+			echo >&2
+			echo >&2 '  You appear to be trying to install the latest nightly build in Gentoo.'
+			echo >&2 '  The portage tree should contain the latest stable release of Docker, but'
+			echo >&2 '  if you want something more recent, you can always use the live ebuild'
+			echo >&2 '  provided in the "docker" overlay available via layman. For more'
+			echo >&2 '  instructions, please see the following URL:'
+			echo >&2 '    https://github.com/tianon/docker-overlay#using-this-overlay'
+			echo >&2 '  After adding the "docker" overlay, you should be able to:'
+			echo >&2 '    emerge -av =app-emulation/docker-9999'
+			echo >&2
+			exit 1
+		fi
+
+		(
+			set -x
+			$sh_c 'sleep 3; emerge app-emulation/docker'
+		)
+		exit 0
+		;;
+esac
+
+cat >&2 <<'EOF'
+
+  Either your platform is not easily detectable, is not supported by this
+  installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
+  a package for Docker. Please visit the following URL for more detailed
+  installation instructions:
+
+    https://docs.docker.com/en/latest/installation/
+
+EOF
+exit 1
new file mode 100755
@@ -0,0 +1,273 @@
+#!/usr/bin/env bash
+set -e
+
+# This script builds various binary artifacts from a checkout of the docker
+# source code.
+#
+# Requirements:
+# - The current directory should be a checkout of the docker source code
+#   (http://github.com/docker/docker). Whatever version is checked out
+#   will be built.
+# - The VERSION file, at the root of the repository, should exist, and
+#   will be used as Docker binary version and package version.
+# - The hash of the git commit will also be included in the Docker binary,
+#   with the suffix -dirty if the repository isn't clean.
+# - The script is intended to be run inside the docker container specified
+#   in the Dockerfile at the root of the source. In other words:
+#   DO NOT CALL THIS SCRIPT DIRECTLY.
+# - The right way to call this script is to invoke "make" from
+#   your checkout of the Docker repository.
+#   the Makefile will do a "docker build -t docker ." and then
+#   "docker run hack/make.sh" in the resulting image.
+#
+
+set -o pipefail
+
+export DOCKER_PKG='github.com/docker/docker'
+
+# We're a nice, sexy, little shell script, and people might try to run us;
+# but really, they shouldn't. We want to be in a container!
+if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then
+	{
+		echo "# WARNING! I don't seem to be running in the Docker container."
+		echo "# The result of this command might be an incorrect build, and will not be"
+		echo "# officially supported."
+		echo "#"
+		echo "# Try this instead: make all"
+		echo "#"
+	} >&2
+fi
+
+echo
+
+# List of bundles to create when no argument is passed
+DEFAULT_BUNDLES=(
+	validate-dco
+	validate-gofmt
+	validate-toml
+
+	binary
+
+	test-unit
+	test-integration-cli
+	test-docker-py
+
+	dynbinary
+	test-integration
+
+	cover
+	cross
+	tgz
+	ubuntu
+)
+
+VERSION=$(cat ./VERSION)
+if command -v git &> /dev/null && git rev-parse &> /dev/null; then
+	GITCOMMIT=$(git rev-parse --short HEAD)
+	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
+		GITCOMMIT="$GITCOMMIT-dirty"
+	fi
+elif [ "$DOCKER_GITCOMMIT" ]; then
+	GITCOMMIT="$DOCKER_GITCOMMIT"
+else
+	echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified'
+	echo >&2 '  Please either build with the .git directory accessible, or specify the'
+	echo >&2 '  exact (--short) commit hash you are building using DOCKER_GITCOMMIT for'
+	echo >&2 '  future accountability in diagnosing build issues. Thanks!'
+	exit 1
+fi
+
+if [ "$AUTO_GOPATH" ]; then
+	rm -rf .gopath
+	mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
+	ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
+	export GOPATH="$(pwd)/.gopath:$(pwd)/vendor"
+fi
+
+if [ ! "$GOPATH" ]; then
+	echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH'
+	echo >&2 '  alternatively, set AUTO_GOPATH=1'
+	exit 1
+fi
+
+if [ -z "$DOCKER_CLIENTONLY" ]; then
+	DOCKER_BUILDTAGS+=" daemon"
+fi
+
+if [ "$DOCKER_EXECDRIVER" = 'lxc' ]; then
+	DOCKER_BUILDTAGS+=' test_no_exec'
+fi
+
+# Use these flags when compiling the tests and final binary
+
+IAMSTATIC='true'
+source "$(dirname "$BASH_SOURCE")/make/.go-autogen"
+LDFLAGS='-w'
+
+LDFLAGS_STATIC='-linkmode external'
+# Cgo -H windows is incompatible with -linkmode external.
+if [ "$(go env GOOS)" == 'windows' ]; then
+	LDFLAGS_STATIC=''
+fi
+EXTLDFLAGS_STATIC='-static'
+# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build
+# with options like -race.
+ORIG_BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo )
+# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here
+BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )
+# Test timeout.
+: ${TIMEOUT:=30m}
+TESTFLAGS+=" -test.timeout=${TIMEOUT}"
+
+# A few more flags that are specific just to building a completely-static binary (see hack/make/binary)
+# PLEASE do not use these anywhere else.
+EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files"
+LDFLAGS_STATIC_DOCKER="
+	$LDFLAGS_STATIC
+	-extldflags \"$EXTLDFLAGS_STATIC_DOCKER\"
+"
+
+if [ "$(uname -s)" = 'FreeBSD' ]; then
+	# Tell cgo the compiler is Clang, not GCC
+	# https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752
+	export CC=clang
+
+	# "-extld clang" is a workaround for
+	# https://code.google.com/p/go/issues/detail?id=6845
+	LDFLAGS="$LDFLAGS -extld clang"
+fi
+
+# If sqlite3.h doesn't exist under /usr/include,
+# check /usr/local/include also just in case
+# (e.g. FreeBSD Ports installs it under the directory)
+if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then
+	export CGO_CFLAGS='-I/usr/local/include'
+	export CGO_LDFLAGS='-L/usr/local/lib'
+fi
+
+HAVE_GO_TEST_COVER=
+if \
+	go help testflag | grep -- -cover > /dev/null \
+	&& go tool -n cover > /dev/null 2>&1 \
+; then
+	HAVE_GO_TEST_COVER=1
+fi
+
+# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
+# You can use this to select certain tests to run, eg.
+#
+#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test
+#
+go_test_dir() {
+	dir=$1
+	coverpkg=$2
+	testcover=()
+	if [ "$HAVE_GO_TEST_COVER" ]; then
+		# if our current go install has -cover, we want to use it :)
+		mkdir -p "$DEST/coverprofiles"
+		coverprofile="docker${dir#.}"
+		coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
+		testcover=( -cover -coverprofile "$coverprofile" $coverpkg )
+	fi
+	(
+		export DEST
+		echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
+		cd "$dir"
+		test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS
+	)
+}
+test_env() {
+	# use "env -i" to tightly control the environment variables that bleed into the tests
+	env -i \
+		DEST="$DEST" \
+		DOCKER_EXECDRIVER="$DOCKER_EXECDRIVER" \
+		DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \
+		DOCKER_HOST="$DOCKER_HOST" \
+		GOPATH="$GOPATH" \
+		HOME="$DEST/fake-HOME" \
+		PATH="$PATH" \
+		TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \
+		"$@"
+}
+
+# a helper to provide ".exe" when it's appropriate
+binary_extension() {
+	if [ "$(go env GOOS)" = 'windows' ]; then
+		echo -n '.exe'
+	fi
+}
+
+# This helper function walks the current directory looking for directories
+# holding certain files ($1 parameter), and prints their paths on standard
+# output, one per line.
+find_dirs() {
+	find . -not \( \
+		\( \
+			-path './vendor/*' \
+			-o -path './integration/*' \
+			-o -path './integration-cli/*' \
+			-o -path './contrib/*' \
+			-o -path './pkg/mflag/example/*' \
+			-o -path './.git/*' \
+			-o -path './bundles/*' \
+			-o -path './docs/*' \
+			-o -path './pkg/libcontainer/nsinit/*' \
+		\) \
+		-prune \
+	\) -name "$1" -print0 | xargs -0n1 dirname | sort -u
+}
+
+hash_files() {
+	while [ $# -gt 0 ]; do
+		f="$1"
+		shift
+		dir="$(dirname "$f")"
+		base="$(basename "$f")"
+		for hashAlgo in md5 sha256; do
+			if command -v "${hashAlgo}sum" &> /dev/null; then
+				(
+					# subshell and cd so that we get output files like:
+					#   $HASH docker-$VERSION
+					# instead of:
+					#   $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
+					cd "$dir"
+					"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
+				)
+			fi
+		done
+	done
+}
+
+bundle() {
+	bundlescript=$1
+	bundle=$(basename $bundlescript)
+	echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)"
+	mkdir -p bundles/$VERSION/$bundle
+	source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle"
+}
+
+main() {
+	# We want this to fail if the bundles already exist and cannot be removed.
+	# This is to avoid mixing bundles from different versions of the code.
+	mkdir -p bundles
+	if [ -e "bundles/$VERSION" ]; then
+		echo "bundles/$VERSION already exists. Removing."
+		rm -fr bundles/$VERSION && mkdir bundles/$VERSION || exit 1
+		echo
+	fi
+	SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+	if [ $# -lt 1 ]; then
+		bundles=(${DEFAULT_BUNDLES[@]})
+	else
+		bundles=($@)
+	fi
+	for bundle in ${bundles[@]}; do
+		bundle $SCRIPTDIR/make/$bundle
+		echo
+	done
+
+	# if we get all the way through successfully, let's delete our autogenerated code!
+	rm -r autogen
+}
+
+main "$@"
new file mode 100644
@@ -0,0 +1,32 @@
+#!/bin/bash
+set -e
+
+IAMSTATIC="true"
+source "$(dirname "$BASH_SOURCE")/.go-autogen"
+
+# dockerinit still needs to be a static binary, even if docker is dynamic
+go build \
+	-o "$DEST/dockerinit-$VERSION" \
+	"${BUILDFLAGS[@]}" \
+	-ldflags "
+		$LDFLAGS
+		$LDFLAGS_STATIC
+		-extldflags \"$EXTLDFLAGS_STATIC\"
+	" \
+	./dockerinit
+echo "Created binary: $DEST/dockerinit-$VERSION"
+ln -sf "dockerinit-$VERSION" "$DEST/dockerinit"
+
+sha1sum=
+if command -v sha1sum &> /dev/null; then
+	sha1sum=sha1sum
+elif command -v shasum &> /dev/null; then
+	# Mac OS X - why couldn't they just use the same command name and be happy?
+	sha1sum=shasum
+else
+	echo >&2 'error: cannot find sha1sum command or equivalent'
+	exit 1
+fi
+
+# sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another
+export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)"
new file mode 100644
@@ -0,0 +1,23 @@
+#!/bin/bash
+set -e
+
+if ! docker inspect emptyfs &> /dev/null; then
+	# let's build a "docker save" tarball for "emptyfs"
+	# see https://github.com/docker/docker/pull/5262
+	# and also https://github.com/docker/docker/issues/4242
+	dir="$DEST/emptyfs"
+	mkdir -p "$dir"
+	(
+		cd "$dir"
+		echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories
+		mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158
+		(
+			cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158
+			echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json
+			echo '1.0' > VERSION
+			tar -cf layer.tar --files-from /dev/null
+		)
+	)
+	( set -x; tar -cC "$dir" . | docker load )
+	rm -rf "$dir"
+fi
new file mode 100644
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -e
+
+# this list should match roughly what's in the Dockerfile (minus the explicit image IDs, of course)
+images=(
+	busybox:latest
+	hello-world:latest
+)
+
+if ! docker inspect "${images[@]}" &> /dev/null; then
+	hardCodedDir='/docker-frozen-images'
+	if [ -d "$hardCodedDir" ]; then
+		( set -x; tar -cC "$hardCodedDir" . | docker load )
+	elif [ -e Dockerfile ] && command -v curl > /dev/null; then
+		# testing for "curl" because "download-frozen-image.sh" is built around curl
+		dir="$DEST/frozen-images"
+		# extract the exact "RUN download-frozen-image.sh" line from the Dockerfile itself for consistency
+		awk '
+			$1 == "RUN" && $2 == "./contrib/download-frozen-image.sh" {
+				for (i = 2; i < NF; i++)
+					printf ( $i == "'"$hardCodedDir"'" ? "'"$dir"'" : $i ) " ";
+				print $NF;
+				if (/\\$/) {
+					inCont = 1;
+					next;
+				}
+			}
+			inCont {
+				print;
+				if (!/\\$/) {
+					inCont = 0;
+				}
+			}
+		' Dockerfile | sh -x
+		( set -x; tar -cC "$dir" . | docker load )
+	else
+		for image in "${images[@]}"; do
+			if ! docker inspect "$image" &> /dev/null; then
+				( set -x; docker pull "$image" )
+			fi
+		done
+	fi
+fi
new file mode 100644
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+# Build a Go static web server on top of busybox image
+# and compile it for target daemon
+
+dir="$DEST/httpserver"
+mkdir -p "$dir"
+(
+	cd "$dir"
+	GOOS=linux GOARCH=amd64 go build -o httpserver github.com/docker/docker/contrib/httpserver
+	cp ../../../../contrib/httpserver/Dockerfile .
+	docker build -qt httpserver . > /dev/null
+)
+rm -rf "$dir"
new file mode 100644
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+rm -rf autogen
+
+mkdir -p autogen/dockerversion
+cat > autogen/dockerversion/dockerversion.go <<EOF
+// AUTOGENERATED FILE; see $BASH_SOURCE
+package dockerversion
+
+var (
+	GITCOMMIT string = "$GITCOMMIT"
+	VERSION   string = "$VERSION"
+
+	IAMSTATIC string = "${IAMSTATIC:-true}"
+	INITSHA1  string = "$DOCKER_INITSHA1"
+	INITPATH  string = "$DOCKER_INITPATH"
+)
+EOF
new file mode 100755
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -e
+
+# Compile phase run by parallel in test-unit. No support for coverpkg
+
+dir=$1
+in_file="$dir/$(basename "$dir").test"
+out_file="$DEST/precompiled/$dir.test"
+# we want to use binary_extension() here, but we can't because it's in main.sh and this file gets re-execed
+if [ "$(go env GOOS)" = 'windows' ]; then
+	in_file+='.exe'
+	out_file+='.exe'
+fi
+testcover=()
+if [ "$HAVE_GO_TEST_COVER" ]; then
+	# if our current go install has -cover, we want to use it :)
+	mkdir -p "$DEST/coverprofiles"
+	coverprofile="docker${dir#.}"
+	coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
+	testcover=( -cover -coverprofile "$coverprofile" ) # missing $coverpkg
+fi
+if [ "$BUILDFLAGS_FILE" ]; then
+	readarray -t BUILDFLAGS < "$BUILDFLAGS_FILE"
+fi
+
+if ! (
+	cd "$dir"
+	go test "${testcover[@]}" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS -c
+); then
+	exit 1
+fi
+
+mkdir -p "$(dirname "$out_file")"
+mv "$in_file" "$out_file"
+echo "Precompiled: ${DOCKER_PKG}${dir#.}"
new file mode 100644
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# see test-integration-cli for example usage of this script
+
+export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH"
+
+if ! command -v docker &> /dev/null; then
+	echo >&2 'error: binary or dynbinary must be run before .integration-daemon-start'
+	false
+fi
+
+# intentionally open a couple of bogus file descriptors to help test that they get scrubbed in containers
+exec 41>&1 42>&2
+
+export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs}
+export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native}
+
+if [ -z "$DOCKER_TEST_HOST" ]; then
+	export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one
+	( set -x; exec \
+		docker --daemon --debug \
+			--host "$DOCKER_HOST" \
+			--storage-driver "$DOCKER_GRAPHDRIVER" \
+			--exec-driver "$DOCKER_EXECDRIVER" \
+			--pidfile "$DEST/docker.pid" \
+				&> "$DEST/docker.log"
+	) &
+else
+	export DOCKER_HOST="$DOCKER_TEST_HOST"
+fi
+
+# give it a second to come up so it's "ready"
+tries=10
+while ! docker version &> /dev/null; do
+	(( tries-- ))
+	if [ $tries -le 0 ]; then
+		if [ -z "$DOCKER_HOST" ]; then
+			echo >&2 "error: daemon failed to start"
+			echo >&2 "  check $DEST/docker.log for details"
+		else
+			echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':"
+			docker version >&2 || true
+		fi
+		false
+	fi
+	sleep 2
+done
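Note that `.integration-daemon-start` honors `$DOCKER_TEST_HOST`, so the integration bundles can target an already-running daemon instead of spawning a fresh one. A minimal usage sketch, assuming the daemon address is illustrative:

```
# point the test bundles at an external daemon rather than starting one locally
$ DOCKER_TEST_HOST='tcp://127.0.0.1:2375' hack/make.sh binary test-integration-cli
```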
new file mode 100644
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+if [ -z "$VALIDATE_UPSTREAM" ]; then
+	# this is kind of an expensive check, so let's not do this twice if we
+	# are running more than one validate bundlescript
+
+	VALIDATE_REPO='https://github.com/docker/docker.git'
+	VALIDATE_BRANCH='master'
+
+	if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
+		VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
+		VALIDATE_BRANCH="${TRAVIS_BRANCH}"
+	fi
+
+	VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
+
+	git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
+	VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
+
+	VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD"
+	VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
+
+	validate_diff() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git diff "$VALIDATE_COMMIT_DIFF" "$@"
+		fi
+	}
+	validate_log() {
+		if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
+			git log "$VALIDATE_COMMIT_LOG" "$@"
+		fi
+	}
+fi
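The `validate_diff`/`validate_log` helpers above are what the `validate-*` bundles in `DEFAULT_BUNDLES` build on. A hedged sketch of such a bundlescript, assuming a gofmt-style check (this is illustrative, not the actual `validate-gofmt` from the tree):

```
#!/bin/bash

source "$(dirname "$BASH_SOURCE")/.validate"

# only look at Go files changed relative to upstream, skipping vendored code
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )

badFiles=()
for f in "${files[@]}"; do
	# gofmt -l prints the file name if it would reformat the file
	if [ "$(gofmt -l "$f")" ]; then
		badFiles+=( "$f" )
	fi
done

if [ ${#badFiles[@]} -eq 0 ]; then
	echo 'All changed Go files are properly formatted.'
else
	{
		echo 'These files are not properly gofmt'\''d:'
		for f in "${badFiles[@]}"; do
			echo " - $f"
		done
	} >&2
	false
fi
```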
new file mode 100644
@@ -0,0 +1,17 @@
+This directory holds scripts called by `make.sh` in the parent directory.
+
+Each script is named after the bundle it creates.
+They should not be called directly - instead, pass one as an argument to `make.sh`, for example:
+
+```
+./hack/make.sh test
+./hack/make.sh binary ubuntu
+
+# Or to run all bundles:
+./hack/make.sh
+```
+
+To add a bundle:
+
+* Create a shell-compatible file here
+* Add it to $DEFAULT_BUNDLES in make.sh
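To make the bundle contract above concrete: `make.sh`'s `bundle()` sources each script with its output directory as the first argument. A minimal sketch of a hypothetical bundle (the name `example` is made up; `$VERSION` and `$GITCOMMIT` come from `make.sh`):

```
#!/bin/bash
set -e

DEST=$1 # make.sh passes bundles/$VERSION/example here

# drop a trivial artifact into the bundle directory
echo "docker $VERSION ($GITCOMMIT)" > "$DEST/example.txt"
echo "Created bundle artifact: $DEST/example.txt"
```

It would then be enabled by listing `example` in `DEFAULT_BUNDLES` or invoking `./hack/make.sh example` directly.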
new file mode 100755
@@ -0,0 +1,27 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+BINARY_NAME="docker-$VERSION"
+BINARY_EXTENSION="$(binary_extension)"
+BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
+
+# Cygdrive paths don't play well with go build -o.
+if [[ "$(uname -s)" == CYGWIN* ]]; then
+	DEST=$(cygpath -mw $DEST)
+fi
+
+source "$(dirname "$BASH_SOURCE")/.go-autogen"
+
+go build \
+	-o "$DEST/$BINARY_FULLNAME" \
+	"${BUILDFLAGS[@]}" \
+	-ldflags "
+		$LDFLAGS
+		$LDFLAGS_STATIC_DOCKER
+	" \
+	./docker
+echo "Created binary: $DEST/$BINARY_FULLNAME"
+ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION"
+
+hash_files "$DEST/$BINARY_FULLNAME"
new file mode 100644
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+DEST="$1"
+
+bundle_cover() {
+	coverprofiles=( "$DEST/../"*"/coverprofiles/"* )
+	for p in "${coverprofiles[@]}"; do
+		echo
+		(
+			set -x
+			go tool cover -func="$p"
+		)
+	done
+}
+
+if [ "$HAVE_GO_TEST_COVER" ]; then
+	bundle_cover 2>&1 | tee "$DEST/report.log"
+else
+	echo >&2 'warning: the current version of go does not support -cover'
+	echo >&2 '  skipping test coverage report'
+fi
new file mode 100644
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+# explicit list of os/arch combos that support being a daemon
+declare -A daemonSupporting
+daemonSupporting=(
+	[linux/amd64]=1
+)
+
+# if we have our linux/amd64 version compiled, let's symlink it in
+if [ -x "$DEST/../binary/docker-$VERSION" ]; then
+	mkdir -p "$DEST/linux/amd64"
+	(
+		cd "$DEST/linux/amd64"
+		ln -s ../../../binary/* ./
+	)
+	echo "Created symlinks:" "$DEST/linux/amd64/"*
+fi
+
+for platform in $DOCKER_CROSSPLATFORMS; do
+	(
+		mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
+		export GOOS=${platform%/*}
+		export GOARCH=${platform##*/}
+		if [ -z "${daemonSupporting[$platform]}" ]; then
+			export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
+			export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
+		fi
+		source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform"
+	)
+done
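The loop above consumes `$DOCKER_CROSSPLATFORMS` as whitespace-separated `GOOS/GOARCH` pairs. A hedged usage sketch — the platform list is illustrative, and `binary` must run first so the linux/amd64 symlinks exist:

```
$ DOCKER_CROSSPLATFORMS='linux/386 darwin/amd64 windows/amd64' hack/make.sh binary cross
```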
new file mode 100644
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+if [ -z "$DOCKER_CLIENTONLY" ]; then
+	source "$(dirname "$BASH_SOURCE")/.dockerinit"
+
+	hash_files "$DEST/dockerinit-$VERSION"
+else
+	# DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :)
+	export DOCKER_INITSHA1=""
+fi
+# DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it
+
+(
+	export IAMSTATIC="false"
+	export LDFLAGS_STATIC_DOCKER=''
+	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
+	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
+	source "$(dirname "$BASH_SOURCE")/binary"
+)
new file mode 100644
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+# subshell so that we can export PATH without breaking other things
+(
+	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
+
+	# we need to wrap up everything in between integration-daemon-start and
+	# integration-daemon-stop to make sure we kill the daemon and don't hang,
+	# even and especially on test failures
+	didFail=
+	if ! {
+		dockerPy='/docker-py'
+		[ -d "$dockerPy" ] || {
+			dockerPy="$DEST/docker-py"
+			git clone https://github.com/docker/docker-py.git "$dockerPy"
+		}
+
+		# exporting PYTHONPATH to import "docker" from our local docker-py
+		test_env PYTHONPATH="$dockerPy" python "$dockerPy/tests/integration_test.py"
+	}; then
+		didFail=1
+	fi
+
+	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
+
+	[ -z "$didFail" ] # "set -e" ftw
+) 2>&1 | tee -a $DEST/test.log
new file mode 100644
@@ -0,0 +1,25 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+INIT=$DEST/../dynbinary/dockerinit-$VERSION
+[ -x "$INIT" ] || {
+	source "$(dirname "$BASH_SOURCE")/.dockerinit"
+	INIT="$DEST/dockerinit"
+}
+export TEST_DOCKERINIT_PATH="$INIT"
+
+bundle_test_integration() {
+	LDFLAGS="
+		$LDFLAGS
+		-X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\"
+	" go_test_dir ./integration \
+		"-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)"
+}
+
+# this "grep" hides some really irritating warnings that "go test -coverpkg"
+# spews when it is given packages that aren't used
+bundle_test_integration 2>&1 \
+	| grep --line-buffered -v '^warning: no packages being tested depend on ' \
+	| tee -a $DEST/test.log
new file mode 100644
@@ -0,0 +1,31 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+
+bundle_test_integration_cli() {
+	go_test_dir ./integration-cli
+}
+
+# subshell so that we can export PATH without breaking other things
+(
+	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
+
+	# we need to wrap up everything in between integration-daemon-start and
+	# integration-daemon-stop to make sure we kill the daemon and don't hang,
+	# even and especially on test failures
+	didFail=
+	if ! {
+		source "$(dirname "$BASH_SOURCE")/.ensure-frozen-images"
+		source "$(dirname "$BASH_SOURCE")/.ensure-httpserver"
+		source "$(dirname "$BASH_SOURCE")/.ensure-emptyfs"
+
+		bundle_test_integration_cli
+	}; then
+		didFail=1
+	fi
+
+	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
+
+	[ -z "$didFail" ] # "set -e" ftw
+) 2>&1 | tee -a $DEST/test.log
new file mode 100644
@@ -0,0 +1,88 @@
+#!/bin/bash
+set -e
+
+DEST=$1
+: ${PARALLEL_JOBS:=$(nproc 2>/dev/null || echo 1)} # if nproc fails (usually because we don't have it), let's not parallelize by default
+
+RED=$'\033[31m'
+GREEN=$'\033[32m'
+TEXTRESET=$'\033[0m' # reset the foreground colour
+
+# Run Docker's test suite, including sub-packages, and store their output as a bundle
+# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'.
+# You can use this to select certain tests to run, eg.
+#
+#   TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit
+#
+bundle_test_unit() {
+	{
+		date
+
+		# Run all the tests if no TESTDIRS were specified.
+		if [ -z "$TESTDIRS" ]; then
+			TESTDIRS=$(find_dirs '*_test.go')
+		fi
+		(
+			export LDFLAGS
+			export TESTFLAGS
+			export HAVE_GO_TEST_COVER
+			export DEST
+
+			# some hack to export array variables
+			export BUILDFLAGS_FILE="buildflags_file"
+			( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE"
+
+			if command -v parallel &> /dev/null; then
+				# accommodate parallel to be able to access variables
+				export SHELL="$BASH"
+				export HOME="$(mktemp -d)"
+				mkdir -p "$HOME/.parallel"
+				touch "$HOME/.parallel/ignored_vars"
+
+				echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir"
+				rm -rf "$HOME"
+			else
+				# aww, no "parallel" available - fall back to boring
+				for test_dir in $TESTDIRS; do
+					"$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" || true
+					# don't let one directory that fails to build tank _all_ our tests!
+				done
+			fi
+			rm -f "$BUILDFLAGS_FILE"
+		)
+		echo "$TESTDIRS" | go_run_test_dir
+	}
+}
+
+go_run_test_dir() {
+	TESTS_FAILED=()
+	while read dir; do
+		echo
+		echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
+		precompiled="$DEST/precompiled/$dir.test$(binary_extension)"
+		if ! ( cd "$dir" && test_env "$precompiled" $TESTFLAGS ); then
+			TESTS_FAILED+=("$dir")
+			echo
+			echo "${RED}Tests failed: $dir${TEXTRESET}"
+			sleep 1 # give it a second, so observers watching can take note
+		fi
+	done
+
+	echo
+	echo
+	echo
+
+	# if some tests fail, we want the bundlescript to fail, but we want to
+	# try running ALL the tests first, hence TESTS_FAILED
+	if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
+		echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
+		echo
+		false
+	else
+		echo "${GREEN}Test success${TEXTRESET}"
+		echo
+		true
+	fi
+}
+
+bundle_test_unit 2>&1 | tee -a $DEST/test.log
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,34 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+ |
|
| 2 |
+DEST="$1" |
|
| 3 |
+CROSS="$DEST/../cross" |
|
| 4 |
+ |
|
| 5 |
+set -e |
|
| 6 |
+ |
|
| 7 |
+if [ ! -d "$CROSS/linux/amd64" ]; then |
|
| 8 |
+ echo >&2 'error: binary and cross must be run before tgz' |
|
| 9 |
+ false |
|
| 10 |
+fi |
|
| 11 |
+ |
|
| 12 |
+for d in "$CROSS/"*/*; do |
|
| 13 |
+ GOARCH="$(basename "$d")" |
|
| 14 |
+ GOOS="$(basename "$(dirname "$d")")" |
|
| 15 |
+ BINARY_NAME="docker-$VERSION" |
|
| 16 |
+ BINARY_EXTENSION="$(export GOOS && binary_extension)" |
|
| 17 |
+ BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" |
|
| 18 |
+ mkdir -p "$DEST/$GOOS/$GOARCH" |
|
| 19 |
+ TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz" |
|
| 20 |
+ |
|
| 21 |
+ mkdir -p "$DEST/build" |
|
| 22 |
+ |
|
| 23 |
+ mkdir -p "$DEST/build/usr/local/bin" |
|
| 24 |
+ cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION" |
|
| 25 |
+ |
|
| 26 |
+ tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr |
|
| 27 |
+ |
|
| 28 |
+ hash_files "$TGZ" |
|
| 29 |
+ |
|
| 30 |
+ rm -rf "$DEST/build" |
|
| 31 |
+ |
|
| 32 |
+ echo "Created tgz: $TGZ" |
|
| 33 |
+done |
| 0 | 34 |
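Since each tarball carries a usr/local/bin/docker tree, consuming one (for a hypothetical VERSION=1.5.0 on linux/amd64) amounts to:

    sudo tar -xzf docker-1.5.0.tgz -C /    # lands at /usr/local/bin/docker
    md5sum -c docker-1.5.0.tgz.md5         # hash_files leaves .md5/.sha256 alongside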
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,191 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+ |
|
| 2 |
+DEST=$1 |
|
| 3 |
+ |
|
| 4 |
+PKGVERSION="${VERSION//-/'~'}"
|
|
| 5 |
+# if we have a "-dev" suffix or uncommitted changes in Git, make this package version more complex so it sorts properly |
|
| 6 |
+if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then |
|
| 7 |
+ GIT_UNIX="$(git log -1 --pretty='%at')" |
|
| 8 |
+ GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" |
|
| 9 |
+ GIT_COMMIT="$(git log -1 --pretty='%h')" |
|
| 10 |
+ GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}"
|
|
| 11 |
+ # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' |
|
| 12 |
+ PKGVERSION="$PKGVERSION~$GIT_VERSION" |
|
| 13 |
+fi |
|
| 14 |
+ |
|
| 15 |
+# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false |
|
| 16 |
+# true |
|
| 17 |
+# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false |
|
| 18 |
+# true |
|
| 19 |
+# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false |
|
| 20 |
+# true |
|
| 21 |
+ |
|
| 22 |
+# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a |
|
| 23 |
+ |
|
| 24 |
+PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" |
|
| 25 |
+PACKAGE_URL="http://www.docker.com/" |
|
| 26 |
+PACKAGE_MAINTAINER="support@docker.com" |
|
| 27 |
+PACKAGE_DESCRIPTION="Linux container runtime |
|
| 28 |
+Docker complements LXC with a high-level API which operates at the process |
|
| 29 |
+level. It runs unix processes with strong guarantees of isolation and |
|
| 30 |
+repeatability across servers. |
|
| 31 |
+Docker is a great building block for automating distributed systems: |
|
| 32 |
+large-scale web deployments, database clusters, continuous deployment systems, |
|
| 33 |
+private PaaS, service-oriented architectures, etc." |
|
| 34 |
+PACKAGE_LICENSE="Apache-2.0" |
|
| 35 |
+ |
|
| 36 |
+# Build docker as an ubuntu package using FPM and REPREPRO (sue me). |
|
| 37 |
+# bundle_binary must be called first. |
|
| 38 |
+bundle_ubuntu() {
|
|
| 39 |
+ DIR=$DEST/build |
|
| 40 |
+ |
|
| 41 |
+ # Include our udev rules |
|
| 42 |
+ mkdir -p $DIR/etc/udev/rules.d |
|
| 43 |
+ cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ |
|
| 44 |
+ |
|
| 45 |
+ # Include our init scripts |
|
| 46 |
+ mkdir -p $DIR/etc/init |
|
| 47 |
+ cp contrib/init/upstart/docker.conf $DIR/etc/init/ |
|
| 48 |
+ mkdir -p $DIR/etc/init.d |
|
| 49 |
+ cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ |
|
| 50 |
+ mkdir -p $DIR/etc/default |
|
| 51 |
+ cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker |
|
| 52 |
+ mkdir -p $DIR/lib/systemd/system |
|
| 53 |
+ cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/
|
|
| 54 |
+ |
|
| 55 |
+ # Include contributed completions |
|
| 56 |
+ mkdir -p $DIR/etc/bash_completion.d |
|
| 57 |
+ cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/ |
|
| 58 |
+ mkdir -p $DIR/usr/share/zsh/vendor-completions |
|
| 59 |
+ cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/ |
|
| 60 |
+ mkdir -p $DIR/etc/fish/completions |
|
| 61 |
+ cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/ |
|
| 62 |
+ |
|
| 63 |
+ # Include contributed man pages |
|
| 64 |
+ docs/man/md2man-all.sh -q |
|
| 65 |
+ manRoot="$DIR/usr/share/man" |
|
| 66 |
+ mkdir -p "$manRoot" |
|
| 67 |
+ for manDir in docs/man/man?; do |
|
| 68 |
+ manBase="$(basename "$manDir")" # "man1" |
|
| 69 |
+ for manFile in "$manDir"/*; do |
|
| 70 |
+ manName="$(basename "$manFile")" # "docker-build.1" |
|
| 71 |
+ mkdir -p "$manRoot/$manBase" |
|
| 72 |
+ gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" |
|
| 73 |
+ done |
|
| 74 |
+ done |
|
| 75 |
+ |
|
| 76 |
+ # Copy the binary |
|
| 77 |
+ # This will fail if the binary bundle hasn't been built |
|
| 78 |
+ mkdir -p $DIR/usr/bin |
|
| 79 |
+ cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker |
|
| 80 |
+ |
|
| 81 |
+ # Generate postinst/prerm/postrm scripts |
|
| 82 |
+ cat > $DEST/postinst <<'EOF' |
|
| 83 |
+#!/bin/sh |
|
| 84 |
+set -e |
|
| 85 |
+set -u |
|
| 86 |
+ |
|
| 87 |
+if [ "$1" = 'configure' ] && [ -z "$2" ]; then |
|
| 88 |
+ if ! getent group docker > /dev/null; then |
|
| 89 |
+ groupadd --system docker |
|
| 90 |
+ fi |
|
| 91 |
+fi |
|
| 92 |
+ |
|
| 93 |
+if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then
|
|
| 94 |
+ # we only need to do this if upstart isn't in charge |
|
| 95 |
+ update-rc.d docker defaults > /dev/null || true |
|
| 96 |
+fi |
|
| 97 |
+if [ -n "$2" ]; then |
|
| 98 |
+ _dh_action=restart |
|
| 99 |
+else |
|
| 100 |
+ _dh_action=start |
|
| 101 |
+fi |
|
| 102 |
+service docker $_dh_action 2>/dev/null || true |
|
| 103 |
+ |
|
| 104 |
+#DEBHELPER# |
|
| 105 |
+EOF |
|
| 106 |
+ cat > $DEST/prerm <<'EOF' |
|
| 107 |
+#!/bin/sh |
|
| 108 |
+set -e |
|
| 109 |
+set -u |
|
| 110 |
+ |
|
| 111 |
+service docker stop 2>/dev/null || true |
|
| 112 |
+ |
|
| 113 |
+#DEBHELPER# |
|
| 114 |
+EOF |
|
| 115 |
+ cat > $DEST/postrm <<'EOF' |
|
| 116 |
+#!/bin/sh |
|
| 117 |
+set -e |
|
| 118 |
+set -u |
|
| 119 |
+ |
|
| 120 |
+if [ "$1" = "purge" ] ; then |
|
| 121 |
+ update-rc.d docker remove > /dev/null || true |
|
| 122 |
+fi |
|
| 123 |
+ |
|
| 124 |
+# In case this system is running systemd, we make systemd reload the unit files |
|
| 125 |
+# to pick up changes. |
|
| 126 |
+if [ -d /run/systemd/system ] ; then |
|
| 127 |
+ systemctl --system daemon-reload > /dev/null || true |
|
| 128 |
+fi |
|
| 129 |
+ |
|
| 130 |
+#DEBHELPER# |
|
| 131 |
+EOF |
|
| 132 |
+ # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way |
|
| 133 |
+ chmod +x $DEST/postinst $DEST/prerm $DEST/postrm |
|
| 134 |
+ |
|
| 135 |
+ ( |
|
| 136 |
+ # switch directories so we create *.deb in the right folder |
|
| 137 |
+ cd $DEST |
|
| 138 |
+ |
|
| 139 |
+ # create lxc-docker-VERSION package |
|
| 140 |
+ fpm -s dir -C $DIR \ |
|
| 141 |
+ --name lxc-docker-$VERSION --version "$PKGVERSION" \ |
|
| 142 |
+ --after-install $DEST/postinst \ |
|
| 143 |
+ --before-remove $DEST/prerm \ |
|
| 144 |
+ --after-remove $DEST/postrm \ |
|
| 145 |
+ --architecture "$PACKAGE_ARCHITECTURE" \ |
|
| 146 |
+ --prefix / \ |
|
| 147 |
+ --depends iptables \ |
|
| 148 |
+ --deb-recommends aufs-tools \ |
|
| 149 |
+ --deb-recommends ca-certificates \ |
|
| 150 |
+ --deb-recommends git \ |
|
| 151 |
+ --deb-recommends xz-utils \ |
|
| 152 |
+ --deb-recommends 'cgroupfs-mount | cgroup-lite' \ |
|
| 153 |
+ --description "$PACKAGE_DESCRIPTION" \ |
|
| 154 |
+ --maintainer "$PACKAGE_MAINTAINER" \ |
|
| 155 |
+ --conflicts docker \ |
|
| 156 |
+ --conflicts docker.io \ |
|
| 157 |
+ --conflicts lxc-docker-virtual-package \ |
|
| 158 |
+ --provides lxc-docker \ |
|
| 159 |
+ --provides lxc-docker-virtual-package \ |
|
| 160 |
+ --replaces lxc-docker \ |
|
| 161 |
+ --replaces lxc-docker-virtual-package \ |
|
| 162 |
+ --url "$PACKAGE_URL" \ |
|
| 163 |
+ --license "$PACKAGE_LICENSE" \ |
|
| 164 |
+ --config-files /etc/udev/rules.d/80-docker.rules \ |
|
| 165 |
+ --config-files /etc/init/docker.conf \ |
|
| 166 |
+ --config-files /etc/init.d/docker \ |
|
| 167 |
+ --config-files /etc/default/docker \ |
|
| 168 |
+ --deb-compression gz \ |
|
| 169 |
+ -t deb . |
|
| 170 |
+ # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available |
|
| 171 |
+ |
|
| 172 |
+ # create empty lxc-docker wrapper package |
|
| 173 |
+ fpm -s empty \ |
|
| 174 |
+ --name lxc-docker --version "$PKGVERSION" \ |
|
| 175 |
+ --architecture "$PACKAGE_ARCHITECTURE" \ |
|
| 176 |
+ --depends lxc-docker-$VERSION \ |
|
| 177 |
+ --description "$PACKAGE_DESCRIPTION" \ |
|
| 178 |
+ --maintainer "$PACKAGE_MAINTAINER" \ |
|
| 179 |
+ --url "$PACKAGE_URL" \ |
|
| 180 |
+ --license "$PACKAGE_LICENSE" \ |
|
| 181 |
+ --deb-compression gz \ |
|
| 182 |
+ -t deb |
|
| 183 |
+ ) |
|
| 184 |
+ |
|
| 185 |
+ # clean up after ourselves so we have a clean output directory |
|
| 186 |
+ rm $DEST/postinst $DEST/prerm $DEST/postrm |
|
| 187 |
+ rm -r $DIR |
|
| 188 |
+} |
|
| 189 |
+ |
|
| 190 |
+bundle_ubuntu |
| 0 | 191 |
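The finished packages land directly in the bundle directory; for a hypothetical clean 1.5.0 amd64 build, a quick sanity check could look like this (fpm's default naming is name_version_arch.deb, so the exact filename varies with PKGVERSION):

    dpkg-deb --info bundles/1.5.0/ubuntu/lxc-docker-1.5.0_1.5.0_amd64.deb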
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,54 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+ |
|
| 2 |
+source "$(dirname "$BASH_SOURCE")/.validate" |
|
| 3 |
+ |
|
| 4 |
+adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
|
|
| 5 |
+dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
|
|
| 6 |
+#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')"
|
|
| 7 |
+ |
|
| 8 |
+: ${adds:=0}
|
|
| 9 |
+: ${dels:=0}
|
|
| 10 |
+ |
|
| 11 |
+# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" |
|
| 12 |
+githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' |
|
| 13 |
+ |
|
| 14 |
+# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work |
|
| 15 |
+dcoPrefix='Signed-off-by:' |
|
| 16 |
+dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" |
|
| 17 |
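For illustration, a commit message trailer that this regex accepts (both the Docker-DCO-1.1- prefix and the github suffix are optional groups):

    echo 'Signed-off-by: Jane Doe <jane@example.com> (github: janedoe)' \
        | grep -qE "$dcoRegex" && echo 'matches'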
+ |
|
| 18 |
+check_dco() {
|
|
| 19 |
+ grep -qE "$dcoRegex" |
|
| 20 |
+} |
|
| 21 |
+ |
|
| 22 |
+if [ $adds -eq 0 -a $dels -eq 0 ]; then |
|
| 23 |
+ echo '0 adds, 0 deletions; nothing to validate! :)' |
|
| 24 |
+else |
|
| 25 |
+ commits=( $(validate_log --format='format:%H%n') ) |
|
| 26 |
+ badCommits=() |
|
| 27 |
+ for commit in "${commits[@]}"; do
|
|
| 28 |
+ if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then |
|
| 29 |
+ # no content (ie, Merge commit, etc) |
|
| 30 |
+ continue |
|
| 31 |
+ fi |
|
| 32 |
+ if ! git log -1 --format='format:%B' "$commit" | check_dco; then |
|
| 33 |
+ badCommits+=( "$commit" ) |
|
| 34 |
+ fi |
|
| 35 |
+ done |
|
| 36 |
+ if [ ${#badCommits[@]} -eq 0 ]; then
|
|
| 37 |
+ echo "Congratulations! All commits are properly signed with the DCO!" |
|
| 38 |
+ else |
|
| 39 |
+ {
|
|
| 40 |
+ echo "These commits do not have a proper '$dcoPrefix' marker:" |
|
| 41 |
+ for commit in "${badCommits[@]}"; do
|
|
| 42 |
+ echo " - $commit" |
|
| 43 |
+ done |
|
| 44 |
+ echo |
|
| 45 |
+ echo 'Please amend each commit to include a properly formatted DCO marker.' |
|
| 46 |
+ echo |
|
| 47 |
+ echo 'Visit the following URL for information about the Docker DCO:' |
|
| 48 |
+ echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' |
|
| 49 |
+ echo |
|
| 50 |
+ } >&2 |
|
| 51 |
+ false |
|
| 52 |
+ fi |
|
| 53 |
+fi |
| 0 | 54 |
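For anyone bitten by this check, the usual fix is git's built-in sign-off flag:

    # add a properly formatted Signed-off-by trailer to the most recent commit
    git commit --amend -s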
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,30 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+ |
|
| 2 |
+source "$(dirname "$BASH_SOURCE")/.validate" |
|
| 3 |
+ |
|
| 4 |
+IFS=$'\n' |
|
| 5 |
+files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) |
|
| 6 |
+unset IFS |
|
| 7 |
+ |
|
| 8 |
+badFiles=() |
|
| 9 |
+for f in "${files[@]}"; do
|
|
| 10 |
+ # we use "git show" here to validate that what's committed is formatted |
|
| 11 |
+ if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then |
|
| 12 |
+ badFiles+=( "$f" ) |
|
| 13 |
+ fi |
|
| 14 |
+done |
|
| 15 |
+ |
|
| 16 |
+if [ ${#badFiles[@]} -eq 0 ]; then
|
|
| 17 |
+ echo 'Congratulations! All Go source files are properly formatted.' |
|
| 18 |
+else |
|
| 19 |
+ {
|
|
| 20 |
+ echo "These files are not properly gofmt'd:" |
|
| 21 |
+ for f in "${badFiles[@]}"; do
|
|
| 22 |
+ echo " - $f" |
|
| 23 |
+ done |
|
| 24 |
+ echo |
|
| 25 |
+ echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' |
|
| 26 |
+ echo |
|
| 27 |
+ } >&2 |
|
| 28 |
+ false |
|
| 29 |
+fi |
| 0 | 30 |
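Fixing a flagged file is mechanical, e.g.:

    gofmt -s -l .                   # list offenders locally
    gofmt -s -w path/to/file.go     # rewrite in place, then commit the result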
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,30 @@ |
| 0 |
+#!/bin/bash |
|
| 1 |
+ |
|
| 2 |
+source "$(dirname "$BASH_SOURCE")/.validate" |
|
| 3 |
+ |
|
| 4 |
+IFS=$'\n' |
|
| 5 |
+files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) |
|
| 6 |
+unset IFS |
|
| 7 |
+ |
|
| 8 |
+badFiles=() |
|
| 9 |
+for f in "${files[@]}"; do
|
|
| 10 |
+ # we use "git show" here to validate that what's committed is valid toml |
|
| 11 |
+ if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then |
|
| 12 |
+ badFiles+=( "$f" ) |
|
| 13 |
+ fi |
|
| 14 |
+done |
|
| 15 |
+ |
|
| 16 |
+if [ ${#badFiles[@]} -eq 0 ]; then
|
|
| 17 |
+ echo 'Congratulations! All toml source files changed here have valid syntax.' |
|
| 18 |
+else |
|
| 19 |
+ {
|
|
| 20 |
+ echo "These files are not valid toml:" |
|
| 21 |
+ for f in "${badFiles[@]}"; do
|
|
| 22 |
+ echo " - $f" |
|
| 23 |
+ done |
|
| 24 |
+ echo |
|
| 25 |
+ echo 'Please reformat the above files as valid toml.' |
|
| 26 |
+ echo |
|
| 27 |
+ } >&2 |
|
| 28 |
+ false |
|
| 29 |
+fi |
| 0 | 30 |
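tomlv validates named files rather than standard input, hence the /proc/self/fd/0 indirection above: the piped git show output is handed over as a pseudo-file. Bash process substitution would express the same idea, assuming tomlv treats the path opaquely:

    tomlv <(git show "$VALIDATE_HEAD:MAINTAINERS")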
new file mode 100755 |
| ... | ... |
@@ -0,0 +1,389 @@ |
| 0 |
+#!/usr/bin/env bash |
|
| 1 |
+set -e |
|
| 2 |
+ |
|
| 3 |
+# This script looks for bundles built by make.sh, and releases them on a |
|
| 4 |
+# public S3 bucket. |
|
| 5 |
+# |
|
| 6 |
+# Bundles should be available for the VERSION string passed as argument. |
|
| 7 |
+# |
|
| 8 |
+# The correct way to call this script is inside a container built by the |
|
| 9 |
+# official Dockerfile at the root of the Docker source code. The Dockerfile, |
|
| 10 |
+# make.sh and release.sh should all be from the same source code revision. |
|
| 11 |
+ |
|
| 12 |
+set -o pipefail |
|
| 13 |
+ |
|
| 14 |
+# Print a usage message and exit. |
|
| 15 |
+usage() {
|
|
| 16 |
+ cat >&2 <<'EOF' |
|
| 17 |
+To run, I need: |
|
| 18 |
+- to be in a container generated by the Dockerfile at the top of the Docker |
|
| 19 |
+ repository; |
|
| 20 |
+- to be provided with the name of an S3 bucket, in environment variable |
|
| 21 |
+ AWS_S3_BUCKET; |
|
| 22 |
+- to be provided with AWS credentials for this S3 bucket, in environment |
|
| 23 |
+ variables AWS_ACCESS_KEY and AWS_SECRET_KEY; |
|
| 24 |
+- the passphrase to unlock the GPG key which will sign the deb packages |
|
| 25 |
+ (passed as environment variable GPG_PASSPHRASE); |
|
| 26 |
+- a generous amount of good will and nice manners. |
|
| 27 |
+The canonical way to run me is to run the image produced by the Dockerfile, e.g.: |
|
| 28 |
+ |
|
| 29 |
+docker run -e AWS_S3_BUCKET=test.docker.com \ |
|
| 30 |
+ -e AWS_ACCESS_KEY=... \ |
|
| 31 |
+ -e AWS_SECRET_KEY=... \ |
|
| 32 |
+ -e GPG_PASSPHRASE=... \ |
|
| 33 |
+ -i -t --privileged \ |
|
| 34 |
+ docker ./hack/release.sh |
|
| 35 |
+EOF |
|
| 36 |
+ exit 1 |
|
| 37 |
+} |
|
| 38 |
+ |
|
| 39 |
+[ "$AWS_S3_BUCKET" ] || usage |
|
| 40 |
+[ "$AWS_ACCESS_KEY" ] || usage |
|
| 41 |
+[ "$AWS_SECRET_KEY" ] || usage |
|
| 42 |
+[ "$GPG_PASSPHRASE" ] || usage |
|
| 43 |
+[ -d /go/src/github.com/docker/docker ] || usage |
|
| 44 |
+cd /go/src/github.com/docker/docker |
|
| 45 |
+[ -x hack/make.sh ] || usage |
|
| 46 |
+ |
|
| 47 |
+RELEASE_BUNDLES=( |
|
| 48 |
+ binary |
|
| 49 |
+ cross |
|
| 50 |
+ tgz |
|
| 51 |
+ ubuntu |
|
| 52 |
+) |
|
| 53 |
+ |
|
| 54 |
+if [ "$1" != '--release-regardless-of-test-failure' ]; then |
|
| 55 |
+ RELEASE_BUNDLES=( |
|
| 56 |
+ test-unit test-integration |
|
| 57 |
+ "${RELEASE_BUNDLES[@]}"
|
|
| 58 |
+ test-integration-cli |
|
| 59 |
+ ) |
|
| 60 |
+fi |
|
| 61 |
+ |
|
| 62 |
+VERSION=$(cat VERSION) |
|
| 63 |
+BUCKET=$AWS_S3_BUCKET |
|
| 64 |
+ |
|
| 65 |
+# These are the 2 keys we've used to sign the debs: |
|
| 66 |
+# release (get.docker.com) |
|
| 67 |
+# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" |
|
| 68 |
+# test (test.docker.com) |
|
| 69 |
+# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" |
|
| 70 |
+ |
|
| 71 |
+setup_s3() {
|
|
| 72 |
+ # Try creating the bucket. Ignore errors (it might already exist). |
|
| 73 |
+ s3cmd mb s3://$BUCKET 2>/dev/null || true |
|
| 74 |
+ # Check access to the bucket. |
|
| 75 |
+ # s3cmd has no useful exit status, so we cannot check that. |
|
| 76 |
+ # Instead, we check if it outputs anything on standard output. |
|
| 77 |
+ # (When there are problems, it uses standard error instead.) |
|
| 78 |
+ s3cmd info s3://$BUCKET | grep -q . |
|
| 79 |
+ # Make the bucket accessible through website endpoints. |
|
| 80 |
+ s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET |
|
| 81 |
+} |
|
| 82 |
+ |
|
| 83 |
+# write_to_s3 uploads the contents of standard input to the specified S3 url. |
|
| 84 |
+write_to_s3() {
|
|
| 85 |
+ DEST=$1 |
|
| 86 |
+ F=`mktemp` |
|
| 87 |
+ cat > $F |
|
| 88 |
+ s3cmd --acl-public --mime-type='text/plain' put $F $DEST |
|
| 89 |
+ rm -f $F |
|
| 90 |
+} |
|
| 91 |
+ |
|
| 92 |
+s3_url() {
|
|
| 93 |
+ case "$BUCKET" in |
|
| 94 |
+ get.docker.com|test.docker.com) |
|
| 95 |
+ echo "https://$BUCKET" |
|
| 96 |
+ ;; |
|
| 97 |
+ *) |
|
| 98 |
+ s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
|
|
| 99 |
+ ;; |
|
| 100 |
+ esac |
|
| 101 |
+} |
|
| 102 |
+ |
|
| 103 |
+build_all() {
|
|
| 104 |
+ if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
|
|
| 105 |
+ echo >&2 |
|
| 106 |
+ echo >&2 'The build or tests appear to have failed.' |
|
| 107 |
+ echo >&2 |
|
| 108 |
+ echo >&2 'You, as the release maintainer, now have a couple options:' |
|
| 109 |
+ echo >&2 '- delay release and fix issues' |
|
| 110 |
+ echo >&2 '- delay release and fix issues' |
|
| 111 |
+ echo >&2 '- did we mention how important this is? issues need fixing :)' |
|
| 112 |
+ echo >&2 |
|
| 113 |
+ echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' |
|
| 114 |
+ echo >&2 ' really knows all the hairy problems at hand with the current release' |
|
| 115 |
+ echo >&2 ' issues) may bypass this checking by running this script again with the' |
|
| 116 |
+ echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' |
|
| 117 |
+ echo >&2 ' running the test suite, and will only build the binaries and packages. Please' |
|
| 118 |
+ echo >&2 ' avoid using this if at all possible.' |
|
| 119 |
+ echo >&2 |
|
| 120 |
+ echo >&2 'Regardless, we cannot stress enough how sparingly this bypass' |
|
| 121 |
+ echo >&2 ' should be used. If there are release issues, we should always err on the' |
|
| 122 |
+ echo >&2 ' side of caution.' |
|
| 123 |
+ echo >&2 |
|
| 124 |
+ exit 1 |
|
| 125 |
+ fi |
|
| 126 |
+} |
|
| 127 |
+ |
|
| 128 |
+upload_release_build() {
|
|
| 129 |
+ src="$1" |
|
| 130 |
+ dst="$2" |
|
| 131 |
+ latest="$3" |
|
| 132 |
+ |
|
| 133 |
+ echo |
|
| 134 |
+ echo "Uploading $src" |
|
| 135 |
+ echo " to $dst" |
|
| 136 |
+ echo |
|
| 137 |
+ s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst" |
|
| 138 |
+ if [ "$latest" ]; then |
|
| 139 |
+ echo |
|
| 140 |
+ echo "Copying to $latest" |
|
| 141 |
+ echo |
|
| 142 |
+ s3cmd --acl-public cp "$dst" "$latest" |
|
| 143 |
+ fi |
|
| 144 |
+ |
|
| 145 |
+ # get hash files too (see hash_files() in hack/make.sh) |
|
| 146 |
+ for hashAlgo in md5 sha256; do |
|
| 147 |
+ if [ -e "$src.$hashAlgo" ]; then |
|
| 148 |
+ echo |
|
| 149 |
+ echo "Uploading $src.$hashAlgo" |
|
| 150 |
+ echo " to $dst.$hashAlgo" |
|
| 151 |
+ echo |
|
| 152 |
+ s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo" |
|
| 153 |
+ if [ "$latest" ]; then |
|
| 154 |
+ echo |
|
| 155 |
+ echo "Copying to $latest.$hashAlgo" |
|
| 156 |
+ echo |
|
| 157 |
+ s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo" |
|
| 158 |
+ fi |
|
| 159 |
+ fi |
|
| 160 |
+ done |
|
| 161 |
+} |
|
| 162 |
+ |
|
| 163 |
+release_build() {
|
|
| 164 |
+ GOOS=$1 |
|
| 165 |
+ GOARCH=$2 |
|
| 166 |
+ |
|
| 167 |
+ binDir=bundles/$VERSION/cross/$GOOS/$GOARCH |
|
| 168 |
+ tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH |
|
| 169 |
+ binary=docker-$VERSION |
|
| 170 |
+ tgz=docker-$VERSION.tgz |
|
| 171 |
+ |
|
| 172 |
+ latestBase= |
|
| 173 |
+ if [ -z "$NOLATEST" ]; then |
|
| 174 |
+ latestBase=docker-latest |
|
| 175 |
+ fi |
|
| 176 |
+ |
|
| 177 |
+ # we need to map our GOOS and GOARCH to uname values |
|
| 178 |
+ # see https://en.wikipedia.org/wiki/Uname |
|
| 179 |
+ # ie, GOOS=linux -> "uname -s"=Linux |
|
| 180 |
+ |
|
| 181 |
+ s3Os=$GOOS |
|
| 182 |
+ case "$s3Os" in |
|
| 183 |
+ darwin) |
|
| 184 |
+ s3Os=Darwin |
|
| 185 |
+ ;; |
|
| 186 |
+ freebsd) |
|
| 187 |
+ s3Os=FreeBSD |
|
| 188 |
+ ;; |
|
| 189 |
+ linux) |
|
| 190 |
+ s3Os=Linux |
|
| 191 |
+ ;; |
|
| 192 |
+ *) |
|
| 193 |
+ echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" |
|
| 194 |
+ exit 1 |
|
| 195 |
+ ;; |
|
| 196 |
+ esac |
|
| 197 |
+ |
|
| 198 |
+ s3Arch=$GOARCH |
|
| 199 |
+ case "$s3Arch" in |
|
| 200 |
+ amd64) |
|
| 201 |
+ s3Arch=x86_64 |
|
| 202 |
+ ;; |
|
| 203 |
+ 386) |
|
| 204 |
+ s3Arch=i386 |
|
| 205 |
+ ;; |
|
| 206 |
+ arm) |
|
| 207 |
+ s3Arch=armel |
|
| 208 |
+ # someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too |
|
| 209 |
+ ;; |
|
| 210 |
+ *) |
|
| 211 |
+ echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" |
|
| 212 |
+ exit 1 |
|
| 213 |
+ ;; |
|
| 214 |
+ esac |
|
| 215 |
+ |
|
| 216 |
+ s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch |
|
| 217 |
+ latest= |
|
| 218 |
+ latestTgz= |
|
| 219 |
+ if [ "$latestBase" ]; then |
|
| 220 |
+ latest="$s3Dir/$latestBase" |
|
| 221 |
+ latestTgz="$s3Dir/$latestBase.tgz" |
|
| 222 |
+ fi |
|
| 223 |
+ |
|
| 224 |
+ if [ ! -x "$binDir/$binary" ]; then |
|
| 225 |
+ echo >&2 "error: can't find $binDir/$binary - was it compiled properly?" |
|
| 226 |
+ exit 1 |
|
| 227 |
+ fi |
|
| 228 |
+ if [ ! -f "$tgzDir/$tgz" ]; then |
|
| 229 |
+ echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" |
|
| 230 |
+ exit 1 |
|
| 231 |
+ fi |
|
| 232 |
+ |
|
| 233 |
+ upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" |
|
| 234 |
+ upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" |
|
| 235 |
+} |
|
| 236 |
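So, for a hypothetical 1.5.0 release, one linux/amd64 pass of release_build publishes roughly this layout (plus the .md5/.sha256 companions handled by upload_release_build):

    s3://$BUCKET/builds/Linux/x86_64/docker-1.5.0
    s3://$BUCKET/builds/Linux/x86_64/docker-1.5.0.tgz
    s3://$BUCKET/builds/Linux/x86_64/docker-latest        # skipped when NOLATEST is set
    s3://$BUCKET/builds/Linux/x86_64/docker-latest.tgz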
+ |
|
| 237 |
+# Upload the 'ubuntu' bundle to S3: |
|
| 238 |
+# 1. A full APT repository is published at $BUCKET/ubuntu/ |
|
| 239 |
+# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index |
|
| 240 |
+release_ubuntu() {
|
|
| 241 |
+ [ -e bundles/$VERSION/ubuntu ] || {
|
|
| 242 |
+ echo >&2 './hack/make.sh must be run before release_ubuntu' |
|
| 243 |
+ exit 1 |
|
| 244 |
+ } |
|
| 245 |
+ |
|
| 246 |
+ # Sign our packages |
|
| 247 |
+ dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \ |
|
| 248 |
+ --sign builder bundles/$VERSION/ubuntu/*.deb |
|
| 249 |
+ |
|
| 250 |
+ # Setup the APT repo |
|
| 251 |
+ APTDIR=bundles/$VERSION/ubuntu/apt |
|
| 252 |
+ mkdir -p $APTDIR/conf $APTDIR/db |
|
| 253 |
+ s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true |
|
| 254 |
+ cat > $APTDIR/conf/distributions <<EOF |
|
| 255 |
+Codename: docker |
|
| 256 |
+Components: main |
|
| 257 |
+Architectures: amd64 i386 |
|
| 258 |
+EOF |
|
| 259 |
+ |
|
| 260 |
+ # Add the DEB package to the APT repo |
|
| 261 |
+ DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb |
|
| 262 |
+ reprepro -b $APTDIR includedeb docker $DEBFILE |
|
| 263 |
+ |
|
| 264 |
+ # Sign |
|
| 265 |
+ for F in $(find $APTDIR -name Release); do |
|
| 266 |
+ gpg -u releasedocker --passphrase $GPG_PASSPHRASE \ |
|
| 267 |
+ --armor --sign --detach-sign \ |
|
| 268 |
+ --output $F.gpg $F |
|
| 269 |
+ done |
|
| 270 |
+ |
|
| 271 |
+ # Upload keys |
|
| 272 |
+ s3cmd sync $HOME/.gnupg/ s3://$BUCKET/ubuntu/.gnupg/ |
|
| 273 |
+ gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg |
|
| 274 |
+ s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg |
|
| 275 |
+ |
|
| 276 |
+ local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9 |
|
| 277 |
+ if [[ $BUCKET == test* ]]; then |
|
| 278 |
+ gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6 |
|
| 279 |
+ fi |
|
| 280 |
+ |
|
| 281 |
+ # Upload repo |
|
| 282 |
+ s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ |
|
| 283 |
+ cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index |
|
| 284 |
+# Check that HTTPS transport is available to APT |
|
| 285 |
+if [ ! -e /usr/lib/apt/methods/https ]; then |
|
| 286 |
+ apt-get update |
|
| 287 |
+ apt-get install -y apt-transport-https |
|
| 288 |
+fi |
|
| 289 |
+ |
|
| 290 |
+# Add the repository to your APT sources |
|
| 291 |
+echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list |
|
| 292 |
+ |
|
| 293 |
+# Then import the repository key |
|
| 294 |
+apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys $gpgFingerprint |
|
| 295 |
+ |
|
| 296 |
+# Install docker |
|
| 297 |
+apt-get update |
|
| 298 |
+apt-get install -y lxc-docker |
|
| 299 |
+ |
|
| 300 |
+# |
|
| 301 |
+# Alternatively, just use the curl-able install.sh script provided at $(s3_url) |
|
| 302 |
+# |
|
| 303 |
+EOF |
|
| 304 |
+ |
|
| 305 |
+ # Add redirect at /ubuntu/info for URL-backwards-compatibility |
|
| 306 |
+ rm -rf /tmp/emptyfile && touch /tmp/emptyfile |
|
| 307 |
+ s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info |
|
| 308 |
+ |
|
| 309 |
+ echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu" |
|
| 310 |
+} |
|
| 311 |
+ |
|
| 312 |
+# Upload binaries and tgz files to S3 |
|
| 313 |
+release_binaries() {
|
|
| 314 |
+ [ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || {
|
|
| 315 |
+ echo >&2 './hack/make.sh must be run before release_binaries' |
|
| 316 |
+ exit 1 |
|
| 317 |
+ } |
|
| 318 |
+ |
|
| 319 |
+ for d in bundles/$VERSION/cross/*/*; do |
|
| 320 |
+ GOARCH="$(basename "$d")" |
|
| 321 |
+ GOOS="$(basename "$(dirname "$d")")" |
|
| 322 |
+ release_build "$GOOS" "$GOARCH" |
|
| 323 |
+ done |
|
| 324 |
+ |
|
| 325 |
+ # TODO create redirect from builds/*/i686 to builds/*/i386 |
|
| 326 |
+ |
|
| 327 |
+ cat <<EOF | write_to_s3 s3://$BUCKET/builds/index |
|
| 328 |
+# To install, run the following command as root: |
|
| 329 |
+curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker |
|
| 330 |
+# Then start docker in daemon mode: |
|
| 331 |
+sudo /usr/local/bin/docker -d |
|
| 332 |
+EOF |
|
| 333 |
+ |
|
| 334 |
+ # Add redirect at /builds/info for URL-backwards-compatibility |
|
| 335 |
+ rm -rf /tmp/emptyfile && touch /tmp/emptyfile |
|
| 336 |
+ s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info |
|
| 337 |
+ |
|
| 338 |
+ if [ -z "$NOLATEST" ]; then |
|
| 339 |
+ echo "Advertising $VERSION on $BUCKET as most recent version" |
|
| 340 |
+ echo $VERSION | write_to_s3 s3://$BUCKET/latest |
|
| 341 |
+ fi |
|
| 342 |
+} |
|
| 343 |
+ |
|
| 344 |
+# Upload the index script |
|
| 345 |
+release_index() {
|
|
| 346 |
+ sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 s3://$BUCKET/index |
|
| 347 |
+} |
|
| 348 |
+ |
|
| 349 |
+release_test() {
|
|
| 350 |
+ if [ -e "bundles/$VERSION/test" ]; then |
|
| 351 |
+ s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/ |
|
| 352 |
+ fi |
|
| 353 |
+} |
|
| 354 |
+ |
|
| 355 |
+setup_gpg() {
|
|
| 356 |
+ # Make sure that we have our keys |
|
| 357 |
+ mkdir -p $HOME/.gnupg/ |
|
| 358 |
+ s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ $HOME/.gnupg/ || true |
|
| 359 |
+ gpg --list-keys releasedocker >/dev/null || {
|
|
| 360 |
+ gpg --gen-key --batch <<EOF |
|
| 361 |
+Key-Type: RSA |
|
| 362 |
+Key-Length: 4096 |
|
| 363 |
+Passphrase: $GPG_PASSPHRASE |
|
| 364 |
+Name-Real: Docker Release Tool |
|
| 365 |
+Name-Email: docker@docker.com |
|
| 366 |
+Name-Comment: releasedocker |
|
| 367 |
+Expire-Date: 0 |
|
| 368 |
+%commit |
|
| 369 |
+EOF |
|
| 370 |
+ } |
|
| 371 |
+} |
|
| 372 |
+ |
|
| 373 |
+main() {
|
|
| 374 |
+ build_all |
|
| 375 |
+ setup_s3 |
|
| 376 |
+ setup_gpg |
|
| 377 |
+ release_binaries |
|
| 378 |
+ release_ubuntu |
|
| 379 |
+ release_index |
|
| 380 |
+ release_test |
|
| 381 |
+} |
|
| 382 |
+ |
|
| 383 |
+main |
|
| 384 |
+ |
|
| 385 |
+echo |
|
| 386 |
+echo |
|
| 387 |
+echo "Release complete; see $(s3_url)" |
|
| 388 |
+echo |
| 0 | 389 |
new file mode 100755 |
| ... | ... |
@@ -0,0 +1,75 @@ |
| 0 |
+#!/usr/bin/env bash |
|
| 1 |
+set -e |
|
| 2 |
+ |
|
| 3 |
+cd "$(dirname "$BASH_SOURCE")/.." |
|
| 4 |
+ |
|
| 5 |
+# Downloads dependencies into vendor/ directory |
|
| 6 |
+mkdir -p vendor |
|
| 7 |
+cd vendor |
|
| 8 |
+ |
|
| 9 |
+clone() {
|
|
| 10 |
+ vcs=$1 |
|
| 11 |
+ pkg=$2 |
|
| 12 |
+ rev=$3 |
|
| 13 |
+ |
|
| 14 |
+ pkg_url=https://$pkg |
|
| 15 |
+ target_dir=src/$pkg |
|
| 16 |
+ |
|
| 17 |
+ echo -n "$pkg @ $rev: " |
|
| 18 |
+ |
|
| 19 |
+ if [ -d $target_dir ]; then |
|
| 20 |
+ echo -n 'rm old, ' |
|
| 21 |
+ rm -fr $target_dir |
|
| 22 |
+ fi |
|
| 23 |
+ |
|
| 24 |
+ echo -n 'clone, ' |
|
| 25 |
+ case $vcs in |
|
| 26 |
+ git) |
|
| 27 |
+ git clone --quiet --no-checkout $pkg_url $target_dir |
|
| 28 |
+ ( cd $target_dir && git reset --quiet --hard $rev ) |
|
| 29 |
+ ;; |
|
| 30 |
+ hg) |
|
| 31 |
+ hg clone --quiet --updaterev $rev $pkg_url $target_dir |
|
| 32 |
+ ;; |
|
| 33 |
+ esac |
|
| 34 |
+ |
|
| 35 |
+ echo -n 'rm VCS, ' |
|
| 36 |
+ ( cd $target_dir && rm -rf .{git,hg} )
|
|
| 37 |
+ |
|
| 38 |
+ echo done |
|
| 39 |
+} |
|
| 40 |
+ |
|
| 41 |
+clone git github.com/kr/pty 05017fcccf |
|
| 42 |
+ |
|
| 43 |
+clone git github.com/gorilla/context 14f550f51a |
|
| 44 |
+ |
|
| 45 |
+clone git github.com/gorilla/mux 136d54f81f |
|
| 46 |
+ |
|
| 47 |
+clone git github.com/tchap/go-patricia v1.0.1 |
|
| 48 |
+ |
|
| 49 |
+clone hg code.google.com/p/go.net 84a4013f96e0 |
|
| 50 |
+ |
|
| 51 |
+clone hg code.google.com/p/gosqlite 74691fb6f837 |
|
| 52 |
+ |
|
| 53 |
+clone git github.com/docker/libtrust 230dfd18c232 |
|
| 54 |
+ |
|
| 55 |
+clone git github.com/Sirupsen/logrus v0.6.6 |
|
| 56 |
+ |
|
| 57 |
+clone git github.com/go-fsnotify/fsnotify v1.0.4 |
|
| 58 |
+ |
|
| 59 |
+# get Go tip's archive/tar, for xattr support and improved performance |
|
| 60 |
+# TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep |
|
| 61 |
+if [ "$1" = '--go' ]; then |
|
| 62 |
+ # Go takes forever and a half to clone, so we only redownload it when explicitly requested via the "--go" flag to this script. |
|
| 63 |
+ clone hg code.google.com/p/go 1b17b3426e3c |
|
| 64 |
+ mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar |
|
| 65 |
+ rm -rf src/code.google.com/p/go |
|
| 66 |
+ mkdir -p src/code.google.com/p/go/src/pkg/archive |
|
| 67 |
+ mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar |
|
| 68 |
+fi |
|
| 69 |
+ |
|
| 70 |
+clone git github.com/docker/libcontainer 5d6c507d7cfeff97172deedf3db13b5295bcacef |
|
| 71 |
+# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file) |
|
| 72 |
+rm -rf src/github.com/docker/libcontainer/vendor |
|
| 73 |
+eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli' | grep -v 'github.com/Sirupsen/logrus')" |
|
| 74 |
+# we exclude "github.com/codegangsta/cli" here because it's only needed for "nsinit", which Docker doesn't include |
| 4 | 1 |
deleted file mode 100755 |
| ... | ... |
@@ -1,88 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-# DinD: a wrapper script which allows docker to be run inside a docker container. |
|
| 5 |
-# Original version by Jerome Petazzoni <jerome@docker.com> |
|
| 6 |
-# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/ |
|
| 7 |
-# |
|
| 8 |
-# This script should be executed inside a docker container in privileged mode |
|
| 9 |
-# ('docker run --privileged', introduced in docker 0.6).
|
|
| 10 |
- |
|
| 11 |
-# Usage: dind CMD [ARG...] |
|
| 12 |
- |
|
| 13 |
-# apparmor sucks and Docker needs to know that it's in a container (c) @tianon |
|
| 14 |
-export container=docker |
|
| 15 |
- |
|
| 16 |
-# First, make sure that cgroups are mounted correctly. |
|
| 17 |
-CGROUP=/cgroup |
|
| 18 |
- |
|
| 19 |
-mkdir -p "$CGROUP" |
|
| 20 |
- |
|
| 21 |
-if ! mountpoint -q "$CGROUP"; then |
|
| 22 |
- mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
|
|
| 23 |
- echo >&2 'Could not make a tmpfs mount. Did you use --privileged?' |
|
| 24 |
- exit 1 |
|
| 25 |
- } |
|
| 26 |
-fi |
|
| 27 |
- |
|
| 28 |
-if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then |
|
| 29 |
- mount -t securityfs none /sys/kernel/security || {
|
|
| 30 |
- echo >&2 'Could not mount /sys/kernel/security.' |
|
| 31 |
- echo >&2 'AppArmor detection and -privileged mode might break.' |
|
| 32 |
- } |
|
| 33 |
-fi |
|
| 34 |
- |
|
| 35 |
-# Mount the cgroup hierarchies exactly as they are in the parent system. |
|
| 36 |
-for SUBSYS in $(cut -d: -f2 /proc/1/cgroup); do |
|
| 37 |
- mkdir -p "$CGROUP/$SUBSYS" |
|
| 38 |
- if ! mountpoint -q $CGROUP/$SUBSYS; then |
|
| 39 |
- mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS" |
|
| 40 |
- fi |
|
| 41 |
- |
|
| 42 |
- # The two following sections address a bug which manifests itself |
|
| 43 |
- # by a cryptic "lxc-start: no ns_cgroup option specified" when |
|
| 44 |
- # trying to start containers within a container. |
|
| 45 |
- # The bug seems to appear when the cgroup hierarchies are not |
|
| 46 |
- # mounted on the exact same directories in the host, and in the |
|
| 47 |
- # container. |
|
| 48 |
- |
|
| 49 |
- # Named, control-less cgroups are mounted with "-o name=foo" |
|
| 50 |
- # (and appear as such under /proc/<pid>/cgroup) but are usually |
|
| 51 |
- # mounted on a directory named "foo" (without the "name=" prefix). |
|
| 52 |
- # Systemd and OpenRC (and possibly others) both create such a |
|
| 53 |
- # cgroup. To avoid the aforementioned bug, we symlink "foo" to |
|
| 54 |
- # "name=foo". This shouldn't have any adverse effect. |
|
| 55 |
- name="${SUBSYS#name=}"
|
|
| 56 |
- if [ "$name" != "$SUBSYS" ]; then |
|
| 57 |
- ln -s "$SUBSYS" "$CGROUP/$name" |
|
| 58 |
- fi |
|
| 59 |
- |
|
| 60 |
- # Likewise, on at least one system, it has been reported that |
|
| 61 |
- # systemd would mount the CPU and CPU accounting controllers |
|
| 62 |
- # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" |
|
| 63 |
- # but on a directory called "cpu,cpuacct" (note the inversion |
|
| 64 |
- # in the order of the groups). This tries to work around it. |
|
| 65 |
- if [ "$SUBSYS" = 'cpuacct,cpu' ]; then |
|
| 66 |
- ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct" |
|
| 67 |
- fi |
|
| 68 |
-done |
|
| 69 |
- |
|
| 70 |
-# Note: as I write those lines, the LXC userland tools cannot setup |
|
| 71 |
-# a "sub-container" properly if the "devices" cgroup is not in its |
|
| 72 |
-# own hierarchy. Let's detect this and issue a warning. |
|
| 73 |
-if ! grep -q :devices: /proc/1/cgroup; then |
|
| 74 |
- echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.' |
|
| 75 |
-fi |
|
| 76 |
-if ! grep -qw devices /proc/1/cgroup; then |
|
| 77 |
- echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.' |
|
| 78 |
-fi |
|
| 79 |
- |
|
| 80 |
-# Mount /tmp |
|
| 81 |
-mount -t tmpfs none /tmp |
|
| 82 |
- |
|
| 83 |
-if [ $# -gt 0 ]; then |
|
| 84 |
- exec "$@" |
|
| 85 |
-fi |
|
| 86 |
- |
|
| 87 |
-echo >&2 'ERROR: No command specified.' |
|
| 88 |
-echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' |
| 89 | 1 |
deleted file mode 100755 |
| ... | ... |
@@ -1,15 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." |
|
| 5 |
- |
|
| 6 |
-# see also ".mailmap" for how email addresses and names are deduplicated |
|
| 7 |
- |
|
| 8 |
-{
|
|
| 9 |
- cat <<-'EOH' |
|
| 10 |
- # This file lists all individuals having contributed content to the repository. |
|
| 11 |
- # For how it is generated, see `project/generate-authors.sh`. |
|
| 12 |
- EOH |
|
| 13 |
- echo |
|
| 14 |
- git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf |
|
| 15 |
-} > AUTHORS |
| 16 | 1 |
deleted file mode 100755 |
| ... | ... |
@@ -1,62 +0,0 @@ |
| 1 |
-#!/usr/bin/env bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-if [ $# -ne 1 ]; then |
|
| 5 |
- echo >&2 "Usage: $0 PATH" |
|
| 6 |
- echo >&2 "Show the primary and secondary maintainers for a given path" |
|
| 7 |
- exit 1 |
|
| 8 |
-fi |
|
| 9 |
- |
|
| 10 |
-set -e |
|
| 11 |
- |
|
| 12 |
-DEST=$1 |
|
| 13 |
-DESTFILE="" |
|
| 14 |
-if [ ! -d $DEST ]; then |
|
| 15 |
- DESTFILE=$(basename $DEST) |
|
| 16 |
- DEST=$(dirname $DEST) |
|
| 17 |
-fi |
|
| 18 |
- |
|
| 19 |
-MAINTAINERS=() |
|
| 20 |
-cd $DEST |
|
| 21 |
-while true; do |
|
| 22 |
- if [ -e ./MAINTAINERS ]; then |
|
| 23 |
- {
|
|
| 24 |
- while read line; do |
|
| 25 |
- re='^([^:]*): *(.*)$' |
|
| 26 |
- file=$(echo $line | sed -E -n "s/$re/\1/p") |
|
| 27 |
- if [ ! -z "$file" ]; then |
|
| 28 |
- if [ "$file" = "$DESTFILE" ]; then |
|
| 29 |
- echo "Override: $line" |
|
| 30 |
- maintainer=$(echo $line | sed -E -n "s/$re/\2/p") |
|
| 31 |
- MAINTAINERS=("$maintainer" "${MAINTAINERS[@]}")
|
|
| 32 |
- fi |
|
| 33 |
- else |
|
| 34 |
- MAINTAINERS+=("$line");
|
|
| 35 |
- fi |
|
| 36 |
- done; |
|
| 37 |
- } < MAINTAINERS |
|
| 38 |
- break |
|
| 39 |
- fi |
|
| 40 |
- if [ -d .git ]; then |
|
| 41 |
- break |
|
| 42 |
- fi |
|
| 43 |
- if [ "$(pwd)" = "/" ]; then |
|
| 44 |
- break |
|
| 45 |
- fi |
|
| 46 |
- cd .. |
|
| 47 |
-done |
|
| 48 |
- |
|
| 49 |
-PRIMARY="${MAINTAINERS[0]}"
|
|
| 50 |
-PRIMARY_FIRSTNAME=$(echo $PRIMARY | cut -d' ' -f1) |
|
| 51 |
-LGTM_COUNT=${#MAINTAINERS[@]}
|
|
| 52 |
-LGTM_COUNT=$((LGTM_COUNT%2 +1)) |
|
| 53 |
- |
|
| 54 |
-firstname() {
|
|
| 55 |
- echo $1 | cut -d' ' -f1 |
|
| 56 |
-} |
|
| 57 |
- |
|
| 58 |
-echo "A pull request in $1 will need $LGTM_COUNT LGTM's to be merged." |
|
| 59 |
-echo "--- $PRIMARY is the PRIMARY MAINTAINER of $1." |
|
| 60 |
-for SECONDARY in "${MAINTAINERS[@]:1}"; do
|
|
| 61 |
- echo "--- $SECONDARY" |
|
| 62 |
-done |
| 63 | 1 |
deleted file mode 100755 |
| ... | ... |
@@ -1,225 +0,0 @@ |
| 1 |
-#!/bin/sh |
|
| 2 |
-set -e |
|
| 3 |
-# |
|
| 4 |
-# This script is meant for quick & easy install via: |
|
| 5 |
-# 'curl -sSL https://get.docker.com/ | sh' |
|
| 6 |
-# or: |
|
| 7 |
-# 'wget -qO- https://get.docker.com/ | sh' |
|
| 8 |
-# |
|
| 9 |
-# |
|
| 10 |
-# Docker Maintainers: |
|
| 11 |
-# To update this script on https://get.docker.com, |
|
| 12 |
-# use hack/release.sh during a normal release, |
|
| 13 |
-# or the following one-liner for script hotfixes: |
|
| 14 |
-# s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index |
|
| 15 |
-# |
|
| 16 |
- |
|
| 17 |
-url='https://get.docker.com/' |
|
| 18 |
- |
|
| 19 |
-command_exists() {
|
|
| 20 |
- command -v "$@" > /dev/null 2>&1 |
|
| 21 |
-} |
|
| 22 |
- |
|
| 23 |
-case "$(uname -m)" in |
|
| 24 |
- *64) |
|
| 25 |
- ;; |
|
| 26 |
- *) |
|
| 27 |
- echo >&2 'Error: you are not using a 64bit platform.' |
|
| 28 |
- echo >&2 'Docker currently only supports 64bit platforms.' |
|
| 29 |
- exit 1 |
|
| 30 |
- ;; |
|
| 31 |
-esac |
|
| 32 |
- |
|
| 33 |
-if command_exists docker || command_exists lxc-docker; then |
|
| 34 |
- echo >&2 'Warning: "docker" or "lxc-docker" command appears to already exist.' |
|
| 35 |
- echo >&2 'Please ensure that you do not already have docker installed.' |
|
| 36 |
- echo >&2 'You may press Ctrl+C now to abort this process and rectify this situation.' |
|
| 37 |
- ( set -x; sleep 20 ) |
|
| 38 |
-fi |
|
| 39 |
- |
|
| 40 |
-user="$(id -un 2>/dev/null || true)" |
|
| 41 |
- |
|
| 42 |
-sh_c='sh -c' |
|
| 43 |
-if [ "$user" != 'root' ]; then |
|
| 44 |
- if command_exists sudo; then |
|
| 45 |
- sh_c='sudo -E sh -c' |
|
| 46 |
- elif command_exists su; then |
|
| 47 |
- sh_c='su -c' |
|
| 48 |
- else |
|
| 49 |
- echo >&2 'Error: this installer needs the ability to run commands as root.' |
|
| 50 |
- echo >&2 'We are unable to find either "sudo" or "su" available to make this happen.' |
|
| 51 |
- exit 1 |
|
| 52 |
- fi |
|
| 53 |
-fi |
|
| 54 |
- |
|
| 55 |
-curl='' |
|
| 56 |
-if command_exists curl; then |
|
| 57 |
- curl='curl -sSL' |
|
| 58 |
-elif command_exists wget; then |
|
| 59 |
- curl='wget -qO-' |
|
| 60 |
-elif command_exists busybox && busybox --list-modules | grep -q wget; then |
|
| 61 |
- curl='busybox wget -qO-' |
|
| 62 |
-fi |
|
| 63 |
- |
|
| 64 |
-# perform some very rudimentary platform detection |
|
| 65 |
-lsb_dist='' |
|
| 66 |
-if command_exists lsb_release; then |
|
| 67 |
- lsb_dist="$(lsb_release -si)" |
|
| 68 |
-fi |
|
| 69 |
-if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then |
|
| 70 |
- lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" |
|
| 71 |
-fi |
|
| 72 |
-if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then |
|
| 73 |
- lsb_dist='debian' |
|
| 74 |
-fi |
|
| 75 |
-if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then |
|
| 76 |
- lsb_dist='fedora' |
|
| 77 |
-fi |
|
| 78 |
-if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then |
|
| 79 |
- lsb_dist="$(. /etc/os-release && echo "$ID")" |
|
| 80 |
-fi |
|
| 81 |
- |
|
| 82 |
-lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" |
|
| 83 |
-case "$lsb_dist" in |
|
| 84 |
- amzn|fedora) |
|
| 85 |
- if [ "$lsb_dist" = 'amzn' ]; then |
|
| 86 |
- ( |
|
| 87 |
- set -x |
|
| 88 |
- $sh_c 'sleep 3; yum -y -q install docker' |
|
| 89 |
- ) |
|
| 90 |
- else |
|
| 91 |
- ( |
|
| 92 |
- set -x |
|
| 93 |
- $sh_c 'sleep 3; yum -y -q install docker-io' |
|
| 94 |
- ) |
|
| 95 |
- fi |
|
| 96 |
- if command_exists docker && [ -e /var/run/docker.sock ]; then |
|
| 97 |
- ( |
|
| 98 |
- set -x |
|
| 99 |
- $sh_c 'docker version' |
|
| 100 |
- ) || true |
|
| 101 |
- fi |
|
| 102 |
- your_user=your-user |
|
| 103 |
- [ "$user" != 'root' ] && your_user="$user" |
|
| 104 |
- echo |
|
| 105 |
- echo 'If you would like to use Docker as a non-root user, you should now consider' |
|
| 106 |
- echo 'adding your user to the "docker" group with something like:' |
|
| 107 |
- echo |
|
| 108 |
- echo ' sudo usermod -aG docker' $your_user |
|
| 109 |
- echo |
|
| 110 |
- echo 'Remember that you will have to log out and back in for this to take effect!' |
|
| 111 |
- echo |
|
| 112 |
- exit 0 |
|
| 113 |
- ;; |
|
| 114 |
- |
|
| 115 |
- ubuntu|debian|linuxmint) |
|
| 116 |
- export DEBIAN_FRONTEND=noninteractive |
|
| 117 |
- |
|
| 118 |
- did_apt_get_update= |
|
| 119 |
- apt_get_update() {
|
|
| 120 |
- if [ -z "$did_apt_get_update" ]; then |
|
| 121 |
- ( set -x; $sh_c 'sleep 3; apt-get update' ) |
|
| 122 |
- did_apt_get_update=1 |
|
| 123 |
- fi |
|
| 124 |
- } |
|
| 125 |
- |
|
| 126 |
- # aufs is preferred over devicemapper; try to ensure the driver is available. |
|
| 127 |
- if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then |
|
| 128 |
- kern_extras="linux-image-extra-$(uname -r)" |
|
| 129 |
- |
|
| 130 |
- apt_get_update |
|
| 131 |
- ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true |
|
| 132 |
- |
|
| 133 |
- if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then |
|
| 134 |
- echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' |
|
| 135 |
- echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' |
|
| 136 |
- ( set -x; sleep 10 ) |
|
| 137 |
- fi |
|
| 138 |
- fi |
|
| 139 |
- |
|
| 140 |
- # install apparmor utils if they're missing and apparmor is enabled in the kernel |
|
| 141 |
- # otherwise Docker will fail to start |
|
| 142 |
- if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then |
|
| 143 |
- if command -v apparmor_parser &> /dev/null; then |
|
| 144 |
- echo 'apparmor is enabled in the kernel and apparmor utils were already installed' |
|
| 145 |
- else |
|
| 146 |
- echo 'apparmor is enabled in the kernel, but apparmor_parser missing' |
|
| 147 |
- apt_get_update |
|
| 148 |
- ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) |
|
| 149 |
- fi |
|
| 150 |
- fi |
|
| 151 |
- |
|
| 152 |
- if [ ! -e /usr/lib/apt/methods/https ]; then |
|
| 153 |
- apt_get_update |
|
| 154 |
- ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) |
|
| 155 |
- fi |
|
| 156 |
- if [ -z "$curl" ]; then |
|
| 157 |
- apt_get_update |
|
| 158 |
- ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) |
|
| 159 |
- curl='curl -sSL' |
|
| 160 |
- fi |
|
| 161 |
- ( |
|
| 162 |
- set -x |
|
| 163 |
- if [ "https://get.docker.com/" = "$url" ]; then |
|
| 164 |
- $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" |
|
| 165 |
- elif [ "https://test.docker.com/" = "$url" ]; then |
|
| 166 |
- $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" |
|
| 167 |
- else |
|
| 168 |
- $sh_c "$curl ${url}gpg | apt-key add -"
|
|
| 169 |
- fi |
|
| 170 |
- $sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list"
|
|
| 171 |
- $sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker' |
|
| 172 |
- ) |
|
| 173 |
- if command_exists docker && [ -e /var/run/docker.sock ]; then |
|
| 174 |
- ( |
|
| 175 |
- set -x |
|
| 176 |
- $sh_c 'docker version' |
|
| 177 |
- ) || true |
|
| 178 |
- fi |
|
| 179 |
- your_user=your-user |
|
| 180 |
- [ "$user" != 'root' ] && your_user="$user" |
|
| 181 |
- echo |
|
| 182 |
- echo 'If you would like to use Docker as a non-root user, you should now consider' |
|
| 183 |
- echo 'adding your user to the "docker" group with something like:' |
|
| 184 |
- echo |
|
| 185 |
- echo ' sudo usermod -aG docker' $your_user |
|
| 186 |
- echo |
|
| 187 |
- echo 'Remember that you will have to log out and back in for this to take effect!' |
|
| 188 |
- echo |
|
| 189 |
- exit 0 |
|
| 190 |
- ;; |
|
| 191 |
- |
|
| 192 |
- gentoo) |
|
| 193 |
- if [ "$url" = "https://test.docker.com/" ]; then |
|
| 194 |
- echo >&2 |
|
| 195 |
- echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.' |
|
| 196 |
- echo >&2 ' The portage tree should contain the latest stable release of Docker, but' |
|
| 197 |
- echo >&2 ' if you want something more recent, you can always use the live ebuild' |
|
| 198 |
- echo >&2 ' provided in the "docker" overlay available via layman. For more' |
|
| 199 |
- echo >&2 ' instructions, please see the following URL:' |
|
| 200 |
- echo >&2 ' https://github.com/tianon/docker-overlay#using-this-overlay' |
|
| 201 |
- echo >&2 ' After adding the "docker" overlay, you should be able to:' |
|
| 202 |
- echo >&2 ' emerge -av =app-emulation/docker-9999' |
|
| 203 |
- echo >&2 |
|
| 204 |
- exit 1 |
|
| 205 |
- fi |
|
| 206 |
- |
|
| 207 |
- ( |
|
| 208 |
- set -x |
|
| 209 |
- $sh_c 'sleep 3; emerge app-emulation/docker' |
|
| 210 |
- ) |
|
| 211 |
- exit 0 |
|
| 212 |
- ;; |
|
| 213 |
-esac |
|
| 214 |
- |
|
| 215 |
-cat >&2 <<'EOF' |
|
| 216 |
- |
|
| 217 |
- Either your platform is not easily detectable, is not supported by this |
|
| 218 |
- installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have |
|
| 219 |
- a package for Docker. Please visit the following URL for more detailed |
|
| 220 |
- installation instructions: |
|
| 221 |
- |
|
| 222 |
- https://docs.docker.com/en/latest/installation/ |
|
| 223 |
- |
|
| 224 |
-EOF |
|
| 225 |
-exit 1 |
| 226 | 1 |
deleted file mode 100755 |
| ... | ... |
@@ -1,273 +0,0 @@ |
| 1 |
-#!/usr/bin/env bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-# This script builds various binary artifacts from a checkout of the docker |
|
| 5 |
-# source code. |
|
| 6 |
-# |
|
| 7 |
-# Requirements: |
|
| 8 |
-# - The current directory should be a checkout of the docker source code |
|
| 9 |
-# (http://github.com/docker/docker). Whatever version is checked out |
|
| 10 |
-# will be built. |
|
| 11 |
-# - The VERSION file, at the root of the repository, should exist, and |
|
| 12 |
-# will be used as Docker binary version and package version. |
|
| 13 |
-# - The hash of the git commit will also be included in the Docker binary, |
|
| 14 |
-# with the suffix -dirty if the repository isn't clean. |
|
| 15 |
-# - The script is intended to be run inside the docker container specified |
|
| 16 |
-# in the Dockerfile at the root of the source. In other words: |
|
| 17 |
-# DO NOT CALL THIS SCRIPT DIRECTLY. |
|
| 18 |
-# - The right way to call this script is to invoke "make" from |
|
| 19 |
-# your checkout of the Docker repository. |
|
| 20 |
-# the Makefile will do a "docker build -t docker ." and then |
|
| 21 |
-# "docker run hack/make.sh" in the resulting image. |
|
| 22 |
-# |
|
| 23 |
- |
|
| 24 |
-set -o pipefail |
|
| 25 |
- |
|
| 26 |
-export DOCKER_PKG='github.com/docker/docker' |
|
| 27 |
- |
|
| 28 |
-# We're a nice, sexy, little shell script, and people might try to run us; |
|
| 29 |
-# but really, they shouldn't. We want to be in a container! |
|
| 30 |
-if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then |
|
| 31 |
- {
|
|
| 32 |
- echo "# WARNING! I don't seem to be running in the Docker container." |
|
| 33 |
- echo "# The result of this command might be an incorrect build, and will not be" |
|
| 34 |
- echo "# officially supported." |
|
| 35 |
- echo "#" |
|
| 36 |
- echo "# Try this instead: make all" |
|
| 37 |
- echo "#" |
|
| 38 |
- } >&2 |
|
| 39 |
-fi |
|
| 40 |
- |
|
| 41 |
-echo |
|
| 42 |
- |
|
| 43 |
-# List of bundles to create when no argument is passed |
|
| 44 |
-DEFAULT_BUNDLES=( |
|
| 45 |
- validate-dco |
|
| 46 |
- validate-gofmt |
|
| 47 |
- validate-toml |
|
| 48 |
- |
|
| 49 |
- binary |
|
| 50 |
- |
|
| 51 |
- test-unit |
|
| 52 |
- test-integration-cli |
|
| 53 |
- test-docker-py |
|
| 54 |
- |
|
| 55 |
- dynbinary |
|
| 56 |
- test-integration |
|
| 57 |
- |
|
| 58 |
- cover |
|
| 59 |
- cross |
|
| 60 |
- tgz |
|
| 61 |
- ubuntu |
|
| 62 |
-) |
|
| 63 |
- |
|
| 64 |
-VERSION=$(cat ./VERSION) |
|
| 65 |
-if command -v git &> /dev/null && git rev-parse &> /dev/null; then |
|
| 66 |
- GITCOMMIT=$(git rev-parse --short HEAD) |
|
| 67 |
- if [ -n "$(git status --porcelain --untracked-files=no)" ]; then |
|
| 68 |
- GITCOMMIT="$GITCOMMIT-dirty" |
|
| 69 |
- fi |
|
| 70 |
-elif [ "$DOCKER_GITCOMMIT" ]; then |
|
| 71 |
- GITCOMMIT="$DOCKER_GITCOMMIT" |
|
| 72 |
-else |
|
| 73 |
- echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' |
|
| 74 |
- echo >&2 ' Please either build with the .git directory accessible, or specify the' |
|
| 75 |
- echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' |
|
| 76 |
- echo >&2 ' future accountability in diagnosing build issues. Thanks!' |
|
| 77 |
- exit 1 |
|
| 78 |
-fi |
|
| 79 |
- |
|
| 80 |
-if [ "$AUTO_GOPATH" ]; then |
|
| 81 |
- rm -rf .gopath |
|
| 82 |
- mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
|
|
| 83 |
- ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
|
|
| 84 |
- export GOPATH="$(pwd)/.gopath:$(pwd)/vendor" |
|
| 85 |
-fi |
|
| 86 |
- |
|
| 87 |
-if [ ! "$GOPATH" ]; then |
|
| 88 |
- echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH' |
|
| 89 |
- echo >&2 ' alternatively, set AUTO_GOPATH=1' |
|
| 90 |
- exit 1 |
|
| 91 |
-fi |
|
| 92 |
- |
|
| 93 |
-if [ -z "$DOCKER_CLIENTONLY" ]; then |
|
| 94 |
- DOCKER_BUILDTAGS+=" daemon" |
|
| 95 |
-fi |
|
| 96 |
- |
|
| 97 |
-if [ "$DOCKER_EXECDRIVER" = 'lxc' ]; then |
|
| 98 |
- DOCKER_BUILDTAGS+=' test_no_exec' |
|
| 99 |
-fi |
|
| 100 |
- |
|
| 101 |
-# Use these flags when compiling the tests and final binary |
|
| 102 |
- |
|
| 103 |
-IAMSTATIC='true' |
|
| 104 |
-source "$(dirname "$BASH_SOURCE")/make/.go-autogen" |
|
| 105 |
-LDFLAGS='-w' |
|
| 106 |
- |
|
| 107 |
-LDFLAGS_STATIC='-linkmode external' |
|
| 108 |
-# Cgo -H windows is incompatible with -linkmode external. |
|
| 109 |
-if [ "$(go env GOOS)" == 'windows' ]; then |
|
| 110 |
- LDFLAGS_STATIC='' |
|
| 111 |
-fi |
|
| 112 |
-EXTLDFLAGS_STATIC='-static' |
|
| 113 |
-# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build |
|
| 114 |
-# with options like -race. |
|
| 115 |
-ORIG_BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" -installsuffix netgo ) |
|
| 116 |
-# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here |
|
| 117 |
-BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )
|
|
| 118 |
-# Test timeout. |
|
| 119 |
-: ${TIMEOUT:=30m}
|
|
| 120 |
-TESTFLAGS+=" -test.timeout=${TIMEOUT}"
|
|
| 121 |
- |
|
| 122 |
-# A few more flags that are specific just to building a completely-static binary (see hack/make/binary) |
|
| 123 |
-# PLEASE do not use these anywhere else. |
|
| 124 |
-EXTLDFLAGS_STATIC_DOCKER="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files" |
|
| 125 |
-LDFLAGS_STATIC_DOCKER=" |
|
| 126 |
- $LDFLAGS_STATIC |
|
| 127 |
- -extldflags \"$EXTLDFLAGS_STATIC_DOCKER\" |
|
| 128 |
-" |
|
| 129 |
- |
|
| 130 |
-if [ "$(uname -s)" = 'FreeBSD' ]; then |
|
| 131 |
- # Tell cgo the compiler is Clang, not GCC |
|
| 132 |
- # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 |
|
| 133 |
- export CC=clang |
|
| 134 |
- |
|
| 135 |
- # "-extld clang" is a workaround for |
|
| 136 |
- # https://code.google.com/p/go/issues/detail?id=6845 |
|
| 137 |
- LDFLAGS="$LDFLAGS -extld clang" |
|
| 138 |
-fi |
|
| 139 |
- |
|
| 140 |
-# If sqlite3.h doesn't exist under /usr/include, |
|
| 141 |
-# check /usr/local/include also just in case |
|
| 142 |
-# (e.g. FreeBSD Ports installs it under the directory) |
|
| 143 |
-if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then |
|
| 144 |
- export CGO_CFLAGS='-I/usr/local/include' |
|
| 145 |
- export CGO_LDFLAGS='-L/usr/local/lib' |
|
| 146 |
-fi |
|
| 147 |
- |
|
| 148 |
-HAVE_GO_TEST_COVER= |
|
| 149 |
-if \ |
|
| 150 |
- go help testflag | grep -- -cover > /dev/null \ |
|
| 151 |
- && go tool -n cover > /dev/null 2>&1 \ |
|
| 152 |
-; then |
|
| 153 |
- HAVE_GO_TEST_COVER=1 |
|
| 154 |
-fi |
|
| 155 |
- |
|
| 156 |
-# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. |
|
| 157 |
-# You can use this to select certain tests to run, eg. |
|
| 158 |
-# |
|
| 159 |
-# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test |
|
| 160 |
-# |
|
| 161 |
-go_test_dir() {
|
|
| 162 |
- dir=$1 |
|
| 163 |
- coverpkg=$2 |
|
| 164 |
- testcover=() |
|
| 165 |
- if [ "$HAVE_GO_TEST_COVER" ]; then |
|
| 166 |
- # if our current go install has -cover, we want to use it :) |
|
| 167 |
- mkdir -p "$DEST/coverprofiles" |
|
| 168 |
- coverprofile="docker${dir#.}"
|
|
| 169 |
- coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
|
|
| 170 |
- testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) |
|
| 171 |
- fi |
|
| 172 |
- ( |
|
| 173 |
- export DEST |
|
| 174 |
- echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
|
|
| 175 |
- cd "$dir" |
|
| 176 |
- test_env go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS
|
|
| 177 |
- ) |
|
| 178 |
-} |
|
| 179 |
-test_env() {
|
|
| 180 |
- # use "env -i" to tightly control the environment variables that bleed into the tests |
|
| 181 |
- env -i \ |
|
| 182 |
- DEST="$DEST" \ |
|
| 183 |
- DOCKER_EXECDRIVER="$DOCKER_EXECDRIVER" \ |
|
| 184 |
- DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ |
|
| 185 |
- DOCKER_HOST="$DOCKER_HOST" \ |
|
| 186 |
- GOPATH="$GOPATH" \ |
|
| 187 |
- HOME="$DEST/fake-HOME" \ |
|
| 188 |
- PATH="$PATH" \ |
|
| 189 |
- TEST_DOCKERINIT_PATH="$TEST_DOCKERINIT_PATH" \ |
|
| 190 |
- "$@" |
|
| 191 |
-} |
|
| 192 |
- |
|
| 193 |
-# a helper to provide ".exe" when it's appropriate |
|
| 194 |
-binary_extension() {
|
|
| 195 |
- if [ "$(go env GOOS)" = 'windows' ]; then |
|
| 196 |
- echo -n '.exe' |
|
| 197 |
- fi |
|
| 198 |
-} |
|
| 199 |
- |
|
| 200 |
-# This helper function walks the current directory looking for directories |
|
| 201 |
-# holding certain files ($1 parameter), and prints their paths on standard |
|
| 202 |
-# output, one per line. |
|
| 203 |
-find_dirs() {
|
|
| 204 |
- find . -not \( \ |
|
| 205 |
- \( \ |
|
| 206 |
- -path './vendor/*' \ |
|
| 207 |
- -o -path './integration/*' \ |
|
| 208 |
- -o -path './integration-cli/*' \ |
|
| 209 |
- -o -path './contrib/*' \ |
|
| 210 |
- -o -path './pkg/mflag/example/*' \ |
|
| 211 |
- -o -path './.git/*' \ |
|
| 212 |
- -o -path './bundles/*' \ |
|
| 213 |
- -o -path './docs/*' \ |
|
| 214 |
- -o -path './pkg/libcontainer/nsinit/*' \ |
|
| 215 |
- \) \ |
|
| 216 |
- -prune \ |
|
| 217 |
- \) -name "$1" -print0 | xargs -0n1 dirname | sort -u |
|
| 218 |
-} |
|
| 219 |
- |
|
| 220 |
-hash_files() {
|
|
| 221 |
- while [ $# -gt 0 ]; do |
|
| 222 |
- f="$1" |
|
| 223 |
- shift |
|
| 224 |
- dir="$(dirname "$f")" |
|
| 225 |
- base="$(basename "$f")" |
|
| 226 |
- for hashAlgo in md5 sha256; do |
|
| 227 |
- if command -v "${hashAlgo}sum" &> /dev/null; then
|
|
| 228 |
- ( |
|
| 229 |
- # subshell and cd so that we get output files like: |
|
| 230 |
- # $HASH docker-$VERSION |
|
| 231 |
- # instead of: |
|
| 232 |
- # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION |
|
| 233 |
- cd "$dir" |
|
| 234 |
- "${hashAlgo}sum" "$base" > "$base.$hashAlgo"
|
|
| 235 |
- ) |
|
| 236 |
- fi |
|
| 237 |
- done |
|
| 238 |
- done |
|
| 239 |
-} |
|
| 240 |
- |
|
| 241 |
-bundle() {
|
|
| 242 |
- bundlescript=$1 |
|
| 243 |
- bundle=$(basename $bundlescript) |
|
| 244 |
- echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)" |
|
| 245 |
- mkdir -p bundles/$VERSION/$bundle |
|
| 246 |
- source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle" |
|
| 247 |
-} |
|
| 248 |
- |
|
| 249 |
-main() {
|
|
| 250 |
- # We want this to fail if the bundles already exist and cannot be removed. |
|
| 251 |
- # This is to avoid mixing bundles from different versions of the code. |
|
| 252 |
- mkdir -p bundles |
|
| 253 |
- if [ -e "bundles/$VERSION" ]; then |
|
| 254 |
- echo "bundles/$VERSION already exists. Removing." |
|
| 255 |
- rm -fr bundles/$VERSION && mkdir bundles/$VERSION || exit 1 |
|
| 256 |
- echo |
|
| 257 |
- fi |
|
| 258 |
- SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
|
| 259 |
- if [ $# -lt 1 ]; then |
|
| 260 |
- bundles=(${DEFAULT_BUNDLES[@]})
|
|
| 261 |
- else |
|
| 262 |
- bundles=($@) |
|
| 263 |
- fi |
|
| 264 |
- for bundle in ${bundles[@]}; do
|
|
| 265 |
- bundle $SCRIPTDIR/make/$bundle |
|
| 266 |
- echo |
|
| 267 |
- done |
|
| 268 |
- |
|
| 269 |
- # if we get all the way through successfully, let's delete our autogenerated code! |
|
| 270 |
- rm -r autogen |
|
| 271 |
-} |
|
| 272 |
- |
|
| 273 |
-main "$@" |
| 274 | 1 |
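For context on the hash_files() helper deleted above: for every file it is handed, it writes sibling .md5 and .sha256 checksum files next to the original. A sketch of the effect (the version number and paths are illustrative, not taken from this diff):

```
# assuming the "binary" bundle was built for VERSION=1.5.0
hash_files "bundles/1.5.0/binary/docker-1.5.0"
ls bundles/1.5.0/binary/
# docker-1.5.0  docker-1.5.0.md5  docker-1.5.0.sha256
```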
deleted file mode 100644 |
| ... | ... |
@@ -1,32 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-IAMSTATIC="true" |
|
| 5 |
-source "$(dirname "$BASH_SOURCE")/.go-autogen" |
|
| 6 |
- |
|
| 7 |
-# dockerinit still needs to be a static binary, even if docker is dynamic |
|
| 8 |
-go build \ |
|
| 9 |
- -o "$DEST/dockerinit-$VERSION" \ |
|
| 10 |
- "${BUILDFLAGS[@]}" \
|
|
| 11 |
- -ldflags " |
|
| 12 |
- $LDFLAGS |
|
| 13 |
- $LDFLAGS_STATIC |
|
| 14 |
- -extldflags \"$EXTLDFLAGS_STATIC\" |
|
| 15 |
- " \ |
|
| 16 |
- ./dockerinit |
|
| 17 |
-echo "Created binary: $DEST/dockerinit-$VERSION" |
|
| 18 |
-ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" |
|
| 19 |
- |
|
| 20 |
-sha1sum= |
|
| 21 |
-if command -v sha1sum &> /dev/null; then |
|
| 22 |
- sha1sum=sha1sum |
|
| 23 |
-elif command -v shasum &> /dev/null; then |
|
| 24 |
- # Mac OS X - why couldn't they just use the same command name and be happy? |
|
| 25 |
- sha1sum=shasum |
|
| 26 |
-else |
|
| 27 |
- echo >&2 'error: cannot find sha1sum command or equivalent' |
|
| 28 |
- exit 1 |
|
| 29 |
-fi |
|
| 30 |
- |
|
| 31 |
-# sha1 our new dockerinit so that docker and dockerinit always run as a matched pair compiled for one another |
|
| 32 |
-export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" |
| 33 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,23 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-if ! docker inspect emptyfs &> /dev/null; then |
|
| 5 |
- # let's build a "docker save" tarball for "emptyfs" |
|
| 6 |
- # see https://github.com/docker/docker/pull/5262 |
|
| 7 |
- # and also https://github.com/docker/docker/issues/4242 |
|
| 8 |
- dir="$DEST/emptyfs" |
|
| 9 |
- mkdir -p "$dir" |
|
| 10 |
- ( |
|
| 11 |
- cd "$dir" |
|
| 12 |
- echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories
|
|
| 13 |
- mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 |
|
| 14 |
- ( |
|
| 15 |
- cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 |
|
| 16 |
- echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json
|
|
| 17 |
- echo '1.0' > VERSION |
|
| 18 |
- tar -cf layer.tar --files-from /dev/null |
|
| 19 |
- ) |
|
| 20 |
- ) |
|
| 21 |
- ( set -x; tar -cC "$dir" . | docker load ) |
|
| 22 |
- rm -rf "$dir" |
|
| 23 |
-fi |
| 24 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,43 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-# this list should match roughly what's in the Dockerfile (minus the explicit image IDs, of course) |
|
| 5 |
-images=( |
|
| 6 |
- busybox:latest |
|
| 7 |
- hello-world:latest |
|
| 8 |
-) |
|
| 9 |
- |
|
| 10 |
-if ! docker inspect "${images[@]}" &> /dev/null; then
|
|
| 11 |
- hardCodedDir='/docker-frozen-images' |
|
| 12 |
- if [ -d "$hardCodedDir" ]; then |
|
| 13 |
- ( set -x; tar -cC "$hardCodedDir" . | docker load ) |
|
| 14 |
- elif [ -e Dockerfile ] && command -v curl > /dev/null; then |
|
| 15 |
- # testing for "curl" because "download-frozen-image.sh" is built around curl |
|
| 16 |
- dir="$DEST/frozen-images" |
|
| 17 |
- # extract the exact "RUN download-frozen-image.sh" line from the Dockerfile itself for consistency |
|
| 18 |
- awk ' |
|
| 19 |
- $1 == "RUN" && $2 == "./contrib/download-frozen-image.sh" {
|
|
| 20 |
- for (i = 2; i < NF; i++) |
|
| 21 |
- printf ( $i == "'"$hardCodedDir"'" ? "'"$dir"'" : $i ) " "; |
|
| 22 |
- print $NF; |
|
| 23 |
- if (/\\$/) {
|
|
| 24 |
- inCont = 1; |
|
| 25 |
- next; |
|
| 26 |
- } |
|
| 27 |
- } |
|
| 28 |
- inCont {
|
|
| 29 |
- print; |
|
| 30 |
- if (!/\\$/) {
|
|
| 31 |
- inCont = 0; |
|
| 32 |
- } |
|
| 33 |
- } |
|
| 34 |
- ' Dockerfile | sh -x |
|
| 35 |
- ( set -x; tar -cC "$dir" . | docker load ) |
|
| 36 |
- else |
|
| 37 |
- for image in "${images[@]}"; do
|
|
| 38 |
- if ! docker inspect "$image" &> /dev/null; then |
|
| 39 |
- ( set -x; docker pull "$image" ) |
|
| 40 |
- fi |
|
| 41 |
- done |
|
| 42 |
- fi |
|
| 43 |
-fi |
| 44 | 1 |
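The frozen-images script above tries three sources in order: a baked-in /docker-frozen-images directory, the Dockerfile's own download-frozen-image.sh invocation (replayed via awk), and finally a plain docker pull. Outside the build container, that last fallback amounts to the following (image list taken from the script):

```
docker pull busybox:latest
docker pull hello-world:latest
```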
deleted file mode 100644 |
| ... | ... |
@@ -1,15 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-# Build a Go static web server on top of busybox image |
|
| 5 |
-# and compile it for target daemon |
|
| 6 |
- |
|
| 7 |
-dir="$DEST/httpserver" |
|
| 8 |
-mkdir -p "$dir" |
|
| 9 |
-( |
|
| 10 |
- cd "$dir" |
|
| 11 |
- GOOS=linux GOARCH=amd64 go build -o httpserver github.com/docker/docker/contrib/httpserver |
|
| 12 |
- cp ../../../../contrib/httpserver/Dockerfile . |
|
| 13 |
- docker build -qt httpserver . > /dev/null |
|
| 14 |
-) |
|
| 15 |
-rm -rf "$dir" |
| 16 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,18 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-rm -rf autogen |
|
| 4 |
- |
|
| 5 |
-mkdir -p autogen/dockerversion |
|
| 6 |
-cat > autogen/dockerversion/dockerversion.go <<EOF |
|
| 7 |
-// AUTOGENERATED FILE; see $BASH_SOURCE |
|
| 8 |
-package dockerversion |
|
| 9 |
- |
|
| 10 |
-var ( |
|
| 11 |
- GITCOMMIT string = "$GITCOMMIT" |
|
| 12 |
- VERSION string = "$VERSION" |
|
| 13 |
- |
|
| 14 |
- IAMSTATIC string = "${IAMSTATIC:-true}"
|
|
| 15 |
- INITSHA1 string = "$DOCKER_INITSHA1" |
|
| 16 |
- INITPATH string = "$DOCKER_INITPATH" |
|
| 17 |
-) |
|
| 18 |
-EOF |
| 19 | 1 |
deleted file mode 100755 |
| ... | ... |
@@ -1,35 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-# Compile phase run by parallel in test-unit. No support for coverpkg |
|
| 5 |
- |
|
| 6 |
-dir=$1 |
|
| 7 |
-in_file="$dir/$(basename "$dir").test" |
|
| 8 |
-out_file="$DEST/precompiled/$dir.test" |
|
| 9 |
-# we want to use binary_extension() here, but we can't because it's in make.sh and this file gets re-execed |
|
| 10 |
-if [ "$(go env GOOS)" = 'windows' ]; then |
|
| 11 |
- in_file+='.exe' |
|
| 12 |
- out_file+='.exe' |
|
| 13 |
-fi |
|
| 14 |
-testcover=() |
|
| 15 |
-if [ "$HAVE_GO_TEST_COVER" ]; then |
|
| 16 |
- # if our current go install has -cover, we want to use it :) |
|
| 17 |
- mkdir -p "$DEST/coverprofiles" |
|
| 18 |
- coverprofile="docker${dir#.}"
|
|
| 19 |
- coverprofile="$DEST/coverprofiles/${coverprofile//\//-}"
|
|
| 20 |
- testcover=( -cover -coverprofile "$coverprofile" ) # missing $coverpkg |
|
| 21 |
-fi |
|
| 22 |
-if [ "$BUILDFLAGS_FILE" ]; then |
|
| 23 |
- readarray -t BUILDFLAGS < "$BUILDFLAGS_FILE" |
|
| 24 |
-fi |
|
| 25 |
- |
|
| 26 |
-if ! ( |
|
| 27 |
- cd "$dir" |
|
| 28 |
- go test "${testcover[@]}" -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS -c
|
|
| 29 |
-); then |
|
| 30 |
- exit 1 |
|
| 31 |
-fi |
|
| 32 |
- |
|
| 33 |
-mkdir -p "$(dirname "$out_file")" |
|
| 34 |
-mv "$in_file" "$out_file" |
|
| 35 |
-echo "Precompiled: ${DOCKER_PKG}${dir#.}"
|
| 36 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,47 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-# see test-integration-cli for example usage of this script |
|
| 4 |
- |
|
| 5 |
-export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" |
|
| 6 |
- |
|
| 7 |
-if ! command -v docker &> /dev/null; then |
|
| 8 |
- echo >&2 'error: binary or dynbinary must be run before .integration-daemon-start' |
|
| 9 |
- false |
|
| 10 |
-fi |
|
| 11 |
- |
|
| 12 |
-# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers |
|
| 13 |
-exec 41>&1 42>&2 |
|
| 14 |
- |
|
| 15 |
-export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs}
|
|
| 16 |
-export DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native}
|
|
| 17 |
- |
|
| 18 |
-if [ -z "$DOCKER_TEST_HOST" ]; then |
|
| 19 |
- export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one |
|
| 20 |
- ( set -x; exec \ |
|
| 21 |
- docker --daemon --debug \ |
|
| 22 |
- --host "$DOCKER_HOST" \ |
|
| 23 |
- --storage-driver "$DOCKER_GRAPHDRIVER" \ |
|
| 24 |
- --exec-driver "$DOCKER_EXECDRIVER" \ |
|
| 25 |
- --pidfile "$DEST/docker.pid" \ |
|
| 26 |
- &> "$DEST/docker.log" |
|
| 27 |
- ) & |
|
| 28 |
-else |
|
| 29 |
- export DOCKER_HOST="$DOCKER_TEST_HOST" |
|
| 30 |
-fi |
|
| 31 |
- |
|
| 32 |
-# give it a second to come up so it's "ready" |
|
| 33 |
-tries=10 |
|
| 34 |
-while ! docker version &> /dev/null; do |
|
| 35 |
- (( tries-- )) |
|
| 36 |
- if [ $tries -le 0 ]; then |
|
| 37 |
- if [ -z "$DOCKER_HOST" ]; then |
|
| 38 |
- echo >&2 "error: daemon failed to start" |
|
| 39 |
- echo >&2 " check $DEST/docker.log for details" |
|
| 40 |
- else |
|
| 41 |
- echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" |
|
| 42 |
- docker version >&2 || true |
|
| 43 |
- fi |
|
| 44 |
- false |
|
| 45 |
- fi |
|
| 46 |
- sleep 2 |
|
| 47 |
-done |
| 10 | 1 |
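Note the DOCKER_TEST_HOST escape hatch in the script above: when it is set, no local daemon is spawned and the tests simply target the daemon it names. A hedged example (the address is illustrative):

```
# run the CLI integration suite against an already-running daemon
DOCKER_TEST_HOST='tcp://127.0.0.1:2375' ./hack/make.sh test-integration-cli
```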
deleted file mode 100644 |
| ... | ... |
@@ -1,33 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-if [ -z "$VALIDATE_UPSTREAM" ]; then |
|
| 4 |
- # this is kind of an expensive check, so let's not do this twice if we |
|
| 5 |
- # are running more than one validate bundlescript |
|
| 6 |
- |
|
| 7 |
- VALIDATE_REPO='https://github.com/docker/docker.git' |
|
| 8 |
- VALIDATE_BRANCH='master' |
|
| 9 |
- |
|
| 10 |
- if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then |
|
| 11 |
- VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git"
|
|
| 12 |
- VALIDATE_BRANCH="${TRAVIS_BRANCH}"
|
|
| 13 |
- fi |
|
| 14 |
- |
|
| 15 |
- VALIDATE_HEAD="$(git rev-parse --verify HEAD)" |
|
| 16 |
- |
|
| 17 |
- git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" |
|
| 18 |
- VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" |
|
| 19 |
- |
|
| 20 |
- VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" |
|
| 21 |
- VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" |
|
| 22 |
- |
|
| 23 |
- validate_diff() {
|
|
| 24 |
- if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then |
|
| 25 |
- git diff "$VALIDATE_COMMIT_DIFF" "$@" |
|
| 26 |
- fi |
|
| 27 |
- } |
|
| 28 |
- validate_log() {
|
|
| 29 |
- if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then |
|
| 30 |
- git log "$VALIDATE_COMMIT_LOG" "$@" |
|
| 31 |
- fi |
|
| 32 |
- } |
|
| 33 |
-fi |
| 34 | 1 |
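The .validate helper above exposes validate_diff and validate_log, which scope git diff/log to just the commits proposed against upstream. A bundlescript consumes it along the lines of this sketch (the '*.sh' filter is illustrative; the real consumers appear later in this diff):

```
source "$(dirname "$BASH_SOURCE")/.validate"

# list proposed changes to shell scripts only
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.sh' || true) )
```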
deleted file mode 100644 |
| ... | ... |
@@ -1,17 +0,0 @@ |
| 1 |
-This directory holds scripts called by `make.sh` in the parent directory. |
|
| 2 |
- |
|
| 3 |
-Each script is named after the bundle it creates. |
|
| 4 |
-They should not be called directly - instead, pass them as arguments to make.sh, for example: |
|
| 5 |
- |
|
| 6 |
-``` |
|
| 7 |
-./project/make.sh test |
|
| 8 |
-./project/make.sh binary ubuntu |
|
| 9 |
- |
|
| 10 |
-# Or to run all bundles: |
|
| 11 |
-./project/make.sh |
|
| 12 |
-``` |
|
| 13 |
- |
|
| 14 |
-To add a bundle: |
|
| 15 |
- |
|
| 16 |
-* Create a shell-compatible file here |
|
| 17 |
-* Add it to $DEFAULT_BUNDLES in make.sh |
| 18 | 1 |
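To make the "To add a bundle" steps in the README above concrete, a minimal bundlescript might look like the following sketch (the bundle name and output file are made up); make.sh's bundle() helper sources the script with the bundle's output directory as its first argument:

```
#!/bin/bash
set -e

DEST=$1  # bundles/$VERSION/<bundle-name>, created by bundle() in make.sh

# a do-nothing example bundle: just record the version being built
echo "$VERSION" > "$DEST/version.txt"
```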
deleted file mode 100755 |
| ... | ... |
@@ -1,27 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST=$1 |
|
| 5 |
-BINARY_NAME="docker-$VERSION" |
|
| 6 |
-BINARY_EXTENSION="$(binary_extension)" |
|
| 7 |
-BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" |
|
| 8 |
- |
|
| 9 |
-# Cygdrive paths don't play well with go build -o. |
|
| 10 |
-if [[ "$(uname -s)" == CYGWIN* ]]; then |
|
| 11 |
- DEST=$(cygpath -mw $DEST) |
|
| 12 |
-fi |
|
| 13 |
- |
|
| 14 |
-source "$(dirname "$BASH_SOURCE")/.go-autogen" |
|
| 15 |
- |
|
| 16 |
-go build \ |
|
| 17 |
- -o "$DEST/$BINARY_FULLNAME" \ |
|
| 18 |
- "${BUILDFLAGS[@]}" \
|
|
| 19 |
- -ldflags " |
|
| 20 |
- $LDFLAGS |
|
| 21 |
- $LDFLAGS_STATIC_DOCKER |
|
| 22 |
- " \ |
|
| 23 |
- ./docker |
|
| 24 |
-echo "Created binary: $DEST/$BINARY_FULLNAME" |
|
| 25 |
-ln -sf "$BINARY_FULLNAME" "$DEST/docker$BINARY_EXTENSION" |
|
| 26 |
- |
|
| 27 |
-hash_files "$DEST/$BINARY_FULLNAME" |
| 28 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,22 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST="$1" |
|
| 5 |
- |
|
| 6 |
-bundle_cover() {
|
|
| 7 |
- coverprofiles=( "$DEST/../"*"/coverprofiles/"* ) |
|
| 8 |
- for p in "${coverprofiles[@]}"; do
|
|
| 9 |
- echo |
|
| 10 |
- ( |
|
| 11 |
- set -x |
|
| 12 |
- go tool cover -func="$p" |
|
| 13 |
- ) |
|
| 14 |
- done |
|
| 15 |
-} |
|
| 16 |
- |
|
| 17 |
-if [ "$HAVE_GO_TEST_COVER" ]; then |
|
| 18 |
- bundle_cover 2>&1 | tee "$DEST/report.log" |
|
| 19 |
-else |
|
| 20 |
- echo >&2 'warning: the current version of go does not support -cover' |
|
| 21 |
- echo >&2 ' skipping test coverage report' |
|
| 22 |
-fi |
| 23 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,33 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST=$1 |
|
| 5 |
- |
|
| 6 |
-# explicit list of os/arch combos that support being a daemon |
|
| 7 |
-declare -A daemonSupporting |
|
| 8 |
-daemonSupporting=( |
|
| 9 |
- [linux/amd64]=1 |
|
| 10 |
-) |
|
| 11 |
- |
|
| 12 |
-# if we have our linux/amd64 version compiled, let's symlink it in |
|
| 13 |
-if [ -x "$DEST/../binary/docker-$VERSION" ]; then |
|
| 14 |
- mkdir -p "$DEST/linux/amd64" |
|
| 15 |
- ( |
|
| 16 |
- cd "$DEST/linux/amd64" |
|
| 17 |
- ln -s ../../../binary/* ./ |
|
| 18 |
- ) |
|
| 19 |
- echo "Created symlinks:" "$DEST/linux/amd64/"* |
|
| 20 |
-fi |
|
| 21 |
- |
|
| 22 |
-for platform in $DOCKER_CROSSPLATFORMS; do |
|
| 23 |
- ( |
|
| 24 |
- mkdir -p "$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION |
|
| 25 |
- export GOOS=${platform%/*}
|
|
| 26 |
- export GOARCH=${platform##*/}
|
|
| 27 |
- if [ -z "${daemonSupporting[$platform]}" ]; then
|
|
| 28 |
- export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms |
|
| 29 |
- export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
|
|
| 30 |
- fi |
|
| 31 |
- source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform" |
|
| 32 |
- ) |
|
| 33 |
-done |
| 34 | 1 |
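The cross bundlescript above builds once per entry in $DOCKER_CROSSPLATFORMS, splitting each entry into GOOS/GOARCH. A sketch of an invocation (the platform list is illustrative):

```
# "binary" must run first so the native linux/amd64 build can be symlinked in
DOCKER_CROSSPLATFORMS='darwin/amd64 windows/386' ./hack/make.sh binary cross
```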
deleted file mode 100644 |
| ... | ... |
@@ -1,22 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST=$1 |
|
| 5 |
- |
|
| 6 |
-if [ -z "$DOCKER_CLIENTONLY" ]; then |
|
| 7 |
- source "$(dirname "$BASH_SOURCE")/.dockerinit" |
|
| 8 |
- |
|
| 9 |
- hash_files "$DEST/dockerinit-$VERSION" |
|
| 10 |
-else |
|
| 11 |
- # DOCKER_CLIENTONLY must be truthy, so we don't need to bother with dockerinit :) |
|
| 12 |
- export DOCKER_INITSHA1="" |
|
| 13 |
-fi |
|
| 14 |
-# DOCKER_INITSHA1 is exported so that other bundlescripts can easily access it later without recalculating it |
|
| 15 |
- |
|
| 16 |
-( |
|
| 17 |
- export IAMSTATIC="false" |
|
| 18 |
- export LDFLAGS_STATIC_DOCKER='' |
|
| 19 |
- export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
|
|
| 20 |
- export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
|
|
| 21 |
- source "$(dirname "$BASH_SOURCE")/binary" |
|
| 22 |
-) |
| 23 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,30 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST=$1 |
|
| 5 |
- |
|
| 6 |
-# subshell so that we can export PATH without breaking other things |
|
| 7 |
-( |
|
| 8 |
- source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" |
|
| 9 |
- |
|
| 10 |
- # we need to wrap up everything in between integration-daemon-start and |
|
| 11 |
- # integration-daemon-stop to make sure we kill the daemon and don't hang, |
|
| 12 |
- # even and especially on test failures |
|
| 13 |
- didFail= |
|
| 14 |
- if ! {
|
|
| 15 |
- dockerPy='/docker-py' |
|
| 16 |
- [ -d "$dockerPy" ] || {
|
|
| 17 |
- dockerPy="$DEST/docker-py" |
|
| 18 |
- git clone https://github.com/docker/docker-py.git "$dockerPy" |
|
| 19 |
- } |
|
| 20 |
- |
|
| 21 |
- # exporting PYTHONPATH to import "docker" from our local docker-py |
|
| 22 |
- test_env PYTHONPATH="$dockerPy" python "$dockerPy/tests/integration_test.py" |
|
| 23 |
- }; then |
|
| 24 |
- didFail=1 |
|
| 25 |
- fi |
|
| 26 |
- |
|
| 27 |
- source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" |
|
| 28 |
- |
|
| 29 |
- [ -z "$didFail" ] # "set -e" ftw |
|
| 30 |
-) 2>&1 | tee -a $DEST/test.log |
| 31 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,25 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST=$1 |
|
| 5 |
- |
|
| 6 |
-INIT=$DEST/../dynbinary/dockerinit-$VERSION |
|
| 7 |
-[ -x "$INIT" ] || {
|
|
| 8 |
- source "$(dirname "$BASH_SOURCE")/.dockerinit" |
|
| 9 |
- INIT="$DEST/dockerinit" |
|
| 10 |
-} |
|
| 11 |
-export TEST_DOCKERINIT_PATH="$INIT" |
|
| 12 |
- |
|
| 13 |
-bundle_test_integration() {
|
|
| 14 |
- LDFLAGS=" |
|
| 15 |
- $LDFLAGS |
|
| 16 |
- -X $DOCKER_PKG/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" |
|
| 17 |
- " go_test_dir ./integration \ |
|
| 18 |
- "-coverpkg $(find_dirs '*.go' | sed 's,^\.,'$DOCKER_PKG',g' | paste -d, -s)" |
|
| 19 |
-} |
|
| 20 |
- |
|
| 21 |
-# this "grep" hides some really irritating warnings that "go test -coverpkg" |
|
| 22 |
-# spews when it is given packages that aren't used |
|
| 23 |
-bundle_test_integration 2>&1 \ |
|
| 24 |
- | grep --line-buffered -v '^warning: no packages being tested depend on ' \ |
|
| 25 |
- | tee -a $DEST/test.log |
| 26 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,31 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST=$1 |
|
| 5 |
- |
|
| 6 |
-bundle_test_integration_cli() {
|
|
| 7 |
- go_test_dir ./integration-cli |
|
| 8 |
-} |
|
| 9 |
- |
|
| 10 |
-# subshell so that we can export PATH without breaking other things |
|
| 11 |
-( |
|
| 12 |
- source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" |
|
| 13 |
- |
|
| 14 |
- # we need to wrap up everything in between integration-daemon-start and |
|
| 15 |
- # integration-daemon-stop to make sure we kill the daemon and don't hang, |
|
| 16 |
- # even and especially on test failures |
|
| 17 |
- didFail= |
|
| 18 |
- if ! {
|
|
| 19 |
- source "$(dirname "$BASH_SOURCE")/.ensure-frozen-images" |
|
| 20 |
- source "$(dirname "$BASH_SOURCE")/.ensure-httpserver" |
|
| 21 |
- source "$(dirname "$BASH_SOURCE")/.ensure-emptyfs" |
|
| 22 |
- |
|
| 23 |
- bundle_test_integration_cli |
|
| 24 |
- }; then |
|
| 25 |
- didFail=1 |
|
| 26 |
- fi |
|
| 27 |
- |
|
| 28 |
- source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" |
|
| 29 |
- |
|
| 30 |
- [ -z "$didFail" ] # "set -e" ftw |
|
| 31 |
-) 2>&1 | tee -a $DEST/test.log |
| 32 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,88 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-DEST=$1 |
|
| 5 |
-: ${PARALLEL_JOBS:=$(nproc 2>/dev/null || echo 1)} # if nproc fails (usually because we don't have it), let's not parallelize by default
|
|
| 6 |
- |
|
| 7 |
-RED=$'\033[31m' |
|
| 8 |
-GREEN=$'\033[32m' |
|
| 9 |
-TEXTRESET=$'\033[0m' # reset the foreground colour |
|
| 10 |
- |
|
| 11 |
-# Run Docker's test suite, including sub-packages, and store their output as a bundle |
|
| 12 |
-# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. |
|
| 13 |
-# You can use this to select certain tests to run, e.g. |
|
| 14 |
-# |
|
| 15 |
-# TESTFLAGS='-run ^TestBuild$' ./hack/make.sh test-unit |
|
| 16 |
-# |
|
| 17 |
-bundle_test_unit() {
|
|
| 18 |
- {
|
|
| 19 |
- date |
|
| 20 |
- |
|
| 21 |
- # Run all the tests if no TESTDIRS were specified. |
|
| 22 |
- if [ -z "$TESTDIRS" ]; then |
|
| 23 |
- TESTDIRS=$(find_dirs '*_test.go') |
|
| 24 |
- fi |
|
| 25 |
- ( |
|
| 26 |
- export LDFLAGS |
|
| 27 |
- export TESTFLAGS |
|
| 28 |
- export HAVE_GO_TEST_COVER |
|
| 29 |
- export DEST |
|
| 30 |
- |
|
| 31 |
- # some hack to export array variables |
|
| 32 |
- export BUILDFLAGS_FILE="buildflags_file" |
|
| 33 |
- ( IFS=$'\n'; echo "${BUILDFLAGS[*]}" ) > "$BUILDFLAGS_FILE"
|
|
| 34 |
- |
|
| 35 |
- if command -v parallel &> /dev/null; then |
|
| 36 |
- # accommodate parallel so it can access variables |
|
| 37 |
- export SHELL="$BASH" |
|
| 38 |
- export HOME="$(mktemp -d)" |
|
| 39 |
- mkdir -p "$HOME/.parallel" |
|
| 40 |
- touch "$HOME/.parallel/ignored_vars" |
|
| 41 |
- |
|
| 42 |
- echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" |
|
| 43 |
- rm -rf "$HOME" |
|
| 44 |
- else |
|
| 45 |
- # aww, no "parallel" available - fall back to boring |
|
| 46 |
- for test_dir in $TESTDIRS; do |
|
| 47 |
- "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" || true |
|
| 48 |
- # don't let one directory that fails to build tank _all_ our tests! |
|
| 49 |
- done |
|
| 50 |
- fi |
|
| 51 |
- rm -f "$BUILDFLAGS_FILE" |
|
| 52 |
- ) |
|
| 53 |
- echo "$TESTDIRS" | go_run_test_dir |
|
| 54 |
- } |
|
| 55 |
-} |
|
| 56 |
- |
|
| 57 |
-go_run_test_dir() {
|
|
| 58 |
- TESTS_FAILED=() |
|
| 59 |
- while read dir; do |
|
| 60 |
- echo |
|
| 61 |
- echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}"
|
|
| 62 |
- precompiled="$DEST/precompiled/$dir.test$(binary_extension)" |
|
| 63 |
- if ! ( cd "$dir" && test_env "$precompiled" $TESTFLAGS ); then |
|
| 64 |
- TESTS_FAILED+=("$dir")
|
|
| 65 |
- echo |
|
| 66 |
- echo "${RED}Tests failed: $dir${TEXTRESET}"
|
|
| 67 |
- sleep 1 # give it a second, so observers watching can take note |
|
| 68 |
- fi |
|
| 69 |
- done |
|
| 70 |
- |
|
| 71 |
- echo |
|
| 72 |
- echo |
|
| 73 |
- echo |
|
| 74 |
- |
|
| 75 |
- # if some tests fail, we want the bundlescript to fail, but we want to |
|
| 76 |
- # try running ALL the tests first, hence TESTS_FAILED |
|
| 77 |
- if [ "${#TESTS_FAILED[@]}" -gt 0 ]; then
|
|
| 78 |
- echo "${RED}Test failures in: ${TESTS_FAILED[@]}${TEXTRESET}"
|
|
| 79 |
- echo |
|
| 80 |
- false |
|
| 81 |
- else |
|
| 82 |
- echo "${GREEN}Test success${TEXTRESET}"
|
|
| 83 |
- echo |
|
| 84 |
- true |
|
| 85 |
- fi |
|
| 86 |
-} |
|
| 87 |
- |
|
| 88 |
-bundle_test_unit 2>&1 | tee -a $DEST/test.log |
| 89 | 1 |
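The TESTDIRS hook above makes it possible to compile and run only a subset of the unit tests; a sketch (the package path is illustrative):

```
# compile and run unit tests for a single directory, verbosely
TESTDIRS='./pkg/mflag' TESTFLAGS='-v' ./hack/make.sh test-unit
```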
deleted file mode 100644 |
| ... | ... |
@@ -1,34 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-DEST="$1" |
|
| 4 |
-CROSS="$DEST/../cross" |
|
| 5 |
- |
|
| 6 |
-set -e |
|
| 7 |
- |
|
| 8 |
-if [ ! -d "$CROSS/linux/amd64" ]; then |
|
| 9 |
- echo >&2 'error: binary and cross must be run before tgz' |
|
| 10 |
- false |
|
| 11 |
-fi |
|
| 12 |
- |
|
| 13 |
-for d in "$CROSS/"*/*; do |
|
| 14 |
- GOARCH="$(basename "$d")" |
|
| 15 |
- GOOS="$(basename "$(dirname "$d")")" |
|
| 16 |
- BINARY_NAME="docker-$VERSION" |
|
| 17 |
- BINARY_EXTENSION="$(export GOOS && binary_extension)" |
|
| 18 |
- BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" |
|
| 19 |
- mkdir -p "$DEST/$GOOS/$GOARCH" |
|
| 20 |
- TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME.tgz" |
|
| 21 |
- |
|
| 22 |
- mkdir -p "$DEST/build" |
|
| 23 |
- |
|
| 24 |
- mkdir -p "$DEST/build/usr/local/bin" |
|
| 25 |
- cp -L "$d/$BINARY_FULLNAME" "$DEST/build/usr/local/bin/docker$BINARY_EXTENSION" |
|
| 26 |
- |
|
| 27 |
- tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr |
|
| 28 |
- |
|
| 29 |
- hash_files "$TGZ" |
|
| 30 |
- |
|
| 31 |
- rm -rf "$DEST/build" |
|
| 32 |
- |
|
| 33 |
- echo "Created tgz: $TGZ" |
|
| 34 |
-done |
| 35 | 1 |
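Since the tgz bundlescript above archives a single usr/ hierarchy (with the binary at usr/local/bin/docker), installing one of these tarballs is a plain unpack at the filesystem root. A sketch, assuming a cross-built tarball and root privileges (version number illustrative):

```
tar -xzf docker-1.5.0.tgz -C /   # yields /usr/local/bin/docker
```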
deleted file mode 100644 |
| ... | ... |
@@ -1,191 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-DEST=$1 |
|
| 4 |
- |
|
| 5 |
-PKGVERSION="${VERSION//-/'~'}"
|
|
| 6 |
-# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better |
|
| 7 |
-if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then |
|
| 8 |
- GIT_UNIX="$(git log -1 --pretty='%at')" |
|
| 9 |
- GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" |
|
| 10 |
- GIT_COMMIT="$(git log -1 --pretty='%h')" |
|
| 11 |
- GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}"
|
|
| 12 |
- # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' |
|
| 13 |
- PKGVERSION="$PKGVERSION~$GIT_VERSION" |
|
| 14 |
-fi |
|
| 15 |
- |
|
| 16 |
-# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false |
|
| 17 |
-# true |
|
| 18 |
-# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false |
|
| 19 |
-# true |
|
| 20 |
-# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false |
|
| 21 |
-# true |
|
| 22 |
- |
|
| 23 |
-# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a |
|
| 24 |
- |
|
| 25 |
-PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" |
|
| 26 |
-PACKAGE_URL="http://www.docker.com/" |
|
| 27 |
-PACKAGE_MAINTAINER="support@docker.com" |
|
| 28 |
-PACKAGE_DESCRIPTION="Linux container runtime |
|
| 29 |
-Docker complements LXC with a high-level API which operates at the process |
|
| 30 |
-level. It runs unix processes with strong guarantees of isolation and |
|
| 31 |
-repeatability across servers. |
|
| 32 |
-Docker is a great building block for automating distributed systems: |
|
| 33 |
-large-scale web deployments, database clusters, continuous deployment systems, |
|
| 34 |
-private PaaS, service-oriented architectures, etc." |
|
| 35 |
-PACKAGE_LICENSE="Apache-2.0" |
|
| 36 |
- |
|
| 37 |
-# Build docker as an ubuntu package using FPM and REPREPRO (sue me). |
|
| 38 |
-# bundle_binary must be called first. |
|
| 39 |
-bundle_ubuntu() {
|
|
| 40 |
- DIR=$DEST/build |
|
| 41 |
- |
|
| 42 |
- # Include our udev rules |
|
| 43 |
- mkdir -p $DIR/etc/udev/rules.d |
|
| 44 |
- cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ |
|
| 45 |
- |
|
| 46 |
- # Include our init scripts |
|
| 47 |
- mkdir -p $DIR/etc/init |
|
| 48 |
- cp contrib/init/upstart/docker.conf $DIR/etc/init/ |
|
| 49 |
- mkdir -p $DIR/etc/init.d |
|
| 50 |
- cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ |
|
| 51 |
- mkdir -p $DIR/etc/default |
|
| 52 |
- cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker |
|
| 53 |
- mkdir -p $DIR/lib/systemd/system |
|
| 54 |
- cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/
|
|
| 55 |
- |
|
| 56 |
- # Include contributed completions |
|
| 57 |
- mkdir -p $DIR/etc/bash_completion.d |
|
| 58 |
- cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/ |
|
| 59 |
- mkdir -p $DIR/usr/share/zsh/vendor-completions |
|
| 60 |
- cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/ |
|
| 61 |
- mkdir -p $DIR/etc/fish/completions |
|
| 62 |
- cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/ |
|
| 63 |
- |
|
| 64 |
- # Include contributed man pages |
|
| 65 |
- docs/man/md2man-all.sh -q |
|
| 66 |
- manRoot="$DIR/usr/share/man" |
|
| 67 |
- mkdir -p "$manRoot" |
|
| 68 |
- for manDir in docs/man/man?; do |
|
| 69 |
- manBase="$(basename "$manDir")" # "man1" |
|
| 70 |
- for manFile in "$manDir"/*; do |
|
| 71 |
- manName="$(basename "$manFile")" # "docker-build.1" |
|
| 72 |
- mkdir -p "$manRoot/$manBase" |
|
| 73 |
- gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" |
|
| 74 |
- done |
|
| 75 |
- done |
|
| 76 |
- |
|
| 77 |
- # Copy the binary |
|
| 78 |
- # This will fail if the binary bundle hasn't been built |
|
| 79 |
- mkdir -p $DIR/usr/bin |
|
| 80 |
- cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker |
|
| 81 |
- |
|
| 82 |
- # Generate postinst/prerm/postrm scripts |
|
| 83 |
- cat > $DEST/postinst <<'EOF' |
|
| 84 |
-#!/bin/sh |
|
| 85 |
-set -e |
|
| 86 |
-set -u |
|
| 87 |
- |
|
| 88 |
-if [ "$1" = 'configure' ] && [ -z "$2" ]; then |
|
| 89 |
- if ! getent group docker > /dev/null; then |
|
| 90 |
- groupadd --system docker |
|
| 91 |
- fi |
|
| 92 |
-fi |
|
| 93 |
- |
|
| 94 |
-if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then
|
|
| 95 |
- # we only need to do this if upstart isn't in charge |
|
| 96 |
- update-rc.d docker defaults > /dev/null || true |
|
| 97 |
-fi |
|
| 98 |
-if [ -n "$2" ]; then |
|
| 99 |
- _dh_action=restart |
|
| 100 |
-else |
|
| 101 |
- _dh_action=start |
|
| 102 |
-fi |
|
| 103 |
-service docker $_dh_action 2>/dev/null || true |
|
| 104 |
- |
|
| 105 |
-#DEBHELPER# |
|
| 106 |
-EOF |
|
| 107 |
- cat > $DEST/prerm <<'EOF' |
|
| 108 |
-#!/bin/sh |
|
| 109 |
-set -e |
|
| 110 |
-set -u |
|
| 111 |
- |
|
| 112 |
-service docker stop 2>/dev/null || true |
|
| 113 |
- |
|
| 114 |
-#DEBHELPER# |
|
| 115 |
-EOF |
|
| 116 |
- cat > $DEST/postrm <<'EOF' |
|
| 117 |
-#!/bin/sh |
|
| 118 |
-set -e |
|
| 119 |
-set -u |
|
| 120 |
- |
|
| 121 |
-if [ "$1" = "purge" ] ; then |
|
| 122 |
- update-rc.d docker remove > /dev/null || true |
|
| 123 |
-fi |
|
| 124 |
- |
|
| 125 |
-# In case this system is running systemd, we make systemd reload the unit files |
|
| 126 |
-# to pick up changes. |
|
| 127 |
-if [ -d /run/systemd/system ] ; then |
|
| 128 |
- systemctl --system daemon-reload > /dev/null || true |
|
| 129 |
-fi |
|
| 130 |
- |
|
| 131 |
-#DEBHELPER# |
|
| 132 |
-EOF |
|
| 133 |
- # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way |
|
| 134 |
- chmod +x $DEST/postinst $DEST/prerm $DEST/postrm |
|
| 135 |
- |
|
| 136 |
- ( |
|
| 137 |
- # switch directories so we create *.deb in the right folder |
|
| 138 |
- cd $DEST |
|
| 139 |
- |
|
| 140 |
- # create lxc-docker-VERSION package |
|
| 141 |
- fpm -s dir -C $DIR \ |
|
| 142 |
- --name lxc-docker-$VERSION --version "$PKGVERSION" \ |
|
| 143 |
- --after-install $DEST/postinst \ |
|
| 144 |
- --before-remove $DEST/prerm \ |
|
| 145 |
- --after-remove $DEST/postrm \ |
|
| 146 |
- --architecture "$PACKAGE_ARCHITECTURE" \ |
|
| 147 |
- --prefix / \ |
|
| 148 |
- --depends iptables \ |
|
| 149 |
- --deb-recommends aufs-tools \ |
|
| 150 |
- --deb-recommends ca-certificates \ |
|
| 151 |
- --deb-recommends git \ |
|
| 152 |
- --deb-recommends xz-utils \ |
|
| 153 |
- --deb-recommends 'cgroupfs-mount | cgroup-lite' \ |
|
| 154 |
- --description "$PACKAGE_DESCRIPTION" \ |
|
| 155 |
- --maintainer "$PACKAGE_MAINTAINER" \ |
|
| 156 |
- --conflicts docker \ |
|
| 157 |
- --conflicts docker.io \ |
|
| 158 |
- --conflicts lxc-docker-virtual-package \ |
|
| 159 |
- --provides lxc-docker \ |
|
| 160 |
- --provides lxc-docker-virtual-package \ |
|
| 161 |
- --replaces lxc-docker \ |
|
| 162 |
- --replaces lxc-docker-virtual-package \ |
|
| 163 |
- --url "$PACKAGE_URL" \ |
|
| 164 |
- --license "$PACKAGE_LICENSE" \ |
|
| 165 |
- --config-files /etc/udev/rules.d/80-docker.rules \ |
|
| 166 |
- --config-files /etc/init/docker.conf \ |
|
| 167 |
- --config-files /etc/init.d/docker \ |
|
| 168 |
- --config-files /etc/default/docker \ |
|
| 169 |
- --deb-compression gz \ |
|
| 170 |
- -t deb . |
|
| 171 |
- # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available |
|
| 172 |
- |
|
| 173 |
- # create empty lxc-docker wrapper package |
|
| 174 |
- fpm -s empty \ |
|
| 175 |
- --name lxc-docker --version "$PKGVERSION" \ |
|
| 176 |
- --architecture "$PACKAGE_ARCHITECTURE" \ |
|
| 177 |
- --depends lxc-docker-$VERSION \ |
|
| 178 |
- --description "$PACKAGE_DESCRIPTION" \ |
|
| 179 |
- --maintainer "$PACKAGE_MAINTAINER" \ |
|
| 180 |
- --url "$PACKAGE_URL" \ |
|
| 181 |
- --license "$PACKAGE_LICENSE" \ |
|
| 182 |
- --deb-compression gz \ |
|
| 183 |
- -t deb |
|
| 184 |
- ) |
|
| 185 |
- |
|
| 186 |
- # clean up after ourselves so we have a clean output directory |
|
| 187 |
- rm $DEST/postinst $DEST/prerm $DEST/postrm |
|
| 188 |
- rm -r $DIR |
|
| 189 |
-} |
|
| 190 |
- |
|
| 191 |
-bundle_ubuntu |
| 192 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,54 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-source "$(dirname "$BASH_SOURCE")/.validate" |
|
| 4 |
- |
|
| 5 |
-adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
|
|
| 6 |
-dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
|
|
| 7 |
-#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')"
|
|
| 8 |
- |
|
| 9 |
-: ${adds:=0}
|
|
| 10 |
-: ${dels:=0}
|
|
| 11 |
- |
|
| 12 |
-# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" |
|
| 13 |
-githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' |
|
| 14 |
- |
|
| 15 |
-# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work |
|
| 16 |
-dcoPrefix='Signed-off-by:' |
|
| 17 |
-dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" |
|
| 18 |
- |
|
| 19 |
-check_dco() {
|
|
| 20 |
- grep -qE "$dcoRegex" |
|
| 21 |
-} |
|
| 22 |
- |
|
| 23 |
-if [ $adds -eq 0 -a $dels -eq 0 ]; then |
|
| 24 |
- echo '0 adds, 0 deletions; nothing to validate! :)' |
|
| 25 |
-else |
|
| 26 |
- commits=( $(validate_log --format='format:%H%n') ) |
|
| 27 |
- badCommits=() |
|
| 28 |
- for commit in "${commits[@]}"; do
|
|
| 29 |
- if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then |
|
| 30 |
- # no content (ie, Merge commit, etc) |
|
| 31 |
- continue |
|
| 32 |
- fi |
|
| 33 |
- if ! git log -1 --format='format:%B' "$commit" | check_dco; then |
|
| 34 |
- badCommits+=( "$commit" ) |
|
| 35 |
- fi |
|
| 36 |
- done |
|
| 37 |
- if [ ${#badCommits[@]} -eq 0 ]; then
|
|
| 38 |
- echo "Congratulations! All commits are properly signed with the DCO!" |
|
| 39 |
- else |
|
| 40 |
- {
|
|
| 41 |
- echo "These commits do not have a proper '$dcoPrefix' marker:" |
|
| 42 |
- for commit in "${badCommits[@]}"; do
|
|
| 43 |
- echo " - $commit" |
|
| 44 |
- done |
|
| 45 |
- echo |
|
| 46 |
- echo 'Please amend each commit to include a properly formatted DCO marker.' |
|
| 47 |
- echo |
|
| 48 |
- echo 'Visit the following URL for information about the Docker DCO:' |
|
| 49 |
- echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' |
|
| 50 |
- echo |
|
| 51 |
- } >&2 |
|
| 52 |
- false |
|
| 53 |
- fi |
|
| 54 |
-fi |
| 55 | 1 |
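For reference, a commit-message trailer that satisfies the dcoRegex above looks like this (name, address, and GitHub username are placeholders):

```
Signed-off-by: Jane Doe <jane.doe@example.com> (github: janedoe)
```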
deleted file mode 100644 |
| ... | ... |
@@ -1,30 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-source "$(dirname "$BASH_SOURCE")/.validate" |
|
| 4 |
- |
|
| 5 |
-IFS=$'\n' |
|
| 6 |
-files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) |
|
| 7 |
-unset IFS |
|
| 8 |
- |
|
| 9 |
-badFiles=() |
|
| 10 |
-for f in "${files[@]}"; do
|
|
| 11 |
- # we use "git show" here to validate that what's committed is formatted |
|
| 12 |
- if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then |
|
| 13 |
- badFiles+=( "$f" ) |
|
| 14 |
- fi |
|
| 15 |
-done |
|
| 16 |
- |
|
| 17 |
-if [ ${#badFiles[@]} -eq 0 ]; then
|
|
| 18 |
- echo 'Congratulations! All Go source files are properly formatted.' |
|
| 19 |
-else |
|
| 20 |
- {
|
|
| 21 |
- echo "These files are not properly gofmt'd:" |
|
| 22 |
- for f in "${badFiles[@]}"; do
|
|
| 23 |
- echo " - $f" |
|
| 24 |
- done |
|
| 25 |
- echo |
|
| 26 |
- echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' |
|
| 27 |
- echo |
|
| 28 |
- } >&2 |
|
| 29 |
- false |
|
| 30 |
-fi |
| 31 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,30 +0,0 @@ |
| 1 |
-#!/bin/bash |
|
| 2 |
- |
|
| 3 |
-source "$(dirname "$BASH_SOURCE")/.validate" |
|
| 4 |
- |
|
| 5 |
-IFS=$'\n' |
|
| 6 |
-files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) |
|
| 7 |
-unset IFS |
|
| 8 |
- |
|
| 9 |
-badFiles=() |
|
| 10 |
-for f in "${files[@]}"; do
|
|
| 11 |
- # we use "git show" here to validate that what's committed is formatted |
|
| 12 |
- if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then |
|
| 13 |
- badFiles+=( "$f" ) |
|
| 14 |
- fi |
|
| 15 |
-done |
|
| 16 |
- |
|
| 17 |
-if [ ${#badFiles[@]} -eq 0 ]; then
|
|
| 18 |
- echo 'Congratulations! All toml source files changed here have valid syntax.' |
|
| 19 |
-else |
|
| 20 |
- {
|
|
| 21 |
- echo "These files are not valid toml:" |
|
| 22 |
- for f in "${badFiles[@]}"; do
|
|
| 23 |
- echo " - $f" |
|
| 24 |
- done |
|
| 25 |
- echo |
|
| 26 |
- echo 'Please reformat the above files as valid toml' |
|
| 27 |
- echo |
|
| 28 |
- } >&2 |
|
| 29 |
- false |
|
| 30 |
-fi |
| 31 | 1 |
deleted file mode 100755 |
| ... | ... |
@@ -1,389 +0,0 @@ |
| 1 |
-#!/usr/bin/env bash |
|
| 2 |
-set -e |
|
| 3 |
- |
|
| 4 |
-# This script looks for bundles built by make.sh, and releases them on a |
|
| 5 |
-# public S3 bucket. |
|
| 6 |
-# |
|
| 7 |
-# Bundles should be available for the VERSION string passed as argument. |
|
| 8 |
-# |
|
| 9 |
-# The correct way to call this script is inside a container built by the |
|
| 10 |
-# official Dockerfile at the root of the Docker source code. The Dockerfile, |
|
| 11 |
-# make.sh and release.sh should all be from the same source code revision. |
|
| 12 |
- |
|
| 13 |
-set -o pipefail |
|
| 14 |
- |
|
| 15 |
-# Print a usage message and exit. |
|
| 16 |
-usage() {
|
|
| 17 |
- cat >&2 <<'EOF' |
|
| 18 |
-To run, I need: |
|
| 19 |
-- to be in a container generated by the Dockerfile at the top of the Docker |
|
| 20 |
- repository; |
|
| 21 |
-- to be provided with the name of an S3 bucket, in environment variable |
|
| 22 |
- AWS_S3_BUCKET; |
|
| 23 |
-- to be provided with AWS credentials for this S3 bucket, in environment |
|
| 24 |
- variables AWS_ACCESS_KEY and AWS_SECRET_KEY; |
|
| 25 |
-- the passphrase to unlock the GPG key which will sign the deb packages |
|
| 26 |
- (passed as environment variable GPG_PASSPHRASE); |
|
| 27 |
-- a generous amount of good will and nice manners. |
|
| 28 |
-The canonical way to run me is to run the image produced by the Dockerfile, e.g.: |
|
| 29 |
- |
|
| 30 |
-docker run -e AWS_S3_BUCKET=test.docker.com \ |
|
| 31 |
- -e AWS_ACCESS_KEY=... \ |
|
| 32 |
- -e AWS_SECRET_KEY=... \ |
|
| 33 |
- -e GPG_PASSPHRASE=... \ |
|
| 34 |
- -i -t --privileged \ |
|
| 35 |
- docker ./hack/release.sh |
|
| 36 |
-EOF |
|
| 37 |
- exit 1 |
|
| 38 |
-} |
|
| 39 |
- |
|
| 40 |
-[ "$AWS_S3_BUCKET" ] || usage |
|
| 41 |
-[ "$AWS_ACCESS_KEY" ] || usage |
|
| 42 |
-[ "$AWS_SECRET_KEY" ] || usage |
|
| 43 |
-[ "$GPG_PASSPHRASE" ] || usage |
|
| 44 |
-[ -d /go/src/github.com/docker/docker ] || usage |
|
| 45 |
-cd /go/src/github.com/docker/docker |
|
| 46 |
-[ -x hack/make.sh ] || usage |
|
| 47 |
- |
|
| 48 |
-RELEASE_BUNDLES=( |
|
| 49 |
- binary |
|
| 50 |
- cross |
|
| 51 |
- tgz |
|
| 52 |
- ubuntu |
|
| 53 |
-) |
|
| 54 |
- |
|
| 55 |
-if [ "$1" != '--release-regardless-of-test-failure' ]; then |
|
| 56 |
- RELEASE_BUNDLES=( |
|
| 57 |
- test-unit test-integration |
|
| 58 |
- "${RELEASE_BUNDLES[@]}"
|
|
| 59 |
- test-integration-cli |
|
| 60 |
- ) |
|
| 61 |
-fi |
|
| 62 |
- |
|
| 63 |
-VERSION=$(cat VERSION) |
|
| 64 |
-BUCKET=$AWS_S3_BUCKET |
|
| 65 |
- |
|
| 66 |
-# These are the 2 keys we've used to sign the deb's |
|
| 67 |
-# release (get.docker.com) |
|
| 68 |
-# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" |
|
| 69 |
-# test (test.docker.com) |
|
| 70 |
-# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" |
|
| 71 |
- |
|
| 72 |
-setup_s3() {
|
|
| 73 |
- # Try creating the bucket. Ignore errors (it might already exist). |
|
| 74 |
- s3cmd mb s3://$BUCKET 2>/dev/null || true |
|
| 75 |
- # Check access to the bucket. |
|
| 76 |
- # s3cmd has no useful exit status, so we cannot check that. |
|
| 77 |
- # Instead, we check if it outputs anything on standard output. |
|
| 78 |
- # (When there are problems, it uses standard error instead.) |
|
| 79 |
- s3cmd info s3://$BUCKET | grep -q . |
|
| 80 |
- # Make the bucket accessible through website endpoints. |
|
| 81 |
- s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET |
|
| 82 |
-} |
|
| 83 |
- |
|
| 84 |
-# write_to_s3 uploads the contents of standard input to the specified S3 url. |
|
| 85 |
-write_to_s3() {
|
|
| 86 |
- DEST=$1 |
|
| 87 |
- F=`mktemp` |
|
| 88 |
- cat > $F |
|
| 89 |
- s3cmd --acl-public --mime-type='text/plain' put $F $DEST |
|
| 90 |
- rm -f $F |
|
| 91 |
-} |
|
| 92 |
- |
|
| 93 |
-s3_url() {
|
|
| 94 |
- case "$BUCKET" in |
|
| 95 |
- get.docker.com|test.docker.com) |
|
| 96 |
- echo "https://$BUCKET" |
|
| 97 |
- ;; |
|
| 98 |
- *) |
|
| 99 |
- s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
|
|
| 100 |
- ;; |
|
| 101 |
- esac |
|
| 102 |
-} |
|
| 103 |
- |
|
| 104 |
-build_all() {
|
|
| 105 |
- if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
|
|
| 106 |
- echo >&2 |
|
| 107 |
- echo >&2 'The build or tests appear to have failed.' |
|
| 108 |
- echo >&2 |
|
| 109 |
- echo >&2 'You, as the release maintainer, now have a couple options:' |
|
| 110 |
- echo >&2 '- delay release and fix issues' |
|
| 111 |
- echo >&2 '- delay release and fix issues' |
|
| 112 |
- echo >&2 '- did we mention how important this is? issues need fixing :)' |
|
| 113 |
- echo >&2 |
|
| 114 |
- echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' |
|
| 115 |
- echo >&2 ' really knows all the hairy problems at hand with the current release' |
|
| 116 |
- echo >&2 ' issues) may bypass this checking by running this script again with the' |
|
| 117 |
- echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' |
|
| 118 |
- echo >&2 ' running the test suite, and will only build the binaries and packages. Please' |
|
| 119 |
- echo >&2 ' avoid using this if at all possible.' |
|
| 120 |
- echo >&2 |
|
| 121 |
- echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' |
|
| 122 |
- echo >&2 ' should be used. If there are release issues, we should always err on the' |
|
| 123 |
- echo >&2 ' side of caution.' |
|
| 124 |
- echo >&2 |
|
| 125 |
- exit 1 |
|
| 126 |
- fi |
|
| 127 |
-} |
|
| 128 |
- |
|
| 129 |
-upload_release_build() {
|
|
| 130 |
- src="$1" |
|
| 131 |
- dst="$2" |
|
| 132 |
- latest="$3" |
|
| 133 |
- |
|
| 134 |
- echo |
|
| 135 |
- echo "Uploading $src" |
|
| 136 |
- echo " to $dst" |
|
| 137 |
- echo |
|
| 138 |
- s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst" |
|
| 139 |
- if [ "$latest" ]; then |
|
| 140 |
- echo |
|
| 141 |
- echo "Copying to $latest" |
|
| 142 |
- echo |
|
| 143 |
- s3cmd --acl-public cp "$dst" "$latest" |
|
| 144 |
- fi |
|
| 145 |
- |
|
| 146 |
- # get hash files too (see hash_files() in hack/make.sh) |
|
| 147 |
- for hashAlgo in md5 sha256; do |
|
| 148 |
- if [ -e "$src.$hashAlgo" ]; then |
|
| 149 |
- echo |
|
| 150 |
- echo "Uploading $src.$hashAlgo" |
|
| 151 |
- echo " to $dst.$hashAlgo" |
|
| 152 |
- echo |
|
| 153 |
- s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo" |
|
| 154 |
- if [ "$latest" ]; then |
|
| 155 |
- echo |
|
| 156 |
- echo "Copying to $latest.$hashAlgo" |
|
| 157 |
- echo |
|
| 158 |
- s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo" |
|
| 159 |
- fi |
|
| 160 |
- fi |
|
| 161 |
- done |
|
| 162 |
-} |
|
| 163 |
- |
|
| 164 |
-release_build() {
|
|
| 165 |
- GOOS=$1 |
|
| 166 |
- GOARCH=$2 |
|
| 167 |
- |
|
| 168 |
- binDir=bundles/$VERSION/cross/$GOOS/$GOARCH |
|
| 169 |
- tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH |
|
| 170 |
- binary=docker-$VERSION |
|
| 171 |
- tgz=docker-$VERSION.tgz |
|
| 172 |
- |
|
| 173 |
- latestBase= |
|
| 174 |
- if [ -z "$NOLATEST" ]; then |
|
| 175 |
- latestBase=docker-latest |
|
| 176 |
- fi |
|
| 177 |
- |
|
| 178 |
- # we need to map our GOOS and GOARCH to uname values |
|
| 179 |
- # see https://en.wikipedia.org/wiki/Uname |
|
| 180 |
- # ie, GOOS=linux -> "uname -s"=Linux |
|
| 181 |
- |
|
| 182 |
- s3Os=$GOOS |
|
| 183 |
- case "$s3Os" in |
|
| 184 |
- darwin) |
|
| 185 |
- s3Os=Darwin |
|
| 186 |
- ;; |
|
| 187 |
- freebsd) |
|
| 188 |
- s3Os=FreeBSD |
|
| 189 |
- ;; |
|
| 190 |
- linux) |
|
| 191 |
- s3Os=Linux |
|
| 192 |
- ;; |
|
| 193 |
- *) |
|
| 194 |
- echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" |
|
| 195 |
- exit 1 |
|
| 196 |
- ;; |
|
| 197 |
- esac |
|
| 198 |
- |
|
| 199 |
- s3Arch=$GOARCH |
|
| 200 |
- case "$s3Arch" in |
|
| 201 |
- amd64) |
|
| 202 |
- s3Arch=x86_64 |
|
| 203 |
- ;; |
|
| 204 |
- 386) |
|
| 205 |
- s3Arch=i386 |
|
| 206 |
- ;; |
|
| 207 |
- arm) |
|
| 208 |
- s3Arch=armel |
|
| 209 |
- # someday, we might support multiple GOARM values, in which case we might get armhf here too |
|
| 210 |
- ;; |
|
| 211 |
- *) |
|
| 212 |
- echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" |
|
| 213 |
- exit 1 |
|
| 214 |
- ;; |
|
| 215 |
- esac |
|
| 216 |
- |
|
| 217 |
- s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch |
|
| 218 |
- latest= |
|
| 219 |
- latestTgz= |
|
| 220 |
- if [ "$latestBase" ]; then |
|
| 221 |
- latest="$s3Dir/$latestBase" |
|
| 222 |
- latestTgz="$s3Dir/$latestBase.tgz" |
|
| 223 |
- fi |
|
| 224 |
- |
|
| 225 |
- if [ ! -x "$binDir/$binary" ]; then |
|
| 226 |
- echo >&2 "error: can't find $binDir/$binary - was it compiled properly?" |
|
| 227 |
- exit 1 |
|
| 228 |
- fi |
|
| 229 |
- if [ ! -f "$tgzDir/$tgz" ]; then |
|
| 230 |
- echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" |
|
| 231 |
- exit 1 |
|
| 232 |
- fi |
|
| 233 |
- |
|
| 234 |
- upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" |
|
| 235 |
- upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" |
|
| 236 |
-} |
|
| 237 |
- |
|
| 238 |
-# Upload the 'ubuntu' bundle to S3: |
|
| 239 |
-# 1. A full APT repository is published at $BUCKET/ubuntu/ |
|
| 240 |
-# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index |
|
| 241 |
-release_ubuntu() {
|
|
| 242 |
- [ -e bundles/$VERSION/ubuntu ] || {
|
|
| 243 |
- echo >&2 './hack/make.sh must be run before release_ubuntu' |
|
| 244 |
- exit 1 |
|
| 245 |
- } |
|
| 246 |
- |
|
| 247 |
- # Sign our packages |
|
| 248 |
- dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \ |
|
| 249 |
- --sign builder bundles/$VERSION/ubuntu/*.deb |
|
| 250 |
- |
|
| 251 |
- # Setup the APT repo |
|
| 252 |
- APTDIR=bundles/$VERSION/ubuntu/apt |
|
| 253 |
- mkdir -p $APTDIR/conf $APTDIR/db |
|
| 254 |
- s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true |
|
| 255 |
- cat > $APTDIR/conf/distributions <<EOF |
|
| 256 |
-Codename: docker |
|
| 257 |
-Components: main |
|
| 258 |
-Architectures: amd64 i386 |
|
| 259 |
-EOF |
|
| 260 |
- |
|
| 261 |
- # Add the DEB package to the APT repo |
|
| 262 |
- DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb |
|
| 263 |
- reprepro -b $APTDIR includedeb docker $DEBFILE |
|
| 264 |
- |
|
| 265 |
- # Sign |
|
| 266 |
- for F in $(find $APTDIR -name Release); do |
|
| 267 |
- gpg -u releasedocker --passphrase $GPG_PASSPHRASE \ |
|
| 268 |
- --armor --sign --detach-sign \ |
|
| 269 |
- --output $F.gpg $F |
|
| 270 |
- done |
|
| 271 |
- |
|
| 272 |
- # Upload keys |
|
| 273 |
- s3cmd sync $HOME/.gnupg/ s3://$BUCKET/ubuntu/.gnupg/ |
|
| 274 |
- gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg |
|
| 275 |
- s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg |
|
| 276 |
- |
|
| 277 |
- local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9 |
|
| 278 |
- if [[ $BUCKET == test* ]]; then |
|
| 279 |
- gpgFingerprint=740B314AE3941731B942C66ADF4FD13717AAD7D6 |
|
| 280 |
- fi |
|
| 281 |
- |
|
| 282 |
- # Upload repo |
|
| 283 |
- s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ |
|
| 284 |
- cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index |
|
| 285 |
-# Check that HTTPS transport is available to APT |
|
| 286 |
-if [ ! -e /usr/lib/apt/methods/https ]; then |
|
| 287 |
- apt-get update |
|
| 288 |
- apt-get install -y apt-transport-https |
|
| 289 |
-fi |
|
| 290 |
- |
|
| 291 |
-# Add the repository to your APT sources |
|
| 292 |
-echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list |
|
| 293 |
- |
|
| 294 |
-# Then import the repository key |
|
| 295 |
-apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys $gpgFingerprint |
|
| 296 |
- |
|
| 297 |
-# Install docker |
|
| 298 |
-apt-get update |
|
| 299 |
-apt-get install -y lxc-docker |
|
| 300 |
- |
|
| 301 |
-# |
|
| 302 |
-# Alternatively, just use the curl-able install.sh script provided at $(s3_url) |
|
| 303 |
-# |
|
| 304 |
-EOF |
|
| 305 |
- |
|
| 306 |
- # Add redirect at /ubuntu/info for URL-backwards-compatibility |
|
| 307 |
- rm -rf /tmp/emptyfile && touch /tmp/emptyfile |
|
| 308 |
- s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info |
|
| 309 |
- |
|
| 310 |
- echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu" |
|
| 311 |
-} |
|
| 312 |
- |
|
| 313 |
-# Upload binaries and tgz files to S3 |
|
| 314 |
-release_binaries() {
|
|
| 315 |
- [ -e bundles/$VERSION/cross/linux/amd64/docker-$VERSION ] || {
|
|
| 316 |
- echo >&2 './hack/make.sh must be run before release_binaries' |
|
| 317 |
- exit 1 |
|
| 318 |
- } |
|
| 319 |
- |
|
| 320 |
- for d in bundles/$VERSION/cross/*/*; do |
|
| 321 |
- GOARCH="$(basename "$d")" |
|
| 322 |
- GOOS="$(basename "$(dirname "$d")")" |
|
| 323 |
- release_build "$GOOS" "$GOARCH" |
|
| 324 |
- done |
|
| 325 |
- |
|
| 326 |
- # TODO create redirect from builds/*/i686 to builds/*/i386 |
|
| 327 |
- |
|
| 328 |
- cat <<EOF | write_to_s3 s3://$BUCKET/builds/index |
|
| 329 |
-# To install, run the following command as root: |
|
| 330 |
-curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker |
|
| 331 |
-# Then start docker in daemon mode: |
|
| 332 |
-sudo /usr/local/bin/docker -d |
|
| 333 |
-EOF |
|
| 334 |
- |
|
| 335 |
- # Add redirect at /builds/info for URL-backwards-compatibility |
|
| 336 |
- rm -rf /tmp/emptyfile && touch /tmp/emptyfile |
|
| 337 |
- s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info |
|
| 338 |
- |
|
| 339 |
- if [ -z "$NOLATEST" ]; then |
|
| 340 |
- echo "Advertising $VERSION on $BUCKET as most recent version" |
|
| 341 |
- echo $VERSION | write_to_s3 s3://$BUCKET/latest |
|
| 342 |
- fi |
|
| 343 |
-} |
|
| 344 |
- |
|
| 345 |
-# Upload the index script |
|
| 346 |
-release_index() {
|
|
| 347 |
- sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 s3://$BUCKET/index |
|
| 348 |
-} |
|
| 349 |
- |
|
| 350 |
-release_test() {
|
|
| 351 |
- if [ -e "bundles/$VERSION/test" ]; then |
|
| 352 |
- s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/ |
|
| 353 |
- fi |
|
| 354 |
-} |
|
| 355 |
- |
|
| 356 |
-setup_gpg() {
|
|
| 357 |
- # Make sure that we have our keys |
|
| 358 |
- mkdir -p $HOME/.gnupg/ |
|
| 359 |
- s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ $HOME/.gnupg/ || true |
|
| 360 |
- gpg --list-keys releasedocker >/dev/null || {
|
|
| 361 |
- gpg --gen-key --batch <<EOF |
|
| 362 |
-Key-Type: RSA |
|
| 363 |
-Key-Length: 4096 |
|
| 364 |
-Passphrase: $GPG_PASSPHRASE |
|
| 365 |
-Name-Real: Docker Release Tool |
|
| 366 |
-Name-Email: docker@docker.com |
|
| 367 |
-Name-Comment: releasedocker |
|
| 368 |
-Expire-Date: 0 |
|
| 369 |
-%commit |
|
| 370 |
-EOF |
|
| 371 |
- } |
|
| 372 |
-} |
|
| 373 |
- |
|
| 374 |
-main() {
|
|
| 375 |
- build_all |
|
| 376 |
- setup_s3 |
|
| 377 |
- setup_gpg |
|
| 378 |
- release_binaries |
|
| 379 |
- release_ubuntu |
|
| 380 |
- release_index |
|
| 381 |
- release_test |
|
| 382 |
-} |
|
| 383 |
- |
|
| 384 |
-main |
|
| 385 |
- |
|
| 386 |
-echo |
|
| 387 |
-echo |
|
| 388 |
-echo "Release complete; see $(s3_url)" |
|
| 389 |
-echo |
| 390 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,105 +0,0 @@ |
| 1 |
-#!/bin/sh |
|
| 2 |
- |
|
| 3 |
-# This is a convenience script for reporting issues that include a base |
|
| 4 |
-# template of information. See https://github.com/docker/docker/pull/8845 |
|
| 5 |
- |
|
| 6 |
-set -e |
|
| 7 |
- |
|
| 8 |
-DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"}
|
|
| 9 |
-DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "}
|
|
| 10 |
-DOCKER=${DOCKER:-"docker"}
|
|
| 11 |
-DOCKER_COMMAND="${DOCKER}"
|
|
| 12 |
-export DOCKER_COMMAND |
|
| 13 |
- |
|
| 14 |
-# pulled from https://gist.github.com/cdown/1163649 |
|
| 15 |
-function urlencode() {
|
|
| 16 |
- # urlencode <string> |
|
| 17 |
- |
|
| 18 |
- local length="${#1}"
|
|
| 19 |
- for (( i = 0; i < length; i++ )); do |
|
| 20 |
- local c="${1:i:1}"
|
|
| 21 |
- case $c in |
|
| 22 |
- [a-zA-Z0-9.~_-]) printf "$c" ;; |
|
| 23 |
- *) printf '%%%02X' "'$c" |
|
| 24 |
- esac |
|
| 25 |
- done |
|
| 26 |
-} |
|
| 27 |
- |
|
| 28 |
-function template() {
|
|
| 29 |
-# this should always match the template from CONTRIBUTING.md |
|
| 30 |
- cat <<- EOM |
|
| 31 |
- Description of problem: |
|
| 32 |
- |
|
| 33 |
- |
|
| 34 |
- \`docker version\`: |
|
| 35 |
- `${DOCKER_COMMAND} -D version`
|
|
| 36 |
- |
|
| 37 |
- |
|
| 38 |
- \`docker info\`: |
|
| 39 |
- `${DOCKER_COMMAND} -D info`
|
|
| 40 |
- |
|
| 41 |
- |
|
| 42 |
- \`uname -a\`: |
|
| 43 |
- `uname -a` |
|
| 44 |
- |
|
| 45 |
- |
|
| 46 |
- Environment details (AWS, VirtualBox, physical, etc.): |
|
| 47 |
- |
|
| 48 |
- |
|
| 49 |
- How reproducible: |
|
| 50 |
- |
|
| 51 |
- |
|
| 52 |
- Steps to Reproduce: |
|
| 53 |
- 1. |
|
| 54 |
- 2. |
|
| 55 |
- 3. |
|
| 56 |
- |
|
| 57 |
- |
|
| 58 |
- Actual Results: |
|
| 59 |
- |
|
| 60 |
- |
|
| 61 |
- Expected Results: |
|
| 62 |
- |
|
| 63 |
- |
|
| 64 |
- Additional info: |
|
| 65 |
- |
|
| 66 |
- |
|
| 67 |
- EOM |
|
| 68 |
-} |
-
-function format_issue_url() {
-	if [ ${#@} -ne 2 ] ; then
-		return 1
-	fi
-	local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}")
-	local issue_body=$(urlencode "${2}")
-	echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}"
-}
-
-
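`format_issue_url` works because github.com pre-fills a new issue from the `title` and `body` query parameters of the `/issues/new` page. The same URL can be built with any percent-encoder; a sketch using `jq` instead of the hand-rolled function above (assumes `jq` is installed; the title text is just an example):

#!/usr/bin/env bash
# jq's @uri filter percent-encodes its raw string input.
enc() { printf '%s' "$1" | jq -sRr @uri; }
echo "https://github.com/docker/docker/issues/new?title=$(enc 'Report: example')&body=$(enc "$(uname -a)")"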
-echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
-read -r -n 1 use_sudo
-echo ""
-
-if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
-	export DOCKER_COMMAND="sudo ${DOCKER}"
-fi
-
-echo -ne "Title of new issue?: "
-read -r issue_title
-echo ""
-
-issue_url=$(format_issue_url "${issue_title}" "$(template)")
-
-if which xdg-open 2>/dev/null >/dev/null ; then
-	echo -ne "Would you like to launch this report in your browser? [Y|n]: "
-	read -r -n 1 launch_now
-	echo ""
-
-	if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then
-		xdg-open "${issue_url}"
-	fi
-fi
-
-echo "If you would like to open the URL manually, you can open this link in your browser: ${issue_url}"
-
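One portability note on the browser launch above: `which` is not specified by POSIX and is absent from some minimal systems, whereas `command -v` is a shell builtin that does the same probe. A sketch (the URL is a placeholder):

#!/bin/sh
# Prefer the POSIX builtin over `which` when probing for xdg-open.
if command -v xdg-open >/dev/null 2>&1; then
    xdg-open "https://example.com/"
else
    echo "no xdg-open found; open the URL manually" >&2
fi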
deleted file mode 100755
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-
-## Run this script from the root of the docker repository
-## to query project stats useful to the maintainers.
-## You will need to install `pulls` and `issues` from
-## http://github.com/crosbymichael/pulls
-
-set -e
-
-echo -n "Open pulls: "
-PULLS=$(pulls | wc -l); let PULLS=$PULLS-1
-echo $PULLS
-
-echo -n "Pulls alru: "
-pulls alru
-
-echo -n "Open issues: "
-ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1
-echo $ISSUES
-
-echo -n "Issues alru: "
-issues alru
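The `let PULLS=$PULLS-1` dance subtracts the single header line that `pulls` and `issues list` print before their data rows. The same count reads more directly with `$(( ... ))` arithmetic; a sketch using `df` as a stand-in for any header-printing command:

#!/usr/bin/env bash
# Count data rows of a command that prints one header line.
rows=$(( $(df | wc -l) - 1 ))
echo "Mounted filesystems: $rows"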
deleted file mode 100755
@@ -1,75 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-cd "$(dirname "$BASH_SOURCE")/.."
-
-# Downloads dependencies into vendor/ directory
-mkdir -p vendor
-cd vendor
-
-clone() {
-	vcs=$1
-	pkg=$2
-	rev=$3
-
-	pkg_url=https://$pkg
-	target_dir=src/$pkg
-
-	echo -n "$pkg @ $rev: "
-
-	if [ -d $target_dir ]; then
-		echo -n 'rm old, '
-		rm -fr $target_dir
-	fi
-
-	echo -n 'clone, '
-	case $vcs in
-		git)
-			git clone --quiet --no-checkout $pkg_url $target_dir
-			( cd $target_dir && git reset --quiet --hard $rev )
-			;;
-		hg)
-			hg clone --quiet --updaterev $rev $pkg_url $target_dir
-			;;
-	esac
-
-	echo -n 'rm VCS, '
-	( cd $target_dir && rm -rf .{git,hg} )
-
-	echo done
-}
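`clone()` pins a dependency to an exact revision by cloning without a checkout, hard-resetting to the requested commit or tag, and then deleting the `.git`/`.hg` directory so only plain source is vendored. The git branch of that logic, runnable in isolation (repository and revision taken from the list below):

#!/usr/bin/env bash
set -e
pkg=github.com/kr/pty
rev=05017fcccf
# --no-checkout defers populating the worktree; the hard reset then
# checks out exactly the pinned revision rather than the default branch.
git clone --quiet --no-checkout "https://$pkg" "src/$pkg"
( cd "src/$pkg" && git reset --quiet --hard "$rev" )
rm -rf "src/$pkg/.git"   # keep only the pinned source tree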
-
-clone git github.com/kr/pty 05017fcccf
-
-clone git github.com/gorilla/context 14f550f51a
-
-clone git github.com/gorilla/mux 136d54f81f
-
-clone git github.com/tchap/go-patricia v1.0.1
-
-clone hg code.google.com/p/go.net 84a4013f96e0
-
-clone hg code.google.com/p/gosqlite 74691fb6f837
-
-clone git github.com/docker/libtrust 230dfd18c232
-
-clone git github.com/Sirupsen/logrus v0.6.6
-
-clone git github.com/go-fsnotify/fsnotify v1.0.4
-
-# get Go tip's archive/tar, for xattr support and improved performance
-# TODO after Go 1.4 drops, bump our minimum supported version and drop this vendored dep
-if [ "$1" = '--go' ]; then
-	# Go takes forever and a half to clone, so we only redownload it when explicitly requested via the "--go" flag to this script.
-	clone hg code.google.com/p/go 1b17b3426e3c
-	mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar
-	rm -rf src/code.google.com/p/go
-	mkdir -p src/code.google.com/p/go/src/pkg/archive
-	mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
-fi
-
-clone git github.com/docker/libcontainer 5d6c507d7cfeff97172deedf3db13b5295bcacef
-# see src/github.com/docker/libcontainer/update-vendor.sh which is the "source of truth" for libcontainer deps (just like this file)
-rm -rf src/github.com/docker/libcontainer/vendor
-eval "$(grep '^clone ' src/github.com/docker/libcontainer/update-vendor.sh | grep -v 'github.com/codegangsta/cli' | grep -v 'github.com/Sirupsen/logrus')"
-# we exclude "github.com/codegangsta/cli" here because it's only needed for "nsinit", which Docker doesn't include
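That final `eval` is the trick that kept the two vendor scripts in sync: it greps every line beginning with `clone ` out of libcontainer's own `update-vendor.sh`, drops the two exclusions, and replays the survivors through the local `clone()` defined above. A sketch of the mechanism with a stubbed `clone()`, assuming the libcontainer checkout from the previous step exists:

#!/usr/bin/env bash
# Replay another script's "clone" lines through a local function.
clone() { echo "would vendor: $*"; }   # stub standing in for the real clone()
src=src/github.com/docker/libcontainer/update-vendor.sh
eval "$(grep '^clone ' "$src" | grep -v 'github.com/codegangsta/cli' | grep -v 'github.com/Sirupsen/logrus')"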