git grep --name-only '^#!' | egrep -v '(vendor|\.go|Jenkinsfile)' | xargs shfmt -w -bn -ci -sr
Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
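
For reference, the shfmt flags used above mean: -w rewrites each file in place, -bn lets binary operators such as && and | begin a continuation line, -ci indents switch-case branches, and -sr puts a space after redirection operators. A minimal before/after sketch (the fragment is taken from the check-config.sh hunks below):

    # before
    codes=( "${codes[@]}" '1' )
    if command -v "$1" >/dev/null 2>&1; then

    # after: shfmt -bn -ci -sr
    codes=("${codes[@]}" '1')
    if command -v "$1" > /dev/null 2>&1; then
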
@@ -43,7 +43,7 @@ is_set_as_module() {
 color() {
 	local codes=()
 	if [ "$1" = 'bold' ]; then
-		codes=( "${codes[@]}" '1' )
+		codes=("${codes[@]}" '1')
 		shift
 	fi
 	if [ "$#" -gt 0 ]; then
@@ -60,7 +60,7 @@ color() {
 			white) code=37 ;;
 		esac
 		if [ "$code" ]; then
-			codes=( "${codes[@]}" "$code" )
+			codes=("${codes[@]}" "$code")
 		fi
 	fi
 	local IFS=';'
@@ -98,12 +98,13 @@ check_flag() {
 
 check_flags() {
	for flag in "$@"; do
-		echo -n "- "; check_flag "$flag"
+		echo -n "- "
+		check_flag "$flag"
 	done
 }
 
 check_command() {
-	if command -v "$1" >/dev/null 2>&1; then
+	if command -v "$1" > /dev/null 2>&1; then
 		wrap_good "$1 command" 'available'
 	else
 		wrap_bad "$1 command" 'missing'
@@ -121,7 +122,7 @@ check_device() {
 }
 
 check_distro_userns() {
-	source /etc/os-release 2>/dev/null || /bin/true
+	source /etc/os-release 2> /dev/null || /bin/true
 	if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then
 		# this is a CentOS7 or RHEL7 system
 		grep -q "user_namespace.enable=1" /proc/cmdline || {
@@ -156,7 +157,7 @@ echo 'Generally Necessary:'
 echo -n '- '
 cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
 cgroupDir="$(dirname "$cgroupSubsystemDir")"
-if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
+if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
 	echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
 else
 	if [ "$cgroupSubsystemDir" ]; then
@@ -168,7 +169,7 @@ else
 	echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
 fi
 
-if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
+if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
 	echo -n '- '
 	if command -v apparmor_parser &> /dev/null; then
 		wrap_good 'apparmor' 'enabled and tools installed'
@@ -199,8 +200,8 @@ flags=(
 	POSIX_MQUEUE
 )
 check_flags "${flags[@]}"
-if [ "$kernelMajor" -lt 4 ] || ( [ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ] ); then
-	check_flags DEVPTS_MULTIPLE_INSTANCES
+if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
+	check_flags DEVPTS_MULTIPLE_INSTANCES
 fi
 
 echo
@@ -228,12 +229,15 @@ echo 'Optional Features:'
 }
 {
 	if is_set LEGACY_VSYSCALL_NATIVE; then
-		echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
+		echo -n "- "
+		wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
 		echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
 	elif is_set LEGACY_VSYSCALL_EMULATE; then
-		echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
+		echo -n "- "
+		wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
 	elif is_set LEGACY_VSYSCALL_NONE; then
-		echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
+		echo -n "- "
+		wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
 		echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
 		echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
 		echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
@@ -245,15 +249,15 @@ echo 'Optional Features:'
 	fi
 }
 
-if [ "$kernelMajor" -lt 4 ] || ( [ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ] ); then
+if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
 	check_flags MEMCG_KMEM
 fi
 
-if [ "$kernelMajor" -lt 3 ] || ( [ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ] ); then
+if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
 	check_flags RESOURCE_COUNTERS
 fi
 
-if [ "$kernelMajor" -lt 3 ] || ( [ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ] ); then
+if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
 	netprio=NETPRIO_CGROUP
 else
 	netprio=CGROUP_NET_PRIO
@@ -270,7 +274,7 @@ flags=(
 	IP_VS_NFCT
 	IP_VS_PROTO_TCP
 	IP_VS_PROTO_UDP
-	IP_VS_RR
+	IP_VS_RR
 )
 check_flags "${flags[@]}"
 
@@ -295,7 +299,7 @@ echo " - \"$(wrap_color 'overlay' blue)\":"
 check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
 echo ' Optional (for encrypted networks):'
 check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
-	XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
+	XFRM XFRM_USER XFRM_ALGO INET_ESP INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
 echo " - \"$(wrap_color 'ipvlan' blue)\":"
 check_flags IPVLAN | sed 's/^/ /'
 echo " - \"$(wrap_color 'macvlan' blue)\":"
@@ -334,9 +338,12 @@ check_flags OVERLAY_FS | sed 's/^/ /'
 EXITCODE=0
 
 echo " - \"$(wrap_color 'zfs' blue)\":"
-echo -n " - "; check_device /dev/zfs
-echo -n " - "; check_command zfs
-echo -n " - "; check_command zpool
+echo -n " - "
+check_device /dev/zfs
+echo -n " - "
+check_command zfs
+echo -n " - "
+check_command zpool
 [ "$EXITCODE" = 0 ] && STORAGE=0
 EXITCODE=0
 
@@ -345,8 +352,7 @@ EXITCODE=$CODE
 
 echo
 
-check_limit_over()
-{
+check_limit_over() {
 	if [ "$(cat "$1")" -le "$2" ]; then
 		wrap_bad "- $1" "$(cat "$1")"
 		wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
@@ -16,37 +16,38 @@ set -e
 set -o pipefail
 
 errexit() {
-	echo "$1"
-	exit 1
+	echo "$1"
+	exit 1
 }
 
 BUNDLE="bundles/$(cat VERSION)"
 
-bundle_files(){
-	# prefer dynbinary if exists
-	for f in dockerd docker-proxy; do
-		if [ -d $BUNDLE/dynbinary-daemon ]; then
-			echo $BUNDLE/dynbinary-daemon/$f
+bundle_files() {
+	# prefer dynbinary if exists
+	for f in dockerd docker-proxy; do
+		if [ -d $BUNDLE/dynbinary-daemon ]; then
+			echo $BUNDLE/dynbinary-daemon/$f
+		else
+			echo $BUNDLE/binary-daemon/$f
+		fi
+	done
+	for f in containerd ctr containerd-shim docker-init runc; do
+		echo $BUNDLE/binary-daemon/$f
+	done
+	if [ -d $BUNDLE/dynbinary-client ]; then
+		echo $BUNDLE/dynbinary-client/docker
 	else
-		echo $BUNDLE/binary-daemon/$f
+		echo $BUNDLE/binary-client/docker
 	fi
-	done
-	for f in containerd ctr containerd-shim docker-init runc; do
-		echo $BUNDLE/binary-daemon/$f
-	done
-	if [ -d $BUNDLE/dynbinary-client ]; then
-		echo $BUNDLE/dynbinary-client/docker
-	else
-		echo $BUNDLE/binary-client/docker
-	fi
 }
 
-control_docker(){
-	m=$1; op=$2
-	# NOTE: `docker-machine ssh $m sh -c "foo bar"` does not work
-	# (but `docker-machine ssh $m sh -c "foo\ bar"` works)
-	# Anyway we avoid using `sh -c` here for avoiding confusion
-	cat <<EOF | docker-machine ssh $m sudo sh
+control_docker() {
+	m=$1
+	op=$2
+	# NOTE: `docker-machine ssh $m sh -c "foo bar"` does not work
+	# (but `docker-machine ssh $m sh -c "foo\ bar"` works)
+	# Anyway we avoid using `sh -c` here for avoiding confusion
+	cat << EOF | docker-machine ssh $m sudo sh
 if command -v systemctl > /dev/null; then
 	systemctl $op docker
 elif command -v service > /dev/null; then
@@ -60,52 +61,58 @@ fi
 EOF
 }
 
-detect_prefix(){
-	m=$1
-	script='dirname $(dirname $(which dockerd))'
-	echo $script | docker-machine ssh $m sh
+detect_prefix() {
+	m=$1
+	script='dirname $(dirname $(which dockerd))'
+	echo $script | docker-machine ssh $m sh
 }
 
-install_to(){
-	m=$1; shift; files=$@
-	echo "$m: detecting docker"
-	prefix=$(detect_prefix $m)
-	echo "$m: detected docker on $prefix"
-	echo "$m: stopping docker"
-	control_docker $m stop
-	echo "$m: installing docker"
-	# NOTE: GNU tar is required because we use --transform here
-	# TODO: compression (should not be default)
-	tar ch --transform 's/.*\///' $files | docker-machine ssh $m sudo tar Cx $prefix/bin
-	echo "$m: starting docker"
-	control_docker $m start
-	echo "$m: done"
+install_to() {
+	m=$1
+	shift
+	files=$@
+	echo "$m: detecting docker"
+	prefix=$(detect_prefix $m)
+	echo "$m: detected docker on $prefix"
+	echo "$m: stopping docker"
+	control_docker $m stop
+	echo "$m: installing docker"
+	# NOTE: GNU tar is required because we use --transform here
+	# TODO: compression (should not be default)
+	tar ch --transform 's/.*\///' $files | docker-machine ssh $m sudo tar Cx $prefix/bin
+	echo "$m: starting docker"
+	control_docker $m start
+	echo "$m: done"
 }
 
-check_prereq(){
-	command -v docker-machine > /dev/null || errexit "docker-machine not installed"
-	( tar --version | grep GNU > /dev/null ) || errexit "GNU tar not installed"
+check_prereq() {
+	command -v docker-machine > /dev/null || errexit "docker-machine not installed"
+	(tar --version | grep GNU > /dev/null) || errexit "GNU tar not installed"
 }
 
 case "$1" in
-"install")
-	shift; machines=$@
-	check_prereq
-	files=$(bundle_files)
-	echo "Files to be installed:"
-	for f in $files; do echo $f; done
-	pids=()
-	for m in $machines; do
-		install_to $m $files &
-		pids+=($!)
-	done
-	status=0
-	for pid in ${pids[@]}; do
-		wait $pid || { status=$?; echo "background process $pid failed with exit status $status"; }
-	done
-	exit $status
-	;;
-*)
-	errexit "Usage: $0 install MACHINES"
-	;;
+	"install")
+		shift
+		machines=$@
+		check_prereq
+		files=$(bundle_files)
+		echo "Files to be installed:"
+		for f in $files; do echo $f; done
+		pids=()
+		for m in $machines; do
+			install_to $m $files &
+			pids+=($!)
+		done
+		status=0
+		for pid in ${pids[@]}; do
+			wait $pid || {
+				status=$?
+				echo "background process $pid failed with exit status $status"
+			}
+		done
+		exit $status
+		;;
	*)
+		errexit "Usage: $0 install MACHINES"
+		;;
 esac
@@ -25,7 +25,7 @@ fi
 
 rootlesskit=""
 for f in docker-rootlesskit rootlesskit; do
-	if which $f >/dev/null 2>&1; then
+	if which $f > /dev/null 2>&1; then
 		rootlesskit=$f
 		break
 	fi
@@ -43,7 +43,7 @@ fi
 net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
 mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
 if [ -z $net ]; then
-	if which slirp4netns >/dev/null 2>&1; then
+	if which slirp4netns > /dev/null 2>&1; then
 		if slirp4netns --help | grep -- --disable-host-loopback; then
 			net=slirp4netns
 			if [ -z $mtu ]; then
@@ -54,7 +54,7 @@ if [ -z $net ]; then
 	fi
 fi
 if [ -z $net ]; then
-	if which vpnkit >/dev/null 2>&1; then
+	if which vpnkit > /dev/null 2>&1; then
 		net=vpnkit
 	else
 		echo "Either slirp4netns (v0.3+) or vpnkit needs to be installed"
@@ -2,21 +2,21 @@
 set -e
 
 if ! command -v qemu-nbd &> /dev/null; then
-	echo >&2 'error: "qemu-nbd" not found!'
-	exit 1
+	echo >&2 'error: "qemu-nbd" not found!'
+	exit 1
 fi
 
 usage() {
-	echo "Convert disk image to docker image"
-	echo ""
-	echo "usage: $0 image-name disk-image-file [ base-image ]"
-	echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img"
-	echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04"
+	echo "Convert disk image to docker image"
+	echo ""
+	echo "usage: $0 image-name disk-image-file [ base-image ]"
+	echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img"
+	echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04"
 }
 
 if [ "$#" -lt 2 ]; then
-	usage
-	exit 1
+	usage
+	exit 1
 fi
 
 CURDIR=$(pwd)
@@ -24,7 +24,7 @@ CURDIR=$(pwd)
 image_name="${1%:*}"
 image_tag="${1#*:}"
 if [ "$image_tag" == "$1" ]; then
-	image_tag="latest"
+	image_tag="latest"
 fi
 
 disk_image_file="$2"
@@ -35,10 +35,10 @@ block_device=/dev/nbd0
 builddir=$(mktemp -d)
 
 cleanup() {
-	umount "$builddir/disk_image" || true
-	umount "$builddir/workdir" || true
-	qemu-nbd -d $block_device &> /dev/null || true
-	rm -rf $builddir
+	umount "$builddir/disk_image" || true
+	umount "$builddir/workdir" || true
+	qemu-nbd -d $block_device &> /dev/null || true
+	rm -rf $builddir
 }
 trap cleanup EXIT
 
@@ -55,18 +55,18 @@ base_image_mounts=""
 
 # Unpack base image
 if [ -n "$docker_base_image" ]; then
-	mkdir -p "$builddir/base"
-	docker pull "$docker_base_image"
-	docker save "$docker_base_image" | tar -xC "$builddir/base"
-
-	image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image")
-	while [ -n "$image_id" ]; do
-		mkdir -p "$builddir/base/$image_id/layer"
-		tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer"
-
-		base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh"
-		image_id=$(docker inspect -f "{{.Parent}}" "$image_id")
-	done
+	mkdir -p "$builddir/base"
+	docker pull "$docker_base_image"
+	docker save "$docker_base_image" | tar -xC "$builddir/base"
+
+	image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image")
+	while [ -n "$image_id" ]; do
+		mkdir -p "$builddir/base/$image_id/layer"
+		tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer"
+
+		base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh"
+		image_id=$(docker inspect -f "{{.Parent}}" "$image_id")
+	done
 fi
 
 # Mount work directory
@@ -75,20 +75,20 @@ mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/auf
 # Update files
 cd $builddir
 LC_ALL=C diff -rq disk_image workdir \
-	| sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \
-	| while read action entry; do
-		case "$action" in
-			ADD|UPDATE)
-				cp -a "disk_image$entry" "workdir$entry"
-				;;
-			DEL)
-				rm -rf "workdir$entry"
-				;;
-			*)
-				echo "Error: unknown diff line: $action $entry" >&2
-				;;
-		esac
-	done
+	| sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \
+	| while read action entry; do
+		case "$action" in
+			ADD | UPDATE)
+				cp -a "disk_image$entry" "workdir$entry"
+				;;
+			DEL)
+				rm -rf "workdir$entry"
+				;;
+			*)
+				echo "Error: unknown diff line: $action $entry" >&2
+				;;
+		esac
+	done
 
 # Pack new image
 new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)"
@@ -96,15 +96,15 @@ mkdir -p $builddir/result/$new_image_id
 cd diff
 tar -cf $builddir/result/$new_image_id/layer.tar *
 echo "1.0" > $builddir/result/$new_image_id/VERSION
-cat > $builddir/result/$new_image_id/json <<-EOS
-{ "docker_version": "1.4.1"
-, "id": "$new_image_id"
-, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)"
+cat > $builddir/result/$new_image_id/json <<- EOS
+	{ "docker_version": "1.4.1"
+	, "id": "$new_image_id"
+	, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)"
 EOS
 
 if [ -n "$docker_base_image" ]; then
-	image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image")
-	echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json
+	image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image")
+	echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json
 fi
 
 echo "}" >> $builddir/result/$new_image_id/json
@@ -59,13 +59,13 @@ while [ $# -gt 0 ]; do
 		fi
 
 		IFS=','
-		ancestry=( ${ancestryJson//[\[\] \"]/} )
+		ancestry=(${ancestryJson//[\[\] \"]/})
 		unset IFS
 
 		if [ -s "$dir/tags-$imageFile.tmp" ]; then
 			echo -n ', ' >> "$dir/tags-$imageFile.tmp"
 		else
-			images=( "${images[@]}" "$image" )
+			images=("${images[@]}" "$image")
 		fi
 		echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp"
 
@@ -49,11 +49,15 @@ authService='registry.docker.io'
 
 # https://github.com/moby/moby/issues/33700
 fetch_blob() {
-	local token="$1"; shift
-	local image="$1"; shift
-	local digest="$1"; shift
-	local targetFile="$1"; shift
-	local curlArgs=( "$@" )
+	local token="$1"
+	shift
+	local image="$1"
+	shift
+	local digest="$1"
+	shift
+	local targetFile="$1"
+	shift
+	local curlArgs=("$@")
 
 	local curlHeaders="$(
 		curl -S "${curlArgs[@]}" \
@@ -63,7 +67,7 @@ fetch_blob() {
 			-D-
 	)"
 	curlHeaders="$(echo "$curlHeaders" | tr -d '\r')"
-	if grep -qE "^HTTP/[0-9].[0-9] 3" <<<"$curlHeaders"; then
+	if grep -qE "^HTTP/[0-9].[0-9] 3" <<< "$curlHeaders"; then
 		rm -f "$targetFile"
 
 		local blobRedirect="$(echo "$curlHeaders" | awk -F ': ' 'tolower($1) == "location" { print $2; exit }')"
@@ -81,7 +85,8 @@ fetch_blob() {
 
 # handle 'application/vnd.docker.distribution.manifest.v2+json' manifest
 handle_single_manifest_v2() {
-	local manifestJson="$1"; shift
+	local manifestJson="$1"
+	shift
 
 	local configDigest="$(echo "$manifestJson" | jq --raw-output '.config.digest')"
 	local imageId="${configDigest#*:}" # strip off "sha256:"
@@ -91,7 +96,7 @@ handle_single_manifest_v2() {
 
 	local layersFs="$(echo "$manifestJson" | jq --raw-output --compact-output '.layers[]')"
 	local IFS="$newlineIFS"
-	local layers=( $layersFs )
+	local layers=($layersFs)
 	unset IFS
 
 	echo "Downloading '$imageIdentifier' (${#layers[@]} layers)..."
@@ -116,7 +121,7 @@ handle_single_manifest_v2() {
 		local parentJson="$(printf ', parent: "%s"' "$parentId")"
 		local addJson="$(printf '{ id: "%s"%s }' "$layerId" "${parentId:+$parentJson}")"
 		# this starter JSON is taken directly from Docker's own "docker save" output for unimportant layers
-		jq "$addJson + ." > "$dir/$layerId/json" <<-'EOJSON'
+		jq "$addJson + ." > "$dir/$layerId/json" <<- 'EOJSON'
 			{
 				"created": "0001-01-01T00:00:00Z",
 				"container_config": {
@@ -145,7 +150,7 @@ handle_single_manifest_v2() {
 		case "$layerMediaType" in
 			application/vnd.docker.image.rootfs.diff.tar.gzip)
 				local layerTar="$layerId/layer.tar"
-				layerFiles=( "${layerFiles[@]}" "$layerTar" )
+				layerFiles=("${layerFiles[@]}" "$layerTar")
 				# TODO figure out why "-C -" doesn't work here
 				# "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume."
 				# "HTTP/1.1 416 Requested Range Not Satisfiable"
@@ -179,7 +184,7 @@ handle_single_manifest_v2() {
 			Layers: '"$(echo '[]' | jq --raw-output ".$(for layerFile in "${layerFiles[@]}"; do echo " + [ \"$layerFile\" ]"; done)")"'
 		}'
 	)"
-	manifestJsonEntries=( "${manifestJsonEntries[@]}" "$manifestJsonEntry" )
+	manifestJsonEntries=("${manifestJsonEntries[@]}" "$manifestJsonEntry")
 }
 
 while [ $# -gt 0 ]; do
@@ -227,7 +232,7 @@ while [ $# -gt 0 ]; do
 		application/vnd.docker.distribution.manifest.list.v2+json)
 			layersFs="$(echo "$manifestJson" | jq --raw-output --compact-output '.manifests[]')"
 			IFS="$newlineIFS"
-			layers=( $layersFs )
+			layers=($layersFs)
 			unset IFS
 
 			found=""
@@ -273,7 +278,7 @@ while [ $# -gt 0 ]; do
 
 			layersFs="$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum')"
 			IFS="$newlineIFS"
-			layers=( $layersFs )
+			layers=($layersFs)
 			unset IFS
 
 			history="$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]')"
@@ -314,7 +319,7 @@ while [ $# -gt 0 ]; do
 	if [ -s "$dir/tags-$imageFile.tmp" ]; then
 		echo -n ', ' >> "$dir/tags-$imageFile.tmp"
 	else
-		images=( "${images[@]}" "$image" )
+		images=("${images[@]}" "$image")
 	fi
 	echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp"
 done
@@ -8,9 +8,9 @@
 # $> ./generate_aliases <mailmap_file> > aliases
 #
 
-cat $1 | \
-	grep -v '^#' | \
-	sed 's/^[^<]*<\([^>]*\)>/\1/' | \
-	grep '<.*>' | sed -e 's/[<>]/ /g' | \
-	awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \
-	sort | uniq
+cat $1 \
+	| grep -v '^#' \
+	| sed 's/^[^<]*<\([^>]*\)>/\1/' \
+	| grep '<.*>' | sed -e 's/[<>]/ /g' \
+	| awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' \
+	| sort | uniq
@@ -22,7 +22,7 @@ start_pre() {
 }
 
 reload() {
-	ebegin "Reloading ${RC_SVCNAME}"
-	start-stop-daemon --signal HUP --pidfile "${pidfile}"
-	eend $? "Failed to stop ${RC_SVCNAME}"
+	ebegin "Reloading ${RC_SVCNAME}"
+	start-stop-daemon --signal HUP --pidfile "${pidfile}"
+	eend $? "Failed to stop ${RC_SVCNAME}"
 }
@@ -46,7 +46,7 @@ fi
 
 check_init() {
 	# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly)
-	if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then
+	if [ -x /sbin/initctl ] && /sbin/initctl version 2> /dev/null | grep -q upstart; then
 		log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1"
 		exit 1
 	fi
@@ -85,7 +85,7 @@ cgroupfs_mount() {
 case "$1" in
 	start)
 		check_init
-		
+
 		fail_unless_root
 
 		cgroupfs_mount
@@ -110,9 +110,9 @@ case "$1" in
 			--pidfile "$DOCKER_SSD_PIDFILE" \
 			--make-pidfile \
 			-- \
-				-p "$DOCKER_PIDFILE" \
-				$DOCKER_OPTS \
-				>> "$DOCKER_LOGFILE" 2>&1
+			-p "$DOCKER_PIDFILE" \
+			$DOCKER_OPTS \
+			>> "$DOCKER_LOGFILE" 2>&1
 		log_end_msg $?
 		;;
 
@@ -131,7 +131,7 @@ case "$1" in
 	restart)
 		check_init
 		fail_unless_root
-		docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
+		docker_pid=$(cat "$DOCKER_SSD_PIDFILE" 2> /dev/null)
 		[ -n "$docker_pid" ] \
 			&& ps -p $docker_pid > /dev/null 2>&1 \
 			&& $0 stop
@@ -109,10 +109,9 @@ rh_status() {
 }
 
 rh_status_q() {
-	rh_status >/dev/null 2>&1
+	rh_status > /dev/null 2>&1
 }
 
-
 check_for_cleanup() {
 	if [ -f ${pidfile} ]; then
 		/bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile}
@@ -141,13 +140,14 @@ case "$1" in
 	status)
 		rh_status
 		;;
-	condrestart|try-restart)
+	condrestart | try-restart)
 		rh_status_q || exit 0
 		restart
 		;;
 	*)
 		echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
 		exit 2
+		;;
 esac
 
 exit $?
@@ -20,26 +20,26 @@ DATABASE_KEY="$DATABASE/com.docker.driver.amd64-linux/bundle"
 [ -d "$DATABASE" ] || errexit "Docker for Mac must be installed for this script"
 
 case "$1" in
-"install")
-	[ -d "$BUNDLE" ] || errexit "cannot find bundle $BUNDLE"
-	[ -e "$CLIENT_PATH" ] || errexit "you need to run make cross first"
-	[ -e "$BUNDLE/binary-daemon/dockerd" ] || errexit "you need to build binaries first"
-	[ -f "$BUNDLE/binary-client/docker" ] || errexit "you need to build binaries first"
-	git -C "$DATABASE" reset --hard >/dev/null
-	echo "$BUNDLE_PATH" > "$DATABASE_KEY"
-	git -C "$DATABASE" add "$DATABASE_KEY"
-	git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH"
-	rm -f /usr/local/bin/docker
-	cp "$CLIENT_PATH" /usr/local/bin
-	echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults."
-	;;
-"undo")
-	git -C "$DATABASE" reset --hard >/dev/null
-	[ -f "$DATABASE_KEY" ] || errexit "bundle not set"
-	git -C "$DATABASE" rm "$DATABASE_KEY"
-	git -C "$DATABASE" commit -m "remove bundle"
-	rm -f /usr/local/bin/docker
-	ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin
-	echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended."
-	;;
+	"install")
+		[ -d "$BUNDLE" ] || errexit "cannot find bundle $BUNDLE"
+		[ -e "$CLIENT_PATH" ] || errexit "you need to run make cross first"
+		[ -e "$BUNDLE/binary-daemon/dockerd" ] || errexit "you need to build binaries first"
+		[ -f "$BUNDLE/binary-client/docker" ] || errexit "you need to build binaries first"
+		git -C "$DATABASE" reset --hard > /dev/null
+		echo "$BUNDLE_PATH" > "$DATABASE_KEY"
+		git -C "$DATABASE" add "$DATABASE_KEY"
+		git -C "$DATABASE" commit -m "update bundle to $BUNDLE_PATH"
+		rm -f /usr/local/bin/docker
+		cp "$CLIENT_PATH" /usr/local/bin
+		echo "Bundle installed. Restart Docker to use. To uninstall, reset Docker to factory defaults."
+		;;
+	"undo")
+		git -C "$DATABASE" reset --hard > /dev/null
+		[ -f "$DATABASE_KEY" ] || errexit "bundle not set"
+		git -C "$DATABASE" rm "$DATABASE_KEY"
+		git -C "$DATABASE" commit -m "remove bundle"
+		rm -f /usr/local/bin/docker
+		ln -s "$HOME/Library/Group Containers/group.com.docker/bin/docker" /usr/local/bin
+		echo "Bundle removed. Using dev versions may cause issues, a reset to factory defaults is recommended."
+		;;
 esac
@@ -19,13 +19,13 @@ tmp() {
 }
 
 apkv() {
-	curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
-		grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
+	curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz \
+		| grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
 }
 
 getapk() {
-	curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk |
-		tar -xz -C $TMP sbin/apk.static
+	curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk \
+		| tar -xz -C $TMP sbin/apk.static
 }
 
 mkbase() {
@@ -7,12 +7,12 @@ set -e
 # reset umask to default
 umask 022
 
-hash pacstrap &>/dev/null || {
+hash pacstrap &> /dev/null || {
 	echo "Could not find pacstrap. Run pacman -S arch-install-scripts"
 	exit 1
 }
 
-hash expect &>/dev/null || {
+hash expect &> /dev/null || {
 	echo "Could not find expect. Run pacman -S expect"
 	exit 1
 }
@@ -64,7 +64,7 @@ PKGIGNORE=(
 
 PKGREMOVE=(
 	gawk
-	haveged
+	haveged
 	less
 	linux-libre
 	linux-libre-firmware
@@ -79,7 +79,7 @@ PKGREMOVE="${PKGREMOVE[*]}"
 arch="$(uname -m)"
 case "$arch" in
 	armv*)
-		if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then
+		if pacman -Q archlinuxarm-keyring > /dev/null 2>&1; then
 			pacman-key --init
 			pacman-key --populate archlinuxarm
 		else
@@ -107,7 +107,7 @@ esac
 
 export PACMAN_MIRRORLIST
 
-expect <<EOF
+expect << EOF
 set send_slow {1 .1}
 proc send {ignore arg} {
 	sleep .1
@@ -5,9 +5,9 @@
 
 set -e
 
-die () {
-	echo >&2 "$@"
-	exit 1
+die() {
+	echo >&2 "$@"
+	exit 1
 }
 
 [ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. Usage: ./mkimage-crux.sh /path/to/iso"
@@ -33,15 +33,15 @@ export PATH="$TMP/usr/bin:$PATH"
 mkdir -p $ROOTFS/var/lib/pkg
 touch $ROOTFS/var/lib/pkg/db
 for pkg in $CRUX/crux/core/*; do
-	pkgadd -r $ROOTFS $pkg
+	pkgadd -r $ROOTFS $pkg
 done
 
 # Remove agetty and inittab config
 if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then
-	echo "Removing agetty from /etc/inittab ..."
-	chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab
-	chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab
-	chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab
+	echo "Removing agetty from /etc/inittab ..."
+	chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab
+	chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab
+	chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab
 fi
 
 # Remove kernel source
@@ -9,7 +9,7 @@
 set -e
 
 usage() {
-	cat <<EOOPTS
+	cat << EOOPTS
 $(basename $0) [OPTIONS] <name>
 OPTIONS:
 -p "<packages>" The list of packages to install in the container.
@@ -21,7 +21,7 @@ OPTIONS:
 -t <tag> Specify Tag information.
          default is reffered at /etc/{redhat,system}-release
 EOOPTS
-	exit 1
+	exit 1
 }
 
 # option defaults
@@ -35,38 +35,38 @@ install_groups=()
 install_packages=()
 version=
 while getopts ":y:p:g:t:h" opt; do
-	case $opt in
-		y)
-			yum_config=$OPTARG
-			;;
-		h)
-			usage
-			;;
-		p)
-			install_packages+=("$OPTARG")
-			;;
-		g)
-			install_groups+=("$OPTARG")
-			;;
-		t)
-			version="$OPTARG"
-			;;
-		\?)
-			echo "Invalid option: -$OPTARG"
-			usage
-			;;
-	esac
+	case $opt in
+		y)
+			yum_config=$OPTARG
+			;;
+		h)
+			usage
+			;;
+		p)
+			install_packages+=("$OPTARG")
+			;;
+		g)
+			install_groups+=("$OPTARG")
+			;;
+		t)
+			version="$OPTARG"
+			;;
+		\?)
+			echo "Invalid option: -$OPTARG"
+			usage
+			;;
+	esac
 done
 shift $((OPTIND - 1))
 name=$1
 
 if [[ -z $name ]]; then
-	usage
+	usage
 fi
 
 # default to Core group if not specified otherwise
 if [ ${#install_groups[*]} -eq 0 ]; then
-	install_groups=('Core')
+	install_groups=('Core')
 fi
 
 target=$(mktemp -d --tmpdir $(basename $0).XXXXXX)
@@ -91,21 +91,19 @@ if [ -d /etc/yum/vars ]; then
 	cp -a /etc/yum/vars "$target"/etc/yum/
 fi
 
-if [[ -n "$install_groups" ]];
-then
-	yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
-		--setopt=group_package_types=mandatory -y groupinstall "${install_groups[@]}"
+if [[ -n "$install_groups" ]]; then
+	yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
+		--setopt=group_package_types=mandatory -y groupinstall "${install_groups[@]}"
 fi
 
-if [[ -n "$install_packages" ]];
-then
-	yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
-		--setopt=group_package_types=mandatory -y install "${install_packages[@]}"
+if [[ -n "$install_packages" ]]; then
+	yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
+		--setopt=group_package_types=mandatory -y install "${install_packages[@]}"
 fi
 
 yum -c "$yum_config" --installroot="$target" -y clean all
 
-cat > "$target"/etc/sysconfig/network <<EOF
+cat > "$target"/etc/sysconfig/network << EOF
 NETWORKING=yes
 HOSTNAME=localhost.localdomain
 EOF
@@ -129,18 +127,17 @@ rm -rf "$target"/etc/ld.so.cache "$target"/var/cache/ldconfig
 mkdir -p --mode=0755 "$target"/var/cache/ldconfig
 
 if [ -z "$version" ]; then
-	for file in "$target"/etc/{redhat,system}-release
-	do
-		if [ -r "$file" ]; then
-			version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$file")"
-			break
-		fi
-	done
+	for file in "$target"/etc/{redhat,system}-release; do
+		if [ -r "$file" ]; then
+			version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$file")"
+			break
+		fi
+	done
 fi
 
 if [ -z "$version" ]; then
-	echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"
-	version=$name
+	echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"
+	version=$name
 fi
 
 tar --numeric-owner -c -C "$target" . | docker import - $name:$version
@@ -28,12 +28,27 @@ tag=
 compression="auto"
 while true; do
 	case "$1" in
-		-d|--dir) dir="$2" ; shift 2 ;;
-		-t|--tag) tag="$2" ; shift 2 ;;
-		--compression) compression="$2" ; shift 2 ;;
-		--no-compression) compression="none" ; shift 1 ;;
-		-h|--help) usage ;;
-		--) shift ; break ;;
+		-d | --dir)
+			dir="$2"
+			shift 2
+			;;
+		-t | --tag)
+			tag="$2"
+			shift 2
+			;;
+		--compression)
+			compression="$2"
+			shift 2
+			;;
+		--no-compression)
+			compression="none"
+			shift 1
+			;;
+		-h | --help) usage ;;
+		--)
+			shift
+			break
+			;;
 	esac
 done
 
@@ -41,9 +56,8 @@ script="$1"
 [ "$script" ] || usage
 shift
 
-if [ "$compression" == 'auto' ] || [ -z "$compression" ]
-then
-	compression='xz'
+if [ "$compression" == 'auto' ] || [ -z "$compression" ]; then
+	compression='xz'
 fi
 
 [ "$compression" == 'none' ] && compression=''
@@ -68,7 +82,10 @@ if [ -z "$dir" ]; then
 fi
 
 rootfsDir="$dir/rootfs"
-( set -x; mkdir -p "$rootfsDir" )
+(
+	set -x
+	mkdir -p "$rootfsDir"
+)
 
 # pass all remaining arguments to $script
 "$scriptDir/$script" "$rootfsDir" "$@"
@@ -79,7 +96,7 @@ mkdir -p "$rootfsDir/dev" "$rootfsDir/proc"
 
 # make sure /etc/resolv.conf has something useful in it
 mkdir -p "$rootfsDir/etc"
-cat > "$rootfsDir/etc/resolv.conf" <<'EOF'
+cat > "$rootfsDir/etc/resolv.conf" << 'EOF'
 nameserver 8.8.8.8
 nameserver 8.8.4.4
 EOF
@@ -93,7 +110,7 @@ touch "$tarFile"
 )
 
 echo >&2 "+ cat > '$dir/Dockerfile'"
-cat > "$dir/Dockerfile" <<EOF
+cat > "$dir/Dockerfile" << EOF
 FROM scratch
 ADD $(basename "$tarFile") /
 EOF
@@ -101,20 +118,35 @@ EOF
 # if our generated image has a decent shell, let's set a default command
 for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do
 	if [ -x "$rootfsDir/$shell" ]; then
-		( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" )
+		(
+			set -x
+			echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile"
+		)
 		break
 	fi
 done
 
-( set -x; rm -rf "$rootfsDir" )
+(
+	set -x
+	rm -rf "$rootfsDir"
+)
 
 if [ "$tag" ]; then
-	( set -x; docker build -t "$tag" "$dir" )
+	(
+		set -x
+		docker build -t "$tag" "$dir"
+	)
 elif [ "$delDir" ]; then
 	# if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_
-	( set -x; docker build "$dir" )
+	(
+		set -x
+		docker build "$dir"
+	)
 fi
 
 if [ "$delDir" ]; then
-	( set -x; rm -rf "$dir" )
+	(
+		set -x
+		rm -rf "$dir"
+	)
 fi
@@ -4,7 +4,7 @@ set -e
 rootfsDir="$1"
 shift
 
-busybox="$(which busybox 2>/dev/null || true)"
+busybox="$(which busybox 2> /dev/null || true)"
 if [ -z "$busybox" ]; then
 	echo >&2 'error: busybox: not found'
 	echo >&2 ' install it with your distribution "busybox-static" package'
@@ -24,7 +24,7 @@ cp "$busybox" "$rootfsDir/bin/busybox"
 cd "$rootfsDir"
 
 IFS=$'\n'
-modules=( $(bin/busybox --list-modules) )
+modules=($(bin/busybox --list-modules))
 unset IFS
 
 for module in "${modules[@]}"; do
@@ -22,7 +22,7 @@ shift
 
 before=()
 while [ $# -gt 0 ] && [[ "$1" == -* ]]; do
-	before+=( "$1" )
+	before+=("$1")
 	shift
 done
 
@@ -62,7 +62,7 @@ rootfs_chroot() {
 
 # prevent init scripts from running during install/update
 echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'"
-cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF'
+cat > "$rootfsDir/usr/sbin/policy-rc.d" <<- 'EOF'
 	#!/bin/sh
 
 	# For most Docker users, "apt-get install" only happens during "docker build",
@@ -82,7 +82,10 @@ chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
 )
 
 # shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB)
-( set -x; rootfs_chroot apt-get clean )
+(
	set -x
+	rootfs_chroot apt-get clean
+)
 
 # this file is one APT creates to make sure we don't "autoremove" our currently
 # in-use kernel, which doesn't really apply to debootstraps/Docker images that
@@ -93,7 +96,7 @@ rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels"
 if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then
 	# force dpkg not to call sync() after package extraction (speeding up installs)
 	echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'"
-	cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF'
+	cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<- 'EOF'
 		# For most Docker users, package installs happen during "docker build", which
 		# doesn't survive power loss and gets restarted clean afterwards anyhow, so
 		# this minor tweak gives us a nice speedup (much nicer on spinning disks,
@@ -107,7 +110,7 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
 	# _keep_ us lean by effectively running "apt-get clean" after every install
 	aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
 	echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<- EOF
 		# Since for most Docker users, package installs happen in "docker build" steps,
 		# they essentially become individual layers due to the way Docker handles
 		# layering, especially using CoW filesystems. What this means for us is that
@@ -131,7 +134,7 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
 
 	# remove apt-cache translations for fast "apt-get update"
 	echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF'
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<- 'EOF'
 		# In Docker, we don't often need the "Translations" files, so we're just wasting
 		# time and space by downloading them, and this inhibits that. For users that do
 		# need them, it's a simple matter to delete this file and "apt-get update". :)
@@ -140,7 +143,7 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
 	EOF
 
 	echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF'
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<- 'EOF'
 		# Since Docker users using "RUN apt-get update && apt-get install -y ..." in
 		# their Dockerfiles don't go delete the lists files afterwards, we want them to
 		# be as small as possible on-disk, so we explicitly request "gz" versions and
@@ -156,7 +159,7 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
 
 	# update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed
 	echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF'
+	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<- 'EOF'
 		# Since Docker users are looking for the smallest possible final images, the
 		# following emerges as a very common pattern:
 
@@ -20,9 +20,18 @@ installversion=
 mirror=
 while true; do
 	case "$1" in
-		-v|--version) installversion="$2" ; shift 2 ;;
-		-m|--mirror) mirror="$2" ; shift 2 ;;
-		--) shift ; break ;;
+		-v | --version)
+			installversion="$2"
+			shift 2
+			;;
+		-m | --mirror)
+			mirror="$2"
+			shift 2
+			;;
+		--)
+			shift
+			break
+			;;
 	esac
 done
 
@@ -31,7 +31,10 @@ echo "Nuking $dir ..."
 echo ' (if this is wrong, press Ctrl+C NOW!)'
 echo
 
-( set -x; sleep 10 )
+(
+	set -x
+	sleep 10
+)
 echo
 
 dir_in_dir() {
@@ -45,7 +48,10 @@ dir_in_dir() {
 for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
 	mount="$(readlink -f "$mount" || true)"
 	if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then
-		( set -x; umount -f "$mount" )
+		(
+			set -x
+			umount -f "$mount"
+		)
 	fi
 done
 
@@ -55,10 +61,17 @@ if command -v btrfs > /dev/null 2>&1; then
 	# Source: http://stackoverflow.com/a/32865333
 	for subvol in $(find "$dir" -type d -inum 256 | sort -r); do
 		if [ "$dir" != "$subvol" ]; then
-			( set -x; btrfs subvolume delete "$subvol" )
+			(
+				set -x
+				btrfs subvolume delete "$subvol"
+			)
 		fi
 	done
 fi
 
 # finally, DESTROY ALL THINGS
-( shopt -s dotglob; set -x; rm -rf "$dir"/* )
+(
+	shopt -s dotglob
+	set -x
+	rm -rf "$dir"/*
+)
@@ -16,59 +16,59 @@ function urlencode() {
 	# urlencode <string>
 
 	local length="${#1}"
-	for (( i = 0; i < length; i++ )); do
-		local c="${1:i:1}"
-		case $c in
-			[a-zA-Z0-9.~_-]) printf "$c" ;;
-			*) printf '%%%02X' "'$c"
-		esac
+	for ((i = 0; i < length; i++)); do
+		local c="${1:i:1}"
+		case $c in
+			[a-zA-Z0-9.~_-]) printf "$c" ;;
+			*) printf '%%%02X' "'$c" ;;
+		esac
 	done
 }
 
 function template() {
-# this should always match the template from CONTRIBUTING.md
+	# this should always match the template from CONTRIBUTING.md
 	cat <<- EOM
-	Description of problem:
+		Description of problem:
 
 
-	\`docker version\`:
-	`${DOCKER_COMMAND} -D version`
+		\`docker version\`:
+		$(${DOCKER_COMMAND} -D version)
 
 
-	\`docker info\`:
-	`${DOCKER_COMMAND} -D info`
+		\`docker info\`:
+		$(${DOCKER_COMMAND} -D info)
 
 
-	\`uname -a\`:
-	`uname -a`
+		\`uname -a\`:
+		$(uname -a)
 
 
-	Environment details (AWS, VirtualBox, physical, etc.):
+		Environment details (AWS, VirtualBox, physical, etc.):
 
 
-	How reproducible:
+		How reproducible:
 
 
-	Steps to Reproduce:
-	1.
-	2.
-	3.
+		Steps to Reproduce:
+		1.
+		2.
+		3.
 
 
-	Actual Results:
+		Actual Results:
 
 
-	Expected Results:
+		Expected Results:
 
 
-	Additional info:
+		Additional info:
 
 
 	EOM
 }
 
 function format_issue_url() {
-	if [ ${#@} -ne 2 ] ; then
+	if [ ${#@} -ne 2 ]; then
 		return 1
 	fi
 	local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}")
@@ -76,7 +76,6 @@ function format_issue_url() {
 	echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}"
 }
 
-
 echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
 read -r -n 1 use_sudo
 echo ""
@@ -91,7 +90,7 @@ echo ""
 
 issue_url=$(format_issue_url "${issue_title}" "$(template)")
 
-if which xdg-open 2>/dev/null >/dev/null ; then
+if which xdg-open 2> /dev/null > /dev/null; then
 	echo -ne "Would like to launch this report in your browser? [Y|n]: "
 	read -r -n 1 launch_now
 	echo ""
@@ -102,4 +101,3 @@ if which xdg-open 2>/dev/null >/dev/null ; then
 fi
 
 echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}"
-
@@ -5,17 +5,17 @@ |
| 5 | 5 |
|
| 6 | 6 |
install_rootlesskit() {
|
| 7 | 7 |
case "$1" in |
| 8 |
- "dynamic") |
|
| 9 |
- install_rootlesskit_dynamic |
|
| 10 |
- return |
|
| 11 |
- ;; |
|
| 12 |
- "") |
|
| 13 |
- export CGO_ENABLED=0 |
|
| 14 |
- _install_rootlesskit |
|
| 15 |
- ;; |
|
| 16 |
- *) |
|
| 17 |
- echo 'Usage: $0 [dynamic]' |
|
| 18 |
- ;; |
|
| 8 |
+ "dynamic") |
|
| 9 |
+ install_rootlesskit_dynamic |
|
| 10 |
+ return |
|
| 11 |
+ ;; |
|
| 12 |
+ "") |
|
| 13 |
+ export CGO_ENABLED=0 |
|
| 14 |
+ _install_rootlesskit |
|
| 15 |
+ ;; |
|
| 16 |
+ *) |
|
| 17 |
+ echo 'Usage: $0 [dynamic]' |
|
| 18 |
+ ;; |
|
| 19 | 19 |
esac |
| 20 | 20 |
} |
| 21 | 21 |
|
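Note: the extra indentation of the case branches above is consistent with shfmt's -ci option (indent switch cases); the space after redirection operators seen throughout this diff matches -sr, and the leading | and && continuations match -bn. A sketch of reproducing the style on some script (the filename is illustrative; shfmt must be installed):

    shfmt -w -bn -ci -sr path/to/script.sh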
@@ -6,9 +6,9 @@ cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
 # see also ".mailmap" for how email addresses and names are deduplicated
 
 {
-	cat <<-'EOH'
-	# This file lists all individuals having contributed content to the repository.
-	# For how it is generated, see `hack/generate-authors.sh`.
+	cat <<- 'EOH'
+		# This file lists all individuals having contributed content to the repository.
+		# For how it is generated, see `hack/generate-authors.sh`.
 	EOH
 	echo
 	git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
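Note: the deeper indentation given to the heredoc bodies in this patch is cosmetic, because the <<- operator strips leading tab characters from the body and the delimiter; quoting the delimiter ('EOH') additionally disables expansion inside the body. A standalone sketch (the body must be indented with tabs):

    cat <<- 'EOF'
    	tab-indented in the source,
    	printed flush-left, with $HOME left unexpanded
    EOF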
@@ -24,7 +24,7 @@ set -e
 set -o pipefail
 
 export DOCKER_PKG='github.com/docker/docker'
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 export MAKEDIR="$SCRIPTDIR/make"
 export PKG_CONFIG=${PKG_CONFIG:-pkg-config}
 
@@ -34,10 +34,10 @@ echo
 DEFAULT_BUNDLES=(
 	binary-daemon
 	dynbinary
-
+	\
 	test-integration
 	test-docker-py
-
+	\
 	cross
 )
 
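Note: the bare \ lines above stand in for the blank lines that used to group the bundle names; a backslash-newline is a plain line continuation, so it keeps the visual break without adding an array element (presumably because the formatter would otherwise drop blank lines inside the array literal). Quick standalone check:

    bundles=(
    	one
    	two
    	\
    	three
    )
    echo "${#bundles[@]}"   # prints 3; the backslash contributes nothing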
@@ -85,9 +85,9 @@ add_buildtag() {
 	[[ " $DOCKER_BUILDTAGS" == *" $1_"* ]] || DOCKER_BUILDTAGS+=" $1_$2"
 }
 
-if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then
+if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null; then
 	DOCKER_BUILDTAGS+=" journald"
-elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then
+elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null; then
 	DOCKER_BUILDTAGS+=" journald journald_compat"
 fi
 
@@ -95,10 +95,11 @@ fi
 # functionality. We favour libdm_dlsym_deferred_remove over
 # libdm_no_deferred_remove in dynamic cases because the binary could be shipped
 # with a newer libdevmapper than the one it was built with.
-if \
+if
 	command -v gcc &> /dev/null \
-	&& ! ( echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null $(pkg-config --libs devmapper) &> /dev/null ) \
-; then
+	&& ! (echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }' | gcc -xc - -o /dev/null $(pkg-config --libs devmapper) &> /dev/null) \
+	;
+then
 	add_buildtag libdm dlsym_deferred_remove
 fi
 
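Note: the condition above is a compile-time feature probe: it feeds a one-line C program calling dm_task_deferred_remove() to gcc, and only when compilation fails (the symbol is absent from the installed libdevmapper) does it add the dlsym fallback build tag. The probe can be run by hand (assumes gcc and the devmapper development package are installed):

    echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }' \
    	| gcc -xc - -o /dev/null $(pkg-config --libs devmapper) &> /dev/null \
    	&& echo "deferred remove available" \
    	|| echo "deferred remove missing; dlsym fallback needed"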
@@ -113,10 +114,10 @@ LDFLAGS_STATIC=''
 EXTLDFLAGS_STATIC='-static'
 # ORIG_BUILDFLAGS is necessary for the cross target which cannot always build
 # with options like -race.
-ORIG_BUILDFLAGS=( -tags "netgo osusergo static_build $DOCKER_BUILDTAGS" -installsuffix netgo )
+ORIG_BUILDFLAGS=(-tags "netgo osusergo static_build $DOCKER_BUILDTAGS" -installsuffix netgo)
 # see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here
 
-BUILDFLAGS=( ${BUILDFLAGS} "${ORIG_BUILDFLAGS[@]}" )
+BUILDFLAGS=(${BUILDFLAGS} "${ORIG_BUILDFLAGS[@]}")
 
 LDFLAGS_STATIC_DOCKER="
 	$LDFLAGS_STATIC
@@ -134,7 +135,8 @@ if [ "$(uname -s)" = 'FreeBSD' ]; then
 fi
 
 bundle() {
-	local bundle="$1"; shift
+	local bundle="$1"
+	shift
 	echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
 	source "$SCRIPTDIR/make/$bundle" "$@"
 }
@@ -38,58 +38,58 @@ hash_files() {
 }
 
 (
-export GOGC=${DOCKER_BUILD_GOGC:-1000}
+	export GOGC=${DOCKER_BUILD_GOGC:-1000}
 
-if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
-	# must be cross-compiling!
-	case "$(go env GOOS)/$(go env GOARCH)" in
-		windows/amd64)
-			export CC="${CC:-x86_64-w64-mingw32-gcc}"
-			export CGO_ENABLED=1
-			;;
-		linux/arm)
-			case "${GOARM}" in
-			5|"")
-				export CC="${CC:-arm-linux-gnueabi-gcc}"
+	if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
+		# must be cross-compiling!
+		case "$(go env GOOS)/$(go env GOARCH)" in
+			windows/amd64)
+				export CC="${CC:-x86_64-w64-mingw32-gcc}"
 				export CGO_ENABLED=1
 				;;
-			7)
-				export CC="${CC:-arm-linux-gnueabihf-gcc}"
+			linux/arm)
+				case "${GOARM}" in
+					5 | "")
+						export CC="${CC:-arm-linux-gnueabi-gcc}"
+						export CGO_ENABLED=1
+						;;
+					7)
+						export CC="${CC:-arm-linux-gnueabihf-gcc}"
+						export CGO_ENABLED=1
+						;;
+				esac
+				;;
+			linux/arm64)
+				export CC="${CC:-aarch64-linux-gnu-gcc}"
 				export CGO_ENABLED=1
 				;;
-			esac
-			;;
-		linux/arm64)
-			export CC="${CC:-aarch64-linux-gnu-gcc}"
-			export CGO_ENABLED=1
-			;;
-		linux/amd64)
-			export CC="${CC:-x86_64-linux-gnu-gcc}"
-			export CGO_ENABLED=1
+			linux/amd64)
+				export CC="${CC:-x86_64-linux-gnu-gcc}"
+				export CGO_ENABLED=1
+				;;
+		esac
+	fi
+
+	# -buildmode=pie is not supported on Windows and Linux on mips.
+	case "$(go env GOOS)/$(go env GOARCH)" in
+		windows/* | linux/mips*) ;;
+
+		*)
+			BUILDFLAGS+=("-buildmode=pie")
 			;;
 	esac
-fi
-
-# -buildmode=pie is not supported on Windows and Linux on mips.
-case "$(go env GOOS)/$(go env GOARCH)" in
-	windows/*|linux/mips*)
-		;;
-	*)
-		BUILDFLAGS+=( "-buildmode=pie" )
-		;;
-esac
 
-echo "Building: $DEST/$BINARY_FULLNAME"
-echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
-go build \
-	-o "$DEST/$BINARY_FULLNAME" \
-	"${BUILDFLAGS[@]}" \
-	-ldflags "
+	echo "Building: $DEST/$BINARY_FULLNAME"
+	echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
+	go build \
+		-o "$DEST/$BINARY_FULLNAME" \
+		"${BUILDFLAGS[@]}" \
+		-ldflags "
 		$LDFLAGS
 		$LDFLAGS_STATIC_DOCKER
 		$DOCKER_LDFLAGS
 	" \
-	${GO_PACKAGE}
+		${GO_PACKAGE}
 )
 
 echo "Created binary: $DEST/$BINARY_FULLNAME"
@@ -2,13 +2,13 @@
 set -e
 
 docker-version-osarch() {
-	if ! type docker &>/dev/null; then
+	if ! type docker &> /dev/null; then
 		# docker is not installed
 		return
 	fi
 	local target="$1" # "Client" or "Server"
 	local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}"
-	if docker version -f "$fmtStr" 2>/dev/null; then
+	if docker version -f "$fmtStr" 2> /dev/null; then
 		# if "docker version -f" works, let's just use that!
 		return
 	fi
@@ -58,7 +58,7 @@ if [ "$DOCKER_REMAP_ROOT" ]; then
 fi
 
 # example usage: DOCKER_EXPERIMENTAL=1
-if [ "$DOCKER_EXPERIMENTAL" ]; then
+if [ "$DOCKER_EXPERIMENTAL" ]; then
 	echo >&2 '# DOCKER_EXPERIMENTAL is set: starting daemon with experimental features enabled! '
 	extra_params="$extra_params --experimental"
 fi
@@ -88,7 +88,7 @@ if [ -z "$DOCKER_TEST_HOST" ]; then
 			--userland-proxy="$DOCKER_USERLANDPROXY" \
 			${storage_params} \
 			${extra_params} \
-		&> "$DEST/docker.log"
+			&> "$DEST/docker.log"
 	) &
 else
 	export DOCKER_HOST="$DOCKER_TEST_HOST"
@@ -98,7 +98,7 @@ fi
 tries=60
 echo "INFO: Waiting for daemon to start..."
 while ! ${TEST_CLIENT_BINARY} version &> /dev/null; do
-	(( tries-- ))
+	((tries--))
 	if [ $tries -le 0 ]; then
 		printf "\n"
 		if [ -z "$DOCKER_HOST" ]; then
@@ -2,7 +2,10 @@
 
 if [ ! "$(go env GOOS)" = 'windows' ]; then
 	for pidFile in $(find "$DEST" -name docker.pid); do
-		pid=$([ -n "$TESTDEBUG" ] && set -x; cat "$pidFile")
+		pid=$(
+			[ -n "$TESTDEBUG" ] && set -x
+			cat "$pidFile"
+		)
 		(
 			[ -n "$TESTDEBUG" ] && set -x
 			kill "$pid"
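Note: splitting pid=$(...) over several lines changes layout only; in both spellings the [ -n "$TESTDEBUG" ] && set -x runs inside the command substitution's subshell, so tracing is enabled just for the cat and never leaks into the calling shell. Standalone illustration:

    val=$(
    	set -x      # traces only commands inside this $( ... ) subshell
    	echo hello
    )
    echo "$val"     # the parent shell is not tracing here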
@@ -26,7 +29,7 @@ if [ ! "$(go env GOOS)" = 'windows' ]; then
 		fi
 	fi
 else
-	# Note this script is not actionable on Windows to Linux CI. Instead the
+	# Note this script is not actionable on Windows to Linux CI. Instead the
 	# DIND daemon under test is torn down by the Jenkins tear-down script
 	echo "INFO: Not stopping daemon on Windows CI"
 fi
@@ -7,7 +7,7 @@
 #
 
 if [ -z "${MAKEDIR}" ]; then
-	MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+	MAKEDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 	export MAKEDIR
 fi
 source "${MAKEDIR}/.go-autogen"
@@ -46,7 +46,7 @@ setup_integration_test_filter() {
 
 setup_integration_test_filter
 if [ -z "${TEST_SKIP_INTEGRATION}" ] && [ -z "${TEST_INTEGRATION_DIR}" ]; then
-	integration_api_dirs="$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)"
+	integration_api_dirs="$(go list -test -f '{{- if ne .ForTest "" -}}{{- .Dir -}}{{- end -}}' ./integration/...)"
 else
 	integration_api_dirs="${TEST_INTEGRATION_DIR}"
 fi
@@ -163,8 +163,8 @@ error_on_leaked_containerd_shims() {
 		return
 	fi
 
-	leftovers=$(ps -ax -o pid,cmd |
-		awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
+	leftovers=$(ps -ax -o pid,cmd \
+		| awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
 	if [ -n "$leftovers" ]; then
 		ps aux
 		# shellcheck disable=SC2086
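Note: with the pipe moved to the head of the continuation line, the preceding line must end in \: bash continues a pipeline after a trailing |, but a line may not begin with | unless the previous line was explicitly continued. Both forms below are equivalent (standalone):

    ps -ax -o pid,cmd |
    	grep '[c]ontainerd-shim'

    ps -ax -o pid,cmd \
    	| grep '[c]ontainerd-shim'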
@@ -190,6 +190,6 @@ set_platform_timeout() {
 		# - remove last character (usually 'm' from '10m')
 		# - multiply by testcount
 		# - add last character back
-		TIMEOUT=$((${TIMEOUT::-1} * ${TEST_REPEAT}))${TIMEOUT:$((${#TIMEOUT}-1)):1}
+		TIMEOUT=$((${TIMEOUT::-1} * ${TEST_REPEAT}))${TIMEOUT:$((${#TIMEOUT} - 1)):1}
 	fi
 }
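Note: the TIMEOUT line scales a Go-style duration by stripping the unit suffix, multiplying, and re-appending it; the whole right-hand side expands against the old value before assignment. Worked through with assumed values TIMEOUT=10m and TEST_REPEAT=3:

    TIMEOUT=10m
    TEST_REPEAT=3
    # ${TIMEOUT::-1}                    -> "10" (all but the last character)
    # ${TIMEOUT:$((${#TIMEOUT} - 1)):1} -> "m"  (just the last character)
    TIMEOUT=$((${TIMEOUT::-1} * ${TEST_REPEAT}))${TIMEOUT:$((${#TIMEOUT} - 1)):1}
    echo "$TIMEOUT"   # 30m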
@@ -22,7 +22,7 @@ copy_binaries() {
 	done
 
 	# vpnkit is amd64 only
-	if command -v "vpnkit.$(uname -m)" 2>&1 >/dev/null; then
+	if command -v "vpnkit.$(uname -m)" 2>&1 > /dev/null; then
 		cp -f "$(command -v "vpnkit.$(uname -m)")" "$dir/vpnkit"
 		if [ "$hash" = "hash" ]; then
 			hash_files "$dir/vpnkit"
@@ -4,8 +4,8 @@ set -e
 (
 	export IAMSTATIC='false'
 	export LDFLAGS_STATIC_DOCKER=''
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/osusergo /}" ) # ditto for osusergo
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
+	export BUILDFLAGS=("${BUILDFLAGS[@]/netgo /}") # disable netgo, since we don't need it for a dynamic binary
+	export BUILDFLAGS=("${BUILDFLAGS[@]/osusergo /}") # ditto for osusergo
+	export BUILDFLAGS=("${BUILDFLAGS[@]/static_build /}") # we're not building a "static" binary here
 	source "${MAKEDIR}/.binary"
 )
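Note: "${BUILDFLAGS[@]/netgo /}" is a pattern substitution applied to every element of the array, deleting the first occurrence of "netgo " in each; that is how individual tags are carved out of the -tags "netgo osusergo static_build ..." argument. Standalone sketch:

    flags=(-tags "netgo osusergo static_build")
    flags=("${flags[@]/netgo /}")   # remove "netgo " from each element
    printf '%s\n' "${flags[@]}"
    # -tags
    # osusergo static_build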
@@ -37,6 +37,7 @@ source hack/make/.integration-test-helpers
 		echo "WARN: Skipping test-docker-py: connecting to docker daemon using ${docker_host_scheme} (${DOCKER_HOST}) not supported"
 		bundle .integration-daemon-stop
 		return 0
+		;;
 esac
 
 docker_py_image="docker-sdk-python3:${DOCKER_PY_COMMIT}"
@@ -3,11 +3,10 @@ set -e -o pipefail
 
 source hack/validate/.validate
 
-
 run_integration_flaky() {
 	new_tests=$(
-		validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
-		grep -E '^(\+func Test)(.*)(\*testing\.T\))' || true
+		validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' \
+			| grep -E '^(\+func Test)(.*)(\*testing\.T\))' || true
 	)
 
 	if [ -z "$new_tests" ]; then
@@ -3,7 +3,7 @@ set -e -u -o pipefail
 
 ARCH=$(uname -m)
 if [ "$ARCH" = "x86_64" ]; then
-	ARCH="amd64"
+	ARCH="amd64"
 fi
 
 export DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:-${ARCH}}
@@ -13,8 +13,9 @@ export DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:-${ARCH}}
 : ${TESTDEBUG:=}
 
 integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(
-	find /tests/integration -type d |
-	grep -vE '(^/tests/integration($|/internal)|/testdata)')"}
+	find /tests/integration -type d \
+		| grep -vE '(^/tests/integration($|/internal)|/testdata)'
+)"}
 
 run_test_integration() {
 	set_platform_timeout
@@ -12,7 +12,7 @@
 #
 set -eu -o pipefail
 
-BUILDFLAGS=( -tags 'netgo seccomp libdm_no_deferred_remove' )
+BUILDFLAGS=(-tags 'netgo seccomp libdm_no_deferred_remove')
 TESTFLAGS+=" -test.timeout=${TIMEOUT:-5m}"
 TESTDIRS="${TESTDIRS:-./...}"
 exclude_paths='/vendor/|/integration'
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "${SCRIPTDIR}/.validate"
 
 adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
@@ -24,7 +24,7 @@ check_dco() {
 if [ ${adds} -eq 0 -a ${dels} -eq 0 ]; then
 	echo '0 adds, 0 deletions; nothing to validate! :)'
 else
-	commits=( $(validate_log --format='format:%H%n') )
+	commits=($(validate_log --format='format:%H%n'))
 	badCommits=()
 	for commit in "${commits[@]}"; do
 		if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then
@@ -32,7 +32,7 @@ else
 			continue
 		fi
 		if ! git log -1 --format='format:%B' "$commit" | check_dco; then
-			badCommits+=( "$commit" )
+			badCommits+=("$commit")
 		fi
 	done
 	if [ ${#badCommits[@]} -eq 0 ]; then
@@ -2,7 +2,7 @@
 #
 # Run default validation, exclude vendor because it's slow
 
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 
 . "${SCRIPTDIR}"/dco
 . "${SCRIPTDIR}"/default-seccomp
@@ -1,17 +1,17 @@ |
| 1 | 1 |
#!/usr/bin/env bash |
| 2 | 2 |
|
| 3 |
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
|
| 3 |
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
| 4 | 4 |
source "${SCRIPTDIR}/.validate"
|
| 5 | 5 |
|
| 6 | 6 |
IFS=$'\n' |
| 7 |
-files=( $(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true) ) |
|
| 7 |
+files=($(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true)) |
|
| 8 | 8 |
unset IFS |
| 9 | 9 |
|
| 10 | 10 |
if [ -n "${TEST_FORCE_VALIDATE:-}" ] || [ ${#files[@]} -gt 0 ]; then
|
| 11 | 11 |
# We run 'go generate' and see if we have a diff afterwards |
| 12 |
- go generate ./profiles/seccomp/ >/dev/null |
|
| 12 |
+ go generate ./profiles/seccomp/ > /dev/null |
|
| 13 | 13 |
# Let see if the working directory is clean |
| 14 |
- diffs="$(git status --porcelain -- profiles/seccomp 2>/dev/null)" |
|
| 14 |
+ diffs="$(git status --porcelain -- profiles/seccomp 2> /dev/null)" |
|
| 15 | 15 |
if [ "$diffs" ]; then |
| 16 | 16 |
{
|
| 17 | 17 |
echo 'The result of go generate ./profiles/seccomp/ differs' |
@@ -1,12 +1,12 @@
 #!/usr/bin/env bash
 # Check that no new tests are being added to integration-cli
 
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "${SCRIPTDIR}/.validate"
 
 new_tests=$(
-	validate_diff --diff-filter=ACMR --unified=0 -- 'integration-cli/*_api_*.go' 'integration-cli/*_cli_*.go' |
-	grep -E '^\+func (.*) Test' || true
+	validate_diff --diff-filter=ACMR --unified=0 -- 'integration-cli/*_api_*.go' 'integration-cli/*_cli_*.go' \
+		| grep -E '^\+func (.*) Test' || true
 )
 
 if [ -n "$new_tests" ]; then
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 set -e -o pipefail
 
-SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 
 # CI platforms differ, so per-platform GOLANGCI_LINT_OPTS can be set
 # from a platform-specific Dockerfile, otherwise let's just set
@@ -11,9 +11,9 @@ SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 [ -n "${TESTDEBUG}" ] && set -x
 
 # TODO find a way to share this code with hack/make.sh
-if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then
+if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null; then
 	DOCKER_BUILDTAGS+=" journald"
-elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then
+elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null; then
 	DOCKER_BUILDTAGS+=" journald journald_compat"
 fi
 
@@ -1,21 +1,21 @@
 #!/usr/bin/env bash
 set -e
 
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "${SCRIPTDIR}/.validate"
 
 IFS=$'\n'
-files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) )
+files=($(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true))
 unset IFS
 
 badFiles=()
 for f in "${files[@]}"; do
 	IFS=$'\n'
-	badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -vE '^github.com/docker/docker/vendor' | grep -E '^github.com/docker/docker' || true) )
+	badImports=($(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -vE '^github.com/docker/docker/vendor' | grep -E '^github.com/docker/docker' || true))
 	unset IFS
 
 	for import in "${badImports[@]}"; do
-		badFiles+=( "$f imports $import" )
+		badFiles+=("$f imports $import")
 	done
 done
 
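Note: the IFS=$'\n' ... unset IFS bracketing is what makes the unquoted files=( $( ... ) ) assignments above safe for paths containing spaces: with IFS reduced to a newline, the command substitution is split one array element per output line. Standalone check:

    IFS=$'\n'
    files=($(printf '%s\n' 'a b.go' 'c.go'))
    unset IFS
    echo "${#files[@]}"   # 2, not 3: "a b.go" stays a single element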
@@ -1,10 +1,10 @@ |
| 1 | 1 |
#!/usr/bin/env bash |
| 2 | 2 |
set -e |
| 3 |
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
|
| 3 |
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
| 4 | 4 |
source "${SCRIPTDIR}/.validate"
|
| 5 | 5 |
|
| 6 | 6 |
IFS=$'\n' |
| 7 |
-files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' || true) ) |
|
| 7 |
+files=($(validate_diff --diff-filter=ACMR --name-only -- 'api/swagger.yaml' || true)) |
|
| 8 | 8 |
unset IFS |
| 9 | 9 |
|
| 10 | 10 |
if [ -n "${TEST_FORCE_VALIDATE:-}" ] || [ ${#files[@]} -gt 0 ]; then
|
| ... | ... |
@@ -12,7 +12,7 @@ if [ -n "${TEST_FORCE_VALIDATE:-}" ] || [ ${#files[@]} -gt 0 ]; then
|
| 12 | 12 |
if out=$(swagger validate api/swagger.yaml); then |
| 13 | 13 |
echo "Congratulations! ${out}"
|
| 14 | 14 |
else |
| 15 |
- echo "${out}" >&2
|
|
| 15 |
+ echo "${out}" >&2
|
|
| 16 | 16 |
false |
| 17 | 17 |
fi |
| 18 | 18 |
fi |
@@ -1,10 +1,10 @@
 #!/usr/bin/env bash
 
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "${SCRIPTDIR}/.validate"
 
 IFS=$'\n'
-files=( $(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swagger.yaml' || true) )
+files=($(validate_diff --diff-filter=ACMR --name-only -- 'api/types/' 'api/swagger.yaml' || true))
 unset IFS
 
 if [ -n "${TEST_FORCE_VALIDATE:-}" ] || [ ${#files[@]} -gt 0 ]; then
@@ -1,17 +1,17 @@ |
| 1 | 1 |
#!/usr/bin/env bash |
| 2 | 2 |
|
| 3 |
-export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
|
| 3 |
+export SCRIPTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
| 4 | 4 |
source "${SCRIPTDIR}/.validate"
|
| 5 | 5 |
|
| 6 | 6 |
IFS=$'\n' |
| 7 |
-files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) |
|
| 7 |
+files=($(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true)) |
|
| 8 | 8 |
unset IFS |
| 9 | 9 |
|
| 10 | 10 |
badFiles=() |
| 11 | 11 |
for f in "${files[@]}"; do
|
| 12 | 12 |
# we use "git show" here to validate that what's committed has valid TOML syntax |
| 13 |
- if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then |
|
| 14 |
- badFiles+=( "$f" ) |
|
| 13 |
+ if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0; then |
|
| 14 |
+ badFiles+=("$f")
|
|
| 15 | 15 |
fi |
| 16 | 16 |
done |
| 17 | 17 |
|
@@ -6,7 +6,7 @@ listFile=shell_test_list.json
 
 case $1 in
 	"store")
-		in=$(</dev/stdin)
+		in=$(< /dev/stdin)
 		server=$(echo "$in" | jq --raw-output ".ServerURL")
 		serverHash=$(echo "$server" | sha1sum - | awk '{print $1}')
 
@@ -17,34 +17,34 @@ case $1 in
 		if [[ ! -f $TEMP/$listFile ]]; then
 			echo "{ \"${server}\": \"${username}\" }" > $TEMP/$listFile
 		else
-			list=$(<$TEMP/$listFile)
+			list=$(< $TEMP/$listFile)
 			echo "$list" | jq ". + {\"${server}\": \"${username}\"}" > $TEMP/$listFile
 		fi
 		;;
 	"get")
-		in=$(</dev/stdin)
+		in=$(< /dev/stdin)
 		serverHash=$(echo "$in" | sha1sum - | awk '{print $1}')
 		if [[ ! -f $TEMP/$serverHash ]]; then
 			echo "credentials not found in native keychain"
 			exit 1
 		fi
-		payload=$(<$TEMP/$serverHash)
+		payload=$(< $TEMP/$serverHash)
 		echo "$payload"
 		;;
 	"erase")
-		in=$(</dev/stdin)
+		in=$(< /dev/stdin)
 		serverHash=$(echo "$in" | sha1sum - | awk '{print $1}')
 		rm -f $TEMP/$serverHash
 
 		# Remove the server from the list
-		list=$(<$TEMP/$listFile)
+		list=$(< $TEMP/$listFile)
 		echo "$list" | jq "del(.[\"${in}\"])" > $TEMP/$listFile
 		;;
 	"list")
 		if [[ ! -f $TEMP/$listFile ]]; then
			echo "{}"
 		else
-			payload=$(<$TEMP/$listFile)
+			payload=$(< $TEMP/$listFile)
 			echo "$payload"
 		fi
 		;;
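Note: this last script is a test stand-in for a docker credential helper: each subcommand (store, get, erase, list) reads its payload from stdin, which is why $(< /dev/stdin) appears throughout ($(< file) is bash's builtin equivalent of $(cat file)). A hedged usage sketch, assuming the script is on PATH under the illustrative name docker-credential-shell-test:

    # store credentials (JSON payload on stdin)
    echo '{"ServerURL":"https://index.docker.io/v1","Username":"me","Secret":"s3cret"}' \
    	| docker-credential-shell-test store

    # retrieve them again (server URL on stdin)
    echo 'https://index.docker.io/v1' | docker-credential-shell-test get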