Browse code

Merge pull request #29775 from AkihiroSuda/integration-cli-on-swarm

[EXPERIMENTAL] Integration Test on Swarm (completes in about 5 minutes, with 10 Swarm nodes)

Sebastiaan van Stijn authored on 2017/03/02 05:16:05
Showing 24 changed files
... ...
@@ -3,3 +3,5 @@ bundles
3 3
 vendor/pkg
4 4
 .go-pkg-cache
5 5
 .git
6
+hack/integration-cli-on-swarm/integration-cli-on-swarm
7
+
... ...
@@ -31,3 +31,4 @@ man/man1
31 31
 man/man5
32 32
 man/man8
33 33
 vendor/pkg/
34
+hack/integration-cli-on-swarm/integration-cli-on-swarm
... ...
@@ -59,10 +59,11 @@ DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docke
59 59
 DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
60 60
 
61 61
 # enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set
62
-PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
62
+PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
63 63
 PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
64 64
 PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
65
-DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),$(DOCKER_MOUNT))
65
+DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
66
+DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE)
66 67
 
67 68
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
68 69
 GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
... ...
@@ -75,6 +76,9 @@ export BUILD_APT_MIRROR
75 75
 
76 76
 SWAGGER_DOCS_PORT ?= 9000
77 77
 
78
+INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
79
+INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
80
+
78 81
 # if this session isn't interactive, then we don't want to allocate a
79 82
 # TTY, which would fail, but if it is interactive, we do want to attach
80 83
 # so that the user can send e.g. ^C through.
... ...
@@ -173,3 +177,19 @@ swagger-docs: ## preview the API documentation
173 173
 		-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
174 174
 		-p $(SWAGGER_DOCS_PORT):80 \
175 175
 		bfirsh/redoc:1.6.2
176
+
177
# build-integration-cli-on-swarm builds:
#  1. the `integration-cli-on-swarm` host binary (the client / funker caller)
#  2. the master image (from hack/integration-cli-on-swarm/agent)
#  3. the worker image (committed from a container of $(DOCKER_IMAGE) so that
#     incremental-binary package caching keeps working)
build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
	@echo "Building hack/integration-cli-on-swarm"
	go build -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
	@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
	docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on
	@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
	$(eval tmp := integration-cli-worker-tmp)
# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here
	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
	docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
	docker exec $(tmp) go build -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
	docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
	docker rm -f $(tmp)
176 192
new file mode 100644
... ...
@@ -0,0 +1,66 @@
0
+# Integration Testing on Swarm
1
+
2
IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster.
3
+
4
+## Architecture
5
+
6
+### Master service
7
+
8
+  - Works as a funker caller
9
+  - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
10
+
11
+### Worker service
12
+
13
+  - Works as a funker callee
14
+  - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
15
+
16
+### Client
17
+
18
+  - Controls master and workers via `docker stack`
19
+  - No need to have a local daemon
20
+
21
+Typically, the master and workers are supposed to be running on a cloud environment,
22
+while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
23
+
24
+## Requirement
25
+
26
+  - Docker daemon 1.13 or later
27
+  - Private registry for distributed execution with multiple nodes
28
+
29
+## Usage
30
+
31
+### Step 1: Prepare images
32
+
33
+    $ make build-integration-cli-on-swarm
34
+
35
The following environment variables are known to work in this step:
36
+
37
+ - `BUILDFLAGS`
38
+ - `DOCKER_INCREMENTAL_BINARY`
39
+
40
+### Step 2: Execute tests
41
+
42
+    $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest 
43
+
44
The following environment variables are known to work in this step:
45
+
46
+ - `DOCKER_GRAPHDRIVER`
47
+ - `DOCKER_EXPERIMENTAL`
48
+
49
+#### Flags
50
+
51
+Basic flags:
52
+
53
+ - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
54
+ - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
55
+ - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
56
+
57
+Experimental flags for mitigating makespan nonuniformity:
58
+
59
+ - `-shuffle`: Shuffle the test filter strings
60
+
61
+Flags for debugging IT on Swarm itself:
62
+
63
 - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
64
 - `-filters-file FILE`: the file that contains `-check.f` strings. By default, the file is automatically generated.
65
+ - `-dry-run`: skip the actual workload
0 66
new file mode 100644
... ...
@@ -0,0 +1,6 @@
0
# this Dockerfile is solely used for the master image.
# Please refer to the top-level Makefile for the worker image.
FROM golang:1.7
# The master only needs the agent sources; the worker is instead committed
# from the full development image (see `build-integration-cli-on-swarm`).
ADD . /go/src/github.com/docker/docker/hack/integration-cli-on-swarm/agent
RUN go build -o /master github.com/docker/docker/hack/integration-cli-on-swarm/agent/master
ENTRYPOINT ["/master"]
0 6
new file mode 100644
... ...
@@ -0,0 +1,132 @@
0
+package main
1
+
2
+import (
3
+	"encoding/json"
4
+	"fmt"
5
+	"log"
6
+	"strings"
7
+	"sync"
8
+	"sync/atomic"
9
+	"time"
10
+
11
+	"github.com/bfirsh/funker-go"
12
+	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
13
+)
14
+
15
+const (
16
+	// funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3
17
+	// When all the funker replicas are busy in their own job, we cannot connect to funker.
18
+	funkerRetryTimeout  = 1 * time.Hour
19
+	funkerRetryDuration = 1 * time.Second
20
+)
21
+
22
// ticker periodically emits a log line so that CI systems with inactivity
// timeouts (e.g. on Travis, a job is aborted when no output is emitted for
// 10 minutes) do not kill the job while long-running work produces no output.
// Closing the returned channel stops the ticker and terminates its goroutine.
func ticker(d time.Duration) chan struct{} {
	t := time.NewTicker(d)
	stop := make(chan struct{})
	go func() {
		for {
			select {
			case <-t.C:
				log.Printf("tick (just for keeping CI job active) per %s", d.String())
			case <-stop:
				t.Stop()
				// Return so the goroutine exits. Without this return, the
				// closed stop channel is always ready to receive, and the
				// loop would spin forever calling t.Stop().
				return
			}
		}
	}()
	return stop
}
38
+
39
+func executeTests(funkerName string, testChunks [][]string) error {
40
+	tickerStopper := ticker(9*time.Minute + 55*time.Second)
41
+	defer func() {
42
+		close(tickerStopper)
43
+	}()
44
+	begin := time.Now()
45
+	log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName)
46
+	var wg sync.WaitGroup
47
+	var passed, failed uint32
48
+	for chunkID, tests := range testChunks {
49
+		log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests))
50
+		wg.Add(1)
51
+		go func(chunkID int, tests []string) {
52
+			defer wg.Done()
53
+			chunkBegin := time.Now()
54
+			result, err := executeTestChunkWithRetry(funkerName, types.Args{
55
+				ChunkID: chunkID,
56
+				Tests:   tests,
57
+			})
58
+			if result.RawLog != "" {
59
+				for _, s := range strings.Split(result.RawLog, "\n") {
60
+					log.Printf("Log (chunk %d): %s", chunkID, s)
61
+				}
62
+			}
63
+			if err != nil {
64
+				log.Printf("Error while executing chunk %d: %v",
65
+					chunkID, err)
66
+				atomic.AddUint32(&failed, 1)
67
+			} else {
68
+				if result.Code == 0 {
69
+					atomic.AddUint32(&passed, 1)
70
+				} else {
71
+					atomic.AddUint32(&failed, 1)
72
+				}
73
+				log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.",
74
+					chunkID, passed+failed, len(testChunks), len(tests),
75
+					time.Now().Sub(chunkBegin), result.Code)
76
+			}
77
+		}(chunkID, tests)
78
+	}
79
+	wg.Wait()
80
+	// TODO: print actual tests rather than chunks
81
+	log.Printf("Executed %d chunks in %s. PASS: %d, FAIL: %d.",
82
+		len(testChunks), time.Now().Sub(begin), passed, failed)
83
+	if failed > 0 {
84
+		return fmt.Errorf("%d chunks failed", failed)
85
+	}
86
+	return nil
87
+}
88
+
89
+func executeTestChunk(funkerName string, args types.Args) (types.Result, error) {
90
+	ret, err := funker.Call(funkerName, args)
91
+	if err != nil {
92
+		return types.Result{}, err
93
+	}
94
+	tmp, err := json.Marshal(ret)
95
+	if err != nil {
96
+		return types.Result{}, err
97
+	}
98
+	var result types.Result
99
+	err = json.Unmarshal(tmp, &result)
100
+	return result, err
101
+}
102
+
103
+func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) {
104
+	begin := time.Now()
105
+	for i := 0; time.Now().Sub(begin) < funkerRetryTimeout; i++ {
106
+		result, err := executeTestChunk(funkerName, args)
107
+		if err == nil {
108
+			log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i)
109
+			return result, nil
110
+		}
111
+		if errorSeemsInteresting(err) {
112
+			log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v",
113
+				funkerName, args.ChunkID, i, err)
114
+		}
115
+		// TODO: non-constant sleep
116
+		time.Sleep(funkerRetryDuration)
117
+	}
118
+	return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, funkerRetryTimeout)
119
+}
120
+
121
// errorSeemsInteresting returns true unless err looks like the known
// connectivity noise from https://github.com/bfirsh/funker/issues/3
// (e.g. "connection refused" while all funker replicas are busy).
func errorSeemsInteresting(err error) bool {
	msg := err.Error()
	for _, boring := range []string{
		"connection refused",
		"connection reset by peer",
		"no such host",
		"transport endpoint is not connected",
		"no route to host",
	} {
		if strings.Contains(msg, boring) {
			return false
		}
	}
	return true
}
0 132
new file mode 100644
... ...
@@ -0,0 +1,65 @@
0
+package main
1
+
2
+import (
3
+	"errors"
4
+	"flag"
5
+	"io/ioutil"
6
+	"log"
7
+	"strings"
8
+)
9
+
10
+func main() {
11
+	if err := xmain(); err != nil {
12
+		log.Fatalf("fatal error: %v", err)
13
+	}
14
+}
15
+
16
+func xmain() error {
17
+	workerService := flag.String("worker-service", "", "Name of worker service")
18
+	chunks := flag.Int("chunks", 0, "Number of chunks")
19
+	input := flag.String("input", "", "Path to input file")
20
+	randSeed := flag.Int64("rand-seed", int64(0), "Random seed")
21
+	shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
22
+	flag.Parse()
23
+	if *workerService == "" {
24
+		return errors.New("worker-service unset")
25
+	}
26
+	if *chunks == 0 {
27
+		return errors.New("chunks unset")
28
+	}
29
+	if *input == "" {
30
+		return errors.New("input unset")
31
+	}
32
+
33
+	tests, err := loadTests(*input)
34
+	if err != nil {
35
+		return err
36
+	}
37
+	testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed)
38
+	log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks))
39
+	return executeTests(*workerService, testChunks)
40
+}
41
+
42
+func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string {
43
+	// shuffling (experimental) mitigates makespan nonuniformity
44
+	// Not sure this can cause some locality problem..
45
+	if shuffle {
46
+		shuffleStrings(tests, randSeed)
47
+	}
48
+	return chunkStrings(tests, numChunks)
49
+}
50
+
51
// loadTests reads filename and returns its non-empty lines, with surrounding
// whitespace trimmed from each line.
func loadTests(filename string) ([]string, error) {
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	var tests []string
	for _, raw := range strings.Split(string(content), "\n") {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			tests = append(tests, trimmed)
		}
	}
	return tests, nil
}
0 65
new file mode 100644
... ...
@@ -0,0 +1,28 @@
0
+package main
1
+
2
+import (
3
+	"math/rand"
4
+)
5
+
6
// chunkStrings splits x into at most numChunks chunks of roughly equal size,
// preserving the relative order of the elements. The chunks alias x's
// backing array. An empty x yields a nil result; numChunks must be positive.
func chunkStrings(x []string, numChunks int) [][]string {
	// ceil(len(x) / numChunks) elements per chunk
	size := (len(x) + numChunks - 1) / numChunks
	var chunks [][]string
	for lo := 0; lo < len(x); lo += size {
		hi := lo + size
		if hi > len(x) {
			hi = len(x)
		}
		chunks = append(chunks, x[lo:hi])
	}
	return chunks
}
19
+
20
// shuffleStrings shuffles x in place with a rand.Source seeded by seed, so
// the resulting permutation is reproducible for a given seed.
func shuffleStrings(x []string, seed int64) {
	rng := rand.New(rand.NewSource(seed))
	// Fisher-Yates, walking forward; the sequence of rng.Intn(i+1) calls is
	// kept identical so seeded runs remain reproducible.
	for i := 0; i < len(x); i++ {
		j := rng.Intn(i + 1)
		x[i], x[j] = x[j], x[i]
	}
}
0 28
new file mode 100644
... ...
@@ -0,0 +1,63 @@
0
+package main
1
+
2
+import (
3
+	"fmt"
4
+	"reflect"
5
+	"testing"
6
+	"time"
7
+)
8
+
9
// generateInput returns inputLen synthetic test strings "s0", "s1", ...
func generateInput(inputLen int) []string {
	input := make([]string, 0, inputLen)
	for i := 0; i < inputLen; i++ {
		input = append(input, fmt.Sprintf("s%d", i))
	}
	return input
}
17
+
18
// testChunkStrings checks that chunkStrings partitions its input without
// losing, duplicating, or reordering elements: concatenating the resulting
// chunks must reproduce the original input exactly.
func testChunkStrings(t *testing.T, inputLen, numChunks int) {
	t.Logf("inputLen=%d, numChunks=%d", inputLen, numChunks)
	input := generateInput(inputLen)
	result := chunkStrings(input, numChunks)
	t.Logf("result has %d chunks", len(result))
	inputReconstructedFromResult := []string{}
	for i, chunk := range result {
		t.Logf("chunk %d has %d elements", i, len(chunk))
		inputReconstructedFromResult = append(inputReconstructedFromResult, chunk...)
	}
	if !reflect.DeepEqual(input, inputReconstructedFromResult) {
		t.Fatal("input != inputReconstructedFromResult")
	}
}
32
+
33
// The following tests exercise chunkStrings with: an even split (4/4),
// a single chunk (4/1), more chunks than elements (1/4), an exact division
// (1000/8), and a division with a remainder chunk (1000/9).

func TestChunkStrings_4_4(t *testing.T) {
	testChunkStrings(t, 4, 4)
}

func TestChunkStrings_4_1(t *testing.T) {
	testChunkStrings(t, 4, 1)
}

func TestChunkStrings_1_4(t *testing.T) {
	testChunkStrings(t, 1, 4)
}

func TestChunkStrings_1000_8(t *testing.T) {
	testChunkStrings(t, 1000, 8)
}

func TestChunkStrings_1000_9(t *testing.T) {
	testChunkStrings(t, 1000, 9)
}
52
+
53
// testShuffleStrings shuffles a synthetic input of the given length with the
// given seed and logs the result. It only verifies that shuffleStrings does
// not panic; the permutation itself is not asserted.
func testShuffleStrings(t *testing.T, inputLen int, seed int64) {
	t.Logf("inputLen=%d, seed=%d", inputLen, seed)
	x := generateInput(inputLen)
	shuffleStrings(x, seed)
	t.Logf("shuffled: %v", x)
}

func TestShuffleStrings_100(t *testing.T) {
	// Time-based seed: this test is intentionally non-deterministic.
	testShuffleStrings(t, 100, time.Now().UnixNano())
}
0 63
new file mode 100644
... ...
@@ -0,0 +1,18 @@
0
+package types
1
+
2
// Args is the type for funker args
type Args struct {
	// ChunkID is a unique number of the chunk
	ChunkID int `json:"chunk_id"`
	// Tests is the set of the strings that are passed as `-check.f` filters
	Tests []string `json:"tests"`
}

// Result is the type for funker result
type Result struct {
	// ChunkID corresponds to Args.ChunkID
	ChunkID int `json:"chunk_id"`
	// Code is the exit code
	Code   int    `json:"code"`
	// RawLog is the raw output of the chunk execution, relayed to the
	// caller's log line by line.
	RawLog string `json:"raw_log"`
}
0 18
new file mode 100644
... ...
@@ -0,0 +1,2 @@
0
+# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
1
+github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
0 2
new file mode 100644
... ...
@@ -0,0 +1,191 @@
0
+
1
+                                 Apache License
2
+                           Version 2.0, January 2004
3
+                        http://www.apache.org/licenses/
4
+
5
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+   1. Definitions.
8
+
9
+      "License" shall mean the terms and conditions for use, reproduction,
10
+      and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+      "Licensor" shall mean the copyright owner or entity authorized by
13
+      the copyright owner that is granting the License.
14
+
15
+      "Legal Entity" shall mean the union of the acting entity and all
16
+      other entities that control, are controlled by, or are under common
17
+      control with that entity. For the purposes of this definition,
18
+      "control" means (i) the power, direct or indirect, to cause the
19
+      direction or management of such entity, whether by contract or
20
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+      outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+      "You" (or "Your") shall mean an individual or Legal Entity
24
+      exercising permissions granted by this License.
25
+
26
+      "Source" form shall mean the preferred form for making modifications,
27
+      including but not limited to software source code, documentation
28
+      source, and configuration files.
29
+
30
+      "Object" form shall mean any form resulting from mechanical
31
+      transformation or translation of a Source form, including but
32
+      not limited to compiled object code, generated documentation,
33
+      and conversions to other media types.
34
+
35
+      "Work" shall mean the work of authorship, whether in Source or
36
+      Object form, made available under the License, as indicated by a
37
+      copyright notice that is included in or attached to the work
38
+      (an example is provided in the Appendix below).
39
+
40
+      "Derivative Works" shall mean any work, whether in Source or Object
41
+      form, that is based on (or derived from) the Work and for which the
42
+      editorial revisions, annotations, elaborations, or other modifications
43
+      represent, as a whole, an original work of authorship. For the purposes
44
+      of this License, Derivative Works shall not include works that remain
45
+      separable from, or merely link (or bind by name) to the interfaces of,
46
+      the Work and Derivative Works thereof.
47
+
48
+      "Contribution" shall mean any work of authorship, including
49
+      the original version of the Work and any modifications or additions
50
+      to that Work or Derivative Works thereof, that is intentionally
51
+      submitted to Licensor for inclusion in the Work by the copyright owner
52
+      or by an individual or Legal Entity authorized to submit on behalf of
53
+      the copyright owner. For the purposes of this definition, "submitted"
54
+      means any form of electronic, verbal, or written communication sent
55
+      to the Licensor or its representatives, including but not limited to
56
+      communication on electronic mailing lists, source code control systems,
57
+      and issue tracking systems that are managed by, or on behalf of, the
58
+      Licensor for the purpose of discussing and improving the Work, but
59
+      excluding communication that is conspicuously marked or otherwise
60
+      designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+      "Contributor" shall mean Licensor and any individual or Legal Entity
63
+      on behalf of whom a Contribution has been received by Licensor and
64
+      subsequently incorporated within the Work.
65
+
66
+   2. Grant of Copyright License. Subject to the terms and conditions of
67
+      this License, each Contributor hereby grants to You a perpetual,
68
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+      copyright license to reproduce, prepare Derivative Works of,
70
+      publicly display, publicly perform, sublicense, and distribute the
71
+      Work and such Derivative Works in Source or Object form.
72
+
73
+   3. Grant of Patent License. Subject to the terms and conditions of
74
+      this License, each Contributor hereby grants to You a perpetual,
75
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+      (except as stated in this section) patent license to make, have made,
77
+      use, offer to sell, sell, import, and otherwise transfer the Work,
78
+      where such license applies only to those patent claims licensable
79
+      by such Contributor that are necessarily infringed by their
80
+      Contribution(s) alone or by combination of their Contribution(s)
81
+      with the Work to which such Contribution(s) was submitted. If You
82
+      institute patent litigation against any entity (including a
83
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+      or a Contribution incorporated within the Work constitutes direct
85
+      or contributory patent infringement, then any patent licenses
86
+      granted to You under this License for that Work shall terminate
87
+      as of the date such litigation is filed.
88
+
89
+   4. Redistribution. You may reproduce and distribute copies of the
90
+      Work or Derivative Works thereof in any medium, with or without
91
+      modifications, and in Source or Object form, provided that You
92
+      meet the following conditions:
93
+
94
+      (a) You must give any other recipients of the Work or
95
+          Derivative Works a copy of this License; and
96
+
97
+      (b) You must cause any modified files to carry prominent notices
98
+          stating that You changed the files; and
99
+
100
+      (c) You must retain, in the Source form of any Derivative Works
101
+          that You distribute, all copyright, patent, trademark, and
102
+          attribution notices from the Source form of the Work,
103
+          excluding those notices that do not pertain to any part of
104
+          the Derivative Works; and
105
+
106
+      (d) If the Work includes a "NOTICE" text file as part of its
107
+          distribution, then any Derivative Works that You distribute must
108
+          include a readable copy of the attribution notices contained
109
+          within such NOTICE file, excluding those notices that do not
110
+          pertain to any part of the Derivative Works, in at least one
111
+          of the following places: within a NOTICE text file distributed
112
+          as part of the Derivative Works; within the Source form or
113
+          documentation, if provided along with the Derivative Works; or,
114
+          within a display generated by the Derivative Works, if and
115
+          wherever such third-party notices normally appear. The contents
116
+          of the NOTICE file are for informational purposes only and
117
+          do not modify the License. You may add Your own attribution
118
+          notices within Derivative Works that You distribute, alongside
119
+          or as an addendum to the NOTICE text from the Work, provided
120
+          that such additional attribution notices cannot be construed
121
+          as modifying the License.
122
+
123
+      You may add Your own copyright statement to Your modifications and
124
+      may provide additional or different license terms and conditions
125
+      for use, reproduction, or distribution of Your modifications, or
126
+      for any such Derivative Works as a whole, provided Your use,
127
+      reproduction, and distribution of the Work otherwise complies with
128
+      the conditions stated in this License.
129
+
130
+   5. Submission of Contributions. Unless You explicitly state otherwise,
131
+      any Contribution intentionally submitted for inclusion in the Work
132
+      by You to the Licensor shall be under the terms and conditions of
133
+      this License, without any additional terms or conditions.
134
+      Notwithstanding the above, nothing herein shall supersede or modify
135
+      the terms of any separate license agreement you may have executed
136
+      with Licensor regarding such Contributions.
137
+
138
+   6. Trademarks. This License does not grant permission to use the trade
139
+      names, trademarks, service marks, or product names of the Licensor,
140
+      except as required for reasonable and customary use in describing the
141
+      origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+   7. Disclaimer of Warranty. Unless required by applicable law or
144
+      agreed to in writing, Licensor provides the Work (and each
145
+      Contributor provides its Contributions) on an "AS IS" BASIS,
146
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+      implied, including, without limitation, any warranties or conditions
148
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+      PARTICULAR PURPOSE. You are solely responsible for determining the
150
+      appropriateness of using or redistributing the Work and assume any
151
+      risks associated with Your exercise of permissions under this License.
152
+
153
+   8. Limitation of Liability. In no event and under no legal theory,
154
+      whether in tort (including negligence), contract, or otherwise,
155
+      unless required by applicable law (such as deliberate and grossly
156
+      negligent acts) or agreed to in writing, shall any Contributor be
157
+      liable to You for damages, including any direct, indirect, special,
158
+      incidental, or consequential damages of any character arising as a
159
+      result of this License or out of the use or inability to use the
160
+      Work (including but not limited to damages for loss of goodwill,
161
+      work stoppage, computer failure or malfunction, or any and all
162
+      other commercial damages or losses), even if such Contributor
163
+      has been advised of the possibility of such damages.
164
+
165
+   9. Accepting Warranty or Additional Liability. While redistributing
166
+      the Work or Derivative Works thereof, You may choose to offer,
167
+      and charge a fee for, acceptance of support, warranty, indemnity,
168
+      or other liability obligations and/or rights consistent with this
169
+      License. However, in accepting such obligations, You may act only
170
+      on Your own behalf and on Your sole responsibility, not on behalf
171
+      of any other Contributor, and only if You agree to indemnify,
172
+      defend, and hold each Contributor harmless for any liability
173
+      incurred by, or claims asserted against, such Contributor by reason
174
+      of your accepting any such warranty or additional liability.
175
+
176
+   END OF TERMS AND CONDITIONS
177
+
178
+   Copyright 2016 Docker, Inc.
179
+
180
+   Licensed under the Apache License, Version 2.0 (the "License");
181
+   you may not use this file except in compliance with the License.
182
+   You may obtain a copy of the License at
183
+
184
+       http://www.apache.org/licenses/LICENSE-2.0
185
+
186
+   Unless required by applicable law or agreed to in writing, software
187
+   distributed under the License is distributed on an "AS IS" BASIS,
188
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
189
+   See the License for the specific language governing permissions and
190
+   limitations under the License.
0 191
new file mode 100644
... ...
@@ -0,0 +1,50 @@
0
+package funker
1
+
2
+import (
3
+	"encoding/json"
4
+	"io/ioutil"
5
+	"net"
6
+	"time"
7
+)
8
+
9
// Call a Funker function. name is expected to resolve (e.g. via the Swarm
// service DNS name) to a callee listening on TCP port 9999. args is
// JSON-marshaled and written to the connection; the JSON reply is decoded
// into an untyped interface{} value.
func Call(name string, args interface{}) (interface{}, error) {
	argsJSON, err := json.Marshal(args)
	if err != nil {
		return nil, err
	}

	addr, err := net.ResolveTCPAddr("tcp", name+":9999")
	if err != nil {
		return nil, err
	}

	conn, err := net.DialTCP("tcp", nil, addr)
	if err != nil {
		return nil, err
	}
	// Keepalive is a workaround for docker/docker#29655 .
	// The implementation of FIN_WAIT2 seems weird on Swarm-mode.
	// It seems to always refuse any packet after 60 seconds.
	//
	// TODO: remove this workaround if the issue gets resolved on the Docker side
	if err := conn.SetKeepAlive(true); err != nil {
		return nil, err
	}
	if err := conn.SetKeepAlivePeriod(30 * time.Second); err != nil {
		return nil, err
	}
	if _, err = conn.Write(argsJSON); err != nil {
		return nil, err
	}
	// Half-close the write side so the callee sees EOF and knows the
	// arguments are complete.
	if err = conn.CloseWrite(); err != nil {
		return nil, err
	}
	retJSON, err := ioutil.ReadAll(conn)
	if err != nil {
		return nil, err
	}
	var ret interface{}
	err = json.Unmarshal(retJSON, &ret)
	return ret, err
}
0 50
new file mode 100644
... ...
@@ -0,0 +1,54 @@
0
+package funker
1
+
2
+import (
3
+	"encoding/json"
4
+	"fmt"
5
+	"io/ioutil"
6
+	"net"
7
+	"reflect"
8
+)
9
+
10
// Handle a Funker function. handler must be a function taking exactly one
// (JSON-unmarshalable) parameter and returning exactly one value. Handle
// serves a single request on TCP port 9999 and then returns: it reads the
// JSON-encoded argument until EOF, invokes handler, and writes the
// JSON-encoded return value back on the same connection.
func Handle(handler interface{}) error {
	handlerValue := reflect.ValueOf(handler)
	handlerType := handlerValue.Type()
	if handlerType.Kind() != reflect.Func || handlerType.NumIn() != 1 || handlerType.NumOut() != 1 {
		return fmt.Errorf("Handler must be a function with a single parameter and single return value.")
	}
	argsValue := reflect.New(handlerType.In(0))

	listener, err := net.Listen("tcp", ":9999")
	if err != nil {
		return err
	}
	conn, err := listener.Accept()
	if err != nil {
		return err
	}
	// We close listener, because we only allow single request.
	// Note that TCP "backlog" cannot be used for that purpose.
	// http://www.perlmonks.org/?node_id=940662
	if err = listener.Close(); err != nil {
		return err
	}
	// The caller half-closes its write side after sending the arguments,
	// so ReadAll terminates at EOF.
	argsJSON, err := ioutil.ReadAll(conn)
	if err != nil {
		return err
	}
	err = json.Unmarshal(argsJSON, argsValue.Interface())
	if err != nil {
		return err
	}

	ret := handlerValue.Call([]reflect.Value{argsValue.Elem()})[0].Interface()
	retJSON, err := json.Marshal(ret)
	if err != nil {
		return err
	}

	if _, err = conn.Write(retJSON); err != nil {
		return err
	}

	return conn.Close()
}
0 54
new file mode 100644
... ...
@@ -0,0 +1,109 @@
0
+package main
1
+
2
+import (
3
+	"bytes"
4
+	"context"
5
+	"fmt"
6
+	"io"
7
+	"os"
8
+	"strings"
9
+
10
+	"github.com/docker/docker/api/types"
11
+	"github.com/docker/docker/api/types/container"
12
+	"github.com/docker/docker/api/types/mount"
13
+	"github.com/docker/docker/client"
14
+	"github.com/docker/docker/pkg/stdcopy"
15
+)
16
+
17
// testChunkExecutor executes integration-cli binary.
// image needs to be the worker image itself. tests is an OR-set of
// regexps used for filtering the tests to run.
type testChunkExecutor func(image string, tests []string) (int64, string, error)

// dryTestChunkExecutor does not execute anything; it only reports what
// would have been run, always with a zero exit status.
func dryTestChunkExecutor(image string, tests []string) (int64, string, error) {
	summary := fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests)
	return 0, summary, nil
}
24
+
25
// privilegedTestChunkExecutor invokes a privileged container from the worker
// service via bind-mounted API socket so as to execute the test chunk
//
// It returns the container's exit code, the combined stdout/stderr log of
// the run, and any error encountered while driving the container.
func privilegedTestChunkExecutor(image string, tests []string) (int64, string, error) {
	cli, err := client.NewEnvClient()
	if err != nil {
		return 0, "", err
	}
	// propagate variables from the host (needs to be defined in the compose file)
	experimental := os.Getenv("DOCKER_EXPERIMENTAL")
	graphdriver := os.Getenv("DOCKER_GRAPHDRIVER")
	if graphdriver == "" {
		// Fall back to the graphdriver the local daemon reports.
		info, err := cli.Info(context.Background())
		if err != nil {
			return 0, "", err
		}
		graphdriver = info.Driver
	}
	// `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration-cli`)
	// but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work.
	//
	// Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs`
	//
	// see integration-cli/daemon/daemon.go
	daemonDest := "/daemon_dest"
	config := container.Config{
		Image: image,
		Env: []string{
			// `tests` entries are joined with "|" to form a single
			// `-check.f` OR-regexp for this chunk.
			"TESTFLAGS=-check.f " + strings.Join(tests, "|"),
			"KEEPBUNDLE=1",
			"DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli
			"DOCKER_EXPERIMENTAL=" + experimental,
			"DOCKER_GRAPHDRIVER=" + graphdriver,
			"DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest,
		},
		// TODO: set label?
		Entrypoint: []string{"hack/dind"},
		Cmd:        []string{"hack/make.sh", "test-integration-cli"},
	}
	hostConfig := container.HostConfig{
		AutoRemove: true,
		Privileged: true,
		Mounts: []mount.Mount{
			{
				Type:   mount.TypeVolume,
				Target: daemonDest,
			},
		},
	}
	id, stream, err := runContainer(context.Background(), cli, config, hostConfig)
	if err != nil {
		return 0, "", err
	}
	// Relay the container output to our stdout/stderr while capturing a
	// copy in b; the copy is returned to the caller as the raw log.
	var b bytes.Buffer
	teeContainerStream(&b, os.Stdout, os.Stderr, stream)
	rc, err := cli.ContainerWait(context.Background(), id)
	if err != nil {
		return 0, "", err
	}
	return rc, b.String(), nil
}
85
+
86
+func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) {
87
+	created, err := cli.ContainerCreate(context.Background(),
88
+		&config, &hostConfig, nil, "")
89
+	if err != nil {
90
+		return "", nil, err
91
+	}
92
+	if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
93
+		return "", nil, err
94
+	}
95
+	stream, err := cli.ContainerLogs(ctx,
96
+		created.ID,
97
+		types.ContainerLogsOptions{
98
+			ShowStdout: true,
99
+			ShowStderr: true,
100
+			Follow:     true,
101
+		})
102
+	return created.ID, stream, err
103
+}
104
+
105
// teeContainerStream demultiplexes the docker log stream into stdout and
// stderr, duplicating both into w (used by the caller to capture the whole
// log as one string). Copy errors are deliberately ignored (best-effort
// log relaying); the stream is always closed before returning.
func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) {
	stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream)
	stream.Close()
}
0 109
new file mode 100644
... ...
@@ -0,0 +1,68 @@
0
+package main
1
+
2
+import (
3
+	"flag"
4
+	"fmt"
5
+	"log"
6
+	"time"
7
+
8
+	"github.com/bfirsh/funker-go"
9
+	"github.com/docker/distribution/reference"
10
+	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
11
+)
12
+
13
+func main() {
14
+	if err := xmain(); err != nil {
15
+		log.Fatalf("fatal error: %v", err)
16
+	}
17
+}
18
+
19
+func validImageDigest(s string) bool {
20
+	return reference.DigestRegexp.FindString(s) != ""
21
+}
22
+
23
// xmain parses the worker flags and serves a single funker request, using
// the privileged executor or, with -dry-run, the no-op executor.
func xmain() error {
	workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself")
	dryRun := flag.Bool("dry-run", false, "Dry run")
	flag.Parse()
	if !validImageDigest(*workerImageDigest) {
		// Because of issue #29582.
		// `docker service create localregistry.example.com/blahblah:latest` pulls the image data to local, but not a tag.
		// So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally`
		return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest)
	}
	executor := privilegedTestChunkExecutor
	if *dryRun {
		executor = dryTestChunkExecutor
	}
	return handle(*workerImageDigest, executor)
}
39
+
40
+func handle(workerImageDigest string, executor testChunkExecutor) error {
41
+	log.Printf("Waiting for a funker request")
42
+	return funker.Handle(func(args *types.Args) types.Result {
43
+		log.Printf("Executing chunk %d, contains %d test filters",
44
+			args.ChunkID, len(args.Tests))
45
+		begin := time.Now()
46
+		code, rawLog, err := executor(workerImageDigest, args.Tests)
47
+		if err != nil {
48
+			log.Printf("Error while executing chunk %d: %v", args.ChunkID, err)
49
+			if code == 0 {
50
+				// Make sure this is a failure
51
+				code = 1
52
+			}
53
+			return types.Result{
54
+				ChunkID: args.ChunkID,
55
+				Code:    int(code),
56
+				RawLog:  rawLog,
57
+			}
58
+		}
59
+		elapsed := time.Now().Sub(begin)
60
+		log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed)
61
+		return types.Result{
62
+			ChunkID: args.ChunkID,
63
+			Code:    int(code),
64
+			RawLog:  rawLog,
65
+		}
66
+	})
67
+}
0 68
new file mode 100644
... ...
@@ -0,0 +1,121 @@
0
+package main
1
+
2
+import (
3
+	"context"
4
+	"io/ioutil"
5
+	"os"
6
+	"path/filepath"
7
+	"text/template"
8
+
9
+	"github.com/docker/docker/client"
10
+)
11
+
12
// composeTemplate is the text/template rendered by createCompose into the
// docker-compose (v3) file for the stack. It declares the "worker" and
// "master" services, the overlay network they share, and the pre-created
// external volume that carries the master's input data.
const composeTemplate = `# generated by integration-cli-on-swarm
version: "3"

services:
  worker:
    image: "{{.WorkerImage}}"
    command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}"]
    networks:
      - net
    volumes:
# Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}}
      - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}}
    deploy:
      mode: replicated
      replicas: {{.Replicas}}
      restart_policy:
# The restart condition needs to be any for funker function
        condition: any

  master:
    image: "{{.MasterImage}}"
    command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"]
    networks:
      - net
    volumes:
      - {{.Volume}}:/mnt
    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: none
      placement:
# Make sure the master can access the volume
        constraints: [node.id == {{.SelfNodeID}}]

networks:
  net:

volumes:
  {{.Volume}}:
    external: true
`
57
+
58
// composeOptions is the caller-supplied set of knobs for generating the
// docker-compose file.
type composeOptions struct {
	Replicas    int    // number of worker service replicas
	Chunks      int    // number of test chunks executed in batch
	MasterImage string // image for the master service
	WorkerImage string // image for the worker service
	Volume      string // name of the pre-created external volume
	Shuffle     bool   // shuffle the input so as to mitigate makespan nonuniformity
	RandSeed    int64  // random seed used for shuffling
	DryRun      bool   // workers report without actually running tests
}
68
+
69
// composeTemplateOptions is composeOptions augmented with values resolved
// automatically (from the daemon and the host environment) by
// createCompose rather than supplied by the caller.
type composeTemplateOptions struct {
	composeOptions
	WorkerImageDigest     string // digest (or image ID fallback) of the worker image
	SelfNodeID            string // Swarm node ID of the local daemon
	EnvDockerGraphDriver  string // propagated from $DOCKER_GRAPHDRIVER
	EnvDockerExperimental string // propagated from $DOCKER_EXPERIMENTAL
}
76
+
77
+// createCompose creates "dir/docker-compose.yml".
78
+// If dir is empty, TempDir() is used.
79
+func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) {
80
+	if dir == "" {
81
+		var err error
82
+		dir, err = ioutil.TempDir("", "integration-cli-on-swarm-")
83
+		if err != nil {
84
+			return "", err
85
+		}
86
+	}
87
+	resolved := composeTemplateOptions{}
88
+	resolved.composeOptions = opts
89
+	workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName)
90
+	if err != nil {
91
+		return "", err
92
+	}
93
+	if len(workerImageInspect.RepoDigests) > 0 {
94
+		resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0]
95
+	} else {
96
+		// fall back for non-pushed image
97
+		resolved.WorkerImageDigest = workerImageInspect.ID
98
+	}
99
+	info, err := cli.Info(context.Background())
100
+	if err != nil {
101
+		return "", err
102
+	}
103
+	resolved.SelfNodeID = info.Swarm.NodeID
104
+	resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER")
105
+	resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL")
106
+	composeFilePath := filepath.Join(dir, "docker-compose.yml")
107
+	tmpl, err := template.New("").Parse(composeTemplate)
108
+	if err != nil {
109
+		return "", err
110
+	}
111
+	f, err := os.Create(composeFilePath)
112
+	if err != nil {
113
+		return "", err
114
+	}
115
+	defer f.Close()
116
+	if err = tmpl.Execute(f, resolved); err != nil {
117
+		return "", err
118
+	}
119
+	return composeFilePath, nil
120
+}
0 121
new file mode 100644
... ...
@@ -0,0 +1,64 @@
0
+package main
1
+
2
+import (
3
+	"fmt"
4
+	"os"
5
+	"os/exec"
6
+	"strings"
7
+	"time"
8
+
9
+	"github.com/docker/docker/client"
10
+)
11
+
12
// system runs each command line in sequence with stdout/stderr attached to
// the current process and the current environment, stopping at the first
// failing command.
func system(commands [][]string) error {
	for _, argv := range commands {
		cmd := exec.Command(argv[0], argv[1:]...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Env = os.Environ()
		err := cmd.Run()
		if err != nil {
			return err
		}
	}
	return nil
}
24
+
25
+func pushImage(unusedCli *client.Client, remote, local string) error {
26
+	// FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...)
27
+	return system([][]string{
28
+		{"docker", "image", "tag", local, remote},
29
+		{"docker", "image", "push", remote},
30
+	})
31
+}
32
+
33
+func deployStack(unusedCli *client.Client, stackName, composeFilePath string) error {
34
+	// FIXME: eliminate os/exec (but stack is implemented in CLI ...)
35
+	return system([][]string{
36
+		{"docker", "stack", "deploy",
37
+			"--compose-file", composeFilePath,
38
+			"--with-registry-auth",
39
+			stackName},
40
+	})
41
+}
42
+
43
+func hasStack(unusedCli *client.Client, stackName string) bool {
44
+	// FIXME: eliminate os/exec (but stack is implemented in CLI ...)
45
+	out, err := exec.Command("docker", "stack", "ls").Output()
46
+	if err != nil {
47
+		panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out)))
48
+	}
49
+	// FIXME: not accurate
50
+	return strings.Contains(string(out), stackName)
51
+}
52
+
53
+func removeStack(unusedCli *client.Client, stackName string) error {
54
+	// FIXME: eliminate os/exec (but stack is implemented in CLI ...)
55
+	if err := system([][]string{
56
+		{"docker", "stack", "rm", stackName},
57
+	}); err != nil {
58
+		return err
59
+	}
60
+	// FIXME
61
+	time.Sleep(10 * time.Second)
62
+	return nil
63
+}
0 64
new file mode 100644
... ...
@@ -0,0 +1,55 @@
0
+package main
1
+
2
+import (
3
+	"fmt"
4
+	"io/ioutil"
5
+	"path/filepath"
6
+	"regexp"
7
+)
8
+
9
// testFuncRegexp matches gofmt-ed check.v1 test method declarations such
// as `func (s *FooSuite) TestBar(c *check.C)`, capturing the suite type
// name and the test method name.
//
// Fix over the original: the regexp is compiled in a var initializer
// instead of an init() function (init() with no ordering requirement is
// discouraged).
var testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`)

// enumerateTestsForBytes returns `-check.f` regexp filter strings (e.g.
// "FooSuite.TestA$") for every test method found in the given Go source
// bytes, in order of appearance.
func enumerateTestsForBytes(b []byte) ([]string, error) {
	var tests []string
	for _, submatch := range testFuncRegexp.FindAllSubmatch(b, -1) {
		if len(submatch) == 3 {
			tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2]))
		}
	}
	return tests, nil
}
25
+
26
+// enumareteTests enumerates valid `-check.f` strings for all the test functions.
27
+// Note that we use regexp rather than parsing Go files for performance reason.
28
+// (Try `TESTFLAGS=-check.list make test-integration-cli` to see the slowness of parsing)
29
+// The files needs to be `gofmt`-ed
30
+//
31
+// The result will be as follows, but unsorted ('$' is appended because they are regexp for `-check.f`):
32
+//  "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$"
33
+//  "DockerAuthzSuite.TestAuthZPluginAllowEventStream$"
34
+//  ...
35
+//  "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$"
36
+func enumerateTests(wd string) ([]string, error) {
37
+	testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go"))
38
+	if err != nil {
39
+		return nil, err
40
+	}
41
+	var allTests []string
42
+	for _, testGoFile := range testGoFiles {
43
+		b, err := ioutil.ReadFile(testGoFile)
44
+		if err != nil {
45
+			return nil, err
46
+		}
47
+		tests, err := enumerateTestsForBytes(b)
48
+		if err != nil {
49
+			return nil, err
50
+		}
51
+		allTests = append(allTests, tests...)
52
+	}
53
+	return allTests, nil
54
+}
0 55
new file mode 100644
... ...
@@ -0,0 +1,84 @@
0
+package main
1
+
2
+import (
3
+	"os"
4
+	"path/filepath"
5
+	"reflect"
6
+	"sort"
7
+	"strings"
8
+	"testing"
9
+)
10
+
11
// getRepoTopDir returns the repository root, assuming the test runs from
// hack/integration-cli-on-swarm/host; otherwise the test is skipped.
func getRepoTopDir(t *testing.T) string {
	wd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	wd = filepath.Clean(wd)
	suffix := "hack/integration-cli-on-swarm/host"
	if !strings.HasSuffix(wd, suffix) {
		t.Skipf("cwd seems strange (needs to have suffix %s): %v", suffix, wd)
	}
	top := filepath.Join(wd, "../../..")
	return filepath.Clean(top)
}
23
+
24
+func TestEnumerateTests(t *testing.T) {
25
+	if testing.Short() {
26
+		t.Skip("skipping in short mode")
27
+	}
28
+	tests, err := enumerateTests(getRepoTopDir(t))
29
+	if err != nil {
30
+		t.Fatal(err)
31
+	}
32
+	sort.Strings(tests)
33
+	t.Logf("enumerated %d test filter strings:", len(tests))
34
+	for _, s := range tests {
35
+		t.Logf("- %q", s)
36
+	}
37
+}
38
+
39
// TestEnumerateTestsForBytes checks the regexp-based extraction against a
// fixture covering the interesting cases: multiple suites, arbitrary
// receiver names, a receiver without a name, an unexported (ignored)
// method, and an ungofmt-ed declaration.
func TestEnumerateTestsForBytes(t *testing.T) {
	b := []byte(`package main
import (
	"github.com/go-check/check"
)

func (s *FooSuite) TestA(c *check.C) {
}

func (s *FooSuite) TestAAA(c *check.C) {
}

func (s *BarSuite) TestBar(c *check.C) {
}

func (x *FooSuite) TestC(c *check.C) {
}

func (*FooSuite) TestD(c *check.C) {
}

// should not be counted
func (s *FooSuite) testE(c *check.C) {
}

// counted, although we don't support ungofmt file
  func   (s *FooSuite)    TestF  (c   *check.C){}
`)
	// Expected entries follow the order of appearance in the fixture.
	expected := []string{
		"FooSuite.TestA$",
		"FooSuite.TestAAA$",
		"BarSuite.TestBar$",
		"FooSuite.TestC$",
		"FooSuite.TestD$",
		"FooSuite.TestF$",
	}

	actual, err := enumerateTestsForBytes(b)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("expected %q, got %q", expected, actual)
	}
}
0 84
new file mode 100644
... ...
@@ -0,0 +1,190 @@
0
+package main
1
+
2
+import (
3
+	"context"
4
+	"flag"
5
+	"fmt"
6
+	"io"
7
+	"io/ioutil"
8
+	"os"
9
+	"strings"
10
+	"time"
11
+
12
+	"github.com/Sirupsen/logrus"
13
+	"github.com/docker/docker/api/types"
14
+	"github.com/docker/docker/api/types/filters"
15
+	"github.com/docker/docker/client"
16
+	"github.com/docker/docker/pkg/stdcopy"
17
+)
18
+
19
const (
	defaultStackName       = "integration-cli-on-swarm" // name of the deployed stack
	defaultVolumeName      = "integration-cli-on-swarm" // volume carrying the master's input data
	defaultMasterImageName = "integration-cli-master"   // locally built master image (see `make build-integration-cli-on-swarm`)
	defaultWorkerImageName = "integration-cli-worker"   // locally built worker image
)
25
+
26
+func main() {
27
+	if err := xmain(); err != nil {
28
+		logrus.Fatalf("fatal error: %v", err)
29
+	}
30
+}
31
+
32
+// xmain can call os.Exit()
33
+func xmain() error {
34
+	// Should we use cobra maybe?
35
+	replicas := flag.Int("replicas", 1, "Number of worker service replica")
36
+	chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)")
37
+	pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distribuetd execution. (empty == not to push)")
38
+	shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
39
+	// flags below are rarely used
40
+	randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == curent time)")
41
+	filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings")
42
+	dryRun := flag.Bool("dry-run", false, "Dry run")
43
+	flag.Parse()
44
+	if *chunks == 0 {
45
+		*chunks = *replicas
46
+	}
47
+	if *randSeed == int64(0) {
48
+		*randSeed = time.Now().UnixNano()
49
+	}
50
+	cli, err := client.NewEnvClient()
51
+	if err != nil {
52
+		return err
53
+	}
54
+	if hasStack(cli, defaultStackName) {
55
+		logrus.Infof("Removing stack %s", defaultStackName)
56
+		removeStack(cli, defaultStackName)
57
+	}
58
+	if hasVolume(cli, defaultVolumeName) {
59
+		logrus.Infof("Removing volume %s", defaultVolumeName)
60
+		removeVolume(cli, defaultVolumeName)
61
+	}
62
+	if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil {
63
+		return err
64
+	}
65
+	workerImageForStack := defaultWorkerImageName
66
+	if *pushWorkerImage != "" {
67
+		logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage)
68
+		if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil {
69
+			return err
70
+		}
71
+		workerImageForStack = *pushWorkerImage
72
+	}
73
+	compose, err := createCompose("", cli, composeOptions{
74
+		Replicas:    *replicas,
75
+		Chunks:      *chunks,
76
+		MasterImage: defaultMasterImageName,
77
+		WorkerImage: workerImageForStack,
78
+		Volume:      defaultVolumeName,
79
+		Shuffle:     *shuffle,
80
+		RandSeed:    *randSeed,
81
+		DryRun:      *dryRun,
82
+	})
83
+	if err != nil {
84
+		return err
85
+	}
86
+	filters, err := filtersBytes(*filtersFile)
87
+	if err != nil {
88
+		return err
89
+	}
90
+	logrus.Infof("Creating volume %s with input data", defaultVolumeName)
91
+	if err = createVolumeWithData(cli,
92
+		defaultVolumeName,
93
+		map[string][]byte{"/input": filters},
94
+		defaultMasterImageName); err != nil {
95
+		return err
96
+	}
97
+	logrus.Infof("Deploying stack %s from %s", defaultStackName, compose)
98
+	defer func() {
99
+		logrus.Infof("NOTE: You may want to inspect or clean up following resources:")
100
+		logrus.Infof(" - Stack: %s", defaultStackName)
101
+		logrus.Infof(" - Volume: %s", defaultVolumeName)
102
+		logrus.Infof(" - Compose file: %s", compose)
103
+		logrus.Infof(" - Master image: %s", defaultMasterImageName)
104
+		logrus.Infof(" - Worker image: %s", workerImageForStack)
105
+	}()
106
+	if err = deployStack(cli, defaultStackName, compose); err != nil {
107
+		return err
108
+	}
109
+	logrus.Infof("The log will be displayed here after some duration."+
110
+		"You can watch the live status via `docker service logs %s_worker`",
111
+		defaultStackName)
112
+	masterContainerID, err := waitForMasterUp(cli, defaultStackName)
113
+	if err != nil {
114
+		return err
115
+	}
116
+	rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID)
117
+	if err != nil {
118
+		return err
119
+	}
120
+	logrus.Infof("Exit status: %d", rc)
121
+	os.Exit(int(rc))
122
+	return nil
123
+}
124
+
125
+func ensureImages(cli *client.Client, images []string) error {
126
+	for _, image := range images {
127
+		_, _, err := cli.ImageInspectWithRaw(context.Background(), image)
128
+		if err != nil {
129
+			return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v",
130
+				image, err)
131
+		}
132
+	}
133
+	return nil
134
+}
135
+
136
+func filtersBytes(optionalFiltersFile string) ([]byte, error) {
137
+	var b []byte
138
+	if optionalFiltersFile == "" {
139
+		tests, err := enumerateTests(".")
140
+		if err != nil {
141
+			return b, err
142
+		}
143
+		b = []byte(strings.Join(tests, "\n") + "\n")
144
+	} else {
145
+		var err error
146
+		b, err = ioutil.ReadFile(optionalFiltersFile)
147
+		if err != nil {
148
+			return b, err
149
+		}
150
+	}
151
+	return b, nil
152
+}
153
+
154
+func waitForMasterUp(cli *client.Client, stackName string) (string, error) {
155
+	// FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping
156
+	time.Sleep(10 * time.Second)
157
+
158
+	fil := filters.NewArgs()
159
+	fil.Add("label", "com.docker.stack.namespace="+stackName)
160
+	// FIXME(AkihiroSuda): we should not rely on internal service naming convention
161
+	fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master")
162
+	masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
163
+		All:     true,
164
+		Filters: fil,
165
+	})
166
+	if err != nil {
167
+		return "", err
168
+	}
169
+	if len(masters) == 0 {
170
+		return "", fmt.Errorf("master not running in stack %s?", stackName)
171
+	}
172
+	return masters[0].ID, nil
173
+}
174
+
175
+func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) {
176
+	stream, err := cli.ContainerLogs(context.Background(),
177
+		containerID,
178
+		types.ContainerLogsOptions{
179
+			ShowStdout: true,
180
+			ShowStderr: true,
181
+			Follow:     true,
182
+		})
183
+	if err != nil {
184
+		return 1, err
185
+	}
186
+	stdcopy.StdCopy(stdout, stderr, stream)
187
+	stream.Close()
188
+	return cli.ContainerWait(context.Background(), containerID)
189
+}
0 190
new file mode 100644
... ...
@@ -0,0 +1,88 @@
0
+package main
1
+
2
+import (
3
+	"archive/tar"
4
+	"bytes"
5
+	"context"
6
+	"io"
7
+
8
+	"github.com/docker/docker/api/types"
9
+	"github.com/docker/docker/api/types/container"
10
+	"github.com/docker/docker/api/types/mount"
11
+	"github.com/docker/docker/api/types/volume"
12
+	"github.com/docker/docker/client"
13
+)
14
+
15
+func createTar(data map[string][]byte) (io.Reader, error) {
16
+	var b bytes.Buffer
17
+	tw := tar.NewWriter(&b)
18
+	for path, datum := range data {
19
+		hdr := tar.Header{
20
+			Name: path,
21
+			Mode: 0644,
22
+			Size: int64(len(datum)),
23
+		}
24
+		if err := tw.WriteHeader(&hdr); err != nil {
25
+			return nil, err
26
+		}
27
+		_, err := tw.Write(datum)
28
+		if err != nil {
29
+			return nil, err
30
+		}
31
+	}
32
+	if err := tw.Close(); err != nil {
33
+		return nil, err
34
+	}
35
+	return &b, nil
36
+}
37
+
38
+// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar"))
39
+// Internally, a container is created from the image so as to provision the data to the volume,
40
+// which is attached to the container.
41
+func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error {
42
+	_, err := cli.VolumeCreate(context.Background(),
43
+		volume.VolumesCreateBody{
44
+			Driver: "local",
45
+			Name:   volumeName,
46
+		})
47
+	if err != nil {
48
+		return err
49
+	}
50
+	mnt := "/mnt"
51
+	miniContainer, err := cli.ContainerCreate(context.Background(),
52
+		&container.Config{
53
+			Image: image,
54
+		},
55
+		&container.HostConfig{
56
+			Mounts: []mount.Mount{
57
+				{
58
+					Type:   mount.TypeVolume,
59
+					Source: volumeName,
60
+					Target: mnt,
61
+				},
62
+			},
63
+		}, nil, "")
64
+	if err != nil {
65
+		return err
66
+	}
67
+	tr, err := createTar(data)
68
+	if err != nil {
69
+		return err
70
+	}
71
+	if cli.CopyToContainer(context.Background(),
72
+		miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil {
73
+		return err
74
+	}
75
+	return cli.ContainerRemove(context.Background(),
76
+		miniContainer.ID,
77
+		types.ContainerRemoveOptions{})
78
+}
79
+
80
+func hasVolume(cli *client.Client, volumeName string) bool {
81
+	_, err := cli.VolumeInspect(context.Background(), volumeName)
82
+	return err == nil
83
+}
84
+
85
+func removeVolume(cli *client.Client, volumeName string) error {
86
+	return cli.VolumeRemove(context.Background(), volumeName, true)
87
+}
... ...
@@ -60,6 +60,7 @@ test_env() {
60 60
 		# use "env -i" to tightly control the environment variables that bleed into the tests
61 61
 		env -i \
62 62
 			DEST="$DEST" \
63
+			DOCKER_INTEGRATION_DAEMON_DEST="$DOCKER_INTEGRATION_DAEMON_DEST" \
63 64
 			DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \
64 65
 			DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \
65 66
 			DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \
... ...
@@ -77,12 +77,15 @@ type clientConfig struct {
77 77
 }
78 78
 
79 79
 // New returns a Daemon instance to be used for testing.
80
-// This will create a directory such as d123456789 in the folder specified by $DEST.
80
+// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
81 81
 // The daemon will not automatically start.
82 82
 func New(t testingT, dockerBinary string, dockerdBinary string, config Config) *Daemon {
83
-	dest := os.Getenv("DEST")
83
+	dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
84 84
 	if dest == "" {
85
-		t.Fatalf("Please set the DEST environment variable")
85
+		dest = os.Getenv("DEST")
86
+	}
87
+	if dest == "" {
88
+		t.Fatalf("Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
86 89
 	}
87 90
 
88 91
 	if err := os.MkdirAll(SockRoot, 0700); err != nil {