Browse code

hack: remove integration-cli-on-swarm

integration-on-swarm had unnecessary complexity and was too hard to
maintain. Also, it didn't support the new non-CLI integration test suite.

I'm now doing some experiments out of the repo using Kubernetes:
https://github.com/AkihiroSuda/kube-moby-integration

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
(cherry picked from commit e7fbe8e457bb15b0750e327d3b29f127dd18fdbd)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

Akihiro Suda authored on 2019/06/01 04:45:26
Showing 24 changed files
... ...
@@ -3,5 +3,4 @@ bundles
3 3
 vendor/pkg
4 4
 .go-pkg-cache
5 5
 .git
6
-hack/integration-cli-on-swarm/integration-cli-on-swarm
7 6
 
... ...
@@ -12,6 +12,5 @@ daemon/graphdriver/overlay2/**          @dmcgowan
12 12
 daemon/graphdriver/windows/**           @johnstep @jhowardmsft
13 13
 daemon/logger/awslogs/**                @samuelkarp  
14 14
 hack/**                                 @tianon
15
-hack/integration-cli-on-swarm/**        @AkihiroSuda
16 15
 plugin/**                               @cpuguy83
17 16
 project/**                              @thaJeztah
... ...
@@ -19,6 +19,5 @@ contrib/builder/rpm/*/changelog
19 19
 dockerversion/version_autogen.go
20 20
 dockerversion/version_autogen_unix.go
21 21
 vendor/pkg/
22
-hack/integration-cli-on-swarm/integration-cli-on-swarm
23 22
 coverage.txt
24 23
 profile.out
... ...
@@ -105,9 +105,6 @@ export BUILD_APT_MIRROR
105 105
 
106 106
 SWAGGER_DOCS_PORT ?= 9000
107 107
 
108
-INTEGRATION_CLI_MASTER_IMAGE := $(if $(INTEGRATION_CLI_MASTER_IMAGE), $(INTEGRATION_CLI_MASTER_IMAGE), integration-cli-master)
109
-INTEGRATION_CLI_WORKER_IMAGE := $(if $(INTEGRATION_CLI_WORKER_IMAGE), $(INTEGRATION_CLI_WORKER_IMAGE), integration-cli-worker)
110
-
111 108
 define \n
112 109
 
113 110
 
... ...
@@ -212,18 +209,3 @@ swagger-docs: ## preview the API documentation
212 212
 		-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
213 213
 		-p $(SWAGGER_DOCS_PORT):80 \
214 214
 		bfirsh/redoc:1.6.2
215
-
216
-build-integration-cli-on-swarm: build ## build images and binary for running integration-cli on Swarm in parallel
217
-	@echo "Building hack/integration-cli-on-swarm (if build fails, please refer to hack/integration-cli-on-swarm/README.md)"
218
-	go build -buildmode=pie -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
219
-	@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
220
-	docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
221
-	@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
222
-	$(eval tmp := integration-cli-worker-tmp)
223
-# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
224
-# To avoid baking DOCKER_GRAPHDRIVER and so on into the image, we cannot use $(DOCKER_ENVS) here
225
-	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS --privileged $(DOCKER_IMAGE) top
226
-	docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
227
-	docker exec $(tmp) go build -buildmode=pie -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
228
-	docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
229
-	docker rm -f $(tmp)
230 215
deleted file mode 100644
... ...
@@ -1,68 +0,0 @@
1
-# Integration Testing on Swarm
2
-
3
-IT on Swarm allows you to execute integration test in parallel across a Docker Swarm cluster
4
-
5
-## Architecture
6
-
7
-### Master service
8
-
9
-  - Works as a funker caller
10
-  - Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
11
-
12
-### Worker service
13
-
14
-  - Works as a funker callee
15
-  - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration` using the bind-mounted API socket (`docker.sock`)
16
-
17
-### Client
18
-
19
-  - Controls master and workers via `docker stack`
20
-  - No need to have a local daemon
21
-
22
-Typically, the master and workers are supposed to be running on a cloud environment,
23
-while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
24
-
25
-## Requirement
26
-
27
-  - Docker daemon 1.13 or later
28
-  - Private registry for distributed execution with multiple nodes
29
-
30
-## Usage
31
-
32
-### Step 1: Prepare images
33
-
34
-    $ make build-integration-cli-on-swarm
35
-
36
-The following environment variables are known to work in this step:
37
-
38
- - `BUILDFLAGS`
39
-
40
-Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`. 
41
-
42
-### Step 2: Execute tests
43
-
44
-    $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest 
45
-
46
-The following environment variables are known to work in this step:
47
-
48
- - `DOCKER_GRAPHDRIVER`
49
- - `DOCKER_EXPERIMENTAL`
50
-
51
-#### Flags
52
-
53
-Basic flags:
54
-
55
- - `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
56
- - `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
57
- - `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
58
-
59
-Experimental flags for mitigating makespan nonuniformity:
60
-
61
- - `-shuffle`: Shuffle the test filter strings
62
-
63
-Flags for debugging IT on Swarm itself:
64
-
65
- - `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used.
66
- - `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
67
- - `-dry-run`: skip the actual workload
68
- - `-keep-executor`: do not auto-remove executor containers, which is useful for running privileged programs on Swarm
69 1
deleted file mode 100644
... ...
@@ -1,6 +0,0 @@
1
-# this Dockerfile is solely used for the master image.
2
-# Please refer to the top-level Makefile for the worker image.
3
-FROM golang:1.7
4
-ADD . /go/src/github.com/docker/docker/hack/integration-cli-on-swarm/agent
5
-RUN go build -buildmode=pie -o /master github.com/docker/docker/hack/integration-cli-on-swarm/agent/master
6
-ENTRYPOINT ["/master"]
7 1
deleted file mode 100644
... ...
@@ -1,132 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"encoding/json"
5
-	"fmt"
6
-	"log"
7
-	"strings"
8
-	"sync"
9
-	"sync/atomic"
10
-	"time"
11
-
12
-	"github.com/bfirsh/funker-go"
13
-	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
14
-)
15
-
16
-const (
17
-	// funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3
18
-	// When all the funker replicas are busy in their own job, we cannot connect to funker.
19
-	funkerRetryTimeout  = 1 * time.Hour
20
-	funkerRetryDuration = 1 * time.Second
21
-)
22
-
23
-// ticker is needed for some CI (e.g., on Travis, job is aborted when no output emitted for 10 minutes)
24
-func ticker(d time.Duration) chan struct{} {
25
-	t := time.NewTicker(d)
26
-	stop := make(chan struct{})
27
-	go func() {
28
-		for {
29
-			select {
30
-			case <-t.C:
31
-				log.Printf("tick (just for keeping CI job active) per %s", d.String())
32
-			case <-stop:
33
-				t.Stop()
34
-			}
35
-		}
36
-	}()
37
-	return stop
38
-}
39
-
40
-func executeTests(funkerName string, testChunks [][]string) error {
41
-	tickerStopper := ticker(9*time.Minute + 55*time.Second)
42
-	defer func() {
43
-		close(tickerStopper)
44
-	}()
45
-	begin := time.Now()
46
-	log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName)
47
-	var wg sync.WaitGroup
48
-	var passed, failed uint32
49
-	for chunkID, tests := range testChunks {
50
-		log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests))
51
-		wg.Add(1)
52
-		go func(chunkID int, tests []string) {
53
-			defer wg.Done()
54
-			chunkBegin := time.Now()
55
-			result, err := executeTestChunkWithRetry(funkerName, types.Args{
56
-				ChunkID: chunkID,
57
-				Tests:   tests,
58
-			})
59
-			if result.RawLog != "" {
60
-				for _, s := range strings.Split(result.RawLog, "\n") {
61
-					log.Printf("Log (chunk %d): %s", chunkID, s)
62
-				}
63
-			}
64
-			if err != nil {
65
-				log.Printf("Error while executing chunk %d: %v",
66
-					chunkID, err)
67
-				atomic.AddUint32(&failed, 1)
68
-			} else {
69
-				if result.Code == 0 {
70
-					atomic.AddUint32(&passed, 1)
71
-				} else {
72
-					atomic.AddUint32(&failed, 1)
73
-				}
74
-				log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.",
75
-					chunkID, passed+failed, len(testChunks), len(tests),
76
-					time.Since(chunkBegin), result.Code)
77
-			}
78
-		}(chunkID, tests)
79
-	}
80
-	wg.Wait()
81
-	// TODO: print actual tests rather than chunks
82
-	log.Printf("Executed %d chunks in %s. PASS: %d, FAIL: %d.",
83
-		len(testChunks), time.Since(begin), passed, failed)
84
-	if failed > 0 {
85
-		return fmt.Errorf("%d chunks failed", failed)
86
-	}
87
-	return nil
88
-}
89
-
90
-func executeTestChunk(funkerName string, args types.Args) (types.Result, error) {
91
-	ret, err := funker.Call(funkerName, args)
92
-	if err != nil {
93
-		return types.Result{}, err
94
-	}
95
-	tmp, err := json.Marshal(ret)
96
-	if err != nil {
97
-		return types.Result{}, err
98
-	}
99
-	var result types.Result
100
-	err = json.Unmarshal(tmp, &result)
101
-	return result, err
102
-}
103
-
104
-func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) {
105
-	begin := time.Now()
106
-	for i := 0; time.Since(begin) < funkerRetryTimeout; i++ {
107
-		result, err := executeTestChunk(funkerName, args)
108
-		if err == nil {
109
-			log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i)
110
-			return result, nil
111
-		}
112
-		if errorSeemsInteresting(err) {
113
-			log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v",
114
-				funkerName, args.ChunkID, i, err)
115
-		}
116
-		// TODO: non-constant sleep
117
-		time.Sleep(funkerRetryDuration)
118
-	}
119
-	return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, funkerRetryTimeout)
120
-}
121
-
122
-// errorSeemsInteresting returns true if err does not seem to be about https://github.com/bfirsh/funker/issues/3
123
-func errorSeemsInteresting(err error) bool {
124
-	boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"}
125
-	errS := err.Error()
126
-	for _, boringS := range boringSubstrs {
127
-		if strings.Contains(errS, boringS) {
128
-			return false
129
-		}
130
-	}
131
-	return true
132
-}
133 1
deleted file mode 100644
... ...
@@ -1,65 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"errors"
5
-	"flag"
6
-	"io/ioutil"
7
-	"log"
8
-	"strings"
9
-)
10
-
11
-func main() {
12
-	if err := xmain(); err != nil {
13
-		log.Fatalf("fatal error: %v", err)
14
-	}
15
-}
16
-
17
-func xmain() error {
18
-	workerService := flag.String("worker-service", "", "Name of worker service")
19
-	chunks := flag.Int("chunks", 0, "Number of chunks")
20
-	input := flag.String("input", "", "Path to input file")
21
-	randSeed := flag.Int64("rand-seed", int64(0), "Random seed")
22
-	shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
23
-	flag.Parse()
24
-	if *workerService == "" {
25
-		return errors.New("worker-service unset")
26
-	}
27
-	if *chunks == 0 {
28
-		return errors.New("chunks unset")
29
-	}
30
-	if *input == "" {
31
-		return errors.New("input unset")
32
-	}
33
-
34
-	tests, err := loadTests(*input)
35
-	if err != nil {
36
-		return err
37
-	}
38
-	testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed)
39
-	log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks))
40
-	return executeTests(*workerService, testChunks)
41
-}
42
-
43
-func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string {
44
-	// shuffling (experimental) mitigates makespan nonuniformity
45
-	// Not sure this can cause some locality problem..
46
-	if shuffle {
47
-		shuffleStrings(tests, randSeed)
48
-	}
49
-	return chunkStrings(tests, numChunks)
50
-}
51
-
52
-func loadTests(filename string) ([]string, error) {
53
-	b, err := ioutil.ReadFile(filename)
54
-	if err != nil {
55
-		return nil, err
56
-	}
57
-	var tests []string
58
-	for _, line := range strings.Split(string(b), "\n") {
59
-		s := strings.TrimSpace(line)
60
-		if s != "" {
61
-			tests = append(tests, s)
62
-		}
63
-	}
64
-	return tests, nil
65
-}
66 1
deleted file mode 100644
... ...
@@ -1,28 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"math/rand"
5
-)
6
-
7
-// chunkStrings chunks the string slice
8
-func chunkStrings(x []string, numChunks int) [][]string {
9
-	var result [][]string
10
-	chunkSize := (len(x) + numChunks - 1) / numChunks
11
-	for i := 0; i < len(x); i += chunkSize {
12
-		ub := i + chunkSize
13
-		if ub > len(x) {
14
-			ub = len(x)
15
-		}
16
-		result = append(result, x[i:ub])
17
-	}
18
-	return result
19
-}
20
-
21
-// shuffleStrings shuffles strings
22
-func shuffleStrings(x []string, seed int64) {
23
-	r := rand.New(rand.NewSource(seed))
24
-	for i := range x {
25
-		j := r.Intn(i + 1)
26
-		x[i], x[j] = x[j], x[i]
27
-	}
28
-}
29 1
deleted file mode 100644
... ...
@@ -1,63 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"fmt"
5
-	"reflect"
6
-	"testing"
7
-	"time"
8
-)
9
-
10
-func generateInput(inputLen int) []string {
11
-	var input []string
12
-	for i := 0; i < inputLen; i++ {
13
-		input = append(input, fmt.Sprintf("s%d", i))
14
-	}
15
-
16
-	return input
17
-}
18
-
19
-func testChunkStrings(t *testing.T, inputLen, numChunks int) {
20
-	t.Logf("inputLen=%d, numChunks=%d", inputLen, numChunks)
21
-	input := generateInput(inputLen)
22
-	result := chunkStrings(input, numChunks)
23
-	t.Logf("result has %d chunks", len(result))
24
-	var inputReconstructedFromResult []string
25
-	for i, chunk := range result {
26
-		t.Logf("chunk %d has %d elements", i, len(chunk))
27
-		inputReconstructedFromResult = append(inputReconstructedFromResult, chunk...)
28
-	}
29
-	if !reflect.DeepEqual(input, inputReconstructedFromResult) {
30
-		t.Fatal("input != inputReconstructedFromResult")
31
-	}
32
-}
33
-
34
-func TestChunkStrings_4_4(t *testing.T) {
35
-	testChunkStrings(t, 4, 4)
36
-}
37
-
38
-func TestChunkStrings_4_1(t *testing.T) {
39
-	testChunkStrings(t, 4, 1)
40
-}
41
-
42
-func TestChunkStrings_1_4(t *testing.T) {
43
-	testChunkStrings(t, 1, 4)
44
-}
45
-
46
-func TestChunkStrings_1000_8(t *testing.T) {
47
-	testChunkStrings(t, 1000, 8)
48
-}
49
-
50
-func TestChunkStrings_1000_9(t *testing.T) {
51
-	testChunkStrings(t, 1000, 9)
52
-}
53
-
54
-func testShuffleStrings(t *testing.T, inputLen int, seed int64) {
55
-	t.Logf("inputLen=%d, seed=%d", inputLen, seed)
56
-	x := generateInput(inputLen)
57
-	shuffleStrings(x, seed)
58
-	t.Logf("shuffled: %v", x)
59
-}
60
-
61
-func TestShuffleStrings_100(t *testing.T) {
62
-	testShuffleStrings(t, 100, time.Now().UnixNano())
63
-}
64 1
deleted file mode 100644
... ...
@@ -1,18 +0,0 @@
1
-package types
2
-
3
-// Args is the type for funker args
4
-type Args struct {
5
-	// ChunkID is a unique number of the chunk
6
-	ChunkID int `json:"chunk_id"`
7
-	// Tests is the set of the strings that are passed as `-check.f` filters
8
-	Tests []string `json:"tests"`
9
-}
10
-
11
-// Result is the type for funker result
12
-type Result struct {
13
-	// ChunkID corresponds to Args.ChunkID
14
-	ChunkID int `json:"chunk_id"`
15
-	// Code is the exit code
16
-	Code   int    `json:"code"`
17
-	RawLog string `json:"raw_log"`
18
-}
19 1
deleted file mode 100644
... ...
@@ -1,2 +0,0 @@
1
-# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
2
-github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
3 1
deleted file mode 100644
... ...
@@ -1,191 +0,0 @@
1
-
2
-                                 Apache License
3
-                           Version 2.0, January 2004
4
-                        http://www.apache.org/licenses/
5
-
6
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
-
8
-   1. Definitions.
9
-
10
-      "License" shall mean the terms and conditions for use, reproduction,
11
-      and distribution as defined by Sections 1 through 9 of this document.
12
-
13
-      "Licensor" shall mean the copyright owner or entity authorized by
14
-      the copyright owner that is granting the License.
15
-
16
-      "Legal Entity" shall mean the union of the acting entity and all
17
-      other entities that control, are controlled by, or are under common
18
-      control with that entity. For the purposes of this definition,
19
-      "control" means (i) the power, direct or indirect, to cause the
20
-      direction or management of such entity, whether by contract or
21
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
-      outstanding shares, or (iii) beneficial ownership of such entity.
23
-
24
-      "You" (or "Your") shall mean an individual or Legal Entity
25
-      exercising permissions granted by this License.
26
-
27
-      "Source" form shall mean the preferred form for making modifications,
28
-      including but not limited to software source code, documentation
29
-      source, and configuration files.
30
-
31
-      "Object" form shall mean any form resulting from mechanical
32
-      transformation or translation of a Source form, including but
33
-      not limited to compiled object code, generated documentation,
34
-      and conversions to other media types.
35
-
36
-      "Work" shall mean the work of authorship, whether in Source or
37
-      Object form, made available under the License, as indicated by a
38
-      copyright notice that is included in or attached to the work
39
-      (an example is provided in the Appendix below).
40
-
41
-      "Derivative Works" shall mean any work, whether in Source or Object
42
-      form, that is based on (or derived from) the Work and for which the
43
-      editorial revisions, annotations, elaborations, or other modifications
44
-      represent, as a whole, an original work of authorship. For the purposes
45
-      of this License, Derivative Works shall not include works that remain
46
-      separable from, or merely link (or bind by name) to the interfaces of,
47
-      the Work and Derivative Works thereof.
48
-
49
-      "Contribution" shall mean any work of authorship, including
50
-      the original version of the Work and any modifications or additions
51
-      to that Work or Derivative Works thereof, that is intentionally
52
-      submitted to Licensor for inclusion in the Work by the copyright owner
53
-      or by an individual or Legal Entity authorized to submit on behalf of
54
-      the copyright owner. For the purposes of this definition, "submitted"
55
-      means any form of electronic, verbal, or written communication sent
56
-      to the Licensor or its representatives, including but not limited to
57
-      communication on electronic mailing lists, source code control systems,
58
-      and issue tracking systems that are managed by, or on behalf of, the
59
-      Licensor for the purpose of discussing and improving the Work, but
60
-      excluding communication that is conspicuously marked or otherwise
61
-      designated in writing by the copyright owner as "Not a Contribution."
62
-
63
-      "Contributor" shall mean Licensor and any individual or Legal Entity
64
-      on behalf of whom a Contribution has been received by Licensor and
65
-      subsequently incorporated within the Work.
66
-
67
-   2. Grant of Copyright License. Subject to the terms and conditions of
68
-      this License, each Contributor hereby grants to You a perpetual,
69
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
-      copyright license to reproduce, prepare Derivative Works of,
71
-      publicly display, publicly perform, sublicense, and distribute the
72
-      Work and such Derivative Works in Source or Object form.
73
-
74
-   3. Grant of Patent License. Subject to the terms and conditions of
75
-      this License, each Contributor hereby grants to You a perpetual,
76
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
-      (except as stated in this section) patent license to make, have made,
78
-      use, offer to sell, sell, import, and otherwise transfer the Work,
79
-      where such license applies only to those patent claims licensable
80
-      by such Contributor that are necessarily infringed by their
81
-      Contribution(s) alone or by combination of their Contribution(s)
82
-      with the Work to which such Contribution(s) was submitted. If You
83
-      institute patent litigation against any entity (including a
84
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
85
-      or a Contribution incorporated within the Work constitutes direct
86
-      or contributory patent infringement, then any patent licenses
87
-      granted to You under this License for that Work shall terminate
88
-      as of the date such litigation is filed.
89
-
90
-   4. Redistribution. You may reproduce and distribute copies of the
91
-      Work or Derivative Works thereof in any medium, with or without
92
-      modifications, and in Source or Object form, provided that You
93
-      meet the following conditions:
94
-
95
-      (a) You must give any other recipients of the Work or
96
-          Derivative Works a copy of this License; and
97
-
98
-      (b) You must cause any modified files to carry prominent notices
99
-          stating that You changed the files; and
100
-
101
-      (c) You must retain, in the Source form of any Derivative Works
102
-          that You distribute, all copyright, patent, trademark, and
103
-          attribution notices from the Source form of the Work,
104
-          excluding those notices that do not pertain to any part of
105
-          the Derivative Works; and
106
-
107
-      (d) If the Work includes a "NOTICE" text file as part of its
108
-          distribution, then any Derivative Works that You distribute must
109
-          include a readable copy of the attribution notices contained
110
-          within such NOTICE file, excluding those notices that do not
111
-          pertain to any part of the Derivative Works, in at least one
112
-          of the following places: within a NOTICE text file distributed
113
-          as part of the Derivative Works; within the Source form or
114
-          documentation, if provided along with the Derivative Works; or,
115
-          within a display generated by the Derivative Works, if and
116
-          wherever such third-party notices normally appear. The contents
117
-          of the NOTICE file are for informational purposes only and
118
-          do not modify the License. You may add Your own attribution
119
-          notices within Derivative Works that You distribute, alongside
120
-          or as an addendum to the NOTICE text from the Work, provided
121
-          that such additional attribution notices cannot be construed
122
-          as modifying the License.
123
-
124
-      You may add Your own copyright statement to Your modifications and
125
-      may provide additional or different license terms and conditions
126
-      for use, reproduction, or distribution of Your modifications, or
127
-      for any such Derivative Works as a whole, provided Your use,
128
-      reproduction, and distribution of the Work otherwise complies with
129
-      the conditions stated in this License.
130
-
131
-   5. Submission of Contributions. Unless You explicitly state otherwise,
132
-      any Contribution intentionally submitted for inclusion in the Work
133
-      by You to the Licensor shall be under the terms and conditions of
134
-      this License, without any additional terms or conditions.
135
-      Notwithstanding the above, nothing herein shall supersede or modify
136
-      the terms of any separate license agreement you may have executed
137
-      with Licensor regarding such Contributions.
138
-
139
-   6. Trademarks. This License does not grant permission to use the trade
140
-      names, trademarks, service marks, or product names of the Licensor,
141
-      except as required for reasonable and customary use in describing the
142
-      origin of the Work and reproducing the content of the NOTICE file.
143
-
144
-   7. Disclaimer of Warranty. Unless required by applicable law or
145
-      agreed to in writing, Licensor provides the Work (and each
146
-      Contributor provides its Contributions) on an "AS IS" BASIS,
147
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
-      implied, including, without limitation, any warranties or conditions
149
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
-      PARTICULAR PURPOSE. You are solely responsible for determining the
151
-      appropriateness of using or redistributing the Work and assume any
152
-      risks associated with Your exercise of permissions under this License.
153
-
154
-   8. Limitation of Liability. In no event and under no legal theory,
155
-      whether in tort (including negligence), contract, or otherwise,
156
-      unless required by applicable law (such as deliberate and grossly
157
-      negligent acts) or agreed to in writing, shall any Contributor be
158
-      liable to You for damages, including any direct, indirect, special,
159
-      incidental, or consequential damages of any character arising as a
160
-      result of this License or out of the use or inability to use the
161
-      Work (including but not limited to damages for loss of goodwill,
162
-      work stoppage, computer failure or malfunction, or any and all
163
-      other commercial damages or losses), even if such Contributor
164
-      has been advised of the possibility of such damages.
165
-
166
-   9. Accepting Warranty or Additional Liability. While redistributing
167
-      the Work or Derivative Works thereof, You may choose to offer,
168
-      and charge a fee for, acceptance of support, warranty, indemnity,
169
-      or other liability obligations and/or rights consistent with this
170
-      License. However, in accepting such obligations, You may act only
171
-      on Your own behalf and on Your sole responsibility, not on behalf
172
-      of any other Contributor, and only if You agree to indemnify,
173
-      defend, and hold each Contributor harmless for any liability
174
-      incurred by, or claims asserted against, such Contributor by reason
175
-      of your accepting any such warranty or additional liability.
176
-
177
-   END OF TERMS AND CONDITIONS
178
-
179
-   Copyright 2016 Docker, Inc.
180
-
181
-   Licensed under the Apache License, Version 2.0 (the "License");
182
-   you may not use this file except in compliance with the License.
183
-   You may obtain a copy of the License at
184
-
185
-       http://www.apache.org/licenses/LICENSE-2.0
186
-
187
-   Unless required by applicable law or agreed to in writing, software
188
-   distributed under the License is distributed on an "AS IS" BASIS,
189
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
190
-   See the License for the specific language governing permissions and
191
-   limitations under the License.
192 1
deleted file mode 100644
... ...
@@ -1,50 +0,0 @@
1
-package funker
2
-
3
-import (
4
-	"encoding/json"
5
-	"io/ioutil"
6
-	"net"
7
-	"time"
8
-)
9
-
10
-// Call a Funker function
11
-func Call(name string, args interface{}) (interface{}, error) {
12
-	argsJSON, err := json.Marshal(args)
13
-	if err != nil {
14
-		return nil, err
15
-	}
16
-
17
-	addr, err := net.ResolveTCPAddr("tcp", name+":9999")
18
-	if err != nil {
19
-		return nil, err
20
-	}
21
-
22
-	conn, err := net.DialTCP("tcp", nil, addr)
23
-	if err != nil {
24
-		return nil, err
25
-	}
26
-	// Keepalive is a workaround for docker/docker#29655 .
27
-	// The implementation of FIN_WAIT2 seems weird on Swarm-mode.
28
-	// It seems to always refuse any packet after 60 seconds.
29
-	//
30
-	// TODO: remove this workaround if the issue gets resolved on the Docker side
31
-	if err := conn.SetKeepAlive(true); err != nil {
32
-		return nil, err
33
-	}
34
-	if err := conn.SetKeepAlivePeriod(30 * time.Second); err != nil {
35
-		return nil, err
36
-	}
37
-	if _, err = conn.Write(argsJSON); err != nil {
38
-		return nil, err
39
-	}
40
-	if err = conn.CloseWrite(); err != nil {
41
-		return nil, err
42
-	}
43
-	retJSON, err := ioutil.ReadAll(conn)
44
-	if err != nil {
45
-		return nil, err
46
-	}
47
-	var ret interface{}
48
-	err = json.Unmarshal(retJSON, &ret)
49
-	return ret, err
50
-}
51 1
deleted file mode 100644
... ...
@@ -1,54 +0,0 @@
1
-package funker
2
-
3
-import (
4
-	"encoding/json"
5
-	"fmt"
6
-	"io/ioutil"
7
-	"net"
8
-	"reflect"
9
-)
10
-
11
-// Handle a Funker function.
12
-func Handle(handler interface{}) error {
13
-	handlerValue := reflect.ValueOf(handler)
14
-	handlerType := handlerValue.Type()
15
-	if handlerType.Kind() != reflect.Func || handlerType.NumIn() != 1 || handlerType.NumOut() != 1 {
16
-		return fmt.Errorf("Handler must be a function with a single parameter and single return value.")
17
-	}
18
-	argsValue := reflect.New(handlerType.In(0))
19
-
20
-	listener, err := net.Listen("tcp", ":9999")
21
-	if err != nil {
22
-		return err
23
-	}
24
-	conn, err := listener.Accept()
25
-	if err != nil {
26
-		return err
27
-	}
28
-	// We close listener, because we only allow single request.
29
-	// Note that TCP "backlog" cannot be used for that purpose.
30
-	// http://www.perlmonks.org/?node_id=940662
31
-	if err = listener.Close(); err != nil {
32
-		return err
33
-	}
34
-	argsJSON, err := ioutil.ReadAll(conn)
35
-	if err != nil {
36
-		return err
37
-	}
38
-	err = json.Unmarshal(argsJSON, argsValue.Interface())
39
-	if err != nil {
40
-		return err
41
-	}
42
-
43
-	ret := handlerValue.Call([]reflect.Value{argsValue.Elem()})[0].Interface()
44
-	retJSON, err := json.Marshal(ret)
45
-	if err != nil {
46
-		return err
47
-	}
48
-
49
-	if _, err = conn.Write(retJSON); err != nil {
50
-		return err
51
-	}
52
-
53
-	return conn.Close()
54
-}
55 1
deleted file mode 100644
... ...
@@ -1,118 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"bytes"
5
-	"context"
6
-	"fmt"
7
-	"io"
8
-	"os"
9
-	"strings"
10
-
11
-	"github.com/docker/docker/api/types"
12
-	"github.com/docker/docker/api/types/container"
13
-	"github.com/docker/docker/api/types/mount"
14
-	"github.com/docker/docker/client"
15
-	"github.com/docker/docker/pkg/stdcopy"
16
-)
17
-
18
-// testChunkExecutor executes integration-cli binary.
19
-// image needs to be the worker image itself. testFlags are OR-set of regexp for filtering tests.
20
-type testChunkExecutor func(image string, tests []string) (int64, string, error)
21
-
22
-func dryTestChunkExecutor() testChunkExecutor {
23
-	return func(image string, tests []string) (int64, string, error) {
24
-		return 0, fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests), nil
25
-	}
26
-}
27
-
28
-// privilegedTestChunkExecutor invokes a privileged container from the worker
29
-// service via bind-mounted API socket so as to execute the test chunk
30
-func privilegedTestChunkExecutor(autoRemove bool) testChunkExecutor {
31
-	return func(image string, tests []string) (int64, string, error) {
32
-		cli, err := client.NewClientWithOpts(client.FromEnv)
33
-		if err != nil {
34
-			return 0, "", err
35
-		}
36
-		// propagate variables from the host (needs to be defined in the compose file)
37
-		experimental := os.Getenv("DOCKER_EXPERIMENTAL")
38
-		graphdriver := os.Getenv("DOCKER_GRAPHDRIVER")
39
-		if graphdriver == "" {
40
-			info, err := cli.Info(context.Background())
41
-			if err != nil {
42
-				return 0, "", err
43
-			}
44
-			graphdriver = info.Driver
45
-		}
46
-		// `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration`)
47
-		// but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work.
48
-		//
49
-		// Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs`
50
-		//
51
-		// see integration-cli/daemon/daemon.go
52
-		daemonDest := "/daemon_dest"
53
-		config := container.Config{
54
-			Image: image,
55
-			Env: []string{
56
-				"TESTFLAGS=-check.f " + strings.Join(tests, "|"),
57
-				"KEEPBUNDLE=1",
58
-				"DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli
59
-				"DOCKER_EXPERIMENTAL=" + experimental,
60
-				"DOCKER_GRAPHDRIVER=" + graphdriver,
61
-				"DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest,
62
-			},
63
-			Labels: map[string]string{
64
-				"org.dockerproject.integration-cli-on-swarm":         "",
65
-				"org.dockerproject.integration-cli-on-swarm.comment": "this non-service container is created for running privileged programs on Swarm. you can remove this container manually if the corresponding service is already stopped.",
66
-			},
67
-			Entrypoint: []string{"hack/dind"},
68
-			Cmd:        []string{"hack/make.sh", "test-integration"},
69
-		}
70
-		hostConfig := container.HostConfig{
71
-			AutoRemove: autoRemove,
72
-			Privileged: true,
73
-			Mounts: []mount.Mount{
74
-				{
75
-					Type:   mount.TypeVolume,
76
-					Target: daemonDest,
77
-				},
78
-			},
79
-		}
80
-		id, stream, err := runContainer(context.Background(), cli, config, hostConfig)
81
-		if err != nil {
82
-			return 0, "", err
83
-		}
84
-		var b bytes.Buffer
85
-		teeContainerStream(&b, os.Stdout, os.Stderr, stream)
86
-		resultC, errC := cli.ContainerWait(context.Background(), id, "")
87
-		select {
88
-		case err := <-errC:
89
-			return 0, "", err
90
-		case result := <-resultC:
91
-			return result.StatusCode, b.String(), nil
92
-		}
93
-	}
94
-}
95
-
96
-func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) {
97
-	created, err := cli.ContainerCreate(context.Background(),
98
-		&config, &hostConfig, nil, "")
99
-	if err != nil {
100
-		return "", nil, err
101
-	}
102
-	if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
103
-		return "", nil, err
104
-	}
105
-	stream, err := cli.ContainerLogs(ctx,
106
-		created.ID,
107
-		types.ContainerLogsOptions{
108
-			ShowStdout: true,
109
-			ShowStderr: true,
110
-			Follow:     true,
111
-		})
112
-	return created.ID, stream, err
113
-}
114
-
115
-func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) {
116
-	stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream)
117
-	stream.Close()
118
-}
119 1
deleted file mode 100644
... ...
@@ -1,69 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"flag"
5
-	"fmt"
6
-	"log"
7
-	"time"
8
-
9
-	"github.com/bfirsh/funker-go"
10
-	"github.com/docker/distribution/reference"
11
-	"github.com/docker/docker/hack/integration-cli-on-swarm/agent/types"
12
-)
13
-
14
-func main() {
15
-	if err := xmain(); err != nil {
16
-		log.Fatalf("fatal error: %v", err)
17
-	}
18
-}
19
-
20
-func validImageDigest(s string) bool {
21
-	return reference.DigestRegexp.FindString(s) != ""
22
-}
23
-
24
-func xmain() error {
25
-	workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself")
26
-	dryRun := flag.Bool("dry-run", false, "Dry run")
27
-	keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm")
28
-	flag.Parse()
29
-	if !validImageDigest(*workerImageDigest) {
30
-		// Because of issue #29582.
31
-		// `docker service create localregistry.example.com/blahblah:latest` pulls the image data to local, but not a tag.
32
-		// So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally`
33
-		return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest)
34
-	}
35
-	executor := privilegedTestChunkExecutor(!*keepExecutor)
36
-	if *dryRun {
37
-		executor = dryTestChunkExecutor()
38
-	}
39
-	return handle(*workerImageDigest, executor)
40
-}
41
-
42
-func handle(workerImageDigest string, executor testChunkExecutor) error {
43
-	log.Print("Waiting for a funker request")
44
-	return funker.Handle(func(args *types.Args) types.Result {
45
-		log.Printf("Executing chunk %d, contains %d test filters",
46
-			args.ChunkID, len(args.Tests))
47
-		begin := time.Now()
48
-		code, rawLog, err := executor(workerImageDigest, args.Tests)
49
-		if err != nil {
50
-			log.Printf("Error while executing chunk %d: %v", args.ChunkID, err)
51
-			if code == 0 {
52
-				// Make sure this is a failure
53
-				code = 1
54
-			}
55
-			return types.Result{
56
-				ChunkID: args.ChunkID,
57
-				Code:    int(code),
58
-				RawLog:  rawLog,
59
-			}
60
-		}
61
-		elapsed := time.Since(begin)
62
-		log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed)
63
-		return types.Result{
64
-			ChunkID: args.ChunkID,
65
-			Code:    int(code),
66
-			RawLog:  rawLog,
67
-		}
68
-	})
69
-}
70 1
deleted file mode 100644
... ...
@@ -1,122 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"context"
5
-	"io/ioutil"
6
-	"os"
7
-	"path/filepath"
8
-	"text/template"
9
-
10
-	"github.com/docker/docker/client"
11
-)
12
-
13
-const composeTemplate = `# generated by integration-cli-on-swarm
14
-version: "3"
15
-
16
-services:
17
-  worker:
18
-    image: "{{.WorkerImage}}"
19
-    command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}", "-keep-executor={{.KeepExecutor}}"]
20
-    networks:
21
-      - net
22
-    volumes:
23
-# Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers
24
-      - /var/run/docker.sock:/var/run/docker.sock
25
-    environment:
26
-      - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}}
27
-      - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}}
28
-    deploy:
29
-      mode: replicated
30
-      replicas: {{.Replicas}}
31
-      restart_policy:
32
-# The restart condition needs to be any for funker function
33
-        condition: any
34
-
35
-  master:
36
-    image: "{{.MasterImage}}"
37
-    command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"]
38
-    networks:
39
-      - net
40
-    volumes:
41
-      - {{.Volume}}:/mnt
42
-    deploy:
43
-      mode: replicated
44
-      replicas: 1
45
-      restart_policy:
46
-        condition: none
47
-      placement:
48
-# Make sure the master can access the volume
49
-        constraints: [node.id == {{.SelfNodeID}}]
50
-
51
-networks:
52
-  net:
53
-
54
-volumes:
55
-  {{.Volume}}:
56
-    external: true
57
-`
58
-
59
-type composeOptions struct {
60
-	Replicas     int
61
-	Chunks       int
62
-	MasterImage  string
63
-	WorkerImage  string
64
-	Volume       string
65
-	Shuffle      bool
66
-	RandSeed     int64
67
-	DryRun       bool
68
-	KeepExecutor bool
69
-}
70
-
71
-type composeTemplateOptions struct {
72
-	composeOptions
73
-	WorkerImageDigest     string
74
-	SelfNodeID            string
75
-	EnvDockerGraphDriver  string
76
-	EnvDockerExperimental string
77
-}
78
-
79
-// createCompose creates "dir/docker-compose.yml".
80
-// If dir is empty, TempDir() is used.
81
-func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) {
82
-	if dir == "" {
83
-		var err error
84
-		dir, err = ioutil.TempDir("", "integration-cli-on-swarm-")
85
-		if err != nil {
86
-			return "", err
87
-		}
88
-	}
89
-	resolved := composeTemplateOptions{}
90
-	resolved.composeOptions = opts
91
-	workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName)
92
-	if err != nil {
93
-		return "", err
94
-	}
95
-	if len(workerImageInspect.RepoDigests) > 0 {
96
-		resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0]
97
-	} else {
98
-		// fall back for non-pushed image
99
-		resolved.WorkerImageDigest = workerImageInspect.ID
100
-	}
101
-	info, err := cli.Info(context.Background())
102
-	if err != nil {
103
-		return "", err
104
-	}
105
-	resolved.SelfNodeID = info.Swarm.NodeID
106
-	resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER")
107
-	resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL")
108
-	composeFilePath := filepath.Join(dir, "docker-compose.yml")
109
-	tmpl, err := template.New("").Parse(composeTemplate)
110
-	if err != nil {
111
-		return "", err
112
-	}
113
-	f, err := os.Create(composeFilePath)
114
-	if err != nil {
115
-		return "", err
116
-	}
117
-	defer f.Close()
118
-	if err = tmpl.Execute(f, resolved); err != nil {
119
-		return "", err
120
-	}
121
-	return composeFilePath, nil
122
-}
123 1
deleted file mode 100644
... ...
@@ -1,63 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"fmt"
5
-	"os"
6
-	"os/exec"
7
-	"strings"
8
-	"time"
9
-
10
-	"github.com/docker/docker/client"
11
-)
12
-
13
-func system(commands [][]string) error {
14
-	for _, c := range commands {
15
-		cmd := exec.Command(c[0], c[1:]...)
16
-		cmd.Stdout = os.Stdout
17
-		cmd.Stderr = os.Stderr
18
-		if err := cmd.Run(); err != nil {
19
-			return err
20
-		}
21
-	}
22
-	return nil
23
-}
24
-
25
-func pushImage(_ *client.Client, remote, local string) error {
26
-	// FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...)
27
-	return system([][]string{
28
-		{"docker", "image", "tag", local, remote},
29
-		{"docker", "image", "push", remote},
30
-	})
31
-}
32
-
33
-func deployStack(_ *client.Client, stackName, composeFilePath string) error {
34
-	// FIXME: eliminate os/exec (but stack is implemented in CLI ...)
35
-	return system([][]string{
36
-		{"docker", "stack", "deploy",
37
-			"--compose-file", composeFilePath,
38
-			"--with-registry-auth",
39
-			stackName},
40
-	})
41
-}
42
-
43
-func hasStack(_ *client.Client, stackName string) bool {
44
-	// FIXME: eliminate os/exec (but stack is implemented in CLI ...)
45
-	out, err := exec.Command("docker", "stack", "ls").CombinedOutput()
46
-	if err != nil {
47
-		panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out)))
48
-	}
49
-	// FIXME: not accurate
50
-	return strings.Contains(string(out), stackName)
51
-}
52
-
53
-func removeStack(_ *client.Client, stackName string) error {
54
-	// FIXME: eliminate os/exec (but stack is implemented in CLI ...)
55
-	if err := system([][]string{
56
-		{"docker", "stack", "rm", stackName},
57
-	}); err != nil {
58
-		return err
59
-	}
60
-	// FIXME
61
-	time.Sleep(10 * time.Second)
62
-	return nil
63
-}
64 1
deleted file mode 100644
... ...
@@ -1,55 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"fmt"
5
-	"io/ioutil"
6
-	"path/filepath"
7
-	"regexp"
8
-)
9
-
10
-var testFuncRegexp *regexp.Regexp
11
-
12
-func init() {
13
-	testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`)
14
-}
15
-
16
-func enumerateTestsForBytes(b []byte) ([]string, error) {
17
-	var tests []string
18
-	submatches := testFuncRegexp.FindAllSubmatch(b, -1)
19
-	for _, submatch := range submatches {
20
-		if len(submatch) == 3 {
21
-			tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2]))
22
-		}
23
-	}
24
-	return tests, nil
25
-}
26
-
27
-// enumerateTests enumerates valid `-check.f` strings for all the test functions.
28
-// Note that we use regexp rather than parsing Go files for performance reason.
29
-// (Try `TESTFLAGS=-check.list make test-integration` to see the slowness of parsing)
30
-// The files needs to be `gofmt`-ed
31
-//
32
-// The result will be as follows, but unsorted ('$' is appended because they are regexp for `-check.f`):
33
-//  "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$"
34
-//  "DockerAuthzSuite.TestAuthZPluginAllowEventStream$"
35
-//  ...
36
-//  "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$"
37
-func enumerateTests(wd string) ([]string, error) {
38
-	testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go"))
39
-	if err != nil {
40
-		return nil, err
41
-	}
42
-	var allTests []string
43
-	for _, testGoFile := range testGoFiles {
44
-		b, err := ioutil.ReadFile(testGoFile)
45
-		if err != nil {
46
-			return nil, err
47
-		}
48
-		tests, err := enumerateTestsForBytes(b)
49
-		if err != nil {
50
-			return nil, err
51
-		}
52
-		allTests = append(allTests, tests...)
53
-	}
54
-	return allTests, nil
55
-}
56 1
deleted file mode 100644
... ...
@@ -1,84 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"os"
5
-	"path/filepath"
6
-	"reflect"
7
-	"sort"
8
-	"strings"
9
-	"testing"
10
-)
11
-
12
-func getRepoTopDir(t *testing.T) string {
13
-	wd, err := os.Getwd()
14
-	if err != nil {
15
-		t.Fatal(err)
16
-	}
17
-	wd = filepath.Clean(wd)
18
-	suffix := "hack/integration-cli-on-swarm/host"
19
-	if !strings.HasSuffix(wd, suffix) {
20
-		t.Skipf("cwd seems strange (needs to have suffix %s): %v", suffix, wd)
21
-	}
22
-	return filepath.Clean(filepath.Join(wd, "../../.."))
23
-}
24
-
25
-func TestEnumerateTests(t *testing.T) {
26
-	if testing.Short() {
27
-		t.Skip("skipping in short mode")
28
-	}
29
-	tests, err := enumerateTests(getRepoTopDir(t))
30
-	if err != nil {
31
-		t.Fatal(err)
32
-	}
33
-	sort.Strings(tests)
34
-	t.Logf("enumerated %d test filter strings:", len(tests))
35
-	for _, s := range tests {
36
-		t.Logf("- %q", s)
37
-	}
38
-}
39
-
40
-func TestEnumerateTestsForBytes(t *testing.T) {
41
-	b := []byte(`package main
42
-import (
43
-	"github.com/go-check/check"
44
-)
45
-
46
-func (s *FooSuite) TestA(c *check.C) {
47
-}
48
-
49
-func (s *FooSuite) TestAAA(c *check.C) {
50
-}
51
-
52
-func (s *BarSuite) TestBar(c *check.C) {
53
-}
54
-
55
-func (x *FooSuite) TestC(c *check.C) {
56
-}
57
-
58
-func (*FooSuite) TestD(c *check.C) {
59
-}
60
-
61
-// should not be counted
62
-func (s *FooSuite) testE(c *check.C) {
63
-}
64
-
65
-// counted, although we don't support ungofmt file
66
-  func   (s *FooSuite)    TestF  (c   *check.C){}
67
-`)
68
-	expected := []string{
69
-		"FooSuite.TestA$",
70
-		"FooSuite.TestAAA$",
71
-		"BarSuite.TestBar$",
72
-		"FooSuite.TestC$",
73
-		"FooSuite.TestD$",
74
-		"FooSuite.TestF$",
75
-	}
76
-
77
-	actual, err := enumerateTestsForBytes(b)
78
-	if err != nil {
79
-		t.Fatal(err)
80
-	}
81
-	if !reflect.DeepEqual(expected, actual) {
82
-		t.Fatalf("expected %q, got %q", expected, actual)
83
-	}
84
-}
85 1
deleted file mode 100644
... ...
@@ -1,198 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"context"
5
-	"flag"
6
-	"fmt"
7
-	"io"
8
-	"io/ioutil"
9
-	"os"
10
-	"strings"
11
-	"time"
12
-
13
-	"github.com/docker/docker/api/types"
14
-	"github.com/docker/docker/api/types/filters"
15
-	"github.com/docker/docker/client"
16
-	"github.com/docker/docker/pkg/stdcopy"
17
-	"github.com/sirupsen/logrus"
18
-)
19
-
20
-const (
21
-	defaultStackName       = "integration-cli-on-swarm"
22
-	defaultVolumeName      = "integration-cli-on-swarm"
23
-	defaultMasterImageName = "integration-cli-master"
24
-	defaultWorkerImageName = "integration-cli-worker"
25
-)
26
-
27
-func main() {
28
-	rc, err := xmain()
29
-	if err != nil {
30
-		logrus.Fatalf("fatal error: %v", err)
31
-	}
32
-	os.Exit(rc)
33
-}
34
-
35
-func xmain() (int, error) {
36
-	// Should we use cobra maybe?
37
-	replicas := flag.Int("replicas", 1, "Number of worker service replica")
38
-	chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)")
39
-	pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. (empty == not to push)")
40
-	shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity")
41
-	// flags below are rarely used
42
-	randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)")
43
-	filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings")
44
-	dryRun := flag.Bool("dry-run", false, "Dry run")
45
-	keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm")
46
-	flag.Parse()
47
-	if *chunks == 0 {
48
-		*chunks = *replicas
49
-	}
50
-	if *randSeed == int64(0) {
51
-		*randSeed = time.Now().UnixNano()
52
-	}
53
-	cli, err := client.NewClientWithOpts(client.FromEnv)
54
-	if err != nil {
55
-		return 1, err
56
-	}
57
-	if hasStack(cli, defaultStackName) {
58
-		logrus.Infof("Removing stack %s", defaultStackName)
59
-		removeStack(cli, defaultStackName)
60
-	}
61
-	if hasVolume(cli, defaultVolumeName) {
62
-		logrus.Infof("Removing volume %s", defaultVolumeName)
63
-		removeVolume(cli, defaultVolumeName)
64
-	}
65
-	if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil {
66
-		return 1, err
67
-	}
68
-	workerImageForStack := defaultWorkerImageName
69
-	if *pushWorkerImage != "" {
70
-		logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage)
71
-		if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil {
72
-			return 1, err
73
-		}
74
-		workerImageForStack = *pushWorkerImage
75
-	}
76
-	compose, err := createCompose("", cli, composeOptions{
77
-		Replicas:     *replicas,
78
-		Chunks:       *chunks,
79
-		MasterImage:  defaultMasterImageName,
80
-		WorkerImage:  workerImageForStack,
81
-		Volume:       defaultVolumeName,
82
-		Shuffle:      *shuffle,
83
-		RandSeed:     *randSeed,
84
-		DryRun:       *dryRun,
85
-		KeepExecutor: *keepExecutor,
86
-	})
87
-	if err != nil {
88
-		return 1, err
89
-	}
90
-	filters, err := filtersBytes(*filtersFile)
91
-	if err != nil {
92
-		return 1, err
93
-	}
94
-	logrus.Infof("Creating volume %s with input data", defaultVolumeName)
95
-	if err = createVolumeWithData(cli,
96
-		defaultVolumeName,
97
-		map[string][]byte{"/input": filters},
98
-		defaultMasterImageName); err != nil {
99
-		return 1, err
100
-	}
101
-	logrus.Infof("Deploying stack %s from %s", defaultStackName, compose)
102
-	defer func() {
103
-		logrus.Info("NOTE: You may want to inspect or clean up following resources:")
104
-		logrus.Infof(" - Stack: %s", defaultStackName)
105
-		logrus.Infof(" - Volume: %s", defaultVolumeName)
106
-		logrus.Infof(" - Compose file: %s", compose)
107
-		logrus.Infof(" - Master image: %s", defaultMasterImageName)
108
-		logrus.Infof(" - Worker image: %s", workerImageForStack)
109
-	}()
110
-	if err = deployStack(cli, defaultStackName, compose); err != nil {
111
-		return 1, err
112
-	}
113
-	logrus.Infof("The log will be displayed here after some duration."+
114
-		"You can watch the live status via `docker service logs %s_worker`",
115
-		defaultStackName)
116
-	masterContainerID, err := waitForMasterUp(cli, defaultStackName)
117
-	if err != nil {
118
-		return 1, err
119
-	}
120
-	rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID)
121
-	if err != nil {
122
-		return 1, err
123
-	}
124
-	logrus.Infof("Exit status: %d", rc)
125
-	return int(rc), nil
126
-}
127
-
128
-func ensureImages(cli *client.Client, images []string) error {
129
-	for _, image := range images {
130
-		_, _, err := cli.ImageInspectWithRaw(context.Background(), image)
131
-		if err != nil {
132
-			return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v",
133
-				image, err)
134
-		}
135
-	}
136
-	return nil
137
-}
138
-
139
-func filtersBytes(optionalFiltersFile string) ([]byte, error) {
140
-	var b []byte
141
-	if optionalFiltersFile == "" {
142
-		tests, err := enumerateTests(".")
143
-		if err != nil {
144
-			return b, err
145
-		}
146
-		b = []byte(strings.Join(tests, "\n") + "\n")
147
-	} else {
148
-		var err error
149
-		b, err = ioutil.ReadFile(optionalFiltersFile)
150
-		if err != nil {
151
-			return b, err
152
-		}
153
-	}
154
-	return b, nil
155
-}
156
-
157
-func waitForMasterUp(cli *client.Client, stackName string) (string, error) {
158
-	// FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping
159
-	time.Sleep(10 * time.Second)
160
-
161
-	fil := filters.NewArgs()
162
-	fil.Add("label", "com.docker.stack.namespace="+stackName)
163
-	// FIXME(AkihiroSuda): we should not rely on internal service naming convention
164
-	fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master")
165
-	masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
166
-		All:     true,
167
-		Filters: fil,
168
-	})
169
-	if err != nil {
170
-		return "", err
171
-	}
172
-	if len(masters) == 0 {
173
-		return "", fmt.Errorf("master not running in stack %s", stackName)
174
-	}
175
-	return masters[0].ID, nil
176
-}
177
-
178
-func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) {
179
-	stream, err := cli.ContainerLogs(context.Background(),
180
-		containerID,
181
-		types.ContainerLogsOptions{
182
-			ShowStdout: true,
183
-			ShowStderr: true,
184
-			Follow:     true,
185
-		})
186
-	if err != nil {
187
-		return 1, err
188
-	}
189
-	stdcopy.StdCopy(stdout, stderr, stream)
190
-	stream.Close()
191
-	resultC, errC := cli.ContainerWait(context.Background(), containerID, "")
192
-	select {
193
-	case err := <-errC:
194
-		return 1, err
195
-	case result := <-resultC:
196
-		return result.StatusCode, nil
197
-	}
198
-}
199 1
deleted file mode 100644
... ...
@@ -1,88 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"archive/tar"
5
-	"bytes"
6
-	"context"
7
-	"io"
8
-
9
-	"github.com/docker/docker/api/types"
10
-	"github.com/docker/docker/api/types/container"
11
-	"github.com/docker/docker/api/types/mount"
12
-	"github.com/docker/docker/api/types/volume"
13
-	"github.com/docker/docker/client"
14
-)
15
-
16
-func createTar(data map[string][]byte) (io.Reader, error) {
17
-	var b bytes.Buffer
18
-	tw := tar.NewWriter(&b)
19
-	for path, datum := range data {
20
-		hdr := tar.Header{
21
-			Name: path,
22
-			Mode: 0644,
23
-			Size: int64(len(datum)),
24
-		}
25
-		if err := tw.WriteHeader(&hdr); err != nil {
26
-			return nil, err
27
-		}
28
-		_, err := tw.Write(datum)
29
-		if err != nil {
30
-			return nil, err
31
-		}
32
-	}
33
-	if err := tw.Close(); err != nil {
34
-		return nil, err
35
-	}
36
-	return &b, nil
37
-}
38
-
39
-// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar"))
40
-// Internally, a container is created from the image so as to provision the data to the volume,
41
-// which is attached to the container.
42
-func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error {
43
-	_, err := cli.VolumeCreate(context.Background(),
44
-		volume.VolumeCreateBody{
45
-			Driver: "local",
46
-			Name:   volumeName,
47
-		})
48
-	if err != nil {
49
-		return err
50
-	}
51
-	mnt := "/mnt"
52
-	miniContainer, err := cli.ContainerCreate(context.Background(),
53
-		&container.Config{
54
-			Image: image,
55
-		},
56
-		&container.HostConfig{
57
-			Mounts: []mount.Mount{
58
-				{
59
-					Type:   mount.TypeVolume,
60
-					Source: volumeName,
61
-					Target: mnt,
62
-				},
63
-			},
64
-		}, nil, "")
65
-	if err != nil {
66
-		return err
67
-	}
68
-	tr, err := createTar(data)
69
-	if err != nil {
70
-		return err
71
-	}
72
-	if cli.CopyToContainer(context.Background(),
73
-		miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil {
74
-		return err
75
-	}
76
-	return cli.ContainerRemove(context.Background(),
77
-		miniContainer.ID,
78
-		types.ContainerRemoveOptions{})
79
-}
80
-
81
-func hasVolume(cli *client.Client, volumeName string) bool {
82
-	_, err := cli.VolumeInspect(context.Background(), volumeName)
83
-	return err == nil
84
-}
85
-
86
-func removeVolume(cli *client.Client, volumeName string) error {
87
-	return cli.VolumeRemove(context.Background(), volumeName, true)
88
-}
... ...
@@ -1,5 +1,5 @@
1 1
 #!/usr/bin/env bash
2
-# required by `make build-integration-cli-on-swarm`
2
+# required by https://github.com/AkihiroSuda/kube-moby-integration
3 3
 set -e
4 4
 
5 5
 source hack/make/.integration-test-helpers