Add Swarm management backend

As described in our ROADMAP.md, this introduces new Swarm management API
endpoints that rely on swarmkit to deploy services. It currently vendors
the corresponding docker/engine-api changes.

This PR is fully backward compatible (joining a Swarm is an optional
feature of the Engine, and existing commands are not impacted).

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Signed-off-by: Victor Vieux <vieux@docker.com>
Signed-off-by: Daniel Nephin <dnephin@docker.com>
Signed-off-by: Jana Radhakrishnan <mrjana@docker.com>
Signed-off-by: Madhu Venugopal <madhu@docker.com>

Tonis Tiigi authored on 2016/06/14 11:52:49
Showing 42 changed files
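
For reviewers, a minimal sketch of exercising the new endpoint once this lands, written in Go against the daemon's default unix socket. The socket path and the "http://docker" host placeholder are assumptions of this sketch; the request and response shapes follow the InitRequest type and the initCluster handler added below, which writes the new node ID back as JSON.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Dial the daemon's unix socket instead of TCP (path is an assumption).
	client := &http.Client{
		Transport: &http.Transport{
			Dial: func(network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
	// Mirrors types.InitRequest as decoded by swarmRouter.initCluster.
	body, _ := json.Marshal(map[string]interface{}{
		"ListenAddr": "0.0.0.0:4500",
	})
	resp, err := client.Post("http://docker/swarm/init", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var nodeID string
	// The handler responds with the node ID serialized as a JSON string.
	json.NewDecoder(resp.Body).Decode(&nodeID)
	fmt.Println("initialized swarm as node", nodeID)
}
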
... ...
@@ -8,6 +8,7 @@ import (
8 8
 	"github.com/docker/engine-api/types"
9 9
 	"github.com/docker/engine-api/types/versions"
10 10
 	"github.com/gorilla/mux"
11
+	"google.golang.org/grpc"
11 12
 )
12 13
 
13 14
 // httpStatusError is an interface
... ...
@@ -58,6 +59,7 @@ func GetHTTPErrorStatusCode(err error) int {
58 58
 			"wrong login/password":  http.StatusUnauthorized,
59 59
 			"unauthorized":          http.StatusUnauthorized,
60 60
 			"hasn't been activated": http.StatusForbidden,
61
+			"this node":             http.StatusNotAcceptable,
61 62
 		} {
62 63
 			if strings.Contains(errStr, keyword) {
63 64
 				statusCode = status
... ...
@@ -85,7 +87,7 @@ func MakeErrorHandler(err error) http.HandlerFunc {
85 85
 			}
86 86
 			WriteJSON(w, statusCode, response)
87 87
 		} else {
88
-			http.Error(w, err.Error(), statusCode)
88
+			http.Error(w, grpc.ErrorDesc(err), statusCode)
89 89
 		}
90 90
 	}
91 91
 }
... ...
@@ -2,7 +2,6 @@ package network
2 2
 
3 3
 import (
4 4
 	"github.com/docker/engine-api/types"
5
-	"github.com/docker/engine-api/types/filters"
6 5
 	"github.com/docker/engine-api/types/network"
7 6
 	"github.com/docker/libnetwork"
8 7
 )
... ...
@@ -13,7 +12,7 @@ type Backend interface {
13 13
 	FindNetwork(idName string) (libnetwork.Network, error)
14 14
 	GetNetworkByName(idName string) (libnetwork.Network, error)
15 15
 	GetNetworksByID(partialID string) []libnetwork.Network
16
-	FilterNetworks(netFilters filters.Args) ([]libnetwork.Network, error)
16
+	GetNetworks() []libnetwork.Network
17 17
 	CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error)
18 18
 	ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
19 19
 	DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error
20 20
new file mode 100644
... ...
@@ -0,0 +1,98 @@
0
+package network
1
+
2
+import (
3
+	"fmt"
4
+
5
+	"github.com/docker/docker/runconfig"
6
+	"github.com/docker/engine-api/types"
7
+	"github.com/docker/engine-api/types/filters"
8
+)
9
+
10
+type filterHandler func([]types.NetworkResource, string) ([]types.NetworkResource, error)
11
+
12
+var (
13
+	// AcceptedFilters is the set of filter keys accepted for validation
14
+	AcceptedFilters = map[string]bool{
15
+		"driver": true,
16
+		"type":   true,
17
+		"name":   true,
18
+		"id":     true,
19
+		"label":  true,
20
+	}
21
+)
22
+
23
+func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) {
24
+	switch netType {
25
+	case "builtin":
26
+		for _, nw := range nws {
27
+			if runconfig.IsPreDefinedNetwork(nw.Name) {
28
+				retNws = append(retNws, nw)
29
+			}
30
+		}
31
+	case "custom":
32
+		for _, nw := range nws {
33
+			if !runconfig.IsPreDefinedNetwork(nw.Name) {
34
+				retNws = append(retNws, nw)
35
+			}
36
+		}
37
+	default:
38
+		return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType)
39
+	}
40
+	return retNws, nil
41
+}
42
+
43
+// filterNetworks filters the network list according to the user-specified filter
44
+// and returns the matching networks
45
+func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
46
+	// if filter is empty, return original network list
47
+	if filter.Len() == 0 {
48
+		return nws, nil
49
+	}
50
+
51
+	if err := filter.Validate(AcceptedFilters); err != nil {
52
+		return nil, err
53
+	}
54
+
55
+	var displayNet []types.NetworkResource
56
+	for _, nw := range nws {
57
+		if filter.Include("driver") {
58
+			if !filter.ExactMatch("driver", nw.Driver) {
59
+				continue
60
+			}
61
+		}
62
+		if filter.Include("name") {
63
+			if !filter.Match("name", nw.Name) {
64
+				continue
65
+			}
66
+		}
67
+		if filter.Include("id") {
68
+			if !filter.Match("id", nw.ID) {
69
+				continue
70
+			}
71
+		}
72
+		if filter.Include("label") {
73
+			if !filter.MatchKVList("label", nw.Labels) {
74
+				continue
75
+			}
76
+		}
77
+		displayNet = append(displayNet, nw)
78
+	}
79
+
80
+	if filter.Include("type") {
81
+		var typeNet []types.NetworkResource
82
+		errFilter := filter.WalkValues("type", func(fval string) error {
83
+			passList, err := filterNetworkByType(displayNet, fval)
84
+			if err != nil {
85
+				return err
86
+			}
87
+			typeNet = append(typeNet, passList...)
88
+			return nil
89
+		})
90
+		if errFilter != nil {
91
+			return nil, errFilter
92
+		}
93
+		displayNet = typeNet
94
+	}
95
+
96
+	return displayNet, nil
97
+}
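
A rough usage sketch for the new helper, from inside the same package (the customOverlays wrapper is hypothetical; filters.NewArgs and Args.Add come from the engine-api filters package already imported above):

package network

import (
	"fmt"

	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/filters"
)

// customOverlays keeps only user-defined overlay networks.
func customOverlays(nws []types.NetworkResource) ([]types.NetworkResource, error) {
	args := filters.NewArgs()
	args.Add("driver", "overlay")
	args.Add("type", "custom")
	matched, err := filterNetworks(nws, args)
	if err != nil {
		// an unknown filter key or a bad "type" value surfaces here
		return nil, err
	}
	for _, nw := range matched {
		fmt.Println(nw.Name, nw.ID)
	}
	return matched, nil
}
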
... ...
@@ -1,17 +1,22 @@
1 1
 package network
2 2
 
3
-import "github.com/docker/docker/api/server/router"
3
+import (
4
+	"github.com/docker/docker/api/server/router"
5
+	"github.com/docker/docker/daemon/cluster"
6
+)
4 7
 
5 8
 // networkRouter is a router to talk with the network controller
6 9
 type networkRouter struct {
7
-	backend Backend
8
-	routes  []router.Route
10
+	backend         Backend
11
+	clusterProvider *cluster.Cluster
12
+	routes          []router.Route
9 13
 }
10 14
 
11 15
 // NewRouter initializes a new network router
12
-func NewRouter(b Backend) router.Router {
16
+func NewRouter(b Backend, c *cluster.Cluster) router.Router {
13 17
 	r := &networkRouter{
14
-		backend: b,
18
+		backend:         b,
19
+		clusterProvider: c,
15 20
 	}
16 21
 	r.initRoutes()
17 22
 	return r
... ...
@@ -24,17 +24,30 @@ func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWrit
24 24
 		return err
25 25
 	}
26 26
 
27
-	list := []*types.NetworkResource{}
27
+	list := []types.NetworkResource{}
28 28
 
29
-	nwList, err := n.backend.FilterNetworks(netFilters)
30
-	if err != nil {
31
-		return err
29
+	if nr, err := n.clusterProvider.GetNetworks(); err == nil {
30
+		for _, nw := range nr {
31
+			list = append(list, nw)
32
+		}
32 33
 	}
33 34
 
34
-	for _, nw := range nwList {
35
-		list = append(list, buildNetworkResource(nw))
35
+	// Add the networks returned by the Docker daemon, skipping any network that
36
+	// was already returned by the cluster manager
37
+SKIP:
38
+	for _, nw := range n.backend.GetNetworks() {
39
+		for _, nl := range list {
40
+			if nl.ID == nw.ID() {
41
+				continue SKIP
42
+			}
43
+		}
44
+		list = append(list, *n.buildNetworkResource(nw))
36 45
 	}
37 46
 
47
+	list, err = filterNetworks(list, netFilters)
48
+	if err != nil {
49
+		return err
50
+	}
38 51
 	return httputils.WriteJSON(w, http.StatusOK, list)
39 52
 }
40 53
 
... ...
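
The network-list merge above treats cluster-manager entries as authoritative and uses a labeled continue to skip local networks whose IDs were already returned. The same pattern in isolation, with placeholder types for illustration:

package main

import "fmt"

type netInfo struct{ ID, Name string }

// merge appends local entries unless the cluster already provided that ID.
func merge(cluster, local []netInfo) []netInfo {
	merged := append([]netInfo{}, cluster...)
SKIP:
	for _, nw := range local {
		for _, seen := range merged {
			if seen.ID == nw.ID {
				continue SKIP // already provided by the cluster manager
			}
		}
		merged = append(merged, nw)
	}
	return merged
}

func main() {
	out := merge([]netInfo{{"n1", "ingress"}}, []netInfo{{"n1", "ingress"}, {"n2", "bridge"}})
	fmt.Println(out) // [{n1 ingress} {n2 bridge}]
}
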
@@ -45,9 +58,12 @@ func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r
45 45
 
46 46
 	nw, err := n.backend.FindNetwork(vars["id"])
47 47
 	if err != nil {
48
+		if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
49
+			return httputils.WriteJSON(w, http.StatusOK, nr)
50
+		}
48 51
 		return err
49 52
 	}
50
-	return httputils.WriteJSON(w, http.StatusOK, buildNetworkResource(nw))
53
+	return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw))
51 54
 }
52 55
 
53 56
 func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
... ...
@@ -67,7 +83,14 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
67 67
 
68 68
 	nw, err := n.backend.CreateNetwork(create)
69 69
 	if err != nil {
70
-		return err
70
+		if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
71
+			return err
72
+		}
73
+		id, err := n.clusterProvider.CreateNetwork(create)
74
+		if err != nil {
75
+			return err
76
+		}
77
+		nw = &types.NetworkCreateResponse{ID: id}
71 78
 	}
72 79
 
73 80
 	return httputils.WriteJSON(w, http.StatusCreated, nw)
... ...
@@ -121,6 +144,9 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
121 121
 	if err := httputils.ParseForm(r); err != nil {
122 122
 		return err
123 123
 	}
124
+	if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil {
125
+		return n.clusterProvider.RemoveNetwork(vars["id"])
126
+	}
124 127
 	if err := n.backend.DeleteNetwork(vars["id"]); err != nil {
125 128
 		return err
126 129
 	}
... ...
@@ -128,7 +154,7 @@ func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter
128 128
 	return nil
129 129
 }
130 130
 
131
-func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
131
+func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
132 132
 	r := &types.NetworkResource{}
133 133
 	if nw == nil {
134 134
 		return r
... ...
@@ -138,6 +164,13 @@ func buildNetworkResource(nw libnetwork.Network) *types.NetworkResource {
138 138
 	r.Name = nw.Name()
139 139
 	r.ID = nw.ID()
140 140
 	r.Scope = info.Scope()
141
+	if n.clusterProvider.IsManager() {
142
+		if _, err := n.clusterProvider.GetNetwork(nw.Name()); err == nil {
143
+			r.Scope = "swarm"
144
+		}
145
+	} else if info.Dynamic() {
146
+		r.Scope = "swarm"
147
+	}
141 148
 	r.Driver = nw.Type()
142 149
 	r.EnableIPv6 = info.IPv6Enabled()
143 150
 	r.Internal = info.Internal()
144 151
new file mode 100644
... ...
@@ -0,0 +1,26 @@
0
+package swarm
1
+
2
+import (
3
+	basictypes "github.com/docker/engine-api/types"
4
+	types "github.com/docker/engine-api/types/swarm"
5
+)
6
+
7
+// Backend abstracts a swarm commands manager.
8
+type Backend interface {
9
+	Init(req types.InitRequest) (string, error)
10
+	Join(req types.JoinRequest) error
11
+	Leave(force bool) error
12
+	Inspect() (types.Swarm, error)
13
+	Update(uint64, types.Spec) error
14
+	GetServices(basictypes.ServiceListOptions) ([]types.Service, error)
15
+	GetService(string) (types.Service, error)
16
+	CreateService(types.ServiceSpec) (string, error)
17
+	UpdateService(string, uint64, types.ServiceSpec) error
18
+	RemoveService(string) error
19
+	GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
20
+	GetNode(string) (types.Node, error)
21
+	UpdateNode(string, uint64, types.NodeSpec) error
22
+	RemoveNode(string) error
23
+	GetTasks(basictypes.TaskListOptions) ([]types.Task, error)
24
+	GetTask(string) (types.Task, error)
25
+}
0 26
new file mode 100644
... ...
@@ -0,0 +1,44 @@
0
+package swarm
1
+
2
+import "github.com/docker/docker/api/server/router"
3
+
4
+// swarmRouter is a router to talk with the swarm controller
5
+type swarmRouter struct {
6
+	backend Backend
7
+	routes  []router.Route
8
+}
9
+
10
+// NewRouter initializes a new swarm router
11
+func NewRouter(b Backend) router.Router {
12
+	r := &swarmRouter{
13
+		backend: b,
14
+	}
15
+	r.initRoutes()
16
+	return r
17
+}
18
+
19
+// Routes returns the available routers to the swarm controller
20
+func (sr *swarmRouter) Routes() []router.Route {
21
+	return sr.routes
22
+}
23
+
24
+func (sr *swarmRouter) initRoutes() {
25
+	sr.routes = []router.Route{
26
+		router.NewPostRoute("/swarm/init", sr.initCluster),
27
+		router.NewPostRoute("/swarm/join", sr.joinCluster),
28
+		router.NewPostRoute("/swarm/leave", sr.leaveCluster),
29
+		router.NewGetRoute("/swarm", sr.inspectCluster),
30
+		router.NewPostRoute("/swarm/update", sr.updateCluster),
31
+		router.NewGetRoute("/services", sr.getServices),
32
+		router.NewGetRoute("/services/{id:.*}", sr.getService),
33
+		router.NewPostRoute("/services/create", sr.createService),
34
+		router.NewPostRoute("/services/{id:.*}/update", sr.updateService),
35
+		router.NewDeleteRoute("/services/{id:.*}", sr.removeService),
36
+		router.NewGetRoute("/nodes", sr.getNodes),
37
+		router.NewGetRoute("/nodes/{id:.*}", sr.getNode),
38
+		router.NewDeleteRoute("/nodes/{id:.*}", sr.removeNode),
39
+		router.NewPostRoute("/nodes/{id:.*}/update", sr.updateNode),
40
+		router.NewGetRoute("/tasks", sr.getTasks),
41
+		router.NewGetRoute("/tasks/{id:.*}", sr.getTask),
42
+	}
43
+}
0 44
new file mode 100644
... ...
@@ -0,0 +1,229 @@
0
+package swarm
1
+
2
+import (
3
+	"encoding/json"
4
+	"fmt"
5
+	"net/http"
6
+	"strconv"
7
+
8
+	"github.com/Sirupsen/logrus"
9
+	"github.com/docker/docker/api/server/httputils"
10
+	basictypes "github.com/docker/engine-api/types"
11
+	"github.com/docker/engine-api/types/filters"
12
+	types "github.com/docker/engine-api/types/swarm"
13
+	"golang.org/x/net/context"
14
+)
15
+
16
+func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
17
+	var req types.InitRequest
18
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
19
+		return err
20
+	}
21
+	nodeID, err := sr.backend.Init(req)
22
+	if err != nil {
23
+		logrus.Errorf("Error initializing swarm: %v", err)
24
+		return err
25
+	}
26
+	return httputils.WriteJSON(w, http.StatusOK, nodeID)
27
+}
28
+
29
+func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
30
+	var req types.JoinRequest
31
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
32
+		return err
33
+	}
34
+	return sr.backend.Join(req)
35
+}
36
+
37
+func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
38
+	if err := httputils.ParseForm(r); err != nil {
39
+		return err
40
+	}
41
+
42
+	force := httputils.BoolValue(r, "force")
43
+	return sr.backend.Leave(force)
44
+}
45
+
46
+func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
47
+	swarm, err := sr.backend.Inspect()
48
+	if err != nil {
49
+		logrus.Errorf("Error getting swarm: %v", err)
50
+		return err
51
+	}
52
+
53
+	return httputils.WriteJSON(w, http.StatusOK, swarm)
54
+}
55
+
56
+func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
57
+	var swarm types.Spec
58
+	if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil {
59
+		return err
60
+	}
61
+
62
+	rawVersion := r.URL.Query().Get("version")
63
+	version, err := strconv.ParseUint(rawVersion, 10, 64)
64
+	if err != nil {
65
+		return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error())
66
+	}
67
+
68
+	if err := sr.backend.Update(version, swarm); err != nil {
69
+		logrus.Errorf("Error configuring swarm: %v", err)
70
+		return err
71
+	}
72
+	return nil
73
+}
74
+
75
+func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
76
+	if err := httputils.ParseForm(r); err != nil {
77
+		return err
78
+	}
79
+	filter, err := filters.FromParam(r.Form.Get("filters"))
80
+	if err != nil {
81
+		return err
82
+	}
83
+
84
+	services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filter: filter})
85
+	if err != nil {
86
+		logrus.Errorf("Error getting services: %v", err)
87
+		return err
88
+	}
89
+
90
+	return httputils.WriteJSON(w, http.StatusOK, services)
91
+}
92
+
93
+func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
94
+	service, err := sr.backend.GetService(vars["id"])
95
+	if err != nil {
96
+		logrus.Errorf("Error getting service %s: %v", vars["id"], err)
97
+		return err
98
+	}
99
+
100
+	return httputils.WriteJSON(w, http.StatusOK, service)
101
+}
102
+
103
+func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
104
+	var service types.ServiceSpec
105
+	if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
106
+		return err
107
+	}
108
+
109
+	id, err := sr.backend.CreateService(service)
110
+	if err != nil {
111
+		logrus.Errorf("Error reating service %s: %v", id, err)
112
+		return err
113
+	}
114
+
115
+	return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ServiceCreateResponse{
116
+		ID: id,
117
+	})
118
+}
119
+
120
+func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
121
+	var service types.ServiceSpec
122
+	if err := json.NewDecoder(r.Body).Decode(&service); err != nil {
123
+		return err
124
+	}
125
+
126
+	rawVersion := r.URL.Query().Get("version")
127
+	version, err := strconv.ParseUint(rawVersion, 10, 64)
128
+	if err != nil {
129
+		return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error())
130
+	}
131
+
132
+	if err := sr.backend.UpdateService(vars["id"], version, service); err != nil {
133
+		logrus.Errorf("Error updating service %s: %v", vars["id"], err)
134
+		return err
135
+	}
136
+	return nil
137
+}
138
+
139
+func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
140
+	if err := sr.backend.RemoveService(vars["id"]); err != nil {
141
+		logrus.Errorf("Error removing service %s: %v", vars["id"], err)
142
+		return err
143
+	}
144
+	return nil
145
+}
146
+
147
+func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
148
+	if err := httputils.ParseForm(r); err != nil {
149
+		return err
150
+	}
151
+	filter, err := filters.FromParam(r.Form.Get("filters"))
152
+	if err != nil {
153
+		return err
154
+	}
155
+
156
+	nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filter: filter})
157
+	if err != nil {
158
+		logrus.Errorf("Error getting nodes: %v", err)
159
+		return err
160
+	}
161
+
162
+	return httputils.WriteJSON(w, http.StatusOK, nodes)
163
+}
164
+
165
+func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
166
+	node, err := sr.backend.GetNode(vars["id"])
167
+	if err != nil {
168
+		logrus.Errorf("Error getting node %s: %v", vars["id"], err)
169
+		return err
170
+	}
171
+
172
+	return httputils.WriteJSON(w, http.StatusOK, node)
173
+}
174
+
175
+func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
176
+	var node types.NodeSpec
177
+	if err := json.NewDecoder(r.Body).Decode(&node); err != nil {
178
+		return err
179
+	}
180
+
181
+	rawVersion := r.URL.Query().Get("version")
182
+	version, err := strconv.ParseUint(rawVersion, 10, 64)
183
+	if err != nil {
184
+		return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error())
185
+	}
186
+
187
+	if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil {
188
+		logrus.Errorf("Error updating node %s: %v", vars["id"], err)
189
+		return err
190
+	}
191
+	return nil
192
+}
193
+
194
+func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
195
+	if err := sr.backend.RemoveNode(vars["id"]); err != nil {
196
+		logrus.Errorf("Error removing node %s: %v", vars["id"], err)
197
+		return err
198
+	}
199
+	return nil
200
+}
201
+
202
+func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
203
+	if err := httputils.ParseForm(r); err != nil {
204
+		return err
205
+	}
206
+	filter, err := filters.FromParam(r.Form.Get("filters"))
207
+	if err != nil {
208
+		return err
209
+	}
210
+
211
+	tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filter: filter})
212
+	if err != nil {
213
+		logrus.Errorf("Error getting tasks: %v", err)
214
+		return err
215
+	}
216
+
217
+	return httputils.WriteJSON(w, http.StatusOK, tasks)
218
+}
219
+
220
+func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
221
+	task, err := sr.backend.GetTask(vars["id"])
222
+	if err != nil {
223
+		logrus.Errorf("Error getting task %s: %v", vars["id"], err)
224
+		return err
225
+	}
226
+
227
+	return httputils.WriteJSON(w, http.StatusOK, task)
228
+}
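
All of the update handlers above require a ?version=N query parameter (parsed with strconv.ParseUint) that swarmkit compares against the object's current version, giving optimistic concurrency. A client-side sketch of the resulting read-modify-write cycle, reusing the unix-socket client from the earlier sketch and the engine-api swarm types vendored by this PR (updateServiceImage is a hypothetical helper):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	swarm "github.com/docker/engine-api/types/swarm"
)

// updateServiceImage GETs the service, changes the spec, then POSTs it back
// with the version that was read; a stale version makes swarmkit reject it.
func updateServiceImage(client *http.Client, id, image string) error {
	resp, err := client.Get("http://docker/services/" + id)
	if err != nil {
		return err
	}
	var svc swarm.Service
	if err := json.NewDecoder(resp.Body).Decode(&svc); err != nil {
		resp.Body.Close()
		return err
	}
	resp.Body.Close()

	svc.Spec.TaskTemplate.ContainerSpec.Image = image
	body, err := json.Marshal(svc.Spec)
	if err != nil {
		return err
	}
	url := fmt.Sprintf("http://docker/services/%s/update?version=%d", id, svc.Version.Index)
	_, err = client.Post(url, "application/json", bytes.NewReader(body))
	return err
}
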
... ...
@@ -1,18 +1,23 @@
1 1
 package system
2 2
 
3
-import "github.com/docker/docker/api/server/router"
3
+import (
4
+	"github.com/docker/docker/api/server/router"
5
+	"github.com/docker/docker/daemon/cluster"
6
+)
4 7
 
5 8
 // systemRouter provides information about the Docker system overall.
6 9
 // It gathers information about host, daemon and container events.
7 10
 type systemRouter struct {
8
-	backend Backend
9
-	routes  []router.Route
11
+	backend         Backend
12
+	clusterProvider *cluster.Cluster
13
+	routes          []router.Route
10 14
 }
11 15
 
12 16
 // NewRouter initializes a new system router
13
-func NewRouter(b Backend) router.Router {
17
+func NewRouter(b Backend, c *cluster.Cluster) router.Router {
14 18
 	r := &systemRouter{
15
-		backend: b,
19
+		backend:         b,
20
+		clusterProvider: c,
16 21
 	}
17 22
 
18 23
 	r.routes = []router.Route{
... ...
@@ -33,6 +33,9 @@ func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *ht
33 33
 	if err != nil {
34 34
 		return err
35 35
 	}
36
+	if s.clusterProvider != nil {
37
+		info.Swarm = s.clusterProvider.Info()
38
+	}
36 39
 
37 40
 	return httputils.WriteJSON(w, http.StatusOK, info)
38 41
 }
... ...
@@ -20,12 +20,14 @@ import (
20 20
 	"github.com/docker/docker/api/server/router/container"
21 21
 	"github.com/docker/docker/api/server/router/image"
22 22
 	"github.com/docker/docker/api/server/router/network"
23
+	swarmrouter "github.com/docker/docker/api/server/router/swarm"
23 24
 	systemrouter "github.com/docker/docker/api/server/router/system"
24 25
 	"github.com/docker/docker/api/server/router/volume"
25 26
 	"github.com/docker/docker/builder/dockerfile"
26 27
 	cliflags "github.com/docker/docker/cli/flags"
27 28
 	"github.com/docker/docker/cliconfig"
28 29
 	"github.com/docker/docker/daemon"
30
+	"github.com/docker/docker/daemon/cluster"
29 31
 	"github.com/docker/docker/daemon/logger"
30 32
 	"github.com/docker/docker/dockerversion"
31 33
 	"github.com/docker/docker/libcontainerd"
... ...
@@ -208,6 +210,7 @@ func (cli *DaemonCli) start() (err error) {
208 208
 	}
209 209
 
210 210
 	api := apiserver.New(serverConfig)
211
+	cli.api = api
211 212
 
212 213
 	for i := 0; i < len(cli.Config.Hosts); i++ {
213 214
 		var err error
... ...
@@ -264,6 +267,17 @@ func (cli *DaemonCli) start() (err error) {
264 264
 		return fmt.Errorf("Error starting daemon: %v", err)
265 265
 	}
266 266
 
267
+	name, _ := os.Hostname()
268
+
269
+	c, err := cluster.New(cluster.Config{
270
+		Root:    cli.Config.Root,
271
+		Name:    name,
272
+		Backend: d,
273
+	})
274
+	if err != nil {
275
+		logrus.Fatalf("Error creating cluster component: %v", err)
276
+	}
277
+
267 278
 	logrus.Info("Daemon has completed initialization")
268 279
 
269 280
 	logrus.WithFields(logrus.Fields{
... ...
@@ -273,7 +287,7 @@ func (cli *DaemonCli) start() (err error) {
273 273
 	}).Info("Docker daemon")
274 274
 
275 275
 	cli.initMiddlewares(api, serverConfig)
276
-	initRouter(api, d)
276
+	initRouter(api, d, c)
277 277
 
278 278
 	cli.d = d
279 279
 	cli.setupConfigReloadTrap()
... ...
@@ -290,6 +304,7 @@ func (cli *DaemonCli) start() (err error) {
290 290
 	// Daemon is fully initialized and handling API traffic
291 291
 	// Wait for serve API to complete
292 292
 	errAPI := <-serveAPIWait
293
+	c.Cleanup()
293 294
 	shutdownDaemon(d, 15)
294 295
 	containerdRemote.Cleanup()
295 296
 	if errAPI != nil {
... ...
@@ -385,18 +400,19 @@ func loadDaemonCliConfig(config *daemon.Config, flags *flag.FlagSet, commonConfi
385 385
 	return config, nil
386 386
 }
387 387
 
388
-func initRouter(s *apiserver.Server, d *daemon.Daemon) {
388
+func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
389 389
 	decoder := runconfig.ContainerDecoder{}
390 390
 
391 391
 	routers := []router.Router{
392 392
 		container.NewRouter(d, decoder),
393 393
 		image.NewRouter(d, decoder),
394
-		systemrouter.NewRouter(d),
394
+		systemrouter.NewRouter(d, c),
395 395
 		volume.NewRouter(d),
396 396
 		build.NewRouter(dockerfile.NewBuildManager(d)),
397
+		swarmrouter.NewRouter(c),
397 398
 	}
398 399
 	if d.NetworkControllerEnabled() {
399
-		routers = append(routers, network.NewRouter(d))
400
+		routers = append(routers, network.NewRouter(d, c))
400 401
 	}
401 402
 
402 403
 	s.InitRouter(utils.IsDebugEnabled(), routers...)
... ...
@@ -66,6 +66,7 @@ type CommonContainer struct {
66 66
 	RWLayer         layer.RWLayer  `json:"-"`
67 67
 	ID              string
68 68
 	Created         time.Time
69
+	Managed         bool
69 70
 	Path            string
70 71
 	Args            []string
71 72
 	Config          *containertypes.Config
... ...
@@ -790,7 +791,7 @@ func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epC
790 790
 		ipam := epConfig.IPAMConfig
791 791
 		if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "") {
792 792
 			createOptions = append(createOptions,
793
-				libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil))
793
+				libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil, nil))
794 794
 		}
795 795
 
796 796
 		for _, alias := range epConfig.Aliases {
... ...
@@ -798,6 +799,27 @@ func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epC
798 798
 		}
799 799
 	}
800 800
 
801
+	if container.NetworkSettings.Service != nil {
802
+		svcCfg := container.NetworkSettings.Service
803
+
804
+		var vip string
805
+		if svcCfg.VirtualAddresses[n.ID()] != nil {
806
+			vip = svcCfg.VirtualAddresses[n.ID()].IPv4
807
+		}
808
+
809
+		var portConfigs []*libnetwork.PortConfig
810
+		for _, portConfig := range svcCfg.ExposedPorts {
811
+			portConfigs = append(portConfigs, &libnetwork.PortConfig{
812
+				Name:          portConfig.Name,
813
+				Protocol:      libnetwork.PortConfig_Protocol(portConfig.Protocol),
814
+				TargetPort:    portConfig.TargetPort,
815
+				PublishedPort: portConfig.PublishedPort,
816
+			})
817
+		}
818
+
819
+		createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs))
820
+	}
821
+
801 822
 	if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
802 823
 		createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
803 824
 	}
... ...
@@ -5,6 +5,8 @@ import (
5 5
 	"sync"
6 6
 	"time"
7 7
 
8
+	"golang.org/x/net/context"
9
+
8 10
 	"github.com/docker/go-units"
9 11
 )
10 12
 
... ...
@@ -139,6 +141,32 @@ func (s *State) WaitStop(timeout time.Duration) (int, error) {
139 139
 	return s.getExitCode(), nil
140 140
 }
141 141
 
142
+// WaitWithContext waits for the container to stop. Optional context can be
143
+// passed for canceling the request.
144
+func (s *State) WaitWithContext(ctx context.Context) <-chan int {
145
+	// todo(tonistiigi): make other wait functions use this
146
+	c := make(chan int)
147
+	go func() {
148
+		s.Lock()
149
+		if !s.Running {
150
+			exitCode := s.ExitCode
151
+			s.Unlock()
152
+			c <- exitCode
153
+			close(c)
154
+			return
155
+		}
156
+		waitChan := s.waitChan
157
+		s.Unlock()
158
+		select {
159
+		case <-waitChan:
160
+			c <- s.getExitCode()
161
+		case <-ctx.Done():
162
+		}
163
+		close(c)
164
+	}()
165
+	return c
166
+}
167
+
142 168
 // IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
143 169
 func (s *State) IsRunning() bool {
144 170
 	s.Lock()
145 171
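
A usage sketch for the new helper (waitOrGiveUp is hypothetical): because WaitWithContext closes its channel without sending when the context fires first, the two-value receive distinguishes a real exit code from cancellation.

package container

import (
	"time"

	"github.com/Sirupsen/logrus"
	"golang.org/x/net/context"
)

// waitOrGiveUp waits up to 30s for the container to stop.
func waitOrGiveUp(s *State) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if code, ok := <-s.WaitWithContext(ctx); ok {
		logrus.Infof("container exited with status %d", code)
	} else {
		// channel was closed without a send: the context expired first
		logrus.Warnf("gave up waiting: %v", ctx.Err())
	}
}
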
new file mode 100644
... ...
@@ -0,0 +1,1056 @@
0
+package cluster
1
+
2
+import (
3
+	"encoding/json"
4
+	"fmt"
5
+	"io/ioutil"
6
+	"os"
7
+	"path/filepath"
8
+	"strings"
9
+	"sync"
10
+	"time"
11
+
12
+	"google.golang.org/grpc"
13
+
14
+	"github.com/Sirupsen/logrus"
15
+	"github.com/docker/docker/daemon/cluster/convert"
16
+	executorpkg "github.com/docker/docker/daemon/cluster/executor"
17
+	"github.com/docker/docker/daemon/cluster/executor/container"
18
+	"github.com/docker/docker/errors"
19
+	"github.com/docker/docker/pkg/ioutils"
20
+	"github.com/docker/docker/runconfig"
21
+	apitypes "github.com/docker/engine-api/types"
22
+	types "github.com/docker/engine-api/types/swarm"
23
+	swarmagent "github.com/docker/swarmkit/agent"
24
+	swarmapi "github.com/docker/swarmkit/api"
25
+	"golang.org/x/net/context"
26
+)
27
+
28
+const swarmDirName = "swarm"
29
+const controlSocket = "control.sock"
30
+const swarmConnectTimeout = 5 * time.Second
31
+const stateFile = "docker-state.json"
32
+
33
+const (
34
+	initialReconnectDelay = 100 * time.Millisecond
35
+	maxReconnectDelay     = 10 * time.Second
36
+)
37
+
38
+// ErrNoManager is returned when a manager-only function is called on a non-manager node
39
+var ErrNoManager = fmt.Errorf("this node is not participating as a Swarm manager")
40
+
41
+// ErrNoSwarm is returned on leaving a cluster that was never initialized
42
+var ErrNoSwarm = fmt.Errorf("this node is not part of Swarm")
43
+
44
+// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
45
+var ErrSwarmExists = fmt.Errorf("this node is already part of a Swarm")
46
+
47
+// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
48
+var ErrSwarmJoinTimeoutReached = fmt.Errorf("timeout reached before node was joined")
49
+
50
+type state struct {
51
+	ListenAddr string
52
+}
53
+
54
+// Config provides values for Cluster.
55
+type Config struct {
56
+	Root    string
57
+	Name    string
58
+	Backend executorpkg.Backend
59
+}
60
+
61
+// Cluster provides capabilities to participate in a cluster as a worker, or as a
62
+// combined manager and worker.
63
+type Cluster struct {
64
+	sync.RWMutex
65
+	root           string
66
+	config         Config
67
+	configEvent    chan struct{} // todo: make this an array and make it goroutine-safe
68
+	node           *swarmagent.Node
69
+	conn           *grpc.ClientConn
70
+	client         swarmapi.ControlClient
71
+	ready          bool
72
+	listenAddr     string
73
+	err            error
74
+	reconnectDelay time.Duration
75
+	stop           bool
76
+	cancelDelay    func()
77
+}
78
+
79
+// New creates a new Cluster instance using the provided config.
80
+func New(config Config) (*Cluster, error) {
81
+	root := filepath.Join(config.Root, swarmDirName)
82
+	if err := os.MkdirAll(root, 0700); err != nil {
83
+		return nil, err
84
+	}
85
+	c := &Cluster{
86
+		root:           root,
87
+		config:         config,
88
+		configEvent:    make(chan struct{}, 10),
89
+		reconnectDelay: initialReconnectDelay,
90
+	}
91
+
92
+	dt, err := ioutil.ReadFile(filepath.Join(root, stateFile))
93
+	if err != nil {
94
+		if os.IsNotExist(err) {
95
+			return c, nil
96
+		}
97
+		return nil, err
98
+	}
99
+
100
+	var st state
101
+	if err := json.Unmarshal(dt, &st); err != nil {
102
+		return nil, err
103
+	}
104
+
105
+	n, ctx, err := c.startNewNode(false, st.ListenAddr, "", "", "", false)
106
+	if err != nil {
107
+		return nil, err
108
+	}
109
+
110
+	select {
111
+	case <-time.After(swarmConnectTimeout):
112
+		logrus.Errorf("swarm component could not be started before timeout was reached")
113
+	case <-n.Ready(context.Background()):
114
+	case <-ctx.Done():
115
+	}
116
+	if ctx.Err() != nil {
117
+		return nil, fmt.Errorf("swarm component could not be started")
118
+	}
119
+	go c.reconnectOnFailure(ctx)
120
+	return c, nil
121
+}
122
+
123
+func (c *Cluster) checkCompatibility() error {
124
+	info, _ := c.config.Backend.SystemInfo()
125
+	if info != nil && (info.ClusterStore != "" || info.ClusterAdvertise != "") {
126
+		return fmt.Errorf("swarm mode is incompatible with `--cluster-store` and `--cluster-advertise daemon configuration")
127
+	}
128
+	return nil
129
+}
130
+
131
+func (c *Cluster) saveState() error {
132
+	dt, err := json.Marshal(state{ListenAddr: c.listenAddr})
133
+	if err != nil {
134
+		return err
135
+	}
136
+	return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600)
137
+}
138
+
139
+func (c *Cluster) reconnectOnFailure(ctx context.Context) {
140
+	for {
141
+		<-ctx.Done()
142
+		c.Lock()
143
+		if c.stop || c.node != nil {
144
+			c.Unlock()
145
+			return
146
+		}
147
+		c.reconnectDelay *= 2
148
+		if c.reconnectDelay > maxReconnectDelay {
149
+			c.reconnectDelay = maxReconnectDelay
150
+		}
151
+		logrus.Warnf("Restarting swarm in %.2f seconds", c.reconnectDelay.Seconds())
152
+		delayCtx, cancel := context.WithTimeout(context.Background(), c.reconnectDelay)
153
+		c.cancelDelay = cancel
154
+		c.Unlock()
155
+		<-delayCtx.Done()
156
+		if delayCtx.Err() != context.DeadlineExceeded {
157
+			return
158
+		}
159
+		c.Lock()
160
+		if c.node != nil {
161
+			c.Unlock()
162
+			return
163
+		}
164
+		var err error
165
+		_, ctx, err = c.startNewNode(false, c.listenAddr, c.getRemoteAddress(), "", "", false)
166
+		if err != nil {
167
+			c.err = err
168
+			ctx = delayCtx
169
+		}
170
+		c.Unlock()
171
+	}
172
+}
173
+
174
+func (c *Cluster) startNewNode(forceNewCluster bool, listenAddr, joinAddr, secret, cahash string, ismanager bool) (*swarmagent.Node, context.Context, error) {
175
+	if err := c.checkCompatibility(); err != nil {
176
+		return nil, nil, err
177
+	}
178
+	c.node = nil
179
+	c.cancelDelay = nil
180
+	node, err := swarmagent.NewNode(&swarmagent.NodeConfig{
181
+		Hostname:         c.config.Name,
182
+		ForceNewCluster:  forceNewCluster,
183
+		ListenControlAPI: filepath.Join(c.root, controlSocket),
184
+		ListenRemoteAPI:  listenAddr,
185
+		JoinAddr:         joinAddr,
186
+		StateDir:         c.root,
187
+		CAHash:           cahash,
188
+		Secret:           secret,
189
+		Executor:         container.NewExecutor(c.config.Backend),
190
+		HeartbeatTick:    1,
191
+		ElectionTick:     3,
192
+		IsManager:        ismanager,
193
+	})
194
+	if err != nil {
195
+		return nil, nil, err
196
+	}
197
+	ctx, cancel := context.WithCancel(context.Background())
198
+	if err := node.Start(ctx); err != nil {
199
+		return nil, nil, err
200
+	}
201
+
202
+	c.node = node
203
+	c.listenAddr = listenAddr
204
+	c.saveState()
205
+	c.config.Backend.SetClusterProvider(c)
206
+	go func() {
207
+		err := node.Err(ctx)
208
+		if err != nil {
209
+			logrus.Errorf("cluster exited with error: %v", err)
210
+		}
211
+		c.Lock()
212
+		c.conn = nil
213
+		c.client = nil
214
+		c.node = nil
215
+		c.ready = false
216
+		c.err = err
217
+		c.Unlock()
218
+		cancel()
219
+	}()
220
+
221
+	go func() {
222
+		select {
223
+		case <-node.Ready(context.Background()):
224
+			c.Lock()
225
+			c.reconnectDelay = initialReconnectDelay
226
+			c.Unlock()
227
+		case <-ctx.Done():
228
+		}
229
+		if ctx.Err() == nil {
230
+			c.Lock()
231
+			c.ready = true
232
+			c.err = nil
233
+			c.Unlock()
234
+		}
235
+		c.configEvent <- struct{}{}
236
+	}()
237
+
238
+	go func() {
239
+		for conn := range node.ListenControlSocket(ctx) {
240
+			c.Lock()
241
+			if c.conn != conn {
242
+				c.client = swarmapi.NewControlClient(conn)
243
+			}
244
+			if c.conn != nil {
245
+				c.client = nil
246
+			}
247
+			c.conn = conn
248
+			c.Unlock()
249
+			c.configEvent <- struct{}{}
250
+		}
251
+	}()
252
+
253
+	return node, ctx, nil
254
+}
255
+
256
+// Init initializes a new cluster from a user-provided request.
257
+func (c *Cluster) Init(req types.InitRequest) (string, error) {
258
+	c.Lock()
259
+	if c.node != nil {
260
+		c.Unlock()
261
+		if !req.ForceNewCluster {
262
+			return "", ErrSwarmExists
263
+		}
264
+		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
265
+		defer cancel()
266
+		if err := c.node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
267
+			return "", err
268
+		}
269
+		c.Lock()
270
+		c.node = nil
271
+		c.conn = nil
272
+		c.ready = false
273
+	}
274
+	// todo: check whether current state already exists
275
+	n, ctx, err := c.startNewNode(req.ForceNewCluster, req.ListenAddr, "", "", "", false)
276
+	if err != nil {
277
+		c.Unlock()
278
+		return "", err
279
+	}
280
+	c.Unlock()
281
+
282
+	select {
283
+	case <-n.Ready(context.Background()):
284
+		if err := initAcceptancePolicy(n, req.Spec.AcceptancePolicy); err != nil {
285
+			return "", err
286
+		}
287
+		go c.reconnectOnFailure(ctx)
288
+		return n.NodeID(), nil
289
+	case <-ctx.Done():
290
+		c.RLock()
291
+		defer c.RUnlock()
292
+		if c.err != nil {
293
+			if !req.ForceNewCluster { // if failure on first attempt don't keep state
294
+				if err := c.clearState(); err != nil {
295
+					return "", err
296
+				}
297
+			}
298
+			return "", c.err
299
+		}
300
+		return "", ctx.Err()
301
+	}
302
+}
303
+
304
+// Join makes the current Cluster part of an existing swarm cluster.
305
+func (c *Cluster) Join(req types.JoinRequest) error {
306
+	c.Lock()
307
+	if c.node != nil {
308
+		c.Unlock()
309
+		return ErrSwarmExists
310
+	}
311
+	// todo: check whether current state already exists
312
+	if len(req.RemoteAddrs) == 0 {
313
+		return fmt.Errorf("at least 1 RemoteAddr is required to join")
314
+	}
315
+	n, ctx, err := c.startNewNode(false, req.ListenAddr, req.RemoteAddrs[0], req.Secret, req.CACertHash, req.Manager)
316
+	if err != nil {
317
+		c.Unlock()
318
+		return err
319
+	}
320
+	c.Unlock()
321
+
322
+	select {
323
+	case <-time.After(swarmConnectTimeout):
324
+		go c.reconnectOnFailure(ctx)
325
+		if nodeid := n.NodeID(); nodeid != "" {
326
+			return fmt.Errorf("Timeout reached before node was joined. Your cluster settings may be preventing this node from automatically joining. To accept this node into cluster run `docker node accept %v` in an existing cluster manager", nodeid)
327
+		}
328
+		return ErrSwarmJoinTimeoutReached
329
+	case <-n.Ready(context.Background()):
330
+		go c.reconnectOnFailure(ctx)
331
+		return nil
332
+	case <-ctx.Done():
333
+		c.RLock()
334
+		defer c.RUnlock()
335
+		if c.err != nil {
336
+			return c.err
337
+		}
338
+		return ctx.Err()
339
+	}
340
+}
341
+
342
+func (c *Cluster) cancelReconnect() {
343
+	c.stop = true
344
+	if c.cancelDelay != nil {
345
+		c.cancelDelay()
346
+		c.cancelDelay = nil
347
+	}
348
+}
349
+
350
+// Leave shuts down the Cluster and removes its current state.
351
+func (c *Cluster) Leave(force bool) error {
352
+	c.Lock()
353
+	node := c.node
354
+	if node == nil {
355
+		c.Unlock()
356
+		return ErrNoSwarm
357
+	}
358
+
359
+	if node.Manager() != nil && !force {
360
+		msg := "You are attempting to leave cluster on a node that is participating as a manager. "
361
+		if c.isActiveManager() {
362
+			active, reachable, unreachable, err := c.managerStats()
363
+			if err == nil {
364
+				if active && reachable-2 <= unreachable {
365
+					if reachable == 1 && unreachable == 0 {
366
+						msg += "Leaving last manager will remove all current state of the cluster. Use `--force` to ignore this message. "
367
+						c.Unlock()
368
+						return fmt.Errorf(msg)
369
+					}
370
+					msg += fmt.Sprintf("Leaving cluster will leave you with  %v managers out of %v. This means Raft quorum will be lost and your cluster will become inaccessible. ", reachable-1, reachable+unreachable)
371
+				}
372
+			}
373
+		} else {
374
+			msg += "Doing so may lose the consenus of your cluster. "
375
+		}
376
+
377
+		msg += "Only way to restore a cluster that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to ignore this message."
378
+		c.Unlock()
379
+		return fmt.Errorf(msg)
380
+	}
381
+	c.cancelReconnect()
382
+	c.Unlock()
383
+
384
+	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
385
+	defer cancel()
386
+	if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
387
+		return err
388
+	}
389
+	nodeID := node.NodeID()
390
+	for _, id := range c.config.Backend.ListContainersForNode(nodeID) {
391
+		if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
392
+			logrus.Errorf("error removing %v: %v", id, err)
393
+		}
394
+	}
395
+	c.Lock()
396
+	defer c.Unlock()
397
+	c.node = nil
398
+	c.conn = nil
399
+	c.ready = false
400
+	c.configEvent <- struct{}{}
401
+	// todo: cleanup optional?
402
+	if err := c.clearState(); err != nil {
403
+		return err
404
+	}
405
+	return nil
406
+}
407
+
408
+func (c *Cluster) clearState() error {
409
+	if err := os.RemoveAll(c.root); err != nil {
410
+		return err
411
+	}
412
+	if err := os.MkdirAll(c.root, 0700); err != nil {
413
+		return err
414
+	}
415
+	c.config.Backend.SetClusterProvider(nil)
416
+	return nil
417
+}
418
+
419
+func (c *Cluster) getRequestContext() context.Context { // TODO: not needed once requests don't block on quorum loss
420
+	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
421
+	return ctx
422
+}
423
+
424
+// Inspect retrieves the configuration properties of the managed swarm cluster.
425
+func (c *Cluster) Inspect() (types.Swarm, error) {
426
+	c.RLock()
427
+	defer c.RUnlock()
428
+
429
+	if !c.isActiveManager() {
430
+		return types.Swarm{}, ErrNoManager
431
+	}
432
+
433
+	swarm, err := getSwarm(c.getRequestContext(), c.client)
434
+	if err != nil {
435
+		return types.Swarm{}, err
436
+	}
441
+
442
+	return convert.SwarmFromGRPC(*swarm), nil
443
+}
444
+
445
+// Update updates configuration of a managed swarm cluster.
446
+func (c *Cluster) Update(version uint64, spec types.Spec) error {
447
+	c.RLock()
448
+	defer c.RUnlock()
449
+
450
+	if !c.isActiveManager() {
451
+		return ErrNoManager
452
+	}
453
+
454
+	swarmSpec, err := convert.SwarmSpecToGRPC(spec)
455
+	if err != nil {
456
+		return err
457
+	}
458
+
459
+	swarm, err := getSwarm(c.getRequestContext(), c.client)
460
+	if err != nil {
461
+		return err
462
+	}
463
+
464
+	_, err = c.client.UpdateCluster(
465
+		c.getRequestContext(),
466
+		&swarmapi.UpdateClusterRequest{
467
+			ClusterID: swarm.ID,
468
+			Spec:      &swarmSpec,
469
+			ClusterVersion: &swarmapi.Version{
470
+				Index: version,
471
+			},
472
+		},
473
+	)
474
+	return err
475
+}
476
+
477
+// IsManager returns true if the Cluster is participating as a manager.
478
+func (c *Cluster) IsManager() bool {
479
+	c.RLock()
480
+	defer c.RUnlock()
481
+	return c.isActiveManager()
482
+}
483
+
484
+// IsAgent returns true if the Cluster is participating as a worker/agent.
485
+func (c *Cluster) IsAgent() bool {
486
+	c.RLock()
487
+	defer c.RUnlock()
488
+	return c.ready
489
+}
490
+
491
+// GetListenAddress returns the listening address for the current manager's
492
+// consensus and dispatcher APIs.
493
+func (c *Cluster) GetListenAddress() string {
494
+	c.RLock()
495
+	defer c.RUnlock()
496
+	if c.conn != nil {
497
+		return c.listenAddr
498
+	}
499
+	return ""
500
+}
501
+
502
+// GetRemoteAddress returns a known advertise address of a remote manager if
503
+// available.
504
+// todo: change to array/connect with info
505
+func (c *Cluster) GetRemoteAddress() string {
506
+	c.RLock()
507
+	defer c.RUnlock()
508
+	return c.getRemoteAddress()
509
+}
510
+
511
+func (c *Cluster) getRemoteAddress() string {
512
+	if c.node == nil {
513
+		return ""
514
+	}
515
+	nodeID := c.node.NodeID()
516
+	for _, r := range c.node.Remotes() {
517
+		if r.NodeID != nodeID {
518
+			return r.Addr
519
+		}
520
+	}
521
+	return ""
522
+}
523
+
524
+// ListenClusterEvents returns a channel that receives messages on cluster
525
+// participation changes.
526
+// todo: make cancelable and accessible to multiple callers
527
+func (c *Cluster) ListenClusterEvents() <-chan struct{} {
528
+	return c.configEvent
529
+}
530
+
531
+// Info returns information about the current cluster state.
532
+func (c *Cluster) Info() types.Info {
533
+	var info types.Info
534
+	c.RLock()
535
+	defer c.RUnlock()
536
+
537
+	if c.node == nil {
538
+		info.LocalNodeState = types.LocalNodeStateInactive
539
+		if c.cancelDelay != nil {
540
+			info.LocalNodeState = types.LocalNodeStateError
541
+		}
542
+	} else {
543
+		info.LocalNodeState = types.LocalNodeStatePending
544
+		if c.ready == true {
545
+			info.LocalNodeState = types.LocalNodeStateActive
546
+		}
547
+	}
548
+	if c.err != nil {
549
+		info.Error = c.err.Error()
550
+	}
551
+
552
+	if c.isActiveManager() {
553
+		info.ControlAvailable = true
554
+		if r, err := c.client.ListNodes(c.getRequestContext(), &swarmapi.ListNodesRequest{}); err == nil {
555
+			info.Nodes = len(r.Nodes)
556
+			for _, n := range r.Nodes {
557
+				if n.ManagerStatus != nil {
558
+					info.Managers++
559
+				}
560
+			}
561
+		}
562
+
563
+		if swarm, err := getSwarm(c.getRequestContext(), c.client); err == nil && swarm != nil {
564
+			info.CACertHash = swarm.RootCA.CACertHash
565
+		}
566
+	}
567
+
568
+	if c.node != nil {
569
+		for _, r := range c.node.Remotes() {
570
+			info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr})
571
+		}
572
+		info.NodeID = c.node.NodeID()
573
+	}
574
+
575
+	return info
576
+}
577
+
578
+// isActiveManager should not be called without a read lock
579
+func (c *Cluster) isActiveManager() bool {
580
+	return c.conn != nil
581
+}
582
+
583
+// GetServices returns all services of a managed swarm cluster.
584
+func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) {
585
+	c.RLock()
586
+	defer c.RUnlock()
587
+
588
+	if !c.isActiveManager() {
589
+		return nil, ErrNoManager
590
+	}
591
+
592
+	filters, err := newListServicesFilters(options.Filter)
593
+	if err != nil {
594
+		return nil, err
595
+	}
596
+	r, err := c.client.ListServices(
597
+		c.getRequestContext(),
598
+		&swarmapi.ListServicesRequest{Filters: filters})
599
+	if err != nil {
600
+		return nil, err
601
+	}
602
+
603
+	var services []types.Service
604
+
605
+	for _, service := range r.Services {
606
+		services = append(services, convert.ServiceFromGRPC(*service))
607
+	}
608
+
609
+	return services, nil
610
+}
611
+
612
+// CreateService creates a new service in a managed swarm cluster.
613
+func (c *Cluster) CreateService(s types.ServiceSpec) (string, error) {
614
+	c.RLock()
615
+	defer c.RUnlock()
616
+
617
+	if !c.isActiveManager() {
618
+		return "", ErrNoManager
619
+	}
620
+
621
+	ctx := c.getRequestContext()
622
+
623
+	err := populateNetworkID(ctx, c.client, &s)
624
+	if err != nil {
625
+		return "", err
626
+	}
627
+
628
+	serviceSpec, err := convert.ServiceSpecToGRPC(s)
629
+	if err != nil {
630
+		return "", err
631
+	}
632
+	r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
633
+	if err != nil {
634
+		return "", err
635
+	}
636
+
637
+	return r.Service.ID, nil
638
+}
639
+
640
+// GetService returns a service based on an ID or name.
641
+func (c *Cluster) GetService(input string) (types.Service, error) {
642
+	c.RLock()
643
+	defer c.RUnlock()
644
+
645
+	if !c.isActiveManager() {
646
+		return types.Service{}, ErrNoManager
647
+	}
648
+
649
+	service, err := getService(c.getRequestContext(), c.client, input)
650
+	if err != nil {
651
+		return types.Service{}, err
652
+	}
653
+	return convert.ServiceFromGRPC(*service), nil
654
+}
655
+
656
+// UpdateService updates an existing service to match new properties.
657
+func (c *Cluster) UpdateService(serviceID string, version uint64, spec types.ServiceSpec) error {
658
+	c.RLock()
659
+	defer c.RUnlock()
660
+
661
+	if !c.isActiveManager() {
662
+		return ErrNoManager
663
+	}
664
+
665
+	serviceSpec, err := convert.ServiceSpecToGRPC(spec)
666
+	if err != nil {
667
+		return err
668
+	}
669
+
670
+	_, err = c.client.UpdateService(
671
+		c.getRequestContext(),
672
+		&swarmapi.UpdateServiceRequest{
673
+			ServiceID: serviceID,
674
+			Spec:      &serviceSpec,
675
+			ServiceVersion: &swarmapi.Version{
676
+				Index: version,
677
+			},
678
+		},
679
+	)
680
+	return err
681
+}
682
+
683
+// RemoveService removes a service from a managed swarm cluster.
684
+func (c *Cluster) RemoveService(input string) error {
685
+	c.RLock()
686
+	defer c.RUnlock()
687
+
688
+	if !c.isActiveManager() {
689
+		return ErrNoManager
690
+	}
691
+
692
+	service, err := getService(c.getRequestContext(), c.client, input)
693
+	if err != nil {
694
+		return err
695
+	}
696
+
697
+	if _, err := c.client.RemoveService(c.getRequestContext(), &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil {
698
+		return err
699
+	}
700
+	return nil
701
+}
702
+
703
+// GetNodes returns a list of all nodes known to a cluster.
704
+func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) {
705
+	c.RLock()
706
+	defer c.RUnlock()
707
+
708
+	if !c.isActiveManager() {
709
+		return nil, ErrNoManager
710
+	}
711
+
712
+	filters, err := newListNodesFilters(options.Filter)
713
+	if err != nil {
714
+		return nil, err
715
+	}
716
+	r, err := c.client.ListNodes(
717
+		c.getRequestContext(),
718
+		&swarmapi.ListNodesRequest{Filters: filters})
719
+	if err != nil {
720
+		return nil, err
721
+	}
722
+
723
+	nodes := []types.Node{}
724
+
725
+	for _, node := range r.Nodes {
726
+		nodes = append(nodes, convert.NodeFromGRPC(*node))
727
+	}
728
+	return nodes, nil
729
+}
730
+
731
+// GetNode returns a node based on an ID or name.
732
+func (c *Cluster) GetNode(input string) (types.Node, error) {
733
+	c.RLock()
734
+	defer c.RUnlock()
735
+
736
+	if !c.isActiveManager() {
737
+		return types.Node{}, ErrNoManager
738
+	}
739
+
740
+	node, err := getNode(c.getRequestContext(), c.client, input)
741
+	if err != nil {
742
+		return types.Node{}, err
743
+	}
744
+	return convert.NodeFromGRPC(*node), nil
745
+}
746
+
747
+// UpdateNode updates an existing node's properties.
748
+func (c *Cluster) UpdateNode(nodeID string, version uint64, spec types.NodeSpec) error {
749
+	c.RLock()
750
+	defer c.RUnlock()
751
+
752
+	if !c.isActiveManager() {
753
+		return ErrNoManager
754
+	}
755
+
756
+	nodeSpec, err := convert.NodeSpecToGRPC(spec)
757
+	if err != nil {
758
+		return err
759
+	}
760
+
761
+	_, err = c.client.UpdateNode(
762
+		c.getRequestContext(),
763
+		&swarmapi.UpdateNodeRequest{
764
+			NodeID: nodeID,
765
+			Spec:   &nodeSpec,
766
+			NodeVersion: &swarmapi.Version{
767
+				Index: version,
768
+			},
769
+		},
770
+	)
771
+	return err
772
+}
773
+
774
+// RemoveNode removes a node from a cluster
775
+func (c *Cluster) RemoveNode(input string) error {
776
+	c.RLock()
777
+	defer c.RUnlock()
778
+
779
+	if !c.isActiveManager() {
780
+		return ErrNoManager
781
+	}
782
+
783
+	ctx := c.getRequestContext()
784
+
785
+	node, err := getNode(ctx, c.client, input)
786
+	if err != nil {
787
+		return err
788
+	}
789
+
790
+	if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID}); err != nil {
791
+		return err
792
+	}
793
+	return nil
794
+}
795
+
796
+// GetTasks returns a list of tasks matching the filter options.
797
+func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) {
798
+	c.RLock()
799
+	defer c.RUnlock()
800
+
801
+	if !c.isActiveManager() {
802
+		return nil, ErrNoManager
803
+	}
804
+
805
+	filters, err := newListTasksFilters(options.Filter)
806
+	if err != nil {
807
+		return nil, err
808
+	}
809
+	r, err := c.client.ListTasks(
810
+		c.getRequestContext(),
811
+		&swarmapi.ListTasksRequest{Filters: filters})
812
+	if err != nil {
813
+		return nil, err
814
+	}
815
+
816
+	tasks := []types.Task{}
817
+
818
+	for _, task := range r.Tasks {
819
+		tasks = append(tasks, convert.TaskFromGRPC(*task))
820
+	}
821
+	return tasks, nil
822
+}
823
+
824
+// GetTask returns a task by an ID.
825
+func (c *Cluster) GetTask(input string) (types.Task, error) {
826
+	c.RLock()
827
+	defer c.RUnlock()
828
+
829
+	if !c.isActiveManager() {
830
+		return types.Task{}, ErrNoManager
831
+	}
832
+
833
+	task, err := getTask(c.getRequestContext(), c.client, input)
834
+	if err != nil {
835
+		return types.Task{}, err
836
+	}
837
+	return convert.TaskFromGRPC(*task), nil
838
+}
839
+
840
+// GetNetwork returns a cluster network by ID.
841
+func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) {
842
+	c.RLock()
843
+	defer c.RUnlock()
844
+
845
+	if !c.isActiveManager() {
846
+		return apitypes.NetworkResource{}, ErrNoManager
847
+	}
848
+
849
+	network, err := getNetwork(c.getRequestContext(), c.client, input)
850
+	if err != nil {
851
+		return apitypes.NetworkResource{}, err
852
+	}
853
+	return convert.BasicNetworkFromGRPC(*network), nil
854
+}
855
+
856
+// GetNetworks returns all current cluster managed networks.
857
+func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) {
858
+	c.RLock()
859
+	defer c.RUnlock()
860
+
861
+	if !c.isActiveManager() {
862
+		return nil, ErrNoManager
863
+	}
864
+
865
+	r, err := c.client.ListNetworks(c.getRequestContext(), &swarmapi.ListNetworksRequest{})
866
+	if err != nil {
867
+		return nil, err
868
+	}
869
+
870
+	var networks []apitypes.NetworkResource
871
+
872
+	for _, network := range r.Networks {
873
+		networks = append(networks, convert.BasicNetworkFromGRPC(*network))
874
+	}
875
+
876
+	return networks, nil
877
+}
878
+
879
+// CreateNetwork creates a new cluster managed network.
880
+func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) {
881
+	c.RLock()
882
+	defer c.RUnlock()
883
+
884
+	if !c.isActiveManager() {
885
+		return "", ErrNoManager
886
+	}
887
+
888
+	if runconfig.IsPreDefinedNetwork(s.Name) {
889
+		err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name)
890
+		return "", errors.NewRequestForbiddenError(err)
891
+	}
892
+
893
+	networkSpec := convert.BasicNetworkCreateToGRPC(s)
894
+	r, err := c.client.CreateNetwork(c.getRequestContext(), &swarmapi.CreateNetworkRequest{Spec: &networkSpec})
895
+	if err != nil {
896
+		return "", err
897
+	}
898
+
899
+	return r.Network.ID, nil
900
+}
901
+
902
+// RemoveNetwork removes a cluster network.
903
+func (c *Cluster) RemoveNetwork(input string) error {
904
+	c.RLock()
905
+	defer c.RUnlock()
906
+
907
+	if !c.isActiveManager() {
908
+		return ErrNoManager
909
+	}
910
+
911
+	network, err := getNetwork(c.getRequestContext(), c.client, input)
912
+	if err != nil {
913
+		return err
914
+	}
915
+
916
+	if _, err := c.client.RemoveNetwork(c.getRequestContext(), &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil {
917
+		return err
918
+	}
919
+	return nil
920
+}
921
+
922
+func populateNetworkID(ctx context.Context, c swarmapi.ControlClient, s *types.ServiceSpec) error {
923
+	for i, n := range s.Networks {
924
+		apiNetwork, err := getNetwork(ctx, c, n.Target)
925
+		if err != nil {
926
+			return err
927
+		}
928
+		s.Networks[i] = types.NetworkAttachmentConfig{Target: apiNetwork.ID}
929
+	}
930
+	return nil
931
+}
932
+
933
+func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) {
934
+	// GetNetwork to match via full ID.
935
+	rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input})
936
+	if err != nil {
937
+		// If any error (including NotFound), ListNetworks to match via ID prefix and full name.
938
+		rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}})
939
+		if err != nil || len(rl.Networks) == 0 {
940
+			rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}})
941
+		}
942
+
943
+		if err != nil {
944
+			return nil, err
945
+		}
946
+
947
+		if len(rl.Networks) == 0 {
948
+			return nil, fmt.Errorf("network %s not found", input)
949
+		}
950
+
951
+		if l := len(rl.Networks); l > 1 {
952
+			return nil, fmt.Errorf("network %s is ambigious (%d matches found)", input, l)
953
+		}
954
+
955
+		return rl.Networks[0], nil
956
+	}
957
+	return rg.Network, nil
958
+}
959
+
960
+// Cleanup stops the active swarm node. This is run before daemon shutdown.
961
+func (c *Cluster) Cleanup() {
962
+	c.Lock()
963
+	node := c.node
964
+	if node == nil {
965
+		c.Unlock()
966
+		return
967
+	}
968
+
969
+	if c.isActiveManager() {
970
+		active, reachable, unreachable, err := c.managerStats()
971
+		if err == nil {
972
+			singlenode := active && reachable == 1 && unreachable == 0
973
+			if active && !singlenode && reachable-2 <= unreachable {
974
+				logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable)
975
+			}
976
+		}
977
+	}
978
+	c.cancelReconnect()
979
+	c.Unlock()
980
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
981
+	defer cancel()
982
+	if err := node.Stop(ctx); err != nil {
983
+		logrus.Errorf("error cleaning up cluster: %v", err)
984
+	}
985
+	c.Lock()
986
+	c.node = nil
987
+	c.ready = false
988
+	c.conn = nil
989
+	c.Unlock()
990
+}
991
+
992
+func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) {
993
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer cancel()
994
+	nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{})
995
+	if err != nil {
996
+		return false, 0, 0, err
997
+	}
998
+	for _, n := range nodes.Nodes {
999
+		if n.ManagerStatus != nil {
1000
+			if n.ManagerStatus.Raft.Status.Reachability == swarmapi.RaftMemberStatus_REACHABLE {
1001
+				reachable++
1002
+				if n.ID == c.node.NodeID() {
1003
+					current = true
1004
+				}
1005
+			}
1006
+			if n.ManagerStatus.Raft.Status.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE {
1007
+				unreachable++
1008
+			}
1009
+		}
1010
+	}
1011
+	return
1012
+}
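The quorum warning in Cleanup is plain raft arithmetic: after this node leaves, `reachable-1` managers remain out of `reachable+unreachable` members, and raft needs a strict majority of them reachable. A worked sketch of the same check (the sample values are illustrative):

```go
package main

import "fmt"

// quorumLostAfterLeave mirrors the `reachable-2 <= unreachable` test in
// Cleanup: after one reachable manager departs, reachable-1 remain, and
// raft needs a strict majority of the reachable+unreachable total.
func quorumLostAfterLeave(reachable, unreachable int) bool {
	remaining := reachable - 1
	total := reachable + unreachable
	return remaining <= total/2 // no strict majority left
}

func main() {
	// 3 reachable, 1 unreachable: 2 of 4 remain, so quorum is lost.
	fmt.Println(quorumLostAfterLeave(3, 1)) // true
	// 5 reachable, 0 unreachable: 4 of 5 remain, quorum holds.
	fmt.Println(quorumLostAfterLeave(5, 0)) // false
}
```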
1013
+
1014
+func initAcceptancePolicy(node *swarmagent.Node, acceptancePolicy types.AcceptancePolicy) error {
1015
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
1016
+	for conn := range node.ListenControlSocket(ctx) {
1017
+		if ctx.Err() != nil {
1018
+			return ctx.Err()
1019
+		}
1020
+		if conn != nil {
1021
+			client := swarmapi.NewControlClient(conn)
1022
+			var cluster *swarmapi.Cluster
1023
+			for i := 0; ; i++ {
1024
+				lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{})
1025
+				if err != nil {
1026
+					return fmt.Errorf("error on listing clusters: %v", err)
1027
+				}
1028
+				if len(lcr.Clusters) == 0 {
1029
+					if i < 10 {
1030
+						time.Sleep(200 * time.Millisecond)
1031
+						continue
1032
+					}
1033
+					return fmt.Errorf("empty list of clusters was returned")
1034
+				}
1035
+				cluster = lcr.Clusters[0]
1036
+				break
1037
+			}
1038
+			spec := &cluster.Spec
1039
+
1040
+			if err := convert.SwarmSpecUpdateAcceptancePolicy(spec, acceptancePolicy); err != nil {
1041
+				return fmt.Errorf("error updating cluster settings: %v", err)
1042
+			}
1043
+			_, err := client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
1044
+				ClusterID:      cluster.ID,
1045
+				ClusterVersion: &cluster.Meta.Version,
1046
+				Spec:           spec,
1047
+			})
1048
+			if err != nil {
1049
+				return fmt.Errorf("error updating cluster settings: %v", err)
1050
+			}
1051
+			return nil
1052
+		}
1053
+	}
1054
+	return ctx.Err()
1055
+}
0 1056
new file mode 100644
... ...
@@ -0,0 +1,116 @@
0
+package convert
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+
6
+	types "github.com/docker/engine-api/types/swarm"
7
+	swarmapi "github.com/docker/swarmkit/api"
8
+	"github.com/docker/swarmkit/protobuf/ptypes"
9
+)
10
+
11
+func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
12
+	containerSpec := types.ContainerSpec{
13
+		Image:   c.Image,
14
+		Labels:  c.Labels,
15
+		Command: c.Command,
16
+		Args:    c.Args,
17
+		Env:     c.Env,
18
+		Dir:     c.Dir,
19
+		User:    c.User,
20
+	}
21
+
22
+	// Mounts
23
+	for _, m := range c.Mounts {
24
+		mount := types.Mount{
25
+			Target:   m.Target,
26
+			Source:   m.Source,
27
+			Type:     types.MountType(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])),
28
+			Writable: m.Writable,
29
+		}
30
+
31
+		if m.BindOptions != nil {
32
+			mount.BindOptions = &types.BindOptions{
33
+				Propagation: types.MountPropagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])),
34
+			}
35
+		}
36
+
37
+		if m.VolumeOptions != nil {
38
+			mount.VolumeOptions = &types.VolumeOptions{
39
+				Populate: m.VolumeOptions.Populate,
40
+				Labels:   m.VolumeOptions.Labels,
41
+			}
42
+			if m.VolumeOptions.DriverConfig != nil {
43
+				mount.VolumeOptions.DriverConfig = &types.Driver{
44
+					Name:    m.VolumeOptions.DriverConfig.Name,
45
+					Options: m.VolumeOptions.DriverConfig.Options,
46
+				}
47
+			}
48
+		}
49
+		containerSpec.Mounts = append(containerSpec.Mounts, mount)
50
+	}
51
+
52
+	if c.StopGracePeriod != nil {
53
+		grace, _ := ptypes.Duration(c.StopGracePeriod)
54
+		containerSpec.StopGracePeriod = &grace
55
+	}
56
+	return containerSpec
57
+}
58
+
59
+func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
60
+	containerSpec := &swarmapi.ContainerSpec{
61
+		Image:   c.Image,
62
+		Labels:  c.Labels,
63
+		Command: c.Command,
64
+		Args:    c.Args,
65
+		Env:     c.Env,
66
+		Dir:     c.Dir,
67
+		User:    c.User,
68
+	}
69
+
70
+	if c.StopGracePeriod != nil {
71
+		containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod)
72
+	}
73
+
74
+	// Mounts
75
+	for _, m := range c.Mounts {
76
+		mount := swarmapi.Mount{
77
+			Target:   m.Target,
78
+			Source:   m.Source,
79
+			Writable: m.Writable,
80
+		}
81
+
82
+		if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok {
83
+			mount.Type = swarmapi.Mount_MountType(mountType)
84
+		} else if string(m.Type) != "" {
85
+			return nil, fmt.Errorf("invalid MountType: %q", m.Type)
86
+		}
87
+
88
+		if m.BindOptions != nil {
89
+			if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok {
90
+				mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)}
91
+			} else if string(m.BindOptions.Propagation) != "" {
92
+				return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation)
93
+
94
+			}
95
+
96
+		}
97
+
98
+		if m.VolumeOptions != nil {
99
+			mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
100
+				Populate: m.VolumeOptions.Populate,
101
+				Labels:   m.VolumeOptions.Labels,
102
+			}
103
+			if m.VolumeOptions.DriverConfig != nil {
104
+				mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
105
+					Name:    m.VolumeOptions.DriverConfig.Name,
106
+					Options: m.VolumeOptions.DriverConfig.Options,
107
+				}
108
+			}
109
+		}
110
+
111
+		containerSpec.Mounts = append(containerSpec.Mounts, mount)
112
+	}
113
+
114
+	return containerSpec, nil
115
+}
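Both directions lean on the maps protoc generates for every enum: `Mount_MountType_value` maps an upper-cased name to its numeric value, and `Mount_MountType_name` maps back. A round-trip sketch with hand-rolled maps standing in for the generated ones (values here are illustrative, not the real swarmkit constants):

```go
package main

import (
	"fmt"
	"strings"
)

// Hand-rolled stand-ins for the protoc-generated maps, e.g.
// swarmapi.Mount_MountType_value and swarmapi.Mount_MountType_name.
var (
	mountTypeValue = map[string]int32{"BIND": 0, "VOLUME": 1}
	mountTypeName  = map[int32]string{0: "BIND", 1: "VOLUME"}
)

// toGRPC upper-cases the engine-side string and rejects unknown names,
// as containerToGRPC does for mount types.
func toGRPC(s string) (int32, error) {
	if v, ok := mountTypeValue[strings.ToUpper(s)]; ok {
		return v, nil
	}
	return 0, fmt.Errorf("invalid MountType: %q", s)
}

// fromGRPC lower-cases the generated name, as containerSpecFromGRPC does.
func fromGRPC(v int32) string {
	return strings.ToLower(mountTypeName[v])
}

func main() {
	v, _ := toGRPC("volume")
	fmt.Println(v, fromGRPC(v)) // 1 volume
	if _, err := toGRPC("tmpfs"); err != nil {
		fmt.Println(err) // invalid MountType: "tmpfs"
	}
}
```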
0 116
new file mode 100644
... ...
@@ -0,0 +1,194 @@
0
+package convert
1
+
2
+import (
3
+	"strings"
4
+
5
+	basictypes "github.com/docker/engine-api/types"
6
+	networktypes "github.com/docker/engine-api/types/network"
7
+	types "github.com/docker/engine-api/types/swarm"
8
+	swarmapi "github.com/docker/swarmkit/api"
9
+	"github.com/docker/swarmkit/protobuf/ptypes"
10
+)
11
+
12
+func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment {
13
+	if na != nil {
14
+		return types.NetworkAttachment{
15
+			Network:   networkFromGRPC(na.Network),
16
+			Addresses: na.Addresses,
17
+		}
18
+	}
19
+	return types.NetworkAttachment{}
20
+}
21
+
22
+func networkFromGRPC(n *swarmapi.Network) types.Network {
23
+	if n != nil {
24
+		network := types.Network{
25
+			ID: n.ID,
26
+			Spec: types.NetworkSpec{
27
+				IPv6Enabled: n.Spec.Ipv6Enabled,
28
+				Internal:    n.Spec.Internal,
29
+				IPAMOptions: ipamFromGRPC(n.Spec.IPAM),
30
+			},
31
+			IPAMOptions: ipamFromGRPC(n.IPAM),
32
+		}
33
+
34
+		// Meta
35
+		network.Version.Index = n.Meta.Version.Index
36
+		network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
37
+		network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
38
+
39
+	// Annotations
40
+		network.Spec.Name = n.Spec.Annotations.Name
41
+		network.Spec.Labels = n.Spec.Annotations.Labels
42
+
43
+	// DriverConfiguration
44
+		if n.Spec.DriverConfig != nil {
45
+			network.Spec.DriverConfiguration = &types.Driver{
46
+				Name:    n.Spec.DriverConfig.Name,
47
+				Options: n.Spec.DriverConfig.Options,
48
+			}
49
+		}
50
+
51
+	// DriverState
52
+		if n.DriverState != nil {
53
+			network.DriverState = types.Driver{
54
+				Name:    n.DriverState.Name,
55
+				Options: n.DriverState.Options,
56
+			}
57
+		}
58
+
59
+		return network
60
+	}
61
+	return types.Network{}
62
+}
63
+
64
+func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions {
65
+	var ipam *types.IPAMOptions
66
+	if i != nil {
67
+		ipam = &types.IPAMOptions{}
68
+		if i.Driver != nil {
69
+			ipam.Driver.Name = i.Driver.Name
70
+			ipam.Driver.Options = i.Driver.Options
71
+		}
72
+
73
+		for _, config := range i.Configs {
74
+			ipam.Configs = append(ipam.Configs, types.IPAMConfig{
75
+				Subnet:  config.Subnet,
76
+				Range:   config.Range,
77
+				Gateway: config.Gateway,
78
+			})
79
+		}
80
+	}
81
+	return ipam
82
+}
83
+
84
+func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec {
85
+	var endpointSpec *types.EndpointSpec
86
+	if es != nil {
87
+		endpointSpec = &types.EndpointSpec{}
88
+		endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String()))
89
+
90
+		for _, portState := range es.Ports {
91
+			endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{
92
+				Name:          portState.Name,
93
+				Protocol:      types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
94
+				TargetPort:    portState.TargetPort,
95
+				PublishedPort: portState.PublishedPort,
96
+			})
97
+		}
98
+	}
99
+	return endpointSpec
100
+}
101
+
102
+func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
103
+	endpoint := types.Endpoint{}
104
+	if e != nil {
105
+		if espec := endpointSpecFromGRPC(e.Spec); espec != nil {
106
+			endpoint.Spec = *espec
107
+		}
108
+
109
+		for _, portState := range e.Ports {
110
+			endpoint.Ports = append(endpoint.Ports, types.PortConfig{
111
+				Name:          portState.Name,
112
+				Protocol:      types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])),
113
+				TargetPort:    portState.TargetPort,
114
+				PublishedPort: portState.PublishedPort,
115
+			})
116
+		}
117
+
118
+		for _, v := range e.VirtualIPs {
119
+			endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{
120
+				NetworkID: v.NetworkID,
121
+				Addr:      v.Addr})
122
+		}
123
+	}
125
+
126
+	return endpoint
127
+}
128
+
129
+// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource.
130
+func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource {
131
+	spec := n.Spec
132
+	var ipam networktypes.IPAM
133
+	if spec.IPAM != nil {
134
+		if spec.IPAM.Driver != nil {
135
+			ipam.Driver = spec.IPAM.Driver.Name
136
+			ipam.Options = spec.IPAM.Driver.Options
137
+		}
138
+		ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs))
139
+		for _, ic := range spec.IPAM.Configs {
140
+			ipamConfig := networktypes.IPAMConfig{
141
+				Subnet:     ic.Subnet,
142
+				IPRange:    ic.Range,
143
+				Gateway:    ic.Gateway,
144
+				AuxAddress: ic.Reserved,
145
+			}
146
+			ipam.Config = append(ipam.Config, ipamConfig)
147
+		}
148
+	}
149
+
150
+	return basictypes.NetworkResource{
151
+		ID:         n.ID,
152
+		Name:       n.Spec.Annotations.Name,
153
+		Scope:      "swarm",
154
+		Driver:     n.DriverState.Name,
155
+		EnableIPv6: spec.Ipv6Enabled,
156
+		IPAM:       ipam,
157
+		Internal:   spec.Internal,
158
+		Options:    n.DriverState.Options,
159
+		Labels:     n.Spec.Annotations.Labels,
160
+	}
161
+}
162
+
163
+// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec.
164
+func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec {
165
+	ns := swarmapi.NetworkSpec{
166
+		Annotations: swarmapi.Annotations{
167
+			Name:   create.Name,
168
+			Labels: create.Labels,
169
+		},
170
+		DriverConfig: &swarmapi.Driver{
171
+			Name:    create.Driver,
172
+			Options: create.Options,
173
+		},
174
+		Ipv6Enabled: create.EnableIPv6,
175
+		Internal:    create.Internal,
176
+		IPAM: &swarmapi.IPAMOptions{
177
+			Driver: &swarmapi.Driver{
178
+				Name:    create.IPAM.Driver,
179
+				Options: create.IPAM.Options,
180
+			},
181
+		},
182
+	}
183
+	ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config))
184
+	for _, ipamConfig := range create.IPAM.Config {
185
+		ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{
186
+			Subnet:  ipamConfig.Subnet,
187
+			Range:   ipamConfig.IPRange,
188
+			Gateway: ipamConfig.Gateway,
189
+		})
190
+	}
191
+	ns.IPAM.Configs = ipamSpec
192
+	return ns
193
+}
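A hedged usage sketch of the conversion above; the field names follow the engine-api types vendored by this PR, the package import path assumes the file lives under daemon/cluster/convert, and the network values are made up:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/cluster/convert"
	basictypes "github.com/docker/engine-api/types"
	networktypes "github.com/docker/engine-api/types/network"
)

func main() {
	// Illustrative values only.
	req := basictypes.NetworkCreateRequest{
		Name: "backend",
		NetworkCreate: basictypes.NetworkCreate{
			Driver: "overlay",
			IPAM: networktypes.IPAM{
				Driver: "default",
				Config: []networktypes.IPAMConfig{{Subnet: "10.0.9.0/24"}},
			},
		},
	}

	spec := convert.BasicNetworkCreateToGRPC(req)
	fmt.Println(spec.Annotations.Name, spec.DriverConfig.Name) // backend overlay
}
```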
0 194
new file mode 100644
... ...
@@ -0,0 +1,95 @@
0
+package convert
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+
6
+	types "github.com/docker/engine-api/types/swarm"
7
+	swarmapi "github.com/docker/swarmkit/api"
8
+	"github.com/docker/swarmkit/protobuf/ptypes"
9
+)
10
+
11
+// NodeFromGRPC converts a grpc Node to a Node.
12
+func NodeFromGRPC(n swarmapi.Node) types.Node {
13
+	node := types.Node{
14
+		ID: n.ID,
15
+		Spec: types.NodeSpec{
16
+			Role:         types.NodeRole(strings.ToLower(n.Spec.Role.String())),
17
+			Membership:   types.NodeMembership(strings.ToLower(n.Spec.Membership.String())),
18
+			Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())),
19
+		},
20
+		Status: types.NodeStatus{
21
+			State:   types.NodeState(strings.ToLower(n.Status.State.String())),
22
+			Message: n.Status.Message,
23
+		},
24
+	}
25
+
26
+	// Meta
27
+	node.Version.Index = n.Meta.Version.Index
28
+	node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt)
29
+	node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt)
30
+
31
+	// Annotations
32
+	node.Spec.Name = n.Spec.Annotations.Name
33
+	node.Spec.Labels = n.Spec.Annotations.Labels
34
+
35
+	// Description
36
+	if n.Description != nil {
37
+		node.Description.Hostname = n.Description.Hostname
38
+		if n.Description.Platform != nil {
39
+			node.Description.Platform.Architecture = n.Description.Platform.Architecture
40
+			node.Description.Platform.OS = n.Description.Platform.OS
41
+		}
42
+		if n.Description.Resources != nil {
43
+			node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs
44
+			node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes
45
+		}
46
+		if n.Description.Engine != nil {
47
+			node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion
48
+			node.Description.Engine.Labels = n.Description.Engine.Labels
49
+			for _, plugin := range n.Description.Engine.Plugins {
50
+				node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name})
51
+			}
52
+		}
53
+	}
54
+
55
+	// Manager
56
+	if n.ManagerStatus != nil {
57
+		node.ManagerStatus = &types.ManagerStatus{
58
+			Leader:       n.ManagerStatus.Raft.Status.Leader,
59
+			Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Raft.Status.Reachability.String())),
60
+			Addr:         n.ManagerStatus.Raft.Addr,
61
+		}
62
+	}
63
+
64
+	return node
65
+}
66
+
67
+// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec.
68
+func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) {
69
+	spec := swarmapi.NodeSpec{
70
+		Annotations: swarmapi.Annotations{
71
+			Name:   s.Name,
72
+			Labels: s.Labels,
73
+		},
74
+	}
75
+	if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok {
76
+		spec.Role = swarmapi.NodeRole(role)
77
+	} else {
78
+		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role)
79
+	}
80
+
81
+	if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(string(s.Membership))]; ok {
82
+		spec.Membership = swarmapi.NodeSpec_Membership(membership)
83
+	} else {
84
+		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Membership: %q", s.Membership)
85
+	}
86
+
87
+	if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok {
88
+		spec.Availability = swarmapi.NodeSpec_Availability(availability)
89
+	} else {
90
+		return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability)
91
+	}
92
+
93
+	return spec, nil
94
+}
0 95
new file mode 100644
... ...
@@ -0,0 +1,252 @@
0
+package convert
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+
6
+	"github.com/docker/docker/pkg/namesgenerator"
7
+	types "github.com/docker/engine-api/types/swarm"
8
+	swarmapi "github.com/docker/swarmkit/api"
9
+	"github.com/docker/swarmkit/protobuf/ptypes"
10
+)
11
+
12
+// ServiceFromGRPC converts a grpc Service to a Service.
13
+func ServiceFromGRPC(s swarmapi.Service) types.Service {
14
+	spec := s.Spec
15
+	containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container
16
+
17
+	networks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks))
18
+	for _, n := range spec.Networks {
19
+		networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
20
+	}
21
+	service := types.Service{
22
+		ID: s.ID,
23
+
24
+		Spec: types.ServiceSpec{
25
+			TaskTemplate: types.TaskSpec{
26
+				ContainerSpec: containerSpecFromGRPC(containerConfig),
27
+				Resources:     resourcesFromGRPC(s.Spec.Task.Resources),
28
+				RestartPolicy: restartPolicyFromGRPC(s.Spec.Task.Restart),
29
+				Placement:     placementFromGRPC(s.Spec.Task.Placement),
30
+			},
31
+
32
+			Networks:     networks,
33
+			EndpointSpec: endpointSpecFromGRPC(s.Spec.Endpoint),
34
+		},
35
+		Endpoint: endpointFromGRPC(s.Endpoint),
36
+	}
37
+
38
+	// Meta
39
+	service.Version.Index = s.Meta.Version.Index
40
+	service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt)
41
+	service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt)
42
+
43
+	// Annotations
44
+	service.Spec.Name = s.Spec.Annotations.Name
45
+	service.Spec.Labels = s.Spec.Annotations.Labels
46
+
47
+	// UpdateConfig
48
+	if s.Spec.Update != nil {
49
+		service.Spec.UpdateConfig = &types.UpdateConfig{
50
+			Parallelism: s.Spec.Update.Parallelism,
51
+		}
52
+
53
+		service.Spec.UpdateConfig.Delay, _ = ptypes.Duration(&s.Spec.Update.Delay)
54
+	}
55
+
56
+	// Mode
57
+	switch t := s.Spec.GetMode().(type) {
58
+	case *swarmapi.ServiceSpec_Global:
59
+		service.Spec.Mode.Global = &types.GlobalService{}
60
+	case *swarmapi.ServiceSpec_Replicated:
61
+		service.Spec.Mode.Replicated = &types.ReplicatedService{
62
+			Replicas: &t.Replicated.Replicas,
63
+		}
64
+	}
65
+
66
+	return service
67
+}
68
+
69
+// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec.
70
+func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
71
+	name := s.Name
72
+	if name == "" {
73
+		name = namesgenerator.GetRandomName(0)
74
+	}
75
+
76
+	networks := make([]*swarmapi.ServiceSpec_NetworkAttachmentConfig, 0, len(s.Networks))
77
+	for _, n := range s.Networks {
78
+		networks = append(networks, &swarmapi.ServiceSpec_NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases})
79
+	}
80
+
81
+	spec := swarmapi.ServiceSpec{
82
+		Annotations: swarmapi.Annotations{
83
+			Name:   name,
84
+			Labels: s.Labels,
85
+		},
86
+		Task: swarmapi.TaskSpec{
87
+			Resources: resourcesToGRPC(s.TaskTemplate.Resources),
88
+		},
89
+		Networks: networks,
90
+	}
91
+
92
+	containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
93
+	if err != nil {
94
+		return swarmapi.ServiceSpec{}, err
95
+	}
96
+	spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
97
+
98
+	restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy)
99
+	if err != nil {
100
+		return swarmapi.ServiceSpec{}, err
101
+	}
102
+	spec.Task.Restart = restartPolicy
103
+
104
+	if s.TaskTemplate.Placement != nil {
105
+		spec.Task.Placement = &swarmapi.Placement{
106
+			Constraints: s.TaskTemplate.Placement.Constraints,
107
+		}
108
+	}
109
+
110
+	if s.UpdateConfig != nil {
111
+		spec.Update = &swarmapi.UpdateConfig{
112
+			Parallelism: s.UpdateConfig.Parallelism,
113
+			Delay:       *ptypes.DurationProto(s.UpdateConfig.Delay),
114
+		}
115
+	}
116
+
117
+	if s.EndpointSpec != nil {
118
+		if s.EndpointSpec.Mode != "" &&
119
+			s.EndpointSpec.Mode != types.ResolutionModeVIP &&
120
+			s.EndpointSpec.Mode != types.ResolutionModeDNSRR {
121
+			return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode)
122
+		}
123
+
124
+		spec.Endpoint = &swarmapi.EndpointSpec{}
125
+
126
+		spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))])
127
+
128
+		for _, portConfig := range s.EndpointSpec.Ports {
129
+			spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{
130
+				Name:          portConfig.Name,
131
+				Protocol:      swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]),
132
+				TargetPort:    portConfig.TargetPort,
133
+				PublishedPort: portConfig.PublishedPort,
134
+			})
135
+		}
136
+	}
137
+
138
+	// Mode
139
+	if s.Mode.Global != nil {
140
+		spec.Mode = &swarmapi.ServiceSpec_Global{
141
+			Global: &swarmapi.GlobalService{},
142
+		}
143
+	} else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil {
144
+		spec.Mode = &swarmapi.ServiceSpec_Replicated{
145
+			Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas},
146
+		}
147
+	} else {
148
+		spec.Mode = &swarmapi.ServiceSpec_Replicated{
149
+			Replicated: &swarmapi.ReplicatedService{Replicas: 1},
150
+		}
151
+	}
152
+
153
+	return spec, nil
154
+}
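Note the defaulting at the end of ServiceSpecToGRPC: a spec that sets neither mode is sent to swarmkit as a replicated service with a single replica. A stripped-down sketch of that decision, with plain types standing in for the real engine-api/swarmkit ones:

```go
package main

import "fmt"

// mode is a plain stand-in for the engine-api ServiceMode.
type mode struct {
	global   bool
	replicas *uint64
}

// describeMode mirrors ServiceSpecToGRPC's defaulting: explicit global
// wins, then an explicit replica count, else one replica.
func describeMode(m mode) string {
	switch {
	case m.global:
		return "global"
	case m.replicas != nil:
		return fmt.Sprintf("replicated(%d)", *m.replicas)
	default:
		return "replicated(1)" // the implicit default
	}
}

func main() {
	three := uint64(3)
	fmt.Println(describeMode(mode{global: true}))     // global
	fmt.Println(describeMode(mode{replicas: &three})) // replicated(3)
	fmt.Println(describeMode(mode{}))                 // replicated(1)
}
```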
155
+
156
+func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements {
157
+	var resources *types.ResourceRequirements
158
+	if res != nil {
159
+		resources = &types.ResourceRequirements{}
160
+		if res.Limits != nil {
161
+			resources.Limits = &types.Resources{
162
+				NanoCPUs:    res.Limits.NanoCPUs,
163
+				MemoryBytes: res.Limits.MemoryBytes,
164
+			}
165
+		}
166
+		if res.Reservations != nil {
167
+			resources.Reservations = &types.Resources{
168
+				NanoCPUs:    res.Reservations.NanoCPUs,
169
+				MemoryBytes: res.Reservations.MemoryBytes,
170
+			}
171
+		}
172
+	}
173
+
174
+	return resources
175
+}
176
+
177
+func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements {
178
+	var reqs *swarmapi.ResourceRequirements
179
+	if res != nil {
180
+		reqs = &swarmapi.ResourceRequirements{}
181
+		if res.Limits != nil {
182
+			reqs.Limits = &swarmapi.Resources{
183
+				NanoCPUs:    res.Limits.NanoCPUs,
184
+				MemoryBytes: res.Limits.MemoryBytes,
185
+			}
186
+		}
187
+		if res.Reservations != nil {
188
+			reqs.Reservations = &swarmapi.Resources{
189
+				NanoCPUs:    res.Reservations.NanoCPUs,
190
+				MemoryBytes: res.Reservations.MemoryBytes,
191
+			}
192
+		}
194
+	}
195
+	return reqs
196
+}
197
+
198
+func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy {
199
+	var rp *types.RestartPolicy
200
+	if p != nil {
201
+		rp = &types.RestartPolicy{}
202
+		rp.Condition = types.RestartPolicyCondition(strings.ToLower(p.Condition.String()))
203
+		if p.Delay != nil {
204
+			delay, _ := ptypes.Duration(p.Delay)
205
+			rp.Delay = &delay
206
+		}
207
+		if p.Window != nil {
208
+			window, _ := ptypes.Duration(p.Window)
209
+			rp.Window = &window
210
+		}
211
+
212
+		rp.MaxAttempts = &p.MaxAttempts
213
+	}
214
+	return rp
215
+}
216
+
217
+func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) {
218
+	var rp *swarmapi.RestartPolicy
219
+	if p != nil {
220
+		rp = &swarmapi.RestartPolicy{}
221
+		if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[strings.ToUpper(string(p.Condition))]; ok {
222
+			rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition)
223
+		} else if string(p.Condition) == "" {
224
+			rp.Condition = swarmapi.RestartOnAny
225
+		} else {
226
+			return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition)
227
+		}
228
+
229
+		if p.Delay != nil {
230
+			rp.Delay = ptypes.DurationProto(*p.Delay)
231
+		}
232
+		if p.Window != nil {
233
+			rp.Window = ptypes.DurationProto(*p.Window)
234
+		}
235
+		if p.MaxAttempts != nil {
236
+			rp.MaxAttempts = *p.MaxAttempts
237
+		}
239
+	}
240
+	return rp, nil
241
+}
242
+
243
+func placementFromGRPC(p *swarmapi.Placement) *types.Placement {
244
+	var r *types.Placement
245
+	if p != nil {
246
+		r = &types.Placement{}
247
+		r.Constraints = p.Constraints
248
+	}
249
+
250
+	return r
251
+}
0 252
new file mode 100644
... ...
@@ -0,0 +1,116 @@
0
+package convert
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+
6
+	"golang.org/x/crypto/bcrypt"
7
+
8
+	types "github.com/docker/engine-api/types/swarm"
9
+	swarmapi "github.com/docker/swarmkit/api"
10
+	"github.com/docker/swarmkit/protobuf/ptypes"
11
+)
12
+
13
+// SwarmFromGRPC converts a grpc Cluster to a Swarm.
14
+func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
15
+	swarm := types.Swarm{
16
+		ID: c.ID,
17
+		Spec: types.Spec{
18
+			Orchestration: types.OrchestrationConfig{
19
+				TaskHistoryRetentionLimit: c.Spec.Orchestration.TaskHistoryRetentionLimit,
20
+			},
21
+			Raft: types.RaftConfig{
22
+				SnapshotInterval:           c.Spec.Raft.SnapshotInterval,
23
+				KeepOldSnapshots:           c.Spec.Raft.KeepOldSnapshots,
24
+				LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers,
25
+				HeartbeatTick:              c.Spec.Raft.HeartbeatTick,
26
+				ElectionTick:               c.Spec.Raft.ElectionTick,
27
+			},
28
+			Dispatcher: types.DispatcherConfig{
29
+				HeartbeatPeriod: c.Spec.Dispatcher.HeartbeatPeriod,
30
+			},
31
+		},
32
+	}
33
+
34
+	swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)
35
+
36
+	// Meta
37
+	swarm.Version.Index = c.Meta.Version.Index
38
+	swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
39
+	swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt)
40
+
41
+	// Annotations
42
+	swarm.Spec.Name = c.Spec.Annotations.Name
43
+	swarm.Spec.Labels = c.Spec.Annotations.Labels
44
+
45
+	for _, policy := range c.Spec.AcceptancePolicy.Policies {
46
+		p := types.Policy{
47
+			Role:       types.NodeRole(strings.ToLower(policy.Role.String())),
48
+			Autoaccept: policy.Autoaccept,
49
+		}
50
+		if policy.Secret != nil {
51
+			p.Secret = string(policy.Secret.Data)
52
+		}
53
+		swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
54
+	}
55
+
56
+	return swarm
57
+}
58
+
59
+// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
60
+func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
61
+	spec := swarmapi.ClusterSpec{
62
+		Annotations: swarmapi.Annotations{
63
+			Name:   s.Name,
64
+			Labels: s.Labels,
65
+		},
66
+		Orchestration: swarmapi.OrchestrationConfig{
67
+			TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit,
68
+		},
69
+		Raft: swarmapi.RaftConfig{
70
+			SnapshotInterval:           s.Raft.SnapshotInterval,
71
+			KeepOldSnapshots:           s.Raft.KeepOldSnapshots,
72
+			LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers,
73
+			HeartbeatTick:              s.Raft.HeartbeatTick,
74
+			ElectionTick:               s.Raft.ElectionTick,
75
+		},
76
+		Dispatcher: swarmapi.DispatcherConfig{
77
+			HeartbeatPeriod: s.Dispatcher.HeartbeatPeriod,
78
+		},
79
+		CAConfig: swarmapi.CAConfig{
80
+			NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry),
81
+		},
82
+	}
83
+
84
+	if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy); err != nil {
85
+		return swarmapi.ClusterSpec{}, err
86
+	}
87
+	return spec, nil
88
+}
89
+
90
+// SwarmSpecUpdateAcceptancePolicy updates a grpc ClusterSpec using AcceptancePolicy.
91
+func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy) error {
92
+	spec.AcceptancePolicy.Policies = nil
93
+	for _, p := range acceptancePolicy.Policies {
94
+		role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(p.Role))]
95
+		if !ok {
96
+			return fmt.Errorf("invalid Role: %q", p.Role)
97
+		}
98
+
99
+		policy := &swarmapi.AcceptancePolicy_RoleAdmissionPolicy{
100
+			Role:       swarmapi.NodeRole(role),
101
+			Autoaccept: p.Autoaccept,
102
+		}
103
+
104
+		if p.Secret != "" {
105
+			hashPwd, _ := bcrypt.GenerateFromPassword([]byte(p.Secret), 0)
106
+			policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
107
+				Data: hashPwd,
108
+				Alg:  "bcrypt",
109
+			}
110
+		}
111
+
112
+		spec.AcceptancePolicy.Policies = append(spec.AcceptancePolicy.Policies, policy)
113
+	}
114
+	return nil
115
+}
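Only a bcrypt hash of each acceptance secret is stored in the cluster spec, so a presented secret is later checked against the hash rather than recovered. A minimal sketch with the same `golang.org/x/crypto/bcrypt` package (the sample secret is made up):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// A cost of 0 is below bcrypt.MinCost, so the library substitutes
	// bcrypt.DefaultCost, matching the call in SwarmSpecUpdateAcceptancePolicy.
	hash, err := bcrypt.GenerateFromPassword([]byte("join-secret"), 0)
	if err != nil {
		panic(err)
	}

	// Verification compares against the hash; a nil error means a match.
	if bcrypt.CompareHashAndPassword(hash, []byte("join-secret")) == nil {
		fmt.Println("secret accepted")
	}
	if bcrypt.CompareHashAndPassword(hash, []byte("wrong")) != nil {
		fmt.Println("secret rejected")
	}
}
```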
0 116
new file mode 100644
... ...
@@ -0,0 +1,53 @@
0
+package convert
1
+
2
+import (
3
+	"strings"
4
+
5
+	types "github.com/docker/engine-api/types/swarm"
6
+	swarmapi "github.com/docker/swarmkit/api"
7
+	"github.com/docker/swarmkit/protobuf/ptypes"
8
+)
9
+
10
+// TaskFromGRPC converts a grpc Task to a Task.
11
+func TaskFromGRPC(t swarmapi.Task) types.Task {
12
+	containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container
13
+	containerStatus := t.Status.GetContainer()
14
+	task := types.Task{
15
+		ID:        t.ID,
16
+		ServiceID: t.ServiceID,
17
+		Slot:      int(t.Slot),
18
+		NodeID:    t.NodeID,
19
+		Spec: types.TaskSpec{
20
+			ContainerSpec: containerSpecFromGRPC(containerConfig),
21
+			Resources:     resourcesFromGRPC(t.Spec.Resources),
22
+			RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart),
23
+			Placement:     placementFromGRPC(t.Spec.Placement),
24
+		},
25
+		Status: types.TaskStatus{
26
+			State:   types.TaskState(strings.ToLower(t.Status.State.String())),
27
+			Message: t.Status.Message,
28
+			Err:     t.Status.Err,
29
+		},
30
+		DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())),
31
+	}
32
+
33
+	// Meta
34
+	task.Version.Index = t.Meta.Version.Index
35
+	task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt)
36
+	task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt)
37
+
38
+	task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp)
39
+
40
+	if containerStatus != nil {
41
+		task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID
42
+		task.Status.ContainerStatus.PID = int(containerStatus.PID)
43
+		task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode)
44
+	}
45
+
46
+	// NetworksAttachments
47
+	for _, na := range t.Networks {
48
+		task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na))
49
+	}
50
+
51
+	return task
52
+}
0 53
new file mode 100644
... ...
@@ -0,0 +1,35 @@
0
+package executor
1
+
2
+import (
3
+	"io"
4
+
5
+	clustertypes "github.com/docker/docker/daemon/cluster/provider"
6
+	"github.com/docker/engine-api/types"
7
+	"github.com/docker/engine-api/types/container"
8
+	"github.com/docker/engine-api/types/network"
9
+	"github.com/docker/libnetwork/cluster"
10
+	networktypes "github.com/docker/libnetwork/types"
11
+	"golang.org/x/net/context"
12
+)
13
+
14
+// Backend defines the executor component for a swarm agent.
15
+type Backend interface {
16
+	CreateManagedNetwork(clustertypes.NetworkCreateRequest) error
17
+	DeleteManagedNetwork(name string) error
18
+	SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error
19
+	PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error
20
+	CreateManagedContainer(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
21
+	ContainerStart(name string, hostConfig *container.HostConfig) error
22
+	ContainerStop(name string, seconds int) error
23
+	ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
24
+	UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error
25
+	ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error)
26
+	ContainerWaitWithContext(ctx context.Context, name string) (<-chan int, error)
27
+	ContainerRm(name string, config *types.ContainerRmConfig) error
28
+	ContainerKill(name string, sig uint64) error
29
+	SystemInfo() (*types.Info, error)
30
+	VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error)
31
+	ListContainersForNode(nodeID string) []string
32
+	SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error
33
+	SetClusterProvider(provider cluster.Provider)
34
+}
0 35
new file mode 100644
... ...
@@ -0,0 +1,229 @@
0
+package container
1
+
2
+import (
3
+	"encoding/base64"
4
+	"encoding/json"
5
+	"fmt"
6
+	"io"
7
+	"strings"
8
+	"syscall"
9
+
10
+	"github.com/Sirupsen/logrus"
11
+	executorpkg "github.com/docker/docker/daemon/cluster/executor"
12
+	"github.com/docker/engine-api/types"
13
+	"github.com/docker/libnetwork"
14
+	"github.com/docker/swarmkit/api"
15
+	"github.com/docker/swarmkit/log"
16
+	"golang.org/x/net/context"
17
+)
18
+
19
+// containerAdapter conducts remote operations for a container. Most calls
+// are naked calls to the client API, seeded with information from
21
+// containerConfig.
22
+type containerAdapter struct {
23
+	backend   executorpkg.Backend
24
+	container *containerConfig
25
+}
26
+
27
+func newContainerAdapter(b executorpkg.Backend, task *api.Task) (*containerAdapter, error) {
28
+	ctnr, err := newContainerConfig(task)
29
+	if err != nil {
30
+		return nil, err
31
+	}
32
+
33
+	return &containerAdapter{
34
+		container: ctnr,
35
+		backend:   b,
36
+	}, nil
37
+}
38
+
39
+func (c *containerAdapter) pullImage(ctx context.Context) error {
40
+	// if the image needs to be pulled, the auth config will be retrieved and updated
41
+	encodedAuthConfig := c.container.task.ServiceAnnotations.Labels[fmt.Sprintf("%v.registryauth", systemLabelPrefix)]
42
+
43
+	authConfig := &types.AuthConfig{}
44
+	if encodedAuthConfig != "" {
45
+		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil {
46
+			logrus.Warnf("invalid authconfig: %v", err)
47
+		}
48
+	}
49
+
50
+	pr, pw := io.Pipe()
51
+	metaHeaders := map[string][]string{}
52
+	go func() {
53
+		err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw)
54
+		pw.CloseWithError(err)
55
+	}()
56
+
57
+	dec := json.NewDecoder(pr)
58
+	m := map[string]interface{}{}
59
+	for {
60
+		if err := dec.Decode(&m); err != nil {
61
+			if err == io.EOF {
62
+				break
63
+			}
64
+			return err
65
+		}
66
+		// TODO(stevvooe): Report this status somewhere.
67
+		logrus.Debugln("pull progress", m)
68
+	}
69
+	// if the final stream object contained an error, return it
70
+	if errMsg, ok := m["error"]; ok {
71
+		return fmt.Errorf("%v", errMsg)
72
+	}
73
+	return nil
74
+}
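pullImage pipes the engine's pull progress through an in-memory JSON stream and only surfaces an error carried by the final message. A self-contained sketch of that decode loop (the sample stream is fabricated):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// lastMessageError decodes a stream of JSON objects and, like pullImage,
// reports an "error" field found in the final message. Note the map is
// reused across iterations, so keys from earlier messages may linger.
func lastMessageError(r io.Reader) error {
	dec := json.NewDecoder(r)
	m := map[string]interface{}{}
	for {
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break // m still holds the last decoded message
			}
			return err
		}
	}
	if errMsg, ok := m["error"]; ok {
		return fmt.Errorf("%v", errMsg)
	}
	return nil
}

func main() {
	stream := `{"status":"Pulling"}{"status":"Downloading"}{"error":"manifest not found"}`
	fmt.Println(lastMessageError(strings.NewReader(stream))) // manifest not found
}
```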
75
+
76
+func (c *containerAdapter) createNetworks(ctx context.Context) error {
77
+	for _, network := range c.container.networks() {
78
+		ncr, err := c.container.networkCreateRequest(network)
79
+		if err != nil {
80
+			return err
81
+		}
82
+
83
+		if err := c.backend.CreateManagedNetwork(ncr); err != nil { // TODO: name missing
84
+			if _, ok := err.(libnetwork.NetworkNameError); ok {
85
+				continue
86
+			}
87
+
88
+			return err
89
+		}
90
+	}
91
+
92
+	return nil
93
+}
94
+
95
+func (c *containerAdapter) removeNetworks(ctx context.Context) error {
96
+	for _, nid := range c.container.networks() {
97
+		if err := c.backend.DeleteManagedNetwork(nid); err != nil {
98
+			if _, ok := err.(*libnetwork.ActiveEndpointsError); ok {
99
+				continue
100
+			}
101
+			log.G(ctx).Errorf("network %s remove failed: %v", nid, err)
102
+			return err
103
+		}
104
+	}
105
+
106
+	return nil
107
+}
108
+
109
+func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backend) error {
110
+	var cr types.ContainerCreateResponse
111
+	var err error
112
+	if cr, err = backend.CreateManagedContainer(types.ContainerCreateConfig{
113
+		Name:       c.container.name(),
114
+		Config:     c.container.config(),
115
+		HostConfig: c.container.hostConfig(),
116
+		// Use the first network in container create
117
+		NetworkingConfig: c.container.createNetworkingConfig(),
118
+	}); err != nil {
119
+		return err
120
+	}
121
+
122
+	// The Docker daemon currently doesn't support multiple networks in container create.
+	// Connect to all other networks afterwards.
124
+	nc := c.container.connectNetworkingConfig()
125
+
126
+	if nc != nil {
127
+		for n, ep := range nc.EndpointsConfig {
128
+			logrus.Errorf("CONNECT %s : %v", n, ep.IPAMConfig.IPv4Address)
129
+			if err := backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
130
+				return err
131
+			}
132
+		}
133
+	}
134
+
135
+	if err := backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil {
136
+		return err
137
+	}
138
+
139
+	return nil
140
+}
141
+
142
+func (c *containerAdapter) start(ctx context.Context) error {
143
+	return c.backend.ContainerStart(c.container.name(), nil)
144
+}
145
+
146
+func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
147
+	cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false)
148
+	if ctx.Err() != nil {
149
+		return types.ContainerJSON{}, ctx.Err()
150
+	}
151
+	if err != nil {
152
+		return types.ContainerJSON{}, err
153
+	}
154
+	return *cs, nil
155
+}
156
+
157
+// wait blocks until the container exits or the context is cancelled. It
+// returns a channel that receives the container's exit code.
162
+func (c *containerAdapter) wait(ctx context.Context) (<-chan int, error) {
163
+	return c.backend.ContainerWaitWithContext(ctx, c.container.name())
164
+}
165
+
166
+func (c *containerAdapter) shutdown(ctx context.Context) error {
167
+	// Default stop grace period to 10s.
168
+	stopgrace := 10
169
+	spec := c.container.spec()
170
+	if spec.StopGracePeriod != nil {
171
+		stopgrace = int(spec.StopGracePeriod.Seconds)
172
+	}
173
+	return c.backend.ContainerStop(c.container.name(), stopgrace)
174
+}
175
+
176
+func (c *containerAdapter) terminate(ctx context.Context) error {
177
+	return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL))
178
+}
179
+
180
+func (c *containerAdapter) remove(ctx context.Context) error {
181
+	return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{
182
+		RemoveVolume: true,
183
+		ForceRemove:  true,
184
+	})
185
+}
186
+
187
+func (c *containerAdapter) createVolumes(ctx context.Context, backend executorpkg.Backend) error {
188
+	// Create plugin volumes that are embedded inside a Mount
189
+	for _, mount := range c.container.task.Spec.GetContainer().Mounts {
190
+		if mount.Type != api.MountTypeVolume {
191
+			continue
192
+		}
193
+
194
+		if mount.VolumeOptions == nil {
195
+			continue
196
+		}
197
+
198
+		if mount.VolumeOptions.DriverConfig == nil {
199
+			continue
200
+		}
201
+
202
+		req := c.container.volumeCreateRequest(&mount)
203
+
204
+		// Check if this volume exists on the engine
205
+		if _, err := backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil {
206
+			// TODO(amitshukla): Today, volume create through the engine api does not return an error
207
+			// when the named volume with the same parameters already exists.
208
+			// It returns an error if the driver name is different - that is a valid error
209
+			return err
210
+		}
211
+	}
213
+
214
+	return nil
215
+}
216
+
217
+// TODO: typed/wrapped errors
218
+func isContainerCreateNameConflict(err error) bool {
219
+	return strings.Contains(err.Error(), "Conflict. The name")
220
+}
221
+
222
+func isUnknownContainer(err error) bool {
223
+	return strings.Contains(err.Error(), "No such container:")
224
+}
225
+
226
+func isStoppedContainer(err error) bool {
227
+	return strings.Contains(err.Error(), "is already stopped")
228
+}
0 229
new file mode 100644
... ...
@@ -0,0 +1,415 @@
0
+package container
1
+
2
+import (
3
+	"errors"
4
+	"fmt"
5
+	"log"
6
+	"net"
7
+	"strings"
8
+	"time"
9
+
10
+	clustertypes "github.com/docker/docker/daemon/cluster/provider"
11
+	"github.com/docker/docker/reference"
12
+	"github.com/docker/engine-api/types"
13
+	enginecontainer "github.com/docker/engine-api/types/container"
14
+	"github.com/docker/engine-api/types/network"
15
+	"github.com/docker/swarmkit/agent/exec"
16
+	"github.com/docker/swarmkit/api"
17
+)
18
+
19
+const (
20
+	// Explicitly use the kernel's default setting for CPU quota of 100ms.
21
+	// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
22
+	cpuQuotaPeriod = 100 * time.Millisecond
23
+
24
+	// systemLabelPrefix represents the reserved namespace for system labels.
25
+	systemLabelPrefix = "com.docker.swarm"
26
+)
27
+
28
+// containerConfig converts task properties into docker container compatible
29
+// components.
30
+type containerConfig struct {
31
+	task                *api.Task
32
+	networksAttachments map[string]*api.NetworkAttachment
33
+}
34
+
35
+// newContainerConfig returns a validated container config. No methods should
36
+// return an error if this function returns without error.
37
+func newContainerConfig(t *api.Task) (*containerConfig, error) {
38
+	var c containerConfig
39
+	return &c, c.setTask(t)
40
+}
41
+
42
+func (c *containerConfig) setTask(t *api.Task) error {
43
+	container := t.Spec.GetContainer()
44
+	if container == nil {
45
+		return exec.ErrRuntimeUnsupported
46
+	}
47
+
48
+	if container.Image == "" {
49
+		return ErrImageRequired
50
+	}
51
+
52
+	// index the networks by name
53
+	c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks))
54
+	for _, attachment := range t.Networks {
55
+		c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment
56
+	}
57
+
58
+	c.task = t
59
+	return nil
60
+}
61
+
62
+func (c *containerConfig) endpoint() *api.Endpoint {
63
+	return c.task.Endpoint
64
+}
65
+
66
+func (c *containerConfig) spec() *api.ContainerSpec {
67
+	return c.task.Spec.GetContainer()
68
+}
69
+
70
+func (c *containerConfig) name() string {
71
+	if c.task.Annotations.Name != "" {
72
+		// if set, use the container Annotations.Name field, set in the orchestrator.
73
+		return c.task.Annotations.Name
74
+	}
75
+
76
+	// fallback to service.slot.id.
77
+	return strings.Join([]string{c.task.ServiceAnnotations.Name, fmt.Sprint(c.task.Slot), c.task.ID}, ".")
78
+}
79
+
80
+func (c *containerConfig) image() string {
81
+	raw := c.spec().Image
82
+	ref, err := reference.ParseNamed(raw)
83
+	if err != nil {
84
+		return raw
85
+	}
86
+	return reference.WithDefaultTag(ref).String()
87
+}
88
+
89
+func (c *containerConfig) volumes() map[string]struct{} {
90
+	r := make(map[string]struct{})
91
+
92
+	for _, mount := range c.spec().Mounts {
93
+		// pick off all the volume mounts.
94
+		if mount.Type != api.MountTypeVolume {
95
+			continue
96
+		}
97
+
98
+		r[fmt.Sprintf("%s:%s", mount.Target, getMountMask(&mount))] = struct{}{}
99
+	}
100
+
101
+	return r
102
+}
103
+
104
+func (c *containerConfig) config() *enginecontainer.Config {
105
+	config := &enginecontainer.Config{
106
+		Labels:     c.labels(),
107
+		User:       c.spec().User,
108
+		Env:        c.spec().Env,
109
+		WorkingDir: c.spec().Dir,
110
+		Image:      c.image(),
111
+		Volumes:    c.volumes(),
112
+	}
113
+
114
+	if len(c.spec().Command) > 0 {
115
+		// If Command is provided, we replace the whole invocation with Command
116
+		// by replacing Entrypoint and specifying Cmd. Args is ignored in this
117
+		// case.
118
+		config.Entrypoint = append(config.Entrypoint, c.spec().Command[0])
119
+		config.Cmd = append(config.Cmd, c.spec().Command[1:]...)
120
+	} else if len(c.spec().Args) > 0 {
121
+		// In this case, we assume the image has an Entrypoint and Args
122
+		// specifies the arguments for that entrypoint.
123
+		config.Cmd = c.spec().Args
124
+	}
125
+
126
+	return config
127
+}
128
+
129
+func (c *containerConfig) labels() map[string]string {
130
+	var (
131
+		system = map[string]string{
132
+			"task":         "", // mark as cluster task
133
+			"task.id":      c.task.ID,
134
+			"task.name":    fmt.Sprintf("%v.%v", c.task.ServiceAnnotations.Name, c.task.Slot),
135
+			"node.id":      c.task.NodeID,
136
+			"service.id":   c.task.ServiceID,
137
+			"service.name": c.task.ServiceAnnotations.Name,
138
+		}
139
+		labels = make(map[string]string)
140
+	)
141
+
142
+	// base labels are those defined in the spec.
143
+	for k, v := range c.spec().Labels {
144
+		labels[k] = v
145
+	}
146
+
147
+	// we then apply the overrides from the task, which may be set via the
148
+	// orchestrator.
149
+	for k, v := range c.task.Annotations.Labels {
150
+		labels[k] = v
151
+	}
152
+
153
+	// finally, we apply the system labels, which override all labels.
154
+	for k, v := range system {
155
+		labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v
156
+	}
157
+
158
+	return labels
159
+}
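The merge order above is deliberate: spec labels form the base, task annotations override them, and the namespaced `com.docker.swarm.*` system labels are applied last. A small sketch of the same three-layer merge (sample values are made up):

```go
package main

import "fmt"

// mergeLabels applies the same precedence as labels(): spec labels first,
// task annotation overrides second, namespaced system labels last.
func mergeLabels(spec, task, system map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range spec { // base: the service spec
		out[k] = v
	}
	for k, v := range task { // overrides set by the orchestrator
		out[k] = v
	}
	for k, v := range system { // system labels always win in their namespace
		out["com.docker.swarm."+k] = v
	}
	return out
}

func main() {
	merged := mergeLabels(
		map[string]string{"tier": "web"},
		map[string]string{"tier": "frontend"},
		map[string]string{"task.id": "abc123"},
	)
	fmt.Println(merged["tier"])                     // frontend
	fmt.Println(merged["com.docker.swarm.task.id"]) // abc123
}
```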
160
+
161
+func (c *containerConfig) bindMounts() []string {
162
+	var r []string
163
+
164
+	for _, val := range c.spec().Mounts {
165
+		mask := getMountMask(&val)
166
+		if val.Type == api.MountTypeBind {
167
+			r = append(r, fmt.Sprintf("%s:%s:%s", val.Source, val.Target, mask))
168
+		}
169
+	}
170
+
171
+	return r
172
+}
173
+
174
+func getMountMask(m *api.Mount) string {
175
+	maskOpts := []string{"ro"}
176
+	if m.Writable {
177
+		maskOpts[0] = "rw"
178
+	}
179
+
180
+	if m.BindOptions != nil {
181
+		switch m.BindOptions.Propagation {
182
+		case api.MountPropagationPrivate:
183
+			maskOpts = append(maskOpts, "private")
184
+		case api.MountPropagationRPrivate:
185
+			maskOpts = append(maskOpts, "rprivate")
186
+		case api.MountPropagationShared:
187
+			maskOpts = append(maskOpts, "shared")
188
+		case api.MountPropagationRShared:
189
+			maskOpts = append(maskOpts, "rshared")
190
+		case api.MountPropagationSlave:
191
+			maskOpts = append(maskOpts, "slave")
192
+		case api.MountPropagationRSlave:
193
+			maskOpts = append(maskOpts, "rslave")
194
+		}
195
+	}
196
+
197
+	if m.VolumeOptions != nil {
198
+		if !m.VolumeOptions.Populate {
199
+			maskOpts = append(maskOpts, "nocopy")
200
+		}
201
+	}
202
+	return strings.Join(maskOpts, ",")
203
+}
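So a writable bind mount with rshared propagation yields "rw,rshared", and a volume with Populate disabled picks up "nocopy". A tiny sketch exercising the same option-string assembly (stand-in parameters rather than the swarmkit Mount type):

```go
package main

import (
	"fmt"
	"strings"
)

// mountMask assembles the same comma-separated option string as
// getMountMask, from stand-in parameters.
func mountMask(writable bool, propagation string, nocopy bool) string {
	opts := []string{"ro"}
	if writable {
		opts[0] = "rw"
	}
	if propagation != "" {
		opts = append(opts, propagation)
	}
	if nocopy {
		opts = append(opts, "nocopy")
	}
	return strings.Join(opts, ",")
}

func main() {
	fmt.Println(mountMask(true, "rshared", false)) // rw,rshared
	fmt.Println(mountMask(false, "", true))        // ro,nocopy
}
```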
204
+
205
+func (c *containerConfig) hostConfig() *enginecontainer.HostConfig {
206
+	return &enginecontainer.HostConfig{
207
+		Resources: c.resources(),
208
+		Binds:     c.bindMounts(),
209
+	}
210
+}
211
+
212
+// This handles the case of volumes that are defined inside a service Mount
213
+func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *types.VolumeCreateRequest {
214
+	var (
215
+		driverName string
216
+		driverOpts map[string]string
217
+		labels     map[string]string
218
+	)
219
+
220
+	if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil {
221
+		driverName = mount.VolumeOptions.DriverConfig.Name
222
+		driverOpts = mount.VolumeOptions.DriverConfig.Options
223
+		labels = mount.VolumeOptions.Labels
224
+	}
225
+
226
+	if mount.VolumeOptions != nil {
227
+		return &types.VolumeCreateRequest{
228
+			Name:       mount.Source,
229
+			Driver:     driverName,
230
+			DriverOpts: driverOpts,
231
+			Labels:     labels,
232
+		}
233
+	}
234
+	return nil
235
+}
236
+
237
+func (c *containerConfig) resources() enginecontainer.Resources {
238
+	resources := enginecontainer.Resources{}
239
+
240
+	// If no limits are specified let the engine use its defaults.
241
+	//
242
+	// TODO(aluzzardi): We might want to set some limits anyway otherwise
243
+	// "unlimited" tasks will step over the reservation of other tasks.
244
+	r := c.task.Spec.Resources
245
+	if r == nil || r.Limits == nil {
246
+		return resources
247
+	}
248
+
249
+	if r.Limits.MemoryBytes > 0 {
250
+		resources.Memory = r.Limits.MemoryBytes
251
+	}
252
+
253
+	if r.Limits.NanoCPUs > 0 {
254
+		// CPU Period must be set in microseconds.
255
+		resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
256
+		resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
257
+	}
258
+
259
+	return resources
260
+}
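The CFS arithmetic: NanoCPUs expresses CPUs in billionths, the period is pinned to the kernel default of 100ms (100,000µs), and the quota is scaled to match, so a limit of 1.5 CPUs becomes a quota of 150,000µs per period. A worked sketch of that calculation:

```go
package main

import (
	"fmt"
	"time"
)

const cpuQuotaPeriod = 100 * time.Millisecond // the kernel's default CFS period

// cfsQuota converts a NanoCPUs limit into a CFS period/quota pair in
// microseconds, mirroring the arithmetic in resources().
func cfsQuota(nanoCPUs int64) (period, quota int64) {
	period = int64(cpuQuotaPeriod / time.Microsecond) // 100000µs
	quota = nanoCPUs * period / 1e9
	return period, quota
}

func main() {
	period, quota := cfsQuota(1500000000) // 1.5 CPUs in NanoCPUs
	fmt.Println(period, quota)            // 100000 150000
}
```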
261
+
262
+// Docker daemon supports just 1 network during container create.
263
+func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig {
264
+	var networks []*api.NetworkAttachment
265
+	if c.task.Spec.GetContainer() != nil {
266
+		networks = c.task.Networks
267
+	}
268
+
269
+	epConfig := make(map[string]*network.EndpointSettings)
270
+	if len(networks) > 0 {
271
+		epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0])
272
+	}
273
+
274
+	return &network.NetworkingConfig{EndpointsConfig: epConfig}
275
+}
276
+
277
+// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create
278
+func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig {
279
+	var networks []*api.NetworkAttachment
280
+	if c.task.Spec.GetContainer() != nil {
281
+		networks = c.task.Networks
282
+	}
283
+
284
+	// First network is used during container create. Other networks are used in "docker network connect"
285
+	if len(networks) < 2 {
286
+		return nil
287
+	}
288
+
289
+	epConfig := make(map[string]*network.EndpointSettings)
290
+	for _, na := range networks[1:] {
291
+		epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na)
292
+	}
293
+	return &network.NetworkingConfig{EndpointsConfig: epConfig}
294
+}
295
+
296
+func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings {
297
+	var ipv4, ipv6 string
298
+	for _, addr := range na.Addresses {
299
+		ip, _, err := net.ParseCIDR(addr)
300
+		if err != nil {
301
+			continue
302
+		}
303
+
304
+		if ip.To4() != nil {
305
+			ipv4 = ip.String()
306
+			continue
307
+		}
308
+
309
+		if ip.To16() != nil {
310
+			ipv6 = ip.String()
311
+		}
312
+	}
313
+
314
+	return &network.EndpointSettings{
315
+		IPAMConfig: &network.EndpointIPAMConfig{
316
+			IPv4Address: ipv4,
317
+			IPv6Address: ipv6,
318
+		},
319
+	}
320
+}
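Attachment addresses arrive in CIDR notation, so the helper strips the prefix length and buckets each address by family, keeping the last of each. A quick sketch of the same classification using only the standard library (the addresses are examples):

```go
package main

import (
	"fmt"
	"net"
)

// splitByFamily classifies CIDR attachment addresses the way
// getEndpointConfig does, keeping the last address of each family.
func splitByFamily(addrs []string) (ipv4, ipv6 string) {
	for _, addr := range addrs {
		ip, _, err := net.ParseCIDR(addr)
		if err != nil {
			continue // skip malformed entries
		}
		if ip.To4() != nil {
			ipv4 = ip.String()
			continue
		}
		if ip.To16() != nil {
			ipv6 = ip.String()
		}
	}
	return ipv4, ipv6
}

func main() {
	v4, v6 := splitByFamily([]string{"10.0.9.3/24", "fd00::3/64"})
	fmt.Println(v4, v6) // 10.0.9.3 fd00::3
}
```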
321
+
322
+func (c *containerConfig) virtualIP(networkID string) string {
323
+	if c.task.Endpoint == nil {
324
+		return ""
325
+	}
326
+
327
+	for _, eVip := range c.task.Endpoint.VirtualIPs {
328
+		// We only support IPv4 VIPs for now.
329
+		if eVip.NetworkID == networkID {
330
+			vip, _, err := net.ParseCIDR(eVip.Addr)
331
+			if err != nil {
332
+				return ""
333
+			}
334
+
335
+			return vip.String()
336
+		}
337
+	}
338
+
339
+	return ""
340
+}
341
+
342
+func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig {
343
+	if len(c.task.Networks) == 0 {
344
+		return nil
345
+	}
346
+
347
+	log.Printf("Creating service config in agent for t = %+v", c.task)
348
+	svcCfg := &clustertypes.ServiceConfig{
349
+		Name:             c.task.ServiceAnnotations.Name,
350
+		ID:               c.task.ServiceID,
351
+		VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
352
+	}
353
+
354
+	for _, na := range c.task.Networks {
355
+		svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{
356
+			// We support only IPv4 virtual IP for now.
357
+			IPv4: c.virtualIP(na.Network.ID),
358
+		}
359
+	}
360
+
361
+	if c.task.Endpoint != nil {
362
+		for _, ePort := range c.task.Endpoint.Ports {
363
+			svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{
364
+				Name:          ePort.Name,
365
+				Protocol:      int32(ePort.Protocol),
366
+				TargetPort:    ePort.TargetPort,
367
+				PublishedPort: ePort.PublishedPort,
368
+			})
369
+		}
370
+	}
371
+
372
+	return svcCfg
373
+}
374
+
375
+// networks returns a list of network names attached to the container. The
376
+// returned names can be used to look up the corresponding network create
377
+// options.
378
+func (c *containerConfig) networks() []string {
379
+	var networks []string
380
+
381
+	for name := range c.networksAttachments {
382
+		networks = append(networks, name)
383
+	}
384
+
385
+	return networks
386
+}
387
+
388
+func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
389
+	na, ok := c.networksAttachments[name]
390
+	if !ok {
391
+		return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
392
+	}
393
+
394
+	options := types.NetworkCreate{
395
+		// ID:     na.Network.ID,
396
+		Driver: na.Network.DriverState.Name,
397
+		IPAM: network.IPAM{
398
+			Driver: na.Network.IPAM.Driver.Name,
399
+		},
400
+		Options:        na.Network.DriverState.Options,
401
+		CheckDuplicate: true,
402
+	}
403
+
404
+	for _, ic := range na.Network.IPAM.Configs {
405
+		c := network.IPAMConfig{
406
+			Subnet:  ic.Subnet,
407
+			IPRange: ic.Range,
408
+			Gateway: ic.Gateway,
409
+		}
410
+		options.IPAM.Config = append(options.IPAM.Config, c)
411
+	}
412
+
413
+	return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
414
+}
0 415
new file mode 100644
... ...
@@ -0,0 +1,305 @@
0
+package container
1
+
2
+import (
3
+	"errors"
4
+	"fmt"
5
+	"strings"
6
+
7
+	executorpkg "github.com/docker/docker/daemon/cluster/executor"
8
+	"github.com/docker/engine-api/types"
9
+	"github.com/docker/swarmkit/agent/exec"
10
+	"github.com/docker/swarmkit/api"
11
+	"github.com/docker/swarmkit/log"
12
+	"golang.org/x/net/context"
13
+)
14
+
15
+// controller implements agent.Controller against docker's API.
16
+//
17
+// Most operations against docker's API are done through the container name,
18
+// which is unique to the task.
19
+type controller struct {
20
+	backend executorpkg.Backend
21
+	task    *api.Task
22
+	adapter *containerAdapter
23
+	closed  chan struct{}
24
+	err     error
25
+}
26
+
27
+var _ exec.Controller = &controller{}
28
+
29
+// newController returns a dockerexec runner for the provided task.
30
+func newController(b executorpkg.Backend, task *api.Task) (*controller, error) {
31
+	adapter, err := newContainerAdapter(b, task)
32
+	if err != nil {
33
+		return nil, err
34
+	}
35
+
36
+	return &controller{
37
+		backend: b,
38
+		task:    task,
39
+		adapter: adapter,
40
+		closed:  make(chan struct{}),
41
+	}, nil
42
+}
43
+
44
+func (r *controller) Task() (*api.Task, error) {
45
+	return r.task, nil
46
+}
47
+
48
+// ContainerStatus returns the container-specific status for the task.
49
+func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
50
+	ctnr, err := r.adapter.inspect(ctx)
51
+	if err != nil {
52
+		if isUnknownContainer(err) {
53
+			return nil, nil
54
+		}
55
+		return nil, err
56
+	}
57
+	return parseContainerStatus(ctnr)
58
+}
59
+
60
+// Update takes a recent task update and applies it to the container.
61
+func (r *controller) Update(ctx context.Context, t *api.Task) error {
62
+	log.G(ctx).Warnf("task updates not yet supported")
63
+	// TODO(stevvooe): While assignment of tasks is idempotent, we do allow
64
+	// updates of metadata, such as labelling, as well as any other properties
65
+	// that make sense.
66
+	return nil
67
+}
68
+
69
+// Prepare creates a container and ensures the image is pulled.
70
+//
71
+// If the container has already been created, exec.ErrTaskPrepared is returned.
72
+func (r *controller) Prepare(ctx context.Context) error {
73
+	if err := r.checkClosed(); err != nil {
74
+		return err
75
+	}
76
+
77
+	// Make sure all the networks that the task needs are created.
78
+	if err := r.adapter.createNetworks(ctx); err != nil {
79
+		return err
80
+	}
81
+
82
+	// Make sure all the volumes that the task needs are created.
83
+	if err := r.adapter.createVolumes(ctx, r.backend); err != nil {
84
+		return err
85
+	}
86
+
87
+	for {
88
+		if err := r.checkClosed(); err != nil {
89
+			return err
90
+		}
91
+		if err := r.adapter.create(ctx, r.backend); err != nil {
92
+			if isContainerCreateNameConflict(err) {
93
+				if _, err := r.adapter.inspect(ctx); err != nil {
94
+					return err
95
+				}
96
+
97
+				// container is already created. success!
98
+				return exec.ErrTaskPrepared
99
+			}
100
+
101
+			if !strings.Contains(err.Error(), "No such image") { // todo: better error detection
102
+				return err
103
+			}
104
+			if err := r.adapter.pullImage(ctx); err != nil {
105
+				return err
106
+			}
107
+
108
+			continue // retry to create the container
109
+		}
110
+
111
+		break
112
+	}
113
+
114
+	return nil
115
+}
116
+
117
+// Start the container. An error will be returned if the container is already started.
118
+func (r *controller) Start(ctx context.Context) error {
119
+	if err := r.checkClosed(); err != nil {
120
+		return err
121
+	}
122
+
123
+	ctnr, err := r.adapter.inspect(ctx)
124
+	if err != nil {
125
+		return err
126
+	}
127
+
128
+	// Detect whether the container has *ever* been started. If so, we don't
129
+	// issue the start.
130
+	//
131
+	// TODO(stevvooe): This is very racy. While reading inspect, another caller
+	// could start the container and we could end up starting it twice.
133
+	if ctnr.State.Status != "created" {
134
+		return exec.ErrTaskStarted
135
+	}
136
+
137
+	if err := r.adapter.start(ctx); err != nil {
138
+		return err
139
+	}
140
+
141
+	return nil
142
+}
143
+
144
+// Wait on the container to exit.
145
+func (r *controller) Wait(pctx context.Context) error {
146
+	if err := r.checkClosed(); err != nil {
147
+		return err
148
+	}
149
+
150
+	ctx, cancel := context.WithCancel(pctx)
151
+	defer cancel()
152
+
153
+	c, err := r.adapter.wait(ctx)
154
+	if err != nil {
155
+		return err
156
+	}
157
+
158
+	<-c
159
+	if ctx.Err() != nil {
160
+		return ctx.Err()
161
+	}
162
+	ctnr, err := r.adapter.inspect(ctx)
163
+	if err != nil {
164
+		// TODO(stevvooe): Need to handle missing container here. It is likely
165
+		// that a Wait call with a not found error should result in no waiting
166
+		// and no error at all.
167
+		return err
168
+	}
169
+
170
+	if ctnr.State.ExitCode != 0 {
171
+		var cause error
172
+		if ctnr.State.Error != "" {
173
+			cause = errors.New(ctnr.State.Error)
174
+		}
175
+		cstatus, _ := parseContainerStatus(ctnr)
176
+		return &exitError{
177
+			code:            ctnr.State.ExitCode,
178
+			cause:           cause,
179
+			containerStatus: cstatus,
180
+		}
181
+	}
182
+	return nil
183
+}
184
+
185
+// Shutdown the container cleanly.
186
+func (r *controller) Shutdown(ctx context.Context) error {
187
+	if err := r.checkClosed(); err != nil {
188
+		return err
189
+	}
190
+
191
+	if err := r.adapter.shutdown(ctx); err != nil {
192
+		if isUnknownContainer(err) || isStoppedContainer(err) {
193
+			return nil
194
+		}
195
+
196
+		return err
197
+	}
198
+
199
+	return nil
200
+}
201
+
202
+// Terminate the container, with force.
203
+func (r *controller) Terminate(ctx context.Context) error {
204
+	if err := r.checkClosed(); err != nil {
205
+		return err
206
+	}
207
+
208
+	if err := r.adapter.terminate(ctx); err != nil {
209
+		if isUnknownContainer(err) {
210
+			return nil
211
+		}
212
+
213
+		return err
214
+	}
215
+
216
+	return nil
217
+}
218
+
219
+// Remove the container and its resources.
220
+func (r *controller) Remove(ctx context.Context) error {
221
+	if err := r.checkClosed(); err != nil {
222
+		return err
223
+	}
224
+
225
+	// It may be necessary to shut down the task before removing it.
226
+	if err := r.Shutdown(ctx); err != nil {
227
+		if isUnknownContainer(err) {
228
+			return nil
229
+		}
230
+		// This may fail if the task was already shut down.
231
+		log.G(ctx).WithError(err).Debug("shutdown failed on removal")
232
+	}
233
+
234
+	// Try removing networks referenced in this task in case this
235
+	// task is the last one referencing it
236
+	if err := r.adapter.removeNetworks(ctx); err != nil {
237
+		if isUnknownContainer(err) {
238
+			return nil
239
+		}
240
+		return err
241
+	}
242
+
243
+	if err := r.adapter.remove(ctx); err != nil {
244
+		if isUnknownContainer(err) {
245
+			return nil
246
+		}
247
+
248
+		return err
249
+	}
250
+	return nil
251
+}
252
+
253
+// Close the runner and clean up any ephemeral resources.
254
+func (r *controller) Close() error {
255
+	select {
256
+	case <-r.closed:
257
+		return r.err
258
+	default:
259
+		r.err = exec.ErrControllerClosed
260
+		close(r.closed)
261
+	}
262
+	return nil
263
+}
264
+
265
+func (r *controller) checkClosed() error {
266
+	select {
267
+	case <-r.closed:
268
+		return r.err
269
+	default:
270
+		return nil
271
+	}
272
+}
273
+
274
+func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) {
275
+	status := &api.ContainerStatus{
276
+		ContainerID: ctnr.ID,
277
+		PID:         int32(ctnr.State.Pid),
278
+		ExitCode:    int32(ctnr.State.ExitCode),
279
+	}
280
+
281
+	return status, nil
282
+}
283
+
284
+type exitError struct {
285
+	code            int
286
+	cause           error
287
+	containerStatus *api.ContainerStatus
288
+}
289
+
290
+func (e *exitError) Error() string {
291
+	if e.cause != nil {
292
+		return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause)
293
+	}
294
+
295
+	return fmt.Sprintf("task: non-zero exit (%v)", e.code)
296
+}
297
+
298
+func (e *exitError) ExitCode() int {
299
+	return int(e.containerStatus.ExitCode)
300
+}
301
+
302
+func (e *exitError) Cause() error {
303
+	return e.cause
304
+}
0 305
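Taken together, these methods implement swarmkit's exec.Controller lifecycle. Below is a minimal sketch, not part of this PR, of how an agent-side caller might drive a controller; the runTask helper is hypothetical, and the sentinel errors exec.ErrTaskPrepared and exec.ErrTaskStarted simply signal that the step had already happened:

	// Hypothetical driver; assumes the swarmkit exec package shown above.
	func runTask(ctx context.Context, ctlr exec.Controller) error {
		if err := ctlr.Prepare(ctx); err != nil && err != exec.ErrTaskPrepared {
			return err // ErrTaskPrepared: the container already exists, carry on
		}
		if err := ctlr.Start(ctx); err != nil && err != exec.ErrTaskStarted {
			return err // ErrTaskStarted: the container was started earlier, carry on
		}
		// Wait returns nil on a clean exit, or an *exitError carrying the
		// container's exit code and status on a non-zero exit.
		return ctlr.Wait(ctx)
	}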
new file mode 100644
... ...
@@ -0,0 +1,12 @@
0
+package container
1
+
2
+import "fmt"
3
+
4
+var (
5
+	// ErrImageRequired is returned if a task is missing the image definition.
6
+	ErrImageRequired = fmt.Errorf("dockerexec: image required")
7
+
8
+	// ErrContainerDestroyed is returned when a container is prematurely destroyed
9
+	// during a wait call.
10
+	ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed")
11
+)
0 12
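Both values are plain sentinels, so callers compare them with ==. An illustrative check (the surrounding caller and someWait are hypothetical):

	// Inside package container; someWait stands in for any wait-style call.
	if err := someWait(ctx); err == ErrContainerDestroyed {
		// the container disappeared while we were waiting on it
	}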
new file mode 100644
... ...
@@ -0,0 +1,139 @@
0
+package container
1
+
2
+import (
3
+	"strings"
4
+
5
+	executorpkg "github.com/docker/docker/daemon/cluster/executor"
6
+	clustertypes "github.com/docker/docker/daemon/cluster/provider"
7
+	"github.com/docker/engine-api/types"
8
+	"github.com/docker/engine-api/types/network"
9
+	networktypes "github.com/docker/libnetwork/types"
10
+	"github.com/docker/swarmkit/agent/exec"
11
+	"github.com/docker/swarmkit/api"
12
+	"golang.org/x/net/context"
13
+)
14
+
15
+type executor struct {
16
+	backend executorpkg.Backend
17
+}
18
+
19
+// NewExecutor returns an executor backed by the given engine backend.
20
+func NewExecutor(b executorpkg.Backend) exec.Executor {
21
+	return &executor{
22
+		backend: b,
23
+	}
24
+}
25
+
26
+// Describe returns the underlying node description from the engine backend.
27
+func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
28
+	info, err := e.backend.SystemInfo()
29
+	if err != nil {
30
+		return nil, err
31
+	}
32
+
33
+	var plugins []api.PluginDescription
34
+	addPlugins := func(typ string, names []string) {
35
+		for _, name := range names {
36
+			plugins = append(plugins, api.PluginDescription{
37
+				Type: typ,
38
+				Name: name,
39
+			})
40
+		}
41
+	}
42
+
43
+	addPlugins("Volume", info.Plugins.Volume)
44
+	// Add builtin driver "overlay" (the only builtin multi-host driver) to
45
+	// the plugin list by default.
46
+	addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
47
+	addPlugins("Authorization", info.Plugins.Authorization)
48
+
49
+	// parse []string labels into a map[string]string
50
+	labels := map[string]string{}
51
+	for _, l := range info.Labels {
52
+		stringSlice := strings.SplitN(l, "=", 2)
53
+		// this will take the last value in the list for a given key
54
+		// ideally, one shouldn't assign multiple values to the same key
55
+		if len(stringSlice) > 1 {
56
+			labels[stringSlice[0]] = stringSlice[1]
57
+		}
58
+	}
59
+
60
+	description := &api.NodeDescription{
61
+		Hostname: info.Name,
62
+		Platform: &api.Platform{
63
+			Architecture: info.Architecture,
64
+			OS:           info.OSType,
65
+		},
66
+		Engine: &api.EngineDescription{
67
+			EngineVersion: info.ServerVersion,
68
+			Labels:        labels,
69
+			Plugins:       plugins,
70
+		},
71
+		Resources: &api.Resources{
72
+			NanoCPUs:    int64(info.NCPU) * 1e9,
73
+			MemoryBytes: info.MemTotal,
74
+		},
75
+	}
76
+
77
+	return description, nil
78
+}
79
+
80
+func (e *executor) Configure(ctx context.Context, node *api.Node) error {
81
+	na := node.Attachment
82
+	if na == nil {
83
+		return nil
84
+	}
85
+
86
+	options := types.NetworkCreate{
87
+		Driver: na.Network.DriverState.Name,
88
+		IPAM: network.IPAM{
89
+			Driver: na.Network.IPAM.Driver.Name,
90
+		},
91
+		Options:        na.Network.DriverState.Options,
92
+		CheckDuplicate: true,
93
+	}
94
+
95
+	for _, ic := range na.Network.IPAM.Configs {
96
+		c := network.IPAMConfig{
97
+			Subnet:  ic.Subnet,
98
+			IPRange: ic.Range,
99
+			Gateway: ic.Gateway,
100
+		}
101
+		options.IPAM.Config = append(options.IPAM.Config, c)
102
+	}
103
+
104
+	return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
105
+		na.Network.ID,
106
+		types.NetworkCreateRequest{
107
+			Name:          na.Network.Spec.Annotations.Name,
108
+			NetworkCreate: options,
109
+		},
110
+	}, na.Addresses[0])
111
+}
112
+
113
+// Controller returns a docker container runner.
114
+func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
115
+	ctlr, err := newController(e.backend, t)
116
+	if err != nil {
117
+		return nil, err
118
+	}
119
+
120
+	return ctlr, nil
121
+}
122
+
123
+func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
124
+	nwKeys := []*networktypes.EncryptionKey{}
125
+	for _, key := range keys {
126
+		nwKey := &networktypes.EncryptionKey{
127
+			Subsystem:   key.Subsystem,
128
+			Algorithm:   int32(key.Algorithm),
129
+			Key:         make([]byte, len(key.Key)),
130
+			LamportTime: key.LamportTime,
131
+		}
132
+		copy(nwKey.Key, key.Key)
133
+		nwKeys = append(nwKeys, nwKey)
134
+	}
135
+	e.backend.SetNetworkBootstrapKeys(nwKeys)
136
+
137
+	return nil
138
+}
0 139
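One subtlety in Describe above: labels are split with strings.SplitN(l, "=", 2), so only the first '=' separates key from value, and values may themselves contain '='. A standalone sketch of that behavior, with made-up labels:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		in := []string{"storage=ssd", "zone=eu=west", "bogus"}
		labels := map[string]string{}
		for _, l := range in {
			kv := strings.SplitN(l, "=", 2) // split on the first '=' only
			if len(kv) > 1 {
				labels[kv[0]] = kv[1] // entries without '=' are silently dropped
			}
		}
		fmt.Println(labels) // e.g. map[storage:ssd zone:eu=west]
	}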
new file mode 100644
... ...
@@ -0,0 +1,93 @@
0
+package cluster
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+
6
+	runconfigopts "github.com/docker/docker/runconfig/opts"
7
+	"github.com/docker/engine-api/types/filters"
8
+	swarmapi "github.com/docker/swarmkit/api"
9
+)
10
+
11
+func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {
12
+	accepted := map[string]bool{
13
+		"name":       true,
14
+		"id":         true,
15
+		"label":      true,
16
+		"role":       true,
17
+		"membership": true,
18
+	}
19
+	if err := filter.Validate(accepted); err != nil {
20
+		return nil, err
21
+	}
22
+	f := &swarmapi.ListNodesRequest_Filters{
23
+		Names:      filter.Get("name"),
24
+		IDPrefixes: filter.Get("id"),
25
+		Labels:     runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
26
+	}
27
+
28
+	for _, r := range filter.Get("role") {
29
+		if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok {
30
+			f.Roles = append(f.Roles, swarmapi.NodeRole(role))
31
+		} else if r != "" {
32
+			return nil, fmt.Errorf("Invalid role filter: '%s'", r)
33
+		}
34
+	}
35
+
36
+	for _, a := range filter.Get("membership") {
37
+		if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok {
38
+			f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership))
39
+		} else if a != "" {
40
+			return nil, fmt.Errorf("Invalid membership filter: '%s'", a)
41
+		}
42
+	}
43
+
44
+	return f, nil
45
+}
46
+
47
+func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) {
48
+	accepted := map[string]bool{
49
+		"name":  true,
50
+		"id":    true,
51
+		"label": true,
52
+	}
53
+	if err := filter.Validate(accepted); err != nil {
54
+		return nil, err
55
+	}
56
+	return &swarmapi.ListServicesRequest_Filters{
57
+		Names:      filter.Get("name"),
58
+		IDPrefixes: filter.Get("id"),
59
+		Labels:     runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
60
+	}, nil
61
+}
62
+
63
+func newListTasksFilters(filter filters.Args) (*swarmapi.ListTasksRequest_Filters, error) {
64
+	accepted := map[string]bool{
65
+		"name":          true,
66
+		"id":            true,
67
+		"label":         true,
68
+		"service":       true,
69
+		"node":          true,
70
+		"desired_state": true,
71
+	}
72
+	if err := filter.Validate(accepted); err != nil {
73
+		return nil, err
74
+	}
75
+	f := &swarmapi.ListTasksRequest_Filters{
76
+		Names:      filter.Get("name"),
77
+		IDPrefixes: filter.Get("id"),
78
+		Labels:     runconfigopts.ConvertKVStringsToMap(filter.Get("label")),
79
+		ServiceIDs: filter.Get("service"),
80
+		NodeIDs:    filter.Get("node"),
81
+	}
82
+
83
+	for _, s := range filter.Get("desired_state") {
84
+		if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok {
85
+			f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state))
86
+		} else if s != "" {
87
+			return nil, fmt.Errorf("Invalid desired_state filter: '%s'", s)
88
+		}
89
+	}
90
+
91
+	return f, nil
92
+}
0 93
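A hedged usage sketch of the conversion above, written as if it lived in the same cluster package; the filter values are illustrative:

	func exampleNodeFilters() (*swarmapi.ListNodesRequest_Filters, error) {
		args := filters.NewArgs()
		args.Add("role", "manager")
		args.Add("label", "region=us-east")

		// An unaccepted key or an unknown role value surfaces here.
		f, err := newListNodesFilters(args)
		if err != nil {
			return nil, err
		}
		// f.Labels == map[string]string{"region": "us-east"}; f.Roles holds
		// the enum value looked up via swarmapi.NodeRole_value["MANAGER"].
		return f, nil
	}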
new file mode 100644
... ...
@@ -0,0 +1,108 @@
0
+package cluster
1
+
2
+import (
3
+	"fmt"
4
+
5
+	swarmapi "github.com/docker/swarmkit/api"
6
+	"golang.org/x/net/context"
7
+)
8
+
9
+func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) {
10
+	rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{})
11
+	if err != nil {
12
+		return nil, err
13
+	}
14
+
15
+	if len(rl.Clusters) == 0 {
16
+		return nil, fmt.Errorf("swarm not found")
17
+	}
18
+
19
+	// TODO: assume one cluster only
20
+	return rl.Clusters[0], nil
21
+}
22
+
23
+func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) {
24
+	// GetNode to match via full ID.
25
+	rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input})
26
+	if err != nil {
27
+		// If any error (including NotFound), ListNodes to match via full name.
28
+		rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}})
29
+
30
+		if err != nil || len(rl.Nodes) == 0 {
31
+			// If any error or 0 result, ListNodes to match via ID prefix.
32
+			rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}})
33
+		}
34
+
35
+		if err != nil {
36
+			return nil, err
37
+		}
38
+
39
+		if len(rl.Nodes) == 0 {
40
+			return nil, fmt.Errorf("node %s not found", input)
41
+		}
42
+
43
+		if l := len(rl.Nodes); l > 1 {
44
+			return nil, fmt.Errorf("node %s is ambigious (%d matches found)", input, l)
45
+		}
46
+
47
+		return rl.Nodes[0], nil
48
+	}
49
+	return rg.Node, nil
50
+}
51
+
52
+func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) {
53
+	// GetService to match via full ID.
54
+	rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input})
55
+	if err != nil {
56
+		// If any error (including NotFound), ListServices to match via full name.
57
+		rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}})
58
+		if err != nil || len(rl.Services) == 0 {
59
+			// If any error or 0 result, ListServices to match via ID prefix.
60
+			rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}})
61
+		}
62
+
63
+		if err != nil {
64
+			return nil, err
65
+		}
66
+
67
+		if len(rl.Services) == 0 {
68
+			return nil, fmt.Errorf("service %s not found", input)
69
+		}
70
+
71
+		if l := len(rl.Services); l > 1 {
72
+			return nil, fmt.Errorf("service %s is ambigious (%d matches found)", input, l)
73
+		}
74
+
75
+		return rl.Services[0], nil
76
+	}
77
+	return rg.Service, nil
78
+}
79
+
80
+func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) {
81
+	// GetTask to match via full ID.
82
+	rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input})
83
+	if err != nil {
84
+		// If any error (including NotFound), ListTasks to match via full name.
85
+		rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}})
86
+
87
+		if err != nil || len(rl.Tasks) == 0 {
88
+			// If any error or 0 result, ListTasks to match via ID prefix.
89
+			rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}})
90
+		}
91
+
92
+		if err != nil {
93
+			return nil, err
94
+		}
95
+
96
+		if len(rl.Tasks) == 0 {
97
+			return nil, fmt.Errorf("task %s not found", input)
98
+		}
99
+
100
+		if l := len(rl.Tasks); l > 1 {
101
+			return nil, fmt.Errorf("task %s is ambigious (%d matches found)", input, l)
102
+		}
103
+
104
+		return rl.Tasks[0], nil
105
+	}
106
+	return rg.Task, nil
107
+}
0 108
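getNode, getService and getTask share one resolution strategy, traced here for a hypothetical input "4f1a":

	// 1. GetNode(NodeID: "4f1a")            exact full-ID match
	// 2. ListNodes(Names: ["4f1a"])         exact name match, tried on any GetNode error
	// 3. ListNodes(IDPrefixes: ["4f1a"])    ID-prefix match, tried on error or an empty name match
	// Zero results fail with "not found"; more than one fails as ambiguous.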
new file mode 100644
... ...
@@ -0,0 +1,36 @@
0
+package provider
1
+
2
+import "github.com/docker/engine-api/types"
3
+
4
+// NetworkCreateRequest is a request when creating a network.
5
+type NetworkCreateRequest struct {
6
+	ID string
7
+	types.NetworkCreateRequest
8
+}
9
+
10
+// NetworkCreateResponse is a response when creating a network.
11
+type NetworkCreateResponse struct {
12
+	ID string `json:"Id"`
13
+}
14
+
15
+// VirtualAddress represents a virtual address.
16
+type VirtualAddress struct {
17
+	IPv4 string
18
+	IPv6 string
19
+}
20
+
21
+// PortConfig represents a port configuration.
22
+type PortConfig struct {
23
+	Name          string
24
+	Protocol      int32
25
+	TargetPort    uint32
26
+	PublishedPort uint32
27
+}
28
+
29
+// ServiceConfig represents a service configuration.
30
+type ServiceConfig struct {
31
+	ID               string
32
+	Name             string
33
+	VirtualAddresses map[string]*VirtualAddress
34
+	ExposedPorts     []*PortConfig
35
+}
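Because the engine-api request type is embedded, callers fill in the swarm-side ID and the regular create payload in one literal, as the executor's Configure does for the ingress network. An illustrative literal (the ID and names are made up; assumes the clustertypes and types import aliases used elsewhere in this PR):

	req := clustertypes.NetworkCreateRequest{
		ID: "hypothetical-swarm-network-id",
		NetworkCreateRequest: types.NetworkCreateRequest{
			Name:          "ingress",
			NetworkCreate: types.NetworkCreate{Driver: "overlay"},
		},
	}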
... ...
@@ -101,7 +101,7 @@ func (daemon *Daemon) Register(c *container.Container) error {
101 101
 	return nil
102 102
 }
103 103
 
104
-func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID) (*container.Container, error) {
104
+func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID, managed bool) (*container.Container, error) {
105 105
 	var (
106 106
 		id             string
107 107
 		err            error
... ...
@@ -117,6 +117,7 @@ func (daemon *Daemon) newContainer(name string, config *containertypes.Config, i
117 117
 
118 118
 	base := daemon.newBaseContainer(id)
119 119
 	base.Created = time.Now().UTC()
120
+	base.Managed = managed
120 121
 	base.Path = entrypoint
121 122
 	base.Args = args //FIXME: de-duplicate from config
122 123
 	base.Config = config
... ...
@@ -324,6 +324,10 @@ func (daemon *Daemon) updateNetwork(container *container.Container) error {
324 324
 	return nil
325 325
 }
326 326
 
327
+func errClusterNetworkOnRun(n string) error {
328
+	return fmt.Errorf("swarm-scoped network (%s) is not compatible with `docker create` or `docker run`. This network can be only used docker service", n)
329
+}
330
+
327 331
 // updateContainerNetworkSettings update the network settings
328 332
 func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error {
329 333
 	var (
... ...
@@ -345,6 +349,9 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
345 345
 		if err != nil {
346 346
 			return err
347 347
 		}
348
+		if !container.Managed && n.Info().Dynamic() {
349
+			return errClusterNetworkOnRun(networkName)
350
+		}
348 351
 		networkName = n.Name()
349 352
 	}
350 353
 	if container.NetworkSettings == nil {
... ...
@@ -19,8 +19,17 @@ import (
19 19
 	"github.com/opencontainers/runc/libcontainer/label"
20 20
 )
21 21
 
22
-// ContainerCreate creates a container.
22
+// CreateManagedContainer creates a container that is managed by a Service
23
+func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
24
+	return daemon.containerCreate(params, true)
25
+}
26
+
27
+// ContainerCreate creates a regular container
23 28
 func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
29
+	return daemon.containerCreate(params, false)
30
+}
31
+
32
+func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (types.ContainerCreateResponse, error) {
24 33
 	if params.Config == nil {
25 34
 		return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container")
26 35
 	}
... ...
@@ -43,7 +52,7 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types
43 43
 		return types.ContainerCreateResponse{Warnings: warnings}, err
44 44
 	}
45 45
 
46
-	container, err := daemon.create(params)
46
+	container, err := daemon.create(params, managed)
47 47
 	if err != nil {
48 48
 		return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err)
49 49
 	}
... ...
@@ -52,7 +61,7 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (types
52 52
 }
53 53
 
54 54
 // Create creates a new container from the given configuration with a given name.
55
-func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *container.Container, retErr error) {
55
+func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) {
56 56
 	var (
57 57
 		container *container.Container
58 58
 		img       *image.Image
... ...
@@ -76,7 +85,7 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig) (retC *containe
76 76
 		return nil, err
77 77
 	}
78 78
 
79
-	if container, err = daemon.newContainer(params.Name, params.Config, imgID); err != nil {
79
+	if container, err = daemon.newContainer(params.Name, params.Config, imgID, managed); err != nil {
80 80
 		return nil, err
81 81
 	}
82 82
 	defer func() {
... ...
@@ -28,6 +28,7 @@ import (
28 28
 	"github.com/docker/docker/daemon/exec"
29 29
 	"github.com/docker/engine-api/types"
30 30
 	containertypes "github.com/docker/engine-api/types/container"
31
+	"github.com/docker/libnetwork/cluster"
31 32
 	// register graph drivers
32 33
 	_ "github.com/docker/docker/daemon/graphdriver/register"
33 34
 	dmetadata "github.com/docker/docker/distribution/metadata"
... ...
@@ -94,6 +95,7 @@ type Daemon struct {
94 94
 	containerd                libcontainerd.Client
95 95
 	containerdRemote          libcontainerd.Remote
96 96
 	defaultIsolation          containertypes.Isolation // Default isolation mode on Windows
97
+	clusterProvider           cluster.Provider
97 98
 }
98 99
 
99 100
 func (daemon *Daemon) restore() error {
... ...
@@ -344,6 +346,12 @@ func (daemon *Daemon) registerLink(parent, child *container.Container, alias str
344 344
 	return nil
345 345
 }
346 346
 
347
+// SetClusterProvider sets a component for querying the current cluster state.
348
+func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
349
+	daemon.clusterProvider = clusterProvider
350
+	daemon.netController.SetClusterProvider(clusterProvider)
351
+}
352
+
347 353
 // NewDaemon sets up everything for the daemon to be able to service
348 354
 // requests from the webserver.
349 355
 func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
... ...
@@ -893,6 +901,10 @@ func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
893 893
 		return nil
894 894
 	}
895 895
 
896
+	if daemon.clusterProvider != nil {
897
+		return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode")
898
+	}
899
+
896 900
 	// enable discovery for the first time if it was not previously enabled
897 901
 	if daemon.discoveryWatcher == nil {
898 902
 		discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
... ...
@@ -23,10 +23,12 @@ func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (
23 23
 	case versions.Equal(version, "1.20"):
24 24
 		return daemon.containerInspect120(name)
25 25
 	}
26
-	return daemon.containerInspectCurrent(name, size)
26
+	return daemon.ContainerInspectCurrent(name, size)
27 27
 }
28 28
 
29
-func (daemon *Daemon) containerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
29
+// ContainerInspectCurrent returns low-level information about a
30
+// container in the most recent API version.
31
+func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) {
30 32
 	container, err := daemon.GetContainer(name)
31 33
 	if err != nil {
32 34
 		return nil, err
... ...
@@ -28,7 +28,7 @@ func addMountPoints(container *container.Container) []types.MountPoint {
28 28
 
29 29
 // containerInspectPre120 get containers for pre 1.20 APIs.
30 30
 func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) {
31
-	return daemon.containerInspectCurrent(name, false)
31
+	return daemon.ContainerInspectCurrent(name, false)
32 32
 }
33 33
 
34 34
 func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
... ...
@@ -91,6 +91,17 @@ func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.C
91 91
 	return daemon.reduceContainers(config, daemon.transformContainer)
92 92
 }
93 93
 
94
+// ListContainersForNode returns all container IDs that match the specified nodeID
95
+func (daemon *Daemon) ListContainersForNode(nodeID string) []string {
96
+	var ids []string
97
+	for _, c := range daemon.List() {
98
+		if c.Config.Labels["com.docker.swarm.node.id"] == nodeID {
99
+			ids = append(ids, c.ID)
100
+		}
101
+	}
102
+	return ids
103
+}
104
+
94 105
 func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container {
95 106
 	idSearch := false
96 107
 	names := ctx.filters.Get("name")
... ...
@@ -5,13 +5,14 @@ import (
5 5
 	"net"
6 6
 	"strings"
7 7
 
8
-	netsettings "github.com/docker/docker/daemon/network"
8
+	"github.com/Sirupsen/logrus"
9
+	clustertypes "github.com/docker/docker/daemon/cluster/provider"
9 10
 	"github.com/docker/docker/errors"
10 11
 	"github.com/docker/docker/runconfig"
11 12
 	"github.com/docker/engine-api/types"
12
-	"github.com/docker/engine-api/types/filters"
13 13
 	"github.com/docker/engine-api/types/network"
14 14
 	"github.com/docker/libnetwork"
15
+	networktypes "github.com/docker/libnetwork/types"
15 16
 )
16 17
 
17 18
 // NetworkControllerEnabled checks if the networking stack is enabled.
... ...
@@ -92,9 +93,106 @@ func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
92 92
 	return list
93 93
 }
94 94
 
95
+func isIngressNetwork(name string) bool {
96
+	return name == "ingress"
97
+}
98
+
99
+var ingressChan = make(chan struct{}, 1)
100
+
101
+func ingressWait() func() {
102
+	ingressChan <- struct{}{}
103
+	return func() { <-ingressChan }
104
+}
105
+
106
+// SetupIngress sets up ingress networking.
107
+func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error {
108
+	ip, _, err := net.ParseCIDR(nodeIP)
109
+	if err != nil {
110
+		return err
111
+	}
112
+
113
+	go func() {
114
+		controller := daemon.netController
115
+		controller.AgentInitWait()
116
+
117
+		if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID {
118
+			if err := controller.SandboxDestroy("ingress-sbox"); err != nil {
119
+				logrus.Errorf("Failed to delete stale ingress sandbox: %v", err)
120
+				return
121
+			}
122
+
123
+			if err := n.Delete(); err != nil {
124
+				logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err)
125
+				return
126
+			}
127
+		}
128
+
129
+		if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
130
+			// If it is any other error other than already
131
+			// exists error log error and return.
132
+			if _, ok := err.(libnetwork.NetworkNameError); !ok {
133
+				logrus.Errorf("Failed creating ingress network: %v", err)
134
+				return
135
+			}
136
+
137
+			// Otherwise continue down the call to create or recreate sandbox.
138
+		}
139
+
140
+		n, err := daemon.GetNetworkByID(create.ID)
141
+		if err != nil {
142
+			logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
143
+			return
144
+		}
145
+
146
+		sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress())
147
+		if err != nil {
148
+			logrus.Errorf("Failed creating ingress sanbox: %v", err)
149
+			return
150
+		}
151
+
152
+		ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil))
153
+		if err != nil {
154
+			logrus.Errorf("Failed creating ingress endpoint: %v", err)
155
+			return
156
+		}
157
+
158
+		if err := ep.Join(sb, nil); err != nil {
159
+			logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err)
160
+		}
161
+	}()
162
+
163
+	return nil
164
+}
165
+
166
+// SetNetworkBootstrapKeys sets the bootstrap keys.
167
+func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error {
168
+	return daemon.netController.SetKeys(keys)
169
+}
170
+
171
+// CreateManagedNetwork creates an agent network.
172
+func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
173
+	_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
174
+	return err
175
+}
176
+
95 177
 // CreateNetwork creates a network with the given name, driver and other optional parameters
96 178
 func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
97
-	if runconfig.IsPreDefinedNetwork(create.Name) {
179
+	resp, err := daemon.createNetwork(create, "", false)
180
+	if err != nil {
181
+		return nil, err
182
+	}
183
+	return resp, nil
184
+}
185
+
186
+func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
187
+	// If there is a pending ingress network creation, wait here
188
+	// since ingress network creation can happen via node download
189
+	// from manager or task download.
190
+	if isIngressNetwork(create.Name) {
191
+		defer ingressWait()()
192
+	}
193
+
194
+	if runconfig.IsPreDefinedNetwork(create.Name) && !agent {
98 195
 		err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name)
99 196
 		return nil, errors.NewRequestForbiddenError(err)
100 197
 	}
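The ingressWait/ingressChan pair above is a one-slot-channel lock: sending into a buffered channel of capacity 1 blocks while another holder exists, and the function it returns releases the slot. A standalone sketch of the idiom, with hypothetical names:

	var slot = make(chan struct{}, 1)

	// lock acquires the slot and returns the matching release function.
	func lock() func() {
		slot <- struct{}{} // blocks while someone else holds the slot
		return func() { <-slot }
	}

	func critical() {
		defer lock()() // double call: acquire now, release on return
		// ... exclusive section ...
	}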
... ...
@@ -134,7 +232,16 @@ func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.N
134 134
 	if create.Internal {
135 135
 		nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
136 136
 	}
137
-	n, err := c.NewNetwork(driver, create.Name, "", nwOptions...)
137
+	if agent {
138
+		nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic())
139
+		nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false))
140
+	}
141
+
142
+	if isIngressNetwork(create.Name) {
143
+		nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress())
144
+	}
145
+
146
+	n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
138 147
 	if err != nil {
139 148
 		return nil, err
140 149
 	}
... ...
@@ -168,6 +275,17 @@ func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnet
168 168
 	return ipamV4Cfg, ipamV6Cfg, nil
169 169
 }
170 170
 
171
+// UpdateContainerServiceConfig updates a service configuration.
172
+func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
173
+	container, err := daemon.GetContainer(containerName)
174
+	if err != nil {
175
+		return err
176
+	}
177
+
178
+	container.NetworkSettings.Service = serviceConfig
179
+	return nil
180
+}
181
+
171 182
 // ConnectContainerToNetwork connects the given container to the given
172 183
 // network. If either cannot be found, an err is returned. If the
173 184
 // network cannot be set up, an err is returned.
... ...
@@ -207,18 +325,29 @@ func (daemon *Daemon) GetNetworkDriverList() map[string]bool {
207 207
 		driver := network.Type()
208 208
 		pluginList[driver] = true
209 209
 	}
210
+	// TODO: Replace this with proper libnetwork API
211
+	pluginList["overlay"] = true
210 212
 
211 213
 	return pluginList
212 214
 }
213 215
 
216
+// DeleteManagedNetwork deletes an agent network.
217
+func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
218
+	return daemon.deleteNetwork(networkID, true)
219
+}
220
+
214 221
 // DeleteNetwork destroys a network unless it's one of docker's predefined networks.
215 222
 func (daemon *Daemon) DeleteNetwork(networkID string) error {
223
+	return daemon.deleteNetwork(networkID, false)
224
+}
225
+
226
+func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
216 227
 	nw, err := daemon.FindNetwork(networkID)
217 228
 	if err != nil {
218 229
 		return err
219 230
 	}
220 231
 
221
-	if runconfig.IsPreDefinedNetwork(nw.Name()) {
232
+	if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
222 233
 		err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
223 234
 		return errors.NewRequestForbiddenError(err)
224 235
 	}
... ...
@@ -230,14 +359,7 @@ func (daemon *Daemon) DeleteNetwork(networkID string) error {
230 230
 	return nil
231 231
 }
232 232
 
233
-// FilterNetworks returns a list of networks filtered by the given arguments.
234
-// It returns an error if the filters are not included in the list of accepted filters.
235
-func (daemon *Daemon) FilterNetworks(netFilters filters.Args) ([]libnetwork.Network, error) {
236
-	if netFilters.Len() != 0 {
237
-		if err := netFilters.Validate(netsettings.AcceptedFilters); err != nil {
238
-			return nil, err
239
-		}
240
-	}
241
-	nwList := daemon.getAllNetworks()
242
-	return netsettings.FilterNetworks(nwList, netFilters)
233
+// GetNetworks returns a list of all networks
234
+func (daemon *Daemon) GetNetworks() []libnetwork.Network {
235
+	return daemon.getAllNetworks()
243 236
 }
... ...
@@ -1,6 +1,7 @@
1 1
 package network
2 2
 
3 3
 import (
4
+	clustertypes "github.com/docker/docker/daemon/cluster/provider"
4 5
 	networktypes "github.com/docker/engine-api/types/network"
5 6
 	"github.com/docker/go-connections/nat"
6 7
 )
... ...
@@ -14,6 +15,7 @@ type Settings struct {
14 14
 	LinkLocalIPv6Address   string
15 15
 	LinkLocalIPv6PrefixLen int
16 16
 	Networks               map[string]*networktypes.EndpointSettings
17
+	Service                *clustertypes.ServiceConfig
17 18
 	Ports                  nat.PortMap
18 19
 	SandboxKey             string
19 20
 	SecondaryIPAddresses   []networktypes.Address
... ...
@@ -1,6 +1,10 @@
1 1
 package daemon
2 2
 
3
-import "time"
3
+import (
4
+	"time"
5
+
6
+	"golang.org/x/net/context"
7
+)
4 8
 
5 9
 // ContainerWait stops processing until the given container is
6 10
 // stopped. If the container is not found, an error is returned. On a
... ...
@@ -15,3 +19,14 @@ func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, er
15 15
 
16 16
 	return container.WaitStop(timeout)
17 17
 }
18
+
19
+// ContainerWaitWithContext returns a channel where the exit code is sent
20
+// when the container stops. The wait can be cancelled via the context.
21
+func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) (<-chan int, error) {
22
+	container, err := daemon.GetContainer(name)
23
+	if err != nil {
24
+		return nil, err
25
+	}
26
+
27
+	return container.WaitWithContext(ctx), nil
28
+}
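A hedged usage sketch, written as a hypothetical helper in the same daemon package: combining the returned channel with a context deadline gives a wait-with-timeout.

	// Not part of this PR; illustrates the channel/context contract.
	func (daemon *Daemon) waitWithTimeout(name string, d time.Duration) (int, error) {
		ctx, cancel := context.WithTimeout(context.Background(), d)
		defer cancel()

		ch, err := daemon.ContainerWaitWithContext(ctx, name)
		if err != nil {
			return 0, err
		}
		select {
		case code := <-ch:
			return code, nil // container exited; code is its exit status
		case <-ctx.Done():
			return 0, ctx.Err() // deadline hit before the container exited
		}
	}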
... ...
@@ -5,6 +5,8 @@ import (
5 5
 	"net"
6 6
 	"regexp"
7 7
 	"strings"
8
+
9
+	"github.com/docker/engine-api/types/filters"
8 10
 )
9 11
 
10 12
 var (
... ...
@@ -282,3 +284,38 @@ func ValidateSysctl(val string) (string, error) {
282 282
 	}
283 283
 	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
284 284
 }
285
+
286
+// FilterOpt is a flag type for validating filters
287
+type FilterOpt struct {
288
+	filter filters.Args
289
+}
290
+
291
+// NewFilterOpt returns a new FilterOpt
292
+func NewFilterOpt() FilterOpt {
293
+	return FilterOpt{filter: filters.NewArgs()}
294
+}
295
+
296
+func (o *FilterOpt) String() string {
297
+	repr, err := filters.ToParam(o.filter)
298
+	if err != nil {
299
+		return "invalid filters"
300
+	}
301
+	return repr
302
+}
303
+
304
+// Set sets the value of the opt by parsing the command line value
305
+func (o *FilterOpt) Set(value string) error {
306
+	var err error
307
+	o.filter, err = filters.ParseFlag(value, o.filter)
308
+	return err
309
+}
310
+
311
+// Type returns the option type
312
+func (o *FilterOpt) Type() string {
313
+	return "filter"
314
+}
315
+
316
+// Value returns the value of this option
317
+func (o *FilterOpt) Value() filters.Args {
318
+	return o.filter
319
+}
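A minimal usage sketch, assuming the standard library flag package for brevity (the real CLI wiring may differ) and that FilterOpt lands in the docker/docker opts package: String and Set make *FilterOpt a flag.Value, so repeated -filter flags accumulate into a single filters.Args.

	package main

	import (
		"flag"
		"fmt"

		"github.com/docker/docker/opts"
	)

	func main() {
		filter := opts.NewFilterOpt()
		flag.Var(&filter, "filter", "Filter output based on conditions provided")
		flag.Parse() // e.g. -filter name=web -filter label=env=prod

		fmt.Println(filter.Value().Len()) // both conditions merged into one Args
	}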
... ...
@@ -19,7 +19,7 @@ func DefaultDaemonNetworkMode() container.NetworkMode {
19 19
 // IsPreDefinedNetwork indicates if a network is predefined by the daemon
20 20
 func IsPreDefinedNetwork(network string) bool {
21 21
 	n := container.NetworkMode(network)
22
-	return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault()
22
+	return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress"
23 23
 }
24 24
 
25 25
 // ValidateNetMode ensures that the various combinations of requested
... ...
@@ -140,6 +140,17 @@ func (m *MountPoint) Path() string {
140 140
 	return m.Source
141 141
 }
142 142
 
143
+// Type returns the type of mount point
144
+func (m *MountPoint) Type() string {
145
+	if m.Name != "" {
146
+		return "VOLUME"
147
+	}
148
+	if m.Source != "" {
149
+		return "BIND"
150
+	}
151
+	return "EPHEMERAL"
152
+}
153
+
143 154
 // ParseVolumesFrom ensures that the supplied volumes-from is valid.
144 155
 func ParseVolumesFrom(spec string) (string, string, error) {
145 156
 	if len(spec) == 0 {