Browse code

vendor: update containerd and swarmkit

Signed-off-by: Alexander Morozov <lk4d4@docker.com>

Alexander Morozov authored on 2016/09/14 01:28:01
Showing 57 changed files
... ...
@@ -243,7 +243,7 @@ RUN set -x \
243 243
 	&& rm -rf "$GOPATH"
244 244
 
245 245
 # Install containerd
246
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
246
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
247 247
 RUN set -x \
248 248
 	&& export GOPATH="$(mktemp -d)" \
249 249
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
... ...
@@ -186,7 +186,7 @@ RUN set -x \
186 186
 	&& rm -rf "$GOPATH"
187 187
 
188 188
 # Install containerd
189
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
189
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
190 190
 RUN set -x \
191 191
 	&& export GOPATH="$(mktemp -d)" \
192 192
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
... ...
@@ -184,7 +184,7 @@ RUN set -x \
184 184
 	&& rm -rf "$GOPATH"
185 185
 
186 186
 # Install containerd
187
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
187
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
188 188
 RUN set -x \
189 189
 	&& export GOPATH="$(mktemp -d)" \
190 190
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
... ...
@@ -204,7 +204,7 @@ RUN set -x \
204 204
 	&& rm -rf "$GOPATH"
205 205
 
206 206
 # Install containerd
207
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
207
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
208 208
 RUN set -x \
209 209
 	&& export GOPATH="$(mktemp -d)" \
210 210
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
... ...
@@ -196,7 +196,7 @@ RUN set -x \
196 196
 	&& rm -rf "$GOPATH"
197 197
 
198 198
 # Install containerd
199
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
199
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
200 200
 RUN set -x \
201 201
 	&& export GOPATH="$(mktemp -d)" \
202 202
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
... ...
@@ -68,7 +68,7 @@ RUN set -x \
68 68
 	&& rm -rf "$GOPATH"
69 69
 
70 70
 # Install containerd
71
-ENV CONTAINERD_COMMIT 4c21ad662f71af56c0e6b29c0afef72df441d1ff
71
+ENV CONTAINERD_COMMIT 2545227b0357eb55e369fa0072baef9ad91cdb69
72 72
 RUN set -x \
73 73
 	&& export GOPATH="$(mktemp -d)" \
74 74
 	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
... ...
@@ -141,10 +141,10 @@ clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https
141 141
 clone git github.com/docker/docker-credential-helpers v0.3.0
142 142
 
143 143
 # containerd
144
-clone git github.com/docker/containerd 4c21ad662f71af56c0e6b29c0afef72df441d1ff
144
+clone git github.com/docker/containerd 2545227b0357eb55e369fa0072baef9ad91cdb69
145 145
 
146 146
 # cluster
147
-clone git github.com/docker/swarmkit 27fbaef4ceed648bb575969ccc9083a6e104a719
147
+clone git github.com/docker/swarmkit 191acc1bbdb13d8ea3b8059dda14a12f8c3903f2
148 148
 clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
149 149
 clone git github.com/gogo/protobuf v0.3
150 150
 clone git github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
... ...
@@ -75,7 +75,9 @@ var _ = math.Inf
75 75
 
76 76
 // This is a compile-time assertion to ensure that this generated file
77 77
 // is compatible with the proto package it is being compiled against.
78
-const _ = proto.ProtoPackageIsVersion1
78
+// A compilation error at this line likely means your copy of the
79
+// proto package needs to be updated.
80
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
79 81
 
80 82
 type GetServerVersionRequest struct {
81 83
 }
... ...
@@ -223,7 +225,7 @@ func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
223 223
 type User struct {
224 224
 	Uid            uint32   `protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"`
225 225
 	Gid            uint32   `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"`
226
-	AdditionalGids []uint32 `protobuf:"varint,3,rep,name=additionalGids" json:"additionalGids,omitempty"`
226
+	AdditionalGids []uint32 `protobuf:"varint,3,rep,packed,name=additionalGids" json:"additionalGids,omitempty"`
227 227
 }
228 228
 
229 229
 func (m *User) Reset()                    { *m = User{} }
... ...
@@ -385,7 +387,7 @@ type Container struct {
385 385
 	Processes  []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"`
386 386
 	Status     string     `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
387 387
 	Labels     []string   `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"`
388
-	Pids       []uint32   `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"`
388
+	Pids       []uint32   `protobuf:"varint,6,rep,packed,name=pids" json:"pids,omitempty"`
389 389
 	Runtime    string     `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"`
390 390
 }
391 391
 
... ...
@@ -628,7 +630,7 @@ func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []in
628 628
 
629 629
 type CpuUsage struct {
630 630
 	TotalUsage        uint64   `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"`
631
-	PercpuUsage       []uint64 `protobuf:"varint,2,rep,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"`
631
+	PercpuUsage       []uint64 `protobuf:"varint,2,rep,packed,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"`
632 632
 	UsageInKernelmode uint64   `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"`
633 633
 	UsageInUsermode   uint64   `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"`
634 634
 }
... ...
@@ -978,7 +980,7 @@ var _ grpc.ClientConn
978 978
 
979 979
 // This is a compile-time assertion to ensure that this generated file
980 980
 // is compatible with the grpc package it is being compiled against.
981
-const _ = grpc.SupportPackageIsVersion2
981
+const _ = grpc.SupportPackageIsVersion3
982 982
 
983 983
 // Client API for API service
984 984
 
... ...
@@ -1432,8 +1434,11 @@ var _API_serviceDesc = grpc.ServiceDesc{
1432 1432
 			ServerStreams: true,
1433 1433
 		},
1434 1434
 	},
1435
+	Metadata: fileDescriptor0,
1435 1436
 }
1436 1437
 
1438
+func init() { proto.RegisterFile("api.proto", fileDescriptor0) }
1439
+
1437 1440
 var fileDescriptor0 = []byte{
1438 1441
 	// 2604 bytes of a gzipped FileDescriptorProto
1439 1442
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x59, 0x4b, 0x6f, 0x1c, 0x5b,
1440 1443
deleted file mode 100644
... ...
@@ -1,34 +0,0 @@
1
-package reference
2
-
3
-import (
4
-	distreference "github.com/docker/distribution/reference"
5
-)
6
-
7
-// Parse parses the given references and returns the repository and
8
-// tag (if present) from it. If there is an error during parsing, it will
9
-// return an error.
10
-func Parse(ref string) (string, string, error) {
11
-	distributionRef, err := distreference.ParseNamed(ref)
12
-	if err != nil {
13
-		return "", "", err
14
-	}
15
-
16
-	tag := GetTagFromNamedRef(distributionRef)
17
-	return distributionRef.Name(), tag, nil
18
-}
19
-
20
-// GetTagFromNamedRef returns a tag from the specified reference.
21
-// This function is necessary as long as the docker "server" api makes the distinction between repository
22
-// and tags.
23
-func GetTagFromNamedRef(ref distreference.Named) string {
24
-	var tag string
25
-	switch x := ref.(type) {
26
-	case distreference.Digested:
27
-		tag = x.Digest().String()
28
-	case distreference.NamedTagged:
29
-		tag = x.Tag()
30
-	default:
31
-		tag = "latest"
32
-	}
33
-	return tag
34
-}
... ...
@@ -15,6 +15,7 @@ import (
15 15
 const (
16 16
 	initialSessionFailureBackoff = 100 * time.Millisecond
17 17
 	maxSessionFailureBackoff     = 8 * time.Second
18
+	nodeUpdatePeriod             = 20 * time.Second
18 19
 )
19 20
 
20 21
 // Agent implements the primary node functionality for a member of a swarm
... ...
@@ -134,9 +135,18 @@ func (a *Agent) run(ctx context.Context) {
134 134
 	log.G(ctx).Debugf("(*Agent).run")
135 135
 	defer log.G(ctx).Debugf("(*Agent).run exited")
136 136
 
137
+	// get the node description
138
+	nodeDescription, err := a.nodeDescriptionWithHostname(ctx)
139
+	if err != nil {
140
+		log.G(ctx).WithError(err).WithField("agent", a.config.Executor).Errorf("agent: node description unavailable")
141
+	}
142
+	// nodeUpdateTicker is used to periodically check for updates to node description
143
+	nodeUpdateTicker := time.NewTicker(nodeUpdatePeriod)
144
+	defer nodeUpdateTicker.Stop()
145
+
137 146
 	var (
138 147
 		backoff    time.Duration
139
-		session    = newSession(ctx, a, backoff, "") // start the initial session
148
+		session    = newSession(ctx, a, backoff, "", nodeDescription) // start the initial session
140 149
 		registered = session.registered
141 150
 		ready      = a.ready // first session ready
142 151
 		sessionq   chan sessionOperation
... ...
@@ -158,9 +168,16 @@ func (a *Agent) run(ctx context.Context) {
158 158
 		select {
159 159
 		case operation := <-sessionq:
160 160
 			operation.response <- operation.fn(session)
161
-		case msg := <-session.tasks:
162
-			if err := a.worker.Assign(ctx, msg.Tasks); err != nil {
163
-				log.G(ctx).WithError(err).Error("task assignment failed")
161
+		case msg := <-session.assignments:
162
+			switch msg.Type {
163
+			case api.AssignmentsMessage_COMPLETE:
164
+				if err := a.worker.AssignTasks(ctx, msg.UpdateTasks); err != nil {
165
+					log.G(ctx).WithError(err).Error("failed to synchronize worker assignments")
166
+				}
167
+			case api.AssignmentsMessage_INCREMENTAL:
168
+				if err := a.worker.UpdateTasks(ctx, msg.UpdateTasks, msg.RemoveTasks); err != nil {
169
+					log.G(ctx).WithError(err).Error("failed to update worker assignments")
170
+				}
164 171
 			}
165 172
 		case msg := <-session.messages:
166 173
 			if err := a.handleSessionMessage(ctx, msg); err != nil {
... ...
@@ -197,10 +214,42 @@ func (a *Agent) run(ctx context.Context) {
197 197
 			log.G(ctx).Debugf("agent: rebuild session")
198 198
 
199 199
 			// select a session registration delay from backoff range.
200
-			delay := time.Duration(rand.Int63n(int64(backoff)))
201
-			session = newSession(ctx, a, delay, session.sessionID)
200
+			delay := time.Duration(0)
201
+			if backoff > 0 {
202
+				delay = time.Duration(rand.Int63n(int64(backoff)))
203
+			}
204
+			session = newSession(ctx, a, delay, session.sessionID, nodeDescription)
202 205
 			registered = session.registered
203 206
 			sessionq = a.sessionq
207
+		case <-nodeUpdateTicker.C:
208
+			// skip this case if the registration isn't finished
209
+			if registered != nil {
210
+				continue
211
+			}
212
+			// get the current node description
213
+			newNodeDescription, err := a.nodeDescriptionWithHostname(ctx)
214
+			if err != nil {
215
+				log.G(ctx).WithError(err).WithField("agent", a.config.Executor).Errorf("agent: updated node description unavailable")
216
+			}
217
+
218
+			// if newNodeDescription is nil, it will cause a panic when
219
+			// trying to create a session. Typically this can happen
220
+			// if the engine goes down
221
+			if newNodeDescription == nil {
222
+				continue
223
+			}
224
+
225
+			// if the node description has changed, update it to the new one
226
+			// and close the session. The old session will be stopped and a
227
+			// new one will be created with the updated description
228
+			if !reflect.DeepEqual(nodeDescription, newNodeDescription) {
229
+				nodeDescription = newNodeDescription
230
+				// close the session
231
+				log.G(ctx).Info("agent: found node update")
232
+				if err := session.close(); err != nil {
233
+					log.G(ctx).WithError(err).Error("agent: closing session for node update failed")
234
+				}
235
+			}
204 236
 		case <-a.stopped:
205 237
 			// TODO(stevvooe): Wait on shutdown and cleanup. May need to pump
206 238
 			// this loop a few times.
... ...
@@ -315,7 +364,8 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api
315 315
 				if err == errTaskUnknown {
316 316
 					err = nil // dispatcher no longer cares about this task.
317 317
 				} else {
318
-					log.G(ctx).WithError(err).Error("sending task status update failed")
318
+					log.G(ctx).WithError(err).Error("closing session after fatal error")
319
+					session.close()
319 320
 				}
320 321
 			} else {
321 322
 				log.G(ctx).Debug("task status reported")
... ...
@@ -337,6 +387,17 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api
337 337
 	}
338 338
 }
339 339
 
340
+// nodeDescriptionWithHostname retrieves node description, and overrides hostname if available
341
+func (a *Agent) nodeDescriptionWithHostname(ctx context.Context) (*api.NodeDescription, error) {
342
+	desc, err := a.config.Executor.Describe(ctx)
343
+
344
+	// Override hostname
345
+	if a.config.Hostname != "" && desc != nil {
346
+		desc.Hostname = a.config.Hostname
347
+	}
348
+	return desc, err
349
+}
350
+
340 351
 // nodesEqual returns true if the node states are functionaly equal, ignoring status,
341 352
 // version and other superfluous fields.
342 353
 //
... ...
@@ -29,7 +29,7 @@ type Config struct {
29 29
 	NotifyRoleChange chan<- api.NodeRole
30 30
 
31 31
 	// Credentials is credentials for grpc connection to manager.
32
-	Credentials credentials.TransportAuthenticator
32
+	Credentials credentials.TransportCredentials
33 33
 }
34 34
 
35 35
 func (c *Config) validate() error {
... ...
@@ -147,7 +147,7 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
147 147
 		if cs, ok := err.(ContainerStatuser); ok {
148 148
 			var err error
149 149
 			containerStatus, err = cs.ContainerStatus(ctx)
150
-			if err != nil {
150
+			if err != nil && !contextDoneError(err) {
151 151
 				log.G(ctx).WithError(err).Error("error resolving container status on fatal")
152 152
 			}
153 153
 		}
... ...
@@ -207,7 +207,7 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus,
207 207
 
208 208
 			var err error
209 209
 			containerStatus, err = cctlr.ContainerStatus(ctx)
210
-			if err != nil {
210
+			if err != nil && !contextDoneError(err) {
211 211
 				log.G(ctx).WithError(err).Error("container status unavailable")
212 212
 			}
213 213
 
... ...
@@ -297,3 +297,8 @@ func logStateChange(ctx context.Context, desired, previous, next api.TaskState)
297 297
 		log.G(ctx).WithFields(fields).Debug("state changed")
298 298
 	}
299 299
 }
300
+
301
+func contextDoneError(err error) bool {
302
+	cause := errors.Cause(err)
303
+	return cause == context.Canceled || cause == context.DeadlineExceeded
304
+}
... ...
@@ -120,7 +120,7 @@ func NewNode(c *NodeConfig) (*Node, error) {
120 120
 
121 121
 	n := &Node{
122 122
 		remotes:              newPersistentRemotes(stateFile, p...),
123
-		role:                 ca.AgentRole,
123
+		role:                 ca.WorkerRole,
124 124
 		config:               c,
125 125
 		started:              make(chan struct{}),
126 126
 		stopped:              make(chan struct{}),
... ...
@@ -194,7 +194,9 @@ func (n *Node) run(ctx context.Context) (err error) {
194 194
 		select {
195 195
 		case <-ctx.Done():
196 196
 		case resp := <-issueResponseChan:
197
-			logrus.Debugf("Requesting certificate for NodeID: %v", resp.NodeID)
197
+			log.G(log.WithModule(ctx, "tls")).WithFields(logrus.Fields{
198
+				"node.id": resp.NodeID,
199
+			}).Debugf("requesting certificate")
198 200
 			n.Lock()
199 201
 			n.nodeID = resp.NodeID
200 202
 			n.nodeMembership = resp.NodeMembership
... ...
@@ -233,7 +235,7 @@ func (n *Node) run(ctx context.Context) (err error) {
233 233
 			case apirole := <-n.roleChangeReq:
234 234
 				n.Lock()
235 235
 				lastRole := n.role
236
-				role := ca.AgentRole
236
+				role := ca.WorkerRole
237 237
 				if apirole == api.NodeRoleManager {
238 238
 					role = ca.ManagerRole
239 239
 				}
... ...
@@ -242,7 +244,7 @@ func (n *Node) run(ctx context.Context) (err error) {
242 242
 					continue
243 243
 				}
244 244
 				// switch role to agent immediately to shutdown manager early
245
-				if role == ca.AgentRole {
245
+				if role == ca.WorkerRole {
246 246
 					n.role = role
247 247
 					n.roleCond.Broadcast()
248 248
 				}
... ...
@@ -343,7 +345,7 @@ func (n *Node) Err(ctx context.Context) error {
343 343
 	}
344 344
 }
345 345
 
346
-func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.TransportAuthenticator, ready chan<- struct{}) error {
346
+func (n *Node) runAgent(ctx context.Context, db *bolt.DB, creds credentials.TransportCredentials, ready chan<- struct{}) error {
347 347
 	select {
348 348
 	case <-ctx.Done():
349 349
 	case <-n.remotes.WaitSelect(ctx):
... ...
@@ -588,7 +590,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
588 588
 			return err
589 589
 		}
590 590
 
591
-		remoteAddr, _ := n.remotes.Select(n.nodeID)
591
+		remoteAddr, _ := n.remotes.Select(n.NodeID())
592 592
 		m, err := manager.New(&manager.Config{
593 593
 			ForceNewCluster: n.config.ForceNewCluster,
594 594
 			ProtoAddr: map[string]string{
... ...
@@ -607,8 +609,9 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
607 607
 			return err
608 608
 		}
609 609
 		done := make(chan struct{})
610
+		var runErr error
610 611
 		go func() {
611
-			m.Run(context.Background()) // todo: store error
612
+			runErr = m.Run(context.Background())
612 613
 			close(done)
613 614
 		}()
614 615
 
... ...
@@ -624,14 +627,31 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
624 624
 			go func(ready chan struct{}) {
625 625
 				select {
626 626
 				case <-ready:
627
-					n.remotes.Observe(api.Peer{NodeID: n.nodeID, Addr: n.config.ListenRemoteAPI}, remotes.DefaultObservationWeight)
627
+					n.remotes.Observe(api.Peer{NodeID: n.NodeID(), Addr: n.config.ListenRemoteAPI}, remotes.DefaultObservationWeight)
628 628
 				case <-connCtx.Done():
629 629
 				}
630 630
 			}(ready)
631 631
 			ready = nil
632 632
 		}
633 633
 
634
-		err = n.waitRole(ctx, ca.AgentRole)
634
+		roleChanged := make(chan error)
635
+		waitCtx, waitCancel := context.WithCancel(ctx)
636
+		go func() {
637
+			err := n.waitRole(waitCtx, ca.WorkerRole)
638
+			roleChanged <- err
639
+		}()
640
+
641
+		select {
642
+		case <-done:
643
+			// Fail out if m.Run() returns error, otherwise wait for
644
+			// role change.
645
+			if runErr != nil {
646
+				err = runErr
647
+			} else {
648
+				err = <-roleChanged
649
+			}
650
+		case err = <-roleChanged:
651
+		}
635 652
 
636 653
 		n.Lock()
637 654
 		n.manager = nil
... ...
@@ -646,6 +666,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
646 646
 		}
647 647
 		connCancel()
648 648
 		n.setControlSocket(nil)
649
+		waitCancel()
649 650
 
650 651
 		if err != nil {
651 652
 			return err
... ...
@@ -672,17 +693,18 @@ func newPersistentRemotes(f string, peers ...api.Peer) *persistentRemotes {
672 672
 
673 673
 func (s *persistentRemotes) Observe(peer api.Peer, weight int) {
674 674
 	s.Lock()
675
+	defer s.Unlock()
675 676
 	s.Remotes.Observe(peer, weight)
676 677
 	s.c.Broadcast()
677 678
 	if err := s.save(); err != nil {
678 679
 		logrus.Errorf("error writing cluster state file: %v", err)
679
-		s.Unlock()
680 680
 		return
681 681
 	}
682
-	s.Unlock()
683 682
 	return
684 683
 }
685 684
 func (s *persistentRemotes) Remove(peers ...api.Peer) {
685
+	s.Lock()
686
+	defer s.Unlock()
686 687
 	s.Remotes.Remove(peers...)
687 688
 	if err := s.save(); err != nil {
688 689
 		logrus.Errorf("error writing cluster state file: %v", err)
... ...
@@ -2,8 +2,10 @@ package agent
2 2
 
3 3
 import (
4 4
 	"errors"
5
+	"sync"
5 6
 	"time"
6 7
 
8
+	"github.com/Sirupsen/logrus"
7 9
 	"github.com/docker/swarmkit/api"
8 10
 	"github.com/docker/swarmkit/log"
9 11
 	"github.com/docker/swarmkit/protobuf/ptypes"
... ...
@@ -31,26 +33,27 @@ type session struct {
31 31
 	conn *grpc.ClientConn
32 32
 	addr string
33 33
 
34
-	agent     *Agent
35
-	sessionID string
36
-	session   api.Dispatcher_SessionClient
37
-	errs      chan error
38
-	messages  chan *api.SessionMessage
39
-	tasks     chan *api.TasksMessage
34
+	agent       *Agent
35
+	sessionID   string
36
+	session     api.Dispatcher_SessionClient
37
+	errs        chan error
38
+	messages    chan *api.SessionMessage
39
+	assignments chan *api.AssignmentsMessage
40 40
 
41 41
 	registered chan struct{} // closed registration
42 42
 	closed     chan struct{}
43
+	closeOnce  sync.Once
43 44
 }
44 45
 
45
-func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionID string) *session {
46
+func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionID string, description *api.NodeDescription) *session {
46 47
 	s := &session{
47
-		agent:      agent,
48
-		sessionID:  sessionID,
49
-		errs:       make(chan error, 1),
50
-		messages:   make(chan *api.SessionMessage),
51
-		tasks:      make(chan *api.TasksMessage),
52
-		registered: make(chan struct{}),
53
-		closed:     make(chan struct{}),
48
+		agent:       agent,
49
+		sessionID:   sessionID,
50
+		errs:        make(chan error, 1),
51
+		messages:    make(chan *api.SessionMessage),
52
+		assignments: make(chan *api.AssignmentsMessage),
53
+		registered:  make(chan struct{}),
54
+		closed:      make(chan struct{}),
54 55
 	}
55 56
 	peer, err := agent.config.Managers.Select()
56 57
 	if err != nil {
... ...
@@ -68,14 +71,14 @@ func newSession(ctx context.Context, agent *Agent, delay time.Duration, sessionI
68 68
 	s.addr = peer.Addr
69 69
 	s.conn = cc
70 70
 
71
-	go s.run(ctx, delay)
71
+	go s.run(ctx, delay, description)
72 72
 	return s
73 73
 }
74 74
 
75
-func (s *session) run(ctx context.Context, delay time.Duration) {
75
+func (s *session) run(ctx context.Context, delay time.Duration, description *api.NodeDescription) {
76 76
 	time.Sleep(delay) // delay before registering.
77 77
 
78
-	if err := s.start(ctx); err != nil {
78
+	if err := s.start(ctx, description); err != nil {
79 79
 		select {
80 80
 		case s.errs <- err:
81 81
 		case <-s.closed:
... ...
@@ -94,24 +97,14 @@ func (s *session) run(ctx context.Context, delay time.Duration) {
94 94
 }
95 95
 
96 96
 // start begins the session and returns the first SessionMessage.
97
-func (s *session) start(ctx context.Context) error {
97
+func (s *session) start(ctx context.Context, description *api.NodeDescription) error {
98 98
 	log.G(ctx).Debugf("(*session).start")
99 99
 
100
-	description, err := s.agent.config.Executor.Describe(ctx)
101
-	if err != nil {
102
-		log.G(ctx).WithError(err).WithField("executor", s.agent.config.Executor).
103
-			Errorf("node description unavailable")
104
-		return err
105
-	}
106
-	// Override hostname
107
-	if s.agent.config.Hostname != "" {
108
-		description.Hostname = s.agent.config.Hostname
109
-	}
110
-
111 100
 	errChan := make(chan error, 1)
112 101
 	var (
113 102
 		msg    *api.SessionMessage
114 103
 		stream api.Dispatcher_SessionClient
104
+		err    error
115 105
 	)
116 106
 	// Note: we don't defer cancellation of this context, because the
117 107
 	// streaming RPC is used after this function returned. We only cancel
... ...
@@ -215,22 +208,68 @@ func (s *session) handleSessionMessage(ctx context.Context, msg *api.SessionMess
215 215
 }
216 216
 
217 217
 func (s *session) watch(ctx context.Context) error {
218
-	log.G(ctx).Debugf("(*session).watch")
219
-	client := api.NewDispatcherClient(s.conn)
220
-	watch, err := client.Tasks(ctx, &api.TasksRequest{
221
-		SessionID: s.sessionID})
222
-	if err != nil {
223
-		return err
224
-	}
218
+	log := log.G(ctx).WithFields(logrus.Fields{"method": "(*session).watch"})
219
+	log.Debugf("")
220
+	var (
221
+		resp            *api.AssignmentsMessage
222
+		assignmentWatch api.Dispatcher_AssignmentsClient
223
+		tasksWatch      api.Dispatcher_TasksClient
224
+		streamReference string
225
+		tasksFallback   bool
226
+		err             error
227
+	)
225 228
 
229
+	client := api.NewDispatcherClient(s.conn)
226 230
 	for {
227
-		resp, err := watch.Recv()
228
-		if err != nil {
229
-			return err
231
+		// If this is the first time we're running the loop, or there was a reference mismatch
232
+		// attempt to get the assignmentWatch
233
+		if assignmentWatch == nil && !tasksFallback {
234
+			assignmentWatch, err = client.Assignments(ctx, &api.AssignmentsRequest{SessionID: s.sessionID})
235
+			if err != nil {
236
+				return err
237
+			}
238
+		}
239
+		// We have an assignmentWatch, let's try to receive an AssignmentMessage
240
+		if assignmentWatch != nil {
241
+			// If we get a code = 12 desc = unknown method Assignments, try to use tasks
242
+			resp, err = assignmentWatch.Recv()
243
+			if err != nil {
244
+				if grpc.Code(err) != codes.Unimplemented {
245
+					return err
246
+				}
247
+				tasksFallback = true
248
+				assignmentWatch = nil
249
+				log.WithError(err).Infof("falling back to Tasks")
250
+			}
251
+		}
252
+
253
+		// This code is here for backwards compatibility (so that newer clients can use the
254
+		// older method Tasks)
255
+		if tasksWatch == nil && tasksFallback {
256
+			tasksWatch, err = client.Tasks(ctx, &api.TasksRequest{SessionID: s.sessionID})
257
+			if err != nil {
258
+				return err
259
+			}
260
+		}
261
+		if tasksWatch != nil {
262
+			var taskResp *api.TasksMessage
263
+			taskResp, err = tasksWatch.Recv()
264
+			if err != nil {
265
+				return err
266
+			}
267
+			resp = &api.AssignmentsMessage{Type: api.AssignmentsMessage_COMPLETE, UpdateTasks: taskResp.Tasks}
268
+		}
269
+
270
+		// If there seems to be a gap in the stream, let's break out of the inner for and
271
+		// re-sync (by calling Assignments again).
272
+		if streamReference != "" && streamReference != resp.AppliesTo {
273
+			assignmentWatch = nil
274
+		} else {
275
+			streamReference = resp.ResultsIn
230 276
 		}
231 277
 
232 278
 		select {
233
-		case s.tasks <- resp:
279
+		case s.assignments <- resp:
234 280
 		case <-s.closed:
235 281
 			return errSessionClosed
236 282
 		case <-ctx.Done():
... ...
@@ -241,7 +280,6 @@ func (s *session) watch(ctx context.Context) error {
241 241
 
242 242
 // sendTaskStatus uses the current session to send the status of a single task.
243 243
 func (s *session) sendTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
244
-
245 244
 	client := api.NewDispatcherClient(s.conn)
246 245
 	if _, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{
247 246
 		SessionID: s.sessionID,
... ...
@@ -302,15 +340,14 @@ func (s *session) sendTaskStatuses(ctx context.Context, updates ...*api.UpdateTa
302 302
 }
303 303
 
304 304
 func (s *session) close() error {
305
-	select {
306
-	case <-s.closed:
307
-		return errSessionClosed
308
-	default:
305
+	s.closeOnce.Do(func() {
309 306
 		if s.conn != nil {
310 307
 			s.agent.config.Managers.ObserveIfExists(api.Peer{Addr: s.addr}, -remotes.DefaultObservationWeight)
311 308
 			s.conn.Close()
312 309
 		}
310
+
313 311
 		close(s.closed)
314
-		return nil
315
-	}
312
+	})
313
+
314
+	return nil
316 315
 }
... ...
@@ -17,9 +17,13 @@ type Worker interface {
17 17
 	// Init prepares the worker for task assignment.
18 18
 	Init(ctx context.Context) error
19 19
 
20
-	// Assign the set of tasks to the worker. Tasks outside of this set will be
21
-	// removed.
22
-	Assign(ctx context.Context, tasks []*api.Task) error
20
+	// AssignTasks assigns a complete set of tasks to a worker. Any task not included in
21
+	// this set will be removed.
22
+	AssignTasks(ctx context.Context, tasks []*api.Task) error
23
+
24
+	// UpdateTasks updates an incremental set of tasks to the worker. Any task not included
25
+	// either in added or removed will remain untouched.
26
+	UpdateTasks(ctx context.Context, added []*api.Task, removed []string) error
23 27
 
24 28
 	// Listen to updates about tasks controlled by the worker. When first
25 29
 	// called, the reporter will receive all updates for all tasks controlled
... ...
@@ -86,14 +90,37 @@ func (w *worker) Init(ctx context.Context) error {
86 86
 	})
87 87
 }
88 88
 
89
-// Assign the set of tasks to the worker. Any tasks not previously known will
89
+// AssignTasks assigns  the set of tasks to the worker. Any tasks not previously known will
90 90
 // be started. Any tasks that are in the task set and already running will be
91 91
 // updated, if possible. Any tasks currently running on the
92 92
 // worker outside the task set will be terminated.
93
-func (w *worker) Assign(ctx context.Context, tasks []*api.Task) error {
93
+func (w *worker) AssignTasks(ctx context.Context, tasks []*api.Task) error {
94 94
 	w.mu.Lock()
95 95
 	defer w.mu.Unlock()
96 96
 
97
+	log.G(ctx).WithFields(logrus.Fields{
98
+		"len(tasks)": len(tasks),
99
+	}).Debug("(*worker).AssignTasks")
100
+
101
+	return reconcileTaskState(ctx, w, tasks, nil, true)
102
+}
103
+
104
+// UpdateTasks the set of tasks to the worker.
105
+// Tasks in the added set will be added to the worker, and tasks in the removed set
106
+// will be removed from the worker
107
+func (w *worker) UpdateTasks(ctx context.Context, added []*api.Task, removed []string) error {
108
+	w.mu.Lock()
109
+	defer w.mu.Unlock()
110
+
111
+	log.G(ctx).WithFields(logrus.Fields{
112
+		"len(added)":   len(added),
113
+		"len(removed)": len(removed),
114
+	}).Debug("(*worker).UpdateTasks")
115
+
116
+	return reconcileTaskState(ctx, w, added, removed, false)
117
+}
118
+
119
+func reconcileTaskState(ctx context.Context, w *worker, added []*api.Task, removed []string, fullSnapshot bool) error {
97 120
 	tx, err := w.db.Begin(true)
98 121
 	if err != nil {
99 122
 		log.G(ctx).WithError(err).Error("failed starting transaction against task database")
... ...
@@ -101,10 +128,9 @@ func (w *worker) Assign(ctx context.Context, tasks []*api.Task) error {
101 101
 	}
102 102
 	defer tx.Rollback()
103 103
 
104
-	log.G(ctx).WithField("len(tasks)", len(tasks)).Debug("(*worker).Assign")
105 104
 	assigned := map[string]struct{}{}
106 105
 
107
-	for _, task := range tasks {
106
+	for _, task := range added {
108 107
 		log.G(ctx).WithFields(
109 108
 			logrus.Fields{
110 109
 				"task.id":           task.ID,
... ...
@@ -135,35 +161,59 @@ func (w *worker) Assign(ctx context.Context, tasks []*api.Task) error {
135 135
 					return err
136 136
 				}
137 137
 			} else {
138
-				task.Status = *status // overwrite the stale manager status with ours.
138
+				task.Status = *status
139 139
 			}
140
-
141 140
 			w.startTask(ctx, tx, task)
142 141
 		}
143 142
 
144 143
 		assigned[task.ID] = struct{}{}
145 144
 	}
146 145
 
147
-	for id, tm := range w.taskManagers {
148
-		if _, ok := assigned[id]; ok {
149
-			continue
146
+	closeManager := func(tm *taskManager) {
147
+		// when a task is no longer assigned, we shutdown the task manager for
148
+		// it and leave cleanup to the sweeper.
149
+		if err := tm.Close(); err != nil {
150
+			log.G(ctx).WithError(err).Error("error closing task manager")
150 151
 		}
152
+	}
151 153
 
152
-		ctx := log.WithLogger(ctx, log.G(ctx).WithField("task.id", id))
153
-		if err := SetTaskAssignment(tx, id, false); err != nil {
154
+	removeTaskAssignment := func(taskID string) error {
155
+		ctx := log.WithLogger(ctx, log.G(ctx).WithField("task.id", taskID))
156
+		if err := SetTaskAssignment(tx, taskID, false); err != nil {
154 157
 			log.G(ctx).WithError(err).Error("error setting task assignment in database")
155
-			continue
156 158
 		}
159
+		return err
160
+	}
161
+
162
+	// If this was a complete set of assignments, we're going to remove all the remaining
163
+	// tasks.
164
+	if fullSnapshot {
165
+		for id, tm := range w.taskManagers {
166
+			if _, ok := assigned[id]; ok {
167
+				continue
168
+			}
157 169
 
158
-		delete(w.taskManagers, id)
170
+			err := removeTaskAssignment(id)
171
+			if err == nil {
172
+				delete(w.taskManagers, id)
173
+				go closeManager(tm)
174
+			}
175
+		}
176
+	} else {
177
+		// If this was an incremental set of assignments, we're going to remove only the tasks
178
+		// in the removed set
179
+		for _, taskID := range removed {
180
+			err := removeTaskAssignment(taskID)
181
+			if err != nil {
182
+				continue
183
+			}
159 184
 
160
-		go func(tm *taskManager) {
161
-			// when a task is no longer assigned, we shutdown the task manager for
162
-			// it and leave cleanup to the sweeper.
163
-			if err := tm.Close(); err != nil {
164
-				log.G(ctx).WithError(err).Error("error closing task manager")
185
+			tm, ok := w.taskManagers[taskID]
186
+			if ok {
187
+				delete(w.taskManagers, taskID)
188
+				go closeManager(tm)
165 189
 			}
166
-		}(tm)
190
+		}
167 191
 	}
168 192
 
169 193
 	return tx.Commit()
... ...
@@ -21,10 +21,11 @@ import (
21 21
 	grpc "google.golang.org/grpc"
22 22
 )
23 23
 
24
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
24
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
25 25
 import codes "google.golang.org/grpc/codes"
26 26
 import metadata "google.golang.org/grpc/metadata"
27 27
 import transport "google.golang.org/grpc/transport"
28
+import time "time"
28 29
 
29 30
 import io "io"
30 31
 
... ...
@@ -285,11 +286,12 @@ func valueToGoStringCa(v interface{}, typ string) string {
285 285
 	pv := reflect.Indirect(rv).Interface()
286 286
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
287 287
 }
288
-func extensionToGoStringCa(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
288
+func extensionToGoStringCa(m github_com_gogo_protobuf_proto.Message) string {
289
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
289 290
 	if e == nil {
290 291
 		return "nil"
291 292
 	}
292
-	s := "map[int32]proto.Extension{"
293
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
293 294
 	keys := make([]int, 0, len(e))
294 295
 	for k := range e {
295 296
 		keys = append(keys, int(k))
... ...
@@ -299,7 +301,7 @@ func extensionToGoStringCa(e map[int32]github_com_gogo_protobuf_proto.Extension)
299 299
 	for _, k := range keys {
300 300
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
301 301
 	}
302
-	s += strings.Join(ss, ",") + "}"
302
+	s += strings.Join(ss, ",") + "})"
303 303
 	return s
304 304
 }
305 305
 
... ...
@@ -309,7 +311,7 @@ var _ grpc.ClientConn
309 309
 
310 310
 // This is a compile-time assertion to ensure that this generated file
311 311
 // is compatible with the grpc package it is being compiled against.
312
-const _ = grpc.SupportPackageIsVersion2
312
+const _ = grpc.SupportPackageIsVersion3
313 313
 
314 314
 // Client API for CA service
315 315
 
... ...
@@ -371,7 +373,8 @@ var _CA_serviceDesc = grpc.ServiceDesc{
371 371
 			Handler:    _CA_GetRootCACertificate_Handler,
372 372
 		},
373 373
 	},
374
-	Streams: []grpc.StreamDesc{},
374
+	Streams:  []grpc.StreamDesc{},
375
+	Metadata: fileDescriptorCa,
375 376
 }
376 377
 
377 378
 // Client API for NodeCA service
... ...
@@ -467,7 +470,8 @@ var _NodeCA_serviceDesc = grpc.ServiceDesc{
467 467
 			Handler:    _NodeCA_NodeCertificateStatus_Handler,
468 468
 		},
469 469
 	},
470
-	Streams: []grpc.StreamDesc{},
470
+	Streams:  []grpc.StreamDesc{},
471
+	Metadata: fileDescriptorCa,
471 472
 }
472 473
 
473 474
 func (m *NodeCertificateStatusRequest) Marshal() (data []byte, err error) {
... ...
@@ -668,12 +672,11 @@ func encodeVarintCa(data []byte, offset int, v uint64) int {
668 668
 
669 669
 type raftProxyCAServer struct {
670 670
 	local        CAServer
671
-	connSelector raftpicker.Interface
672
-	cluster      raftpicker.RaftCluster
671
+	connSelector raftselector.ConnProvider
673 672
 	ctxMods      []func(context.Context) (context.Context, error)
674 673
 }
675 674
 
676
-func NewRaftProxyCAServer(local CAServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) CAServer {
675
+func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) CAServer {
677 676
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
678 677
 		s, ok := transport.StreamFromContext(ctx)
679 678
 		if !ok {
... ...
@@ -695,7 +698,6 @@ func NewRaftProxyCAServer(local CAServer, connSelector raftpicker.Interface, clu
695 695
 
696 696
 	return &raftProxyCAServer{
697 697
 		local:        local,
698
-		cluster:      cluster,
699 698
 		connSelector: connSelector,
700 699
 		ctxMods:      mods,
701 700
 	}
... ...
@@ -710,44 +712,68 @@ func (p *raftProxyCAServer) runCtxMods(ctx context.Context) (context.Context, er
710 710
 	}
711 711
 	return ctx, nil
712 712
 }
713
+func (p *raftProxyCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
714
+	ticker := time.NewTicker(500 * time.Millisecond)
715
+	defer ticker.Stop()
716
+	for {
717
+		select {
718
+		case <-ticker.C:
719
+			conn, err := p.connSelector.LeaderConn(ctx)
720
+			if err != nil {
721
+				return nil, err
722
+			}
713 723
 
714
-func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) {
724
+			client := NewHealthClient(conn)
715 725
 
716
-	if p.cluster.IsLeader() {
717
-		return p.local.GetRootCACertificate(ctx, r)
726
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
727
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
728
+				continue
729
+			}
730
+			return conn, nil
731
+		case <-ctx.Done():
732
+			return nil, ctx.Err()
733
+		}
718 734
 	}
719
-	ctx, err := p.runCtxMods(ctx)
735
+}
736
+
737
+func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) {
738
+
739
+	conn, err := p.connSelector.LeaderConn(ctx)
720 740
 	if err != nil {
741
+		if err == raftselector.ErrIsLeader {
742
+			return p.local.GetRootCACertificate(ctx, r)
743
+		}
721 744
 		return nil, err
722 745
 	}
723
-	conn, err := p.connSelector.Conn()
746
+	modCtx, err := p.runCtxMods(ctx)
724 747
 	if err != nil {
725 748
 		return nil, err
726 749
 	}
727 750
 
728
-	defer func() {
751
+	resp, err := NewCAClient(conn).GetRootCACertificate(modCtx, r)
752
+	if err != nil {
753
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
754
+			return resp, err
755
+		}
756
+		conn, err := p.pollNewLeaderConn(ctx)
729 757
 		if err != nil {
730
-			errStr := err.Error()
731
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
732
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
733
-				strings.Contains(errStr, "connection error") ||
734
-				grpc.Code(err) == codes.Internal {
735
-				p.connSelector.Reset()
758
+			if err == raftselector.ErrIsLeader {
759
+				return p.local.GetRootCACertificate(ctx, r)
736 760
 			}
761
+			return nil, err
737 762
 		}
738
-	}()
739
-
740
-	return NewCAClient(conn).GetRootCACertificate(ctx, r)
763
+		return NewCAClient(conn).GetRootCACertificate(modCtx, r)
764
+	}
765
+	return resp, err
741 766
 }
742 767
 
743 768
 type raftProxyNodeCAServer struct {
744 769
 	local        NodeCAServer
745
-	connSelector raftpicker.Interface
746
-	cluster      raftpicker.RaftCluster
770
+	connSelector raftselector.ConnProvider
747 771
 	ctxMods      []func(context.Context) (context.Context, error)
748 772
 }
749 773
 
750
-func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) NodeCAServer {
774
+func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) NodeCAServer {
751 775
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
752 776
 		s, ok := transport.StreamFromContext(ctx)
753 777
 		if !ok {
... ...
@@ -769,7 +795,6 @@ func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftpicker.Interf
769 769
 
770 770
 	return &raftProxyNodeCAServer{
771 771
 		local:        local,
772
-		cluster:      cluster,
773 772
 		connSelector: connSelector,
774 773
 		ctxMods:      mods,
775 774
 	}
... ...
@@ -784,63 +809,90 @@ func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context) (context.Context
784 784
 	}
785 785
 	return ctx, nil
786 786
 }
787
+func (p *raftProxyNodeCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
788
+	ticker := time.NewTicker(500 * time.Millisecond)
789
+	defer ticker.Stop()
790
+	for {
791
+		select {
792
+		case <-ticker.C:
793
+			conn, err := p.connSelector.LeaderConn(ctx)
794
+			if err != nil {
795
+				return nil, err
796
+			}
787 797
 
788
-func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) {
798
+			client := NewHealthClient(conn)
789 799
 
790
-	if p.cluster.IsLeader() {
791
-		return p.local.IssueNodeCertificate(ctx, r)
800
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
801
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
802
+				continue
803
+			}
804
+			return conn, nil
805
+		case <-ctx.Done():
806
+			return nil, ctx.Err()
807
+		}
792 808
 	}
793
-	ctx, err := p.runCtxMods(ctx)
809
+}
810
+
811
+func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) {
812
+
813
+	conn, err := p.connSelector.LeaderConn(ctx)
794 814
 	if err != nil {
815
+		if err == raftselector.ErrIsLeader {
816
+			return p.local.IssueNodeCertificate(ctx, r)
817
+		}
795 818
 		return nil, err
796 819
 	}
797
-	conn, err := p.connSelector.Conn()
820
+	modCtx, err := p.runCtxMods(ctx)
798 821
 	if err != nil {
799 822
 		return nil, err
800 823
 	}
801 824
 
802
-	defer func() {
825
+	resp, err := NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r)
826
+	if err != nil {
827
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
828
+			return resp, err
829
+		}
830
+		conn, err := p.pollNewLeaderConn(ctx)
803 831
 		if err != nil {
804
-			errStr := err.Error()
805
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
806
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
807
-				strings.Contains(errStr, "connection error") ||
808
-				grpc.Code(err) == codes.Internal {
809
-				p.connSelector.Reset()
832
+			if err == raftselector.ErrIsLeader {
833
+				return p.local.IssueNodeCertificate(ctx, r)
810 834
 			}
835
+			return nil, err
811 836
 		}
812
-	}()
813
-
814
-	return NewNodeCAClient(conn).IssueNodeCertificate(ctx, r)
837
+		return NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r)
838
+	}
839
+	return resp, err
815 840
 }
816 841
 
817 842
 func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) {
818 843
 
819
-	if p.cluster.IsLeader() {
820
-		return p.local.NodeCertificateStatus(ctx, r)
821
-	}
822
-	ctx, err := p.runCtxMods(ctx)
844
+	conn, err := p.connSelector.LeaderConn(ctx)
823 845
 	if err != nil {
846
+		if err == raftselector.ErrIsLeader {
847
+			return p.local.NodeCertificateStatus(ctx, r)
848
+		}
824 849
 		return nil, err
825 850
 	}
826
-	conn, err := p.connSelector.Conn()
851
+	modCtx, err := p.runCtxMods(ctx)
827 852
 	if err != nil {
828 853
 		return nil, err
829 854
 	}
830 855
 
831
-	defer func() {
856
+	resp, err := NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r)
857
+	if err != nil {
858
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
859
+			return resp, err
860
+		}
861
+		conn, err := p.pollNewLeaderConn(ctx)
832 862
 		if err != nil {
833
-			errStr := err.Error()
834
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
835
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
836
-				strings.Contains(errStr, "connection error") ||
837
-				grpc.Code(err) == codes.Internal {
838
-				p.connSelector.Reset()
863
+			if err == raftselector.ErrIsLeader {
864
+				return p.local.NodeCertificateStatus(ctx, r)
839 865
 			}
866
+			return nil, err
840 867
 		}
841
-	}()
842
-
843
-	return NewNodeCAClient(conn).NodeCertificateStatus(ctx, r)
868
+		return NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r)
869
+	}
870
+	return resp, err
844 871
 }
845 872
 
846 873
 func (m *NodeCertificateStatusRequest) Size() (n int) {
... ...
@@ -1655,6 +1707,8 @@ var (
1655 1655
 	ErrIntOverflowCa   = fmt.Errorf("proto: integer overflow")
1656 1656
 )
1657 1657
 
1658
+func init() { proto.RegisterFile("ca.proto", fileDescriptorCa) }
1659
+
1658 1660
 var fileDescriptorCa = []byte{
1659 1661
 	// 493 bytes of a gzipped FileDescriptorProto
1660 1662
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
... ...
@@ -22,10 +22,11 @@ import (
22 22
 	grpc "google.golang.org/grpc"
23 23
 )
24 24
 
25
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
25
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
26 26
 import codes "google.golang.org/grpc/codes"
27 27
 import metadata "google.golang.org/grpc/metadata"
28 28
 import transport "google.golang.org/grpc/transport"
29
+import time "time"
29 30
 
30 31
 import io "io"
31 32
 
... ...
@@ -1961,11 +1962,12 @@ func valueToGoStringControl(v interface{}, typ string) string {
1961 1961
 	pv := reflect.Indirect(rv).Interface()
1962 1962
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
1963 1963
 }
1964
-func extensionToGoStringControl(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
1964
+func extensionToGoStringControl(m github_com_gogo_protobuf_proto.Message) string {
1965
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
1965 1966
 	if e == nil {
1966 1967
 		return "nil"
1967 1968
 	}
1968
-	s := "map[int32]proto.Extension{"
1969
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
1969 1970
 	keys := make([]int, 0, len(e))
1970 1971
 	for k := range e {
1971 1972
 		keys = append(keys, int(k))
... ...
@@ -1975,7 +1977,7 @@ func extensionToGoStringControl(e map[int32]github_com_gogo_protobuf_proto.Exten
1975 1975
 	for _, k := range keys {
1976 1976
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
1977 1977
 	}
1978
-	s += strings.Join(ss, ",") + "}"
1978
+	s += strings.Join(ss, ",") + "})"
1979 1979
 	return s
1980 1980
 }
1981 1981
 
... ...
@@ -1985,7 +1987,7 @@ var _ grpc.ClientConn
1985 1985
 
1986 1986
 // This is a compile-time assertion to ensure that this generated file
1987 1987
 // is compatible with the grpc package it is being compiled against.
1988
-const _ = grpc.SupportPackageIsVersion2
1988
+const _ = grpc.SupportPackageIsVersion3
1989 1989
 
1990 1990
 // Client API for Control service
1991 1991
 
... ...
@@ -2641,7 +2643,8 @@ var _Control_serviceDesc = grpc.ServiceDesc{
2641 2641
 			Handler:    _Control_UpdateCluster_Handler,
2642 2642
 		},
2643 2643
 	},
2644
-	Streams: []grpc.StreamDesc{},
2644
+	Streams:  []grpc.StreamDesc{},
2645
+	Metadata: fileDescriptorControl,
2645 2646
 }
2646 2647
 
2647 2648
 func (m *GetNodeRequest) Marshal() (data []byte, err error) {
... ...
@@ -4239,12 +4242,11 @@ func encodeVarintControl(data []byte, offset int, v uint64) int {
4239 4239
 
4240 4240
 type raftProxyControlServer struct {
4241 4241
 	local        ControlServer
4242
-	connSelector raftpicker.Interface
4243
-	cluster      raftpicker.RaftCluster
4242
+	connSelector raftselector.ConnProvider
4244 4243
 	ctxMods      []func(context.Context) (context.Context, error)
4245 4244
 }
4246 4245
 
4247
-func NewRaftProxyControlServer(local ControlServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) ControlServer {
4246
+func NewRaftProxyControlServer(local ControlServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) ControlServer {
4248 4247
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
4249 4248
 		s, ok := transport.StreamFromContext(ctx)
4250 4249
 		if !ok {
... ...
@@ -4266,7 +4268,6 @@ func NewRaftProxyControlServer(local ControlServer, connSelector raftpicker.Inte
4266 4266
 
4267 4267
 	return &raftProxyControlServer{
4268 4268
 		local:        local,
4269
-		cluster:      cluster,
4270 4269
 		connSelector: connSelector,
4271 4270
 		ctxMods:      mods,
4272 4271
 	}
... ...
@@ -4281,556 +4282,617 @@ func (p *raftProxyControlServer) runCtxMods(ctx context.Context) (context.Contex
4281 4281
 	}
4282 4282
 	return ctx, nil
4283 4283
 }
4284
+func (p *raftProxyControlServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
4285
+	ticker := time.NewTicker(500 * time.Millisecond)
4286
+	defer ticker.Stop()
4287
+	for {
4288
+		select {
4289
+		case <-ticker.C:
4290
+			conn, err := p.connSelector.LeaderConn(ctx)
4291
+			if err != nil {
4292
+				return nil, err
4293
+			}
4284 4294
 
4285
-func (p *raftProxyControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) {
4295
+			client := NewHealthClient(conn)
4286 4296
 
4287
-	if p.cluster.IsLeader() {
4288
-		return p.local.GetNode(ctx, r)
4297
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
4298
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
4299
+				continue
4300
+			}
4301
+			return conn, nil
4302
+		case <-ctx.Done():
4303
+			return nil, ctx.Err()
4304
+		}
4289 4305
 	}
4290
-	ctx, err := p.runCtxMods(ctx)
4306
+}
4307
+
4308
+func (p *raftProxyControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) {
4309
+
4310
+	conn, err := p.connSelector.LeaderConn(ctx)
4291 4311
 	if err != nil {
4312
+		if err == raftselector.ErrIsLeader {
4313
+			return p.local.GetNode(ctx, r)
4314
+		}
4292 4315
 		return nil, err
4293 4316
 	}
4294
-	conn, err := p.connSelector.Conn()
4317
+	modCtx, err := p.runCtxMods(ctx)
4295 4318
 	if err != nil {
4296 4319
 		return nil, err
4297 4320
 	}
4298 4321
 
4299
-	defer func() {
4322
+	resp, err := NewControlClient(conn).GetNode(modCtx, r)
4323
+	if err != nil {
4324
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4325
+			return resp, err
4326
+		}
4327
+		conn, err := p.pollNewLeaderConn(ctx)
4300 4328
 		if err != nil {
4301
-			errStr := err.Error()
4302
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4303
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4304
-				strings.Contains(errStr, "connection error") ||
4305
-				grpc.Code(err) == codes.Internal {
4306
-				p.connSelector.Reset()
4329
+			if err == raftselector.ErrIsLeader {
4330
+				return p.local.GetNode(ctx, r)
4307 4331
 			}
4332
+			return nil, err
4308 4333
 		}
4309
-	}()
4310
-
4311
-	return NewControlClient(conn).GetNode(ctx, r)
4334
+		return NewControlClient(conn).GetNode(modCtx, r)
4335
+	}
4336
+	return resp, err
4312 4337
 }
4313 4338
 
4314 4339
 func (p *raftProxyControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) {
4315 4340
 
4316
-	if p.cluster.IsLeader() {
4317
-		return p.local.ListNodes(ctx, r)
4318
-	}
4319
-	ctx, err := p.runCtxMods(ctx)
4341
+	conn, err := p.connSelector.LeaderConn(ctx)
4320 4342
 	if err != nil {
4343
+		if err == raftselector.ErrIsLeader {
4344
+			return p.local.ListNodes(ctx, r)
4345
+		}
4321 4346
 		return nil, err
4322 4347
 	}
4323
-	conn, err := p.connSelector.Conn()
4348
+	modCtx, err := p.runCtxMods(ctx)
4324 4349
 	if err != nil {
4325 4350
 		return nil, err
4326 4351
 	}
4327 4352
 
4328
-	defer func() {
4353
+	resp, err := NewControlClient(conn).ListNodes(modCtx, r)
4354
+	if err != nil {
4355
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4356
+			return resp, err
4357
+		}
4358
+		conn, err := p.pollNewLeaderConn(ctx)
4329 4359
 		if err != nil {
4330
-			errStr := err.Error()
4331
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4332
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4333
-				strings.Contains(errStr, "connection error") ||
4334
-				grpc.Code(err) == codes.Internal {
4335
-				p.connSelector.Reset()
4360
+			if err == raftselector.ErrIsLeader {
4361
+				return p.local.ListNodes(ctx, r)
4336 4362
 			}
4363
+			return nil, err
4337 4364
 		}
4338
-	}()
4339
-
4340
-	return NewControlClient(conn).ListNodes(ctx, r)
4365
+		return NewControlClient(conn).ListNodes(modCtx, r)
4366
+	}
4367
+	return resp, err
4341 4368
 }
4342 4369
 
4343 4370
 func (p *raftProxyControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) {
4344 4371
 
4345
-	if p.cluster.IsLeader() {
4346
-		return p.local.UpdateNode(ctx, r)
4347
-	}
4348
-	ctx, err := p.runCtxMods(ctx)
4372
+	conn, err := p.connSelector.LeaderConn(ctx)
4349 4373
 	if err != nil {
4374
+		if err == raftselector.ErrIsLeader {
4375
+			return p.local.UpdateNode(ctx, r)
4376
+		}
4350 4377
 		return nil, err
4351 4378
 	}
4352
-	conn, err := p.connSelector.Conn()
4379
+	modCtx, err := p.runCtxMods(ctx)
4353 4380
 	if err != nil {
4354 4381
 		return nil, err
4355 4382
 	}
4356 4383
 
4357
-	defer func() {
4384
+	resp, err := NewControlClient(conn).UpdateNode(modCtx, r)
4385
+	if err != nil {
4386
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4387
+			return resp, err
4388
+		}
4389
+		conn, err := p.pollNewLeaderConn(ctx)
4358 4390
 		if err != nil {
4359
-			errStr := err.Error()
4360
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4361
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4362
-				strings.Contains(errStr, "connection error") ||
4363
-				grpc.Code(err) == codes.Internal {
4364
-				p.connSelector.Reset()
4391
+			if err == raftselector.ErrIsLeader {
4392
+				return p.local.UpdateNode(ctx, r)
4365 4393
 			}
4394
+			return nil, err
4366 4395
 		}
4367
-	}()
4368
-
4369
-	return NewControlClient(conn).UpdateNode(ctx, r)
4396
+		return NewControlClient(conn).UpdateNode(modCtx, r)
4397
+	}
4398
+	return resp, err
4370 4399
 }
4371 4400
 
4372 4401
 func (p *raftProxyControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) {
4373 4402
 
4374
-	if p.cluster.IsLeader() {
4375
-		return p.local.RemoveNode(ctx, r)
4376
-	}
4377
-	ctx, err := p.runCtxMods(ctx)
4403
+	conn, err := p.connSelector.LeaderConn(ctx)
4378 4404
 	if err != nil {
4405
+		if err == raftselector.ErrIsLeader {
4406
+			return p.local.RemoveNode(ctx, r)
4407
+		}
4379 4408
 		return nil, err
4380 4409
 	}
4381
-	conn, err := p.connSelector.Conn()
4410
+	modCtx, err := p.runCtxMods(ctx)
4382 4411
 	if err != nil {
4383 4412
 		return nil, err
4384 4413
 	}
4385 4414
 
4386
-	defer func() {
4415
+	resp, err := NewControlClient(conn).RemoveNode(modCtx, r)
4416
+	if err != nil {
4417
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4418
+			return resp, err
4419
+		}
4420
+		conn, err := p.pollNewLeaderConn(ctx)
4387 4421
 		if err != nil {
4388
-			errStr := err.Error()
4389
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4390
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4391
-				strings.Contains(errStr, "connection error") ||
4392
-				grpc.Code(err) == codes.Internal {
4393
-				p.connSelector.Reset()
4422
+			if err == raftselector.ErrIsLeader {
4423
+				return p.local.RemoveNode(ctx, r)
4394 4424
 			}
4425
+			return nil, err
4395 4426
 		}
4396
-	}()
4397
-
4398
-	return NewControlClient(conn).RemoveNode(ctx, r)
4427
+		return NewControlClient(conn).RemoveNode(modCtx, r)
4428
+	}
4429
+	return resp, err
4399 4430
 }
4400 4431
 
4401 4432
 func (p *raftProxyControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) {
4402 4433
 
4403
-	if p.cluster.IsLeader() {
4404
-		return p.local.GetTask(ctx, r)
4405
-	}
4406
-	ctx, err := p.runCtxMods(ctx)
4434
+	conn, err := p.connSelector.LeaderConn(ctx)
4407 4435
 	if err != nil {
4436
+		if err == raftselector.ErrIsLeader {
4437
+			return p.local.GetTask(ctx, r)
4438
+		}
4408 4439
 		return nil, err
4409 4440
 	}
4410
-	conn, err := p.connSelector.Conn()
4441
+	modCtx, err := p.runCtxMods(ctx)
4411 4442
 	if err != nil {
4412 4443
 		return nil, err
4413 4444
 	}
4414 4445
 
4415
-	defer func() {
4446
+	resp, err := NewControlClient(conn).GetTask(modCtx, r)
4447
+	if err != nil {
4448
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4449
+			return resp, err
4450
+		}
4451
+		conn, err := p.pollNewLeaderConn(ctx)
4416 4452
 		if err != nil {
4417
-			errStr := err.Error()
4418
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4419
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4420
-				strings.Contains(errStr, "connection error") ||
4421
-				grpc.Code(err) == codes.Internal {
4422
-				p.connSelector.Reset()
4453
+			if err == raftselector.ErrIsLeader {
4454
+				return p.local.GetTask(ctx, r)
4423 4455
 			}
4456
+			return nil, err
4424 4457
 		}
4425
-	}()
4426
-
4427
-	return NewControlClient(conn).GetTask(ctx, r)
4458
+		return NewControlClient(conn).GetTask(modCtx, r)
4459
+	}
4460
+	return resp, err
4428 4461
 }
4429 4462
 
4430 4463
 func (p *raftProxyControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) {
4431 4464
 
4432
-	if p.cluster.IsLeader() {
4433
-		return p.local.ListTasks(ctx, r)
4434
-	}
4435
-	ctx, err := p.runCtxMods(ctx)
4465
+	conn, err := p.connSelector.LeaderConn(ctx)
4436 4466
 	if err != nil {
4467
+		if err == raftselector.ErrIsLeader {
4468
+			return p.local.ListTasks(ctx, r)
4469
+		}
4437 4470
 		return nil, err
4438 4471
 	}
4439
-	conn, err := p.connSelector.Conn()
4472
+	modCtx, err := p.runCtxMods(ctx)
4440 4473
 	if err != nil {
4441 4474
 		return nil, err
4442 4475
 	}
4443 4476
 
4444
-	defer func() {
4477
+	resp, err := NewControlClient(conn).ListTasks(modCtx, r)
4478
+	if err != nil {
4479
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4480
+			return resp, err
4481
+		}
4482
+		conn, err := p.pollNewLeaderConn(ctx)
4445 4483
 		if err != nil {
4446
-			errStr := err.Error()
4447
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4448
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4449
-				strings.Contains(errStr, "connection error") ||
4450
-				grpc.Code(err) == codes.Internal {
4451
-				p.connSelector.Reset()
4484
+			if err == raftselector.ErrIsLeader {
4485
+				return p.local.ListTasks(ctx, r)
4452 4486
 			}
4487
+			return nil, err
4453 4488
 		}
4454
-	}()
4455
-
4456
-	return NewControlClient(conn).ListTasks(ctx, r)
4489
+		return NewControlClient(conn).ListTasks(modCtx, r)
4490
+	}
4491
+	return resp, err
4457 4492
 }
4458 4493
 
4459 4494
 func (p *raftProxyControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) {
4460 4495
 
4461
-	if p.cluster.IsLeader() {
4462
-		return p.local.RemoveTask(ctx, r)
4463
-	}
4464
-	ctx, err := p.runCtxMods(ctx)
4496
+	conn, err := p.connSelector.LeaderConn(ctx)
4465 4497
 	if err != nil {
4498
+		if err == raftselector.ErrIsLeader {
4499
+			return p.local.RemoveTask(ctx, r)
4500
+		}
4466 4501
 		return nil, err
4467 4502
 	}
4468
-	conn, err := p.connSelector.Conn()
4503
+	modCtx, err := p.runCtxMods(ctx)
4469 4504
 	if err != nil {
4470 4505
 		return nil, err
4471 4506
 	}
4472 4507
 
4473
-	defer func() {
4508
+	resp, err := NewControlClient(conn).RemoveTask(modCtx, r)
4509
+	if err != nil {
4510
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4511
+			return resp, err
4512
+		}
4513
+		conn, err := p.pollNewLeaderConn(ctx)
4474 4514
 		if err != nil {
4475
-			errStr := err.Error()
4476
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4477
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4478
-				strings.Contains(errStr, "connection error") ||
4479
-				grpc.Code(err) == codes.Internal {
4480
-				p.connSelector.Reset()
4515
+			if err == raftselector.ErrIsLeader {
4516
+				return p.local.RemoveTask(ctx, r)
4481 4517
 			}
4518
+			return nil, err
4482 4519
 		}
4483
-	}()
4484
-
4485
-	return NewControlClient(conn).RemoveTask(ctx, r)
4520
+		return NewControlClient(conn).RemoveTask(modCtx, r)
4521
+	}
4522
+	return resp, err
4486 4523
 }
4487 4524
 
4488 4525
 func (p *raftProxyControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) {
4489 4526
 
4490
-	if p.cluster.IsLeader() {
4491
-		return p.local.GetService(ctx, r)
4492
-	}
4493
-	ctx, err := p.runCtxMods(ctx)
4527
+	conn, err := p.connSelector.LeaderConn(ctx)
4494 4528
 	if err != nil {
4529
+		if err == raftselector.ErrIsLeader {
4530
+			return p.local.GetService(ctx, r)
4531
+		}
4495 4532
 		return nil, err
4496 4533
 	}
4497
-	conn, err := p.connSelector.Conn()
4534
+	modCtx, err := p.runCtxMods(ctx)
4498 4535
 	if err != nil {
4499 4536
 		return nil, err
4500 4537
 	}
4501 4538
 
4502
-	defer func() {
4539
+	resp, err := NewControlClient(conn).GetService(modCtx, r)
4540
+	if err != nil {
4541
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4542
+			return resp, err
4543
+		}
4544
+		conn, err := p.pollNewLeaderConn(ctx)
4503 4545
 		if err != nil {
4504
-			errStr := err.Error()
4505
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4506
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4507
-				strings.Contains(errStr, "connection error") ||
4508
-				grpc.Code(err) == codes.Internal {
4509
-				p.connSelector.Reset()
4546
+			if err == raftselector.ErrIsLeader {
4547
+				return p.local.GetService(ctx, r)
4510 4548
 			}
4549
+			return nil, err
4511 4550
 		}
4512
-	}()
4513
-
4514
-	return NewControlClient(conn).GetService(ctx, r)
4551
+		return NewControlClient(conn).GetService(modCtx, r)
4552
+	}
4553
+	return resp, err
4515 4554
 }
4516 4555
 
4517 4556
 func (p *raftProxyControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) {
4518 4557
 
4519
-	if p.cluster.IsLeader() {
4520
-		return p.local.ListServices(ctx, r)
4521
-	}
4522
-	ctx, err := p.runCtxMods(ctx)
4558
+	conn, err := p.connSelector.LeaderConn(ctx)
4523 4559
 	if err != nil {
4560
+		if err == raftselector.ErrIsLeader {
4561
+			return p.local.ListServices(ctx, r)
4562
+		}
4524 4563
 		return nil, err
4525 4564
 	}
4526
-	conn, err := p.connSelector.Conn()
4565
+	modCtx, err := p.runCtxMods(ctx)
4527 4566
 	if err != nil {
4528 4567
 		return nil, err
4529 4568
 	}
4530 4569
 
4531
-	defer func() {
4570
+	resp, err := NewControlClient(conn).ListServices(modCtx, r)
4571
+	if err != nil {
4572
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4573
+			return resp, err
4574
+		}
4575
+		conn, err := p.pollNewLeaderConn(ctx)
4532 4576
 		if err != nil {
4533
-			errStr := err.Error()
4534
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4535
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4536
-				strings.Contains(errStr, "connection error") ||
4537
-				grpc.Code(err) == codes.Internal {
4538
-				p.connSelector.Reset()
4577
+			if err == raftselector.ErrIsLeader {
4578
+				return p.local.ListServices(ctx, r)
4539 4579
 			}
4580
+			return nil, err
4540 4581
 		}
4541
-	}()
4542
-
4543
-	return NewControlClient(conn).ListServices(ctx, r)
4582
+		return NewControlClient(conn).ListServices(modCtx, r)
4583
+	}
4584
+	return resp, err
4544 4585
 }
4545 4586
 
4546 4587
 func (p *raftProxyControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) {
4547 4588
 
4548
-	if p.cluster.IsLeader() {
4549
-		return p.local.CreateService(ctx, r)
4550
-	}
4551
-	ctx, err := p.runCtxMods(ctx)
4589
+	conn, err := p.connSelector.LeaderConn(ctx)
4552 4590
 	if err != nil {
4591
+		if err == raftselector.ErrIsLeader {
4592
+			return p.local.CreateService(ctx, r)
4593
+		}
4553 4594
 		return nil, err
4554 4595
 	}
4555
-	conn, err := p.connSelector.Conn()
4596
+	modCtx, err := p.runCtxMods(ctx)
4556 4597
 	if err != nil {
4557 4598
 		return nil, err
4558 4599
 	}
4559 4600
 
4560
-	defer func() {
4601
+	resp, err := NewControlClient(conn).CreateService(modCtx, r)
4602
+	if err != nil {
4603
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4604
+			return resp, err
4605
+		}
4606
+		conn, err := p.pollNewLeaderConn(ctx)
4561 4607
 		if err != nil {
4562
-			errStr := err.Error()
4563
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4564
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4565
-				strings.Contains(errStr, "connection error") ||
4566
-				grpc.Code(err) == codes.Internal {
4567
-				p.connSelector.Reset()
4608
+			if err == raftselector.ErrIsLeader {
4609
+				return p.local.CreateService(ctx, r)
4568 4610
 			}
4611
+			return nil, err
4569 4612
 		}
4570
-	}()
4571
-
4572
-	return NewControlClient(conn).CreateService(ctx, r)
4613
+		return NewControlClient(conn).CreateService(modCtx, r)
4614
+	}
4615
+	return resp, err
4573 4616
 }
4574 4617
 
4575 4618
 func (p *raftProxyControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) {
4576 4619
 
4577
-	if p.cluster.IsLeader() {
4578
-		return p.local.UpdateService(ctx, r)
4579
-	}
4580
-	ctx, err := p.runCtxMods(ctx)
4620
+	conn, err := p.connSelector.LeaderConn(ctx)
4581 4621
 	if err != nil {
4622
+		if err == raftselector.ErrIsLeader {
4623
+			return p.local.UpdateService(ctx, r)
4624
+		}
4582 4625
 		return nil, err
4583 4626
 	}
4584
-	conn, err := p.connSelector.Conn()
4627
+	modCtx, err := p.runCtxMods(ctx)
4585 4628
 	if err != nil {
4586 4629
 		return nil, err
4587 4630
 	}
4588 4631
 
4589
-	defer func() {
4632
+	resp, err := NewControlClient(conn).UpdateService(modCtx, r)
4633
+	if err != nil {
4634
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4635
+			return resp, err
4636
+		}
4637
+		conn, err := p.pollNewLeaderConn(ctx)
4590 4638
 		if err != nil {
4591
-			errStr := err.Error()
4592
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4593
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4594
-				strings.Contains(errStr, "connection error") ||
4595
-				grpc.Code(err) == codes.Internal {
4596
-				p.connSelector.Reset()
4639
+			if err == raftselector.ErrIsLeader {
4640
+				return p.local.UpdateService(ctx, r)
4597 4641
 			}
4642
+			return nil, err
4598 4643
 		}
4599
-	}()
4600
-
4601
-	return NewControlClient(conn).UpdateService(ctx, r)
4644
+		return NewControlClient(conn).UpdateService(modCtx, r)
4645
+	}
4646
+	return resp, err
4602 4647
 }
4603 4648
 
4604 4649
 func (p *raftProxyControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) {
4605 4650
 
4606
-	if p.cluster.IsLeader() {
4607
-		return p.local.RemoveService(ctx, r)
4608
-	}
4609
-	ctx, err := p.runCtxMods(ctx)
4651
+	conn, err := p.connSelector.LeaderConn(ctx)
4610 4652
 	if err != nil {
4653
+		if err == raftselector.ErrIsLeader {
4654
+			return p.local.RemoveService(ctx, r)
4655
+		}
4611 4656
 		return nil, err
4612 4657
 	}
4613
-	conn, err := p.connSelector.Conn()
4658
+	modCtx, err := p.runCtxMods(ctx)
4614 4659
 	if err != nil {
4615 4660
 		return nil, err
4616 4661
 	}
4617 4662
 
4618
-	defer func() {
4663
+	resp, err := NewControlClient(conn).RemoveService(modCtx, r)
4664
+	if err != nil {
4665
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4666
+			return resp, err
4667
+		}
4668
+		conn, err := p.pollNewLeaderConn(ctx)
4619 4669
 		if err != nil {
4620
-			errStr := err.Error()
4621
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4622
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4623
-				strings.Contains(errStr, "connection error") ||
4624
-				grpc.Code(err) == codes.Internal {
4625
-				p.connSelector.Reset()
4670
+			if err == raftselector.ErrIsLeader {
4671
+				return p.local.RemoveService(ctx, r)
4626 4672
 			}
4673
+			return nil, err
4627 4674
 		}
4628
-	}()
4629
-
4630
-	return NewControlClient(conn).RemoveService(ctx, r)
4675
+		return NewControlClient(conn).RemoveService(modCtx, r)
4676
+	}
4677
+	return resp, err
4631 4678
 }
4632 4679
 
4633 4680
 func (p *raftProxyControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) {
4634 4681
 
4635
-	if p.cluster.IsLeader() {
4636
-		return p.local.GetNetwork(ctx, r)
4637
-	}
4638
-	ctx, err := p.runCtxMods(ctx)
4682
+	conn, err := p.connSelector.LeaderConn(ctx)
4639 4683
 	if err != nil {
4684
+		if err == raftselector.ErrIsLeader {
4685
+			return p.local.GetNetwork(ctx, r)
4686
+		}
4640 4687
 		return nil, err
4641 4688
 	}
4642
-	conn, err := p.connSelector.Conn()
4689
+	modCtx, err := p.runCtxMods(ctx)
4643 4690
 	if err != nil {
4644 4691
 		return nil, err
4645 4692
 	}
4646 4693
 
4647
-	defer func() {
4694
+	resp, err := NewControlClient(conn).GetNetwork(modCtx, r)
4695
+	if err != nil {
4696
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4697
+			return resp, err
4698
+		}
4699
+		conn, err := p.pollNewLeaderConn(ctx)
4648 4700
 		if err != nil {
4649
-			errStr := err.Error()
4650
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4651
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4652
-				strings.Contains(errStr, "connection error") ||
4653
-				grpc.Code(err) == codes.Internal {
4654
-				p.connSelector.Reset()
4701
+			if err == raftselector.ErrIsLeader {
4702
+				return p.local.GetNetwork(ctx, r)
4655 4703
 			}
4704
+			return nil, err
4656 4705
 		}
4657
-	}()
4658
-
4659
-	return NewControlClient(conn).GetNetwork(ctx, r)
4706
+		return NewControlClient(conn).GetNetwork(modCtx, r)
4707
+	}
4708
+	return resp, err
4660 4709
 }
4661 4710
 
4662 4711
 func (p *raftProxyControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) {
4663 4712
 
4664
-	if p.cluster.IsLeader() {
4665
-		return p.local.ListNetworks(ctx, r)
4666
-	}
4667
-	ctx, err := p.runCtxMods(ctx)
4713
+	conn, err := p.connSelector.LeaderConn(ctx)
4668 4714
 	if err != nil {
4715
+		if err == raftselector.ErrIsLeader {
4716
+			return p.local.ListNetworks(ctx, r)
4717
+		}
4669 4718
 		return nil, err
4670 4719
 	}
4671
-	conn, err := p.connSelector.Conn()
4720
+	modCtx, err := p.runCtxMods(ctx)
4672 4721
 	if err != nil {
4673 4722
 		return nil, err
4674 4723
 	}
4675 4724
 
4676
-	defer func() {
4725
+	resp, err := NewControlClient(conn).ListNetworks(modCtx, r)
4726
+	if err != nil {
4727
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4728
+			return resp, err
4729
+		}
4730
+		conn, err := p.pollNewLeaderConn(ctx)
4677 4731
 		if err != nil {
4678
-			errStr := err.Error()
4679
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4680
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4681
-				strings.Contains(errStr, "connection error") ||
4682
-				grpc.Code(err) == codes.Internal {
4683
-				p.connSelector.Reset()
4732
+			if err == raftselector.ErrIsLeader {
4733
+				return p.local.ListNetworks(ctx, r)
4684 4734
 			}
4735
+			return nil, err
4685 4736
 		}
4686
-	}()
4687
-
4688
-	return NewControlClient(conn).ListNetworks(ctx, r)
4737
+		return NewControlClient(conn).ListNetworks(modCtx, r)
4738
+	}
4739
+	return resp, err
4689 4740
 }
4690 4741
 
4691 4742
 func (p *raftProxyControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) {
4692 4743
 
4693
-	if p.cluster.IsLeader() {
4694
-		return p.local.CreateNetwork(ctx, r)
4695
-	}
4696
-	ctx, err := p.runCtxMods(ctx)
4744
+	conn, err := p.connSelector.LeaderConn(ctx)
4697 4745
 	if err != nil {
4746
+		if err == raftselector.ErrIsLeader {
4747
+			return p.local.CreateNetwork(ctx, r)
4748
+		}
4698 4749
 		return nil, err
4699 4750
 	}
4700
-	conn, err := p.connSelector.Conn()
4751
+	modCtx, err := p.runCtxMods(ctx)
4701 4752
 	if err != nil {
4702 4753
 		return nil, err
4703 4754
 	}
4704 4755
 
4705
-	defer func() {
4756
+	resp, err := NewControlClient(conn).CreateNetwork(modCtx, r)
4757
+	if err != nil {
4758
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4759
+			return resp, err
4760
+		}
4761
+		conn, err := p.pollNewLeaderConn(ctx)
4706 4762
 		if err != nil {
4707
-			errStr := err.Error()
4708
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4709
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4710
-				strings.Contains(errStr, "connection error") ||
4711
-				grpc.Code(err) == codes.Internal {
4712
-				p.connSelector.Reset()
4763
+			if err == raftselector.ErrIsLeader {
4764
+				return p.local.CreateNetwork(ctx, r)
4713 4765
 			}
4766
+			return nil, err
4714 4767
 		}
4715
-	}()
4716
-
4717
-	return NewControlClient(conn).CreateNetwork(ctx, r)
4768
+		return NewControlClient(conn).CreateNetwork(modCtx, r)
4769
+	}
4770
+	return resp, err
4718 4771
 }
4719 4772
 
4720 4773
 func (p *raftProxyControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) {
4721 4774
 
4722
-	if p.cluster.IsLeader() {
4723
-		return p.local.RemoveNetwork(ctx, r)
4724
-	}
4725
-	ctx, err := p.runCtxMods(ctx)
4775
+	conn, err := p.connSelector.LeaderConn(ctx)
4726 4776
 	if err != nil {
4777
+		if err == raftselector.ErrIsLeader {
4778
+			return p.local.RemoveNetwork(ctx, r)
4779
+		}
4727 4780
 		return nil, err
4728 4781
 	}
4729
-	conn, err := p.connSelector.Conn()
4782
+	modCtx, err := p.runCtxMods(ctx)
4730 4783
 	if err != nil {
4731 4784
 		return nil, err
4732 4785
 	}
4733 4786
 
4734
-	defer func() {
4787
+	resp, err := NewControlClient(conn).RemoveNetwork(modCtx, r)
4788
+	if err != nil {
4789
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4790
+			return resp, err
4791
+		}
4792
+		conn, err := p.pollNewLeaderConn(ctx)
4735 4793
 		if err != nil {
4736
-			errStr := err.Error()
4737
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4738
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4739
-				strings.Contains(errStr, "connection error") ||
4740
-				grpc.Code(err) == codes.Internal {
4741
-				p.connSelector.Reset()
4794
+			if err == raftselector.ErrIsLeader {
4795
+				return p.local.RemoveNetwork(ctx, r)
4742 4796
 			}
4797
+			return nil, err
4743 4798
 		}
4744
-	}()
4745
-
4746
-	return NewControlClient(conn).RemoveNetwork(ctx, r)
4799
+		return NewControlClient(conn).RemoveNetwork(modCtx, r)
4800
+	}
4801
+	return resp, err
4747 4802
 }
4748 4803
 
4749 4804
 func (p *raftProxyControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) {
4750 4805
 
4751
-	if p.cluster.IsLeader() {
4752
-		return p.local.GetCluster(ctx, r)
4753
-	}
4754
-	ctx, err := p.runCtxMods(ctx)
4806
+	conn, err := p.connSelector.LeaderConn(ctx)
4755 4807
 	if err != nil {
4808
+		if err == raftselector.ErrIsLeader {
4809
+			return p.local.GetCluster(ctx, r)
4810
+		}
4756 4811
 		return nil, err
4757 4812
 	}
4758
-	conn, err := p.connSelector.Conn()
4813
+	modCtx, err := p.runCtxMods(ctx)
4759 4814
 	if err != nil {
4760 4815
 		return nil, err
4761 4816
 	}
4762 4817
 
4763
-	defer func() {
4818
+	resp, err := NewControlClient(conn).GetCluster(modCtx, r)
4819
+	if err != nil {
4820
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4821
+			return resp, err
4822
+		}
4823
+		conn, err := p.pollNewLeaderConn(ctx)
4764 4824
 		if err != nil {
4765
-			errStr := err.Error()
4766
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4767
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4768
-				strings.Contains(errStr, "connection error") ||
4769
-				grpc.Code(err) == codes.Internal {
4770
-				p.connSelector.Reset()
4825
+			if err == raftselector.ErrIsLeader {
4826
+				return p.local.GetCluster(ctx, r)
4771 4827
 			}
4828
+			return nil, err
4772 4829
 		}
4773
-	}()
4774
-
4775
-	return NewControlClient(conn).GetCluster(ctx, r)
4830
+		return NewControlClient(conn).GetCluster(modCtx, r)
4831
+	}
4832
+	return resp, err
4776 4833
 }
4777 4834
 
4778 4835
 func (p *raftProxyControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) {
4779 4836
 
4780
-	if p.cluster.IsLeader() {
4781
-		return p.local.ListClusters(ctx, r)
4782
-	}
4783
-	ctx, err := p.runCtxMods(ctx)
4837
+	conn, err := p.connSelector.LeaderConn(ctx)
4784 4838
 	if err != nil {
4839
+		if err == raftselector.ErrIsLeader {
4840
+			return p.local.ListClusters(ctx, r)
4841
+		}
4785 4842
 		return nil, err
4786 4843
 	}
4787
-	conn, err := p.connSelector.Conn()
4844
+	modCtx, err := p.runCtxMods(ctx)
4788 4845
 	if err != nil {
4789 4846
 		return nil, err
4790 4847
 	}
4791 4848
 
4792
-	defer func() {
4849
+	resp, err := NewControlClient(conn).ListClusters(modCtx, r)
4850
+	if err != nil {
4851
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4852
+			return resp, err
4853
+		}
4854
+		conn, err := p.pollNewLeaderConn(ctx)
4793 4855
 		if err != nil {
4794
-			errStr := err.Error()
4795
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4796
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4797
-				strings.Contains(errStr, "connection error") ||
4798
-				grpc.Code(err) == codes.Internal {
4799
-				p.connSelector.Reset()
4856
+			if err == raftselector.ErrIsLeader {
4857
+				return p.local.ListClusters(ctx, r)
4800 4858
 			}
4859
+			return nil, err
4801 4860
 		}
4802
-	}()
4803
-
4804
-	return NewControlClient(conn).ListClusters(ctx, r)
4861
+		return NewControlClient(conn).ListClusters(modCtx, r)
4862
+	}
4863
+	return resp, err
4805 4864
 }
4806 4865
 
4807 4866
 func (p *raftProxyControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) {
4808 4867
 
4809
-	if p.cluster.IsLeader() {
4810
-		return p.local.UpdateCluster(ctx, r)
4811
-	}
4812
-	ctx, err := p.runCtxMods(ctx)
4868
+	conn, err := p.connSelector.LeaderConn(ctx)
4813 4869
 	if err != nil {
4870
+		if err == raftselector.ErrIsLeader {
4871
+			return p.local.UpdateCluster(ctx, r)
4872
+		}
4814 4873
 		return nil, err
4815 4874
 	}
4816
-	conn, err := p.connSelector.Conn()
4875
+	modCtx, err := p.runCtxMods(ctx)
4817 4876
 	if err != nil {
4818 4877
 		return nil, err
4819 4878
 	}
4820 4879
 
4821
-	defer func() {
4880
+	resp, err := NewControlClient(conn).UpdateCluster(modCtx, r)
4881
+	if err != nil {
4882
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
4883
+			return resp, err
4884
+		}
4885
+		conn, err := p.pollNewLeaderConn(ctx)
4822 4886
 		if err != nil {
4823
-			errStr := err.Error()
4824
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
4825
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
4826
-				strings.Contains(errStr, "connection error") ||
4827
-				grpc.Code(err) == codes.Internal {
4828
-				p.connSelector.Reset()
4887
+			if err == raftselector.ErrIsLeader {
4888
+				return p.local.UpdateCluster(ctx, r)
4829 4889
 			}
4890
+			return nil, err
4830 4891
 		}
4831
-	}()
4832
-
4833
-	return NewControlClient(conn).UpdateCluster(ctx, r)
4892
+		return NewControlClient(conn).UpdateCluster(modCtx, r)
4893
+	}
4894
+	return resp, err
4834 4895
 }
4835 4896
 
4836 4897
 func (m *GetNodeRequest) Size() (n int) {
... ...
@@ -6379,50 +6441,55 @@ func (m *ListNodesRequest_Filters) Unmarshal(data []byte) error {
6379 6379
 			}
6380 6380
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
6381 6381
 			iNdEx = postStringIndexmapkey
6382
-			var valuekey uint64
6383
-			for shift := uint(0); ; shift += 7 {
6384
-				if shift >= 64 {
6385
-					return ErrIntOverflowControl
6386
-				}
6387
-				if iNdEx >= l {
6388
-					return io.ErrUnexpectedEOF
6382
+			if m.Labels == nil {
6383
+				m.Labels = make(map[string]string)
6384
+			}
6385
+			if iNdEx < postIndex {
6386
+				var valuekey uint64
6387
+				for shift := uint(0); ; shift += 7 {
6388
+					if shift >= 64 {
6389
+						return ErrIntOverflowControl
6390
+					}
6391
+					if iNdEx >= l {
6392
+						return io.ErrUnexpectedEOF
6393
+					}
6394
+					b := data[iNdEx]
6395
+					iNdEx++
6396
+					valuekey |= (uint64(b) & 0x7F) << shift
6397
+					if b < 0x80 {
6398
+						break
6399
+					}
6389 6400
 				}
6390
-				b := data[iNdEx]
6391
-				iNdEx++
6392
-				valuekey |= (uint64(b) & 0x7F) << shift
6393
-				if b < 0x80 {
6394
-					break
6401
+				var stringLenmapvalue uint64
6402
+				for shift := uint(0); ; shift += 7 {
6403
+					if shift >= 64 {
6404
+						return ErrIntOverflowControl
6405
+					}
6406
+					if iNdEx >= l {
6407
+						return io.ErrUnexpectedEOF
6408
+					}
6409
+					b := data[iNdEx]
6410
+					iNdEx++
6411
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
6412
+					if b < 0x80 {
6413
+						break
6414
+					}
6395 6415
 				}
6396
-			}
6397
-			var stringLenmapvalue uint64
6398
-			for shift := uint(0); ; shift += 7 {
6399
-				if shift >= 64 {
6400
-					return ErrIntOverflowControl
6416
+				intStringLenmapvalue := int(stringLenmapvalue)
6417
+				if intStringLenmapvalue < 0 {
6418
+					return ErrInvalidLengthControl
6401 6419
 				}
6402
-				if iNdEx >= l {
6420
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
6421
+				if postStringIndexmapvalue > l {
6403 6422
 					return io.ErrUnexpectedEOF
6404 6423
 				}
6405
-				b := data[iNdEx]
6406
-				iNdEx++
6407
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
6408
-				if b < 0x80 {
6409
-					break
6410
-				}
6411
-			}
6412
-			intStringLenmapvalue := int(stringLenmapvalue)
6413
-			if intStringLenmapvalue < 0 {
6414
-				return ErrInvalidLengthControl
6415
-			}
6416
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
6417
-			if postStringIndexmapvalue > l {
6418
-				return io.ErrUnexpectedEOF
6424
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
6425
+				iNdEx = postStringIndexmapvalue
6426
+				m.Labels[mapkey] = mapvalue
6427
+			} else {
6428
+				var mapvalue string
6429
+				m.Labels[mapkey] = mapvalue
6419 6430
 			}
6420
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
6421
-			iNdEx = postStringIndexmapvalue
6422
-			if m.Labels == nil {
6423
-				m.Labels = make(map[string]string)
6424
-			}
6425
-			m.Labels[mapkey] = mapvalue
6426 6431
 			iNdEx = postIndex
6427 6432
 		case 4:
6428 6433
 			if wireType != 0 {
... ...
@@ -7499,50 +7566,55 @@ func (m *ListTasksRequest_Filters) Unmarshal(data []byte) error {
7499 7499
 			}
7500 7500
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
7501 7501
 			iNdEx = postStringIndexmapkey
7502
-			var valuekey uint64
7503
-			for shift := uint(0); ; shift += 7 {
7504
-				if shift >= 64 {
7505
-					return ErrIntOverflowControl
7506
-				}
7507
-				if iNdEx >= l {
7508
-					return io.ErrUnexpectedEOF
7502
+			if m.Labels == nil {
7503
+				m.Labels = make(map[string]string)
7504
+			}
7505
+			if iNdEx < postIndex {
7506
+				var valuekey uint64
7507
+				for shift := uint(0); ; shift += 7 {
7508
+					if shift >= 64 {
7509
+						return ErrIntOverflowControl
7510
+					}
7511
+					if iNdEx >= l {
7512
+						return io.ErrUnexpectedEOF
7513
+					}
7514
+					b := data[iNdEx]
7515
+					iNdEx++
7516
+					valuekey |= (uint64(b) & 0x7F) << shift
7517
+					if b < 0x80 {
7518
+						break
7519
+					}
7509 7520
 				}
7510
-				b := data[iNdEx]
7511
-				iNdEx++
7512
-				valuekey |= (uint64(b) & 0x7F) << shift
7513
-				if b < 0x80 {
7514
-					break
7521
+				var stringLenmapvalue uint64
7522
+				for shift := uint(0); ; shift += 7 {
7523
+					if shift >= 64 {
7524
+						return ErrIntOverflowControl
7525
+					}
7526
+					if iNdEx >= l {
7527
+						return io.ErrUnexpectedEOF
7528
+					}
7529
+					b := data[iNdEx]
7530
+					iNdEx++
7531
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
7532
+					if b < 0x80 {
7533
+						break
7534
+					}
7515 7535
 				}
7516
-			}
7517
-			var stringLenmapvalue uint64
7518
-			for shift := uint(0); ; shift += 7 {
7519
-				if shift >= 64 {
7520
-					return ErrIntOverflowControl
7536
+				intStringLenmapvalue := int(stringLenmapvalue)
7537
+				if intStringLenmapvalue < 0 {
7538
+					return ErrInvalidLengthControl
7521 7539
 				}
7522
-				if iNdEx >= l {
7540
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
7541
+				if postStringIndexmapvalue > l {
7523 7542
 					return io.ErrUnexpectedEOF
7524 7543
 				}
7525
-				b := data[iNdEx]
7526
-				iNdEx++
7527
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
7528
-				if b < 0x80 {
7529
-					break
7530
-				}
7531
-			}
7532
-			intStringLenmapvalue := int(stringLenmapvalue)
7533
-			if intStringLenmapvalue < 0 {
7534
-				return ErrInvalidLengthControl
7535
-			}
7536
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
7537
-			if postStringIndexmapvalue > l {
7538
-				return io.ErrUnexpectedEOF
7539
-			}
7540
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
7541
-			iNdEx = postStringIndexmapvalue
7542
-			if m.Labels == nil {
7543
-				m.Labels = make(map[string]string)
7544
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
7545
+				iNdEx = postStringIndexmapvalue
7546
+				m.Labels[mapkey] = mapvalue
7547
+			} else {
7548
+				var mapvalue string
7549
+				m.Labels[mapkey] = mapvalue
7544 7550
 			}
7545
-			m.Labels[mapkey] = mapvalue
7546 7551
 			iNdEx = postIndex
7547 7552
 		case 4:
7548 7553
 			if wireType != 2 {
... ...
@@ -8674,50 +8746,55 @@ func (m *ListServicesRequest_Filters) Unmarshal(data []byte) error {
8674 8674
 			}
8675 8675
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
8676 8676
 			iNdEx = postStringIndexmapkey
8677
-			var valuekey uint64
8678
-			for shift := uint(0); ; shift += 7 {
8679
-				if shift >= 64 {
8680
-					return ErrIntOverflowControl
8681
-				}
8682
-				if iNdEx >= l {
8683
-					return io.ErrUnexpectedEOF
8677
+			if m.Labels == nil {
8678
+				m.Labels = make(map[string]string)
8679
+			}
8680
+			if iNdEx < postIndex {
8681
+				var valuekey uint64
8682
+				for shift := uint(0); ; shift += 7 {
8683
+					if shift >= 64 {
8684
+						return ErrIntOverflowControl
8685
+					}
8686
+					if iNdEx >= l {
8687
+						return io.ErrUnexpectedEOF
8688
+					}
8689
+					b := data[iNdEx]
8690
+					iNdEx++
8691
+					valuekey |= (uint64(b) & 0x7F) << shift
8692
+					if b < 0x80 {
8693
+						break
8694
+					}
8684 8695
 				}
8685
-				b := data[iNdEx]
8686
-				iNdEx++
8687
-				valuekey |= (uint64(b) & 0x7F) << shift
8688
-				if b < 0x80 {
8689
-					break
8696
+				var stringLenmapvalue uint64
8697
+				for shift := uint(0); ; shift += 7 {
8698
+					if shift >= 64 {
8699
+						return ErrIntOverflowControl
8700
+					}
8701
+					if iNdEx >= l {
8702
+						return io.ErrUnexpectedEOF
8703
+					}
8704
+					b := data[iNdEx]
8705
+					iNdEx++
8706
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
8707
+					if b < 0x80 {
8708
+						break
8709
+					}
8690 8710
 				}
8691
-			}
8692
-			var stringLenmapvalue uint64
8693
-			for shift := uint(0); ; shift += 7 {
8694
-				if shift >= 64 {
8695
-					return ErrIntOverflowControl
8711
+				intStringLenmapvalue := int(stringLenmapvalue)
8712
+				if intStringLenmapvalue < 0 {
8713
+					return ErrInvalidLengthControl
8696 8714
 				}
8697
-				if iNdEx >= l {
8715
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
8716
+				if postStringIndexmapvalue > l {
8698 8717
 					return io.ErrUnexpectedEOF
8699 8718
 				}
8700
-				b := data[iNdEx]
8701
-				iNdEx++
8702
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
8703
-				if b < 0x80 {
8704
-					break
8705
-				}
8706
-			}
8707
-			intStringLenmapvalue := int(stringLenmapvalue)
8708
-			if intStringLenmapvalue < 0 {
8709
-				return ErrInvalidLengthControl
8710
-			}
8711
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
8712
-			if postStringIndexmapvalue > l {
8713
-				return io.ErrUnexpectedEOF
8719
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
8720
+				iNdEx = postStringIndexmapvalue
8721
+				m.Labels[mapkey] = mapvalue
8722
+			} else {
8723
+				var mapvalue string
8724
+				m.Labels[mapkey] = mapvalue
8714 8725
 			}
8715
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
8716
-			iNdEx = postStringIndexmapvalue
8717
-			if m.Labels == nil {
8718
-				m.Labels = make(map[string]string)
8719
-			}
8720
-			m.Labels[mapkey] = mapvalue
8721 8726
 			iNdEx = postIndex
8722 8727
 		case 4:
8723 8728
 			if wireType != 2 {
... ...
@@ -9601,50 +9678,55 @@ func (m *ListNetworksRequest_Filters) Unmarshal(data []byte) error {
9601 9601
 			}
9602 9602
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
9603 9603
 			iNdEx = postStringIndexmapkey
9604
-			var valuekey uint64
9605
-			for shift := uint(0); ; shift += 7 {
9606
-				if shift >= 64 {
9607
-					return ErrIntOverflowControl
9608
-				}
9609
-				if iNdEx >= l {
9610
-					return io.ErrUnexpectedEOF
9604
+			if m.Labels == nil {
9605
+				m.Labels = make(map[string]string)
9606
+			}
9607
+			if iNdEx < postIndex {
9608
+				var valuekey uint64
9609
+				for shift := uint(0); ; shift += 7 {
9610
+					if shift >= 64 {
9611
+						return ErrIntOverflowControl
9612
+					}
9613
+					if iNdEx >= l {
9614
+						return io.ErrUnexpectedEOF
9615
+					}
9616
+					b := data[iNdEx]
9617
+					iNdEx++
9618
+					valuekey |= (uint64(b) & 0x7F) << shift
9619
+					if b < 0x80 {
9620
+						break
9621
+					}
9611 9622
 				}
9612
-				b := data[iNdEx]
9613
-				iNdEx++
9614
-				valuekey |= (uint64(b) & 0x7F) << shift
9615
-				if b < 0x80 {
9616
-					break
9623
+				var stringLenmapvalue uint64
9624
+				for shift := uint(0); ; shift += 7 {
9625
+					if shift >= 64 {
9626
+						return ErrIntOverflowControl
9627
+					}
9628
+					if iNdEx >= l {
9629
+						return io.ErrUnexpectedEOF
9630
+					}
9631
+					b := data[iNdEx]
9632
+					iNdEx++
9633
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
9634
+					if b < 0x80 {
9635
+						break
9636
+					}
9617 9637
 				}
9618
-			}
9619
-			var stringLenmapvalue uint64
9620
-			for shift := uint(0); ; shift += 7 {
9621
-				if shift >= 64 {
9622
-					return ErrIntOverflowControl
9638
+				intStringLenmapvalue := int(stringLenmapvalue)
9639
+				if intStringLenmapvalue < 0 {
9640
+					return ErrInvalidLengthControl
9623 9641
 				}
9624
-				if iNdEx >= l {
9642
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
9643
+				if postStringIndexmapvalue > l {
9625 9644
 					return io.ErrUnexpectedEOF
9626 9645
 				}
9627
-				b := data[iNdEx]
9628
-				iNdEx++
9629
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
9630
-				if b < 0x80 {
9631
-					break
9632
-				}
9646
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
9647
+				iNdEx = postStringIndexmapvalue
9648
+				m.Labels[mapkey] = mapvalue
9649
+			} else {
9650
+				var mapvalue string
9651
+				m.Labels[mapkey] = mapvalue
9633 9652
 			}
9634
-			intStringLenmapvalue := int(stringLenmapvalue)
9635
-			if intStringLenmapvalue < 0 {
9636
-				return ErrInvalidLengthControl
9637
-			}
9638
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
9639
-			if postStringIndexmapvalue > l {
9640
-				return io.ErrUnexpectedEOF
9641
-			}
9642
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
9643
-			iNdEx = postStringIndexmapvalue
9644
-			if m.Labels == nil {
9645
-				m.Labels = make(map[string]string)
9646
-			}
9647
-			m.Labels[mapkey] = mapvalue
9648 9653
 			iNdEx = postIndex
9649 9654
 		case 4:
9650 9655
 			if wireType != 2 {
... ...
@@ -10175,50 +10257,55 @@ func (m *ListClustersRequest_Filters) Unmarshal(data []byte) error {
10175 10175
 			}
10176 10176
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
10177 10177
 			iNdEx = postStringIndexmapkey
10178
-			var valuekey uint64
10179
-			for shift := uint(0); ; shift += 7 {
10180
-				if shift >= 64 {
10181
-					return ErrIntOverflowControl
10182
-				}
10183
-				if iNdEx >= l {
10184
-					return io.ErrUnexpectedEOF
10178
+			if m.Labels == nil {
10179
+				m.Labels = make(map[string]string)
10180
+			}
10181
+			if iNdEx < postIndex {
10182
+				var valuekey uint64
10183
+				for shift := uint(0); ; shift += 7 {
10184
+					if shift >= 64 {
10185
+						return ErrIntOverflowControl
10186
+					}
10187
+					if iNdEx >= l {
10188
+						return io.ErrUnexpectedEOF
10189
+					}
10190
+					b := data[iNdEx]
10191
+					iNdEx++
10192
+					valuekey |= (uint64(b) & 0x7F) << shift
10193
+					if b < 0x80 {
10194
+						break
10195
+					}
10185 10196
 				}
10186
-				b := data[iNdEx]
10187
-				iNdEx++
10188
-				valuekey |= (uint64(b) & 0x7F) << shift
10189
-				if b < 0x80 {
10190
-					break
10197
+				var stringLenmapvalue uint64
10198
+				for shift := uint(0); ; shift += 7 {
10199
+					if shift >= 64 {
10200
+						return ErrIntOverflowControl
10201
+					}
10202
+					if iNdEx >= l {
10203
+						return io.ErrUnexpectedEOF
10204
+					}
10205
+					b := data[iNdEx]
10206
+					iNdEx++
10207
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
10208
+					if b < 0x80 {
10209
+						break
10210
+					}
10191 10211
 				}
10192
-			}
10193
-			var stringLenmapvalue uint64
10194
-			for shift := uint(0); ; shift += 7 {
10195
-				if shift >= 64 {
10196
-					return ErrIntOverflowControl
10212
+				intStringLenmapvalue := int(stringLenmapvalue)
10213
+				if intStringLenmapvalue < 0 {
10214
+					return ErrInvalidLengthControl
10197 10215
 				}
10198
-				if iNdEx >= l {
10216
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
10217
+				if postStringIndexmapvalue > l {
10199 10218
 					return io.ErrUnexpectedEOF
10200 10219
 				}
10201
-				b := data[iNdEx]
10202
-				iNdEx++
10203
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
10204
-				if b < 0x80 {
10205
-					break
10206
-				}
10220
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
10221
+				iNdEx = postStringIndexmapvalue
10222
+				m.Labels[mapkey] = mapvalue
10223
+			} else {
10224
+				var mapvalue string
10225
+				m.Labels[mapkey] = mapvalue
10207 10226
 			}
10208
-			intStringLenmapvalue := int(stringLenmapvalue)
10209
-			if intStringLenmapvalue < 0 {
10210
-				return ErrInvalidLengthControl
10211
-			}
10212
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
10213
-			if postStringIndexmapvalue > l {
10214
-				return io.ErrUnexpectedEOF
10215
-			}
10216
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
10217
-			iNdEx = postStringIndexmapvalue
10218
-			if m.Labels == nil {
10219
-				m.Labels = make(map[string]string)
10220
-			}
10221
-			m.Labels[mapkey] = mapvalue
10222 10227
 			iNdEx = postIndex
10223 10228
 		case 4:
10224 10229
 			if wireType != 2 {
... ...
@@ -10804,6 +10891,8 @@ var (
10804 10804
 	ErrIntOverflowControl   = fmt.Errorf("proto: integer overflow")
10805 10805
 )
10806 10806
 
10807
+func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
10808
+
10807 10809
 var fileDescriptorControl = []byte{
10808 10810
 	// 1521 bytes of a gzipped FileDescriptorProto
10809 10811
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1b, 0x45,
... ...
@@ -22,10 +22,11 @@ import (
22 22
 	grpc "google.golang.org/grpc"
23 23
 )
24 24
 
25
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
25
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
26 26
 import codes "google.golang.org/grpc/codes"
27 27
 import metadata "google.golang.org/grpc/metadata"
28 28
 import transport "google.golang.org/grpc/transport"
29
+import time "time"
29 30
 
30 31
 import io "io"
31 32
 
... ...
@@ -34,6 +35,31 @@ var _ = proto.Marshal
34 34
 var _ = fmt.Errorf
35 35
 var _ = math.Inf
36 36
 
37
+// AssignmentType specifies whether this assignment message carries
38
+// the full state, or is an update to an existing state.
39
+type AssignmentsMessage_Type int32
40
+
41
+const (
42
+	AssignmentsMessage_COMPLETE    AssignmentsMessage_Type = 0
43
+	AssignmentsMessage_INCREMENTAL AssignmentsMessage_Type = 1
44
+)
45
+
46
+var AssignmentsMessage_Type_name = map[int32]string{
47
+	0: "COMPLETE",
48
+	1: "INCREMENTAL",
49
+}
50
+var AssignmentsMessage_Type_value = map[string]int32{
51
+	"COMPLETE":    0,
52
+	"INCREMENTAL": 1,
53
+}
54
+
55
+func (x AssignmentsMessage_Type) String() string {
56
+	return proto.EnumName(AssignmentsMessage_Type_name, int32(x))
57
+}
58
+func (AssignmentsMessage_Type) EnumDescriptor() ([]byte, []int) {
59
+	return fileDescriptorDispatcher, []int{9, 0}
60
+}
61
+
37 62
 // SessionRequest starts a session.
38 63
 type SessionRequest struct {
39 64
 	Description *NodeDescription `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
... ...
@@ -180,6 +206,41 @@ func (m *TasksMessage) Reset()                    { *m = TasksMessage{} }
180 180
 func (*TasksMessage) ProtoMessage()               {}
181 181
 func (*TasksMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{7} }
182 182
 
183
+type AssignmentsRequest struct {
184
+	SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
185
+}
186
+
187
+func (m *AssignmentsRequest) Reset()                    { *m = AssignmentsRequest{} }
188
+func (*AssignmentsRequest) ProtoMessage()               {}
189
+func (*AssignmentsRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{8} }
190
+
191
+type AssignmentsMessage struct {
192
+	Type AssignmentsMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.AssignmentsMessage_Type" json:"type,omitempty"`
193
+	// AppliesTo references the previous ResultsIn value, to chain
194
+	// incremental updates together. For the first update in a stream,
195
+	// AppliesTo is empty.  If AppliesTo does not match the previously
196
+	// received ResultsIn, the consumer of the stream should start a new
197
+	// Assignments stream to re-sync.
198
+	AppliesTo string `protobuf:"bytes,2,opt,name=applies_to,json=appliesTo,proto3" json:"applies_to,omitempty"`
199
+	// ResultsIn identifies the result of this assignments message, to
200
+	// match against the next message's AppliesTo value and protect
201
+	// against missed messages.
202
+	ResultsIn string `protobuf:"bytes,3,opt,name=results_in,json=resultsIn,proto3" json:"results_in,omitempty"`
203
+	// UpdateTasks is a set of new or updated tasks to run on this node.
204
+	// In the first assignments message, it contains all of the tasks
205
+	// to run on this node. Tasks outside of this set running on the node
206
+	// should be terminated.
207
+	UpdateTasks []*Task `protobuf:"bytes,4,rep,name=update_tasks,json=updateTasks" json:"update_tasks,omitempty"`
208
+	// RemoveTasks is a set of previously-assigned task IDs to remove from the
209
+	// assignment set. It is not used in the first assignments message of
210
+	// a stream.
211
+	RemoveTasks []string `protobuf:"bytes,5,rep,name=remove_tasks,json=removeTasks" json:"remove_tasks,omitempty"`
212
+}
213
+
214
+func (m *AssignmentsMessage) Reset()                    { *m = AssignmentsMessage{} }
215
+func (*AssignmentsMessage) ProtoMessage()               {}
216
+func (*AssignmentsMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{9} }
217
+
183 218
 func init() {
184 219
 	proto.RegisterType((*SessionRequest)(nil), "docker.swarmkit.v1.SessionRequest")
185 220
 	proto.RegisterType((*SessionMessage)(nil), "docker.swarmkit.v1.SessionMessage")
... ...
@@ -190,6 +251,9 @@ func init() {
190 190
 	proto.RegisterType((*UpdateTaskStatusResponse)(nil), "docker.swarmkit.v1.UpdateTaskStatusResponse")
191 191
 	proto.RegisterType((*TasksRequest)(nil), "docker.swarmkit.v1.TasksRequest")
192 192
 	proto.RegisterType((*TasksMessage)(nil), "docker.swarmkit.v1.TasksMessage")
193
+	proto.RegisterType((*AssignmentsRequest)(nil), "docker.swarmkit.v1.AssignmentsRequest")
194
+	proto.RegisterType((*AssignmentsMessage)(nil), "docker.swarmkit.v1.AssignmentsMessage")
195
+	proto.RegisterEnum("docker.swarmkit.v1.AssignmentsMessage_Type", AssignmentsMessage_Type_name, AssignmentsMessage_Type_value)
193 196
 }
194 197
 
195 198
 type authenticatedWrapperDispatcherServer struct {
... ...
@@ -236,6 +300,14 @@ func (p *authenticatedWrapperDispatcherServer) Tasks(r *TasksRequest, stream Dis
236 236
 	return p.local.Tasks(r, stream)
237 237
 }
238 238
 
239
+func (p *authenticatedWrapperDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error {
240
+
241
+	if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil {
242
+		return err
243
+	}
244
+	return p.local.Assignments(r, stream)
245
+}
246
+
239 247
 func (m *SessionRequest) Copy() *SessionRequest {
240 248
 	if m == nil {
241 249
 		return nil
... ...
@@ -371,6 +443,46 @@ func (m *TasksMessage) Copy() *TasksMessage {
371 371
 	return o
372 372
 }
373 373
 
374
+func (m *AssignmentsRequest) Copy() *AssignmentsRequest {
375
+	if m == nil {
376
+		return nil
377
+	}
378
+
379
+	o := &AssignmentsRequest{
380
+		SessionID: m.SessionID,
381
+	}
382
+
383
+	return o
384
+}
385
+
386
+func (m *AssignmentsMessage) Copy() *AssignmentsMessage {
387
+	if m == nil {
388
+		return nil
389
+	}
390
+
391
+	o := &AssignmentsMessage{
392
+		Type:      m.Type,
393
+		AppliesTo: m.AppliesTo,
394
+		ResultsIn: m.ResultsIn,
395
+	}
396
+
397
+	if m.UpdateTasks != nil {
398
+		o.UpdateTasks = make([]*Task, 0, len(m.UpdateTasks))
399
+		for _, v := range m.UpdateTasks {
400
+			o.UpdateTasks = append(o.UpdateTasks, v.Copy())
401
+		}
402
+	}
403
+
404
+	if m.RemoveTasks != nil {
405
+		o.RemoveTasks = make([]string, 0, len(m.RemoveTasks))
406
+		for _, v := range m.RemoveTasks {
407
+			o.RemoveTasks = append(o.RemoveTasks, v)
408
+		}
409
+	}
410
+
411
+	return o
412
+}
413
+
374 414
 func (this *SessionRequest) GoString() string {
375 415
 	if this == nil {
376 416
 		return "nil"
... ...
@@ -480,6 +592,32 @@ func (this *TasksMessage) GoString() string {
480 480
 	s = append(s, "}")
481 481
 	return strings.Join(s, "")
482 482
 }
483
+func (this *AssignmentsRequest) GoString() string {
484
+	if this == nil {
485
+		return "nil"
486
+	}
487
+	s := make([]string, 0, 5)
488
+	s = append(s, "&api.AssignmentsRequest{")
489
+	s = append(s, "SessionID: "+fmt.Sprintf("%#v", this.SessionID)+",\n")
490
+	s = append(s, "}")
491
+	return strings.Join(s, "")
492
+}
493
+func (this *AssignmentsMessage) GoString() string {
494
+	if this == nil {
495
+		return "nil"
496
+	}
497
+	s := make([]string, 0, 9)
498
+	s = append(s, "&api.AssignmentsMessage{")
499
+	s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
500
+	s = append(s, "AppliesTo: "+fmt.Sprintf("%#v", this.AppliesTo)+",\n")
501
+	s = append(s, "ResultsIn: "+fmt.Sprintf("%#v", this.ResultsIn)+",\n")
502
+	if this.UpdateTasks != nil {
503
+		s = append(s, "UpdateTasks: "+fmt.Sprintf("%#v", this.UpdateTasks)+",\n")
504
+	}
505
+	s = append(s, "RemoveTasks: "+fmt.Sprintf("%#v", this.RemoveTasks)+",\n")
506
+	s = append(s, "}")
507
+	return strings.Join(s, "")
508
+}
483 509
 func valueToGoStringDispatcher(v interface{}, typ string) string {
484 510
 	rv := reflect.ValueOf(v)
485 511
 	if rv.IsNil() {
... ...
@@ -488,11 +626,12 @@ func valueToGoStringDispatcher(v interface{}, typ string) string {
488 488
 	pv := reflect.Indirect(rv).Interface()
489 489
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
490 490
 }
491
-func extensionToGoStringDispatcher(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
491
+func extensionToGoStringDispatcher(m github_com_gogo_protobuf_proto.Message) string {
492
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
492 493
 	if e == nil {
493 494
 		return "nil"
494 495
 	}
495
-	s := "map[int32]proto.Extension{"
496
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
496 497
 	keys := make([]int, 0, len(e))
497 498
 	for k := range e {
498 499
 		keys = append(keys, int(k))
... ...
@@ -502,7 +641,7 @@ func extensionToGoStringDispatcher(e map[int32]github_com_gogo_protobuf_proto.Ex
502 502
 	for _, k := range keys {
503 503
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
504 504
 	}
505
-	s += strings.Join(ss, ",") + "}"
505
+	s += strings.Join(ss, ",") + "})"
506 506
 	return s
507 507
 }
508 508
 
... ...
@@ -512,7 +651,7 @@ var _ grpc.ClientConn
512 512
 
513 513
 // This is a compile-time assertion to ensure that this generated file
514 514
 // is compatible with the grpc package it is being compiled against.
515
-const _ = grpc.SupportPackageIsVersion2
515
+const _ = grpc.SupportPackageIsVersion3
516 516
 
517 517
 // Client API for Dispatcher service
518 518
 
... ...
@@ -541,6 +680,11 @@ type DispatcherClient interface {
541 541
 	// of tasks which should be run on node, if task is not present in that list,
542 542
 	// it should be terminated.
543 543
 	Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error)
544
+	// Assignments is a stream of assignments such as tasks and secrets for node.
545
+	// The first message in the stream contains all of the tasks and secrets
546
+	// that are relevant to the node. Future messages in the stream are updates to
547
+	// the set of assignments.
548
+	Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error)
544 549
 }
545 550
 
546 551
 type dispatcherClient struct {
... ...
@@ -633,6 +777,38 @@ func (x *dispatcherTasksClient) Recv() (*TasksMessage, error) {
633 633
 	return m, nil
634 634
 }
635 635
 
636
+func (c *dispatcherClient) Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) {
637
+	stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[2], c.cc, "/docker.swarmkit.v1.Dispatcher/Assignments", opts...)
638
+	if err != nil {
639
+		return nil, err
640
+	}
641
+	x := &dispatcherAssignmentsClient{stream}
642
+	if err := x.ClientStream.SendMsg(in); err != nil {
643
+		return nil, err
644
+	}
645
+	if err := x.ClientStream.CloseSend(); err != nil {
646
+		return nil, err
647
+	}
648
+	return x, nil
649
+}
650
+
651
+type Dispatcher_AssignmentsClient interface {
652
+	Recv() (*AssignmentsMessage, error)
653
+	grpc.ClientStream
654
+}
655
+
656
+type dispatcherAssignmentsClient struct {
657
+	grpc.ClientStream
658
+}
659
+
660
+func (x *dispatcherAssignmentsClient) Recv() (*AssignmentsMessage, error) {
661
+	m := new(AssignmentsMessage)
662
+	if err := x.ClientStream.RecvMsg(m); err != nil {
663
+		return nil, err
664
+	}
665
+	return m, nil
666
+}
667
+
636 668
 // Server API for Dispatcher service
637 669
 
638 670
 type DispatcherServer interface {
... ...
@@ -660,6 +836,11 @@ type DispatcherServer interface {
660 660
 	// of tasks which should be run on node, if task is not present in that list,
661 661
 	// it should be terminated.
662 662
 	Tasks(*TasksRequest, Dispatcher_TasksServer) error
663
+	// Assignments is a stream of assignments such as tasks and secrets for node.
664
+	// The first message in the stream contains all of the tasks and secrets
665
+	// that are relevant to the node. Future messages in the stream are updates to
666
+	// the set of assignments.
667
+	Assignments(*AssignmentsRequest, Dispatcher_AssignmentsServer) error
663 668
 }
664 669
 
665 670
 func RegisterDispatcherServer(s *grpc.Server, srv DispatcherServer) {
... ...
@@ -744,6 +925,27 @@ func (x *dispatcherTasksServer) Send(m *TasksMessage) error {
744 744
 	return x.ServerStream.SendMsg(m)
745 745
 }
746 746
 
747
+func _Dispatcher_Assignments_Handler(srv interface{}, stream grpc.ServerStream) error {
748
+	m := new(AssignmentsRequest)
749
+	if err := stream.RecvMsg(m); err != nil {
750
+		return err
751
+	}
752
+	return srv.(DispatcherServer).Assignments(m, &dispatcherAssignmentsServer{stream})
753
+}
754
+
755
+type Dispatcher_AssignmentsServer interface {
756
+	Send(*AssignmentsMessage) error
757
+	grpc.ServerStream
758
+}
759
+
760
+type dispatcherAssignmentsServer struct {
761
+	grpc.ServerStream
762
+}
763
+
764
+func (x *dispatcherAssignmentsServer) Send(m *AssignmentsMessage) error {
765
+	return x.ServerStream.SendMsg(m)
766
+}
767
+
747 768
 var _Dispatcher_serviceDesc = grpc.ServiceDesc{
748 769
 	ServiceName: "docker.swarmkit.v1.Dispatcher",
749 770
 	HandlerType: (*DispatcherServer)(nil),
... ...
@@ -768,7 +970,13 @@ var _Dispatcher_serviceDesc = grpc.ServiceDesc{
768 768
 			Handler:       _Dispatcher_Tasks_Handler,
769 769
 			ServerStreams: true,
770 770
 		},
771
+		{
772
+			StreamName:    "Assignments",
773
+			Handler:       _Dispatcher_Assignments_Handler,
774
+			ServerStreams: true,
775
+		},
771 776
 	},
777
+	Metadata: fileDescriptorDispatcher,
772 778
 }
773 779
 
774 780
 func (m *SessionRequest) Marshal() (data []byte, err error) {
... ...
@@ -1055,6 +1263,92 @@ func (m *TasksMessage) MarshalTo(data []byte) (int, error) {
1055 1055
 	return i, nil
1056 1056
 }
1057 1057
 
1058
+func (m *AssignmentsRequest) Marshal() (data []byte, err error) {
1059
+	size := m.Size()
1060
+	data = make([]byte, size)
1061
+	n, err := m.MarshalTo(data)
1062
+	if err != nil {
1063
+		return nil, err
1064
+	}
1065
+	return data[:n], nil
1066
+}
1067
+
1068
+func (m *AssignmentsRequest) MarshalTo(data []byte) (int, error) {
1069
+	var i int
1070
+	_ = i
1071
+	var l int
1072
+	_ = l
1073
+	if len(m.SessionID) > 0 {
1074
+		data[i] = 0xa
1075
+		i++
1076
+		i = encodeVarintDispatcher(data, i, uint64(len(m.SessionID)))
1077
+		i += copy(data[i:], m.SessionID)
1078
+	}
1079
+	return i, nil
1080
+}
1081
+
1082
+func (m *AssignmentsMessage) Marshal() (data []byte, err error) {
1083
+	size := m.Size()
1084
+	data = make([]byte, size)
1085
+	n, err := m.MarshalTo(data)
1086
+	if err != nil {
1087
+		return nil, err
1088
+	}
1089
+	return data[:n], nil
1090
+}
1091
+
1092
+func (m *AssignmentsMessage) MarshalTo(data []byte) (int, error) {
1093
+	var i int
1094
+	_ = i
1095
+	var l int
1096
+	_ = l
1097
+	if m.Type != 0 {
1098
+		data[i] = 0x8
1099
+		i++
1100
+		i = encodeVarintDispatcher(data, i, uint64(m.Type))
1101
+	}
1102
+	if len(m.AppliesTo) > 0 {
1103
+		data[i] = 0x12
1104
+		i++
1105
+		i = encodeVarintDispatcher(data, i, uint64(len(m.AppliesTo)))
1106
+		i += copy(data[i:], m.AppliesTo)
1107
+	}
1108
+	if len(m.ResultsIn) > 0 {
1109
+		data[i] = 0x1a
1110
+		i++
1111
+		i = encodeVarintDispatcher(data, i, uint64(len(m.ResultsIn)))
1112
+		i += copy(data[i:], m.ResultsIn)
1113
+	}
1114
+	if len(m.UpdateTasks) > 0 {
1115
+		for _, msg := range m.UpdateTasks {
1116
+			data[i] = 0x22
1117
+			i++
1118
+			i = encodeVarintDispatcher(data, i, uint64(msg.Size()))
1119
+			n, err := msg.MarshalTo(data[i:])
1120
+			if err != nil {
1121
+				return 0, err
1122
+			}
1123
+			i += n
1124
+		}
1125
+	}
1126
+	if len(m.RemoveTasks) > 0 {
1127
+		for _, s := range m.RemoveTasks {
1128
+			data[i] = 0x2a
1129
+			i++
1130
+			l = len(s)
1131
+			for l >= 1<<7 {
1132
+				data[i] = uint8(uint64(l)&0x7f | 0x80)
1133
+				l >>= 7
1134
+				i++
1135
+			}
1136
+			data[i] = uint8(l)
1137
+			i++
1138
+			i += copy(data[i:], s)
1139
+		}
1140
+	}
1141
+	return i, nil
1142
+}
1143
+
1058 1144
 func encodeFixed64Dispatcher(data []byte, offset int, v uint64) int {
1059 1145
 	data[offset] = uint8(v)
1060 1146
 	data[offset+1] = uint8(v >> 8)
... ...
@@ -1085,12 +1379,11 @@ func encodeVarintDispatcher(data []byte, offset int, v uint64) int {
1085 1085
 
1086 1086
 type raftProxyDispatcherServer struct {
1087 1087
 	local        DispatcherServer
1088
-	connSelector raftpicker.Interface
1089
-	cluster      raftpicker.RaftCluster
1088
+	connSelector raftselector.ConnProvider
1090 1089
 	ctxMods      []func(context.Context) (context.Context, error)
1091 1090
 }
1092 1091
 
1093
-func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) DispatcherServer {
1092
+func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) DispatcherServer {
1094 1093
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
1095 1094
 		s, ok := transport.StreamFromContext(ctx)
1096 1095
 		if !ok {
... ...
@@ -1112,7 +1405,6 @@ func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftpicke
1112 1112
 
1113 1113
 	return &raftProxyDispatcherServer{
1114 1114
 		local:        local,
1115
-		cluster:      cluster,
1116 1115
 		connSelector: connSelector,
1117 1116
 		ctxMods:      mods,
1118 1117
 	}
... ...
@@ -1127,33 +1419,44 @@ func (p *raftProxyDispatcherServer) runCtxMods(ctx context.Context) (context.Con
1127 1127
 	}
1128 1128
 	return ctx, nil
1129 1129
 }
1130
+func (p *raftProxyDispatcherServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
1131
+	ticker := time.NewTicker(500 * time.Millisecond)
1132
+	defer ticker.Stop()
1133
+	for {
1134
+		select {
1135
+		case <-ticker.C:
1136
+			conn, err := p.connSelector.LeaderConn(ctx)
1137
+			if err != nil {
1138
+				return nil, err
1139
+			}
1130 1140
 
1131
-func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error {
1141
+			client := NewHealthClient(conn)
1132 1142
 
1133
-	if p.cluster.IsLeader() {
1134
-		return p.local.Session(r, stream)
1143
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
1144
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
1145
+				continue
1146
+			}
1147
+			return conn, nil
1148
+		case <-ctx.Done():
1149
+			return nil, ctx.Err()
1150
+		}
1135 1151
 	}
1136
-	ctx, err := p.runCtxMods(stream.Context())
1152
+}
1153
+
1154
+func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error {
1155
+
1156
+	ctx := stream.Context()
1157
+	conn, err := p.connSelector.LeaderConn(ctx)
1137 1158
 	if err != nil {
1159
+		if err == raftselector.ErrIsLeader {
1160
+			return p.local.Session(r, stream)
1161
+		}
1138 1162
 		return err
1139 1163
 	}
1140
-	conn, err := p.connSelector.Conn()
1164
+	ctx, err = p.runCtxMods(ctx)
1141 1165
 	if err != nil {
1142 1166
 		return err
1143 1167
 	}
1144
-
1145
-	defer func() {
1146
-		if err != nil {
1147
-			errStr := err.Error()
1148
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1149
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1150
-				strings.Contains(errStr, "connection error") ||
1151
-				grpc.Code(err) == codes.Internal {
1152
-				p.connSelector.Reset()
1153
-			}
1154
-		}
1155
-	}()
1156
-
1157 1168
 	clientStream, err := NewDispatcherClient(conn).Session(ctx, r)
1158 1169
 
1159 1170
 	if err != nil {
... ...
@@ -1177,89 +1480,116 @@ func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher
1177 1177
 
1178 1178
 func (p *raftProxyDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) {
1179 1179
 
1180
-	if p.cluster.IsLeader() {
1181
-		return p.local.Heartbeat(ctx, r)
1182
-	}
1183
-	ctx, err := p.runCtxMods(ctx)
1180
+	conn, err := p.connSelector.LeaderConn(ctx)
1184 1181
 	if err != nil {
1182
+		if err == raftselector.ErrIsLeader {
1183
+			return p.local.Heartbeat(ctx, r)
1184
+		}
1185 1185
 		return nil, err
1186 1186
 	}
1187
-	conn, err := p.connSelector.Conn()
1187
+	modCtx, err := p.runCtxMods(ctx)
1188 1188
 	if err != nil {
1189 1189
 		return nil, err
1190 1190
 	}
1191 1191
 
1192
-	defer func() {
1192
+	resp, err := NewDispatcherClient(conn).Heartbeat(modCtx, r)
1193
+	if err != nil {
1194
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
1195
+			return resp, err
1196
+		}
1197
+		conn, err := p.pollNewLeaderConn(ctx)
1193 1198
 		if err != nil {
1194
-			errStr := err.Error()
1195
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1196
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1197
-				strings.Contains(errStr, "connection error") ||
1198
-				grpc.Code(err) == codes.Internal {
1199
-				p.connSelector.Reset()
1199
+			if err == raftselector.ErrIsLeader {
1200
+				return p.local.Heartbeat(ctx, r)
1200 1201
 			}
1202
+			return nil, err
1201 1203
 		}
1202
-	}()
1203
-
1204
-	return NewDispatcherClient(conn).Heartbeat(ctx, r)
1204
+		return NewDispatcherClient(conn).Heartbeat(modCtx, r)
1205
+	}
1206
+	return resp, err
1205 1207
 }
1206 1208
 
1207 1209
 func (p *raftProxyDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) {
1208 1210
 
1209
-	if p.cluster.IsLeader() {
1210
-		return p.local.UpdateTaskStatus(ctx, r)
1211
-	}
1212
-	ctx, err := p.runCtxMods(ctx)
1211
+	conn, err := p.connSelector.LeaderConn(ctx)
1213 1212
 	if err != nil {
1213
+		if err == raftselector.ErrIsLeader {
1214
+			return p.local.UpdateTaskStatus(ctx, r)
1215
+		}
1214 1216
 		return nil, err
1215 1217
 	}
1216
-	conn, err := p.connSelector.Conn()
1218
+	modCtx, err := p.runCtxMods(ctx)
1217 1219
 	if err != nil {
1218 1220
 		return nil, err
1219 1221
 	}
1220 1222
 
1221
-	defer func() {
1223
+	resp, err := NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r)
1224
+	if err != nil {
1225
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
1226
+			return resp, err
1227
+		}
1228
+		conn, err := p.pollNewLeaderConn(ctx)
1222 1229
 		if err != nil {
1223
-			errStr := err.Error()
1224
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1225
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1226
-				strings.Contains(errStr, "connection error") ||
1227
-				grpc.Code(err) == codes.Internal {
1228
-				p.connSelector.Reset()
1230
+			if err == raftselector.ErrIsLeader {
1231
+				return p.local.UpdateTaskStatus(ctx, r)
1229 1232
 			}
1233
+			return nil, err
1230 1234
 		}
1231
-	}()
1232
-
1233
-	return NewDispatcherClient(conn).UpdateTaskStatus(ctx, r)
1235
+		return NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r)
1236
+	}
1237
+	return resp, err
1234 1238
 }
1235 1239
 
1236 1240
 func (p *raftProxyDispatcherServer) Tasks(r *TasksRequest, stream Dispatcher_TasksServer) error {
1237 1241
 
1238
-	if p.cluster.IsLeader() {
1239
-		return p.local.Tasks(r, stream)
1242
+	ctx := stream.Context()
1243
+	conn, err := p.connSelector.LeaderConn(ctx)
1244
+	if err != nil {
1245
+		if err == raftselector.ErrIsLeader {
1246
+			return p.local.Tasks(r, stream)
1247
+		}
1248
+		return err
1240 1249
 	}
1241
-	ctx, err := p.runCtxMods(stream.Context())
1250
+	ctx, err = p.runCtxMods(ctx)
1242 1251
 	if err != nil {
1243 1252
 		return err
1244 1253
 	}
1245
-	conn, err := p.connSelector.Conn()
1254
+	clientStream, err := NewDispatcherClient(conn).Tasks(ctx, r)
1255
+
1246 1256
 	if err != nil {
1247 1257
 		return err
1248 1258
 	}
1249 1259
 
1250
-	defer func() {
1260
+	for {
1261
+		msg, err := clientStream.Recv()
1262
+		if err == io.EOF {
1263
+			break
1264
+		}
1251 1265
 		if err != nil {
1252
-			errStr := err.Error()
1253
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1254
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1255
-				strings.Contains(errStr, "connection error") ||
1256
-				grpc.Code(err) == codes.Internal {
1257
-				p.connSelector.Reset()
1258
-			}
1266
+			return err
1267
+		}
1268
+		if err := stream.Send(msg); err != nil {
1269
+			return err
1259 1270
 		}
1260
-	}()
1271
+	}
1272
+	return nil
1273
+}
1261 1274
 
1262
-	clientStream, err := NewDispatcherClient(conn).Tasks(ctx, r)
1275
+func (p *raftProxyDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error {
1276
+
1277
+	ctx := stream.Context()
1278
+	conn, err := p.connSelector.LeaderConn(ctx)
1279
+	if err != nil {
1280
+		if err == raftselector.ErrIsLeader {
1281
+			return p.local.Assignments(r, stream)
1282
+		}
1283
+		return err
1284
+	}
1285
+	ctx, err = p.runCtxMods(ctx)
1286
+	if err != nil {
1287
+		return err
1288
+	}
1289
+	clientStream, err := NewDispatcherClient(conn).Assignments(ctx, r)
1263 1290
 
1264 1291
 	if err != nil {
1265 1292
 		return err
... ...
@@ -1396,6 +1726,45 @@ func (m *TasksMessage) Size() (n int) {
1396 1396
 	return n
1397 1397
 }
1398 1398
 
1399
+func (m *AssignmentsRequest) Size() (n int) {
1400
+	var l int
1401
+	_ = l
1402
+	l = len(m.SessionID)
1403
+	if l > 0 {
1404
+		n += 1 + l + sovDispatcher(uint64(l))
1405
+	}
1406
+	return n
1407
+}
1408
+
1409
+func (m *AssignmentsMessage) Size() (n int) {
1410
+	var l int
1411
+	_ = l
1412
+	if m.Type != 0 {
1413
+		n += 1 + sovDispatcher(uint64(m.Type))
1414
+	}
1415
+	l = len(m.AppliesTo)
1416
+	if l > 0 {
1417
+		n += 1 + l + sovDispatcher(uint64(l))
1418
+	}
1419
+	l = len(m.ResultsIn)
1420
+	if l > 0 {
1421
+		n += 1 + l + sovDispatcher(uint64(l))
1422
+	}
1423
+	if len(m.UpdateTasks) > 0 {
1424
+		for _, e := range m.UpdateTasks {
1425
+			l = e.Size()
1426
+			n += 1 + l + sovDispatcher(uint64(l))
1427
+		}
1428
+	}
1429
+	if len(m.RemoveTasks) > 0 {
1430
+		for _, s := range m.RemoveTasks {
1431
+			l = len(s)
1432
+			n += 1 + l + sovDispatcher(uint64(l))
1433
+		}
1434
+	}
1435
+	return n
1436
+}
1437
+
1399 1438
 func sovDispatcher(x uint64) (n int) {
1400 1439
 	for {
1401 1440
 		n++
... ...
@@ -1504,6 +1873,30 @@ func (this *TasksMessage) String() string {
1504 1504
 	}, "")
1505 1505
 	return s
1506 1506
 }
1507
+func (this *AssignmentsRequest) String() string {
1508
+	if this == nil {
1509
+		return "nil"
1510
+	}
1511
+	s := strings.Join([]string{`&AssignmentsRequest{`,
1512
+		`SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`,
1513
+		`}`,
1514
+	}, "")
1515
+	return s
1516
+}
1517
+func (this *AssignmentsMessage) String() string {
1518
+	if this == nil {
1519
+		return "nil"
1520
+	}
1521
+	s := strings.Join([]string{`&AssignmentsMessage{`,
1522
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
1523
+		`AppliesTo:` + fmt.Sprintf("%v", this.AppliesTo) + `,`,
1524
+		`ResultsIn:` + fmt.Sprintf("%v", this.ResultsIn) + `,`,
1525
+		`UpdateTasks:` + strings.Replace(fmt.Sprintf("%v", this.UpdateTasks), "Task", "Task", 1) + `,`,
1526
+		`RemoveTasks:` + fmt.Sprintf("%v", this.RemoveTasks) + `,`,
1527
+		`}`,
1528
+	}, "")
1529
+	return s
1530
+}
1507 1531
 func valueToStringDispatcher(v interface{}) string {
1508 1532
 	rv := reflect.ValueOf(v)
1509 1533
 	if rv.IsNil() {
... ...
@@ -2389,6 +2782,272 @@ func (m *TasksMessage) Unmarshal(data []byte) error {
2389 2389
 	}
2390 2390
 	return nil
2391 2391
 }
2392
+func (m *AssignmentsRequest) Unmarshal(data []byte) error {
2393
+	l := len(data)
2394
+	iNdEx := 0
2395
+	for iNdEx < l {
2396
+		preIndex := iNdEx
2397
+		var wire uint64
2398
+		for shift := uint(0); ; shift += 7 {
2399
+			if shift >= 64 {
2400
+				return ErrIntOverflowDispatcher
2401
+			}
2402
+			if iNdEx >= l {
2403
+				return io.ErrUnexpectedEOF
2404
+			}
2405
+			b := data[iNdEx]
2406
+			iNdEx++
2407
+			wire |= (uint64(b) & 0x7F) << shift
2408
+			if b < 0x80 {
2409
+				break
2410
+			}
2411
+		}
2412
+		fieldNum := int32(wire >> 3)
2413
+		wireType := int(wire & 0x7)
2414
+		if wireType == 4 {
2415
+			return fmt.Errorf("proto: AssignmentsRequest: wiretype end group for non-group")
2416
+		}
2417
+		if fieldNum <= 0 {
2418
+			return fmt.Errorf("proto: AssignmentsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
2419
+		}
2420
+		switch fieldNum {
2421
+		case 1:
2422
+			if wireType != 2 {
2423
+				return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType)
2424
+			}
2425
+			var stringLen uint64
2426
+			for shift := uint(0); ; shift += 7 {
2427
+				if shift >= 64 {
2428
+					return ErrIntOverflowDispatcher
2429
+				}
2430
+				if iNdEx >= l {
2431
+					return io.ErrUnexpectedEOF
2432
+				}
2433
+				b := data[iNdEx]
2434
+				iNdEx++
2435
+				stringLen |= (uint64(b) & 0x7F) << shift
2436
+				if b < 0x80 {
2437
+					break
2438
+				}
2439
+			}
2440
+			intStringLen := int(stringLen)
2441
+			if intStringLen < 0 {
2442
+				return ErrInvalidLengthDispatcher
2443
+			}
2444
+			postIndex := iNdEx + intStringLen
2445
+			if postIndex > l {
2446
+				return io.ErrUnexpectedEOF
2447
+			}
2448
+			m.SessionID = string(data[iNdEx:postIndex])
2449
+			iNdEx = postIndex
2450
+		default:
2451
+			iNdEx = preIndex
2452
+			skippy, err := skipDispatcher(data[iNdEx:])
2453
+			if err != nil {
2454
+				return err
2455
+			}
2456
+			if skippy < 0 {
2457
+				return ErrInvalidLengthDispatcher
2458
+			}
2459
+			if (iNdEx + skippy) > l {
2460
+				return io.ErrUnexpectedEOF
2461
+			}
2462
+			iNdEx += skippy
2463
+		}
2464
+	}
2465
+
2466
+	if iNdEx > l {
2467
+		return io.ErrUnexpectedEOF
2468
+	}
2469
+	return nil
2470
+}
2471
+func (m *AssignmentsMessage) Unmarshal(data []byte) error {
2472
+	l := len(data)
2473
+	iNdEx := 0
2474
+	for iNdEx < l {
2475
+		preIndex := iNdEx
2476
+		var wire uint64
2477
+		for shift := uint(0); ; shift += 7 {
2478
+			if shift >= 64 {
2479
+				return ErrIntOverflowDispatcher
2480
+			}
2481
+			if iNdEx >= l {
2482
+				return io.ErrUnexpectedEOF
2483
+			}
2484
+			b := data[iNdEx]
2485
+			iNdEx++
2486
+			wire |= (uint64(b) & 0x7F) << shift
2487
+			if b < 0x80 {
2488
+				break
2489
+			}
2490
+		}
2491
+		fieldNum := int32(wire >> 3)
2492
+		wireType := int(wire & 0x7)
2493
+		if wireType == 4 {
2494
+			return fmt.Errorf("proto: AssignmentsMessage: wiretype end group for non-group")
2495
+		}
2496
+		if fieldNum <= 0 {
2497
+			return fmt.Errorf("proto: AssignmentsMessage: illegal tag %d (wire type %d)", fieldNum, wire)
2498
+		}
2499
+		switch fieldNum {
2500
+		case 1:
2501
+			if wireType != 0 {
2502
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
2503
+			}
2504
+			m.Type = 0
2505
+			for shift := uint(0); ; shift += 7 {
2506
+				if shift >= 64 {
2507
+					return ErrIntOverflowDispatcher
2508
+				}
2509
+				if iNdEx >= l {
2510
+					return io.ErrUnexpectedEOF
2511
+				}
2512
+				b := data[iNdEx]
2513
+				iNdEx++
2514
+				m.Type |= (AssignmentsMessage_Type(b) & 0x7F) << shift
2515
+				if b < 0x80 {
2516
+					break
2517
+				}
2518
+			}
2519
+		case 2:
2520
+			if wireType != 2 {
2521
+				return fmt.Errorf("proto: wrong wireType = %d for field AppliesTo", wireType)
2522
+			}
2523
+			var stringLen uint64
2524
+			for shift := uint(0); ; shift += 7 {
2525
+				if shift >= 64 {
2526
+					return ErrIntOverflowDispatcher
2527
+				}
2528
+				if iNdEx >= l {
2529
+					return io.ErrUnexpectedEOF
2530
+				}
2531
+				b := data[iNdEx]
2532
+				iNdEx++
2533
+				stringLen |= (uint64(b) & 0x7F) << shift
2534
+				if b < 0x80 {
2535
+					break
2536
+				}
2537
+			}
2538
+			intStringLen := int(stringLen)
2539
+			if intStringLen < 0 {
2540
+				return ErrInvalidLengthDispatcher
2541
+			}
2542
+			postIndex := iNdEx + intStringLen
2543
+			if postIndex > l {
2544
+				return io.ErrUnexpectedEOF
2545
+			}
2546
+			m.AppliesTo = string(data[iNdEx:postIndex])
2547
+			iNdEx = postIndex
2548
+		case 3:
2549
+			if wireType != 2 {
2550
+				return fmt.Errorf("proto: wrong wireType = %d for field ResultsIn", wireType)
2551
+			}
2552
+			var stringLen uint64
2553
+			for shift := uint(0); ; shift += 7 {
2554
+				if shift >= 64 {
2555
+					return ErrIntOverflowDispatcher
2556
+				}
2557
+				if iNdEx >= l {
2558
+					return io.ErrUnexpectedEOF
2559
+				}
2560
+				b := data[iNdEx]
2561
+				iNdEx++
2562
+				stringLen |= (uint64(b) & 0x7F) << shift
2563
+				if b < 0x80 {
2564
+					break
2565
+				}
2566
+			}
2567
+			intStringLen := int(stringLen)
2568
+			if intStringLen < 0 {
2569
+				return ErrInvalidLengthDispatcher
2570
+			}
2571
+			postIndex := iNdEx + intStringLen
2572
+			if postIndex > l {
2573
+				return io.ErrUnexpectedEOF
2574
+			}
2575
+			m.ResultsIn = string(data[iNdEx:postIndex])
2576
+			iNdEx = postIndex
2577
+		case 4:
2578
+			if wireType != 2 {
2579
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdateTasks", wireType)
2580
+			}
2581
+			var msglen int
2582
+			for shift := uint(0); ; shift += 7 {
2583
+				if shift >= 64 {
2584
+					return ErrIntOverflowDispatcher
2585
+				}
2586
+				if iNdEx >= l {
2587
+					return io.ErrUnexpectedEOF
2588
+				}
2589
+				b := data[iNdEx]
2590
+				iNdEx++
2591
+				msglen |= (int(b) & 0x7F) << shift
2592
+				if b < 0x80 {
2593
+					break
2594
+				}
2595
+			}
2596
+			if msglen < 0 {
2597
+				return ErrInvalidLengthDispatcher
2598
+			}
2599
+			postIndex := iNdEx + msglen
2600
+			if postIndex > l {
2601
+				return io.ErrUnexpectedEOF
2602
+			}
2603
+			m.UpdateTasks = append(m.UpdateTasks, &Task{})
2604
+			if err := m.UpdateTasks[len(m.UpdateTasks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
2605
+				return err
2606
+			}
2607
+			iNdEx = postIndex
2608
+		case 5:
2609
+			if wireType != 2 {
2610
+				return fmt.Errorf("proto: wrong wireType = %d for field RemoveTasks", wireType)
2611
+			}
2612
+			var stringLen uint64
2613
+			for shift := uint(0); ; shift += 7 {
2614
+				if shift >= 64 {
2615
+					return ErrIntOverflowDispatcher
2616
+				}
2617
+				if iNdEx >= l {
2618
+					return io.ErrUnexpectedEOF
2619
+				}
2620
+				b := data[iNdEx]
2621
+				iNdEx++
2622
+				stringLen |= (uint64(b) & 0x7F) << shift
2623
+				if b < 0x80 {
2624
+					break
2625
+				}
2626
+			}
2627
+			intStringLen := int(stringLen)
2628
+			if intStringLen < 0 {
2629
+				return ErrInvalidLengthDispatcher
2630
+			}
2631
+			postIndex := iNdEx + intStringLen
2632
+			if postIndex > l {
2633
+				return io.ErrUnexpectedEOF
2634
+			}
2635
+			m.RemoveTasks = append(m.RemoveTasks, string(data[iNdEx:postIndex]))
2636
+			iNdEx = postIndex
2637
+		default:
2638
+			iNdEx = preIndex
2639
+			skippy, err := skipDispatcher(data[iNdEx:])
2640
+			if err != nil {
2641
+				return err
2642
+			}
2643
+			if skippy < 0 {
2644
+				return ErrInvalidLengthDispatcher
2645
+			}
2646
+			if (iNdEx + skippy) > l {
2647
+				return io.ErrUnexpectedEOF
2648
+			}
2649
+			iNdEx += skippy
2650
+		}
2651
+	}
2652
+
2653
+	if iNdEx > l {
2654
+		return io.ErrUnexpectedEOF
2655
+	}
2656
+	return nil
2657
+}
2392 2658
 func skipDispatcher(data []byte) (n int, err error) {
2393 2659
 	l := len(data)
2394 2660
 	iNdEx := 0
... ...
@@ -2494,47 +3153,60 @@ var (
2494 2494
 	ErrIntOverflowDispatcher   = fmt.Errorf("proto: integer overflow")
2495 2495
 )
2496 2496
 
2497
+func init() { proto.RegisterFile("dispatcher.proto", fileDescriptorDispatcher) }
2498
+
2497 2499
 var fileDescriptorDispatcher = []byte{
2498
-	// 645 bytes of a gzipped FileDescriptorProto
2499
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x95, 0xdf, 0x6a, 0x13, 0x4f,
2500
-	0x14, 0xc7, 0x3b, 0x69, 0x9a, 0xfe, 0x72, 0xd2, 0xfe, 0x88, 0x63, 0xb1, 0xcb, 0x52, 0xb7, 0x71,
2501
-	0xab, 0x50, 0xb0, 0x6e, 0x35, 0x82, 0x17, 0x52, 0x44, 0x42, 0x0a, 0x86, 0xe2, 0x1f, 0xb6, 0x6a,
2502
-	0x2f, 0xcb, 0x24, 0x7b, 0x48, 0xd7, 0xd8, 0x9d, 0x75, 0x66, 0x62, 0xcd, 0x85, 0x20, 0x88, 0xb7,
2503
-	0x22, 0x5e, 0xf9, 0x14, 0x3e, 0x47, 0xf1, 0xca, 0x4b, 0xaf, 0x8a, 0xcd, 0x03, 0x88, 0x8f, 0x20,
2504
-	0xbb, 0x3b, 0x9b, 0xd6, 0x74, 0x53, 0x9b, 0x5e, 0x65, 0xfe, 0x7c, 0xcf, 0xf7, 0x7c, 0x38, 0xe7,
2505
-	0x4c, 0x16, 0xca, 0x9e, 0x2f, 0x43, 0xa6, 0x5a, 0x3b, 0x28, 0x9c, 0x50, 0x70, 0xc5, 0x29, 0xf5,
2506
-	0x78, 0xab, 0x83, 0xc2, 0x91, 0x7b, 0x4c, 0xec, 0x76, 0x7c, 0xe5, 0xbc, 0xbe, 0x65, 0x96, 0x54,
2507
-	0x2f, 0x44, 0x99, 0x08, 0xcc, 0x59, 0xde, 0x7c, 0x81, 0x2d, 0x95, 0x6e, 0xe7, 0xda, 0xbc, 0xcd,
2508
-	0xe3, 0xe5, 0x6a, 0xb4, 0xd2, 0xa7, 0x17, 0xc3, 0x97, 0xdd, 0xb6, 0x1f, 0xac, 0x26, 0x3f, 0xfa,
2509
-	0x70, 0xde, 0xeb, 0x0a, 0xa6, 0x7c, 0x1e, 0xac, 0xa6, 0x8b, 0xe4, 0xc2, 0xfe, 0x40, 0xe0, 0xff,
2510
-	0x4d, 0x94, 0xd2, 0xe7, 0x81, 0x8b, 0xaf, 0xba, 0x28, 0x15, 0x5d, 0x87, 0x92, 0x87, 0xb2, 0x25,
2511
-	0xfc, 0x30, 0xd2, 0x19, 0xa4, 0x42, 0x96, 0x4b, 0xd5, 0x25, 0xe7, 0x24, 0x9c, 0xf3, 0x88, 0x7b,
2512
-	0x58, 0x3f, 0x92, 0xba, 0xc7, 0xe3, 0xe8, 0x0a, 0x80, 0x4c, 0x8c, 0xb7, 0x7d, 0xcf, 0xc8, 0x55,
2513
-	0xc8, 0x72, 0xb1, 0x36, 0xdb, 0x3f, 0x58, 0x2c, 0xea, 0x74, 0x8d, 0xba, 0x5b, 0xd4, 0x82, 0x86,
2514
-	0x67, 0xbf, 0xcf, 0x0d, 0x38, 0x1e, 0xa2, 0x94, 0xac, 0x8d, 0x43, 0x06, 0xe4, 0x74, 0x03, 0xba,
2515
-	0x02, 0xf9, 0x80, 0x7b, 0x18, 0x27, 0x2a, 0x55, 0x8d, 0x51, 0xb8, 0x6e, 0xac, 0xa2, 0x6b, 0xf0,
2516
-	0xdf, 0x2e, 0x0b, 0x58, 0x1b, 0x85, 0x34, 0x26, 0x2b, 0x93, 0xcb, 0xa5, 0x6a, 0x25, 0x2b, 0x62,
2517
-	0x0b, 0xfd, 0xf6, 0x8e, 0x42, 0xef, 0x09, 0xa2, 0x70, 0x07, 0x11, 0x74, 0x0b, 0x2e, 0x05, 0xa8,
2518
-	0xf6, 0xb8, 0xe8, 0x6c, 0x37, 0x39, 0x57, 0x52, 0x09, 0x16, 0x6e, 0x77, 0xb0, 0x27, 0x8d, 0x7c,
2519
-	0xec, 0x75, 0x25, 0xcb, 0x6b, 0x3d, 0x68, 0x89, 0x5e, 0x5c, 0x9a, 0x0d, 0xec, 0xb9, 0x73, 0xda,
2520
-	0xa0, 0x96, 0xc6, 0x6f, 0x60, 0x4f, 0xda, 0xf7, 0xa1, 0xfc, 0x00, 0x99, 0x50, 0x4d, 0x64, 0x2a,
2521
-	0x6d, 0xc7, 0x58, 0x65, 0xb0, 0x1f, 0xc3, 0x85, 0x63, 0x0e, 0x32, 0xe4, 0x81, 0x44, 0x7a, 0x17,
2522
-	0x0a, 0x21, 0x0a, 0x9f, 0x7b, 0xba, 0x99, 0x0b, 0x59, 0x7c, 0x75, 0x3d, 0x18, 0xb5, 0xfc, 0xfe,
2523
-	0xc1, 0xe2, 0x84, 0xab, 0x23, 0xec, 0x4f, 0x39, 0x98, 0x7f, 0x16, 0x7a, 0x4c, 0xe1, 0x53, 0x26,
2524
-	0x3b, 0x9b, 0x8a, 0xa9, 0xae, 0x3c, 0x17, 0x1a, 0x7d, 0x0e, 0xd3, 0xdd, 0xd8, 0x28, 0x2d, 0xf9,
2525
-	0x5a, 0x16, 0xc6, 0x88, 0x5c, 0xce, 0xd1, 0x49, 0xa2, 0x70, 0x53, 0x33, 0x93, 0x43, 0x79, 0xf8,
2526
-	0x92, 0x2e, 0xc1, 0xb4, 0x62, 0xb2, 0x73, 0x84, 0x05, 0xfd, 0x83, 0xc5, 0x42, 0x24, 0x6b, 0xd4,
2527
-	0xdd, 0x42, 0x74, 0xd5, 0xf0, 0xe8, 0x1d, 0x28, 0xc8, 0x38, 0x48, 0x0f, 0x8d, 0x95, 0xc5, 0x73,
2528
-	0x8c, 0x44, 0xab, 0x6d, 0x13, 0x8c, 0x93, 0x94, 0x49, 0xa9, 0xed, 0x35, 0x98, 0x89, 0x4e, 0xcf,
2529
-	0x57, 0x22, 0xfb, 0x9e, 0x8e, 0x4e, 0x9f, 0x80, 0x03, 0x53, 0x11, 0xab, 0x34, 0x48, 0x5c, 0x30,
2530
-	0x63, 0x14, 0xa0, 0x9b, 0xc8, 0xaa, 0x1f, 0xf3, 0x00, 0xf5, 0xc1, 0xdf, 0x0a, 0x7d, 0x03, 0xd3,
2531
-	0x3a, 0x0d, 0xb5, 0xb3, 0x42, 0xff, 0x7e, 0xf8, 0xe6, 0x69, 0x1a, 0x4d, 0x64, 0x2f, 0x7d, 0xfb,
2532
-	0xfa, 0xeb, 0x4b, 0xee, 0x32, 0xcc, 0xc4, 0x9a, 0x1b, 0xd1, 0x08, 0xa3, 0x80, 0xd9, 0x64, 0xa7,
2533
-	0x1f, 0xc8, 0x4d, 0x42, 0xdf, 0x42, 0x71, 0x30, 0x86, 0xf4, 0x6a, 0x96, 0xef, 0xf0, 0x9c, 0x9b,
2534
-	0xd7, 0xfe, 0xa1, 0xd2, 0x05, 0x3e, 0x0b, 0x00, 0xfd, 0x4c, 0xa0, 0x3c, 0xdc, 0x22, 0x7a, 0x7d,
2535
-	0x8c, 0x71, 0x33, 0x57, 0xce, 0x26, 0x1e, 0x07, 0x4a, 0xc0, 0x54, 0xdc, 0x5c, 0x5a, 0x19, 0xd5,
2536
-	0xc6, 0x41, 0xf6, 0xd1, 0x8a, 0xf1, 0xfa, 0x50, 0x5b, 0xd8, 0x3f, 0xb4, 0x26, 0x7e, 0x1c, 0x5a,
2537
-	0x13, 0xbf, 0x0f, 0x2d, 0xf2, 0xae, 0x6f, 0x91, 0xfd, 0xbe, 0x45, 0xbe, 0xf7, 0x2d, 0xf2, 0xb3,
2538
-	0x6f, 0x91, 0x66, 0x21, 0xfe, 0x06, 0xdc, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0xa3, 0xfc, 0x50,
2539
-	0xc8, 0x8b, 0x06, 0x00, 0x00,
2500
+	// 820 bytes of a gzipped FileDescriptorProto
2501
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x55, 0x4d, 0x6f, 0x1b, 0x45,
2502
+	0x18, 0xce, 0x38, 0x8e, 0x53, 0xbf, 0xeb, 0x14, 0x33, 0x54, 0x74, 0x65, 0xb5, 0x1b, 0x77, 0x43,
2503
+	0x23, 0x4b, 0x0d, 0x9b, 0x62, 0x24, 0x0e, 0x10, 0x01, 0x75, 0x6d, 0x09, 0xab, 0x4d, 0x5a, 0x6d,
2504
+	0x0d, 0x3d, 0x5a, 0x6b, 0xef, 0x2b, 0x77, 0x71, 0xbc, 0xb3, 0xcc, 0xcc, 0xb6, 0xf8, 0x80, 0x84,
2505
+	0x04, 0x48, 0x1c, 0x11, 0xa7, 0x8a, 0x1f, 0xc1, 0xef, 0x88, 0x38, 0x71, 0xe4, 0x14, 0x11, 0xff,
2506
+	0x00, 0xc4, 0x4f, 0xa8, 0x76, 0x77, 0xd6, 0x71, 0x9d, 0x75, 0xe2, 0xe4, 0xe4, 0xd9, 0x77, 0x9e,
2507
+	0xe7, 0x99, 0x47, 0xef, 0x97, 0xa1, 0xec, 0x7a, 0x22, 0x70, 0x64, 0xff, 0x05, 0x72, 0x2b, 0xe0,
2508
+	0x4c, 0x32, 0x4a, 0x5d, 0xd6, 0x1f, 0x22, 0xb7, 0xc4, 0x2b, 0x87, 0x8f, 0x86, 0x9e, 0xb4, 0x5e,
2509
+	0x7e, 0x54, 0xd1, 0xe4, 0x38, 0x40, 0x91, 0x00, 0x2a, 0x1b, 0xac, 0xf7, 0x2d, 0xf6, 0x65, 0xfa,
2510
+	0x79, 0x63, 0xc0, 0x06, 0x2c, 0x3e, 0xee, 0x46, 0x27, 0x15, 0x7d, 0x2f, 0x38, 0x0c, 0x07, 0x9e,
2511
+	0xbf, 0x9b, 0xfc, 0xa8, 0xe0, 0x4d, 0x37, 0xe4, 0x8e, 0xf4, 0x98, 0xbf, 0x9b, 0x1e, 0x92, 0x0b,
2512
+	0xf3, 0x17, 0x02, 0xd7, 0x9f, 0xa1, 0x10, 0x1e, 0xf3, 0x6d, 0xfc, 0x2e, 0x44, 0x21, 0x69, 0x0b,
2513
+	0x34, 0x17, 0x45, 0x9f, 0x7b, 0x41, 0x84, 0xd3, 0x49, 0x95, 0xd4, 0xb4, 0xfa, 0x96, 0x75, 0xd6,
2514
+	0x9c, 0x75, 0xc0, 0x5c, 0x6c, 0x9e, 0x42, 0xed, 0x59, 0x1e, 0xdd, 0x01, 0x10, 0x89, 0x70, 0xd7,
2515
+	0x73, 0xf5, 0x5c, 0x95, 0xd4, 0x8a, 0x8d, 0x8d, 0xc9, 0xf1, 0x66, 0x51, 0x3d, 0xd7, 0x6e, 0xda,
2516
+	0x45, 0x05, 0x68, 0xbb, 0xe6, 0x4f, 0xb9, 0xa9, 0x8f, 0x7d, 0x14, 0xc2, 0x19, 0xe0, 0x9c, 0x00,
2517
+	0x39, 0x5f, 0x80, 0xee, 0x40, 0xde, 0x67, 0x2e, 0xc6, 0x0f, 0x69, 0x75, 0x7d, 0x91, 0x5d, 0x3b,
2518
+	0x46, 0xd1, 0x3d, 0xb8, 0x36, 0x72, 0x7c, 0x67, 0x80, 0x5c, 0xe8, 0xab, 0xd5, 0xd5, 0x9a, 0x56,
2519
+	0xaf, 0x66, 0x31, 0x9e, 0xa3, 0x37, 0x78, 0x21, 0xd1, 0x7d, 0x8a, 0xc8, 0xed, 0x29, 0x83, 0x3e,
2520
+	0x87, 0xf7, 0x7d, 0x94, 0xaf, 0x18, 0x1f, 0x76, 0x7b, 0x8c, 0x49, 0x21, 0xb9, 0x13, 0x74, 0x87,
2521
+	0x38, 0x16, 0x7a, 0x3e, 0xd6, 0xba, 0x93, 0xa5, 0xd5, 0xf2, 0xfb, 0x7c, 0x1c, 0xa7, 0xe6, 0x11,
2522
+	0x8e, 0xed, 0x1b, 0x4a, 0xa0, 0x91, 0xf2, 0x1f, 0xe1, 0x58, 0x98, 0x5f, 0x42, 0xf9, 0x2b, 0x74,
2523
+	0xb8, 0xec, 0xa1, 0x23, 0xd3, 0x72, 0x5c, 0x2a, 0x0d, 0xe6, 0x13, 0x78, 0x77, 0x46, 0x41, 0x04,
2524
+	0xcc, 0x17, 0x48, 0x3f, 0x85, 0x42, 0x80, 0xdc, 0x63, 0xae, 0x2a, 0xe6, 0xad, 0x2c, 0x7f, 0x4d,
2525
+	0xd5, 0x18, 0x8d, 0xfc, 0xd1, 0xf1, 0xe6, 0x8a, 0xad, 0x18, 0xe6, 0x6f, 0x39, 0xb8, 0xf9, 0x75,
2526
+	0xe0, 0x3a, 0x12, 0x3b, 0x8e, 0x18, 0x3e, 0x93, 0x8e, 0x0c, 0xc5, 0x95, 0xac, 0xd1, 0x6f, 0x60,
2527
+	0x3d, 0x8c, 0x85, 0xd2, 0x94, 0xef, 0x65, 0xd9, 0x58, 0xf0, 0x96, 0x75, 0x1a, 0x49, 0x10, 0x76,
2528
+	0x2a, 0x56, 0x61, 0x50, 0x9e, 0xbf, 0xa4, 0x5b, 0xb0, 0x2e, 0x1d, 0x31, 0x3c, 0xb5, 0x05, 0x93,
2529
+	0xe3, 0xcd, 0x42, 0x04, 0x6b, 0x37, 0xed, 0x42, 0x74, 0xd5, 0x76, 0xe9, 0x27, 0x50, 0x10, 0x31,
2530
+	0x49, 0x35, 0x8d, 0x91, 0xe5, 0x67, 0xc6, 0x89, 0x42, 0x9b, 0x15, 0xd0, 0xcf, 0xba, 0x4c, 0x52,
2531
+	0x6d, 0xee, 0x41, 0x29, 0x8a, 0x5e, 0x2d, 0x45, 0xe6, 0xe7, 0x8a, 0x9d, 0x8e, 0x80, 0x05, 0x6b,
2532
+	0x91, 0x57, 0xa1, 0x93, 0x38, 0x61, 0xfa, 0x22, 0x83, 0x76, 0x02, 0x33, 0x1b, 0x40, 0x1f, 0x08,
2533
+	0xe1, 0x0d, 0xfc, 0x11, 0xfa, 0xf2, 0x8a, 0x1e, 0xfe, 0xc8, 0xbd, 0x25, 0x92, 0x5a, 0xf9, 0x02,
2534
+	0xf2, 0xd1, 0x2a, 0x8a, 0xe9, 0xd7, 0xeb, 0xf7, 0xb2, 0x9c, 0x9c, 0x65, 0x59, 0x9d, 0x71, 0x80,
2535
+	0x76, 0x4c, 0xa4, 0xb7, 0x01, 0x9c, 0x20, 0x38, 0xf4, 0x50, 0x74, 0x25, 0x4b, 0xf6, 0x81, 0x5d,
2536
+	0x54, 0x91, 0x0e, 0x8b, 0xae, 0x39, 0x8a, 0xf0, 0x50, 0x8a, 0xae, 0xe7, 0xeb, 0xab, 0xc9, 0xb5,
2537
+	0x8a, 0xb4, 0x7d, 0xfa, 0x19, 0x94, 0x92, 0x7a, 0x77, 0x93, 0x84, 0xe4, 0x2f, 0x48, 0x88, 0x16,
2538
+	0x4e, 0x2b, 0x24, 0xe8, 0x1d, 0x28, 0x71, 0x1c, 0xb1, 0x97, 0x29, 0x79, 0xad, 0xba, 0x5a, 0x2b,
2539
+	0xda, 0x5a, 0x12, 0x8b, 0x21, 0xe6, 0x5d, 0xc8, 0x47, 0x5e, 0x69, 0x09, 0xae, 0x3d, 0x7c, 0xb2,
2540
+	0xff, 0xf4, 0x71, 0xab, 0xd3, 0x2a, 0xaf, 0xd0, 0x77, 0x40, 0x6b, 0x1f, 0x3c, 0xb4, 0x5b, 0xfb,
2541
+	0xad, 0x83, 0xce, 0x83, 0xc7, 0x65, 0x52, 0x7f, 0xbd, 0x06, 0xd0, 0x9c, 0xee, 0x6d, 0xfa, 0x3d,
2542
+	0xac, 0xab, 0x1c, 0x52, 0x33, 0xcb, 0xca, 0xdb, 0x9b, 0xb5, 0x72, 0x1e, 0x46, 0x65, 0xcc, 0xdc,
2543
+	0xfa, 0xeb, 0xcf, 0xff, 0x5e, 0xe7, 0x6e, 0x43, 0x29, 0xc6, 0x7c, 0x18, 0xed, 0x08, 0xe4, 0xb0,
2544
+	0x91, 0x7c, 0xa9, 0x0d, 0x74, 0x9f, 0xd0, 0x1f, 0xa0, 0x38, 0x9d, 0x73, 0xfa, 0x41, 0x96, 0xee,
2545
+	0xfc, 0x22, 0xa9, 0xdc, 0xbd, 0x00, 0xa5, 0x3a, 0x78, 0x19, 0x03, 0xf4, 0x77, 0x02, 0xe5, 0xf9,
2546
+	0x19, 0xa0, 0xf7, 0x2e, 0x31, 0xcf, 0x95, 0x9d, 0xe5, 0xc0, 0x97, 0x31, 0x15, 0xc2, 0x5a, 0x52,
2547
+	0xef, 0xea, 0xa2, 0xb6, 0x98, 0xbe, 0xbe, 0x18, 0x91, 0xd6, 0x61, 0x7b, 0x89, 0x17, 0x7f, 0xcd,
2548
+	0x91, 0xfb, 0x84, 0xfe, 0x4c, 0x40, 0x9b, 0x69, 0x7d, 0xba, 0x7d, 0xc1, 0x6c, 0xa4, 0x1e, 0xb6,
2549
+	0x97, 0x9b, 0xa1, 0x25, 0x3b, 0xa2, 0x71, 0xeb, 0xe8, 0xc4, 0x58, 0xf9, 0xe7, 0xc4, 0x58, 0xf9,
2550
+	0xff, 0xc4, 0x20, 0x3f, 0x4e, 0x0c, 0x72, 0x34, 0x31, 0xc8, 0xdf, 0x13, 0x83, 0xfc, 0x3b, 0x31,
2551
+	0x48, 0xaf, 0x10, 0xff, 0xdd, 0x7f, 0xfc, 0x26, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xb7, 0x47, 0x6b,
2552
+	0x76, 0x08, 0x00, 0x00,
2540 2553
 }
... ...
@@ -47,13 +47,22 @@ service Dispatcher { // maybe dispatch, al likes this
47 47
 	// it should be terminated.
48 48
 	rpc Tasks(TasksRequest) returns (stream TasksMessage) {
49 49
 		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
50
+		option deprecated = true;
51
+	};
52
+
53
+	// Assignments is a stream of assignments such as tasks and secrets for node.
54
+	// The first message in the stream contains all of the tasks and secrets
55
+	// that are relevant to the node. Future messages in the stream are updates to
56
+	// the set of assignments.
57
+	rpc Assignments(AssignmentsRequest) returns (stream AssignmentsMessage) {
58
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
50 59
 	};
51 60
 }
52 61
 
53 62
 // SessionRequest starts a session.
54 63
 message SessionRequest {
55 64
 	NodeDescription description = 1;
56
-	// SessionID can be provided to attempt resuming an exising session. If the 
65
+	// SessionID can be provided to attempt resuming an exising session. If the
57 66
 	// SessionID is empty or invalid, a new SessionID will be assigned.
58 67
 	//
59 68
 	// See SessionMessage.SessionID for details.
... ...
@@ -115,7 +124,7 @@ message SessionMessage {
115 115
 	repeated WeightedPeer managers = 3;
116 116
 
117 117
 	// Symmetric encryption key distributed by the lead manager. Used by agents
118
-	// for securing network bootstrapping and communication. 
118
+	// for securing network bootstrapping and communication.
119 119
 	repeated EncryptionKey network_bootstrap_keys = 4;
120 120
 }
121 121
 
... ...
@@ -157,3 +166,40 @@ message TasksMessage {
157 157
 	repeated Task tasks = 1;
158 158
 }
159 159
 
160
+message AssignmentsRequest {
161
+	string session_id = 1 [(gogoproto.customname) = "SessionID"];
162
+}
163
+
164
+message AssignmentsMessage {
165
+	// AssignmentType specifies whether this assignment message carries
166
+	// the full state, or is an update to an existing state.
167
+	enum Type {
168
+		COMPLETE = 0;
169
+		INCREMENTAL = 1;
170
+	}
171
+
172
+	Type type = 1;
173
+
174
+	// AppliesTo references the previous ResultsIn value, to chain
175
+	// incremental updates together. For the first update in a stream,
176
+	// AppliesTo is empty.  If AppliesTo does not match the previously
177
+	// received ResultsIn, the consumer of the stream should start a new
178
+	// Assignments stream to re-sync.
179
+	string applies_to = 2;
180
+
181
+	// ResultsIn identifies the result of this assignments message, to
182
+	// match against the next message's AppliesTo value and protect
183
+	// against missed messages.
184
+	string results_in = 3;
185
+
186
+	// UpdateTasks is a set of new or updated tasks to run on this node.
187
+	// In the first assignments message, it contains all of the tasks
188
+	// to run on this node. Tasks outside of this set running on the node
189
+	// should be terminated.
190
+	repeated Task update_tasks = 4;
191
+
192
+	// RemoveTasks is a set of previously-assigned task IDs to remove from the
193
+	// assignment set. It is not used in the first assignments message of
194
+	// a stream.
195
+	repeated string remove_tasks = 5;
196
+}
... ...
@@ -32,7 +32,9 @@ var _ = math.Inf
32 32
 
33 33
 // This is a compile-time assertion to ensure that this generated file
34 34
 // is compatible with the proto package it is being compiled against.
35
-const _ = proto.GoGoProtoPackageIsVersion1
35
+// A compilation error at this line likely means your copy of the
36
+// proto package needs to be updated.
37
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
36 38
 
37 39
 // A Duration represents a signed, fixed-length span of time represented
38 40
 // as a count of seconds and fractions of seconds at nanosecond
... ...
@@ -128,11 +130,12 @@ func valueToGoStringDuration(v interface{}, typ string) string {
128 128
 	pv := reflect.Indirect(rv).Interface()
129 129
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
130 130
 }
131
-func extensionToGoStringDuration(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
131
+func extensionToGoStringDuration(m github_com_gogo_protobuf_proto.Message) string {
132
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
132 133
 	if e == nil {
133 134
 		return "nil"
134 135
 	}
135
-	s := "map[int32]proto.Extension{"
136
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
136 137
 	keys := make([]int, 0, len(e))
137 138
 	for k := range e {
138 139
 		keys = append(keys, int(k))
... ...
@@ -142,7 +145,7 @@ func extensionToGoStringDuration(e map[int32]github_com_gogo_protobuf_proto.Exte
142 142
 	for _, k := range keys {
143 143
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
144 144
 	}
145
-	s += strings.Join(ss, ",") + "}"
145
+	s += strings.Join(ss, ",") + "})"
146 146
 	return s
147 147
 }
148 148
 func (m *Duration) Marshal() (data []byte, err error) {
... ...
@@ -438,6 +441,8 @@ var (
438 438
 	ErrIntOverflowDuration   = fmt.Errorf("proto: integer overflow")
439 439
 )
440 440
 
441
+func init() { proto.RegisterFile("duration.proto", fileDescriptorDuration) }
442
+
441 443
 var fileDescriptorDuration = []byte{
442 444
 	// 201 bytes of a gzipped FileDescriptorProto
443 445
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a,
... ...
@@ -21,10 +21,11 @@ import (
21 21
 	grpc "google.golang.org/grpc"
22 22
 )
23 23
 
24
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
24
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
25 25
 import codes "google.golang.org/grpc/codes"
26 26
 import metadata "google.golang.org/grpc/metadata"
27 27
 import transport "google.golang.org/grpc/transport"
28
+import time "time"
28 29
 
29 30
 import io "io"
30 31
 
... ...
@@ -153,11 +154,12 @@ func valueToGoStringHealth(v interface{}, typ string) string {
153 153
 	pv := reflect.Indirect(rv).Interface()
154 154
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
155 155
 }
156
-func extensionToGoStringHealth(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
156
+func extensionToGoStringHealth(m github_com_gogo_protobuf_proto.Message) string {
157
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
157 158
 	if e == nil {
158 159
 		return "nil"
159 160
 	}
160
-	s := "map[int32]proto.Extension{"
161
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
161 162
 	keys := make([]int, 0, len(e))
162 163
 	for k := range e {
163 164
 		keys = append(keys, int(k))
... ...
@@ -167,7 +169,7 @@ func extensionToGoStringHealth(e map[int32]github_com_gogo_protobuf_proto.Extens
167 167
 	for _, k := range keys {
168 168
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
169 169
 	}
170
-	s += strings.Join(ss, ",") + "}"
170
+	s += strings.Join(ss, ",") + "})"
171 171
 	return s
172 172
 }
173 173
 
... ...
@@ -177,7 +179,7 @@ var _ grpc.ClientConn
177 177
 
178 178
 // This is a compile-time assertion to ensure that this generated file
179 179
 // is compatible with the grpc package it is being compiled against.
180
-const _ = grpc.SupportPackageIsVersion2
180
+const _ = grpc.SupportPackageIsVersion3
181 181
 
182 182
 // Client API for Health service
183 183
 
... ...
@@ -239,7 +241,8 @@ var _Health_serviceDesc = grpc.ServiceDesc{
239 239
 			Handler:    _Health_Check_Handler,
240 240
 		},
241 241
 	},
242
-	Streams: []grpc.StreamDesc{},
242
+	Streams:  []grpc.StreamDesc{},
243
+	Metadata: fileDescriptorHealth,
243 244
 }
244 245
 
245 246
 func (m *HealthCheckRequest) Marshal() (data []byte, err error) {
... ...
@@ -319,12 +322,11 @@ func encodeVarintHealth(data []byte, offset int, v uint64) int {
319 319
 
320 320
 type raftProxyHealthServer struct {
321 321
 	local        HealthServer
322
-	connSelector raftpicker.Interface
323
-	cluster      raftpicker.RaftCluster
322
+	connSelector raftselector.ConnProvider
324 323
 	ctxMods      []func(context.Context) (context.Context, error)
325 324
 }
326 325
 
327
-func NewRaftProxyHealthServer(local HealthServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) HealthServer {
326
+func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) HealthServer {
328 327
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
329 328
 		s, ok := transport.StreamFromContext(ctx)
330 329
 		if !ok {
... ...
@@ -346,7 +348,6 @@ func NewRaftProxyHealthServer(local HealthServer, connSelector raftpicker.Interf
346 346
 
347 347
 	return &raftProxyHealthServer{
348 348
 		local:        local,
349
-		cluster:      cluster,
350 349
 		connSelector: connSelector,
351 350
 		ctxMods:      mods,
352 351
 	}
... ...
@@ -361,34 +362,59 @@ func (p *raftProxyHealthServer) runCtxMods(ctx context.Context) (context.Context
361 361
 	}
362 362
 	return ctx, nil
363 363
 }
364
+func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
365
+	ticker := time.NewTicker(500 * time.Millisecond)
366
+	defer ticker.Stop()
367
+	for {
368
+		select {
369
+		case <-ticker.C:
370
+			conn, err := p.connSelector.LeaderConn(ctx)
371
+			if err != nil {
372
+				return nil, err
373
+			}
364 374
 
365
-func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
375
+			client := NewHealthClient(conn)
366 376
 
367
-	if p.cluster.IsLeader() {
368
-		return p.local.Check(ctx, r)
377
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
378
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
379
+				continue
380
+			}
381
+			return conn, nil
382
+		case <-ctx.Done():
383
+			return nil, ctx.Err()
384
+		}
369 385
 	}
370
-	ctx, err := p.runCtxMods(ctx)
386
+}
387
+
388
+func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
389
+
390
+	conn, err := p.connSelector.LeaderConn(ctx)
371 391
 	if err != nil {
392
+		if err == raftselector.ErrIsLeader {
393
+			return p.local.Check(ctx, r)
394
+		}
372 395
 		return nil, err
373 396
 	}
374
-	conn, err := p.connSelector.Conn()
397
+	modCtx, err := p.runCtxMods(ctx)
375 398
 	if err != nil {
376 399
 		return nil, err
377 400
 	}
378 401
 
379
-	defer func() {
402
+	resp, err := NewHealthClient(conn).Check(modCtx, r)
403
+	if err != nil {
404
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
405
+			return resp, err
406
+		}
407
+		conn, err := p.pollNewLeaderConn(ctx)
380 408
 		if err != nil {
381
-			errStr := err.Error()
382
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
383
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
384
-				strings.Contains(errStr, "connection error") ||
385
-				grpc.Code(err) == codes.Internal {
386
-				p.connSelector.Reset()
409
+			if err == raftselector.ErrIsLeader {
410
+				return p.local.Check(ctx, r)
387 411
 			}
412
+			return nil, err
388 413
 		}
389
-	}()
390
-
391
-	return NewHealthClient(conn).Check(ctx, r)
414
+		return NewHealthClient(conn).Check(modCtx, r)
415
+	}
416
+	return resp, err
392 417
 }
393 418
 
394 419
 func (m *HealthCheckRequest) Size() (n int) {
... ...
@@ -704,6 +730,8 @@ var (
704 704
 	ErrIntOverflowHealth   = fmt.Errorf("proto: integer overflow")
705 705
 )
706 706
 
707
+func init() { proto.RegisterFile("health.proto", fileDescriptorHealth) }
708
+
707 709
 var fileDescriptorHealth = []byte{
708 710
 	// 291 bytes of a gzipped FileDescriptorProto
709 711
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
... ...
@@ -66,6 +66,9 @@ type Service struct {
66 66
 	ID   string      `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
67 67
 	Meta Meta        `protobuf:"bytes,2,opt,name=meta" json:"meta"`
68 68
 	Spec ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"`
69
+	// PreviousSpec is the previous service spec that was in place before
70
+	// "Spec".
71
+	PreviousSpec *ServiceSpec `protobuf:"bytes,6,opt,name=previous_spec,json=previousSpec" json:"previous_spec,omitempty"`
69 72
 	// Runtime state of service endpoint. This may be different
70 73
 	// from the spec version because the user may not have entered
71 74
 	// the optional fields like node_port or virtual_ip and it
... ...
@@ -284,6 +287,7 @@ func (m *Service) Copy() *Service {
284 284
 		ID:           m.ID,
285 285
 		Meta:         *m.Meta.Copy(),
286 286
 		Spec:         *m.Spec.Copy(),
287
+		PreviousSpec: m.PreviousSpec.Copy(),
287 288
 		Endpoint:     m.Endpoint.Copy(),
288 289
 		UpdateStatus: m.UpdateStatus.Copy(),
289 290
 	}
... ...
@@ -468,11 +472,14 @@ func (this *Service) GoString() string {
468 468
 	if this == nil {
469 469
 		return "nil"
470 470
 	}
471
-	s := make([]string, 0, 9)
471
+	s := make([]string, 0, 10)
472 472
 	s = append(s, "&api.Service{")
473 473
 	s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
474 474
 	s = append(s, "Meta: "+strings.Replace(this.Meta.GoString(), `&`, ``, 1)+",\n")
475 475
 	s = append(s, "Spec: "+strings.Replace(this.Spec.GoString(), `&`, ``, 1)+",\n")
476
+	if this.PreviousSpec != nil {
477
+		s = append(s, "PreviousSpec: "+fmt.Sprintf("%#v", this.PreviousSpec)+",\n")
478
+	}
476 479
 	if this.Endpoint != nil {
477 480
 		s = append(s, "Endpoint: "+fmt.Sprintf("%#v", this.Endpoint)+",\n")
478 481
 	}
... ...
@@ -596,11 +603,12 @@ func valueToGoStringObjects(v interface{}, typ string) string {
596 596
 	pv := reflect.Indirect(rv).Interface()
597 597
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
598 598
 }
599
-func extensionToGoStringObjects(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
599
+func extensionToGoStringObjects(m github_com_gogo_protobuf_proto.Message) string {
600
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
600 601
 	if e == nil {
601 602
 		return "nil"
602 603
 	}
603
-	s := "map[int32]proto.Extension{"
604
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
604 605
 	keys := make([]int, 0, len(e))
605 606
 	for k := range e {
606 607
 		keys = append(keys, int(k))
... ...
@@ -610,7 +618,7 @@ func extensionToGoStringObjects(e map[int32]github_com_gogo_protobuf_proto.Exten
610 610
 	for _, k := range keys {
611 611
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
612 612
 	}
613
-	s += strings.Join(ss, ",") + "}"
613
+	s += strings.Join(ss, ",") + "})"
614 614
 	return s
615 615
 }
616 616
 func (m *Meta) Marshal() (data []byte, err error) {
... ...
@@ -802,6 +810,16 @@ func (m *Service) MarshalTo(data []byte) (int, error) {
802 802
 		}
803 803
 		i += n14
804 804
 	}
805
+	if m.PreviousSpec != nil {
806
+		data[i] = 0x32
807
+		i++
808
+		i = encodeVarintObjects(data, i, uint64(m.PreviousSpec.Size()))
809
+		n15, err := m.PreviousSpec.MarshalTo(data[i:])
810
+		if err != nil {
811
+			return 0, err
812
+		}
813
+		i += n15
814
+	}
805 815
 	return i, nil
806 816
 }
807 817
 
... ...
@@ -824,11 +842,11 @@ func (m *Endpoint) MarshalTo(data []byte) (int, error) {
824 824
 		data[i] = 0xa
825 825
 		i++
826 826
 		i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
827
-		n15, err := m.Spec.MarshalTo(data[i:])
827
+		n16, err := m.Spec.MarshalTo(data[i:])
828 828
 		if err != nil {
829 829
 			return 0, err
830 830
 		}
831
-		i += n15
831
+		i += n16
832 832
 	}
833 833
 	if len(m.Ports) > 0 {
834 834
 		for _, msg := range m.Ports {
... ...
@@ -911,19 +929,19 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
911 911
 	data[i] = 0x12
912 912
 	i++
913 913
 	i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
914
-	n16, err := m.Meta.MarshalTo(data[i:])
914
+	n17, err := m.Meta.MarshalTo(data[i:])
915 915
 	if err != nil {
916 916
 		return 0, err
917 917
 	}
918
-	i += n16
918
+	i += n17
919 919
 	data[i] = 0x1a
920 920
 	i++
921 921
 	i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
922
-	n17, err := m.Spec.MarshalTo(data[i:])
922
+	n18, err := m.Spec.MarshalTo(data[i:])
923 923
 	if err != nil {
924 924
 		return 0, err
925 925
 	}
926
-	i += n17
926
+	i += n18
927 927
 	if len(m.ServiceID) > 0 {
928 928
 		data[i] = 0x22
929 929
 		i++
... ...
@@ -944,27 +962,27 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
944 944
 	data[i] = 0x3a
945 945
 	i++
946 946
 	i = encodeVarintObjects(data, i, uint64(m.Annotations.Size()))
947
-	n18, err := m.Annotations.MarshalTo(data[i:])
947
+	n19, err := m.Annotations.MarshalTo(data[i:])
948 948
 	if err != nil {
949 949
 		return 0, err
950 950
 	}
951
-	i += n18
951
+	i += n19
952 952
 	data[i] = 0x42
953 953
 	i++
954 954
 	i = encodeVarintObjects(data, i, uint64(m.ServiceAnnotations.Size()))
955
-	n19, err := m.ServiceAnnotations.MarshalTo(data[i:])
955
+	n20, err := m.ServiceAnnotations.MarshalTo(data[i:])
956 956
 	if err != nil {
957 957
 		return 0, err
958 958
 	}
959
-	i += n19
959
+	i += n20
960 960
 	data[i] = 0x4a
961 961
 	i++
962 962
 	i = encodeVarintObjects(data, i, uint64(m.Status.Size()))
963
-	n20, err := m.Status.MarshalTo(data[i:])
963
+	n21, err := m.Status.MarshalTo(data[i:])
964 964
 	if err != nil {
965 965
 		return 0, err
966 966
 	}
967
-	i += n20
967
+	i += n21
968 968
 	if m.DesiredState != 0 {
969 969
 		data[i] = 0x50
970 970
 		i++
... ...
@@ -986,21 +1004,21 @@ func (m *Task) MarshalTo(data []byte) (int, error) {
986 986
 		data[i] = 0x62
987 987
 		i++
988 988
 		i = encodeVarintObjects(data, i, uint64(m.Endpoint.Size()))
989
-		n21, err := m.Endpoint.MarshalTo(data[i:])
989
+		n22, err := m.Endpoint.MarshalTo(data[i:])
990 990
 		if err != nil {
991 991
 			return 0, err
992 992
 		}
993
-		i += n21
993
+		i += n22
994 994
 	}
995 995
 	if m.LogDriver != nil {
996 996
 		data[i] = 0x6a
997 997
 		i++
998 998
 		i = encodeVarintObjects(data, i, uint64(m.LogDriver.Size()))
999
-		n22, err := m.LogDriver.MarshalTo(data[i:])
999
+		n23, err := m.LogDriver.MarshalTo(data[i:])
1000 1000
 		if err != nil {
1001 1001
 			return 0, err
1002 1002
 		}
1003
-		i += n22
1003
+		i += n23
1004 1004
 	}
1005 1005
 	return i, nil
1006 1006
 }
... ...
@@ -1024,11 +1042,11 @@ func (m *NetworkAttachment) MarshalTo(data []byte) (int, error) {
1024 1024
 		data[i] = 0xa
1025 1025
 		i++
1026 1026
 		i = encodeVarintObjects(data, i, uint64(m.Network.Size()))
1027
-		n23, err := m.Network.MarshalTo(data[i:])
1027
+		n24, err := m.Network.MarshalTo(data[i:])
1028 1028
 		if err != nil {
1029 1029
 			return 0, err
1030 1030
 		}
1031
-		i += n23
1031
+		i += n24
1032 1032
 	}
1033 1033
 	if len(m.Addresses) > 0 {
1034 1034
 		for _, s := range m.Addresses {
... ...
@@ -1087,38 +1105,38 @@ func (m *Network) MarshalTo(data []byte) (int, error) {
1087 1087
 	data[i] = 0x12
1088 1088
 	i++
1089 1089
 	i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
1090
-	n24, err := m.Meta.MarshalTo(data[i:])
1090
+	n25, err := m.Meta.MarshalTo(data[i:])
1091 1091
 	if err != nil {
1092 1092
 		return 0, err
1093 1093
 	}
1094
-	i += n24
1094
+	i += n25
1095 1095
 	data[i] = 0x1a
1096 1096
 	i++
1097 1097
 	i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
1098
-	n25, err := m.Spec.MarshalTo(data[i:])
1098
+	n26, err := m.Spec.MarshalTo(data[i:])
1099 1099
 	if err != nil {
1100 1100
 		return 0, err
1101 1101
 	}
1102
-	i += n25
1102
+	i += n26
1103 1103
 	if m.DriverState != nil {
1104 1104
 		data[i] = 0x22
1105 1105
 		i++
1106 1106
 		i = encodeVarintObjects(data, i, uint64(m.DriverState.Size()))
1107
-		n26, err := m.DriverState.MarshalTo(data[i:])
1107
+		n27, err := m.DriverState.MarshalTo(data[i:])
1108 1108
 		if err != nil {
1109 1109
 			return 0, err
1110 1110
 		}
1111
-		i += n26
1111
+		i += n27
1112 1112
 	}
1113 1113
 	if m.IPAM != nil {
1114 1114
 		data[i] = 0x2a
1115 1115
 		i++
1116 1116
 		i = encodeVarintObjects(data, i, uint64(m.IPAM.Size()))
1117
-		n27, err := m.IPAM.MarshalTo(data[i:])
1117
+		n28, err := m.IPAM.MarshalTo(data[i:])
1118 1118
 		if err != nil {
1119 1119
 			return 0, err
1120 1120
 		}
1121
-		i += n27
1121
+		i += n28
1122 1122
 	}
1123 1123
 	return i, nil
1124 1124
 }
... ...
@@ -1147,27 +1165,27 @@ func (m *Cluster) MarshalTo(data []byte) (int, error) {
1147 1147
 	data[i] = 0x12
1148 1148
 	i++
1149 1149
 	i = encodeVarintObjects(data, i, uint64(m.Meta.Size()))
1150
-	n28, err := m.Meta.MarshalTo(data[i:])
1150
+	n29, err := m.Meta.MarshalTo(data[i:])
1151 1151
 	if err != nil {
1152 1152
 		return 0, err
1153 1153
 	}
1154
-	i += n28
1154
+	i += n29
1155 1155
 	data[i] = 0x1a
1156 1156
 	i++
1157 1157
 	i = encodeVarintObjects(data, i, uint64(m.Spec.Size()))
1158
-	n29, err := m.Spec.MarshalTo(data[i:])
1158
+	n30, err := m.Spec.MarshalTo(data[i:])
1159 1159
 	if err != nil {
1160 1160
 		return 0, err
1161 1161
 	}
1162
-	i += n29
1162
+	i += n30
1163 1163
 	data[i] = 0x22
1164 1164
 	i++
1165 1165
 	i = encodeVarintObjects(data, i, uint64(m.RootCA.Size()))
1166
-	n30, err := m.RootCA.MarshalTo(data[i:])
1166
+	n31, err := m.RootCA.MarshalTo(data[i:])
1167 1167
 	if err != nil {
1168 1168
 		return 0, err
1169 1169
 	}
1170
-	i += n30
1170
+	i += n31
1171 1171
 	if len(m.NetworkBootstrapKeys) > 0 {
1172 1172
 		for _, msg := range m.NetworkBootstrapKeys {
1173 1173
 			data[i] = 0x2a
... ...
@@ -1281,6 +1299,10 @@ func (m *Service) Size() (n int) {
1281 1281
 		l = m.UpdateStatus.Size()
1282 1282
 		n += 1 + l + sovObjects(uint64(l))
1283 1283
 	}
1284
+	if m.PreviousSpec != nil {
1285
+		l = m.PreviousSpec.Size()
1286
+		n += 1 + l + sovObjects(uint64(l))
1287
+	}
1284 1288
 	return n
1285 1289
 }
1286 1290
 
... ...
@@ -1489,6 +1511,7 @@ func (this *Service) String() string {
1489 1489
 		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`,
1490 1490
 		`Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`,
1491 1491
 		`UpdateStatus:` + strings.Replace(fmt.Sprintf("%v", this.UpdateStatus), "UpdateStatus", "UpdateStatus", 1) + `,`,
1492
+		`PreviousSpec:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpec), "ServiceSpec", "ServiceSpec", 1) + `,`,
1492 1493
 		`}`,
1493 1494
 	}, "")
1494 1495
 	return s
... ...
@@ -2215,6 +2238,39 @@ func (m *Service) Unmarshal(data []byte) error {
2215 2215
 				return err
2216 2216
 			}
2217 2217
 			iNdEx = postIndex
2218
+		case 6:
2219
+			if wireType != 2 {
2220
+				return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpec", wireType)
2221
+			}
2222
+			var msglen int
2223
+			for shift := uint(0); ; shift += 7 {
2224
+				if shift >= 64 {
2225
+					return ErrIntOverflowObjects
2226
+				}
2227
+				if iNdEx >= l {
2228
+					return io.ErrUnexpectedEOF
2229
+				}
2230
+				b := data[iNdEx]
2231
+				iNdEx++
2232
+				msglen |= (int(b) & 0x7F) << shift
2233
+				if b < 0x80 {
2234
+					break
2235
+				}
2236
+			}
2237
+			if msglen < 0 {
2238
+				return ErrInvalidLengthObjects
2239
+			}
2240
+			postIndex := iNdEx + msglen
2241
+			if postIndex > l {
2242
+				return io.ErrUnexpectedEOF
2243
+			}
2244
+			if m.PreviousSpec == nil {
2245
+				m.PreviousSpec = &ServiceSpec{}
2246
+			}
2247
+			if err := m.PreviousSpec.Unmarshal(data[iNdEx:postIndex]); err != nil {
2248
+				return err
2249
+			}
2250
+			iNdEx = postIndex
2218 2251
 		default:
2219 2252
 			iNdEx = preIndex
2220 2253
 			skippy, err := skipObjects(data[iNdEx:])
... ...
@@ -3581,70 +3637,73 @@ var (
3581 3581
 	ErrIntOverflowObjects   = fmt.Errorf("proto: integer overflow")
3582 3582
 )
3583 3583
 
3584
+func init() { proto.RegisterFile("objects.proto", fileDescriptorObjects) }
3585
+
3584 3586
 var fileDescriptorObjects = []byte{
3585
-	// 1009 bytes of a gzipped FileDescriptorProto
3586
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6e, 0x1b, 0x45,
3587
-	0x1c, 0xce, 0xda, 0x1b, 0xdb, 0xfb, 0x73, 0x1c, 0x89, 0xa1, 0xaa, 0xb6, 0x21, 0xd8, 0xc1, 0x15,
3588
-	0xa8, 0x87, 0xca, 0x15, 0xa5, 0x20, 0x2a, 0x5a, 0x21, 0xff, 0x13, 0x58, 0x25, 0x10, 0x4d, 0x4b,
3589
-	0x7a, 0x5c, 0x4d, 0x76, 0xa7, 0x66, 0xb1, 0xbd, 0xb3, 0x9a, 0x19, 0xbb, 0xf2, 0x0d, 0xf1, 0x00,
3590
-	0x48, 0xbc, 0x00, 0xaf, 0xc2, 0x35, 0x07, 0x0e, 0x1c, 0x39, 0x59, 0xc4, 0x37, 0x4e, 0xf0, 0x08,
3591
-	0x68, 0x66, 0x67, 0xed, 0x8d, 0xbc, 0x0e, 0x8d, 0x84, 0x72, 0x9b, 0xd9, 0xf9, 0xbe, 0x6f, 0x7e,
3592
-	0xff, 0x67, 0xa1, 0xc6, 0xce, 0xbe, 0xa7, 0xbe, 0x14, 0xad, 0x98, 0x33, 0xc9, 0x10, 0x0a, 0x98,
3587
+	// 1029 bytes of a gzipped FileDescriptorProto
3588
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45,
3589
+	0x18, 0xce, 0xda, 0x1b, 0xdb, 0xfb, 0x3a, 0x8e, 0xc4, 0x50, 0x55, 0xdb, 0x10, 0xec, 0xe0, 0x0a,
3590
+	0xd4, 0x43, 0xe5, 0x8a, 0x52, 0x10, 0x15, 0xad, 0x90, 0xbf, 0x04, 0x56, 0x09, 0x44, 0xd3, 0x92,
3591
+	0x1e, 0x57, 0x93, 0xdd, 0xa9, 0x59, 0x6c, 0xef, 0xac, 0x66, 0xc6, 0xae, 0x7c, 0x43, 0xfc, 0x00,
3592
+	0x7e, 0x02, 0x7f, 0x85, 0x6b, 0x0e, 0x1c, 0xb8, 0xc1, 0xc9, 0x22, 0xbe, 0x71, 0x82, 0x9f, 0x80,
3593
+	0x66, 0x76, 0xd6, 0xde, 0xc8, 0xeb, 0x90, 0x4a, 0x28, 0xb7, 0x99, 0x9d, 0xe7, 0x79, 0xde, 0xcf,
3594
+	0x79, 0x77, 0xa0, 0xc6, 0xce, 0xbe, 0xa7, 0xbe, 0x14, 0xad, 0x98, 0x33, 0xc9, 0x10, 0x0a, 0x98,
3593 3595
 	0x3f, 0xa2, 0xbc, 0x25, 0x5e, 0x13, 0x3e, 0x19, 0x85, 0xb2, 0x35, 0xfb, 0xf0, 0xa0, 0x2a, 0xe7,
3594 3596
 	0x31, 0x35, 0x80, 0x83, 0xaa, 0x88, 0xa9, 0x9f, 0x6e, 0xee, 0xc8, 0x70, 0x42, 0x85, 0x24, 0x93,
3595 3597
 	0xf8, 0xc1, 0x6a, 0x65, 0x8e, 0x6e, 0x0d, 0xd9, 0x90, 0xe9, 0xe5, 0x03, 0xb5, 0x4a, 0xbe, 0x36,
3596
-	0x7f, 0xb5, 0xc0, 0x3e, 0xa6, 0x92, 0xa0, 0xcf, 0xa0, 0x3c, 0xa3, 0x5c, 0x84, 0x2c, 0x72, 0xad,
3597
-	0x23, 0xeb, 0x5e, 0xf5, 0xe1, 0x3b, 0xad, 0xcd, 0x9b, 0x5b, 0xa7, 0x09, 0xa4, 0x63, 0x9f, 0x2f,
3598
-	0x1a, 0x3b, 0x38, 0x65, 0xa0, 0x27, 0x00, 0x3e, 0xa7, 0x44, 0xd2, 0xc0, 0x23, 0xd2, 0x2d, 0x68,
3599
-	0xfe, 0xbb, 0x79, 0xfc, 0x17, 0xa9, 0x51, 0xd8, 0x31, 0x84, 0xb6, 0x54, 0xec, 0x69, 0x1c, 0xa4,
3600
-	0xec, 0xe2, 0x1b, 0xb1, 0x0d, 0xa1, 0x2d, 0x9b, 0x7f, 0x15, 0xc1, 0xfe, 0x9a, 0x05, 0x14, 0xdd,
3601
-	0x86, 0x42, 0x18, 0x68, 0xe3, 0x9d, 0x4e, 0x69, 0xb9, 0x68, 0x14, 0x06, 0x3d, 0x5c, 0x08, 0x03,
3602
-	0xf4, 0x10, 0xec, 0x09, 0x95, 0xc4, 0x98, 0xe5, 0xe6, 0x09, 0xab, 0x08, 0x18, 0x9f, 0x34, 0x16,
3603
-	0x7d, 0x02, 0xb6, 0x0a, 0xab, 0x31, 0xe6, 0x30, 0x8f, 0xa3, 0xee, 0x7c, 0x1e, 0x53, 0x3f, 0xe5,
3604
-	0x29, 0x3c, 0xea, 0x43, 0x35, 0xa0, 0xc2, 0xe7, 0x61, 0x2c, 0x55, 0x24, 0x6d, 0x4d, 0xbf, 0xbb,
3605
-	0x8d, 0xde, 0x5b, 0x43, 0x71, 0x96, 0x87, 0x9e, 0x40, 0x49, 0x48, 0x22, 0xa7, 0xc2, 0xdd, 0xd5,
3606
-	0x0a, 0xf5, 0xad, 0x06, 0x68, 0x94, 0x31, 0xc1, 0x70, 0xd0, 0x97, 0xb0, 0x3f, 0x21, 0x11, 0x19,
3607
-	0x52, 0xee, 0x19, 0x95, 0x92, 0x56, 0x79, 0x2f, 0xd7, 0xf5, 0x04, 0x99, 0x08, 0xe1, 0xda, 0x24,
3608
-	0xbb, 0x45, 0x7d, 0x00, 0x22, 0x25, 0xf1, 0xbf, 0x9b, 0xd0, 0x48, 0xba, 0x65, 0xad, 0xf2, 0x7e,
3609
-	0xae, 0x2d, 0x54, 0xbe, 0x66, 0x7c, 0xd4, 0x5e, 0x81, 0x71, 0x86, 0x88, 0xbe, 0x80, 0xaa, 0x4f,
3610
-	0xb9, 0x0c, 0x5f, 0x85, 0x3e, 0x91, 0xd4, 0xad, 0x68, 0x9d, 0x46, 0x9e, 0x4e, 0x77, 0x0d, 0x33,
3611
-	0x4e, 0x65, 0x99, 0xcd, 0x9f, 0x0b, 0x50, 0x7e, 0x4e, 0xf9, 0x2c, 0xf4, 0xff, 0xdf, 0x74, 0x3f,
3612
-	0xbe, 0x94, 0xee, 0x5c, 0xcb, 0xcc, 0xb5, 0x1b, 0x19, 0xff, 0x14, 0x2a, 0x34, 0x0a, 0x62, 0x16,
3613
-	0x46, 0xd2, 0xa4, 0x3b, 0xb7, 0x5a, 0xfa, 0x06, 0x83, 0x57, 0x68, 0xd4, 0x87, 0x5a, 0x52, 0xc5,
3614
-	0xde, 0xa5, 0x5c, 0x1f, 0xe5, 0xd1, 0xbf, 0xd5, 0x40, 0x93, 0xa4, 0xbd, 0x69, 0x66, 0xd7, 0xfc,
3615
-	0xa5, 0x00, 0x95, 0x54, 0x1d, 0x3d, 0x32, 0x8e, 0x58, 0xdb, 0xa5, 0x52, 0xac, 0xf2, 0xc4, 0xf8,
3616
-	0xf0, 0x08, 0x76, 0x63, 0xc6, 0xa5, 0x70, 0x0b, 0x47, 0xc5, 0x6d, 0xd5, 0x76, 0xc2, 0xb8, 0xec,
3617
-	0xb2, 0xe8, 0x55, 0x38, 0xc4, 0x09, 0x18, 0xbd, 0x84, 0xea, 0x2c, 0xe4, 0x72, 0x4a, 0xc6, 0x5e,
3618
-	0x18, 0x0b, 0xb7, 0xa8, 0xb9, 0x1f, 0x5c, 0x75, 0x65, 0xeb, 0x34, 0xc1, 0x0f, 0x4e, 0x3a, 0xfb,
3619
-	0xcb, 0x45, 0x03, 0x56, 0x5b, 0x81, 0xc1, 0x48, 0x0d, 0x62, 0x71, 0x70, 0x0c, 0xce, 0xea, 0x04,
3620
-	0xdd, 0x07, 0x88, 0x92, 0xe2, 0xf2, 0x56, 0xe9, 0xae, 0x2d, 0x17, 0x0d, 0xc7, 0x94, 0xdc, 0xa0,
3621
-	0x87, 0x1d, 0x03, 0x18, 0x04, 0x08, 0x81, 0x4d, 0x82, 0x80, 0xeb, 0xe4, 0x3b, 0x58, 0xaf, 0x9b,
3622
-	0xbf, 0xed, 0x82, 0xfd, 0x82, 0x88, 0xd1, 0x4d, 0x0f, 0x08, 0x75, 0xe7, 0x46, 0xb9, 0xdc, 0x07,
3623
-	0x10, 0x49, 0x25, 0x29, 0x77, 0xec, 0xb5, 0x3b, 0xa6, 0xbe, 0x94, 0x3b, 0x06, 0x90, 0xb8, 0x23,
3624
-	0xc6, 0x4c, 0xea, 0xca, 0xb0, 0xb1, 0x5e, 0xa3, 0xbb, 0x50, 0x8e, 0x58, 0xa0, 0xe9, 0x25, 0x4d,
3625
-	0x87, 0xe5, 0xa2, 0x51, 0x52, 0xc3, 0x60, 0xd0, 0xc3, 0x25, 0x75, 0x34, 0x08, 0x54, 0xc7, 0x91,
3626
-	0x28, 0x62, 0x92, 0xa8, 0x71, 0x22, 0x4c, 0xe7, 0xe6, 0xd6, 0x75, 0x7b, 0x0d, 0x4b, 0x3b, 0x2e,
3627
-	0xc3, 0x44, 0xa7, 0xf0, 0x76, 0x6a, 0x6f, 0x56, 0xb0, 0x72, 0x1d, 0x41, 0x64, 0x14, 0x32, 0x27,
3628
-	0x99, 0x09, 0xe7, 0x6c, 0x9f, 0x70, 0x3a, 0x82, 0x79, 0x13, 0xae, 0x03, 0xb5, 0x80, 0x8a, 0x90,
3629
-	0xd3, 0x40, 0xf7, 0x0e, 0x75, 0xe1, 0xc8, 0xba, 0xb7, 0xbf, 0xe5, 0xd1, 0x30, 0x22, 0x14, 0xef,
3630
-	0x19, 0x8e, 0xde, 0xa1, 0x36, 0x54, 0x4c, 0xdd, 0x08, 0xb7, 0xaa, 0x6b, 0xf7, 0x0d, 0x27, 0xdb,
3631
-	0x8a, 0x76, 0xa9, 0xf7, 0xf7, 0xae, 0xd5, 0xfb, 0x8f, 0x01, 0xc6, 0x6c, 0xe8, 0x05, 0x3c, 0x9c,
3632
-	0x51, 0xee, 0xd6, 0x34, 0xf7, 0x20, 0x8f, 0xdb, 0xd3, 0x08, 0xec, 0x8c, 0xd9, 0x30, 0x59, 0x36,
3633
-	0x7f, 0xb4, 0xe0, 0xad, 0x0d, 0xa3, 0xd0, 0xc7, 0x50, 0x36, 0x66, 0x5d, 0xf5, 0x7c, 0x1b, 0x1e,
3634
-	0x4e, 0xb1, 0xe8, 0x10, 0x1c, 0xd5, 0x23, 0x54, 0x08, 0x9a, 0x74, 0xbf, 0x83, 0xd7, 0x1f, 0x90,
3635
-	0x0b, 0x65, 0x32, 0x0e, 0x89, 0x3a, 0x2b, 0xea, 0xb3, 0x74, 0xdb, 0xfc, 0xa9, 0x00, 0x65, 0x23,
3636
-	0x76, 0xd3, 0x83, 0xd8, 0x5c, 0xbb, 0xd1, 0x59, 0x4f, 0x61, 0x2f, 0x09, 0xa7, 0x29, 0x09, 0xfb,
3637
-	0x3f, 0x83, 0x5a, 0x4d, 0xf0, 0x49, 0x39, 0x3c, 0x05, 0x3b, 0x8c, 0xc9, 0xc4, 0x0c, 0xe1, 0xdc,
3638
-	0x9b, 0x07, 0x27, 0xed, 0xe3, 0x6f, 0xe2, 0xa4, 0xb2, 0x2b, 0xcb, 0x45, 0xc3, 0x56, 0x1f, 0xb0,
3639
-	0xa6, 0x35, 0xff, 0x2e, 0x40, 0xb9, 0x3b, 0x9e, 0x0a, 0x49, 0xf9, 0x4d, 0x07, 0xc4, 0x5c, 0xbb,
3640
-	0x11, 0x90, 0x2e, 0x94, 0x39, 0x63, 0xd2, 0xf3, 0xc9, 0x55, 0xb1, 0xc0, 0x8c, 0xc9, 0x6e, 0xbb,
3641
-	0xb3, 0xaf, 0x88, 0x6a, 0x90, 0x24, 0x7b, 0x5c, 0x52, 0xd4, 0x2e, 0x41, 0x2f, 0xe1, 0x76, 0x3a,
3642
-	0x7e, 0xcf, 0x18, 0x93, 0x42, 0x72, 0x12, 0x7b, 0x23, 0x3a, 0x57, 0xaf, 0x55, 0x71, 0xdb, 0x3f,
3643
-	0x45, 0x3f, 0xf2, 0xf9, 0x5c, 0x07, 0xea, 0x19, 0x9d, 0xe3, 0x5b, 0x46, 0xa0, 0x93, 0xf2, 0x9f,
3644
-	0xd1, 0xb9, 0x40, 0x9f, 0xc3, 0x21, 0x5d, 0xc1, 0x94, 0xa2, 0x37, 0x26, 0x13, 0xf5, 0xb0, 0x78,
3645
-	0xfe, 0x98, 0xf9, 0x23, 0x3d, 0xdb, 0x6c, 0x7c, 0x87, 0x66, 0xa5, 0xbe, 0x4a, 0x10, 0x5d, 0x05,
3646
-	0xe8, 0x1c, 0x9e, 0x5f, 0xd4, 0x77, 0xfe, 0xb8, 0xa8, 0xef, 0xfc, 0x73, 0x51, 0xb7, 0x7e, 0x58,
3647
-	0xd6, 0xad, 0xf3, 0x65, 0xdd, 0xfa, 0x7d, 0x59, 0xb7, 0xfe, 0x5c, 0xd6, 0xad, 0xb3, 0x92, 0xfe,
3648
-	0xbd, 0xfd, 0xe8, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x56, 0x49, 0xe6, 0x55, 0x4e, 0x0b, 0x00,
3649
-	0x00,
3598
+	0x7f, 0xb1, 0xc0, 0x3e, 0xa6, 0x92, 0xa0, 0xcf, 0xa0, 0x3c, 0xa3, 0x5c, 0x84, 0x2c, 0x72, 0xad,
3599
+	0x23, 0xeb, 0x5e, 0xf5, 0xe1, 0x3b, 0xad, 0x4d, 0xcb, 0xad, 0xd3, 0x04, 0xd2, 0xb1, 0xcf, 0x17,
3600
+	0x8d, 0x1d, 0x9c, 0x32, 0xd0, 0x13, 0x00, 0x9f, 0x53, 0x22, 0x69, 0xe0, 0x11, 0xe9, 0x16, 0x34,
3601
+	0xff, 0xdd, 0x3c, 0xfe, 0x8b, 0xd4, 0x29, 0xec, 0x18, 0x42, 0x5b, 0x2a, 0xf6, 0x34, 0x0e, 0x52,
3602
+	0x76, 0xf1, 0x5a, 0x6c, 0x43, 0x68, 0xcb, 0xe6, 0x5f, 0x45, 0xb0, 0xbf, 0x66, 0x01, 0x45, 0xb7,
3603
+	0xa1, 0x10, 0x06, 0xda, 0x79, 0xa7, 0x53, 0x5a, 0x2e, 0x1a, 0x85, 0x41, 0x0f, 0x17, 0xc2, 0x00,
3604
+	0x3d, 0x04, 0x7b, 0x42, 0x25, 0x31, 0x6e, 0xb9, 0x79, 0xc2, 0x2a, 0x03, 0x26, 0x26, 0x8d, 0x45,
3605
+	0x9f, 0x80, 0xad, 0xd2, 0x6a, 0x9c, 0x39, 0xcc, 0xe3, 0x28, 0x9b, 0xcf, 0x63, 0xea, 0xa7, 0x3c,
3606
+	0x85, 0x47, 0x7d, 0xa8, 0x06, 0x54, 0xf8, 0x3c, 0x8c, 0xa5, 0xca, 0xa4, 0xad, 0xe9, 0x77, 0xb7,
3607
+	0xd1, 0x7b, 0x6b, 0x28, 0xce, 0xf2, 0xd0, 0x13, 0x28, 0x09, 0x49, 0xe4, 0x54, 0xb8, 0xbb, 0x5a,
3608
+	0xa1, 0xbe, 0xd5, 0x01, 0x8d, 0x32, 0x2e, 0x18, 0x0e, 0xfa, 0x12, 0xf6, 0x27, 0x24, 0x22, 0x43,
3609
+	0xca, 0x3d, 0xa3, 0x52, 0xd2, 0x2a, 0xef, 0xe5, 0x86, 0x9e, 0x20, 0x13, 0x21, 0x5c, 0x9b, 0x64,
3610
+	0xb7, 0xa8, 0x0f, 0x40, 0xa4, 0x24, 0xfe, 0x77, 0x13, 0x1a, 0x49, 0xb7, 0xac, 0x55, 0xde, 0xcf,
3611
+	0xf5, 0x85, 0xca, 0xd7, 0x8c, 0x8f, 0xda, 0x2b, 0x30, 0xce, 0x10, 0xd1, 0x17, 0x50, 0xf5, 0x29,
3612
+	0x97, 0xe1, 0xab, 0xd0, 0x27, 0x92, 0xba, 0x15, 0xad, 0xd3, 0xc8, 0xd3, 0xe9, 0xae, 0x61, 0x26,
3613
+	0xa8, 0x2c, 0xb3, 0xf9, 0x7b, 0x01, 0xca, 0xcf, 0x29, 0x9f, 0x85, 0xfe, 0xff, 0x5b, 0xee, 0xc7,
3614
+	0x97, 0xca, 0x9d, 0xeb, 0x99, 0x31, 0xbb, 0x51, 0xf1, 0x4f, 0xa1, 0x42, 0xa3, 0x20, 0x66, 0x61,
3615
+	0x24, 0x4d, 0xb9, 0x73, 0xbb, 0xa5, 0x6f, 0x30, 0x78, 0x85, 0x46, 0x7d, 0xa8, 0x25, 0x5d, 0xec,
3616
+	0x5d, 0xaa, 0xf5, 0x51, 0x1e, 0xfd, 0x5b, 0x0d, 0x34, 0x45, 0xda, 0x9b, 0x66, 0x76, 0xa8, 0x07,
3617
+	0xb5, 0x98, 0xd3, 0x59, 0xc8, 0xa6, 0xc2, 0xd3, 0x41, 0x94, 0xae, 0x15, 0x04, 0xde, 0x4b, 0x59,
3618
+	0x6a, 0xd7, 0xfc, 0xb9, 0x00, 0x95, 0xd4, 0x47, 0xf4, 0xc8, 0xa4, 0xc3, 0xda, 0xee, 0x50, 0x8a,
3619
+	0xd5, 0x52, 0x49, 0x26, 0x1e, 0xc1, 0x6e, 0xcc, 0xb8, 0x14, 0x6e, 0xe1, 0xa8, 0xb8, 0xad, 0x67,
3620
+	0x4f, 0x18, 0x97, 0x5d, 0x16, 0xbd, 0x0a, 0x87, 0x38, 0x01, 0xa3, 0x97, 0x50, 0x9d, 0x85, 0x5c,
3621
+	0x4e, 0xc9, 0xd8, 0x0b, 0x63, 0xe1, 0x16, 0x35, 0xf7, 0x83, 0xab, 0x4c, 0xb6, 0x4e, 0x13, 0xfc,
3622
+	0xe0, 0xa4, 0xb3, 0xbf, 0x5c, 0x34, 0x60, 0xb5, 0x15, 0x18, 0x8c, 0xd4, 0x20, 0x16, 0x07, 0xc7,
3623
+	0xe0, 0xac, 0x4e, 0xd0, 0x7d, 0x80, 0x28, 0x69, 0x51, 0x6f, 0xd5, 0x34, 0xb5, 0xe5, 0xa2, 0xe1,
3624
+	0x98, 0xc6, 0x1d, 0xf4, 0xb0, 0x63, 0x00, 0x83, 0x00, 0x21, 0xb0, 0x49, 0x10, 0x70, 0xdd, 0x42,
3625
+	0x0e, 0xd6, 0xeb, 0xe6, 0xaf, 0xbb, 0x60, 0xbf, 0x20, 0x62, 0x74, 0xd3, 0x63, 0x46, 0xd9, 0xdc,
3626
+	0x68, 0xba, 0xfb, 0x00, 0x22, 0x29, 0xa5, 0x0a, 0xc7, 0x5e, 0x87, 0x63, 0x0a, 0xac, 0xc2, 0x31,
3627
+	0x80, 0x24, 0x1c, 0x31, 0x66, 0x52, 0xf7, 0x97, 0x8d, 0xf5, 0x1a, 0xdd, 0x85, 0x72, 0xc4, 0x02,
3628
+	0x4d, 0x2f, 0x69, 0x3a, 0x2c, 0x17, 0x8d, 0x92, 0x1a, 0x29, 0x83, 0x1e, 0x2e, 0xa9, 0xa3, 0x41,
3629
+	0xa0, 0xee, 0x2d, 0x89, 0x22, 0x26, 0x89, 0x1a, 0x4a, 0xc2, 0xdc, 0xff, 0xdc, 0xc6, 0x6a, 0xaf,
3630
+	0x61, 0xe9, 0xbd, 0xcd, 0x30, 0xd1, 0x29, 0xbc, 0x9d, 0xfa, 0x9b, 0x15, 0xac, 0xbc, 0x89, 0x20,
3631
+	0x32, 0x0a, 0x99, 0x93, 0xcc, 0x9c, 0x74, 0xb6, 0xcf, 0x49, 0x9d, 0xc1, 0xbc, 0x39, 0xd9, 0x81,
3632
+	0x5a, 0x40, 0x45, 0xc8, 0x69, 0xa0, 0x6f, 0x20, 0x75, 0xe1, 0xc8, 0xba, 0xb7, 0xbf, 0xe5, 0xd7,
3633
+	0x63, 0x44, 0x28, 0xde, 0x33, 0x1c, 0xbd, 0x43, 0x6d, 0xa8, 0x98, 0xbe, 0x11, 0x6e, 0x55, 0xf7,
3634
+	0xee, 0x35, 0xe7, 0xe3, 0x8a, 0x76, 0x69, 0x82, 0xec, 0xbd, 0xd1, 0x04, 0x79, 0x0c, 0x30, 0x66,
3635
+	0x43, 0x2f, 0xe0, 0xe1, 0x8c, 0x72, 0xb7, 0xa6, 0xb9, 0x07, 0x79, 0xdc, 0x9e, 0x46, 0x60, 0x67,
3636
+	0xcc, 0x86, 0xc9, 0xb2, 0xf9, 0xa3, 0x05, 0x6f, 0x6d, 0x38, 0x85, 0x3e, 0x86, 0xb2, 0x71, 0xeb,
3637
+	0xaa, 0x47, 0x80, 0xe1, 0xe1, 0x14, 0x8b, 0x0e, 0xc1, 0x51, 0x77, 0x84, 0x0a, 0x41, 0x93, 0xdb,
3638
+	0xef, 0xe0, 0xf5, 0x07, 0xe4, 0x42, 0x99, 0x8c, 0x43, 0xa2, 0xce, 0x8a, 0xfa, 0x2c, 0xdd, 0x36,
3639
+	0x7f, 0x2a, 0x40, 0xd9, 0x88, 0xdd, 0xf4, 0x38, 0x37, 0x66, 0x37, 0x6e, 0xd6, 0x53, 0xd8, 0x4b,
3640
+	0xd2, 0x69, 0x5a, 0xc2, 0xfe, 0xcf, 0xa4, 0x56, 0x13, 0x7c, 0xd2, 0x0e, 0x4f, 0xc1, 0x0e, 0x63,
3641
+	0x32, 0x31, 0xa3, 0x3c, 0xd7, 0xf2, 0xe0, 0xa4, 0x7d, 0xfc, 0x4d, 0x9c, 0x74, 0x76, 0x65, 0xb9,
3642
+	0x68, 0xd8, 0xea, 0x03, 0xd6, 0xb4, 0xe6, 0xdf, 0x05, 0x28, 0x77, 0xc7, 0x53, 0x21, 0x29, 0xbf,
3643
+	0xe9, 0x84, 0x18, 0xb3, 0x1b, 0x09, 0xe9, 0x42, 0x99, 0x33, 0x26, 0x3d, 0x9f, 0x5c, 0x95, 0x0b,
3644
+	0xcc, 0x98, 0xec, 0xb6, 0x3b, 0xfb, 0x8a, 0xa8, 0x06, 0x49, 0xb2, 0xc7, 0x25, 0x45, 0xed, 0x12,
3645
+	0xf4, 0x12, 0x6e, 0xa7, 0xe3, 0xf7, 0x8c, 0x31, 0x29, 0x24, 0x27, 0xb1, 0x37, 0xa2, 0x73, 0xf5,
3646
+	0xcf, 0x2b, 0x6e, 0x7b, 0x99, 0xf4, 0x23, 0x9f, 0xcf, 0x75, 0xa2, 0x9e, 0xd1, 0x39, 0xbe, 0x65,
3647
+	0x04, 0x3a, 0x29, 0xff, 0x19, 0x9d, 0x0b, 0xf4, 0x39, 0x1c, 0xd2, 0x15, 0x4c, 0x29, 0x7a, 0x63,
3648
+	0x32, 0x51, 0x3f, 0x16, 0xcf, 0x1f, 0x33, 0x7f, 0xa4, 0x67, 0x9b, 0x8d, 0xef, 0xd0, 0xac, 0xd4,
3649
+	0x57, 0x09, 0xa2, 0xab, 0x00, 0x9d, 0xc3, 0xf3, 0x8b, 0xfa, 0xce, 0x1f, 0x17, 0xf5, 0x9d, 0x7f,
3650
+	0x2e, 0xea, 0xd6, 0x0f, 0xcb, 0xba, 0x75, 0xbe, 0xac, 0x5b, 0xbf, 0x2d, 0xeb, 0xd6, 0x9f, 0xcb,
3651
+	0xba, 0x75, 0x56, 0xd2, 0x8f, 0xe4, 0x8f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xb2, 0x97,
3652
+	0xcc, 0x94, 0x0b, 0x00, 0x00,
3650 3653
 }
... ...
@@ -57,6 +57,10 @@ message Service {
57 57
 
58 58
 	ServiceSpec spec = 3 [(gogoproto.nullable) = false];
59 59
 
60
+	// PreviousSpec is the previous service spec that was in place before
61
+	// "Spec".
62
+	ServiceSpec previous_spec = 6;
63
+
60 64
 	// Runtime state of service endpoint. This may be different
61 65
 	// from the spec version because the user may not have entered
62 66
 	// the optional fields like node_port or virtual_ip and it
... ...
@@ -23,10 +23,11 @@ import (
23 23
 	grpc "google.golang.org/grpc"
24 24
 )
25 25
 
26
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
26
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
27 27
 import codes "google.golang.org/grpc/codes"
28 28
 import metadata "google.golang.org/grpc/metadata"
29 29
 import transport "google.golang.org/grpc/transport"
30
+import time "time"
30 31
 
31 32
 import io "io"
32 33
 
... ...
@@ -163,7 +164,7 @@ func (m *InternalRaftRequest) Reset()                    { *m = InternalRaftRequ
163 163
 func (*InternalRaftRequest) ProtoMessage()               {}
164 164
 func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} }
165 165
 
166
-// StoreAction defines a taret and operation to apply on the storage system.
166
+// StoreAction defines a target and operation to apply on the storage system.
167 167
 type StoreAction struct {
168 168
 	Action StoreActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.StoreActionKind" json:"action,omitempty"`
169 169
 	// Types that are valid to be assigned to Target:
... ...
@@ -797,11 +798,12 @@ func valueToGoStringRaft(v interface{}, typ string) string {
797 797
 	pv := reflect.Indirect(rv).Interface()
798 798
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
799 799
 }
800
-func extensionToGoStringRaft(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
800
+func extensionToGoStringRaft(m github_com_gogo_protobuf_proto.Message) string {
801
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
801 802
 	if e == nil {
802 803
 		return "nil"
803 804
 	}
804
-	s := "map[int32]proto.Extension{"
805
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
805 806
 	keys := make([]int, 0, len(e))
806 807
 	for k := range e {
807 808
 		keys = append(keys, int(k))
... ...
@@ -811,7 +813,7 @@ func extensionToGoStringRaft(e map[int32]github_com_gogo_protobuf_proto.Extensio
811 811
 	for _, k := range keys {
812 812
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
813 813
 	}
814
-	s += strings.Join(ss, ",") + "}"
814
+	s += strings.Join(ss, ",") + "})"
815 815
 	return s
816 816
 }
817 817
 
... ...
@@ -821,7 +823,7 @@ var _ grpc.ClientConn
821 821
 
822 822
 // This is a compile-time assertion to ensure that this generated file
823 823
 // is compatible with the grpc package it is being compiled against.
824
-const _ = grpc.SupportPackageIsVersion2
824
+const _ = grpc.SupportPackageIsVersion3
825 825
 
826 826
 // Client API for Raft service
827 827
 
... ...
@@ -922,7 +924,8 @@ var _Raft_serviceDesc = grpc.ServiceDesc{
922 922
 			Handler:    _Raft_ResolveAddress_Handler,
923 923
 		},
924 924
 	},
925
-	Streams: []grpc.StreamDesc{},
925
+	Streams:  []grpc.StreamDesc{},
926
+	Metadata: fileDescriptorRaft,
926 927
 }
927 928
 
928 929
 // Client API for RaftMembership service
... ...
@@ -1022,7 +1025,8 @@ var _RaftMembership_serviceDesc = grpc.ServiceDesc{
1022 1022
 			Handler:    _RaftMembership_Leave_Handler,
1023 1023
 		},
1024 1024
 	},
1025
-	Streams: []grpc.StreamDesc{},
1025
+	Streams:  []grpc.StreamDesc{},
1026
+	Metadata: fileDescriptorRaft,
1026 1027
 }
1027 1028
 
1028 1029
 func (m *RaftMember) Marshal() (data []byte, err error) {
... ...
@@ -1438,12 +1442,11 @@ func encodeVarintRaft(data []byte, offset int, v uint64) int {
1438 1438
 
1439 1439
 type raftProxyRaftServer struct {
1440 1440
 	local        RaftServer
1441
-	connSelector raftpicker.Interface
1442
-	cluster      raftpicker.RaftCluster
1441
+	connSelector raftselector.ConnProvider
1443 1442
 	ctxMods      []func(context.Context) (context.Context, error)
1444 1443
 }
1445 1444
 
1446
-func NewRaftProxyRaftServer(local RaftServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) RaftServer {
1445
+func NewRaftProxyRaftServer(local RaftServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) RaftServer {
1447 1446
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
1448 1447
 		s, ok := transport.StreamFromContext(ctx)
1449 1448
 		if !ok {
... ...
@@ -1465,7 +1468,6 @@ func NewRaftProxyRaftServer(local RaftServer, connSelector raftpicker.Interface,
1465 1465
 
1466 1466
 	return &raftProxyRaftServer{
1467 1467
 		local:        local,
1468
-		cluster:      cluster,
1469 1468
 		connSelector: connSelector,
1470 1469
 		ctxMods:      mods,
1471 1470
 	}
... ...
@@ -1480,73 +1482,99 @@ func (p *raftProxyRaftServer) runCtxMods(ctx context.Context) (context.Context,
1480 1480
 	}
1481 1481
 	return ctx, nil
1482 1482
 }
1483
+func (p *raftProxyRaftServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
1484
+	ticker := time.NewTicker(500 * time.Millisecond)
1485
+	defer ticker.Stop()
1486
+	for {
1487
+		select {
1488
+		case <-ticker.C:
1489
+			conn, err := p.connSelector.LeaderConn(ctx)
1490
+			if err != nil {
1491
+				return nil, err
1492
+			}
1483 1493
 
1484
-func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) {
1494
+			client := NewHealthClient(conn)
1485 1495
 
1486
-	if p.cluster.IsLeader() {
1487
-		return p.local.ProcessRaftMessage(ctx, r)
1496
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
1497
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
1498
+				continue
1499
+			}
1500
+			return conn, nil
1501
+		case <-ctx.Done():
1502
+			return nil, ctx.Err()
1503
+		}
1488 1504
 	}
1489
-	ctx, err := p.runCtxMods(ctx)
1505
+}
1506
+
1507
+func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) {
1508
+
1509
+	conn, err := p.connSelector.LeaderConn(ctx)
1490 1510
 	if err != nil {
1511
+		if err == raftselector.ErrIsLeader {
1512
+			return p.local.ProcessRaftMessage(ctx, r)
1513
+		}
1491 1514
 		return nil, err
1492 1515
 	}
1493
-	conn, err := p.connSelector.Conn()
1516
+	modCtx, err := p.runCtxMods(ctx)
1494 1517
 	if err != nil {
1495 1518
 		return nil, err
1496 1519
 	}
1497 1520
 
1498
-	defer func() {
1521
+	resp, err := NewRaftClient(conn).ProcessRaftMessage(modCtx, r)
1522
+	if err != nil {
1523
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
1524
+			return resp, err
1525
+		}
1526
+		conn, err := p.pollNewLeaderConn(ctx)
1499 1527
 		if err != nil {
1500
-			errStr := err.Error()
1501
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1502
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1503
-				strings.Contains(errStr, "connection error") ||
1504
-				grpc.Code(err) == codes.Internal {
1505
-				p.connSelector.Reset()
1528
+			if err == raftselector.ErrIsLeader {
1529
+				return p.local.ProcessRaftMessage(ctx, r)
1506 1530
 			}
1531
+			return nil, err
1507 1532
 		}
1508
-	}()
1509
-
1510
-	return NewRaftClient(conn).ProcessRaftMessage(ctx, r)
1533
+		return NewRaftClient(conn).ProcessRaftMessage(modCtx, r)
1534
+	}
1535
+	return resp, err
1511 1536
 }
1512 1537
 
1513 1538
 func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) {
1514 1539
 
1515
-	if p.cluster.IsLeader() {
1516
-		return p.local.ResolveAddress(ctx, r)
1517
-	}
1518
-	ctx, err := p.runCtxMods(ctx)
1540
+	conn, err := p.connSelector.LeaderConn(ctx)
1519 1541
 	if err != nil {
1542
+		if err == raftselector.ErrIsLeader {
1543
+			return p.local.ResolveAddress(ctx, r)
1544
+		}
1520 1545
 		return nil, err
1521 1546
 	}
1522
-	conn, err := p.connSelector.Conn()
1547
+	modCtx, err := p.runCtxMods(ctx)
1523 1548
 	if err != nil {
1524 1549
 		return nil, err
1525 1550
 	}
1526 1551
 
1527
-	defer func() {
1552
+	resp, err := NewRaftClient(conn).ResolveAddress(modCtx, r)
1553
+	if err != nil {
1554
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
1555
+			return resp, err
1556
+		}
1557
+		conn, err := p.pollNewLeaderConn(ctx)
1528 1558
 		if err != nil {
1529
-			errStr := err.Error()
1530
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1531
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1532
-				strings.Contains(errStr, "connection error") ||
1533
-				grpc.Code(err) == codes.Internal {
1534
-				p.connSelector.Reset()
1559
+			if err == raftselector.ErrIsLeader {
1560
+				return p.local.ResolveAddress(ctx, r)
1535 1561
 			}
1562
+			return nil, err
1536 1563
 		}
1537
-	}()
1538
-
1539
-	return NewRaftClient(conn).ResolveAddress(ctx, r)
1564
+		return NewRaftClient(conn).ResolveAddress(modCtx, r)
1565
+	}
1566
+	return resp, err
1540 1567
 }
1541 1568
 
1542 1569
 type raftProxyRaftMembershipServer struct {
1543 1570
 	local        RaftMembershipServer
1544
-	connSelector raftpicker.Interface
1545
-	cluster      raftpicker.RaftCluster
1571
+	connSelector raftselector.ConnProvider
1546 1572
 	ctxMods      []func(context.Context) (context.Context, error)
1547 1573
 }
1548 1574
 
1549
-func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) RaftMembershipServer {
1575
+func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) RaftMembershipServer {
1550 1576
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
1551 1577
 		s, ok := transport.StreamFromContext(ctx)
1552 1578
 		if !ok {
... ...
@@ -1568,7 +1596,6 @@ func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector r
1568 1568
 
1569 1569
 	return &raftProxyRaftMembershipServer{
1570 1570
 		local:        local,
1571
-		cluster:      cluster,
1572 1571
 		connSelector: connSelector,
1573 1572
 		ctxMods:      mods,
1574 1573
 	}
... ...
@@ -1583,63 +1610,90 @@ func (p *raftProxyRaftMembershipServer) runCtxMods(ctx context.Context) (context
1583 1583
 	}
1584 1584
 	return ctx, nil
1585 1585
 }
1586
+func (p *raftProxyRaftMembershipServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
1587
+	ticker := time.NewTicker(500 * time.Millisecond)
1588
+	defer ticker.Stop()
1589
+	for {
1590
+		select {
1591
+		case <-ticker.C:
1592
+			conn, err := p.connSelector.LeaderConn(ctx)
1593
+			if err != nil {
1594
+				return nil, err
1595
+			}
1586 1596
 
1587
-func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) {
1597
+			client := NewHealthClient(conn)
1588 1598
 
1589
-	if p.cluster.IsLeader() {
1590
-		return p.local.Join(ctx, r)
1599
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
1600
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
1601
+				continue
1602
+			}
1603
+			return conn, nil
1604
+		case <-ctx.Done():
1605
+			return nil, ctx.Err()
1606
+		}
1591 1607
 	}
1592
-	ctx, err := p.runCtxMods(ctx)
1608
+}
1609
+
1610
+func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) {
1611
+
1612
+	conn, err := p.connSelector.LeaderConn(ctx)
1593 1613
 	if err != nil {
1614
+		if err == raftselector.ErrIsLeader {
1615
+			return p.local.Join(ctx, r)
1616
+		}
1594 1617
 		return nil, err
1595 1618
 	}
1596
-	conn, err := p.connSelector.Conn()
1619
+	modCtx, err := p.runCtxMods(ctx)
1597 1620
 	if err != nil {
1598 1621
 		return nil, err
1599 1622
 	}
1600 1623
 
1601
-	defer func() {
1624
+	resp, err := NewRaftMembershipClient(conn).Join(modCtx, r)
1625
+	if err != nil {
1626
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
1627
+			return resp, err
1628
+		}
1629
+		conn, err := p.pollNewLeaderConn(ctx)
1602 1630
 		if err != nil {
1603
-			errStr := err.Error()
1604
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1605
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1606
-				strings.Contains(errStr, "connection error") ||
1607
-				grpc.Code(err) == codes.Internal {
1608
-				p.connSelector.Reset()
1631
+			if err == raftselector.ErrIsLeader {
1632
+				return p.local.Join(ctx, r)
1609 1633
 			}
1634
+			return nil, err
1610 1635
 		}
1611
-	}()
1612
-
1613
-	return NewRaftMembershipClient(conn).Join(ctx, r)
1636
+		return NewRaftMembershipClient(conn).Join(modCtx, r)
1637
+	}
1638
+	return resp, err
1614 1639
 }
1615 1640
 
1616 1641
 func (p *raftProxyRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) {
1617 1642
 
1618
-	if p.cluster.IsLeader() {
1619
-		return p.local.Leave(ctx, r)
1620
-	}
1621
-	ctx, err := p.runCtxMods(ctx)
1643
+	conn, err := p.connSelector.LeaderConn(ctx)
1622 1644
 	if err != nil {
1645
+		if err == raftselector.ErrIsLeader {
1646
+			return p.local.Leave(ctx, r)
1647
+		}
1623 1648
 		return nil, err
1624 1649
 	}
1625
-	conn, err := p.connSelector.Conn()
1650
+	modCtx, err := p.runCtxMods(ctx)
1626 1651
 	if err != nil {
1627 1652
 		return nil, err
1628 1653
 	}
1629 1654
 
1630
-	defer func() {
1655
+	resp, err := NewRaftMembershipClient(conn).Leave(modCtx, r)
1656
+	if err != nil {
1657
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
1658
+			return resp, err
1659
+		}
1660
+		conn, err := p.pollNewLeaderConn(ctx)
1631 1661
 		if err != nil {
1632
-			errStr := err.Error()
1633
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
1634
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
1635
-				strings.Contains(errStr, "connection error") ||
1636
-				grpc.Code(err) == codes.Internal {
1637
-				p.connSelector.Reset()
1662
+			if err == raftselector.ErrIsLeader {
1663
+				return p.local.Leave(ctx, r)
1638 1664
 			}
1665
+			return nil, err
1639 1666
 		}
1640
-	}()
1641
-
1642
-	return NewRaftMembershipClient(conn).Leave(ctx, r)
1667
+		return NewRaftMembershipClient(conn).Leave(modCtx, r)
1668
+	}
1669
+	return resp, err
1643 1670
 }
1644 1671
 
1645 1672
 func (m *RaftMember) Size() (n int) {
... ...
@@ -3205,6 +3259,8 @@ var (
3205 3205
 	ErrIntOverflowRaft   = fmt.Errorf("proto: integer overflow")
3206 3206
 )
3207 3207
 
3208
+func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }
3209
+
3208 3210
 var fileDescriptorRaft = []byte{
3209 3211
 	// 868 bytes of a gzipped FileDescriptorProto
3210 3212
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x95, 0x4f, 0x73, 0xdb, 0x44,
... ...
@@ -115,7 +115,7 @@ enum StoreActionKind {
115 115
 	STORE_ACTION_REMOVE = 3 [(gogoproto.enumvalue_customname) = "StoreActionKindRemove"];
116 116
 }
117 117
 
118
-// StoreAction defines a taret and operation to apply on the storage system.
118
+// StoreAction defines a target and operation to apply on the storage system.
119 119
 message StoreAction {
120 120
 	StoreActionKind action = 1;
121 121
 	oneof target {
... ...
@@ -21,10 +21,11 @@ import (
21 21
 	grpc "google.golang.org/grpc"
22 22
 )
23 23
 
24
-import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
24
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
25 25
 import codes "google.golang.org/grpc/codes"
26 26
 import metadata "google.golang.org/grpc/metadata"
27 27
 import transport "google.golang.org/grpc/transport"
28
+import time "time"
28 29
 
29 30
 import io "io"
30 31
 
... ...
@@ -197,11 +198,12 @@ func valueToGoStringResource(v interface{}, typ string) string {
197 197
 	pv := reflect.Indirect(rv).Interface()
198 198
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
199 199
 }
200
-func extensionToGoStringResource(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
200
+func extensionToGoStringResource(m github_com_gogo_protobuf_proto.Message) string {
201
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
201 202
 	if e == nil {
202 203
 		return "nil"
203 204
 	}
204
-	s := "map[int32]proto.Extension{"
205
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
205 206
 	keys := make([]int, 0, len(e))
206 207
 	for k := range e {
207 208
 		keys = append(keys, int(k))
... ...
@@ -211,7 +213,7 @@ func extensionToGoStringResource(e map[int32]github_com_gogo_protobuf_proto.Exte
211 211
 	for _, k := range keys {
212 212
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
213 213
 	}
214
-	s += strings.Join(ss, ",") + "}"
214
+	s += strings.Join(ss, ",") + "})"
215 215
 	return s
216 216
 }
217 217
 
... ...
@@ -221,7 +223,7 @@ var _ grpc.ClientConn
221 221
 
222 222
 // This is a compile-time assertion to ensure that this generated file
223 223
 // is compatible with the grpc package it is being compiled against.
224
-const _ = grpc.SupportPackageIsVersion2
224
+const _ = grpc.SupportPackageIsVersion3
225 225
 
226 226
 // Client API for ResourceAllocator service
227 227
 
... ...
@@ -316,7 +318,8 @@ var _ResourceAllocator_serviceDesc = grpc.ServiceDesc{
316 316
 			Handler:    _ResourceAllocator_DetachNetwork_Handler,
317 317
 		},
318 318
 	},
319
-	Streams: []grpc.StreamDesc{},
319
+	Streams:  []grpc.StreamDesc{},
320
+	Metadata: fileDescriptorResource,
320 321
 }
321 322
 
322 323
 func (m *AttachNetworkRequest) Marshal() (data []byte, err error) {
... ...
@@ -449,12 +452,11 @@ func encodeVarintResource(data []byte, offset int, v uint64) int {
449 449
 
450 450
 type raftProxyResourceAllocatorServer struct {
451 451
 	local        ResourceAllocatorServer
452
-	connSelector raftpicker.Interface
453
-	cluster      raftpicker.RaftCluster
452
+	connSelector raftselector.ConnProvider
454 453
 	ctxMods      []func(context.Context) (context.Context, error)
455 454
 }
456 455
 
457
-func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftpicker.Interface, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer {
456
+func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, ctxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer {
458 457
 	redirectChecker := func(ctx context.Context) (context.Context, error) {
459 458
 		s, ok := transport.StreamFromContext(ctx)
460 459
 		if !ok {
... ...
@@ -476,7 +478,6 @@ func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSele
476 476
 
477 477
 	return &raftProxyResourceAllocatorServer{
478 478
 		local:        local,
479
-		cluster:      cluster,
480 479
 		connSelector: connSelector,
481 480
 		ctxMods:      mods,
482 481
 	}
... ...
@@ -491,63 +492,90 @@ func (p *raftProxyResourceAllocatorServer) runCtxMods(ctx context.Context) (cont
491 491
 	}
492 492
 	return ctx, nil
493 493
 }
494
+func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
495
+	ticker := time.NewTicker(500 * time.Millisecond)
496
+	defer ticker.Stop()
497
+	for {
498
+		select {
499
+		case <-ticker.C:
500
+			conn, err := p.connSelector.LeaderConn(ctx)
501
+			if err != nil {
502
+				return nil, err
503
+			}
494 504
 
495
-func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) {
505
+			client := NewHealthClient(conn)
496 506
 
497
-	if p.cluster.IsLeader() {
498
-		return p.local.AttachNetwork(ctx, r)
507
+			resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"})
508
+			if err != nil || resp.Status != HealthCheckResponse_SERVING {
509
+				continue
510
+			}
511
+			return conn, nil
512
+		case <-ctx.Done():
513
+			return nil, ctx.Err()
514
+		}
499 515
 	}
500
-	ctx, err := p.runCtxMods(ctx)
516
+}
517
+
518
+func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) {
519
+
520
+	conn, err := p.connSelector.LeaderConn(ctx)
501 521
 	if err != nil {
522
+		if err == raftselector.ErrIsLeader {
523
+			return p.local.AttachNetwork(ctx, r)
524
+		}
502 525
 		return nil, err
503 526
 	}
504
-	conn, err := p.connSelector.Conn()
527
+	modCtx, err := p.runCtxMods(ctx)
505 528
 	if err != nil {
506 529
 		return nil, err
507 530
 	}
508 531
 
509
-	defer func() {
532
+	resp, err := NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r)
533
+	if err != nil {
534
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
535
+			return resp, err
536
+		}
537
+		conn, err := p.pollNewLeaderConn(ctx)
510 538
 		if err != nil {
511
-			errStr := err.Error()
512
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
513
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
514
-				strings.Contains(errStr, "connection error") ||
515
-				grpc.Code(err) == codes.Internal {
516
-				p.connSelector.Reset()
539
+			if err == raftselector.ErrIsLeader {
540
+				return p.local.AttachNetwork(ctx, r)
517 541
 			}
542
+			return nil, err
518 543
 		}
519
-	}()
520
-
521
-	return NewResourceAllocatorClient(conn).AttachNetwork(ctx, r)
544
+		return NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r)
545
+	}
546
+	return resp, err
522 547
 }
523 548
 
524 549
 func (p *raftProxyResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) {
525 550
 
526
-	if p.cluster.IsLeader() {
527
-		return p.local.DetachNetwork(ctx, r)
528
-	}
529
-	ctx, err := p.runCtxMods(ctx)
551
+	conn, err := p.connSelector.LeaderConn(ctx)
530 552
 	if err != nil {
553
+		if err == raftselector.ErrIsLeader {
554
+			return p.local.DetachNetwork(ctx, r)
555
+		}
531 556
 		return nil, err
532 557
 	}
533
-	conn, err := p.connSelector.Conn()
558
+	modCtx, err := p.runCtxMods(ctx)
534 559
 	if err != nil {
535 560
 		return nil, err
536 561
 	}
537 562
 
538
-	defer func() {
563
+	resp, err := NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r)
564
+	if err != nil {
565
+		if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") {
566
+			return resp, err
567
+		}
568
+		conn, err := p.pollNewLeaderConn(ctx)
539 569
 		if err != nil {
540
-			errStr := err.Error()
541
-			if strings.Contains(errStr, grpc.ErrClientConnClosing.Error()) ||
542
-				strings.Contains(errStr, grpc.ErrClientConnTimeout.Error()) ||
543
-				strings.Contains(errStr, "connection error") ||
544
-				grpc.Code(err) == codes.Internal {
545
-				p.connSelector.Reset()
570
+			if err == raftselector.ErrIsLeader {
571
+				return p.local.DetachNetwork(ctx, r)
546 572
 			}
573
+			return nil, err
547 574
 		}
548
-	}()
549
-
550
-	return NewResourceAllocatorClient(conn).DetachNetwork(ctx, r)
575
+		return NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r)
576
+	}
577
+	return resp, err
551 578
 }
552 579
 
553 580
 func (m *AttachNetworkRequest) Size() (n int) {
... ...
@@ -1076,6 +1104,8 @@ var (
1076 1076
 	ErrIntOverflowResource   = fmt.Errorf("proto: integer overflow")
1077 1077
 )
1078 1078
 
1079
+func init() { proto.RegisterFile("resource.proto", fileDescriptorResource) }
1080
+
1079 1081
 var fileDescriptorResource = []byte{
1080 1082
 	// 373 bytes of a gzipped FileDescriptorProto
1081 1083
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x4a, 0x2d, 0xce,
... ...
@@ -222,11 +222,12 @@ func valueToGoStringSnapshot(v interface{}, typ string) string {
222 222
 	pv := reflect.Indirect(rv).Interface()
223 223
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
224 224
 }
225
-func extensionToGoStringSnapshot(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
225
+func extensionToGoStringSnapshot(m github_com_gogo_protobuf_proto.Message) string {
226
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
226 227
 	if e == nil {
227 228
 		return "nil"
228 229
 	}
229
-	s := "map[int32]proto.Extension{"
230
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
230 231
 	keys := make([]int, 0, len(e))
231 232
 	for k := range e {
232 233
 		keys = append(keys, int(k))
... ...
@@ -236,7 +237,7 @@ func extensionToGoStringSnapshot(e map[int32]github_com_gogo_protobuf_proto.Exte
236 236
 	for _, k := range keys {
237 237
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
238 238
 	}
239
-	s += strings.Join(ss, ",") + "}"
239
+	s += strings.Join(ss, ",") + "})"
240 240
 	return s
241 241
 }
242 242
 func (m *StoreSnapshot) Marshal() (data []byte, err error) {
... ...
@@ -1085,6 +1086,8 @@ var (
1085 1085
 	ErrIntOverflowSnapshot   = fmt.Errorf("proto: integer overflow")
1086 1086
 )
1087 1087
 
1088
+func init() { proto.RegisterFile("snapshot.proto", fileDescriptorSnapshot) }
1089
+
1088 1090
 var fileDescriptorSnapshot = []byte{
1089 1091
 	// 396 bytes of a gzipped FileDescriptorProto
1090 1092
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x92, 0xbd, 0x6e, 0xdb, 0x30,
... ...
@@ -1047,11 +1047,12 @@ func valueToGoStringSpecs(v interface{}, typ string) string {
1047 1047
 	pv := reflect.Indirect(rv).Interface()
1048 1048
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
1049 1049
 }
1050
-func extensionToGoStringSpecs(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
1050
+func extensionToGoStringSpecs(m github_com_gogo_protobuf_proto.Message) string {
1051
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
1051 1052
 	if e == nil {
1052 1053
 		return "nil"
1053 1054
 	}
1054
-	s := "map[int32]proto.Extension{"
1055
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
1055 1056
 	keys := make([]int, 0, len(e))
1056 1057
 	for k := range e {
1057 1058
 		keys = append(keys, int(k))
... ...
@@ -1061,7 +1062,7 @@ func extensionToGoStringSpecs(e map[int32]github_com_gogo_protobuf_proto.Extensi
1061 1061
 	for _, k := range keys {
1062 1062
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
1063 1063
 	}
1064
-	s += strings.Join(ss, ",") + "}"
1064
+	s += strings.Join(ss, ",") + "})"
1065 1065
 	return s
1066 1066
 }
1067 1067
 func (m *NodeSpec) Marshal() (data []byte, err error) {
... ...
@@ -3252,50 +3253,55 @@ func (m *ContainerSpec) Unmarshal(data []byte) error {
3252 3252
 			}
3253 3253
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
3254 3254
 			iNdEx = postStringIndexmapkey
3255
-			var valuekey uint64
3256
-			for shift := uint(0); ; shift += 7 {
3257
-				if shift >= 64 {
3258
-					return ErrIntOverflowSpecs
3259
-				}
3260
-				if iNdEx >= l {
3261
-					return io.ErrUnexpectedEOF
3255
+			if m.Labels == nil {
3256
+				m.Labels = make(map[string]string)
3257
+			}
3258
+			if iNdEx < postIndex {
3259
+				var valuekey uint64
3260
+				for shift := uint(0); ; shift += 7 {
3261
+					if shift >= 64 {
3262
+						return ErrIntOverflowSpecs
3263
+					}
3264
+					if iNdEx >= l {
3265
+						return io.ErrUnexpectedEOF
3266
+					}
3267
+					b := data[iNdEx]
3268
+					iNdEx++
3269
+					valuekey |= (uint64(b) & 0x7F) << shift
3270
+					if b < 0x80 {
3271
+						break
3272
+					}
3262 3273
 				}
3263
-				b := data[iNdEx]
3264
-				iNdEx++
3265
-				valuekey |= (uint64(b) & 0x7F) << shift
3266
-				if b < 0x80 {
3267
-					break
3274
+				var stringLenmapvalue uint64
3275
+				for shift := uint(0); ; shift += 7 {
3276
+					if shift >= 64 {
3277
+						return ErrIntOverflowSpecs
3278
+					}
3279
+					if iNdEx >= l {
3280
+						return io.ErrUnexpectedEOF
3281
+					}
3282
+					b := data[iNdEx]
3283
+					iNdEx++
3284
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
3285
+					if b < 0x80 {
3286
+						break
3287
+					}
3268 3288
 				}
3269
-			}
3270
-			var stringLenmapvalue uint64
3271
-			for shift := uint(0); ; shift += 7 {
3272
-				if shift >= 64 {
3273
-					return ErrIntOverflowSpecs
3289
+				intStringLenmapvalue := int(stringLenmapvalue)
3290
+				if intStringLenmapvalue < 0 {
3291
+					return ErrInvalidLengthSpecs
3274 3292
 				}
3275
-				if iNdEx >= l {
3293
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
3294
+				if postStringIndexmapvalue > l {
3276 3295
 					return io.ErrUnexpectedEOF
3277 3296
 				}
3278
-				b := data[iNdEx]
3279
-				iNdEx++
3280
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
3281
-				if b < 0x80 {
3282
-					break
3283
-				}
3284
-			}
3285
-			intStringLenmapvalue := int(stringLenmapvalue)
3286
-			if intStringLenmapvalue < 0 {
3287
-				return ErrInvalidLengthSpecs
3297
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
3298
+				iNdEx = postStringIndexmapvalue
3299
+				m.Labels[mapkey] = mapvalue
3300
+			} else {
3301
+				var mapvalue string
3302
+				m.Labels[mapkey] = mapvalue
3288 3303
 			}
3289
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
3290
-			if postStringIndexmapvalue > l {
3291
-				return io.ErrUnexpectedEOF
3292
-			}
3293
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
3294
-			iNdEx = postStringIndexmapvalue
3295
-			if m.Labels == nil {
3296
-				m.Labels = make(map[string]string)
3297
-			}
3298
-			m.Labels[mapkey] = mapvalue
3299 3304
 			iNdEx = postIndex
3300 3305
 		case 3:
3301 3306
 			if wireType != 2 {
... ...
@@ -4339,6 +4345,8 @@ var (
4339 4339
 	ErrIntOverflowSpecs   = fmt.Errorf("proto: integer overflow")
4340 4340
 )
4341 4341
 
4342
+func init() { proto.RegisterFile("specs.proto", fileDescriptorSpecs) }
4343
+
4342 4344
 var fileDescriptorSpecs = []byte{
4343 4345
 	// 1397 bytes of a gzipped FileDescriptorProto
4344 4346
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x6f, 0xdb, 0xc6,
... ...
@@ -32,7 +32,9 @@ var _ = math.Inf
32 32
 
33 33
 // This is a compile-time assertion to ensure that this generated file
34 34
 // is compatible with the proto package it is being compiled against.
35
-const _ = proto.GoGoProtoPackageIsVersion1
35
+// A compilation error at this line likely means your copy of the
36
+// proto package needs to be updated.
37
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
36 38
 
37 39
 // A Timestamp represents a point in time independent of any time zone
38 40
 // or calendar, represented as seconds and fractions of seconds at
... ...
@@ -141,11 +143,12 @@ func valueToGoStringTimestamp(v interface{}, typ string) string {
141 141
 	pv := reflect.Indirect(rv).Interface()
142 142
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
143 143
 }
144
-func extensionToGoStringTimestamp(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
144
+func extensionToGoStringTimestamp(m github_com_gogo_protobuf_proto.Message) string {
145
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
145 146
 	if e == nil {
146 147
 		return "nil"
147 148
 	}
148
-	s := "map[int32]proto.Extension{"
149
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
149 150
 	keys := make([]int, 0, len(e))
150 151
 	for k := range e {
151 152
 		keys = append(keys, int(k))
... ...
@@ -155,7 +158,7 @@ func extensionToGoStringTimestamp(e map[int32]github_com_gogo_protobuf_proto.Ext
155 155
 	for _, k := range keys {
156 156
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
157 157
 	}
158
-	s += strings.Join(ss, ",") + "}"
158
+	s += strings.Join(ss, ",") + "})"
159 159
 	return s
160 160
 }
161 161
 func (m *Timestamp) Marshal() (data []byte, err error) {
... ...
@@ -451,6 +454,8 @@ var (
451 451
 	ErrIntOverflowTimestamp   = fmt.Errorf("proto: integer overflow")
452 452
 )
453 453
 
454
+func init() { proto.RegisterFile("timestamp.proto", fileDescriptorTimestamp) }
455
+
454 456
 var fileDescriptorTimestamp = []byte{
455 457
 	// 205 bytes of a gzipped FileDescriptorProto
456 458
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d,
... ...
@@ -121,6 +121,8 @@
121 121
 		UpdateTaskStatusResponse
122 122
 		TasksRequest
123 123
 		TasksMessage
124
+		AssignmentsRequest
125
+		AssignmentsMessage
124 126
 		NodeCertificateStatusRequest
125 127
 		NodeCertificateStatusResponse
126 128
 		IssueNodeCertificateRequest
... ...
@@ -175,7 +177,9 @@ var _ = math.Inf
175 175
 
176 176
 // This is a compile-time assertion to ensure that this generated file
177 177
 // is compatible with the proto package it is being compiled against.
178
-const _ = proto.GoGoProtoPackageIsVersion1
178
+// A compilation error at this line likely means your copy of the
179
+// proto package needs to be updated.
180
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
179 181
 
180 182
 // TaskState enumerates the states that a task progresses through within an
181 183
 // agent. States are designed to be monotonically increasing, such that if two
... ...
@@ -408,15 +412,24 @@ type UpdateConfig_FailureAction int32
408 408
 const (
409 409
 	UpdateConfig_PAUSE    UpdateConfig_FailureAction = 0
410 410
 	UpdateConfig_CONTINUE UpdateConfig_FailureAction = 1
411
+	// NOTE: Automated rollback triggered as a failure action is an
412
+	// experimental feature that is not yet exposed to the end
413
+	// user. Currently, rollbacks must be initiated manually
414
+	// through the API by setting Spec to PreviousSpec. We may
415
+	// decide to expose automatic rollback in the future based on
416
+	// user feedback, or remove this feature otherwise.
417
+	UpdateConfig_ROLLBACK UpdateConfig_FailureAction = 2
411 418
 )
412 419
 
413 420
 var UpdateConfig_FailureAction_name = map[int32]string{
414 421
 	0: "PAUSE",
415 422
 	1: "CONTINUE",
423
+	2: "ROLLBACK",
416 424
 }
417 425
 var UpdateConfig_FailureAction_value = map[string]int32{
418 426
 	"PAUSE":    0,
419 427
 	"CONTINUE": 1,
428
+	"ROLLBACK": 2,
420 429
 }
421 430
 
422 431
 func (x UpdateConfig_FailureAction) String() string {
... ...
@@ -429,10 +442,13 @@ func (UpdateConfig_FailureAction) EnumDescriptor() ([]byte, []int) {
429 429
 type UpdateStatus_UpdateState int32
430 430
 
431 431
 const (
432
-	UpdateStatus_UNKNOWN   UpdateStatus_UpdateState = 0
433
-	UpdateStatus_UPDATING  UpdateStatus_UpdateState = 1
434
-	UpdateStatus_PAUSED    UpdateStatus_UpdateState = 2
435
-	UpdateStatus_COMPLETED UpdateStatus_UpdateState = 3
432
+	UpdateStatus_UNKNOWN            UpdateStatus_UpdateState = 0
433
+	UpdateStatus_UPDATING           UpdateStatus_UpdateState = 1
434
+	UpdateStatus_PAUSED             UpdateStatus_UpdateState = 2
435
+	UpdateStatus_COMPLETED          UpdateStatus_UpdateState = 3
436
+	UpdateStatus_ROLLBACK_STARTED   UpdateStatus_UpdateState = 4
437
+	UpdateStatus_ROLLBACK_PAUSED    UpdateStatus_UpdateState = 5
438
+	UpdateStatus_ROLLBACK_COMPLETED UpdateStatus_UpdateState = 6
436 439
 )
437 440
 
438 441
 var UpdateStatus_UpdateState_name = map[int32]string{
... ...
@@ -440,12 +456,18 @@ var UpdateStatus_UpdateState_name = map[int32]string{
440 440
 	1: "UPDATING",
441 441
 	2: "PAUSED",
442 442
 	3: "COMPLETED",
443
+	4: "ROLLBACK_STARTED",
444
+	5: "ROLLBACK_PAUSED",
445
+	6: "ROLLBACK_COMPLETED",
443 446
 }
444 447
 var UpdateStatus_UpdateState_value = map[string]int32{
445
-	"UNKNOWN":   0,
446
-	"UPDATING":  1,
447
-	"PAUSED":    2,
448
-	"COMPLETED": 3,
448
+	"UNKNOWN":            0,
449
+	"UPDATING":           1,
450
+	"PAUSED":             2,
451
+	"COMPLETED":          3,
452
+	"ROLLBACK_STARTED":   4,
453
+	"ROLLBACK_PAUSED":    5,
454
+	"ROLLBACK_COMPLETED": 6,
449 455
 }
450 456
 
451 457
 func (x UpdateStatus_UpdateState) String() string {
... ...
@@ -804,10 +826,34 @@ type UpdateConfig struct {
804 804
 	// Amount of time between updates.
805 805
 	Delay docker_swarmkit_v11.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay"`
806 806
 	// FailureAction is the action to take when an update failures.
807
-	// Currently, a failure is defined as a single updated task failing to
808
-	// reach the RUNNING state. In the future, there will be configuration
809
-	// to define what is treated as a failure (see #486 for a proposal).
810 807
 	FailureAction UpdateConfig_FailureAction `protobuf:"varint,3,opt,name=failure_action,json=failureAction,proto3,enum=docker.swarmkit.v1.UpdateConfig_FailureAction" json:"failure_action,omitempty"`
808
+	// Monitor indicates how long to monitor a task for failure after it is
809
+	// created. If the task fails by ending up in one of the states
810
+	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
811
+	// this counts as a failure. If it fails after Monitor, it does not
812
+	// count as a failure. If Monitor is unspecified, a default value will
813
+	// be used.
814
+	Monitor *docker_swarmkit_v11.Duration `protobuf:"bytes,4,opt,name=monitor" json:"monitor,omitempty"`
815
+	// AllowedFailureFraction is the fraction of tasks that may fail during
816
+	// an update before the failure action is invoked. Any task created by
817
+	// the current update which ends up in one of the states REJECTED,
818
+	// COMPLETED or FAILED within Monitor from its creation counts as a
819
+	// failure. The number of failures is divided by the number of tasks
820
+	// being updated, and if this fraction is greater than
821
+	// AllowedFailureFraction, the failure action is invoked.
822
+	//
823
+	// If the failure action is CONTINUE, there is no effect.
824
+	// If the failure action is PAUSE, no more tasks will be updated until
825
+	// another update is started.
826
+	// If the failure action is ROLLBACK, the orchestrator will attempt to
827
+	// roll back to the previous service spec. If the AllowedFailureFraction
828
+	// threshold is hit during the rollback, the rollback will pause.
829
+	//
830
+	// TODO(aaronl): Should there be a separate failure threshold for
831
+	// rollbacks? Should there be a failure action for rollbacks (to allow
832
+	// them to do something other than pause when the rollback encounters
833
+	// errors)?
834
+	AllowedFailureFraction float32 `protobuf:"fixed32,5,opt,name=allowed_failure_fraction,json=allowedFailureFraction,proto3" json:"allowed_failure_fraction,omitempty"`
811 835
 }
812 836
 
813 837
 func (m *UpdateConfig) Reset()                    { *m = UpdateConfig{} }
... ...
@@ -817,11 +863,13 @@ func (*UpdateConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes,
817 817
 // UpdateStatus is the status of an update in progress.
818 818
 type UpdateStatus struct {
819 819
 	// State is the state of this update. It indicates whether the
820
-	// update is in progress, completed, or is paused.
820
+	// update is in progress, completed, paused, rolling back, or
821
+	// finished rolling back.
821 822
 	State UpdateStatus_UpdateState `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.UpdateStatus_UpdateState" json:"state,omitempty"`
822 823
 	// StartedAt is the time at which the update was started.
823 824
 	StartedAt *docker_swarmkit_v1.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt" json:"started_at,omitempty"`
824
-	// CompletedAt is the time at which the update completed.
825
+	// CompletedAt is the time at which the update completed successfully,
826
+	// paused, or finished rolling back.
825 827
 	CompletedAt *docker_swarmkit_v1.Timestamp `protobuf:"bytes,3,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"`
826 828
 	// Message explains how the update got into its current state. For
827 829
 	// example, if the update is paused, it will explain what is preventing
... ...
@@ -1594,9 +1642,11 @@ func (m *UpdateConfig) Copy() *UpdateConfig {
1594 1594
 	}
1595 1595
 
1596 1596
 	o := &UpdateConfig{
1597
-		Parallelism:   m.Parallelism,
1598
-		Delay:         *m.Delay.Copy(),
1599
-		FailureAction: m.FailureAction,
1597
+		Parallelism:            m.Parallelism,
1598
+		Delay:                  *m.Delay.Copy(),
1599
+		FailureAction:          m.FailureAction,
1600
+		Monitor:                m.Monitor.Copy(),
1601
+		AllowedFailureFraction: m.AllowedFailureFraction,
1600 1602
 	}
1601 1603
 
1602 1604
 	return o
... ...
@@ -2270,11 +2320,15 @@ func (this *UpdateConfig) GoString() string {
2270 2270
 	if this == nil {
2271 2271
 		return "nil"
2272 2272
 	}
2273
-	s := make([]string, 0, 7)
2273
+	s := make([]string, 0, 9)
2274 2274
 	s = append(s, "&api.UpdateConfig{")
2275 2275
 	s = append(s, "Parallelism: "+fmt.Sprintf("%#v", this.Parallelism)+",\n")
2276 2276
 	s = append(s, "Delay: "+strings.Replace(this.Delay.GoString(), `&`, ``, 1)+",\n")
2277 2277
 	s = append(s, "FailureAction: "+fmt.Sprintf("%#v", this.FailureAction)+",\n")
2278
+	if this.Monitor != nil {
2279
+		s = append(s, "Monitor: "+fmt.Sprintf("%#v", this.Monitor)+",\n")
2280
+	}
2281
+	s = append(s, "AllowedFailureFraction: "+fmt.Sprintf("%#v", this.AllowedFailureFraction)+",\n")
2278 2282
 	s = append(s, "}")
2279 2283
 	return strings.Join(s, "")
2280 2284
 }
... ...
@@ -2663,11 +2717,12 @@ func valueToGoStringTypes(v interface{}, typ string) string {
2663 2663
 	pv := reflect.Indirect(rv).Interface()
2664 2664
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
2665 2665
 }
2666
-func extensionToGoStringTypes(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
2666
+func extensionToGoStringTypes(m github_com_gogo_protobuf_proto.Message) string {
2667
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
2667 2668
 	if e == nil {
2668 2669
 		return "nil"
2669 2670
 	}
2670
-	s := "map[int32]proto.Extension{"
2671
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
2671 2672
 	keys := make([]int, 0, len(e))
2672 2673
 	for k := range e {
2673 2674
 		keys = append(keys, int(k))
... ...
@@ -2677,7 +2732,7 @@ func extensionToGoStringTypes(e map[int32]github_com_gogo_protobuf_proto.Extensi
2677 2677
 	for _, k := range keys {
2678 2678
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
2679 2679
 	}
2680
-	s += strings.Join(ss, ",") + "}"
2680
+	s += strings.Join(ss, ",") + "})"
2681 2681
 	return s
2682 2682
 }
2683 2683
 func (m *Version) Marshal() (data []byte, err error) {
... ...
@@ -3331,6 +3386,21 @@ func (m *UpdateConfig) MarshalTo(data []byte) (int, error) {
3331 3331
 		i++
3332 3332
 		i = encodeVarintTypes(data, i, uint64(m.FailureAction))
3333 3333
 	}
3334
+	if m.Monitor != nil {
3335
+		data[i] = 0x22
3336
+		i++
3337
+		i = encodeVarintTypes(data, i, uint64(m.Monitor.Size()))
3338
+		n13, err := m.Monitor.MarshalTo(data[i:])
3339
+		if err != nil {
3340
+			return 0, err
3341
+		}
3342
+		i += n13
3343
+	}
3344
+	if m.AllowedFailureFraction != 0 {
3345
+		data[i] = 0x2d
3346
+		i++
3347
+		i = encodeFixed32Types(data, i, uint32(math.Float32bits(float32(m.AllowedFailureFraction))))
3348
+	}
3334 3349
 	return i, nil
3335 3350
 }
3336 3351
 
... ...
@@ -3358,21 +3428,21 @@ func (m *UpdateStatus) MarshalTo(data []byte) (int, error) {
3358 3358
 		data[i] = 0x12
3359 3359
 		i++
3360 3360
 		i = encodeVarintTypes(data, i, uint64(m.StartedAt.Size()))
3361
-		n13, err := m.StartedAt.MarshalTo(data[i:])
3361
+		n14, err := m.StartedAt.MarshalTo(data[i:])
3362 3362
 		if err != nil {
3363 3363
 			return 0, err
3364 3364
 		}
3365
-		i += n13
3365
+		i += n14
3366 3366
 	}
3367 3367
 	if m.CompletedAt != nil {
3368 3368
 		data[i] = 0x1a
3369 3369
 		i++
3370 3370
 		i = encodeVarintTypes(data, i, uint64(m.CompletedAt.Size()))
3371
-		n14, err := m.CompletedAt.MarshalTo(data[i:])
3371
+		n15, err := m.CompletedAt.MarshalTo(data[i:])
3372 3372
 		if err != nil {
3373 3373
 			return 0, err
3374 3374
 		}
3375
-		i += n14
3375
+		i += n15
3376 3376
 	}
3377 3377
 	if len(m.Message) > 0 {
3378 3378
 		data[i] = 0x22
... ...
@@ -3436,11 +3506,11 @@ func (m *TaskStatus) MarshalTo(data []byte) (int, error) {
3436 3436
 		data[i] = 0xa
3437 3437
 		i++
3438 3438
 		i = encodeVarintTypes(data, i, uint64(m.Timestamp.Size()))
3439
-		n15, err := m.Timestamp.MarshalTo(data[i:])
3439
+		n16, err := m.Timestamp.MarshalTo(data[i:])
3440 3440
 		if err != nil {
3441 3441
 			return 0, err
3442 3442
 		}
3443
-		i += n15
3443
+		i += n16
3444 3444
 	}
3445 3445
 	if m.State != 0 {
3446 3446
 		data[i] = 0x10
... ...
@@ -3460,11 +3530,11 @@ func (m *TaskStatus) MarshalTo(data []byte) (int, error) {
3460 3460
 		i += copy(data[i:], m.Err)
3461 3461
 	}
3462 3462
 	if m.RuntimeStatus != nil {
3463
-		nn16, err := m.RuntimeStatus.MarshalTo(data[i:])
3463
+		nn17, err := m.RuntimeStatus.MarshalTo(data[i:])
3464 3464
 		if err != nil {
3465 3465
 			return 0, err
3466 3466
 		}
3467
-		i += nn16
3467
+		i += nn17
3468 3468
 	}
3469 3469
 	return i, nil
3470 3470
 }
... ...
@@ -3475,11 +3545,11 @@ func (m *TaskStatus_Container) MarshalTo(data []byte) (int, error) {
3475 3475
 		data[i] = 0x2a
3476 3476
 		i++
3477 3477
 		i = encodeVarintTypes(data, i, uint64(m.Container.Size()))
3478
-		n17, err := m.Container.MarshalTo(data[i:])
3478
+		n18, err := m.Container.MarshalTo(data[i:])
3479 3479
 		if err != nil {
3480 3480
 			return 0, err
3481 3481
 		}
3482
-		i += n17
3482
+		i += n18
3483 3483
 	}
3484 3484
 	return i, nil
3485 3485
 }
... ...
@@ -3694,11 +3764,11 @@ func (m *IPAMOptions) MarshalTo(data []byte) (int, error) {
3694 3694
 		data[i] = 0xa
3695 3695
 		i++
3696 3696
 		i = encodeVarintTypes(data, i, uint64(m.Driver.Size()))
3697
-		n18, err := m.Driver.MarshalTo(data[i:])
3697
+		n19, err := m.Driver.MarshalTo(data[i:])
3698 3698
 		if err != nil {
3699 3699
 			return 0, err
3700 3700
 		}
3701
-		i += n18
3701
+		i += n19
3702 3702
 	}
3703 3703
 	if len(m.Configs) > 0 {
3704 3704
 		for _, msg := range m.Configs {
... ...
@@ -3764,11 +3834,11 @@ func (m *WeightedPeer) MarshalTo(data []byte) (int, error) {
3764 3764
 		data[i] = 0xa
3765 3765
 		i++
3766 3766
 		i = encodeVarintTypes(data, i, uint64(m.Peer.Size()))
3767
-		n19, err := m.Peer.MarshalTo(data[i:])
3767
+		n20, err := m.Peer.MarshalTo(data[i:])
3768 3768
 		if err != nil {
3769 3769
 			return 0, err
3770 3770
 		}
3771
-		i += n19
3771
+		i += n20
3772 3772
 	}
3773 3773
 	if m.Weight != 0 {
3774 3774
 		data[i] = 0x10
... ...
@@ -3871,11 +3941,11 @@ func (m *AcceptancePolicy_RoleAdmissionPolicy) MarshalTo(data []byte) (int, erro
3871 3871
 		data[i] = 0x1a
3872 3872
 		i++
3873 3873
 		i = encodeVarintTypes(data, i, uint64(m.Secret.Size()))
3874
-		n20, err := m.Secret.MarshalTo(data[i:])
3874
+		n21, err := m.Secret.MarshalTo(data[i:])
3875 3875
 		if err != nil {
3876 3876
 			return 0, err
3877 3877
 		}
3878
-		i += n20
3878
+		i += n21
3879 3879
 	}
3880 3880
 	return i, nil
3881 3881
 }
... ...
@@ -3975,11 +4045,11 @@ func (m *CAConfig) MarshalTo(data []byte) (int, error) {
3975 3975
 		data[i] = 0xa
3976 3976
 		i++
3977 3977
 		i = encodeVarintTypes(data, i, uint64(m.NodeCertExpiry.Size()))
3978
-		n21, err := m.NodeCertExpiry.MarshalTo(data[i:])
3978
+		n22, err := m.NodeCertExpiry.MarshalTo(data[i:])
3979 3979
 		if err != nil {
3980 3980
 			return 0, err
3981 3981
 		}
3982
-		i += n21
3982
+		i += n22
3983 3983
 	}
3984 3984
 	if len(m.ExternalCAs) > 0 {
3985 3985
 		for _, msg := range m.ExternalCAs {
... ...
@@ -4038,11 +4108,11 @@ func (m *TaskDefaults) MarshalTo(data []byte) (int, error) {
4038 4038
 		data[i] = 0xa
4039 4039
 		i++
4040 4040
 		i = encodeVarintTypes(data, i, uint64(m.LogDriver.Size()))
4041
-		n22, err := m.LogDriver.MarshalTo(data[i:])
4041
+		n23, err := m.LogDriver.MarshalTo(data[i:])
4042 4042
 		if err != nil {
4043 4043
 			return 0, err
4044 4044
 		}
4045
-		i += n22
4045
+		i += n23
4046 4046
 	}
4047 4047
 	return i, nil
4048 4048
 }
... ...
@@ -4066,11 +4136,11 @@ func (m *DispatcherConfig) MarshalTo(data []byte) (int, error) {
4066 4066
 		data[i] = 0xa
4067 4067
 		i++
4068 4068
 		i = encodeVarintTypes(data, i, uint64(m.HeartbeatPeriod.Size()))
4069
-		n23, err := m.HeartbeatPeriod.MarshalTo(data[i:])
4069
+		n24, err := m.HeartbeatPeriod.MarshalTo(data[i:])
4070 4070
 		if err != nil {
4071 4071
 			return 0, err
4072 4072
 		}
4073
-		i += n23
4073
+		i += n24
4074 4074
 	}
4075 4075
 	return i, nil
4076 4076
 }
... ...
@@ -4217,11 +4287,11 @@ func (m *RootCA) MarshalTo(data []byte) (int, error) {
4217 4217
 	data[i] = 0x22
4218 4218
 	i++
4219 4219
 	i = encodeVarintTypes(data, i, uint64(m.JoinTokens.Size()))
4220
-	n24, err := m.JoinTokens.MarshalTo(data[i:])
4220
+	n25, err := m.JoinTokens.MarshalTo(data[i:])
4221 4221
 	if err != nil {
4222 4222
 		return 0, err
4223 4223
 	}
4224
-	i += n24
4224
+	i += n25
4225 4225
 	return i, nil
4226 4226
 }
4227 4227
 
... ...
@@ -4254,11 +4324,11 @@ func (m *Certificate) MarshalTo(data []byte) (int, error) {
4254 4254
 	data[i] = 0x1a
4255 4255
 	i++
4256 4256
 	i = encodeVarintTypes(data, i, uint64(m.Status.Size()))
4257
-	n25, err := m.Status.MarshalTo(data[i:])
4257
+	n26, err := m.Status.MarshalTo(data[i:])
4258 4258
 	if err != nil {
4259 4259
 		return 0, err
4260 4260
 	}
4261
-	i += n25
4261
+	i += n26
4262 4262
 	if len(m.Certificate) > 0 {
4263 4263
 		data[i] = 0x22
4264 4264
 		i++
... ...
@@ -4657,6 +4727,13 @@ func (m *UpdateConfig) Size() (n int) {
4657 4657
 	if m.FailureAction != 0 {
4658 4658
 		n += 1 + sovTypes(uint64(m.FailureAction))
4659 4659
 	}
4660
+	if m.Monitor != nil {
4661
+		l = m.Monitor.Size()
4662
+		n += 1 + l + sovTypes(uint64(l))
4663
+	}
4664
+	if m.AllowedFailureFraction != 0 {
4665
+		n += 5
4666
+	}
4660 4667
 	return n
4661 4668
 }
4662 4669
 
... ...
@@ -5347,6 +5424,8 @@ func (this *UpdateConfig) String() string {
5347 5347
 		`Parallelism:` + fmt.Sprintf("%v", this.Parallelism) + `,`,
5348 5348
 		`Delay:` + strings.Replace(strings.Replace(this.Delay.String(), "Duration", "docker_swarmkit_v11.Duration", 1), `&`, ``, 1) + `,`,
5349 5349
 		`FailureAction:` + fmt.Sprintf("%v", this.FailureAction) + `,`,
5350
+		`Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Duration", "docker_swarmkit_v11.Duration", 1) + `,`,
5351
+		`AllowedFailureFraction:` + fmt.Sprintf("%v", this.AllowedFailureFraction) + `,`,
5350 5352
 		`}`,
5351 5353
 	}, "")
5352 5354
 	return s
... ...
@@ -5899,50 +5978,55 @@ func (m *Annotations) Unmarshal(data []byte) error {
5899 5899
 			}
5900 5900
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
5901 5901
 			iNdEx = postStringIndexmapkey
5902
-			var valuekey uint64
5903
-			for shift := uint(0); ; shift += 7 {
5904
-				if shift >= 64 {
5905
-					return ErrIntOverflowTypes
5906
-				}
5907
-				if iNdEx >= l {
5908
-					return io.ErrUnexpectedEOF
5902
+			if m.Labels == nil {
5903
+				m.Labels = make(map[string]string)
5904
+			}
5905
+			if iNdEx < postIndex {
5906
+				var valuekey uint64
5907
+				for shift := uint(0); ; shift += 7 {
5908
+					if shift >= 64 {
5909
+						return ErrIntOverflowTypes
5910
+					}
5911
+					if iNdEx >= l {
5912
+						return io.ErrUnexpectedEOF
5913
+					}
5914
+					b := data[iNdEx]
5915
+					iNdEx++
5916
+					valuekey |= (uint64(b) & 0x7F) << shift
5917
+					if b < 0x80 {
5918
+						break
5919
+					}
5909 5920
 				}
5910
-				b := data[iNdEx]
5911
-				iNdEx++
5912
-				valuekey |= (uint64(b) & 0x7F) << shift
5913
-				if b < 0x80 {
5914
-					break
5921
+				var stringLenmapvalue uint64
5922
+				for shift := uint(0); ; shift += 7 {
5923
+					if shift >= 64 {
5924
+						return ErrIntOverflowTypes
5925
+					}
5926
+					if iNdEx >= l {
5927
+						return io.ErrUnexpectedEOF
5928
+					}
5929
+					b := data[iNdEx]
5930
+					iNdEx++
5931
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
5932
+					if b < 0x80 {
5933
+						break
5934
+					}
5915 5935
 				}
5916
-			}
5917
-			var stringLenmapvalue uint64
5918
-			for shift := uint(0); ; shift += 7 {
5919
-				if shift >= 64 {
5920
-					return ErrIntOverflowTypes
5936
+				intStringLenmapvalue := int(stringLenmapvalue)
5937
+				if intStringLenmapvalue < 0 {
5938
+					return ErrInvalidLengthTypes
5921 5939
 				}
5922
-				if iNdEx >= l {
5940
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
5941
+				if postStringIndexmapvalue > l {
5923 5942
 					return io.ErrUnexpectedEOF
5924 5943
 				}
5925
-				b := data[iNdEx]
5926
-				iNdEx++
5927
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
5928
-				if b < 0x80 {
5929
-					break
5930
-				}
5944
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
5945
+				iNdEx = postStringIndexmapvalue
5946
+				m.Labels[mapkey] = mapvalue
5947
+			} else {
5948
+				var mapvalue string
5949
+				m.Labels[mapkey] = mapvalue
5931 5950
 			}
5932
-			intStringLenmapvalue := int(stringLenmapvalue)
5933
-			if intStringLenmapvalue < 0 {
5934
-				return ErrInvalidLengthTypes
5935
-			}
5936
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
5937
-			if postStringIndexmapvalue > l {
5938
-				return io.ErrUnexpectedEOF
5939
-			}
5940
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
5941
-			iNdEx = postStringIndexmapvalue
5942
-			if m.Labels == nil {
5943
-				m.Labels = make(map[string]string)
5944
-			}
5945
-			m.Labels[mapkey] = mapvalue
5946 5951
 			iNdEx = postIndex
5947 5952
 		default:
5948 5953
 			iNdEx = preIndex
... ...
@@ -6509,50 +6593,55 @@ func (m *EngineDescription) Unmarshal(data []byte) error {
6509 6509
 			}
6510 6510
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
6511 6511
 			iNdEx = postStringIndexmapkey
6512
-			var valuekey uint64
6513
-			for shift := uint(0); ; shift += 7 {
6514
-				if shift >= 64 {
6515
-					return ErrIntOverflowTypes
6516
-				}
6517
-				if iNdEx >= l {
6518
-					return io.ErrUnexpectedEOF
6512
+			if m.Labels == nil {
6513
+				m.Labels = make(map[string]string)
6514
+			}
6515
+			if iNdEx < postIndex {
6516
+				var valuekey uint64
6517
+				for shift := uint(0); ; shift += 7 {
6518
+					if shift >= 64 {
6519
+						return ErrIntOverflowTypes
6520
+					}
6521
+					if iNdEx >= l {
6522
+						return io.ErrUnexpectedEOF
6523
+					}
6524
+					b := data[iNdEx]
6525
+					iNdEx++
6526
+					valuekey |= (uint64(b) & 0x7F) << shift
6527
+					if b < 0x80 {
6528
+						break
6529
+					}
6519 6530
 				}
6520
-				b := data[iNdEx]
6521
-				iNdEx++
6522
-				valuekey |= (uint64(b) & 0x7F) << shift
6523
-				if b < 0x80 {
6524
-					break
6531
+				var stringLenmapvalue uint64
6532
+				for shift := uint(0); ; shift += 7 {
6533
+					if shift >= 64 {
6534
+						return ErrIntOverflowTypes
6535
+					}
6536
+					if iNdEx >= l {
6537
+						return io.ErrUnexpectedEOF
6538
+					}
6539
+					b := data[iNdEx]
6540
+					iNdEx++
6541
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
6542
+					if b < 0x80 {
6543
+						break
6544
+					}
6525 6545
 				}
6526
-			}
6527
-			var stringLenmapvalue uint64
6528
-			for shift := uint(0); ; shift += 7 {
6529
-				if shift >= 64 {
6530
-					return ErrIntOverflowTypes
6546
+				intStringLenmapvalue := int(stringLenmapvalue)
6547
+				if intStringLenmapvalue < 0 {
6548
+					return ErrInvalidLengthTypes
6531 6549
 				}
6532
-				if iNdEx >= l {
6550
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
6551
+				if postStringIndexmapvalue > l {
6533 6552
 					return io.ErrUnexpectedEOF
6534 6553
 				}
6535
-				b := data[iNdEx]
6536
-				iNdEx++
6537
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
6538
-				if b < 0x80 {
6539
-					break
6540
-				}
6554
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
6555
+				iNdEx = postStringIndexmapvalue
6556
+				m.Labels[mapkey] = mapvalue
6557
+			} else {
6558
+				var mapvalue string
6559
+				m.Labels[mapkey] = mapvalue
6541 6560
 			}
6542
-			intStringLenmapvalue := int(stringLenmapvalue)
6543
-			if intStringLenmapvalue < 0 {
6544
-				return ErrInvalidLengthTypes
6545
-			}
6546
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
6547
-			if postStringIndexmapvalue > l {
6548
-				return io.ErrUnexpectedEOF
6549
-			}
6550
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
6551
-			iNdEx = postStringIndexmapvalue
6552
-			if m.Labels == nil {
6553
-				m.Labels = make(map[string]string)
6554
-			}
6555
-			m.Labels[mapkey] = mapvalue
6556 6561
 			iNdEx = postIndex
6557 6562
 		case 3:
6558 6563
 			if wireType != 2 {
... ...
@@ -7509,50 +7598,55 @@ func (m *Mount_VolumeOptions) Unmarshal(data []byte) error {
7509 7509
 			}
7510 7510
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
7511 7511
 			iNdEx = postStringIndexmapkey
7512
-			var valuekey uint64
7513
-			for shift := uint(0); ; shift += 7 {
7514
-				if shift >= 64 {
7515
-					return ErrIntOverflowTypes
7516
-				}
7517
-				if iNdEx >= l {
7518
-					return io.ErrUnexpectedEOF
7512
+			if m.Labels == nil {
7513
+				m.Labels = make(map[string]string)
7514
+			}
7515
+			if iNdEx < postIndex {
7516
+				var valuekey uint64
7517
+				for shift := uint(0); ; shift += 7 {
7518
+					if shift >= 64 {
7519
+						return ErrIntOverflowTypes
7520
+					}
7521
+					if iNdEx >= l {
7522
+						return io.ErrUnexpectedEOF
7523
+					}
7524
+					b := data[iNdEx]
7525
+					iNdEx++
7526
+					valuekey |= (uint64(b) & 0x7F) << shift
7527
+					if b < 0x80 {
7528
+						break
7529
+					}
7519 7530
 				}
7520
-				b := data[iNdEx]
7521
-				iNdEx++
7522
-				valuekey |= (uint64(b) & 0x7F) << shift
7523
-				if b < 0x80 {
7524
-					break
7531
+				var stringLenmapvalue uint64
7532
+				for shift := uint(0); ; shift += 7 {
7533
+					if shift >= 64 {
7534
+						return ErrIntOverflowTypes
7535
+					}
7536
+					if iNdEx >= l {
7537
+						return io.ErrUnexpectedEOF
7538
+					}
7539
+					b := data[iNdEx]
7540
+					iNdEx++
7541
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
7542
+					if b < 0x80 {
7543
+						break
7544
+					}
7525 7545
 				}
7526
-			}
7527
-			var stringLenmapvalue uint64
7528
-			for shift := uint(0); ; shift += 7 {
7529
-				if shift >= 64 {
7530
-					return ErrIntOverflowTypes
7546
+				intStringLenmapvalue := int(stringLenmapvalue)
7547
+				if intStringLenmapvalue < 0 {
7548
+					return ErrInvalidLengthTypes
7531 7549
 				}
7532
-				if iNdEx >= l {
7550
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
7551
+				if postStringIndexmapvalue > l {
7533 7552
 					return io.ErrUnexpectedEOF
7534 7553
 				}
7535
-				b := data[iNdEx]
7536
-				iNdEx++
7537
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
7538
-				if b < 0x80 {
7539
-					break
7540
-				}
7541
-			}
7542
-			intStringLenmapvalue := int(stringLenmapvalue)
7543
-			if intStringLenmapvalue < 0 {
7544
-				return ErrInvalidLengthTypes
7545
-			}
7546
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
7547
-			if postStringIndexmapvalue > l {
7548
-				return io.ErrUnexpectedEOF
7549
-			}
7550
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
7551
-			iNdEx = postStringIndexmapvalue
7552
-			if m.Labels == nil {
7553
-				m.Labels = make(map[string]string)
7554
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
7555
+				iNdEx = postStringIndexmapvalue
7556
+				m.Labels[mapkey] = mapvalue
7557
+			} else {
7558
+				var mapvalue string
7559
+				m.Labels[mapkey] = mapvalue
7554 7560
 			}
7555
-			m.Labels[mapkey] = mapvalue
7556 7561
 			iNdEx = postIndex
7557 7562
 		case 3:
7558 7563
 			if wireType != 2 {
... ...
@@ -7947,6 +8041,53 @@ func (m *UpdateConfig) Unmarshal(data []byte) error {
7947 7947
 					break
7948 7948
 				}
7949 7949
 			}
7950
+		case 4:
7951
+			if wireType != 2 {
7952
+				return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType)
7953
+			}
7954
+			var msglen int
7955
+			for shift := uint(0); ; shift += 7 {
7956
+				if shift >= 64 {
7957
+					return ErrIntOverflowTypes
7958
+				}
7959
+				if iNdEx >= l {
7960
+					return io.ErrUnexpectedEOF
7961
+				}
7962
+				b := data[iNdEx]
7963
+				iNdEx++
7964
+				msglen |= (int(b) & 0x7F) << shift
7965
+				if b < 0x80 {
7966
+					break
7967
+				}
7968
+			}
7969
+			if msglen < 0 {
7970
+				return ErrInvalidLengthTypes
7971
+			}
7972
+			postIndex := iNdEx + msglen
7973
+			if postIndex > l {
7974
+				return io.ErrUnexpectedEOF
7975
+			}
7976
+			if m.Monitor == nil {
7977
+				m.Monitor = &docker_swarmkit_v11.Duration{}
7978
+			}
7979
+			if err := m.Monitor.Unmarshal(data[iNdEx:postIndex]); err != nil {
7980
+				return err
7981
+			}
7982
+			iNdEx = postIndex
7983
+		case 5:
7984
+			if wireType != 5 {
7985
+				return fmt.Errorf("proto: wrong wireType = %d for field AllowedFailureFraction", wireType)
7986
+			}
7987
+			var v uint32
7988
+			if (iNdEx + 4) > l {
7989
+				return io.ErrUnexpectedEOF
7990
+			}
7991
+			iNdEx += 4
7992
+			v = uint32(data[iNdEx-4])
7993
+			v |= uint32(data[iNdEx-3]) << 8
7994
+			v |= uint32(data[iNdEx-2]) << 16
7995
+			v |= uint32(data[iNdEx-1]) << 24
7996
+			m.AllowedFailureFraction = float32(math.Float32frombits(v))
7950 7997
 		default:
7951 7998
 			iNdEx = preIndex
7952 7999
 			skippy, err := skipTypes(data[iNdEx:])
... ...
@@ -8779,50 +8920,55 @@ func (m *IPAMConfig) Unmarshal(data []byte) error {
8779 8779
 			}
8780 8780
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
8781 8781
 			iNdEx = postStringIndexmapkey
8782
-			var valuekey uint64
8783
-			for shift := uint(0); ; shift += 7 {
8784
-				if shift >= 64 {
8785
-					return ErrIntOverflowTypes
8786
-				}
8787
-				if iNdEx >= l {
8788
-					return io.ErrUnexpectedEOF
8782
+			if m.Reserved == nil {
8783
+				m.Reserved = make(map[string]string)
8784
+			}
8785
+			if iNdEx < postIndex {
8786
+				var valuekey uint64
8787
+				for shift := uint(0); ; shift += 7 {
8788
+					if shift >= 64 {
8789
+						return ErrIntOverflowTypes
8790
+					}
8791
+					if iNdEx >= l {
8792
+						return io.ErrUnexpectedEOF
8793
+					}
8794
+					b := data[iNdEx]
8795
+					iNdEx++
8796
+					valuekey |= (uint64(b) & 0x7F) << shift
8797
+					if b < 0x80 {
8798
+						break
8799
+					}
8789 8800
 				}
8790
-				b := data[iNdEx]
8791
-				iNdEx++
8792
-				valuekey |= (uint64(b) & 0x7F) << shift
8793
-				if b < 0x80 {
8794
-					break
8801
+				var stringLenmapvalue uint64
8802
+				for shift := uint(0); ; shift += 7 {
8803
+					if shift >= 64 {
8804
+						return ErrIntOverflowTypes
8805
+					}
8806
+					if iNdEx >= l {
8807
+						return io.ErrUnexpectedEOF
8808
+					}
8809
+					b := data[iNdEx]
8810
+					iNdEx++
8811
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
8812
+					if b < 0x80 {
8813
+						break
8814
+					}
8795 8815
 				}
8796
-			}
8797
-			var stringLenmapvalue uint64
8798
-			for shift := uint(0); ; shift += 7 {
8799
-				if shift >= 64 {
8800
-					return ErrIntOverflowTypes
8816
+				intStringLenmapvalue := int(stringLenmapvalue)
8817
+				if intStringLenmapvalue < 0 {
8818
+					return ErrInvalidLengthTypes
8801 8819
 				}
8802
-				if iNdEx >= l {
8820
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
8821
+				if postStringIndexmapvalue > l {
8803 8822
 					return io.ErrUnexpectedEOF
8804 8823
 				}
8805
-				b := data[iNdEx]
8806
-				iNdEx++
8807
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
8808
-				if b < 0x80 {
8809
-					break
8810
-				}
8811
-			}
8812
-			intStringLenmapvalue := int(stringLenmapvalue)
8813
-			if intStringLenmapvalue < 0 {
8814
-				return ErrInvalidLengthTypes
8824
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
8825
+				iNdEx = postStringIndexmapvalue
8826
+				m.Reserved[mapkey] = mapvalue
8827
+			} else {
8828
+				var mapvalue string
8829
+				m.Reserved[mapkey] = mapvalue
8815 8830
 			}
8816
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
8817
-			if postStringIndexmapvalue > l {
8818
-				return io.ErrUnexpectedEOF
8819
-			}
8820
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
8821
-			iNdEx = postStringIndexmapvalue
8822
-			if m.Reserved == nil {
8823
-				m.Reserved = make(map[string]string)
8824
-			}
8825
-			m.Reserved[mapkey] = mapvalue
8826 8831
 			iNdEx = postIndex
8827 8832
 		default:
8828 8833
 			iNdEx = preIndex
... ...
@@ -9105,50 +9251,55 @@ func (m *Driver) Unmarshal(data []byte) error {
9105 9105
 			}
9106 9106
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
9107 9107
 			iNdEx = postStringIndexmapkey
9108
-			var valuekey uint64
9109
-			for shift := uint(0); ; shift += 7 {
9110
-				if shift >= 64 {
9111
-					return ErrIntOverflowTypes
9112
-				}
9113
-				if iNdEx >= l {
9114
-					return io.ErrUnexpectedEOF
9108
+			if m.Options == nil {
9109
+				m.Options = make(map[string]string)
9110
+			}
9111
+			if iNdEx < postIndex {
9112
+				var valuekey uint64
9113
+				for shift := uint(0); ; shift += 7 {
9114
+					if shift >= 64 {
9115
+						return ErrIntOverflowTypes
9116
+					}
9117
+					if iNdEx >= l {
9118
+						return io.ErrUnexpectedEOF
9119
+					}
9120
+					b := data[iNdEx]
9121
+					iNdEx++
9122
+					valuekey |= (uint64(b) & 0x7F) << shift
9123
+					if b < 0x80 {
9124
+						break
9125
+					}
9115 9126
 				}
9116
-				b := data[iNdEx]
9117
-				iNdEx++
9118
-				valuekey |= (uint64(b) & 0x7F) << shift
9119
-				if b < 0x80 {
9120
-					break
9127
+				var stringLenmapvalue uint64
9128
+				for shift := uint(0); ; shift += 7 {
9129
+					if shift >= 64 {
9130
+						return ErrIntOverflowTypes
9131
+					}
9132
+					if iNdEx >= l {
9133
+						return io.ErrUnexpectedEOF
9134
+					}
9135
+					b := data[iNdEx]
9136
+					iNdEx++
9137
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
9138
+					if b < 0x80 {
9139
+						break
9140
+					}
9121 9141
 				}
9122
-			}
9123
-			var stringLenmapvalue uint64
9124
-			for shift := uint(0); ; shift += 7 {
9125
-				if shift >= 64 {
9126
-					return ErrIntOverflowTypes
9142
+				intStringLenmapvalue := int(stringLenmapvalue)
9143
+				if intStringLenmapvalue < 0 {
9144
+					return ErrInvalidLengthTypes
9127 9145
 				}
9128
-				if iNdEx >= l {
9146
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
9147
+				if postStringIndexmapvalue > l {
9129 9148
 					return io.ErrUnexpectedEOF
9130 9149
 				}
9131
-				b := data[iNdEx]
9132
-				iNdEx++
9133
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
9134
-				if b < 0x80 {
9135
-					break
9136
-				}
9150
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
9151
+				iNdEx = postStringIndexmapvalue
9152
+				m.Options[mapkey] = mapvalue
9153
+			} else {
9154
+				var mapvalue string
9155
+				m.Options[mapkey] = mapvalue
9137 9156
 			}
9138
-			intStringLenmapvalue := int(stringLenmapvalue)
9139
-			if intStringLenmapvalue < 0 {
9140
-				return ErrInvalidLengthTypes
9141
-			}
9142
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
9143
-			if postStringIndexmapvalue > l {
9144
-				return io.ErrUnexpectedEOF
9145
-			}
9146
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
9147
-			iNdEx = postStringIndexmapvalue
9148
-			if m.Options == nil {
9149
-				m.Options = make(map[string]string)
9150
-			}
9151
-			m.Options[mapkey] = mapvalue
9152 9157
 			iNdEx = postIndex
9153 9158
 		default:
9154 9159
 			iNdEx = preIndex
... ...
@@ -10049,50 +10200,55 @@ func (m *ExternalCA) Unmarshal(data []byte) error {
10049 10049
 			}
10050 10050
 			mapkey := string(data[iNdEx:postStringIndexmapkey])
10051 10051
 			iNdEx = postStringIndexmapkey
10052
-			var valuekey uint64
10053
-			for shift := uint(0); ; shift += 7 {
10054
-				if shift >= 64 {
10055
-					return ErrIntOverflowTypes
10056
-				}
10057
-				if iNdEx >= l {
10058
-					return io.ErrUnexpectedEOF
10052
+			if m.Options == nil {
10053
+				m.Options = make(map[string]string)
10054
+			}
10055
+			if iNdEx < postIndex {
10056
+				var valuekey uint64
10057
+				for shift := uint(0); ; shift += 7 {
10058
+					if shift >= 64 {
10059
+						return ErrIntOverflowTypes
10060
+					}
10061
+					if iNdEx >= l {
10062
+						return io.ErrUnexpectedEOF
10063
+					}
10064
+					b := data[iNdEx]
10065
+					iNdEx++
10066
+					valuekey |= (uint64(b) & 0x7F) << shift
10067
+					if b < 0x80 {
10068
+						break
10069
+					}
10059 10070
 				}
10060
-				b := data[iNdEx]
10061
-				iNdEx++
10062
-				valuekey |= (uint64(b) & 0x7F) << shift
10063
-				if b < 0x80 {
10064
-					break
10071
+				var stringLenmapvalue uint64
10072
+				for shift := uint(0); ; shift += 7 {
10073
+					if shift >= 64 {
10074
+						return ErrIntOverflowTypes
10075
+					}
10076
+					if iNdEx >= l {
10077
+						return io.ErrUnexpectedEOF
10078
+					}
10079
+					b := data[iNdEx]
10080
+					iNdEx++
10081
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
10082
+					if b < 0x80 {
10083
+						break
10084
+					}
10065 10085
 				}
10066
-			}
10067
-			var stringLenmapvalue uint64
10068
-			for shift := uint(0); ; shift += 7 {
10069
-				if shift >= 64 {
10070
-					return ErrIntOverflowTypes
10086
+				intStringLenmapvalue := int(stringLenmapvalue)
10087
+				if intStringLenmapvalue < 0 {
10088
+					return ErrInvalidLengthTypes
10071 10089
 				}
10072
-				if iNdEx >= l {
10090
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
10091
+				if postStringIndexmapvalue > l {
10073 10092
 					return io.ErrUnexpectedEOF
10074 10093
 				}
10075
-				b := data[iNdEx]
10076
-				iNdEx++
10077
-				stringLenmapvalue |= (uint64(b) & 0x7F) << shift
10078
-				if b < 0x80 {
10079
-					break
10080
-				}
10094
+				mapvalue := string(data[iNdEx:postStringIndexmapvalue])
10095
+				iNdEx = postStringIndexmapvalue
10096
+				m.Options[mapkey] = mapvalue
10097
+			} else {
10098
+				var mapvalue string
10099
+				m.Options[mapkey] = mapvalue
10081 10100
 			}
10082
-			intStringLenmapvalue := int(stringLenmapvalue)
10083
-			if intStringLenmapvalue < 0 {
10084
-				return ErrInvalidLengthTypes
10085
-			}
10086
-			postStringIndexmapvalue := iNdEx + intStringLenmapvalue
10087
-			if postStringIndexmapvalue > l {
10088
-				return io.ErrUnexpectedEOF
10089
-			}
10090
-			mapvalue := string(data[iNdEx:postStringIndexmapvalue])
10091
-			iNdEx = postStringIndexmapvalue
10092
-			if m.Options == nil {
10093
-				m.Options = make(map[string]string)
10094
-			}
10095
-			m.Options[mapkey] = mapvalue
10096 10101
 			iNdEx = postIndex
10097 10102
 		default:
10098 10103
 			iNdEx = preIndex
... ...
@@ -11547,222 +11703,228 @@ var (
11547 11547
 	ErrIntOverflowTypes   = fmt.Errorf("proto: integer overflow")
11548 11548
 )
11549 11549
 
11550
+func init() { proto.RegisterFile("types.proto", fileDescriptorTypes) }
11551
+
11550 11552
 var fileDescriptorTypes = []byte{
11551
-	// 3442 bytes of a gzipped FileDescriptorProto
11552
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x59, 0x4d, 0x6c, 0x1b, 0x49,
11553
-	0x76, 0x16, 0x7f, 0x45, 0x3e, 0x52, 0x72, 0xbb, 0xec, 0xf5, 0xc8, 0x1c, 0x8f, 0xc4, 0x69, 0x8f,
11554
-	0x77, 0xbc, 0xb3, 0x13, 0xce, 0x8c, 0x66, 0x13, 0x78, 0xc7, 0xc9, 0xce, 0xb4, 0x48, 0xca, 0xe6,
11555
-	0x5a, 0xa2, 0x88, 0x22, 0x69, 0x63, 0x10, 0x20, 0x44, 0xa9, 0xbb, 0x44, 0xf6, 0xa8, 0xd9, 0xc5,
11556
-	0x74, 0x17, 0x25, 0x33, 0x41, 0x00, 0x27, 0x97, 0x04, 0x3a, 0xe5, 0x1e, 0x08, 0x8b, 0x20, 0x41,
11557
-	0x6e, 0x39, 0xe4, 0x14, 0x20, 0x27, 0x1f, 0xe7, 0xb8, 0x41, 0x80, 0x60, 0x91, 0x00, 0x42, 0x46,
11558
-	0x39, 0xe6, 0xb2, 0x40, 0x0e, 0x7b, 0x48, 0x0e, 0x41, 0xfd, 0x74, 0xf3, 0xc7, 0xb4, 0xc6, 0x93,
11559
-	0xdd, 0x13, 0xbb, 0x5e, 0x7d, 0xef, 0xd5, 0xab, 0xaa, 0x57, 0xaf, 0xbe, 0x57, 0x84, 0x02, 0x9f,
11560
-	0x8c, 0x68, 0x58, 0x19, 0x05, 0x8c, 0x33, 0x84, 0x1c, 0x66, 0x1f, 0xd3, 0xa0, 0x12, 0x9e, 0x92,
11561
-	0x60, 0x78, 0xec, 0xf2, 0xca, 0xc9, 0x27, 0xa5, 0xdb, 0xdc, 0x1d, 0xd2, 0x90, 0x93, 0xe1, 0xe8,
11562
-	0xa3, 0xf8, 0x4b, 0xc1, 0x4b, 0x6f, 0x39, 0xe3, 0x80, 0x70, 0x97, 0xf9, 0x1f, 0x45, 0x1f, 0xba,
11563
-	0xe3, 0x66, 0x9f, 0xf5, 0x99, 0xfc, 0xfc, 0x48, 0x7c, 0x29, 0xa9, 0xb9, 0x05, 0xab, 0x4f, 0x69,
11564
-	0x10, 0xba, 0xcc, 0x47, 0x37, 0x21, 0xe3, 0xfa, 0x0e, 0x7d, 0xbe, 0x91, 0x28, 0x27, 0xee, 0xa7,
11565
-	0xb1, 0x6a, 0x98, 0x7f, 0x9d, 0x80, 0x82, 0xe5, 0xfb, 0x8c, 0x4b, 0x5b, 0x21, 0x42, 0x90, 0xf6,
11566
-	0xc9, 0x90, 0x4a, 0x50, 0x1e, 0xcb, 0x6f, 0x54, 0x85, 0xac, 0x47, 0x0e, 0xa9, 0x17, 0x6e, 0x24,
11567
-	0xcb, 0xa9, 0xfb, 0x85, 0xed, 0x1f, 0x56, 0x5e, 0xf5, 0xb9, 0x32, 0x63, 0xa4, 0xb2, 0x27, 0xd1,
11568
-	0x75, 0x9f, 0x07, 0x13, 0xac, 0x55, 0x4b, 0x3f, 0x86, 0xc2, 0x8c, 0x18, 0x19, 0x90, 0x3a, 0xa6,
11569
-	0x13, 0x3d, 0x8c, 0xf8, 0x14, 0xfe, 0x9d, 0x10, 0x6f, 0x4c, 0x37, 0x92, 0x52, 0xa6, 0x1a, 0x9f,
11570
-	0x25, 0x1f, 0x24, 0xcc, 0x2f, 0x21, 0x8f, 0x69, 0xc8, 0xc6, 0x81, 0x4d, 0x43, 0xf4, 0x03, 0xc8,
11571
-	0xfb, 0xc4, 0x67, 0x3d, 0x7b, 0x34, 0x0e, 0xa5, 0x7a, 0x6a, 0xa7, 0x78, 0x79, 0xb1, 0x95, 0x6b,
11572
-	0x12, 0x9f, 0x55, 0x5b, 0xdd, 0x10, 0xe7, 0x44, 0x77, 0x75, 0x34, 0x0e, 0xd1, 0xbb, 0x50, 0x1c,
11573
-	0xd2, 0x21, 0x0b, 0x26, 0xbd, 0xc3, 0x09, 0xa7, 0xa1, 0x34, 0x9c, 0xc2, 0x05, 0x25, 0xdb, 0x11,
11574
-	0x22, 0xf3, 0x2f, 0x13, 0x70, 0x33, 0xb2, 0x8d, 0xe9, 0x1f, 0x8e, 0xdd, 0x80, 0x0e, 0xa9, 0xcf,
11575
-	0x43, 0xf4, 0xdb, 0x90, 0xf5, 0xdc, 0xa1, 0xcb, 0xd5, 0x18, 0x85, 0xed, 0x77, 0x96, 0xcd, 0x39,
11576
-	0xf6, 0x0a, 0x6b, 0x30, 0xb2, 0xa0, 0x18, 0xd0, 0x90, 0x06, 0x27, 0x6a, 0x25, 0xe4, 0x90, 0xdf,
11577
-	0xaa, 0x3c, 0xa7, 0x62, 0xee, 0x42, 0xae, 0xe5, 0x11, 0x7e, 0xc4, 0x82, 0x21, 0x32, 0xa1, 0x48,
11578
-	0x02, 0x7b, 0xe0, 0x72, 0x6a, 0xf3, 0x71, 0x10, 0xed, 0xca, 0x9c, 0x0c, 0xdd, 0x82, 0x24, 0x53,
11579
-	0x03, 0xe5, 0x77, 0xb2, 0x97, 0x17, 0x5b, 0xc9, 0x83, 0x36, 0x4e, 0xb2, 0xd0, 0x7c, 0x08, 0xd7,
11580
-	0x5b, 0xde, 0xb8, 0xef, 0xfa, 0x35, 0x1a, 0xda, 0x81, 0x3b, 0x12, 0xd6, 0xc5, 0xf6, 0x8a, 0xe0,
11581
-	0x8b, 0xb6, 0x57, 0x7c, 0xc7, 0x5b, 0x9e, 0x9c, 0x6e, 0xb9, 0xf9, 0xe7, 0x49, 0xb8, 0x5e, 0xf7,
11582
-	0xfb, 0xae, 0x4f, 0x67, 0xb5, 0xef, 0xc1, 0x3a, 0x95, 0xc2, 0xde, 0x89, 0x0a, 0x2a, 0x6d, 0x67,
11583
-	0x4d, 0x49, 0xa3, 0x48, 0x6b, 0x2c, 0xc4, 0xcb, 0x27, 0xcb, 0xa6, 0xff, 0x8a, 0xf5, 0x65, 0x51,
11584
-	0x83, 0xea, 0xb0, 0x3a, 0x92, 0x93, 0x08, 0x37, 0x52, 0xd2, 0xd6, 0xbd, 0x65, 0xb6, 0x5e, 0x99,
11585
-	0xe7, 0x4e, 0xfa, 0xeb, 0x8b, 0xad, 0x15, 0x1c, 0xe9, 0xfe, 0x3a, 0xc1, 0xf7, 0x9f, 0x09, 0xb8,
11586
-	0xd6, 0x64, 0xce, 0xdc, 0x3a, 0x94, 0x20, 0x37, 0x60, 0x21, 0x9f, 0x39, 0x28, 0x71, 0x1b, 0x3d,
11587
-	0x80, 0xdc, 0x48, 0x6f, 0x9f, 0xde, 0xfd, 0x3b, 0xcb, 0x5d, 0x56, 0x18, 0x1c, 0xa3, 0xd1, 0x43,
11588
-	0xc8, 0x07, 0x51, 0x4c, 0x6c, 0xa4, 0xde, 0x24, 0x70, 0xa6, 0x78, 0xf4, 0x7b, 0x90, 0x55, 0x9b,
11589
-	0xb0, 0x91, 0x96, 0x9a, 0xf7, 0xde, 0x68, 0xcd, 0xb1, 0x56, 0x32, 0x7f, 0x91, 0x00, 0x03, 0x93,
11590
-	0x23, 0xbe, 0x4f, 0x87, 0x87, 0x34, 0x68, 0x73, 0xc2, 0xc7, 0x21, 0xba, 0x05, 0x59, 0x8f, 0x12,
11591
-	0x87, 0x06, 0x72, 0x92, 0x39, 0xac, 0x5b, 0xa8, 0x2b, 0x82, 0x9c, 0xd8, 0x03, 0x72, 0xe8, 0x7a,
11592
-	0x2e, 0x9f, 0xc8, 0x69, 0xae, 0x2f, 0xdf, 0xe5, 0x45, 0x9b, 0x15, 0x3c, 0xa3, 0x88, 0xe7, 0xcc,
11593
-	0xa0, 0x0d, 0x58, 0x1d, 0xd2, 0x30, 0x24, 0x7d, 0x2a, 0x67, 0x9f, 0xc7, 0x51, 0xd3, 0x7c, 0x08,
11594
-	0xc5, 0x59, 0x3d, 0x54, 0x80, 0xd5, 0x6e, 0xf3, 0x49, 0xf3, 0xe0, 0x59, 0xd3, 0x58, 0x41, 0xd7,
11595
-	0xa0, 0xd0, 0x6d, 0xe2, 0xba, 0x55, 0x7d, 0x6c, 0xed, 0xec, 0xd5, 0x8d, 0x04, 0x5a, 0x83, 0xfc,
11596
-	0xb4, 0x99, 0x34, 0x7f, 0x96, 0x00, 0x10, 0x1b, 0xa8, 0x27, 0xf5, 0x19, 0x64, 0x42, 0x4e, 0xb8,
11597
-	0xda, 0xb8, 0xf5, 0xed, 0xf7, 0x96, 0x79, 0x3d, 0x85, 0x57, 0xc4, 0x0f, 0xc5, 0x4a, 0x65, 0xd6,
11598
-	0xc3, 0xe4, 0xa2, 0x87, 0x19, 0x89, 0x9c, 0x77, 0x2d, 0x07, 0xe9, 0x9a, 0xf8, 0x4a, 0xa0, 0x3c,
11599
-	0x64, 0x70, 0xdd, 0xaa, 0x7d, 0x69, 0x24, 0x91, 0x01, 0xc5, 0x5a, 0xa3, 0x5d, 0x3d, 0x68, 0x36,
11600
-	0xeb, 0xd5, 0x4e, 0xbd, 0x66, 0xa4, 0xcc, 0x7b, 0x90, 0x69, 0x0c, 0x49, 0x9f, 0xa2, 0x3b, 0x22,
11601
-	0x02, 0x8e, 0x68, 0x40, 0x7d, 0x3b, 0x0a, 0xac, 0xa9, 0xc0, 0xfc, 0x79, 0x1e, 0x32, 0xfb, 0x6c,
11602
-	0xec, 0x73, 0xb4, 0x3d, 0x73, 0x8a, 0xd7, 0xb7, 0x37, 0x97, 0x4d, 0x41, 0x02, 0x2b, 0x9d, 0xc9,
11603
-	0x88, 0xea, 0x53, 0x7e, 0x0b, 0xb2, 0x2a, 0x56, 0xb4, 0xeb, 0xba, 0x25, 0xe4, 0x9c, 0x04, 0x7d,
11604
-	0xca, 0xf5, 0xa2, 0xeb, 0x16, 0xba, 0x0f, 0xb9, 0x80, 0x12, 0x87, 0xf9, 0xde, 0x44, 0x86, 0x54,
11605
-	0x4e, 0xa5, 0x59, 0x4c, 0x89, 0x73, 0xe0, 0x7b, 0x13, 0x1c, 0xf7, 0xa2, 0xc7, 0x50, 0x3c, 0x74,
11606
-	0x7d, 0xa7, 0xc7, 0x46, 0x2a, 0xe7, 0x65, 0x5e, 0x1f, 0x80, 0xca, 0xab, 0x1d, 0xd7, 0x77, 0x0e,
11607
-	0x14, 0x18, 0x17, 0x0e, 0xa7, 0x0d, 0xd4, 0x84, 0xf5, 0x13, 0xe6, 0x8d, 0x87, 0x34, 0xb6, 0x95,
11608
-	0x95, 0xb6, 0xde, 0x7f, 0xbd, 0xad, 0xa7, 0x12, 0x1f, 0x59, 0x5b, 0x3b, 0x99, 0x6d, 0xa2, 0x27,
11609
-	0xb0, 0xc6, 0x87, 0xa3, 0xa3, 0x30, 0x36, 0xb7, 0x2a, 0xcd, 0x7d, 0xff, 0x8a, 0x05, 0x13, 0xf0,
11610
-	0xc8, 0x5a, 0x91, 0xcf, 0xb4, 0x4a, 0x7f, 0x96, 0x82, 0xc2, 0x8c, 0xe7, 0xa8, 0x0d, 0x85, 0x51,
11611
-	0xc0, 0x46, 0xa4, 0x2f, 0xf3, 0xb6, 0xde, 0x8b, 0x4f, 0xde, 0x68, 0xd6, 0x95, 0xd6, 0x54, 0x11,
11612
-	0xcf, 0x5a, 0x31, 0xcf, 0x93, 0x50, 0x98, 0xe9, 0x44, 0x1f, 0x40, 0x0e, 0xb7, 0x70, 0xe3, 0xa9,
11613
-	0xd5, 0xa9, 0x1b, 0x2b, 0xa5, 0x3b, 0x67, 0xe7, 0xe5, 0x0d, 0x69, 0x6d, 0xd6, 0x40, 0x2b, 0x70,
11614
-	0x4f, 0x44, 0xe8, 0xdd, 0x87, 0xd5, 0x08, 0x9a, 0x28, 0xbd, 0x7d, 0x76, 0x5e, 0x7e, 0x6b, 0x11,
11615
-	0x3a, 0x83, 0xc4, 0xed, 0xc7, 0x16, 0xae, 0xd7, 0x8c, 0xe4, 0x72, 0x24, 0x6e, 0x0f, 0x48, 0x40,
11616
-	0x1d, 0xf4, 0x7d, 0xc8, 0x6a, 0x60, 0xaa, 0x54, 0x3a, 0x3b, 0x2f, 0xdf, 0x5a, 0x04, 0x4e, 0x71,
11617
-	0xb8, 0xbd, 0x67, 0x3d, 0xad, 0x1b, 0xe9, 0xe5, 0x38, 0xdc, 0xf6, 0xc8, 0x09, 0x45, 0xef, 0x41,
11618
-	0x46, 0xc1, 0x32, 0xa5, 0xdb, 0x67, 0xe7, 0xe5, 0xef, 0xbd, 0x62, 0x4e, 0xa0, 0x4a, 0x1b, 0x7f,
11619
-	0xf1, 0x37, 0x9b, 0x2b, 0xff, 0xf4, 0xb7, 0x9b, 0xc6, 0x62, 0x77, 0xe9, 0x7f, 0x13, 0xb0, 0x36,
11620
-	0xb7, 0xe5, 0xc8, 0x84, 0xac, 0xcf, 0x6c, 0x36, 0x52, 0xe9, 0x3c, 0xb7, 0x03, 0x97, 0x17, 0x5b,
11621
-	0xd9, 0x26, 0xab, 0xb2, 0xd1, 0x04, 0xeb, 0x1e, 0xf4, 0x64, 0xe1, 0x42, 0xfa, 0xf4, 0x0d, 0xe3,
11622
-	0x69, 0xe9, 0x95, 0xf4, 0x39, 0xac, 0x39, 0x81, 0x7b, 0x42, 0x83, 0x9e, 0xcd, 0xfc, 0x23, 0xb7,
11623
-	0xaf, 0x53, 0x75, 0x69, 0x99, 0xcd, 0x9a, 0x04, 0xe2, 0xa2, 0x52, 0xa8, 0x4a, 0xfc, 0xaf, 0x71,
11624
-	0x19, 0x95, 0x9e, 0x42, 0x71, 0x36, 0x42, 0xd1, 0x3b, 0x00, 0xa1, 0xfb, 0x47, 0x54, 0xf3, 0x1b,
11625
-	0xc9, 0x86, 0x70, 0x5e, 0x48, 0x24, 0xbb, 0x41, 0xef, 0x43, 0x7a, 0xc8, 0x1c, 0x65, 0x27, 0xb3,
11626
-	0x73, 0x43, 0xdc, 0x89, 0xff, 0x76, 0xb1, 0x55, 0x60, 0x61, 0x65, 0xd7, 0xf5, 0xe8, 0x3e, 0x73,
11627
-	0x28, 0x96, 0x00, 0xf3, 0x04, 0xd2, 0x22, 0x55, 0xa0, 0xb7, 0x21, 0xbd, 0xd3, 0x68, 0xd6, 0x8c,
11628
-	0x95, 0xd2, 0xf5, 0xb3, 0xf3, 0xf2, 0x9a, 0x5c, 0x12, 0xd1, 0x21, 0x62, 0x17, 0x6d, 0x41, 0xf6,
11629
-	0xe9, 0xc1, 0x5e, 0x77, 0x5f, 0x84, 0xd7, 0x8d, 0xb3, 0xf3, 0xf2, 0xb5, 0xb8, 0x5b, 0x2d, 0x1a,
11630
-	0x7a, 0x07, 0x32, 0x9d, 0xfd, 0xd6, 0x6e, 0xdb, 0x48, 0x96, 0xd0, 0xd9, 0x79, 0x79, 0x3d, 0xee,
11631
-	0x97, 0x3e, 0x97, 0xae, 0xeb, 0x5d, 0xcd, 0xc7, 0x72, 0xf3, 0x7f, 0x92, 0xb0, 0x86, 0x05, 0xbf,
11632
-	0x0d, 0x78, 0x8b, 0x79, 0xae, 0x3d, 0x41, 0x2d, 0xc8, 0xdb, 0xcc, 0x77, 0xdc, 0x99, 0x33, 0xb5,
11633
-	0xfd, 0x9a, 0x4b, 0x70, 0xaa, 0x15, 0xb5, 0xaa, 0x91, 0x26, 0x9e, 0x1a, 0x41, 0xdb, 0x90, 0x71,
11634
-	0xa8, 0x47, 0x26, 0x57, 0xdd, 0xc6, 0x35, 0xcd, 0xa5, 0xb1, 0x82, 0x4a, 0xe6, 0x48, 0x9e, 0xf7,
11635
-	0x08, 0xe7, 0x74, 0x38, 0xe2, 0xea, 0x36, 0x4e, 0xe3, 0xc2, 0x90, 0x3c, 0xb7, 0xb4, 0x08, 0xfd,
11636
-	0x08, 0xb2, 0xa7, 0xae, 0xef, 0xb0, 0x53, 0x7d, 0xe1, 0x5e, 0x6d, 0x57, 0x63, 0xcd, 0x33, 0x71,
11637
-	0xcf, 0x2e, 0x38, 0x2b, 0x56, 0xbd, 0x79, 0xd0, 0xac, 0x47, 0xab, 0xae, 0xfb, 0x0f, 0xfc, 0x26,
11638
-	0xf3, 0xc5, 0x89, 0x81, 0x83, 0x66, 0x6f, 0xd7, 0x6a, 0xec, 0x75, 0xb1, 0x58, 0xf9, 0x9b, 0x67,
11639
-	0xe7, 0x65, 0x23, 0x86, 0xec, 0x12, 0xd7, 0x13, 0x24, 0xf0, 0x36, 0xa4, 0xac, 0xe6, 0x97, 0x46,
11640
-	0xb2, 0x64, 0x9c, 0x9d, 0x97, 0x8b, 0x71, 0xb7, 0xe5, 0x4f, 0xa6, 0x87, 0x69, 0x71, 0x5c, 0xf3,
11641
-	0xbf, 0x12, 0x50, 0xec, 0x8e, 0x1c, 0xc2, 0xa9, 0x8a, 0x4c, 0x54, 0x86, 0xc2, 0x88, 0x04, 0xc4,
11642
-	0xf3, 0xa8, 0xe7, 0x86, 0x43, 0x5d, 0x28, 0xcc, 0x8a, 0xd0, 0x83, 0xef, 0xb0, 0x98, 0x9a, 0x84,
11643
-	0xe9, 0x25, 0xed, 0xc2, 0xfa, 0x91, 0x72, 0xb6, 0x47, 0x6c, 0xb9, 0xbb, 0x29, 0xb9, 0xbb, 0x95,
11644
-	0x65, 0x26, 0x66, 0xbd, 0xaa, 0xe8, 0x39, 0x5a, 0x52, 0x0b, 0xaf, 0x1d, 0xcd, 0x36, 0xcd, 0xfb,
11645
-	0xb0, 0x36, 0xd7, 0x2f, 0x6e, 0xda, 0x96, 0xd5, 0x6d, 0xd7, 0x8d, 0x15, 0x54, 0x84, 0x5c, 0xf5,
11646
-	0xa0, 0xd9, 0x69, 0x34, 0xbb, 0x75, 0x23, 0x61, 0xfe, 0x43, 0x32, 0x9a, 0xad, 0x66, 0x02, 0x3b,
11647
-	0xf3, 0x4c, 0xe0, 0xc3, 0xd7, 0x3b, 0xa2, 0xb9, 0xc0, 0xb4, 0x11, 0x33, 0x82, 0xdf, 0x05, 0x90,
11648
-	0x8b, 0x4a, 0x9d, 0x1e, 0xe1, 0x57, 0xb1, 0xfd, 0x4e, 0x54, 0xc7, 0xe1, 0xbc, 0x56, 0xb0, 0x38,
11649
-	0xfa, 0x02, 0x8a, 0x36, 0x1b, 0x8e, 0x3c, 0xaa, 0xf5, 0x53, 0x6f, 0xa2, 0x5f, 0x88, 0x55, 0x2c,
11650
-	0x3e, 0xcb, 0x48, 0xd2, 0xf3, 0x8c, 0xa4, 0x0a, 0x85, 0x19, 0x7f, 0xe7, 0x79, 0x49, 0x11, 0x72,
11651
-	0xdd, 0x56, 0xcd, 0xea, 0x34, 0x9a, 0x8f, 0x8c, 0x04, 0x02, 0xc8, 0xca, 0x15, 0xab, 0x19, 0x49,
11652
-	0xc1, 0x9d, 0xaa, 0x07, 0xfb, 0xad, 0xbd, 0xba, 0x62, 0x26, 0x7f, 0x02, 0xd7, 0xaa, 0xcc, 0xe7,
11653
-	0xc4, 0xf5, 0x63, 0x52, 0xb8, 0x2d, 0x7c, 0xd6, 0xa2, 0x9e, 0xeb, 0xa8, 0xbc, 0xb5, 0x73, 0xed,
11654
-	0xf2, 0x62, 0xab, 0x10, 0x43, 0x1b, 0x35, 0xe1, 0x65, 0xd4, 0x70, 0x44, 0x74, 0x8e, 0x5c, 0x47,
11655
-	0xa7, 0xa1, 0xd5, 0xcb, 0x8b, 0xad, 0x54, 0xab, 0x51, 0xc3, 0x42, 0x86, 0xde, 0x86, 0x3c, 0x7d,
11656
-	0xee, 0xf2, 0x9e, 0x2d, 0xf2, 0x94, 0x98, 0x7f, 0x06, 0xe7, 0x84, 0xa0, 0x2a, 0xd2, 0xd2, 0x9f,
11657
-	0x26, 0x01, 0x3a, 0x24, 0x3c, 0xd6, 0x43, 0x3f, 0x84, 0x7c, 0x5c, 0x0e, 0x5f, 0x55, 0x96, 0xcd,
11658
-	0xac, 0x75, 0x8c, 0x47, 0x9f, 0x46, 0xbb, 0xad, 0xd8, 0xea, 0x72, 0x45, 0x3d, 0xd6, 0x32, 0xc2,
11659
-	0x37, 0x4f, 0x49, 0x45, 0xd6, 0xa6, 0x41, 0xa0, 0x17, 0x5d, 0x7c, 0xa2, 0xaa, 0xcc, 0x5c, 0x6a,
11660
-	0xce, 0x9a, 0x03, 0xdd, 0x5d, 0x36, 0xc8, 0xc2, 0x82, 0x3e, 0x5e, 0xc1, 0x53, 0xbd, 0x1d, 0x03,
11661
-	0xd6, 0x83, 0xb1, 0x2f, 0xbc, 0xee, 0x85, 0xb2, 0xdb, 0x74, 0xe1, 0xad, 0x26, 0xe5, 0xa7, 0x2c,
11662
-	0x38, 0xb6, 0x38, 0x27, 0xf6, 0x40, 0x94, 0xa7, 0xfa, 0xb8, 0x4e, 0xa9, 0x5b, 0x62, 0x8e, 0xba,
11663
-	0x6d, 0xc0, 0x2a, 0xf1, 0x5c, 0x12, 0x52, 0x75, 0xdf, 0xe5, 0x71, 0xd4, 0x14, 0x04, 0x93, 0x38,
11664
-	0x4e, 0x40, 0xc3, 0x90, 0xaa, 0x82, 0x2a, 0x8f, 0xa7, 0x02, 0xf3, 0x5f, 0x92, 0x00, 0x8d, 0x96,
11665
-	0xb5, 0xaf, 0xcd, 0xd7, 0x20, 0x7b, 0x44, 0x86, 0xae, 0x37, 0xb9, 0xea, 0x80, 0x4c, 0xf1, 0x15,
11666
-	0x4b, 0x19, 0xda, 0x95, 0x3a, 0x58, 0xeb, 0x4a, 0xde, 0x39, 0x3e, 0xf4, 0x29, 0x8f, 0x79, 0xa7,
11667
-	0x6c, 0x89, 0x4b, 0x2e, 0x20, 0x7e, 0xbc, 0xb0, 0xaa, 0x21, 0x5c, 0xef, 0x13, 0x4e, 0x4f, 0xc9,
11668
-	0x24, 0x8a, 0x67, 0xdd, 0x44, 0x8f, 0x05, 0x1f, 0x15, 0x65, 0x32, 0x75, 0x36, 0x32, 0xf2, 0x16,
11669
-	0xff, 0x36, 0x7f, 0xb0, 0x86, 0xab, 0xeb, 0x3b, 0xd6, 0x2e, 0x3d, 0x94, 0x77, 0xce, 0xb4, 0xeb,
11670
-	0x3b, 0x95, 0x83, 0x1f, 0xc3, 0xda, 0xdc, 0x3c, 0x5f, 0x21, 0xfc, 0x8d, 0xd6, 0xd3, 0x1f, 0x19,
11671
-	0x69, 0xfd, 0xf5, 0x3b, 0x46, 0xd6, 0xfc, 0xef, 0x04, 0x40, 0x8b, 0x05, 0xd1, 0xa6, 0x2d, 0x7f,
11672
-	0x60, 0xc9, 0xc9, 0xe7, 0x1a, 0x9b, 0x79, 0x3a, 0x3c, 0x97, 0x32, 0xde, 0xa9, 0x15, 0x41, 0x20,
11673
-	0x25, 0x1c, 0xc7, 0x8a, 0x68, 0x0b, 0x0a, 0x6a, 0xff, 0x7b, 0x23, 0x16, 0xa8, 0x5c, 0xb2, 0x86,
11674
-	0x41, 0x89, 0x84, 0xa6, 0xa8, 0xde, 0x47, 0xe3, 0x43, 0xcf, 0x0d, 0x07, 0xd4, 0x51, 0x98, 0xb4,
11675
-	0xc4, 0xac, 0xc5, 0x52, 0x01, 0x33, 0x6b, 0x90, 0x8b, 0xac, 0xa3, 0x0d, 0x48, 0x75, 0xaa, 0x2d,
11676
-	0x63, 0xa5, 0x74, 0xed, 0xec, 0xbc, 0x5c, 0x88, 0xc4, 0x9d, 0x6a, 0x4b, 0xf4, 0x74, 0x6b, 0x2d,
11677
-	0x23, 0x31, 0xdf, 0xd3, 0xad, 0xb5, 0x4a, 0x69, 0x71, 0xdf, 0x98, 0x7f, 0x95, 0x80, 0xac, 0x62,
11678
-	0x3f, 0x4b, 0x67, 0x6c, 0xc1, 0x6a, 0xc4, 0xc9, 0x15, 0x25, 0x7b, 0xff, 0xf5, 0xf4, 0xa9, 0xa2,
11679
-	0xd9, 0x8e, 0xda, 0xc7, 0x48, 0xaf, 0xf4, 0x19, 0x14, 0x67, 0x3b, 0xbe, 0xd3, 0x2e, 0xfe, 0x31,
11680
-	0x14, 0x44, 0xa0, 0x44, 0x34, 0x6a, 0x1b, 0xb2, 0x8a, 0xa1, 0xe9, 0xac, 0x72, 0x15, 0x97, 0xd3,
11681
-	0x48, 0xf4, 0x00, 0x56, 0x15, 0xff, 0x8b, 0x5e, 0x26, 0x36, 0xaf, 0x0e, 0x47, 0x1c, 0xc1, 0xcd,
11682
-	0xcf, 0x21, 0xdd, 0xa2, 0x34, 0x40, 0x77, 0x61, 0xd5, 0x67, 0x0e, 0x9d, 0x26, 0x51, 0x4d, 0x5d,
11683
-	0x1d, 0xda, 0xa8, 0x09, 0xea, 0xea, 0xd0, 0x86, 0x23, 0x16, 0x4f, 0x1c, 0xd0, 0xe8, 0x71, 0x46,
11684
-	0x7c, 0x9b, 0x1d, 0x28, 0x3e, 0xa3, 0x6e, 0x7f, 0xc0, 0xa9, 0x23, 0x0d, 0x7d, 0x08, 0xe9, 0x11,
11685
-	0x8d, 0x9d, 0xdf, 0x58, 0x1a, 0x3a, 0x94, 0x06, 0x58, 0xa2, 0xc4, 0x81, 0x3c, 0x95, 0xda, 0xfa,
11686
-	0x3d, 0x4c, 0xb7, 0xcc, 0xbf, 0x4f, 0xc2, 0x7a, 0x23, 0x0c, 0xc7, 0xc4, 0xb7, 0xa3, 0x1b, 0xf2,
11687
-	0x27, 0xf3, 0x37, 0xe4, 0xfd, 0xa5, 0x33, 0x9c, 0x53, 0x99, 0xaf, 0x97, 0x75, 0x92, 0x4c, 0xc6,
11688
-	0x49, 0xd2, 0xfc, 0x3a, 0x11, 0x15, 0xca, 0xf7, 0x66, 0xce, 0x4d, 0x69, 0xe3, 0xec, 0xbc, 0x7c,
11689
-	0x73, 0xd6, 0x12, 0xed, 0xfa, 0xc7, 0x3e, 0x3b, 0xf5, 0xd1, 0xbb, 0xa2, 0x70, 0x6e, 0xd6, 0x9f,
11690
-	0x19, 0x89, 0xd2, 0xad, 0xb3, 0xf3, 0x32, 0x9a, 0x03, 0x61, 0xea, 0xd3, 0x53, 0x61, 0xa9, 0x55,
11691
-	0x6f, 0xd6, 0xc4, 0x65, 0x96, 0x5c, 0x62, 0xa9, 0x45, 0x7d, 0xc7, 0xf5, 0xfb, 0xe8, 0x2e, 0x64,
11692
-	0x1b, 0xed, 0x76, 0x57, 0x96, 0x32, 0x6f, 0x9d, 0x9d, 0x97, 0x6f, 0xcc, 0xa1, 0x44, 0x83, 0x3a,
11693
-	0x02, 0x24, 0xa8, 0x56, 0xbd, 0x66, 0xa4, 0x97, 0x80, 0x04, 0xd3, 0xa0, 0x8e, 0x8e, 0xf0, 0x7f,
11694
-	0x4f, 0x82, 0x61, 0xd9, 0x36, 0x1d, 0x71, 0xd1, 0xaf, 0xe9, 0x6b, 0x07, 0x72, 0x23, 0xf1, 0xe5,
11695
-	0x4a, 0x3a, 0x2e, 0xc2, 0xe2, 0xc1, 0xd2, 0xc7, 0xd2, 0x05, 0xbd, 0x0a, 0x66, 0x1e, 0xb5, 0x9c,
11696
-	0xa1, 0x1b, 0x86, 0xa2, 0x4c, 0x93, 0x32, 0x1c, 0x5b, 0x2a, 0xfd, 0x32, 0x01, 0x37, 0x96, 0x20,
11697
-	0xd0, 0xc7, 0x90, 0x0e, 0x98, 0x17, 0x6d, 0xcf, 0x9d, 0xd7, 0x3d, 0x65, 0x08, 0x55, 0x2c, 0x91,
11698
-	0x68, 0x13, 0x80, 0x8c, 0x39, 0x23, 0x72, 0x7c, 0xb9, 0x31, 0x39, 0x3c, 0x23, 0x41, 0xcf, 0x20,
11699
-	0x1b, 0x52, 0x3b, 0xa0, 0x11, 0x17, 0xf9, 0xfc, 0xff, 0xeb, 0x7d, 0xa5, 0x2d, 0xcd, 0x60, 0x6d,
11700
-	0xae, 0x54, 0x81, 0xac, 0x92, 0x88, 0x88, 0x76, 0x08, 0x27, 0xd2, 0xe9, 0x22, 0x96, 0xdf, 0x22,
11701
-	0x50, 0x88, 0xd7, 0x8f, 0x02, 0x85, 0x78, 0x7d, 0xf3, 0x67, 0x49, 0x80, 0xfa, 0x73, 0x4e, 0x03,
11702
-	0x9f, 0x78, 0x55, 0x0b, 0xd5, 0x67, 0x32, 0xa4, 0x9a, 0xed, 0x0f, 0x96, 0x3e, 0x70, 0xc5, 0x1a,
11703
-	0x95, 0xaa, 0xb5, 0x24, 0x47, 0xde, 0x86, 0xd4, 0x38, 0xf0, 0xf4, 0x63, 0xa9, 0x24, 0x22, 0x5d,
11704
-	0xbc, 0x87, 0x85, 0x0c, 0xd5, 0xa7, 0x19, 0x29, 0xf5, 0xfa, 0x57, 0xee, 0x99, 0x01, 0x7e, 0xf3,
11705
-	0x59, 0xe9, 0x43, 0x80, 0xa9, 0xd7, 0x68, 0x13, 0x32, 0xd5, 0xdd, 0x76, 0x7b, 0xcf, 0x58, 0x51,
11706
-	0xd5, 0xd6, 0xb4, 0x4b, 0x8a, 0xcd, 0xbf, 0x4b, 0x40, 0xae, 0x6a, 0xe9, 0x5b, 0x65, 0x17, 0x0c,
11707
-	0x99, 0x4b, 0x6c, 0x1a, 0xf0, 0x1e, 0x7d, 0x3e, 0x72, 0x83, 0x89, 0x4e, 0x07, 0x57, 0xd7, 0x25,
11708
-	0xeb, 0x42, 0xab, 0x4a, 0x03, 0x5e, 0x97, 0x3a, 0x08, 0x43, 0x91, 0xea, 0x29, 0xf6, 0x6c, 0x12,
11709
-	0x25, 0xe7, 0xcd, 0xab, 0x97, 0x42, 0xb1, 0xbf, 0x69, 0x3b, 0xc4, 0x85, 0xc8, 0x48, 0x95, 0x84,
11710
-	0xe6, 0x53, 0xb8, 0x71, 0x10, 0xd8, 0x03, 0x1a, 0x72, 0x35, 0xa8, 0x76, 0xf9, 0x73, 0xb8, 0xc3,
11711
-	0x49, 0x78, 0xdc, 0x1b, 0xb8, 0x21, 0x67, 0xc1, 0xa4, 0x17, 0x50, 0x4e, 0x7d, 0xd1, 0xdf, 0x93,
11712
-	0x6f, 0xe9, 0xba, 0x9a, 0xbd, 0x2d, 0x30, 0x8f, 0x15, 0x04, 0x47, 0x88, 0x3d, 0x01, 0x30, 0x1b,
11713
-	0x50, 0x14, 0x84, 0xad, 0x46, 0x8f, 0xc8, 0xd8, 0xe3, 0x21, 0xfa, 0x31, 0x80, 0xc7, 0xfa, 0xbd,
11714
-	0x37, 0xce, 0xe4, 0x79, 0x8f, 0xf5, 0xd5, 0xa7, 0xf9, 0xfb, 0x60, 0xd4, 0xdc, 0x70, 0x44, 0xb8,
11715
-	0x3d, 0x88, 0xca, 0x74, 0xf4, 0x08, 0x8c, 0x01, 0x25, 0x01, 0x3f, 0xa4, 0x84, 0xf7, 0x46, 0x34,
11716
-	0x70, 0x99, 0xf3, 0x46, 0x4b, 0x7a, 0x2d, 0xd6, 0x6a, 0x49, 0x25, 0xf3, 0x57, 0x09, 0x00, 0x4c,
11717
-	0x8e, 0x22, 0x02, 0xf0, 0x43, 0xb8, 0x1e, 0xfa, 0x64, 0x14, 0x0e, 0x18, 0xef, 0xb9, 0x3e, 0xa7,
11718
-	0xc1, 0x09, 0xf1, 0x74, 0xa9, 0x65, 0x44, 0x1d, 0x0d, 0x2d, 0x47, 0x1f, 0x02, 0x3a, 0xa6, 0x74,
11719
-	0xd4, 0x63, 0x9e, 0xd3, 0x8b, 0x3a, 0xd5, 0x63, 0x7f, 0x1a, 0x1b, 0xa2, 0xe7, 0xc0, 0x73, 0xda,
11720
-	0x91, 0x1c, 0xed, 0xc0, 0xa6, 0x58, 0x01, 0xea, 0xf3, 0xc0, 0xa5, 0x61, 0xef, 0x88, 0x05, 0xbd,
11721
-	0xd0, 0x63, 0xa7, 0xbd, 0x23, 0xe6, 0x79, 0xec, 0x94, 0x06, 0x51, 0x21, 0x5b, 0xf2, 0x58, 0xbf,
11722
-	0xae, 0x40, 0xbb, 0x2c, 0x68, 0x7b, 0xec, 0x74, 0x37, 0x42, 0x08, 0x96, 0x30, 0x9d, 0x36, 0x77,
11723
-	0xed, 0xe3, 0x88, 0x25, 0xc4, 0xd2, 0x8e, 0x6b, 0x1f, 0xa3, 0xbb, 0xb0, 0x46, 0x3d, 0x2a, 0x4b,
11724
-	0x2e, 0x85, 0xca, 0x48, 0x54, 0x31, 0x12, 0x0a, 0x90, 0xf9, 0x5b, 0x90, 0x6f, 0x79, 0xc4, 0x96,
11725
-	0x7f, 0xa9, 0x88, 0xe2, 0xd2, 0x66, 0xbe, 0x08, 0x02, 0xd7, 0xe7, 0x2a, 0x3b, 0xe6, 0xf1, 0xac,
11726
-	0xc8, 0xfc, 0x09, 0xc0, 0x4f, 0x99, 0xeb, 0x77, 0xd8, 0x31, 0xf5, 0xe5, 0xeb, 0xb3, 0x60, 0xbd,
11727
-	0x7a, 0x2b, 0xf3, 0x58, 0xb7, 0x24, 0x27, 0x27, 0x3e, 0xe9, 0xd3, 0x20, 0x7e, 0x84, 0x55, 0x4d,
11728
-	0x71, 0xb9, 0x64, 0x31, 0x63, 0xbc, 0x6a, 0xa1, 0x32, 0x64, 0x6d, 0xd2, 0x8b, 0x4e, 0x5e, 0x71,
11729
-	0x27, 0x7f, 0x79, 0xb1, 0x95, 0xa9, 0x5a, 0x4f, 0xe8, 0x04, 0x67, 0x6c, 0xf2, 0x84, 0x4e, 0xc4,
11730
-	0xed, 0x6b, 0x13, 0x79, 0x5e, 0xa4, 0x99, 0xa2, 0xba, 0x7d, 0xab, 0x96, 0x38, 0x0c, 0x38, 0x6b,
11731
-	0x13, 0xf1, 0x8b, 0x3e, 0x86, 0xa2, 0x06, 0xf5, 0x06, 0x24, 0x1c, 0x28, 0xae, 0xba, 0xb3, 0x7e,
11732
-	0x79, 0xb1, 0x05, 0x0a, 0xf9, 0x98, 0x84, 0x03, 0x0c, 0x0a, 0x2d, 0xbe, 0x51, 0x1d, 0x0a, 0x5f,
11733
-	0x31, 0xd7, 0xef, 0x71, 0x39, 0x09, 0xfd, 0x36, 0xb0, 0xf4, 0xfc, 0x4c, 0xa7, 0xaa, 0x0b, 0x65,
11734
-	0xf8, 0x2a, 0x96, 0x98, 0xff, 0x9a, 0x80, 0x82, 0xb0, 0xe9, 0x1e, 0xb9, 0xb6, 0xb8, 0x2d, 0xbf,
11735
-	0x7b, 0xa6, 0xbf, 0x0d, 0x29, 0x3b, 0x0c, 0xf4, 0xdc, 0x64, 0xaa, 0xab, 0xb6, 0x31, 0x16, 0x32,
11736
-	0xf4, 0x05, 0x64, 0x55, 0x71, 0xa1, 0x93, 0xbc, 0xf9, 0xed, 0xf7, 0xba, 0x76, 0x51, 0xeb, 0xc9,
11737
-	0xbd, 0x9c, 0x7a, 0x27, 0x67, 0x59, 0xc4, 0xb3, 0x22, 0x74, 0x0b, 0x92, 0xb6, 0x2f, 0x83, 0x42,
11738
-	0xff, 0x2b, 0x55, 0x6d, 0xe2, 0xa4, 0xed, 0x9b, 0xff, 0x9c, 0x80, 0xb5, 0xba, 0x6f, 0x07, 0x13,
11739
-	0x99, 0x24, 0xc5, 0x46, 0xdc, 0x81, 0x7c, 0x38, 0x3e, 0x0c, 0x27, 0x21, 0xa7, 0xc3, 0xe8, 0xd1,
11740
-	0x3b, 0x16, 0xa0, 0x06, 0xe4, 0x89, 0xd7, 0x67, 0x81, 0xcb, 0x07, 0x43, 0xcd, 0x8d, 0x97, 0x27,
11741
-	0xe6, 0x59, 0x9b, 0x15, 0x2b, 0x52, 0xc1, 0x53, 0xed, 0x28, 0x15, 0xa7, 0xa4, 0xb3, 0x32, 0x15,
11742
-	0xbf, 0x0b, 0x45, 0x8f, 0x0c, 0x05, 0x15, 0xee, 0x89, 0x92, 0x4b, 0xce, 0x23, 0x8d, 0x0b, 0x5a,
11743
-	0x26, 0xca, 0x48, 0xd3, 0x84, 0x7c, 0x6c, 0x0c, 0x5d, 0x83, 0x82, 0x55, 0x6f, 0xf7, 0x3e, 0xd9,
11744
-	0x7e, 0xd0, 0x7b, 0x54, 0xdd, 0x37, 0x56, 0x34, 0x13, 0xf8, 0xc7, 0x04, 0xac, 0xed, 0xab, 0x18,
11745
-	0xd4, 0xc4, 0xe9, 0x2e, 0xac, 0x06, 0xe4, 0x88, 0x47, 0xd4, 0x2e, 0xad, 0x82, 0x4b, 0x24, 0x01,
11746
-	0x41, 0xed, 0x44, 0xd7, 0x72, 0x6a, 0x37, 0xf3, 0x97, 0x4b, 0xea, 0xca, 0xbf, 0x5c, 0xd2, 0xbf,
11747
-	0x91, 0xbf, 0x5c, 0x3e, 0xf8, 0x55, 0x0a, 0xf2, 0x71, 0xd1, 0x2b, 0x42, 0x46, 0x30, 0xad, 0x15,
11748
-	0xf5, 0x88, 0x14, 0xcb, 0x9b, 0x92, 0x63, 0xe5, 0xad, 0xbd, 0xbd, 0x83, 0xaa, 0xd5, 0xa9, 0xd7,
11749
-	0x8c, 0x2f, 0x14, 0x15, 0x8b, 0x01, 0x96, 0xe7, 0x31, 0xb1, 0xe9, 0x0e, 0x32, 0xa7, 0x54, 0xec,
11750
-	0x85, 0x7e, 0xaa, 0x8a, 0x51, 0x11, 0x0f, 0x7b, 0x0f, 0x72, 0x56, 0xbb, 0xdd, 0x78, 0xd4, 0xac,
11751
-	0xd7, 0x8c, 0x97, 0x89, 0xd2, 0xf7, 0xce, 0xce, 0xcb, 0xd7, 0xa7, 0xa6, 0xc2, 0xd0, 0xed, 0xfb,
11752
-	0xd4, 0x91, 0xa8, 0x6a, 0xb5, 0xde, 0x12, 0xe3, 0xbd, 0x48, 0x2e, 0xa2, 0x24, 0x01, 0x91, 0xcf,
11753
-	0xce, 0xf9, 0x16, 0xae, 0xb7, 0x2c, 0x2c, 0x46, 0x7c, 0x99, 0x5c, 0xf0, 0xab, 0x15, 0xd0, 0x11,
11754
-	0x09, 0xc4, 0x98, 0x9b, 0xd1, 0xdf, 0x2f, 0x2f, 0x52, 0xea, 0x69, 0x72, 0x5a, 0xe9, 0x53, 0xe2,
11755
-	0x4c, 0xc4, 0x68, 0xed, 0x8e, 0x85, 0xe5, 0x83, 0xc8, 0xcb, 0xd4, 0xc2, 0x68, 0x6d, 0x4e, 0x02,
11756
-	0x2e, 0xac, 0x98, 0xb0, 0x8a, 0xbb, 0xcd, 0xa6, 0x9c, 0x5d, 0x7a, 0x61, 0x76, 0x78, 0xec, 0xfb,
11757
-	0x02, 0x73, 0x0f, 0x72, 0xd1, 0x03, 0x8a, 0xf1, 0x32, 0xbd, 0xe0, 0x50, 0x35, 0x7a, 0xb9, 0x91,
11758
-	0x03, 0x3e, 0xee, 0x76, 0xe4, 0xbf, 0x43, 0x2f, 0x32, 0x8b, 0x03, 0x0e, 0xc6, 0xdc, 0x11, 0xe4,
11759
-	0xb7, 0x1c, 0xb3, 0xd1, 0x97, 0x19, 0x45, 0x02, 0x62, 0x8c, 0xa2, 0xa2, 0xc2, 0x0e, 0xae, 0xff,
11760
-	0x54, 0xfd, 0x91, 0xf4, 0x22, 0xbb, 0x60, 0x07, 0xd3, 0xaf, 0xa8, 0xcd, 0xa9, 0x33, 0x7d, 0x79,
11761
-	0x8d, 0xbb, 0x3e, 0xf8, 0x03, 0xc8, 0x45, 0x09, 0x03, 0x6d, 0x42, 0xf6, 0xd9, 0x01, 0x7e, 0x52,
11762
-	0xc7, 0xc6, 0x8a, 0x5a, 0x9d, 0xa8, 0xe7, 0x99, 0xca, 0xb8, 0x65, 0x58, 0xdd, 0xb7, 0x9a, 0xd6,
11763
-	0xa3, 0x3a, 0x8e, 0x5e, 0x7e, 0x23, 0x80, 0x8e, 0xfa, 0x92, 0xa1, 0x07, 0x88, 0x6d, 0xee, 0xdc,
11764
-	0xf9, 0xfa, 0x9b, 0xcd, 0x95, 0x5f, 0x7c, 0xb3, 0xb9, 0xf2, 0xcb, 0x6f, 0x36, 0x13, 0x2f, 0x2e,
11765
-	0x37, 0x13, 0x5f, 0x5f, 0x6e, 0x26, 0x7e, 0x7e, 0xb9, 0x99, 0xf8, 0x8f, 0xcb, 0xcd, 0xc4, 0x61,
11766
-	0x56, 0x32, 0xb2, 0x4f, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x7d, 0xe8, 0x50, 0x18, 0x0a, 0x21,
11767
-	0x00, 0x00,
11553
+	// 3518 bytes of a gzipped FileDescriptorProto
11554
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x59, 0x4d, 0x6c, 0x23, 0x47,
11555
+	0x76, 0x16, 0x7f, 0x45, 0x3e, 0x52, 0x9a, 0x9e, 0x9a, 0xd9, 0xb1, 0x86, 0x1e, 0x4b, 0x74, 0x8f,
11556
+	0x67, 0x3d, 0xeb, 0x75, 0x68, 0x5b, 0xde, 0x18, 0xb3, 0x9e, 0x64, 0xed, 0x16, 0x49, 0xcd, 0x70,
11557
+	0x47, 0xa2, 0x88, 0xa2, 0x38, 0x03, 0x23, 0x40, 0x1a, 0xa5, 0xee, 0x12, 0xd5, 0x56, 0xb3, 0x8b,
11558
+	0xe9, 0x2e, 0x4a, 0xc3, 0x04, 0x01, 0x26, 0x39, 0x24, 0x81, 0x4e, 0xb9, 0x07, 0xc2, 0x22, 0x48,
11559
+	0x90, 0x5b, 0xce, 0x01, 0x72, 0xf2, 0xd1, 0xc7, 0x0d, 0x02, 0x04, 0x8b, 0x04, 0x10, 0x62, 0xe5,
11560
+	0x98, 0xcb, 0x02, 0x41, 0xb0, 0x87, 0xe4, 0x10, 0xd4, 0x4f, 0x37, 0x7f, 0x86, 0x23, 0x8f, 0xb3,
11561
+	0x7b, 0x62, 0xd7, 0xab, 0xef, 0xbd, 0x7a, 0x55, 0xf5, 0xea, 0xd5, 0xf7, 0x8a, 0x50, 0xe2, 0xe3,
11562
+	0x21, 0x8d, 0x6a, 0xc3, 0x90, 0x71, 0x86, 0x90, 0xcb, 0x9c, 0x63, 0x1a, 0xd6, 0xa2, 0x53, 0x12,
11563
+	0x0e, 0x8e, 0x3d, 0x5e, 0x3b, 0xf9, 0xa8, 0x72, 0x9b, 0x7b, 0x03, 0x1a, 0x71, 0x32, 0x18, 0x7e,
11564
+	0x90, 0x7c, 0x29, 0x78, 0xe5, 0x0d, 0x77, 0x14, 0x12, 0xee, 0xb1, 0xe0, 0x83, 0xf8, 0x43, 0x77,
11565
+	0xdc, 0xec, 0xb3, 0x3e, 0x93, 0x9f, 0x1f, 0x88, 0x2f, 0x25, 0x35, 0x37, 0x60, 0xf9, 0x29, 0x0d,
11566
+	0x23, 0x8f, 0x05, 0xe8, 0x26, 0xe4, 0xbc, 0xc0, 0xa5, 0xcf, 0xd7, 0x52, 0xd5, 0xd4, 0xfd, 0x2c,
11567
+	0x56, 0x0d, 0xf3, 0xaf, 0x53, 0x50, 0xb2, 0x82, 0x80, 0x71, 0x69, 0x2b, 0x42, 0x08, 0xb2, 0x01,
11568
+	0x19, 0x50, 0x09, 0x2a, 0x62, 0xf9, 0x8d, 0xea, 0x90, 0xf7, 0xc9, 0x01, 0xf5, 0xa3, 0xb5, 0x74,
11569
+	0x35, 0x73, 0xbf, 0xb4, 0xf9, 0xc3, 0xda, 0xcb, 0x3e, 0xd7, 0xa6, 0x8c, 0xd4, 0x76, 0x24, 0xba,
11570
+	0x19, 0xf0, 0x70, 0x8c, 0xb5, 0x6a, 0xe5, 0xc7, 0x50, 0x9a, 0x12, 0x23, 0x03, 0x32, 0xc7, 0x74,
11571
+	0xac, 0x87, 0x11, 0x9f, 0xc2, 0xbf, 0x13, 0xe2, 0x8f, 0xe8, 0x5a, 0x5a, 0xca, 0x54, 0xe3, 0xd3,
11572
+	0xf4, 0x83, 0x94, 0xf9, 0x05, 0x14, 0x31, 0x8d, 0xd8, 0x28, 0x74, 0x68, 0x84, 0x7e, 0x00, 0xc5,
11573
+	0x80, 0x04, 0xcc, 0x76, 0x86, 0xa3, 0x48, 0xaa, 0x67, 0xb6, 0xca, 0x97, 0x17, 0x1b, 0x85, 0x36,
11574
+	0x09, 0x58, 0xbd, 0xd3, 0x8b, 0x70, 0x41, 0x74, 0xd7, 0x87, 0xa3, 0x08, 0xbd, 0x0d, 0xe5, 0x01,
11575
+	0x1d, 0xb0, 0x70, 0x6c, 0x1f, 0x8c, 0x39, 0x8d, 0xa4, 0xe1, 0x0c, 0x2e, 0x29, 0xd9, 0x96, 0x10,
11576
+	0x99, 0x7f, 0x99, 0x82, 0x9b, 0xb1, 0x6d, 0x4c, 0xff, 0x60, 0xe4, 0x85, 0x74, 0x40, 0x03, 0x1e,
11577
+	0xa1, 0xdf, 0x86, 0xbc, 0xef, 0x0d, 0x3c, 0xae, 0xc6, 0x28, 0x6d, 0xbe, 0xb5, 0x68, 0xce, 0x89,
11578
+	0x57, 0x58, 0x83, 0x91, 0x05, 0xe5, 0x90, 0x46, 0x34, 0x3c, 0x51, 0x2b, 0x21, 0x87, 0xfc, 0x56,
11579
+	0xe5, 0x19, 0x15, 0x73, 0x1b, 0x0a, 0x1d, 0x9f, 0xf0, 0x43, 0x16, 0x0e, 0x90, 0x09, 0x65, 0x12,
11580
+	0x3a, 0x47, 0x1e, 0xa7, 0x0e, 0x1f, 0x85, 0xf1, 0xae, 0xcc, 0xc8, 0xd0, 0x2d, 0x48, 0x33, 0x35,
11581
+	0x50, 0x71, 0x2b, 0x7f, 0x79, 0xb1, 0x91, 0xde, 0xeb, 0xe2, 0x34, 0x8b, 0xcc, 0x87, 0x70, 0xbd,
11582
+	0xe3, 0x8f, 0xfa, 0x5e, 0xd0, 0xa0, 0x91, 0x13, 0x7a, 0x43, 0x61, 0x5d, 0x6c, 0xaf, 0x08, 0xbe,
11583
+	0x78, 0x7b, 0xc5, 0x77, 0xb2, 0xe5, 0xe9, 0xc9, 0x96, 0x9b, 0x7f, 0x9e, 0x86, 0xeb, 0xcd, 0xa0,
11584
+	0xef, 0x05, 0x74, 0x5a, 0xfb, 0x1e, 0xac, 0x52, 0x29, 0xb4, 0x4f, 0x54, 0x50, 0x69, 0x3b, 0x2b,
11585
+	0x4a, 0x1a, 0x47, 0x5a, 0x6b, 0x2e, 0x5e, 0x3e, 0x5a, 0x34, 0xfd, 0x97, 0xac, 0x2f, 0x8a, 0x1a,
11586
+	0xd4, 0x84, 0xe5, 0xa1, 0x9c, 0x44, 0xb4, 0x96, 0x91, 0xb6, 0xee, 0x2d, 0xb2, 0xf5, 0xd2, 0x3c,
11587
+	0xb7, 0xb2, 0x5f, 0x5f, 0x6c, 0x2c, 0xe1, 0x58, 0xf7, 0xd7, 0x09, 0xbe, 0xff, 0x48, 0xc1, 0xb5,
11588
+	0x36, 0x73, 0x67, 0xd6, 0xa1, 0x02, 0x85, 0x23, 0x16, 0xf1, 0xa9, 0x83, 0x92, 0xb4, 0xd1, 0x03,
11589
+	0x28, 0x0c, 0xf5, 0xf6, 0xe9, 0xdd, 0xbf, 0xb3, 0xd8, 0x65, 0x85, 0xc1, 0x09, 0x1a, 0x3d, 0x84,
11590
+	0x62, 0x18, 0xc7, 0xc4, 0x5a, 0xe6, 0x75, 0x02, 0x67, 0x82, 0x47, 0xbf, 0x0b, 0x79, 0xb5, 0x09,
11591
+	0x6b, 0x59, 0xa9, 0x79, 0xef, 0xb5, 0xd6, 0x1c, 0x6b, 0x25, 0xf3, 0x17, 0x29, 0x30, 0x30, 0x39,
11592
+	0xe4, 0xbb, 0x74, 0x70, 0x40, 0xc3, 0x2e, 0x27, 0x7c, 0x14, 0xa1, 0x5b, 0x90, 0xf7, 0x29, 0x71,
11593
+	0x69, 0x28, 0x27, 0x59, 0xc0, 0xba, 0x85, 0x7a, 0x22, 0xc8, 0x89, 0x73, 0x44, 0x0e, 0x3c, 0xdf,
11594
+	0xe3, 0x63, 0x39, 0xcd, 0xd5, 0xc5, 0xbb, 0x3c, 0x6f, 0xb3, 0x86, 0xa7, 0x14, 0xf1, 0x8c, 0x19,
11595
+	0xb4, 0x06, 0xcb, 0x03, 0x1a, 0x45, 0xa4, 0x4f, 0xe5, 0xec, 0x8b, 0x38, 0x6e, 0x9a, 0x0f, 0xa1,
11596
+	0x3c, 0xad, 0x87, 0x4a, 0xb0, 0xdc, 0x6b, 0x3f, 0x69, 0xef, 0x3d, 0x6b, 0x1b, 0x4b, 0xe8, 0x1a,
11597
+	0x94, 0x7a, 0x6d, 0xdc, 0xb4, 0xea, 0x8f, 0xad, 0xad, 0x9d, 0xa6, 0x91, 0x42, 0x2b, 0x50, 0x9c,
11598
+	0x34, 0xd3, 0xe6, 0xcf, 0x52, 0x00, 0x62, 0x03, 0xf5, 0xa4, 0x3e, 0x85, 0x5c, 0xc4, 0x09, 0x57,
11599
+	0x1b, 0xb7, 0xba, 0xf9, 0xce, 0x22, 0xaf, 0x27, 0xf0, 0x9a, 0xf8, 0xa1, 0x58, 0xa9, 0x4c, 0x7b,
11600
+	0x98, 0x9e, 0xf7, 0x30, 0x27, 0x91, 0xb3, 0xae, 0x15, 0x20, 0xdb, 0x10, 0x5f, 0x29, 0x54, 0x84,
11601
+	0x1c, 0x6e, 0x5a, 0x8d, 0x2f, 0x8c, 0x34, 0x32, 0xa0, 0xdc, 0x68, 0x75, 0xeb, 0x7b, 0xed, 0x76,
11602
+	0xb3, 0xbe, 0xdf, 0x6c, 0x18, 0x19, 0xf3, 0x1e, 0xe4, 0x5a, 0x03, 0xd2, 0xa7, 0xe8, 0x8e, 0x88,
11603
+	0x80, 0x43, 0x1a, 0xd2, 0xc0, 0x89, 0x03, 0x6b, 0x22, 0x30, 0x7f, 0x5e, 0x84, 0xdc, 0x2e, 0x1b,
11604
+	0x05, 0x1c, 0x6d, 0x4e, 0x9d, 0xe2, 0xd5, 0xcd, 0xf5, 0x45, 0x53, 0x90, 0xc0, 0xda, 0xfe, 0x78,
11605
+	0x48, 0xf5, 0x29, 0xbf, 0x05, 0x79, 0x15, 0x2b, 0xda, 0x75, 0xdd, 0x12, 0x72, 0x4e, 0xc2, 0x3e,
11606
+	0xe5, 0x7a, 0xd1, 0x75, 0x0b, 0xdd, 0x87, 0x42, 0x48, 0x89, 0xcb, 0x02, 0x7f, 0x2c, 0x43, 0xaa,
11607
+	0xa0, 0xd2, 0x2c, 0xa6, 0xc4, 0xdd, 0x0b, 0xfc, 0x31, 0x4e, 0x7a, 0xd1, 0x63, 0x28, 0x1f, 0x78,
11608
+	0x81, 0x6b, 0xb3, 0xa1, 0xca, 0x79, 0xb9, 0x57, 0x07, 0xa0, 0xf2, 0x6a, 0xcb, 0x0b, 0xdc, 0x3d,
11609
+	0x05, 0xc6, 0xa5, 0x83, 0x49, 0x03, 0xb5, 0x61, 0xf5, 0x84, 0xf9, 0xa3, 0x01, 0x4d, 0x6c, 0xe5,
11610
+	0xa5, 0xad, 0x77, 0x5f, 0x6d, 0xeb, 0xa9, 0xc4, 0xc7, 0xd6, 0x56, 0x4e, 0xa6, 0x9b, 0xe8, 0x09,
11611
+	0xac, 0xf0, 0xc1, 0xf0, 0x30, 0x4a, 0xcc, 0x2d, 0x4b, 0x73, 0xdf, 0xbf, 0x62, 0xc1, 0x04, 0x3c,
11612
+	0xb6, 0x56, 0xe6, 0x53, 0xad, 0xca, 0x9f, 0x66, 0xa0, 0x34, 0xe5, 0x39, 0xea, 0x42, 0x69, 0x18,
11613
+	0xb2, 0x21, 0xe9, 0xcb, 0xbc, 0xad, 0xf7, 0xe2, 0xa3, 0xd7, 0x9a, 0x75, 0xad, 0x33, 0x51, 0xc4,
11614
+	0xd3, 0x56, 0xcc, 0xf3, 0x34, 0x94, 0xa6, 0x3a, 0xd1, 0x7b, 0x50, 0xc0, 0x1d, 0xdc, 0x7a, 0x6a,
11615
+	0xed, 0x37, 0x8d, 0xa5, 0xca, 0x9d, 0xb3, 0xf3, 0xea, 0x9a, 0xb4, 0x36, 0x6d, 0xa0, 0x13, 0x7a,
11616
+	0x27, 0x22, 0xf4, 0xee, 0xc3, 0x72, 0x0c, 0x4d, 0x55, 0xde, 0x3c, 0x3b, 0xaf, 0xbe, 0x31, 0x0f,
11617
+	0x9d, 0x42, 0xe2, 0xee, 0x63, 0x0b, 0x37, 0x1b, 0x46, 0x7a, 0x31, 0x12, 0x77, 0x8f, 0x48, 0x48,
11618
+	0x5d, 0xf4, 0x7d, 0xc8, 0x6b, 0x60, 0xa6, 0x52, 0x39, 0x3b, 0xaf, 0xde, 0x9a, 0x07, 0x4e, 0x70,
11619
+	0xb8, 0xbb, 0x63, 0x3d, 0x6d, 0x1a, 0xd9, 0xc5, 0x38, 0xdc, 0xf5, 0xc9, 0x09, 0x45, 0xef, 0x40,
11620
+	0x4e, 0xc1, 0x72, 0x95, 0xdb, 0x67, 0xe7, 0xd5, 0xef, 0xbd, 0x64, 0x4e, 0xa0, 0x2a, 0x6b, 0x7f,
11621
+	0xf1, 0x37, 0xeb, 0x4b, 0xff, 0xf8, 0xb7, 0xeb, 0xc6, 0x7c, 0x77, 0xe5, 0x7f, 0x53, 0xb0, 0x32,
11622
+	0xb3, 0xe5, 0xc8, 0x84, 0x7c, 0xc0, 0x1c, 0x36, 0x54, 0xe9, 0xbc, 0xb0, 0x05, 0x97, 0x17, 0x1b,
11623
+	0xf9, 0x36, 0xab, 0xb3, 0xe1, 0x18, 0xeb, 0x1e, 0xf4, 0x64, 0xee, 0x42, 0xfa, 0xf8, 0x35, 0xe3,
11624
+	0x69, 0xe1, 0x95, 0xf4, 0x19, 0xac, 0xb8, 0xa1, 0x77, 0x42, 0x43, 0xdb, 0x61, 0xc1, 0xa1, 0xd7,
11625
+	0xd7, 0xa9, 0xba, 0xb2, 0xc8, 0x66, 0x43, 0x02, 0x71, 0x59, 0x29, 0xd4, 0x25, 0xfe, 0xd7, 0xb8,
11626
+	0x8c, 0x2a, 0x4f, 0xa1, 0x3c, 0x1d, 0xa1, 0xe8, 0x2d, 0x80, 0xc8, 0xfb, 0x43, 0xaa, 0xf9, 0x8d,
11627
+	0x64, 0x43, 0xb8, 0x28, 0x24, 0x92, 0xdd, 0xa0, 0x77, 0x21, 0x3b, 0x60, 0xae, 0xb2, 0x93, 0xdb,
11628
+	0xba, 0x21, 0xee, 0xc4, 0x7f, 0xbd, 0xd8, 0x28, 0xb1, 0xa8, 0xb6, 0xed, 0xf9, 0x74, 0x97, 0xb9,
11629
+	0x14, 0x4b, 0x80, 0x79, 0x02, 0x59, 0x91, 0x2a, 0xd0, 0x9b, 0x90, 0xdd, 0x6a, 0xb5, 0x1b, 0xc6,
11630
+	0x52, 0xe5, 0xfa, 0xd9, 0x79, 0x75, 0x45, 0x2e, 0x89, 0xe8, 0x10, 0xb1, 0x8b, 0x36, 0x20, 0xff,
11631
+	0x74, 0x6f, 0xa7, 0xb7, 0x2b, 0xc2, 0xeb, 0xc6, 0xd9, 0x79, 0xf5, 0x5a, 0xd2, 0xad, 0x16, 0x0d,
11632
+	0xbd, 0x05, 0xb9, 0xfd, 0xdd, 0xce, 0x76, 0xd7, 0x48, 0x57, 0xd0, 0xd9, 0x79, 0x75, 0x35, 0xe9,
11633
+	0x97, 0x3e, 0x57, 0xae, 0xeb, 0x5d, 0x2d, 0x26, 0x72, 0xf3, 0x7f, 0xd2, 0xb0, 0x82, 0x05, 0xbf,
11634
+	0x0d, 0x79, 0x87, 0xf9, 0x9e, 0x33, 0x46, 0x1d, 0x28, 0x3a, 0x2c, 0x70, 0xbd, 0xa9, 0x33, 0xb5,
11635
+	0xf9, 0x8a, 0x4b, 0x70, 0xa2, 0x15, 0xb7, 0xea, 0xb1, 0x26, 0x9e, 0x18, 0x41, 0x9b, 0x90, 0x73,
11636
+	0xa9, 0x4f, 0xc6, 0x57, 0xdd, 0xc6, 0x0d, 0xcd, 0xa5, 0xb1, 0x82, 0x4a, 0xe6, 0x48, 0x9e, 0xdb,
11637
+	0x84, 0x73, 0x3a, 0x18, 0x72, 0x75, 0x1b, 0x67, 0x71, 0x69, 0x40, 0x9e, 0x5b, 0x5a, 0x84, 0x7e,
11638
+	0x04, 0xf9, 0x53, 0x2f, 0x70, 0xd9, 0xa9, 0xbe, 0x70, 0xaf, 0xb6, 0xab, 0xb1, 0xe6, 0x99, 0xb8,
11639
+	0x67, 0xe7, 0x9c, 0x15, 0xab, 0xde, 0xde, 0x6b, 0x37, 0xe3, 0x55, 0xd7, 0xfd, 0x7b, 0x41, 0x9b,
11640
+	0x05, 0xe2, 0xc4, 0xc0, 0x5e, 0xdb, 0xde, 0xb6, 0x5a, 0x3b, 0x3d, 0x2c, 0x56, 0xfe, 0xe6, 0xd9,
11641
+	0x79, 0xd5, 0x48, 0x20, 0xdb, 0xc4, 0xf3, 0x05, 0x09, 0xbc, 0x0d, 0x19, 0xab, 0xfd, 0x85, 0x91,
11642
+	0xae, 0x18, 0x67, 0xe7, 0xd5, 0x72, 0xd2, 0x6d, 0x05, 0xe3, 0xc9, 0x61, 0x9a, 0x1f, 0xd7, 0xfc,
11643
+	0xcf, 0x34, 0x94, 0x7b, 0x43, 0x97, 0x70, 0xaa, 0x22, 0x13, 0x55, 0xa1, 0x34, 0x24, 0x21, 0xf1,
11644
+	0x7d, 0xea, 0x7b, 0xd1, 0x40, 0x17, 0x0a, 0xd3, 0x22, 0xf4, 0xe0, 0x3b, 0x2c, 0xa6, 0x26, 0x61,
11645
+	0x7a, 0x49, 0x7b, 0xb0, 0x7a, 0xa8, 0x9c, 0xb5, 0x89, 0x23, 0x77, 0x37, 0x23, 0x77, 0xb7, 0xb6,
11646
+	0xc8, 0xc4, 0xb4, 0x57, 0x35, 0x3d, 0x47, 0x4b, 0x6a, 0xe1, 0x95, 0xc3, 0xe9, 0x26, 0xfa, 0x04,
11647
+	0x96, 0x07, 0x2c, 0xf0, 0x38, 0x0b, 0x5f, 0x6b, 0x1f, 0x62, 0x30, 0x7a, 0x00, 0x6b, 0xc4, 0xf7,
11648
+	0xd9, 0x29, 0x75, 0xed, 0xd8, 0xad, 0xc3, 0x50, 0x3b, 0x26, 0x2e, 0xb0, 0x34, 0xbe, 0xa5, 0xfb,
11649
+	0xf5, 0xf0, 0xdb, 0xba, 0xd7, 0xfc, 0x04, 0x56, 0x66, 0x3c, 0x12, 0x77, 0x7b, 0xc7, 0xea, 0x75,
11650
+	0x9b, 0xc6, 0x12, 0x2a, 0x43, 0xa1, 0xbe, 0xd7, 0xde, 0x6f, 0xb5, 0x7b, 0x82, 0x88, 0x94, 0xa1,
11651
+	0x80, 0xf7, 0x76, 0x76, 0xb6, 0xac, 0xfa, 0x13, 0x23, 0x6d, 0xfe, 0x77, 0xb2, 0xda, 0x9a, 0x89,
11652
+	0x6c, 0xcd, 0x32, 0x91, 0xf7, 0x5f, 0xbd, 0x10, 0x9a, 0x8b, 0x4c, 0x1a, 0x09, 0x23, 0xf9, 0x1d,
11653
+	0x00, 0xb9, 0xa9, 0xd4, 0xb5, 0x09, 0xbf, 0xaa, 0xda, 0xd8, 0x8f, 0xeb, 0x48, 0x5c, 0xd4, 0x0a,
11654
+	0x16, 0x47, 0x9f, 0x43, 0xd9, 0x61, 0x83, 0xa1, 0x4f, 0xb5, 0x7e, 0xe6, 0x75, 0xf4, 0x4b, 0x89,
11655
+	0x8a, 0xc5, 0xa7, 0x19, 0x51, 0x76, 0x96, 0x11, 0xfd, 0x59, 0x0a, 0x4a, 0x53, 0x0e, 0xcf, 0x12,
11656
+	0xa3, 0x32, 0x14, 0x7a, 0x9d, 0x86, 0xb5, 0xdf, 0x6a, 0x3f, 0x32, 0x52, 0x08, 0x20, 0x2f, 0x17,
11657
+	0xb0, 0x61, 0xa4, 0x05, 0x79, 0xab, 0xef, 0xed, 0x76, 0x76, 0x9a, 0x92, 0x1a, 0xa1, 0x9b, 0x60,
11658
+	0xc4, 0x4b, 0x68, 0x77, 0xf7, 0x2d, 0x2c, 0xa4, 0x59, 0x74, 0x03, 0xae, 0x25, 0x52, 0xad, 0x99,
11659
+	0x43, 0xb7, 0x00, 0x25, 0xc2, 0x89, 0x89, 0xbc, 0xf9, 0xc7, 0x70, 0xad, 0xce, 0x02, 0x4e, 0xbc,
11660
+	0x20, 0x21, 0xb6, 0x9b, 0x62, 0xde, 0x5a, 0x64, 0x7b, 0xae, 0xca, 0xbd, 0x5b, 0xd7, 0x2e, 0x2f,
11661
+	0x36, 0x4a, 0x09, 0xb4, 0xd5, 0x10, 0x33, 0x8d, 0x1b, 0xae, 0x38, 0x61, 0x43, 0xcf, 0xd5, 0xa9,
11662
+	0x74, 0xf9, 0xf2, 0x62, 0x23, 0xd3, 0x69, 0x35, 0xb0, 0x90, 0xa1, 0x37, 0xa1, 0x48, 0x9f, 0x7b,
11663
+	0xdc, 0x76, 0x44, 0xae, 0x15, 0x6b, 0x98, 0xc3, 0x05, 0x21, 0xa8, 0x8b, 0xd4, 0xfa, 0x27, 0x69,
11664
+	0x80, 0x7d, 0x12, 0x1d, 0xeb, 0xa1, 0x1f, 0x42, 0x31, 0x29, 0xe9, 0xaf, 0x2a, 0x2d, 0xa7, 0xf6,
11665
+	0x2b, 0xc1, 0xa3, 0x8f, 0xe3, 0x88, 0x51, 0x8c, 0x7b, 0xb1, 0xa2, 0x1e, 0x6b, 0x11, 0x69, 0x9d,
11666
+	0xa5, 0xd5, 0xe2, 0xe6, 0xa1, 0x61, 0xa8, 0x37, 0x4e, 0x7c, 0xa2, 0xba, 0xcc, 0xbe, 0x6a, 0xce,
11667
+	0x9a, 0xc7, 0xdd, 0x5d, 0x34, 0xc8, 0xdc, 0x82, 0x3e, 0x5e, 0xc2, 0x13, 0xbd, 0x2d, 0x03, 0x56,
11668
+	0xc3, 0x51, 0x20, 0xbc, 0xb6, 0x23, 0xd9, 0x6d, 0x7a, 0xf0, 0x46, 0x9b, 0xf2, 0x53, 0x16, 0x1e,
11669
+	0x5b, 0x9c, 0x13, 0xe7, 0x48, 0x94, 0xd8, 0x3a, 0xe5, 0x4c, 0xe8, 0x67, 0x6a, 0x86, 0x7e, 0xae,
11670
+	0xc1, 0x32, 0xf1, 0x3d, 0x12, 0x51, 0x75, 0x67, 0x17, 0x71, 0xdc, 0x14, 0x24, 0x99, 0xb8, 0x6e,
11671
+	0x48, 0xa3, 0x88, 0xaa, 0xa2, 0xb0, 0x88, 0x27, 0x02, 0xf3, 0x9f, 0xd3, 0x00, 0xad, 0x8e, 0xb5,
11672
+	0xab, 0xcd, 0x37, 0x20, 0x7f, 0x48, 0x06, 0x9e, 0x3f, 0xbe, 0xea, 0x90, 0x4d, 0xf0, 0x35, 0x4b,
11673
+	0x19, 0xda, 0x96, 0x3a, 0x58, 0xeb, 0x4a, 0xee, 0x3c, 0x3a, 0x08, 0x28, 0x4f, 0xb8, 0xb3, 0x6c,
11674
+	0x89, 0x8b, 0x3a, 0x24, 0x41, 0xb2, 0xb0, 0xaa, 0x21, 0x5c, 0xef, 0x13, 0x4e, 0x4f, 0xc9, 0x38,
11675
+	0x3e, 0x13, 0xba, 0x89, 0x1e, 0x0b, 0x4e, 0x2d, 0x4a, 0x7d, 0xea, 0xae, 0xe5, 0x24, 0x13, 0xf9,
11676
+	0x36, 0x7f, 0xb0, 0x86, 0x2b, 0x0a, 0x92, 0x68, 0x57, 0x1e, 0xca, 0x7b, 0x73, 0xd2, 0xf5, 0x9d,
11677
+	0x4a, 0xda, 0x0f, 0x61, 0x65, 0x66, 0x9e, 0x2f, 0x15, 0x2d, 0xad, 0xce, 0xd3, 0x1f, 0x19, 0x59,
11678
+	0xfd, 0xf5, 0x89, 0x91, 0x37, 0xff, 0x2b, 0x05, 0xd0, 0x61, 0x61, 0xbc, 0x69, 0x8b, 0x1f, 0x89,
11679
+	0x0a, 0xf2, 0xc9, 0xc9, 0x61, 0xbe, 0x0e, 0xcf, 0x85, 0xac, 0x7d, 0x62, 0x45, 0x90, 0x60, 0x09,
11680
+	0xc7, 0x89, 0x22, 0xda, 0x80, 0x92, 0xda, 0x7f, 0x7b, 0xc8, 0x42, 0x95, 0x8f, 0x56, 0x30, 0x28,
11681
+	0x91, 0xd0, 0x44, 0xf7, 0x60, 0x75, 0x38, 0x3a, 0xf0, 0xbd, 0xe8, 0x88, 0xba, 0x0a, 0x93, 0x95,
11682
+	0x98, 0x95, 0x44, 0x2a, 0x60, 0x66, 0x03, 0x0a, 0xb1, 0x75, 0xb4, 0x06, 0x99, 0xfd, 0x7a, 0xc7,
11683
+	0x58, 0xaa, 0x5c, 0x3b, 0x3b, 0xaf, 0x96, 0x62, 0xf1, 0x7e, 0xbd, 0x23, 0x7a, 0x7a, 0x8d, 0x8e,
11684
+	0x91, 0x9a, 0xed, 0xe9, 0x35, 0x3a, 0x95, 0xac, 0xb8, 0x33, 0xcd, 0xbf, 0x4a, 0x41, 0x5e, 0x31,
11685
+	0xb8, 0x85, 0x33, 0xb6, 0x60, 0x39, 0xae, 0x2b, 0x14, 0xad, 0x7c, 0xf7, 0xd5, 0x14, 0xb0, 0xa6,
11686
+	0x19, 0x9b, 0xda, 0xc7, 0x58, 0xaf, 0xf2, 0x29, 0x94, 0xa7, 0x3b, 0xbe, 0xd3, 0x2e, 0xfe, 0x11,
11687
+	0x94, 0x44, 0xa0, 0xc4, 0x54, 0x70, 0x13, 0xf2, 0x8a, 0x65, 0xea, 0xac, 0x72, 0x15, 0x1f, 0xd5,
11688
+	0x48, 0xf4, 0x00, 0x96, 0x15, 0x87, 0x8d, 0x5f, 0x57, 0xd6, 0xaf, 0x0e, 0x47, 0x1c, 0xc3, 0xcd,
11689
+	0xcf, 0x20, 0xdb, 0xa1, 0x34, 0x44, 0x77, 0x61, 0x39, 0x60, 0x2e, 0x9d, 0x24, 0x51, 0x4d, 0xbf,
11690
+	0x5d, 0xda, 0x6a, 0x08, 0xfa, 0xed, 0xd2, 0x96, 0x2b, 0x16, 0x4f, 0x1c, 0xd0, 0xf8, 0x81, 0x49,
11691
+	0x7c, 0x9b, 0xfb, 0x50, 0x7e, 0x46, 0xbd, 0xfe, 0x11, 0xa7, 0xae, 0x34, 0xf4, 0x3e, 0x64, 0x87,
11692
+	0x34, 0x71, 0x7e, 0x6d, 0x61, 0xe8, 0x50, 0x1a, 0x62, 0x89, 0x12, 0x07, 0xf2, 0x54, 0x6a, 0xeb,
11693
+	0x37, 0x3d, 0xdd, 0x32, 0xff, 0x3e, 0x0d, 0xab, 0xad, 0x28, 0x1a, 0x91, 0xc0, 0x89, 0x6f, 0xd9,
11694
+	0x9f, 0xcc, 0xde, 0xb2, 0xf7, 0x17, 0xce, 0x70, 0x46, 0x65, 0xb6, 0xe6, 0xd7, 0x49, 0x32, 0x9d,
11695
+	0x24, 0x49, 0xf3, 0xeb, 0x54, 0x5c, 0xec, 0xdf, 0x9b, 0x3a, 0x37, 0x95, 0xb5, 0xb3, 0xf3, 0xea,
11696
+	0xcd, 0x69, 0x4b, 0xb4, 0x17, 0x1c, 0x07, 0xec, 0x34, 0x40, 0x6f, 0x8b, 0xe2, 0xbf, 0xdd, 0x7c,
11697
+	0x66, 0xa4, 0x2a, 0xb7, 0xce, 0xce, 0xab, 0x68, 0x06, 0x84, 0x69, 0x40, 0x4f, 0x85, 0xa5, 0x4e,
11698
+	0xb3, 0xdd, 0x10, 0xf7, 0x61, 0x7a, 0x81, 0xa5, 0x0e, 0x0d, 0x5c, 0x2f, 0xe8, 0xa3, 0xbb, 0x90,
11699
+	0x6f, 0x75, 0xbb, 0x3d, 0x59, 0x8e, 0xbd, 0x71, 0x76, 0x5e, 0xbd, 0x31, 0x83, 0x12, 0x0d, 0xea,
11700
+	0x0a, 0x90, 0xa0, 0x8b, 0xe2, 0xa6, 0x5c, 0x00, 0x12, 0xdc, 0x85, 0xba, 0x3a, 0xc2, 0xff, 0x2d,
11701
+	0x0d, 0x86, 0xe5, 0x38, 0x74, 0xc8, 0x45, 0xbf, 0xa6, 0xe0, 0xfb, 0x50, 0x18, 0x8a, 0x2f, 0x4f,
11702
+	0x96, 0x14, 0x22, 0x2c, 0x1e, 0x2c, 0x7c, 0xf0, 0x9d, 0xd3, 0xab, 0x61, 0xe6, 0x53, 0xcb, 0x1d,
11703
+	0x78, 0x51, 0x24, 0x4a, 0x4d, 0x29, 0xc3, 0x89, 0xa5, 0xca, 0x2f, 0x53, 0x70, 0x63, 0x01, 0x02,
11704
+	0x7d, 0x08, 0xd9, 0x90, 0xf9, 0xf1, 0xf6, 0xdc, 0x79, 0xd5, 0x73, 0x8c, 0x50, 0xc5, 0x12, 0x89,
11705
+	0xd6, 0x01, 0xc8, 0x88, 0x33, 0x22, 0xc7, 0x97, 0x1b, 0x53, 0xc0, 0x53, 0x12, 0xf4, 0x0c, 0xf2,
11706
+	0x11, 0x75, 0x42, 0x1a, 0xf3, 0x99, 0xcf, 0xfe, 0xbf, 0xde, 0xd7, 0xba, 0xd2, 0x0c, 0xd6, 0xe6,
11707
+	0x2a, 0x35, 0xc8, 0x2b, 0x89, 0x88, 0x68, 0x97, 0x70, 0x22, 0x9d, 0x2e, 0x63, 0xf9, 0x2d, 0x02,
11708
+	0x85, 0xf8, 0xfd, 0x38, 0x50, 0x88, 0xdf, 0x37, 0x7f, 0x96, 0x06, 0x68, 0x3e, 0xe7, 0x34, 0x0c,
11709
+	0x88, 0x5f, 0xb7, 0x50, 0x73, 0x2a, 0x43, 0xaa, 0xd9, 0xfe, 0x60, 0xe1, 0x23, 0x5d, 0xa2, 0x51,
11710
+	0xab, 0x5b, 0x0b, 0x72, 0xe4, 0x6d, 0xc8, 0x8c, 0x42, 0x5f, 0x3f, 0xf8, 0x4a, 0x22, 0xd2, 0xc3,
11711
+	0x3b, 0x58, 0xc8, 0x50, 0x73, 0x92, 0x91, 0x32, 0xaf, 0x7e, 0xa9, 0x9f, 0x1a, 0xe0, 0x37, 0x9f,
11712
+	0x95, 0xde, 0x07, 0x98, 0x78, 0x8d, 0xd6, 0x21, 0x57, 0xdf, 0xee, 0x76, 0x77, 0x8c, 0x25, 0x55,
11713
+	0x31, 0x4e, 0xba, 0xa4, 0xd8, 0xfc, 0xbb, 0x14, 0x14, 0xea, 0x96, 0xbe, 0x55, 0xb6, 0xc1, 0x90,
11714
+	0xb9, 0xc4, 0xa1, 0x21, 0xb7, 0xe9, 0xf3, 0xa1, 0x17, 0x8e, 0x75, 0x3a, 0xb8, 0x9a, 0xd3, 0xaf,
11715
+	0x0a, 0xad, 0x3a, 0x0d, 0x79, 0x53, 0xea, 0x20, 0x0c, 0x65, 0xaa, 0xa7, 0x68, 0x3b, 0x24, 0x4e,
11716
+	0xce, 0xeb, 0x57, 0x2f, 0x85, 0x62, 0x7f, 0x93, 0x76, 0x84, 0x4b, 0xb1, 0x91, 0x3a, 0x89, 0xcc,
11717
+	0xa7, 0x70, 0x63, 0x2f, 0x74, 0x8e, 0x68, 0xc4, 0xd5, 0xa0, 0xda, 0xe5, 0xcf, 0xe0, 0x0e, 0x27,
11718
+	0xd1, 0xb1, 0x7d, 0xe4, 0x45, 0x9c, 0x85, 0x63, 0x3b, 0xa4, 0x9c, 0x06, 0xa2, 0xdf, 0x96, 0xff,
11719
+	0x07, 0xe8, 0x8a, 0xfc, 0xb6, 0xc0, 0x3c, 0x56, 0x10, 0x1c, 0x23, 0x76, 0x04, 0xc0, 0x6c, 0x41,
11720
+	0x59, 0x10, 0xb6, 0x06, 0x3d, 0x24, 0x23, 0x9f, 0x47, 0xe8, 0xc7, 0x00, 0x3e, 0xeb, 0xdb, 0xaf,
11721
+	0x9d, 0xc9, 0x8b, 0x3e, 0xeb, 0xab, 0x4f, 0xf3, 0xf7, 0xc0, 0x68, 0x78, 0xd1, 0x90, 0x70, 0xe7,
11722
+	0x28, 0x7e, 0x6a, 0x40, 0x8f, 0xc0, 0x38, 0xa2, 0x24, 0xe4, 0x07, 0x94, 0x70, 0x7b, 0x48, 0x43,
11723
+	0x8f, 0xb9, 0xaf, 0xb5, 0xa4, 0xd7, 0x12, 0xad, 0x8e, 0x54, 0x32, 0x7f, 0x95, 0x02, 0xc0, 0xe4,
11724
+	0x30, 0x26, 0x00, 0x3f, 0x84, 0xeb, 0x51, 0x40, 0x86, 0xd1, 0x11, 0xe3, 0xb6, 0x17, 0x70, 0x1a,
11725
+	0x9e, 0x10, 0x5f, 0x97, 0x8b, 0x46, 0xdc, 0xd1, 0xd2, 0x72, 0xf4, 0x3e, 0xa0, 0x63, 0x4a, 0x87,
11726
+	0x36, 0xf3, 0x5d, 0x3b, 0xee, 0x54, 0x7f, 0x58, 0x64, 0xb1, 0x21, 0x7a, 0xf6, 0x7c, 0xb7, 0x1b,
11727
+	0xcb, 0xd1, 0x16, 0xac, 0x8b, 0x15, 0xa0, 0x01, 0x0f, 0x3d, 0x1a, 0xd9, 0x87, 0x2c, 0xb4, 0x23,
11728
+	0x9f, 0x9d, 0xda, 0x87, 0x4c, 0x96, 0x63, 0x61, 0x5c, 0x8c, 0x57, 0x7c, 0xd6, 0x6f, 0x2a, 0xd0,
11729
+	0x36, 0x0b, 0xbb, 0x3e, 0x3b, 0xdd, 0x8e, 0x11, 0x82, 0x25, 0x4c, 0xa6, 0xcd, 0x3d, 0xe7, 0x38,
11730
+	0x66, 0x09, 0x89, 0x74, 0xdf, 0x73, 0x8e, 0xd1, 0x5d, 0x58, 0xa1, 0x3e, 0x95, 0x45, 0x9c, 0x42,
11731
+	0xe5, 0x24, 0xaa, 0x1c, 0x0b, 0x05, 0xc8, 0xfc, 0x2d, 0x28, 0x76, 0x7c, 0xe2, 0xc8, 0xbf, 0x85,
11732
+	0x44, 0x81, 0xec, 0xb0, 0x40, 0x04, 0x81, 0x17, 0x70, 0x95, 0x1d, 0x8b, 0x78, 0x5a, 0x64, 0xfe,
11733
+	0x04, 0xe0, 0xa7, 0xcc, 0x0b, 0xf6, 0xd9, 0x31, 0x0d, 0xe4, 0x0b, 0xba, 0x60, 0xbd, 0x7a, 0x2b,
11734
+	0x8b, 0x58, 0xb7, 0x24, 0x27, 0x27, 0x01, 0xe9, 0xd3, 0x30, 0x79, 0x48, 0x56, 0x4d, 0x71, 0xb9,
11735
+	0xe4, 0x31, 0x63, 0xbc, 0x6e, 0xa1, 0x2a, 0xe4, 0x1d, 0x62, 0xc7, 0x27, 0xaf, 0xbc, 0x55, 0xbc,
11736
+	0xbc, 0xd8, 0xc8, 0xd5, 0xad, 0x27, 0x74, 0x8c, 0x73, 0x0e, 0x79, 0x42, 0xc7, 0xe2, 0xf6, 0x75,
11737
+	0x88, 0x3c, 0x2f, 0xd2, 0x4c, 0x59, 0xdd, 0xbe, 0x75, 0x4b, 0x1c, 0x06, 0x9c, 0x77, 0x88, 0xf8,
11738
+	0x45, 0x1f, 0x42, 0x59, 0x83, 0xec, 0x23, 0x12, 0x1d, 0x29, 0xae, 0xba, 0xb5, 0x7a, 0x79, 0xb1,
11739
+	0x01, 0x0a, 0xf9, 0x98, 0x44, 0x47, 0x18, 0x14, 0x5a, 0x7c, 0xa3, 0x26, 0x94, 0xbe, 0x64, 0x5e,
11740
+	0x60, 0x73, 0x39, 0x09, 0x5d, 0x57, 0x2f, 0x3c, 0x3f, 0x93, 0xa9, 0xea, 0x62, 0x1f, 0xbe, 0x4c,
11741
+	0x24, 0xe6, 0xbf, 0xa4, 0xa0, 0x24, 0x6c, 0x7a, 0x87, 0x9e, 0x23, 0x6e, 0xcb, 0xef, 0x9e, 0xe9,
11742
+	0x6f, 0x43, 0xc6, 0x89, 0x42, 0x3d, 0x37, 0x99, 0xea, 0xea, 0x5d, 0x8c, 0x85, 0x0c, 0x7d, 0x0e,
11743
+	0x79, 0x55, 0x5c, 0xe8, 0x24, 0x6f, 0x7e, 0xfb, 0xbd, 0xae, 0x5d, 0xd4, 0x7a, 0x72, 0x2f, 0x27,
11744
+	0xde, 0xc9, 0x59, 0x96, 0xf1, 0xb4, 0x08, 0xdd, 0x82, 0xb4, 0xa3, 0x5e, 0x03, 0xf4, 0x3f, 0x6b,
11745
+	0xf5, 0x36, 0x4e, 0x3b, 0x81, 0xf9, 0x4f, 0x29, 0x58, 0x69, 0x06, 0x4e, 0x38, 0x96, 0x49, 0x52,
11746
+	0x6c, 0xc4, 0x1d, 0x28, 0x46, 0xa3, 0x83, 0x68, 0x1c, 0x71, 0x3a, 0x88, 0x1f, 0xee, 0x13, 0x01,
11747
+	0x6a, 0x41, 0x91, 0xf8, 0x7d, 0x16, 0x7a, 0xfc, 0x68, 0xa0, 0xb9, 0xf1, 0xe2, 0xc4, 0x3c, 0x6d,
11748
+	0xb3, 0x66, 0xc5, 0x2a, 0x78, 0xa2, 0x1d, 0xa7, 0xe2, 0x8c, 0x74, 0x56, 0xa6, 0xe2, 0xb7, 0xa1,
11749
+	0xec, 0x93, 0x81, 0xa0, 0xc2, 0xb6, 0x28, 0xb9, 0xe4, 0x3c, 0xb2, 0xb8, 0xa4, 0x65, 0xa2, 0x8c,
11750
+	0x34, 0x4d, 0x28, 0x26, 0xc6, 0xd0, 0x35, 0x28, 0x59, 0xcd, 0xae, 0xfd, 0xd1, 0xe6, 0x03, 0xfb,
11751
+	0x51, 0x7d, 0xd7, 0x58, 0xd2, 0x4c, 0xe0, 0x1f, 0x52, 0xb0, 0xb2, 0xab, 0x62, 0x50, 0x13, 0xa7,
11752
+	0xbb, 0xb0, 0x1c, 0x92, 0x43, 0x1e, 0x53, 0xbb, 0xac, 0x0a, 0x2e, 0x91, 0x04, 0x04, 0xb5, 0x13,
11753
+	0x5d, 0x8b, 0xa9, 0xdd, 0xd4, 0xdf, 0x46, 0x99, 0x2b, 0xff, 0x36, 0xca, 0xfe, 0x46, 0xfe, 0x36,
11754
+	0x7a, 0xef, 0x57, 0x19, 0x28, 0x26, 0x45, 0xaf, 0x08, 0x19, 0xc1, 0xb4, 0x96, 0xd4, 0x43, 0x58,
11755
+	0x22, 0x6f, 0x4b, 0x8e, 0x55, 0xb4, 0x76, 0x76, 0xf6, 0xea, 0xd6, 0x7e, 0xb3, 0x61, 0x7c, 0xae,
11756
+	0xa8, 0x58, 0x02, 0xb0, 0x7c, 0x9f, 0x89, 0x4d, 0x77, 0x91, 0x39, 0xa1, 0x62, 0x2f, 0xf4, 0x73,
11757
+	0x5b, 0x82, 0x8a, 0x79, 0xd8, 0x3b, 0x50, 0xb0, 0xba, 0xdd, 0xd6, 0xa3, 0x76, 0xb3, 0x61, 0x7c,
11758
+	0x95, 0xaa, 0x7c, 0xef, 0xec, 0xbc, 0x7a, 0x7d, 0x62, 0x2a, 0x8a, 0xbc, 0x7e, 0x40, 0x5d, 0x89,
11759
+	0xaa, 0xd7, 0x9b, 0x1d, 0x31, 0xde, 0x8b, 0xf4, 0x3c, 0x4a, 0x12, 0x10, 0xf9, 0x74, 0x5e, 0xec,
11760
+	0xe0, 0x66, 0xc7, 0xc2, 0x62, 0xc4, 0xaf, 0xd2, 0x73, 0x7e, 0x75, 0x42, 0x3a, 0x24, 0xa1, 0x18,
11761
+	0x73, 0x3d, 0xfe, 0x0b, 0xe9, 0x45, 0x46, 0x3d, 0xaf, 0x4e, 0x2a, 0x7d, 0x4a, 0xdc, 0xb1, 0x18,
11762
+	0x4d, 0xbe, 0x90, 0x48, 0x33, 0x99, 0xb9, 0xd1, 0xba, 0x9c, 0x84, 0x5c, 0x58, 0x31, 0x61, 0x19,
11763
+	0xf7, 0xda, 0x6d, 0x39, 0xbb, 0xec, 0xdc, 0xec, 0xf0, 0x28, 0x08, 0x04, 0xe6, 0x1e, 0x14, 0xe2,
11764
+	0x07, 0x14, 0xe3, 0xab, 0xec, 0x9c, 0x43, 0xf5, 0xf8, 0xf5, 0x47, 0x0e, 0xf8, 0xb8, 0xb7, 0x2f,
11765
+	0xff, 0xe1, 0x7a, 0x91, 0x9b, 0x1f, 0xf0, 0x68, 0xc4, 0x5d, 0x41, 0x7e, 0xab, 0x09, 0x1b, 0xfd,
11766
+	0x2a, 0xa7, 0x48, 0x40, 0x82, 0x51, 0x54, 0x54, 0xd8, 0xc1, 0xcd, 0x9f, 0xaa, 0x3f, 0xc3, 0x5e,
11767
+	0xe4, 0xe7, 0xec, 0x60, 0xfa, 0x25, 0x75, 0x38, 0x75, 0x27, 0xaf, 0xc7, 0x49, 0xd7, 0x7b, 0xbf,
11768
+	0x0f, 0x85, 0x38, 0x61, 0xa0, 0x75, 0xc8, 0x3f, 0xdb, 0xc3, 0x4f, 0x9a, 0xd8, 0x58, 0x52, 0xab,
11769
+	0x13, 0xf7, 0x3c, 0x53, 0x19, 0xb7, 0x0a, 0xcb, 0xbb, 0x56, 0xdb, 0x7a, 0xd4, 0xc4, 0xf1, 0xeb,
11770
+	0x75, 0x0c, 0xd0, 0x51, 0x5f, 0x31, 0xf4, 0x00, 0x89, 0xcd, 0xad, 0x3b, 0x5f, 0x7f, 0xb3, 0xbe,
11771
+	0xf4, 0x8b, 0x6f, 0xd6, 0x97, 0x7e, 0xf9, 0xcd, 0x7a, 0xea, 0xc5, 0xe5, 0x7a, 0xea, 0xeb, 0xcb,
11772
+	0xf5, 0xd4, 0xcf, 0x2f, 0xd7, 0x53, 0xff, 0x7e, 0xb9, 0x9e, 0x3a, 0xc8, 0x4b, 0x46, 0xf6, 0xf1,
11773
+	0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x36, 0x79, 0x51, 0x3a, 0xce, 0x21, 0x00, 0x00,
11768 11774
 }
... ...
@@ -281,15 +281,46 @@ message UpdateConfig {
281 281
 	enum FailureAction {
282 282
 		PAUSE = 0;
283 283
 		CONTINUE = 1;
284
-		// TODO(aaronl): Add ROLLBACK as a supported failure mode.
285
-		// (#486)
284
+		// NOTE: Automated rollback triggered as a failure action is an
285
+		// experimental feature that is not yet exposed to the end
286
+		// user. Currently, rollbacks must be initiated manually
287
+		// through the API by setting Spec to PreviousSpec. We may
288
+		// decide to expose automatic rollback in the future based on
289
+		// user feedback, or remove this feature otherwise.
290
+		ROLLBACK = 2;
286 291
 	}
287 292
 
288 293
 	// FailureAction is the action to take when an update failures.
289
-	// Currently, a failure is defined as a single updated task failing to
290
-	// reach the RUNNING state. In the future, there will be configuration
291
-	// to define what is treated as a failure (see #486 for a proposal).
292 294
 	FailureAction failure_action = 3;
295
+
296
+	// Monitor indicates how long to monitor a task for failure after it is
297
+	// created. If the task fails by ending up in one of the states
298
+	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
299
+	// this counts as a failure. If it fails after Monitor, it does not
300
+	// count as a failure. If Monitor is unspecified, a default value will
301
+	// be used.
302
+	Duration monitor = 4;
303
+
304
+	// AllowedFailureFraction is the fraction of tasks that may fail during
305
+	// an update before the failure action is invoked. Any task created by
306
+	// the current update which ends up in one of the states REJECTED,
307
+	// COMPLETED or FAILED within Monitor from its creation counts as a
308
+	// failure. The number of failures is divided by the number of tasks
309
+	// being updated, and if this fraction is greater than
310
+	// AllowedFailureFraction, the failure action is invoked.
311
+	//
312
+	// If the failure action is CONTINUE, there is no effect.
313
+	// If the failure action is PAUSE, no more tasks will be updated until
314
+	// another update is started.
315
+	// If the failure action is ROLLBACK, the orchestrator will attempt to
316
+	// roll back to the previous service spec. If the AllowedFailureFraction
317
+	// threshold is hit during the rollback, the rollback will pause.
318
+	//
319
+	// TODO(aaronl): Should there be a separate failure threshold for
320
+	// rollbacks? Should there be a failure action for rollbacks (to allow
321
+	// them to do something other than pause when the rollback encounters
322
+	// errors)?
323
+	float allowed_failure_fraction = 5;
293 324
 }
294 325
 
295 326
 // UpdateStatus is the status of an update in progress.
... ...
@@ -299,18 +330,21 @@ message UpdateStatus {
299 299
 		UPDATING = 1;
300 300
 		PAUSED = 2;
301 301
 		COMPLETED = 3;
302
-		// TODO(aaronl): add ROLLING_BACK, ROLLED_BACK as part of
303
-		// rollback support.
302
+		ROLLBACK_STARTED = 4;
303
+		ROLLBACK_PAUSED = 5; // if a rollback fails
304
+		ROLLBACK_COMPLETED = 6;
304 305
 	}
305 306
 
306 307
 	// State is the state of this update. It indicates whether the
307
-	// update is in progress, completed, or is paused.
308
+	// update is in progress, completed, paused, rolling back, or
309
+	// finished rolling back.
308 310
 	UpdateState state = 1;
309 311
 
310 312
 	// StartedAt is the time at which the update was started.
311 313
 	Timestamp started_at = 2;
312 314
 
313
-	// CompletedAt is the time at which the update completed.
315
+	// CompletedAt is the time at which the update completed successfully,
316
+	// paused, or finished rolling back.
314 317
 	Timestamp completed_at = 3;
315 318
 
316 319
 	// TODO(aaronl): Consider adding a timestamp showing when the most
... ...
@@ -16,7 +16,6 @@ import (
16 16
 	"path/filepath"
17 17
 	"time"
18 18
 
19
-	log "github.com/Sirupsen/logrus"
20 19
 	cfcsr "github.com/cloudflare/cfssl/csr"
21 20
 	"github.com/cloudflare/cfssl/helpers"
22 21
 	"github.com/cloudflare/cfssl/initca"
... ...
@@ -117,8 +116,7 @@ func (rca *RootCA) CanSign() bool {
117 117
 func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org string) (*tls.Certificate, error) {
118 118
 	csr, key, err := GenerateAndWriteNewKey(paths)
119 119
 	if err != nil {
120
-		log.Debugf("error when generating new node certs: %v", err)
121
-		return nil, err
120
+		return nil, fmt.Errorf("error when generating new node certs: %v", err)
122 121
 	}
123 122
 
124 123
 	if !rca.CanSign() {
... ...
@@ -128,8 +126,7 @@ func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org stri
128 128
 	// Obtain a signed Certificate
129 129
 	certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org)
130 130
 	if err != nil {
131
-		log.Debugf("failed to sign node certificate: %v", err)
132
-		return nil, err
131
+		return nil, fmt.Errorf("failed to sign node certificate: %v", err)
133 132
 	}
134 133
 
135 134
 	// Ensure directory exists
... ...
@@ -149,20 +146,18 @@ func (rca *RootCA) IssueAndSaveNewCertificates(paths CertPaths, cn, ou, org stri
149 149
 		return nil, err
150 150
 	}
151 151
 
152
-	log.Debugf("locally issued new TLS certificate for node ID: %s and role: %s", cn, ou)
153 152
 	return &tlsKeyPair, nil
154 153
 }
155 154
 
156 155
 // RequestAndSaveNewCertificates gets new certificates issued, either by signing them locally if a signer is
157 156
 // available, or by requesting them from the remote server at remoteAddr.
158
-func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, token string, remotes remotes.Remotes, transport credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) (*tls.Certificate, error) {
157
+func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, token string, remotes remotes.Remotes, transport credentials.TransportCredentials, nodeInfo chan<- api.IssueNodeCertificateResponse) (*tls.Certificate, error) {
159 158
 	// Create a new key/pair and CSR for the new manager
160 159
 	// Write the new CSR and the new key to a temporary location so we can survive crashes on rotation
161 160
 	tempPaths := genTempPaths(paths)
162 161
 	csr, key, err := GenerateAndWriteNewKey(tempPaths)
163 162
 	if err != nil {
164
-		log.Debugf("error when generating new node certs: %v", err)
165
-		return nil, err
163
+		return nil, fmt.Errorf("error when generating new node certs: %v", err)
166 164
 	}
167 165
 
168 166
 	// Get the remote manager to issue a CA signed certificate for this node
... ...
@@ -174,7 +169,6 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths Cert
174 174
 		if err == nil {
175 175
 			break
176 176
 		}
177
-		log.Warningf("error fetching signed node certificate: %v", err)
178 177
 	}
179 178
 	if err != nil {
180 179
 		return nil, err
... ...
@@ -206,10 +200,6 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths Cert
206 206
 		return nil, err
207 207
 	}
208 208
 
209
-	if len(X509Cert.Subject.OrganizationalUnit) != 0 {
210
-		log.Infof("Downloaded new TLS credentials with role: %s.", X509Cert.Subject.OrganizationalUnit[0])
211
-	}
212
-
213 209
 	// Ensure directory exists
214 210
 	err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
215 211
 	if err != nil {
... ...
@@ -259,8 +249,7 @@ func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string)
259 259
 
260 260
 	cert, err := rca.Signer.Sign(signRequest)
261 261
 	if err != nil {
262
-		log.Debugf("failed to sign node certificate: %v", err)
263
-		return nil, err
262
+		return nil, fmt.Errorf("failed to sign node certificate: %v", err)
264 263
 	}
265 264
 
266 265
 	return rca.AppendFirstRootPEM(cert)
... ...
@@ -342,8 +331,7 @@ func NewRootCA(certBytes, keyBytes []byte, certExpiry time.Duration) (RootCA, er
342 342
 	if err != nil {
343 343
 		priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrasePrev)
344 344
 		if err != nil {
345
-			log.Debug("Malformed private key %v", err)
346
-			return RootCA{}, err
345
+			return RootCA{}, fmt.Errorf("Malformed private key: %v", err)
347 346
 		}
348 347
 	}
349 348
 
... ...
@@ -414,12 +402,7 @@ func GetLocalRootCA(baseDir string) (RootCA, error) {
414 414
 		key = nil
415 415
 	}
416 416
 
417
-	rootCA, err := NewRootCA(cert, key, DefaultNodeCertExpiration)
418
-	if err == nil {
419
-		log.Debugf("successfully loaded the Root CA: %s", paths.RootCA.Cert)
420
-	}
421
-
422
-	return rootCA, err
417
+	return NewRootCA(cert, key, DefaultNodeCertExpiration)
423 418
 }
424 419
 
425 420
 // GetRemoteCA returns the remote endpoint's CA certificate
... ...
@@ -552,8 +535,7 @@ func GenerateAndSignNewTLSCert(rootCA RootCA, cn, ou, org string, paths CertPath
552 552
 	// Obtain a signed Certificate
553 553
 	certChain, err := rootCA.ParseValidateAndSignCSR(csr, cn, ou, org)
554 554
 	if err != nil {
555
-		log.Debugf("failed to sign node certificate: %v", err)
556
-		return nil, err
555
+		return nil, fmt.Errorf("failed to sign node certificate: %v", err)
557 556
 	}
558 557
 
559 558
 	// Ensure directory exists
... ...
@@ -603,7 +585,7 @@ func GenerateAndWriteNewKey(paths CertPaths) (csr, key []byte, err error) {
603 603
 
604 604
 // GetRemoteSignedCertificate submits a CSR to a remote CA server address,
605 605
 // and that is part of a CA identified by a specific certificate pool.
606
-func GetRemoteSignedCertificate(ctx context.Context, csr []byte, token string, rootCAPool *x509.CertPool, r remotes.Remotes, creds credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) ([]byte, error) {
606
+func GetRemoteSignedCertificate(ctx context.Context, csr []byte, token string, rootCAPool *x509.CertPool, r remotes.Remotes, creds credentials.TransportCredentials, nodeInfo chan<- api.IssueNodeCertificateResponse) ([]byte, error) {
607 607
 	if rootCAPool == nil {
608 608
 		return nil, fmt.Errorf("valid root CA pool required")
609 609
 	}
... ...
@@ -653,7 +635,6 @@ func GetRemoteSignedCertificate(ctx context.Context, csr []byte, token string, r
653 653
 		Max:    30 * time.Second,
654 654
 	})
655 655
 
656
-	log.Infof("Waiting for TLS certificate to be issued...")
657 656
 	// Exponential backoff with Max of 30 seconds to wait for a new retry
658 657
 	for {
659 658
 		// Send the Request and retrieve the certificate
... ...
@@ -694,7 +675,6 @@ func readCertExpiration(paths CertPaths) (time.Duration, error) {
694 694
 	// Read the Cert
695 695
 	cert, err := ioutil.ReadFile(paths.Cert)
696 696
 	if err != nil {
697
-		log.Debugf("failed to read certificate file: %s", paths.Cert)
698 697
 		return time.Hour, err
699 698
 	}
700 699
 
... ...
@@ -730,7 +710,6 @@ func generateNewCSR() (csr, key []byte, err error) {
730 730
 
731 731
 	csr, key, err = cfcsr.ParseRequest(req)
732 732
 	if err != nil {
733
-		log.Debugf(`failed to generate CSR`)
734 733
 		return
735 734
 	}
736 735
 
... ...
@@ -15,11 +15,12 @@ import (
15 15
 	"sync"
16 16
 	"time"
17 17
 
18
-	log "github.com/Sirupsen/logrus"
18
+	"github.com/Sirupsen/logrus"
19 19
 	cfconfig "github.com/cloudflare/cfssl/config"
20 20
 	"github.com/docker/distribution/digest"
21 21
 	"github.com/docker/swarmkit/api"
22 22
 	"github.com/docker/swarmkit/identity"
23
+	"github.com/docker/swarmkit/log"
23 24
 	"github.com/docker/swarmkit/remotes"
24 25
 
25 26
 	"golang.org/x/net/context"
... ...
@@ -35,8 +36,8 @@ const (
35 35
 	rootCN = "swarm-ca"
36 36
 	// ManagerRole represents the Manager node type, and is used for authorization to endpoints
37 37
 	ManagerRole = "swarm-manager"
38
-	// AgentRole represents the Agent node type, and is used for authorization to endpoints
39
-	AgentRole = "swarm-worker"
38
+	// WorkerRole represents the Worker node type, and is used for authorization to endpoints
39
+	WorkerRole = "swarm-worker"
40 40
 	// CARole represents the CA node type, and is used for clients attempting to get new certificates issued
41 41
 	CARole = "swarm-ca"
42 42
 
... ...
@@ -184,6 +185,7 @@ func getCAHashFromToken(token string) (digest.Digest, error) {
184 184
 // Every node requires at least a set of TLS certificates with which to join the cluster with.
185 185
 // In the case of a manager, these certificates will be used both for client and server credentials.
186 186
 func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, proposedRole string, remotes remotes.Remotes, nodeInfo chan<- api.IssueNodeCertificateResponse) (*SecurityConfig, error) {
187
+	ctx = log.WithModule(ctx, "tls")
187 188
 	paths := NewConfigPaths(baseCertDir)
188 189
 
189 190
 	var (
... ...
@@ -196,9 +198,9 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
196 196
 	rootCA, err = GetLocalRootCA(baseCertDir)
197 197
 	switch err {
198 198
 	case nil:
199
-		log.Debugf("loaded local CA certificate: %s.", paths.RootCA.Cert)
199
+		log.G(ctx).Debug("loaded CA certificate")
200 200
 	case ErrNoLocalRootCA:
201
-		log.Debugf("no valid local CA certificate found: %v", err)
201
+		log.G(ctx).WithError(err).Debugf("failed to load local CA certificate")
202 202
 
203 203
 		// Get a digest for the optional CA hash string that we've been provided
204 204
 		// If we were provided a non-empty string, and it is an invalid hash, return
... ...
@@ -221,7 +223,7 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
221 221
 			if err == nil {
222 222
 				break
223 223
 			}
224
-			log.Warningf("failed to retrieve remote root CA certificate: %v", err)
224
+			log.G(ctx).WithError(err).Errorf("failed to retrieve remote root CA certificate")
225 225
 		}
226 226
 		if err != nil {
227 227
 			return nil, err
... ...
@@ -232,7 +234,7 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
232 232
 			return nil, err
233 233
 		}
234 234
 
235
-		log.Debugf("downloaded remote CA certificate.")
235
+		log.G(ctx).Debugf("retrieved remote CA certificate: %s", paths.RootCA.Cert)
236 236
 	default:
237 237
 		return nil, err
238 238
 	}
... ...
@@ -242,7 +244,7 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
242 242
 	// load our certificates.
243 243
 	clientTLSCreds, serverTLSCreds, err = LoadTLSCreds(rootCA, paths.Node)
244 244
 	if err != nil {
245
-		log.Debugf("no valid local TLS credentials found: %v", err)
245
+		log.G(ctx).WithError(err).Debugf("no node credentials found in: %s", paths.Node.Cert)
246 246
 
247 247
 		var (
248 248
 			tlsKeyPair *tls.Certificate
... ...
@@ -262,17 +264,27 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
262 262
 			}
263 263
 			tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
264 264
 			if err != nil {
265
+				log.G(ctx).WithFields(logrus.Fields{
266
+					"node.id":   cn,
267
+					"node.role": proposedRole,
268
+				}).WithError(err).Errorf("failed to issue and save new certificate")
265 269
 				return nil, err
266 270
 			}
271
+
272
+			log.G(ctx).WithFields(logrus.Fields{
273
+				"node.id":   cn,
274
+				"node.role": proposedRole,
275
+			}).Debug("issued new TLS certificate")
267 276
 		} else {
268 277
 			// There was an error loading our Credentials, let's get a new certificate issued
269 278
 			// Last argument is nil because at this point we don't have any valid TLS creds
270 279
 			tlsKeyPair, err = rootCA.RequestAndSaveNewCertificates(ctx, paths.Node, token, remotes, nil, nodeInfo)
271 280
 			if err != nil {
281
+				log.G(ctx).WithError(err).Error("failed to request save new certificate")
272 282
 				return nil, err
273 283
 			}
274 284
 		}
275
-		// Create the Server TLS Credentials for this node. These will not be used by agents.
285
+		// Create the Server TLS Credentials for this node. These will not be used by workers.
276 286
 		serverTLSCreds, err = rootCA.NewServerTLSCredentials(tlsKeyPair)
277 287
 		if err != nil {
278 288
 			return nil, err
... ...
@@ -284,7 +296,10 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
284 284
 		if err != nil {
285 285
 			return nil, err
286 286
 		}
287
-		log.Debugf("new TLS credentials generated: %s.", paths.Node.Cert)
287
+		log.G(ctx).WithFields(logrus.Fields{
288
+			"node.id":   clientTLSCreds.NodeID(),
289
+			"node.role": clientTLSCreds.Role(),
290
+		}).Debugf("new node credentials generated: %s", paths.Node.Cert)
288 291
 	} else {
289 292
 		if nodeInfo != nil {
290 293
 			nodeInfo <- api.IssueNodeCertificateResponse{
... ...
@@ -292,7 +307,10 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, token, propose
292 292
 				NodeMembership: api.NodeMembershipAccepted,
293 293
 			}
294 294
 		}
295
-		log.Debugf("loaded local TLS credentials: %s.", paths.Node.Cert)
295
+		log.G(ctx).WithFields(logrus.Fields{
296
+			"node.id":   clientTLSCreds.NodeID(),
297
+			"node.role": clientTLSCreds.Role(),
298
+		}).Debug("loaded node credentials")
296 299
 	}
297 300
 
298 301
 	return NewSecurityConfig(&rootCA, clientTLSCreds, serverTLSCreds), nil
... ...
@@ -308,6 +326,11 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
308 308
 		var retry time.Duration
309 309
 		defer close(updates)
310 310
 		for {
311
+			ctx = log.WithModule(ctx, "tls")
312
+			log := log.G(ctx).WithFields(logrus.Fields{
313
+				"node.id":   s.ClientTLSCreds.NodeID(),
314
+				"node.role": s.ClientTLSCreds.Role(),
315
+			})
311 316
 			// Our starting default will be 5 minutes
312 317
 			retry = 5 * time.Minute
313 318
 
... ...
@@ -323,21 +346,27 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
323 323
 				// If we have an expired certificate, we let's stick with the starting default in
324 324
 				// the hope that this is a temporary clock skew.
325 325
 				if expiresIn.Minutes() < 0 {
326
-					log.Debugf("failed to create a new client TLS config: %v", err)
327
-					updates <- CertificateUpdate{Err: fmt.Errorf("TLS Certificate is expired")}
326
+					log.WithError(err).Errorf("failed to create a new client TLS config")
327
+					updates <- CertificateUpdate{Err: fmt.Errorf("TLS certificate is expired")}
328 328
 				} else {
329 329
 					// Random retry time between 50% and 80% of the total time to expiration
330 330
 					retry = calculateRandomExpiry(expiresIn)
331 331
 				}
332 332
 			}
333 333
 
334
+			log.WithFields(logrus.Fields{
335
+				"time": time.Now().Add(retry),
336
+			}).Debugf("next certificate renewal scheduled")
337
+
334 338
 			select {
335 339
 			case <-time.After(retry):
340
+				log.Infof("renewing certificate")
336 341
 			case <-renew:
342
+				log.Infof("forced certificate renewal")
337 343
 			case <-ctx.Done():
344
+				log.Infof("shutting down certificate renewal routine")
338 345
 				return
339 346
 			}
340
-			log.Infof("Renewing TLS Certificate.")
341 347
 
342 348
 			// Let's request new certs. Renewals don't require a token.
343 349
 			rootCA := s.RootCA()
... ...
@@ -348,25 +377,25 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
348 348
 				s.ClientTLSCreds,
349 349
 				nil)
350 350
 			if err != nil {
351
-				log.Debugf("failed to renew the TLS Certificate: %v", err)
351
+				log.WithError(err).Errorf("failed to renew the certificate")
352 352
 				updates <- CertificateUpdate{Err: err}
353 353
 				continue
354 354
 			}
355 355
 
356 356
 			clientTLSConfig, err := NewClientTLSConfig(tlsKeyPair, rootCA.Pool, CARole)
357 357
 			if err != nil {
358
-				log.Debugf("failed to create a new client TLS config: %v", err)
358
+				log.WithError(err).Errorf("failed to create a new client config")
359 359
 				updates <- CertificateUpdate{Err: err}
360 360
 			}
361 361
 			serverTLSConfig, err := NewServerTLSConfig(tlsKeyPair, rootCA.Pool)
362 362
 			if err != nil {
363
-				log.Debugf("failed to create a new server TLS config: %v", err)
363
+				log.WithError(err).Errorf("failed to create a new server config")
364 364
 				updates <- CertificateUpdate{Err: err}
365 365
 			}
366 366
 
367 367
 			err = s.ClientTLSCreds.LoadNewTLSConfig(clientTLSConfig)
368 368
 			if err != nil {
369
-				log.Debugf("failed to update the client TLS credentials: %v", err)
369
+				log.WithError(err).Errorf("failed to update the client credentials")
370 370
 				updates <- CertificateUpdate{Err: err}
371 371
 			}
372 372
 
... ...
@@ -380,7 +409,7 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
380 380
 
381 381
 			err = s.ServerTLSCreds.LoadNewTLSConfig(serverTLSConfig)
382 382
 			if err != nil {
383
-				log.Debugf("failed to update the server TLS credentials: %v", err)
383
+				log.WithError(err).Errorf("failed to update the server TLS credentials")
384 384
 				updates <- CertificateUpdate{Err: err}
385 385
 			}
386 386
 
... ...
@@ -478,7 +507,7 @@ func LoadTLSCreds(rootCA RootCA, paths CertPaths) (*MutableTLSCreds, *MutableTLS
478 478
 	}
479 479
 
480 480
 	// Load the Certificates also as client credentials.
481
-	// Both Agents and Managers always connect to remote Managers,
481
+	// Both workers and managers always connect to remote managers,
482 482
 	// so ServerName is always set to ManagerRole here.
483 483
 	clientTLSCreds, err := rootCA.NewClientTLSCredentials(&keyPair, ManagerRole)
484 484
 	if err != nil {
... ...
@@ -561,7 +590,7 @@ func ParseRole(apiRole api.NodeRole) (string, error) {
561 561
 	case api.NodeRoleManager:
562 562
 		return ManagerRole, nil
563 563
 	case api.NodeRoleWorker:
564
-		return AgentRole, nil
564
+		return WorkerRole, nil
565 565
 	default:
566 566
 		return "", fmt.Errorf("failed to parse api role: %v", apiRole)
567 567
 	}
... ...
@@ -572,7 +601,7 @@ func FormatRole(role string) (api.NodeRole, error) {
572 572
 	switch strings.ToLower(role) {
573 573
 	case strings.ToLower(ManagerRole):
574 574
 		return api.NodeRoleManager, nil
575
-	case strings.ToLower(AgentRole):
575
+	case strings.ToLower(WorkerRole):
576 576
 		return api.NodeRoleWorker, nil
577 577
 	default:
578 578
 		return 0, fmt.Errorf("failed to parse role: %s", role)
... ...
@@ -149,14 +149,14 @@ func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNod
149 149
 	}
150 150
 	defer s.doneTask()
151 151
 
152
-	// If the remote node is an Agent (either forwarded by a manager, or calling directly),
153
-	// issue a renew agent certificate entry with the correct ID
154
-	nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{AgentRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
152
+	// If the remote node is a worker (either forwarded by a manager, or calling directly),
153
+	// issue a renew worker certificate entry with the correct ID
154
+	nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{WorkerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
155 155
 	if err == nil {
156 156
 		return s.issueRenewCertificate(ctx, nodeID, request.CSR)
157 157
 	}
158 158
 
159
-	// If the remote node is a Manager (either forwarded by another manager, or calling directly),
159
+	// If the remote node is a manager (either forwarded by another manager, or calling directly),
160 160
 	// issue a renew certificate entry with the correct ID
161 161
 	nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.securityConfig.ClientTLSCreds.Organization())
162 162
 	if err == nil {
... ...
@@ -8,7 +8,6 @@ import (
8 8
 	"net"
9 9
 	"strings"
10 10
 	"sync"
11
-	"time"
12 11
 
13 12
 	"google.golang.org/grpc/credentials"
14 13
 
... ...
@@ -33,12 +32,12 @@ type MutableTLSCreds struct {
33 33
 	// TLS configuration
34 34
 	config *tls.Config
35 35
 	// TLS Credentials
36
-	tlsCreds credentials.TransportAuthenticator
36
+	tlsCreds credentials.TransportCredentials
37 37
 	// store the subject for easy access
38 38
 	subject pkix.Name
39 39
 }
40 40
 
41
-// Info implements the credentials.TransportAuthenticator interface
41
+// Info implements the credentials.TransportCredentials interface
42 42
 func (c *MutableTLSCreds) Info() credentials.ProtocolInfo {
43 43
 	return credentials.ProtocolInfo{
44 44
 		SecurityProtocol: "tls",
... ...
@@ -46,26 +45,19 @@ func (c *MutableTLSCreds) Info() credentials.ProtocolInfo {
46 46
 	}
47 47
 }
48 48
 
49
-// GetRequestMetadata implements the credentials.TransportAuthenticator interface
49
+// GetRequestMetadata implements the credentials.TransportCredentials interface
50 50
 func (c *MutableTLSCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
51 51
 	return nil, nil
52 52
 }
53 53
 
54
-// RequireTransportSecurity implements the credentials.TransportAuthenticator interface
54
+// RequireTransportSecurity implements the credentials.TransportCredentials interface
55 55
 func (c *MutableTLSCreds) RequireTransportSecurity() bool {
56 56
 	return true
57 57
 }
58 58
 
59
-// ClientHandshake implements the credentials.TransportAuthenticator interface
60
-func (c *MutableTLSCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, credentials.AuthInfo, error) {
59
+// ClientHandshake implements the credentials.TransportCredentials interface
60
+func (c *MutableTLSCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
61 61
 	// borrow all the code from the original TLS credentials
62
-	var errChannel chan error
63
-	if timeout != 0 {
64
-		errChannel = make(chan error, 2)
65
-		time.AfterFunc(timeout, func() {
66
-			errChannel <- timeoutError{}
67
-		})
68
-	}
69 62
 	c.Lock()
70 63
 	if c.config.ServerName == "" {
71 64
 		colonPos := strings.LastIndex(addr, ":")
... ...
@@ -80,23 +72,23 @@ func (c *MutableTLSCreds) ClientHandshake(addr string, rawConn net.Conn, timeout
80 80
 	// would create a deadlock otherwise
81 81
 	c.Unlock()
82 82
 	var err error
83
-	if timeout == 0 {
84
-		err = conn.Handshake()
85
-	} else {
86
-		go func() {
87
-			errChannel <- conn.Handshake()
88
-		}()
89
-		err = <-errChannel
83
+	errChannel := make(chan error, 1)
84
+	go func() {
85
+		errChannel <- conn.Handshake()
86
+	}()
87
+	select {
88
+	case err = <-errChannel:
89
+	case <-ctx.Done():
90
+		err = ctx.Err()
90 91
 	}
91 92
 	if err != nil {
92 93
 		rawConn.Close()
93 94
 		return nil, nil, err
94 95
 	}
95
-
96 96
 	return conn, nil, nil
97 97
 }
98 98
 
99
-// ServerHandshake implements the credentials.TransportAuthenticator interface
99
+// ServerHandshake implements the credentials.TransportCredentials interface
100 100
 func (c *MutableTLSCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
101 101
 	c.Lock()
102 102
 	conn := tls.Server(rawConn, c.config)
... ...
@@ -132,7 +124,7 @@ func (c *MutableTLSCreds) Config() *tls.Config {
132 132
 	return c.config
133 133
 }
134 134
 
135
-// Role returns the OU for the certificate encapsulated in this TransportAuthenticator
135
+// Role returns the OU for the certificate encapsulated in this TransportCredentials
136 136
 func (c *MutableTLSCreds) Role() string {
137 137
 	c.Lock()
138 138
 	defer c.Unlock()
... ...
@@ -140,7 +132,7 @@ func (c *MutableTLSCreds) Role() string {
140 140
 	return c.subject.OrganizationalUnit[0]
141 141
 }
142 142
 
143
-// Organization returns the O for the certificate encapsulated in this TransportAuthenticator
143
+// Organization returns the O for the certificate encapsulated in this TransportCredentials
144 144
 func (c *MutableTLSCreds) Organization() string {
145 145
 	c.Lock()
146 146
 	defer c.Unlock()
... ...
@@ -148,7 +140,7 @@ func (c *MutableTLSCreds) Organization() string {
148 148
 	return c.subject.Organization[0]
149 149
 }
150 150
 
151
-// NodeID returns the CN for the certificate encapsulated in this TransportAuthenticator
151
+// NodeID returns the CN for the certificate encapsulated in this TransportCredentials
152 152
 func (c *MutableTLSCreds) NodeID() string {
153 153
 	c.Lock()
154 154
 	defer c.Unlock()
... ...
@@ -156,7 +148,7 @@ func (c *MutableTLSCreds) NodeID() string {
156 156
 	return c.subject.CommonName
157 157
 }
158 158
 
159
-// NewMutableTLS uses c to construct a mutable TransportAuthenticator based on TLS.
159
+// NewMutableTLS uses c to construct a mutable TransportCredentials based on TLS.
160 160
 func NewMutableTLS(c *tls.Config) (*MutableTLSCreds, error) {
161 161
 	originalTC := credentials.NewTLS(c)
162 162
 
... ...
@@ -564,7 +564,9 @@ func (a *Allocator) allocateNode(ctx context.Context, nc *networkContext, node *
564 564
 
565 565
 func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *api.Service) error {
566 566
 	if s.Spec.Endpoint != nil {
567
+		// service has user-defined endpoint
567 568
 		if s.Endpoint == nil {
569
+			// service currently has no allocated endpoint, needs to be allocated.
568 570
 			s.Endpoint = &api.Endpoint{
569 571
 				Spec: s.Spec.Endpoint.Copy(),
570 572
 			}
... ...
@@ -587,6 +589,12 @@ func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *
587 587
 					&api.Endpoint_VirtualIP{NetworkID: nc.ingressNetwork.ID})
588 588
 			}
589 589
 		}
590
+	} else if s.Endpoint != nil {
591
+		// service has no user-defined endpoint but has already allocated network resources,
592
+		// which need to be deallocated.
593
+		if err := nc.nwkAllocator.ServiceDeallocate(s); err != nil {
594
+			return err
595
+		}
590 596
 	}
591 597
 
592 598
 	if err := nc.nwkAllocator.ServiceAllocate(s); err != nil {
... ...
@@ -155,7 +155,18 @@ func (pa *portAllocator) serviceDeallocatePorts(s *api.Service) {
155 155
 }
156 156
 
157 157
 func (pa *portAllocator) isPortsAllocated(s *api.Service) bool {
158
-	if s.Endpoint == nil {
158
+	// If service has no user-defined endpoint and allocated endpoint,
159
+	// we assume it is allocated and return true.
160
+	if s.Endpoint == nil && s.Spec.Endpoint == nil {
161
+		return true
162
+	}
163
+
164
+	// If service has an allocated endpoint but no user-defined endpoint,
165
+	// we assume allocated endpoints are redundant, and they need to be deallocated.
166
+	// If service has no allocated endpoint but has a user-defined endpoint,
167
+	// we assume it is not allocated.
168
+	if (s.Endpoint != nil && s.Spec.Endpoint == nil) ||
169
+		(s.Endpoint == nil && s.Spec.Endpoint != nil) {
159 170
 		return false
160 171
 	}
161 172
 
162 173
deleted file mode 100644
... ...
@@ -1,12 +0,0 @@
1
-package hackpicker
2
-
3
-// AddrSelector is interface which should track cluster for its leader address.
4
-type AddrSelector interface {
5
-	LeaderAddr() (string, error)
6
-}
7
-
8
-// RaftCluster is interface which combines useful methods for clustering.
9
-type RaftCluster interface {
10
-	AddrSelector
11
-	IsLeader() bool
12
-}
13 1
deleted file mode 100644
... ...
@@ -1,141 +0,0 @@
1
-// Package hackpicker is temporary solution to provide more seamless experience
2
-// for controlapi. It has drawback of slow reaction to leader change, but it
3
-// tracks leader automatically without erroring out to client.
4
-package hackpicker
5
-
6
-import (
7
-	"sync"
8
-
9
-	"golang.org/x/net/context"
10
-	"google.golang.org/grpc"
11
-	"google.golang.org/grpc/transport"
12
-)
13
-
14
-// picker always picks address of cluster leader.
15
-type picker struct {
16
-	mu   sync.Mutex
17
-	addr string
18
-	raft AddrSelector
19
-	conn *grpc.Conn
20
-	cc   *grpc.ClientConn
21
-}
22
-
23
-// Init does initial processing for the Picker, e.g., initiate some connections.
24
-func (p *picker) Init(cc *grpc.ClientConn) error {
25
-	p.cc = cc
26
-	return nil
27
-}
28
-
29
-func (p *picker) initConn() error {
30
-	if p.conn == nil {
31
-		conn, err := grpc.NewConn(p.cc)
32
-		if err != nil {
33
-			return err
34
-		}
35
-		p.conn = conn
36
-	}
37
-	return nil
38
-}
39
-
40
-// Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
41
-// or some error happens.
42
-func (p *picker) Pick(ctx context.Context) (transport.ClientTransport, error) {
43
-	p.mu.Lock()
44
-	if err := p.initConn(); err != nil {
45
-		p.mu.Unlock()
46
-		return nil, err
47
-	}
48
-	p.mu.Unlock()
49
-
50
-	addr, err := p.raft.LeaderAddr()
51
-	if err != nil {
52
-		return nil, err
53
-	}
54
-	p.mu.Lock()
55
-	if p.addr != addr {
56
-		p.addr = addr
57
-		p.conn.NotifyReset()
58
-	}
59
-	p.mu.Unlock()
60
-	return p.conn.Wait(ctx)
61
-}
62
-
63
-// PickAddr picks a peer address for connecting. This will be called repeated for
64
-// connecting/reconnecting.
65
-func (p *picker) PickAddr() (string, error) {
66
-	addr, err := p.raft.LeaderAddr()
67
-	if err != nil {
68
-		return "", err
69
-	}
70
-	p.mu.Lock()
71
-	p.addr = addr
72
-	p.mu.Unlock()
73
-	return addr, nil
74
-}
75
-
76
-// State returns the connectivity state of the underlying connections.
77
-func (p *picker) State() (grpc.ConnectivityState, error) {
78
-	return p.conn.State(), nil
79
-}
80
-
81
-// WaitForStateChange blocks until the state changes to something other than
82
-// the sourceState. It returns the new state or error.
83
-func (p *picker) WaitForStateChange(ctx context.Context, sourceState grpc.ConnectivityState) (grpc.ConnectivityState, error) {
84
-	return p.conn.WaitForStateChange(ctx, sourceState)
85
-}
86
-
87
-// Reset the current connection and force a reconnect to another address.
88
-func (p *picker) Reset() error {
89
-	p.conn.NotifyReset()
90
-	return nil
91
-}
92
-
93
-// Close closes all the Conn's owned by this Picker.
94
-func (p *picker) Close() error {
95
-	return p.conn.Close()
96
-}
97
-
98
-// ConnSelector is struct for obtaining connection with raftpicker.
99
-type ConnSelector struct {
100
-	mu      sync.Mutex
101
-	cc      *grpc.ClientConn
102
-	cluster RaftCluster
103
-	opts    []grpc.DialOption
104
-}
105
-
106
-// NewConnSelector returns new ConnSelector with cluster and grpc.DialOpts which
107
-// will be used for Dial on first call of Conn.
108
-func NewConnSelector(cluster RaftCluster, opts ...grpc.DialOption) *ConnSelector {
109
-	return &ConnSelector{
110
-		cluster: cluster,
111
-		opts:    opts,
112
-	}
113
-}
114
-
115
-// Conn returns *grpc.ClientConn with picker which picks raft cluster leader.
116
-// Internal connection estabilished lazily on this call.
117
-// It can return error if cluster wasn't ready at the moment of initial call.
118
-func (c *ConnSelector) Conn() (*grpc.ClientConn, error) {
119
-	c.mu.Lock()
120
-	defer c.mu.Unlock()
121
-	if c.cc != nil {
122
-		return c.cc, nil
123
-	}
124
-	addr, err := c.cluster.LeaderAddr()
125
-	if err != nil {
126
-		return nil, err
127
-	}
128
-	picker := &picker{raft: c.cluster, addr: addr}
129
-	opts := append(c.opts, grpc.WithPicker(picker))
130
-	cc, err := grpc.Dial(addr, opts...)
131
-	if err != nil {
132
-		return nil, err
133
-	}
134
-	c.cc = cc
135
-	return c.cc, nil
136
-}
137
-
138
-// Reset does nothing for hackpicker.
139
-func (c *ConnSelector) Reset() error {
140
-	return nil
141
-}
... ...
@@ -5,7 +5,7 @@ import (
5 5
 	"reflect"
6 6
 	"strconv"
7 7
 
8
-	"github.com/docker/engine-api/types/reference"
8
+	"github.com/docker/distribution/reference"
9 9
 	"github.com/docker/swarmkit/api"
10 10
 	"github.com/docker/swarmkit/identity"
11 11
 	"github.com/docker/swarmkit/manager/scheduler"
... ...
@@ -133,7 +133,7 @@ func validateTask(taskSpec api.TaskSpec) error {
133 133
 		return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided")
134 134
 	}
135 135
 
136
-	if _, _, err := reference.Parse(container.Image); err != nil {
136
+	if _, err := reference.ParseNamed(container.Image); err != nil {
137 137
 		return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", container.Image)
138 138
 	}
139 139
 	return nil
... ...
@@ -149,13 +149,13 @@ func validateEndpointSpec(epSpec *api.EndpointSpec) error {
149 149
 		return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: ports can't be used with dnsrr mode")
150 150
 	}
151 151
 
152
-	portSet := make(map[api.PortConfig]struct{})
152
+	portSet := make(map[uint32]struct{})
153 153
 	for _, port := range epSpec.Ports {
154
-		if _, ok := portSet[*port]; ok {
155
-			return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate ports provided")
154
+		if _, ok := portSet[port.PublishedPort]; ok {
155
+			return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate published ports provided")
156 156
 		}
157 157
 
158
-		portSet[*port] = struct{}{}
158
+		portSet[port.PublishedPort] = struct{}{}
159 159
 	}
160 160
 
161 161
 	return nil
... ...
@@ -350,6 +350,7 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
350 350
 			return errModeChangeNotAllowed
351 351
 		}
352 352
 		service.Meta.Version = *request.ServiceVersion
353
+		service.PreviousSpec = service.Spec.Copy()
353 354
 		service.Spec = *request.Spec.Copy()
354 355
 
355 356
 		// Reset update status
... ...
@@ -3,6 +3,7 @@ package dispatcher
3 3
 import (
4 4
 	"errors"
5 5
 	"fmt"
6
+	"strconv"
6 7
 	"sync"
7 8
 	"time"
8 9
 
... ...
@@ -41,6 +42,9 @@ const (
41 41
 	// into a single transaction. A fraction of a second feels about
42 42
 	// right.
43 43
 	maxBatchInterval = 100 * time.Millisecond
44
+
45
+	modificationBatchLimit = 100
46
+	batchingWaitTime       = 100 * time.Millisecond
44 47
 )
45 48
 
46 49
 var (
... ...
@@ -127,8 +131,6 @@ func New(cluster Cluster, c *Config) *Dispatcher {
127 127
 		nodes:                 newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod),
128 128
 		store:                 cluster.MemoryStore(),
129 129
 		cluster:               cluster,
130
-		mgrQueue:              watch.NewQueue(),
131
-		keyMgrQueue:           watch.NewQueue(),
132 130
 		taskUpdates:           make(map[string]*api.TaskStatus),
133 131
 		nodeUpdates:           make(map[string]nodeUpdate),
134 132
 		processUpdatesTrigger: make(chan struct{}, 1),
... ...
@@ -195,6 +197,9 @@ func (d *Dispatcher) Run(ctx context.Context) error {
195 195
 		d.mu.Unlock()
196 196
 		return err
197 197
 	}
198
+	// set queues here to guarantee that Close will close them
199
+	d.mgrQueue = watch.NewQueue()
200
+	d.keyMgrQueue = watch.NewQueue()
198 201
 
199 202
 	peerWatcher, peerCancel := d.cluster.SubscribePeers()
200 203
 	defer peerCancel()
... ...
@@ -351,26 +356,10 @@ func (d *Dispatcher) isRunning() bool {
351 351
 	return true
352 352
 }
353 353
 
354
-// register is used for registration of node with particular dispatcher.
355
-func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) {
356
-	// prevent register until we're ready to accept it
357
-	if err := d.isRunningLocked(); err != nil {
358
-		return "", err
359
-	}
360
-
361
-	if err := d.nodes.CheckRateLimit(nodeID); err != nil {
362
-		return "", err
363
-	}
364
-
365
-	// TODO(stevvooe): Validate node specification.
366
-	var node *api.Node
367
-	d.store.View(func(tx store.ReadTx) {
368
-		node = store.GetNode(tx, nodeID)
369
-	})
370
-	if node == nil {
371
-		return "", ErrNodeNotFound
372
-	}
373
-
354
+// updateNode updates the description of a node and sets status to READY
355
+// this is used during registration when a new node description is provided
356
+// and during node updates when the node description changes
357
+func (d *Dispatcher) updateNode(nodeID string, description *api.NodeDescription) error {
374 358
 	d.nodeUpdatesLock.Lock()
375 359
 	d.nodeUpdates[nodeID] = nodeUpdate{status: &api.NodeStatus{State: api.NodeStatus_READY}, description: description}
376 360
 	numUpdates := len(d.nodeUpdates)
... ...
@@ -380,7 +369,7 @@ func (d *Dispatcher) register(ctx context.Context, nodeID string, description *a
380 380
 		select {
381 381
 		case d.processUpdatesTrigger <- struct{}{}:
382 382
 		case <-d.ctx.Done():
383
-			return "", d.ctx.Err()
383
+			return d.ctx.Err()
384 384
 		}
385 385
 
386 386
 	}
... ...
@@ -389,12 +378,39 @@ func (d *Dispatcher) register(ctx context.Context, nodeID string, description *a
389 389
 	d.processUpdatesLock.Lock()
390 390
 	select {
391 391
 	case <-d.ctx.Done():
392
-		return "", d.ctx.Err()
392
+		return d.ctx.Err()
393 393
 	default:
394 394
 	}
395 395
 	d.processUpdatesCond.Wait()
396 396
 	d.processUpdatesLock.Unlock()
397 397
 
398
+	return nil
399
+}
400
+
401
+// register is used for registration of node with particular dispatcher.
402
+func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) {
403
+	// prevent register until we're ready to accept it
404
+	if err := d.isRunningLocked(); err != nil {
405
+		return "", err
406
+	}
407
+
408
+	if err := d.nodes.CheckRateLimit(nodeID); err != nil {
409
+		return "", err
410
+	}
411
+
412
+	// TODO(stevvooe): Validate node specification.
413
+	var node *api.Node
414
+	d.store.View(func(tx store.ReadTx) {
415
+		node = store.GetNode(tx, nodeID)
416
+	})
417
+	if node == nil {
418
+		return "", ErrNodeNotFound
419
+	}
420
+
421
+	if err := d.updateNode(nodeID, description); err != nil {
422
+		return "", err
423
+	}
424
+
398 425
 	expireFunc := func() {
399 426
 		nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: "heartbeat failure"}
400 427
 		log.G(ctx).Debugf("heartbeat expiration")
... ...
@@ -657,14 +673,10 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
657 657
 		}
658 658
 
659 659
 		// bursty events should be processed in batches and sent out snapshot
660
-		const (
661
-			modificationBatchLimit = 200
662
-			eventPausedGap         = 50 * time.Millisecond
663
-		)
664 660
 		var (
665
-			modificationCnt    int
666
-			eventPausedTimer   *time.Timer
667
-			eventPausedTimeout <-chan time.Time
661
+			modificationCnt int
662
+			batchingTimer   *time.Timer
663
+			batchingTimeout <-chan time.Time
668 664
 		)
669 665
 
670 666
 	batchingLoop:
... ...
@@ -692,13 +704,189 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
692 692
 					delete(tasksMap, v.Task.ID)
693 693
 					modificationCnt++
694 694
 				}
695
-				if eventPausedTimer != nil {
696
-					eventPausedTimer.Reset(eventPausedGap)
695
+				if batchingTimer != nil {
696
+					batchingTimer.Reset(batchingWaitTime)
697 697
 				} else {
698
-					eventPausedTimer = time.NewTimer(eventPausedGap)
699
-					eventPausedTimeout = eventPausedTimer.C
698
+					batchingTimer = time.NewTimer(batchingWaitTime)
699
+					batchingTimeout = batchingTimer.C
700
+				}
701
+			case <-batchingTimeout:
702
+				break batchingLoop
703
+			case <-stream.Context().Done():
704
+				return stream.Context().Err()
705
+			case <-d.ctx.Done():
706
+				return d.ctx.Err()
707
+			}
708
+		}
709
+
710
+		if batchingTimer != nil {
711
+			batchingTimer.Stop()
712
+		}
713
+	}
714
+}
715
+
716
+// Assignments is a stream of assignments for a node. Each message contains
717
+// either full list of tasks and secrets for the node, or an incremental update.
718
+func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
719
+	nodeInfo, err := ca.RemoteNode(stream.Context())
720
+	if err != nil {
721
+		return err
722
+	}
723
+	nodeID := nodeInfo.NodeID
724
+
725
+	if err := d.isRunningLocked(); err != nil {
726
+		return err
727
+	}
728
+
729
+	fields := logrus.Fields{
730
+		"node.id":      nodeID,
731
+		"node.session": r.SessionID,
732
+		"method":       "(*Dispatcher).Assignments",
733
+	}
734
+	if nodeInfo.ForwardedBy != nil {
735
+		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
736
+	}
737
+	log := log.G(stream.Context()).WithFields(fields)
738
+	log.Debugf("")
739
+
740
+	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
741
+		return err
742
+	}
743
+
744
+	var (
745
+		sequence  int64
746
+		appliesTo string
747
+		initial   api.AssignmentsMessage
748
+	)
749
+	tasksMap := make(map[string]*api.Task)
750
+
751
+	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
752
+		sequence++
753
+		msg.AppliesTo = appliesTo
754
+		msg.ResultsIn = strconv.FormatInt(sequence, 10)
755
+		appliesTo = msg.ResultsIn
756
+		msg.Type = assignmentType
757
+
758
+		if err := stream.Send(&msg); err != nil {
759
+			return err
760
+		}
761
+		return nil
762
+	}
763
+
764
+	// TODO(aaronl): Also send node secrets that should be exposed to
765
+	// this node.
766
+	nodeTasks, cancel, err := store.ViewAndWatch(
767
+		d.store,
768
+		func(readTx store.ReadTx) error {
769
+			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
770
+			if err != nil {
771
+				return err
772
+			}
773
+
774
+			for _, t := range tasks {
775
+				// We only care about tasks that are ASSIGNED or
776
+				// higher. If the state is below ASSIGNED, the
777
+				// task may not meet the constraints for this
778
+				// node, so we have to be careful about sending
779
+				// secrets associated with it.
780
+				if t.Status.State < api.TaskStateAssigned {
781
+					continue
782
+				}
783
+
784
+				tasksMap[t.ID] = t
785
+				initial.UpdateTasks = append(initial.UpdateTasks, t)
786
+			}
787
+			return nil
788
+		},
789
+		state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
790
+			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
791
+		state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
792
+			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
793
+	)
794
+	if err != nil {
795
+		return err
796
+	}
797
+	defer cancel()
798
+
799
+	if err := sendMessage(initial, api.AssignmentsMessage_COMPLETE); err != nil {
800
+		return err
801
+	}
802
+
803
+	for {
804
+		// Check for session expiration
805
+		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
806
+			return err
807
+		}
808
+
809
+		// bursty events should be processed in batches and sent out together
810
+		var (
811
+			update          api.AssignmentsMessage
812
+			modificationCnt int
813
+			batchingTimer   *time.Timer
814
+			batchingTimeout <-chan time.Time
815
+			updateTasks     = make(map[string]*api.Task)
816
+			removeTasks     = make(map[string]struct{})
817
+		)
818
+
819
+		oneModification := func() {
820
+			modificationCnt++
821
+
822
+			if batchingTimer != nil {
823
+				batchingTimer.Reset(batchingWaitTime)
824
+			} else {
825
+				batchingTimer = time.NewTimer(batchingWaitTime)
826
+				batchingTimeout = batchingTimer.C
827
+			}
828
+		}
829
+
830
+		// The batching loop waits for batchingWaitTime (100 ms) after the most recent
831
+		// change, or until modificationBatchLimit is reached. The
832
+		// worst case latency is modificationBatchLimit * batchingWaitTime,
833
+		// which is 10 seconds.
834
+	batchingLoop:
835
+		for modificationCnt < modificationBatchLimit {
836
+			select {
837
+			case event := <-nodeTasks:
838
+				switch v := event.(type) {
839
+				// We don't monitor EventCreateTask because tasks are
840
+				// never created in the ASSIGNED state. First tasks are
841
+				// created by the orchestrator, then the scheduler moves
842
+				// them to ASSIGNED. If this ever changes, we will need
843
+				// to monitor task creations as well.
844
+				case state.EventUpdateTask:
845
+					// We only care about tasks that are ASSIGNED or
846
+					// higher.
847
+					if v.Task.Status.State < api.TaskStateAssigned {
848
+						continue
849
+					}
850
+
851
+					if oldTask, exists := tasksMap[v.Task.ID]; exists {
852
+						// States ASSIGNED and below are set by the orchestrator/scheduler,
853
+						// not the agent, so tasks in these states need to be sent to the
854
+						// agent even if nothing else has changed.
855
+						if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
856
+							// this update should not trigger a task change for the agent
857
+							tasksMap[v.Task.ID] = v.Task
858
+							continue
859
+						}
860
+					}
861
+					tasksMap[v.Task.ID] = v.Task
862
+					updateTasks[v.Task.ID] = v.Task
863
+
864
+					oneModification()
865
+				case state.EventDeleteTask:
866
+
867
+					if _, exists := tasksMap[v.Task.ID]; !exists {
868
+						continue
869
+					}
870
+
871
+					removeTasks[v.Task.ID] = struct{}{}
872
+
873
+					delete(tasksMap, v.Task.ID)
874
+
875
+					oneModification()
700 876
 				}
701
-			case <-eventPausedTimeout:
877
+			case <-batchingTimeout:
702 878
 				break batchingLoop
703 879
 			case <-stream.Context().Done():
704 880
 				return stream.Context().Err()
... ...
@@ -707,8 +895,22 @@ func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServe
707 707
 			}
708 708
 		}
709 709
 
710
-		if eventPausedTimer != nil {
711
-			eventPausedTimer.Stop()
710
+		if batchingTimer != nil {
711
+			batchingTimer.Stop()
712
+		}
713
+
714
+		if modificationCnt > 0 {
715
+			for id, task := range updateTasks {
716
+				if _, ok := removeTasks[id]; !ok {
717
+					update.UpdateTasks = append(update.UpdateTasks, task)
718
+				}
719
+			}
720
+			for id := range removeTasks {
721
+				update.RemoveTasks = append(update.RemoveTasks, id)
722
+			}
723
+			if err := sendMessage(update, api.AssignmentsMessage_INCREMENTAL); err != nil {
724
+				return err
725
+			}
712 726
 		}
713 727
 	}
714 728
 }
... ...
@@ -787,6 +989,10 @@ func (d *Dispatcher) Session(r *api.SessionRequest, stream api.Dispatcher_Sessio
787 787
 		}
788 788
 	} else {
789 789
 		sessionID = r.SessionID
790
+		// update the node description
791
+		if err := d.updateNode(nodeID, r.Description); err != nil {
792
+			return err
793
+		}
790 794
 	}
791 795
 
792 796
 	fields := logrus.Fields{
... ...
@@ -9,7 +9,6 @@ import (
9 9
 	"path/filepath"
10 10
 	"sync"
11 11
 	"syscall"
12
-	"time"
13 12
 
14 13
 	"github.com/Sirupsen/logrus"
15 14
 	"github.com/docker/go-events"
... ...
@@ -18,12 +17,10 @@ import (
18 18
 	"github.com/docker/swarmkit/log"
19 19
 	"github.com/docker/swarmkit/manager/allocator"
20 20
 	"github.com/docker/swarmkit/manager/controlapi"
21
-	"github.com/docker/swarmkit/manager/controlapi/hackpicker"
22 21
 	"github.com/docker/swarmkit/manager/dispatcher"
23 22
 	"github.com/docker/swarmkit/manager/health"
24 23
 	"github.com/docker/swarmkit/manager/keymanager"
25 24
 	"github.com/docker/swarmkit/manager/orchestrator"
26
-	"github.com/docker/swarmkit/manager/raftpicker"
27 25
 	"github.com/docker/swarmkit/manager/resourceapi"
28 26
 	"github.com/docker/swarmkit/manager/scheduler"
29 27
 	"github.com/docker/swarmkit/manager/state/raft"
... ...
@@ -92,7 +89,6 @@ type Manager struct {
92 92
 	server                 *grpc.Server
93 93
 	localserver            *grpc.Server
94 94
 	RaftNode               *raft.Node
95
-	connSelector           *raftpicker.ConnSelector
96 95
 
97 96
 	mu sync.Mutex
98 97
 
... ...
@@ -250,25 +246,6 @@ func (m *Manager) Run(parent context.Context) error {
250 250
 
251 251
 	go m.handleLeadershipEvents(ctx, leadershipCh)
252 252
 
253
-	proxyOpts := []grpc.DialOption{
254
-		grpc.WithTimeout(5 * time.Second),
255
-		grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
256
-	}
257
-
258
-	cs := raftpicker.NewConnSelector(m.RaftNode, proxyOpts...)
259
-	m.connSelector = cs
260
-
261
-	// We need special connSelector for controlapi because it provides automatic
262
-	// leader tracking.
263
-	// Other APIs are using connSelector which errors out on leader change, but
264
-	// allows to react quickly to reelections.
265
-	controlAPIProxyOpts := []grpc.DialOption{
266
-		grpc.WithBackoffMaxDelay(time.Second),
267
-		grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
268
-	}
269
-
270
-	controlAPIConnSelector := hackpicker.NewConnSelector(m.RaftNode, controlAPIProxyOpts...)
271
-
272 253
 	authorize := func(ctx context.Context, roles []string) error {
273 254
 		// Authorize the remote roles, ensure they can only be forwarded by managers
274 255
 		_, err := ca.AuthorizeForwardedRoleAndOrg(ctx, roles, []string{ca.ManagerRole}, m.config.SecurityConfig.ClientTLSCreds.Organization())
... ...
@@ -289,11 +266,11 @@ func (m *Manager) Run(parent context.Context) error {
289 289
 	authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
290 290
 	authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.RaftNode, authorize)
291 291
 
292
-	proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
293
-	proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
294
-	proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
295
-	proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
296
-	proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
292
+	proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
293
+	proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
294
+	proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
295
+	proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
296
+	proxyResourceAPI := api.NewRaftProxyResourceAllocatorServer(authenticatedResourceAPI, m.RaftNode, ca.WithMetadataForwardTLSInfo)
297 297
 
298 298
 	// localProxyControlAPI is a special kind of proxy. It is only wired up
299 299
 	// to receive requests from a trusted local socket, and these requests
... ...
@@ -302,7 +279,7 @@ func (m *Manager) Run(parent context.Context) error {
302 302
 	// this manager rather than forwarded requests (it has no TLS
303 303
 	// information to put in the metadata map).
304 304
 	forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
305
-	localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, controlAPIConnSelector, m.RaftNode, forwardAsOwnRequest)
305
+	localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, m.RaftNode, forwardAsOwnRequest)
306 306
 
307 307
 	// Everything registered on m.server should be an authenticated
308 308
 	// wrapper, or a proxy wrapping an authenticated wrapper!
... ...
@@ -318,7 +295,7 @@ func (m *Manager) Run(parent context.Context) error {
318 318
 	api.RegisterControlServer(m.localserver, localProxyControlAPI)
319 319
 	api.RegisterHealthServer(m.localserver, localHealthServer)
320 320
 
321
-	errServe := make(chan error, 2)
321
+	errServe := make(chan error, len(m.listeners))
322 322
 	for proto, l := range m.listeners {
323 323
 		go m.serveListener(ctx, errServe, proto, l)
324 324
 	}
... ...
@@ -433,9 +410,6 @@ func (m *Manager) Stop(ctx context.Context) {
433 433
 		m.keyManager.Stop()
434 434
 	}
435 435
 
436
-	if m.connSelector != nil {
437
-		m.connSelector.Stop()
438
-	}
439 436
 	m.RaftNode.Shutdown()
440 437
 	// some time after this point, Run will receive an error from one of these
441 438
 	m.server.Stop()
... ...
@@ -346,7 +346,8 @@ func (r *RestartSupervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask
346 346
 			close(doneCh)
347 347
 		}()
348 348
 
349
-		oldTaskTimeout := time.After(r.taskTimeout)
349
+		oldTaskTimer := time.NewTimer(r.taskTimeout)
350
+		defer oldTaskTimer.Stop()
350 351
 
351 352
 		// Wait for the delay to elapse, if one is specified.
352 353
 		if delay != 0 {
... ...
@@ -357,10 +358,10 @@ func (r *RestartSupervisor) DelayStart(ctx context.Context, _ store.Tx, oldTask
357 357
 			}
358 358
 		}
359 359
 
360
-		if waitStop {
360
+		if waitStop && oldTask != nil {
361 361
 			select {
362 362
 			case <-watch:
363
-			case <-oldTaskTimeout:
363
+			case <-oldTaskTimer.C:
364 364
 			case <-ctx.Done():
365 365
 				return
366 366
 			}
... ...
@@ -1,6 +1,7 @@
1 1
 package orchestrator
2 2
 
3 3
 import (
4
+	"errors"
4 5
 	"fmt"
5 6
 	"reflect"
6 7
 	"sync"
... ...
@@ -17,6 +18,8 @@ import (
17 17
 	"github.com/docker/swarmkit/protobuf/ptypes"
18 18
 )
19 19
 
20
+const defaultMonitor = 30 * time.Second
21
+
20 22
 // UpdateSupervisor supervises a set of updates. It's responsible for keeping track of updates,
21 23
 // shutting them down and replacing them.
22 24
 type UpdateSupervisor struct {
... ...
@@ -49,7 +52,7 @@ func (u *UpdateSupervisor) Update(ctx context.Context, cluster *api.Cluster, ser
49 49
 	id := service.ID
50 50
 
51 51
 	if update, ok := u.updates[id]; ok {
52
-		if !update.isServiceDirty(service) {
52
+		if reflect.DeepEqual(service.Spec, update.newService.Spec) {
53 53
 			// There's already an update working towards this goal.
54 54
 			return
55 55
 		}
... ...
@@ -87,6 +90,9 @@ type Updater struct {
87 87
 	cluster    *api.Cluster
88 88
 	newService *api.Service
89 89
 
90
+	updatedTasks   map[string]time.Time // task ID to creation time
91
+	updatedTasksMu sync.Mutex
92
+
90 93
 	// stopChan signals to the state machine to stop running.
91 94
 	stopChan chan struct{}
92 95
 	// doneChan is closed when the state machine terminates.
... ...
@@ -96,13 +102,14 @@ type Updater struct {
96 96
 // NewUpdater creates a new Updater.
97 97
 func NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor, cluster *api.Cluster, newService *api.Service) *Updater {
98 98
 	return &Updater{
99
-		store:      store,
100
-		watchQueue: store.WatchQueue(),
101
-		restarts:   restartSupervisor,
102
-		cluster:    cluster.Copy(),
103
-		newService: newService.Copy(),
104
-		stopChan:   make(chan struct{}),
105
-		doneChan:   make(chan struct{}),
99
+		store:        store,
100
+		watchQueue:   store.WatchQueue(),
101
+		restarts:     restartSupervisor,
102
+		cluster:      cluster.Copy(),
103
+		newService:   newService.Copy(),
104
+		updatedTasks: make(map[string]time.Time),
105
+		stopChan:     make(chan struct{}),
106
+		doneChan:     make(chan struct{}),
106 107
 	}
107 108
 }
108 109
 
... ...
@@ -119,7 +126,9 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
119 119
 	service := u.newService
120 120
 
121 121
 	// If the update is in a PAUSED state, we should not do anything.
122
-	if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_PAUSED {
122
+	if service.UpdateStatus != nil &&
123
+		(service.UpdateStatus.State == api.UpdateStatus_PAUSED ||
124
+			service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_PAUSED) {
123 125
 		return
124 126
 	}
125 127
 
... ...
@@ -131,7 +140,9 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
131 131
 	}
132 132
 	// Abort immediately if all tasks are clean.
133 133
 	if len(dirtySlots) == 0 {
134
-		if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_UPDATING {
134
+		if service.UpdateStatus != nil &&
135
+			(service.UpdateStatus.State == api.UpdateStatus_UPDATING ||
136
+				service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED) {
135 137
 			u.completeUpdate(ctx, service.ID)
136 138
 		}
137 139
 		return
... ...
@@ -163,9 +174,26 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
163 163
 		}()
164 164
 	}
165 165
 
166
+	failureAction := api.UpdateConfig_PAUSE
167
+	allowedFailureFraction := float32(0)
168
+	monitoringPeriod := defaultMonitor
169
+
170
+	if service.Spec.Update != nil {
171
+		failureAction = service.Spec.Update.FailureAction
172
+		allowedFailureFraction = service.Spec.Update.AllowedFailureFraction
173
+
174
+		if service.Spec.Update.Monitor != nil {
175
+			var err error
176
+			monitoringPeriod, err = ptypes.Duration(service.Spec.Update.Monitor)
177
+			if err != nil {
178
+				monitoringPeriod = defaultMonitor
179
+			}
180
+		}
181
+	}
182
+
166 183
 	var failedTaskWatch chan events.Event
167 184
 
168
-	if service.Spec.Update == nil || service.Spec.Update.FailureAction == api.UpdateConfig_PAUSE {
185
+	if failureAction != api.UpdateConfig_CONTINUE {
169 186
 		var cancelWatch func()
170 187
 		failedTaskWatch, cancelWatch = state.Watch(
171 188
 			u.store.WatchQueue(),
... ...
@@ -178,6 +206,49 @@ func (u *Updater) Run(ctx context.Context, slots []slot) {
178 178
 	}
179 179
 
180 180
 	stopped := false
181
+	failedTasks := make(map[string]struct{})
182
+	totalFailures := 0
183
+
184
+	failureTriggersAction := func(failedTask *api.Task) bool {
185
+		// Ignore tasks we have already seen as failures.
186
+		if _, found := failedTasks[failedTask.ID]; found {
187
+			return false
188
+		}
189
+
190
+		// If this failed/completed task is one that we
191
+		// created as part of this update, we should
192
+		// follow the failure action.
193
+		u.updatedTasksMu.Lock()
194
+		startedAt, found := u.updatedTasks[failedTask.ID]
195
+		u.updatedTasksMu.Unlock()
196
+
197
+		if found && (startedAt.IsZero() || time.Since(startedAt) <= monitoringPeriod) {
198
+			failedTasks[failedTask.ID] = struct{}{}
199
+			totalFailures++
200
+			if float32(totalFailures)/float32(len(dirtySlots)) > allowedFailureFraction {
201
+				switch failureAction {
202
+				case api.UpdateConfig_PAUSE:
203
+					stopped = true
204
+					message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID)
205
+					u.pauseUpdate(ctx, service.ID, message)
206
+					return true
207
+				case api.UpdateConfig_ROLLBACK:
208
+					// Never roll back a rollback
209
+					if service.UpdateStatus != nil && service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
210
+						message := fmt.Sprintf("rollback paused due to failure or early termination of task %s", failedTask.ID)
211
+						u.pauseUpdate(ctx, service.ID, message)
212
+						return true
213
+					}
214
+					stopped = true
215
+					message := fmt.Sprintf("update rolled back due to failure or early termination of task %s", failedTask.ID)
216
+					u.rollbackUpdate(ctx, service.ID, message)
217
+					return true
218
+				}
219
+			}
220
+		}
221
+
222
+		return false
223
+	}
181 224
 
182 225
 slotsLoop:
183 226
 	for _, slot := range dirtySlots {
... ...
@@ -189,15 +260,7 @@ slotsLoop:
189 189
 				stopped = true
190 190
 				break slotsLoop
191 191
 			case ev := <-failedTaskWatch:
192
-				failedTask := ev.(state.EventUpdateTask).Task
193
-
194
-				// If this failed/completed task has a spec matching
195
-				// the one we're updating to, we should pause the
196
-				// update.
197
-				if !u.isTaskDirty(failedTask) {
198
-					stopped = true
199
-					message := fmt.Sprintf("update paused due to failure or early termination of task %s", failedTask.ID)
200
-					u.pauseUpdate(ctx, service.ID, message)
192
+				if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
201 193
 					break slotsLoop
202 194
 				}
203 195
 			case slotQueue <- slot:
... ...
@@ -210,6 +273,29 @@ slotsLoop:
210 210
 	wg.Wait()
211 211
 
212 212
 	if !stopped {
213
+		// Keep watching for task failures for one more monitoringPeriod,
214
+		// before declaring the update complete.
215
+		doneMonitoring := time.After(monitoringPeriod)
216
+	monitorLoop:
217
+		for {
218
+			select {
219
+			case <-u.stopChan:
220
+				stopped = true
221
+				break monitorLoop
222
+			case <-doneMonitoring:
223
+				break monitorLoop
224
+			case ev := <-failedTaskWatch:
225
+				if failureTriggersAction(ev.(state.EventUpdateTask).Task) {
226
+					break monitorLoop
227
+				}
228
+			}
229
+		}
230
+	}
231
+
232
+	// TODO(aaronl): Potentially roll back the service if not enough tasks
233
+	// have reached RUNNING by this point.
234
+
235
+	if !stopped {
213 236
 		u.completeUpdate(ctx, service.ID)
214 237
 	}
215 238
 }
... ...
@@ -237,9 +323,13 @@ func (u *Updater) worker(ctx context.Context, queue <-chan slot) {
237 237
 			}
238 238
 		}
239 239
 		if runningTask != nil {
240
-			u.useExistingTask(ctx, slot, runningTask)
240
+			if err := u.useExistingTask(ctx, slot, runningTask); err != nil {
241
+				log.G(ctx).WithError(err).Error("update failed")
242
+			}
241 243
 		} else if cleanTask != nil {
242
-			u.useExistingTask(ctx, slot, cleanTask)
244
+			if err := u.useExistingTask(ctx, slot, cleanTask); err != nil {
245
+				log.G(ctx).WithError(err).Error("update failed")
246
+			}
243 247
 		} else {
244 248
 			updated := newTask(u.cluster, u.newService, slot[0].Slot)
245 249
 			updated.DesiredState = api.TaskStateReady
... ...
@@ -275,10 +365,22 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
275 275
 	})
276 276
 	defer cancel()
277 277
 
278
+	// Create an empty entry for this task, so the updater knows a failure
279
+	// should count towards the failure count. The timestamp is added
280
+	// if/when the task reaches RUNNING.
281
+	u.updatedTasksMu.Lock()
282
+	u.updatedTasks[updated.ID] = time.Time{}
283
+	u.updatedTasksMu.Unlock()
284
+
278 285
 	var delayStartCh <-chan struct{}
279 286
 	// Atomically create the updated task and bring down the old one.
280 287
 	_, err := u.store.Batch(func(batch *store.Batch) error {
281
-		err := batch.Update(func(tx store.Tx) error {
288
+		oldTask, err := u.removeOldTasks(ctx, batch, slot)
289
+		if err != nil {
290
+			return err
291
+		}
292
+
293
+		err = batch.Update(func(tx store.Tx) error {
282 294
 			if err := store.CreateTask(tx, updated); err != nil {
283 295
 				return err
284 296
 			}
... ...
@@ -288,7 +390,6 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
288 288
 			return err
289 289
 		}
290 290
 
291
-		oldTask := u.removeOldTasks(ctx, batch, slot)
292 291
 		delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, updated.ID, 0, true)
293 292
 
294 293
 		return nil
... ...
@@ -309,6 +410,9 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
309 309
 		case e := <-taskUpdates:
310 310
 			updated = e.(state.EventUpdateTask).Task
311 311
 			if updated.Status.State >= api.TaskStateRunning {
312
+				u.updatedTasksMu.Lock()
313
+				u.updatedTasks[updated.ID] = time.Now()
314
+				u.updatedTasksMu.Unlock()
312 315
 				return nil
313 316
 			}
314 317
 		case <-u.stopChan:
... ...
@@ -317,7 +421,7 @@ func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task)
317 317
 	}
318 318
 }
319 319
 
320
-func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) {
320
+func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) error {
321 321
 	var removeTasks []*api.Task
322 322
 	for _, t := range slot {
323 323
 		if t != existing {
... ...
@@ -327,7 +431,14 @@ func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.
327 327
 	if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning {
328 328
 		var delayStartCh <-chan struct{}
329 329
 		_, err := u.store.Batch(func(batch *store.Batch) error {
330
-			oldTask := u.removeOldTasks(ctx, batch, removeTasks)
330
+			var oldTask *api.Task
331
+			if len(removeTasks) != 0 {
332
+				var err error
333
+				oldTask, err = u.removeOldTasks(ctx, batch, removeTasks)
334
+				if err != nil {
335
+					return err
336
+				}
337
+			}
331 338
 
332 339
 			if existing.DesiredState != api.TaskStateRunning {
333 340
 				delayStartCh = u.restarts.DelayStart(ctx, nil, oldTask, existing.ID, 0, true)
... ...
@@ -335,19 +446,24 @@ func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.
335 335
 			return nil
336 336
 		})
337 337
 		if err != nil {
338
-			log.G(ctx).WithError(err).Error("updater batch transaction failed")
338
+			return err
339 339
 		}
340 340
 
341 341
 		if delayStartCh != nil {
342 342
 			<-delayStartCh
343 343
 		}
344 344
 	}
345
+
346
+	return nil
345 347
 }
346 348
 
347 349
 // removeOldTasks shuts down the given tasks and returns one of the tasks that
348
-// was shut down, or nil.
349
-func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) *api.Task {
350
-	var removedTask *api.Task
350
+// was shut down, or an error.
351
+func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) (*api.Task, error) {
352
+	var (
353
+		lastErr     error
354
+		removedTask *api.Task
355
+	)
351 356
 	for _, original := range removeTasks {
352 357
 		err := batch.Update(func(tx store.Tx) error {
353 358
 			t := store.GetTask(tx, original.ID)
... ...
@@ -361,13 +477,16 @@ func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, remove
361 361
 			return store.UpdateTask(tx, t)
362 362
 		})
363 363
 		if err != nil {
364
-			log.G(ctx).WithError(err).Errorf("shutting down stale task %s failed", original.ID)
364
+			lastErr = err
365 365
 		} else {
366 366
 			removedTask = original
367 367
 		}
368 368
 	}
369 369
 
370
-	return removedTask
370
+	if removedTask == nil {
371
+		return nil, lastErr
372
+	}
373
+	return removedTask, nil
371 374
 }
372 375
 
373 376
 func (u *Updater) isTaskDirty(t *api.Task) bool {
... ...
@@ -375,11 +494,6 @@ func (u *Updater) isTaskDirty(t *api.Task) bool {
375 375
 		(t.Endpoint != nil && !reflect.DeepEqual(u.newService.Spec.Endpoint, t.Endpoint.Spec))
376 376
 }
377 377
 
378
-func (u *Updater) isServiceDirty(service *api.Service) bool {
379
-	return !reflect.DeepEqual(u.newService.Spec.Task, service.Spec.Task) ||
380
-		!reflect.DeepEqual(u.newService.Spec.Endpoint, service.Spec.Endpoint)
381
-}
382
-
383 378
 func (u *Updater) isSlotDirty(slot slot) bool {
384 379
 	return len(slot) > 1 || (len(slot) == 1 && u.isTaskDirty(slot[0]))
385 380
 }
... ...
@@ -421,7 +535,11 @@ func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
421 421
 			return nil
422 422
 		}
423 423
 
424
-		service.UpdateStatus.State = api.UpdateStatus_PAUSED
424
+		if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
425
+			service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_PAUSED
426
+		} else {
427
+			service.UpdateStatus.State = api.UpdateStatus_PAUSED
428
+		}
425 429
 		service.UpdateStatus.Message = message
426 430
 
427 431
 		return store.UpdateService(tx, service)
... ...
@@ -432,6 +550,38 @@ func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
432 432
 	}
433 433
 }
434 434
 
435
+func (u *Updater) rollbackUpdate(ctx context.Context, serviceID, message string) {
436
+	log.G(ctx).Debugf("starting rollback of service %s", serviceID)
437
+
438
+	var service *api.Service
439
+	err := u.store.Update(func(tx store.Tx) error {
440
+		service = store.GetService(tx, serviceID)
441
+		if service == nil {
442
+			return nil
443
+		}
444
+		if service.UpdateStatus == nil {
445
+			// The service was updated since we started this update
446
+			return nil
447
+		}
448
+
449
+		service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_STARTED
450
+		service.UpdateStatus.Message = message
451
+
452
+		if service.PreviousSpec == nil {
453
+			return errors.New("cannot roll back service because no previous spec is available")
454
+		}
455
+		service.Spec = *service.PreviousSpec
456
+		service.PreviousSpec = nil
457
+
458
+		return store.UpdateService(tx, service)
459
+	})
460
+
461
+	if err != nil {
462
+		log.G(ctx).WithError(err).Errorf("failed to start rollback of service %s", serviceID)
463
+		return
464
+	}
465
+}
466
+
435 467
 func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
436 468
 	log.G(ctx).Debugf("update of service %s complete", serviceID)
437 469
 
... ...
@@ -444,9 +594,13 @@ func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
444 444
 			// The service was changed since we started this update
445 445
 			return nil
446 446
 		}
447
-
448
-		service.UpdateStatus.State = api.UpdateStatus_COMPLETED
449
-		service.UpdateStatus.Message = "update completed"
447
+		if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
448
+			service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED
449
+			service.UpdateStatus.Message = "rollback completed"
450
+		} else {
451
+			service.UpdateStatus.State = api.UpdateStatus_COMPLETED
452
+			service.UpdateStatus.Message = "update completed"
453
+		}
450 454
 		service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now())
451 455
 
452 456
 		return store.UpdateService(tx, service)
453 457
deleted file mode 100644
... ...
@@ -1,12 +0,0 @@
1
-package raftpicker
2
-
3
-// AddrSelector is interface which should track cluster for its leader address.
4
-type AddrSelector interface {
5
-	LeaderAddr() (string, error)
6
-}
7
-
8
-// RaftCluster is interface which combines useful methods for clustering.
9
-type RaftCluster interface {
10
-	AddrSelector
11
-	IsLeader() bool
12
-}
13 1
deleted file mode 100644
... ...
@@ -1,127 +0,0 @@
1
-package raftpicker
2
-
3
-import (
4
-	"sync"
5
-	"time"
6
-
7
-	"github.com/Sirupsen/logrus"
8
-
9
-	"google.golang.org/grpc"
10
-)
11
-
12
-// Interface is interface to replace implementation with controlapi/hackpicker.
13
-// TODO: it should be done cooler.
14
-type Interface interface {
15
-	Conn() (*grpc.ClientConn, error)
16
-	Reset() error
17
-}
18
-
19
-// ConnSelector is struct for obtaining connection connected to cluster leader.
20
-type ConnSelector struct {
21
-	mu      sync.Mutex
22
-	cluster RaftCluster
23
-	opts    []grpc.DialOption
24
-
25
-	cc   *grpc.ClientConn
26
-	addr string
27
-
28
-	stop chan struct{}
29
-}
30
-
31
-// NewConnSelector returns new ConnSelector with cluster and grpc.DialOpts which
32
-// will be used for connection create.
33
-func NewConnSelector(cluster RaftCluster, opts ...grpc.DialOption) *ConnSelector {
34
-	cs := &ConnSelector{
35
-		cluster: cluster,
36
-		opts:    opts,
37
-		stop:    make(chan struct{}),
38
-	}
39
-	go cs.updateLoop()
40
-	return cs
41
-}
42
-
43
-// Conn returns *grpc.ClientConn which connected to cluster leader.
44
-// It can return error if cluster wasn't ready at the moment of initial call.
45
-func (c *ConnSelector) Conn() (*grpc.ClientConn, error) {
46
-	c.mu.Lock()
47
-	defer c.mu.Unlock()
48
-	if c.cc != nil {
49
-		return c.cc, nil
50
-	}
51
-	addr, err := c.cluster.LeaderAddr()
52
-	if err != nil {
53
-		return nil, err
54
-	}
55
-	cc, err := grpc.Dial(addr, c.opts...)
56
-	if err != nil {
57
-		return nil, err
58
-	}
59
-	c.cc = cc
60
-	c.addr = addr
61
-	return cc, nil
62
-}
63
-
64
-// Reset recreates underlying connection.
65
-func (c *ConnSelector) Reset() error {
66
-	c.mu.Lock()
67
-	defer c.mu.Unlock()
68
-	if c.cc != nil {
69
-		c.cc.Close()
70
-		c.cc = nil
71
-	}
72
-	addr, err := c.cluster.LeaderAddr()
73
-	if err != nil {
74
-		logrus.WithError(err).Errorf("error obtaining leader address")
75
-		return err
76
-	}
77
-	cc, err := grpc.Dial(addr, c.opts...)
78
-	if err != nil {
79
-		logrus.WithError(err).Errorf("error reestabilishing connection to leader")
80
-		return err
81
-	}
82
-	c.cc = cc
83
-	c.addr = addr
84
-	return nil
85
-}
86
-
87
-// Stop cancels updating connection loop.
88
-func (c *ConnSelector) Stop() {
89
-	close(c.stop)
90
-}
91
-
92
-func (c *ConnSelector) updateConn() error {
93
-	addr, err := c.cluster.LeaderAddr()
94
-	if err != nil {
95
-		return err
96
-	}
97
-	c.mu.Lock()
98
-	defer c.mu.Unlock()
99
-	if c.addr != addr {
100
-		if c.cc != nil {
101
-			c.cc.Close()
102
-			c.cc = nil
103
-		}
104
-		conn, err := grpc.Dial(addr, c.opts...)
105
-		if err != nil {
106
-			return err
107
-		}
108
-		c.cc = conn
109
-		c.addr = addr
110
-	}
111
-	return nil
112
-}
113
-
114
-func (c *ConnSelector) updateLoop() {
115
-	ticker := time.NewTicker(1 * time.Second)
116
-	defer ticker.Stop()
117
-	for {
118
-		select {
119
-		case <-ticker.C:
120
-			if err := c.updateConn(); err != nil {
121
-				logrus.WithError(err).Errorf("error reestabilishing connection to leader")
122
-			}
123
-		case <-c.stop:
124
-			return
125
-		}
126
-	}
127
-}
128 1
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+package raftselector
1
+
2
+import (
3
+	"errors"
4
+
5
+	"golang.org/x/net/context"
6
+
7
+	"google.golang.org/grpc"
8
+)
9
+
10
+// ConnProvider is basic interface for connecting API package(raft proxy in particular)
11
+// to manager/state/raft package without import cycles. It provides only one
12
+// method for obtaining connection to leader.
13
+type ConnProvider interface {
14
+	LeaderConn(ctx context.Context) (*grpc.ClientConn, error)
15
+}
16
+
17
+// ErrIsLeader is returned from LeaderConn method when current machine is leader.
18
+// It's just shim between packages to avoid import cycles.
19
+var ErrIsLeader = errors.New("current node is leader")
0 20
deleted file mode 100644
... ...
@@ -1,153 +0,0 @@
1
-package scheduler
2
-
3
-import (
4
-	"container/heap"
5
-	"errors"
6
-
7
-	"github.com/docker/swarmkit/api"
8
-)
9
-
10
-var errNodeNotFound = errors.New("node not found in scheduler heap")
11
-
12
-// A nodeHeap implements heap.Interface for nodes. It also includes an index
13
-// by node id.
14
-type nodeHeap struct {
15
-	heap  []NodeInfo
16
-	index map[string]int // map from node id to heap index
17
-}
18
-
19
-func (nh nodeHeap) Len() int {
20
-	return len(nh.heap)
21
-}
22
-
23
-func (nh nodeHeap) Less(i, j int) bool {
24
-	return len(nh.heap[i].Tasks) < len(nh.heap[j].Tasks)
25
-}
26
-
27
-func (nh nodeHeap) Swap(i, j int) {
28
-	nh.heap[i], nh.heap[j] = nh.heap[j], nh.heap[i]
29
-	nh.index[nh.heap[i].ID] = i
30
-	nh.index[nh.heap[j].ID] = j
31
-}
32
-
33
-func (nh *nodeHeap) Push(x interface{}) {
34
-	n := len(nh.heap)
35
-	item := x.(NodeInfo)
36
-	nh.index[item.ID] = n
37
-	nh.heap = append(nh.heap, item)
38
-}
39
-
40
-func (nh *nodeHeap) Pop() interface{} {
41
-	old := nh.heap
42
-	n := len(old)
43
-	item := old[n-1]
44
-	delete(nh.index, item.ID)
45
-	nh.heap = old[0 : n-1]
46
-	return item
47
-}
48
-
49
-func (nh *nodeHeap) alloc(n int) {
50
-	nh.heap = make([]NodeInfo, 0, n)
51
-	nh.index = make(map[string]int, n)
52
-}
53
-
54
-// nodeInfo returns the NodeInfo struct for a given node identified by its ID.
55
-func (nh *nodeHeap) nodeInfo(nodeID string) (NodeInfo, error) {
56
-	index, ok := nh.index[nodeID]
57
-	if ok {
58
-		return nh.heap[index], nil
59
-	}
60
-	return NodeInfo{}, errNodeNotFound
61
-}
62
-
63
-// addOrUpdateNode sets the number of tasks for a given node. It adds the node
64
-// to the heap if it wasn't already tracked.
65
-func (nh *nodeHeap) addOrUpdateNode(n NodeInfo) {
66
-	index, ok := nh.index[n.ID]
67
-	if ok {
68
-		nh.heap[index] = n
69
-		heap.Fix(nh, index)
70
-	} else {
71
-		heap.Push(nh, n)
72
-	}
73
-}
74
-
75
-// updateNode sets the number of tasks for a given node. It ignores the update
76
-// if the node isn't already tracked in the heap.
77
-func (nh *nodeHeap) updateNode(n NodeInfo) {
78
-	index, ok := nh.index[n.ID]
79
-	if ok {
80
-		nh.heap[index] = n
81
-		heap.Fix(nh, index)
82
-	}
83
-}
84
-
85
-func (nh *nodeHeap) remove(nodeID string) {
86
-	index, ok := nh.index[nodeID]
87
-	if ok {
88
-		heap.Remove(nh, index)
89
-	}
90
-}
91
-
92
-func (nh *nodeHeap) findMin(meetsConstraints func(*NodeInfo) bool, scanAllNodes bool) (*api.Node, int) {
93
-	if scanAllNodes {
94
-		return nh.scanAllToFindMin(meetsConstraints)
95
-	}
96
-	return nh.searchHeapToFindMin(meetsConstraints)
97
-}
98
-
99
-// Scan All nodes to find the best node which meets the constraints && has lightest workloads
100
-func (nh *nodeHeap) scanAllToFindMin(meetsConstraints func(*NodeInfo) bool) (*api.Node, int) {
101
-	var bestNode *api.Node
102
-	minTasks := int(^uint(0) >> 1) // max int
103
-
104
-	for i := 0; i < len(nh.heap); i++ {
105
-		heapEntry := &nh.heap[i]
106
-		if meetsConstraints(heapEntry) && len(heapEntry.Tasks) < minTasks {
107
-			bestNode = heapEntry.Node
108
-			minTasks = len(heapEntry.Tasks)
109
-		}
110
-	}
111
-
112
-	return bestNode, minTasks
113
-}
114
-
115
-// Search in heap to find the best node which meets the constraints && has lightest workloads
116
-func (nh *nodeHeap) searchHeapToFindMin(meetsConstraints func(*NodeInfo) bool) (*api.Node, int) {
117
-	var bestNode *api.Node
118
-	minTasks := int(^uint(0) >> 1) // max int
119
-
120
-	if nh == nil || len(nh.heap) == 0 {
121
-		return bestNode, minTasks
122
-	}
123
-
124
-	// push root to stack for search
125
-	stack := []int{0}
126
-
127
-	for len(stack) != 0 {
128
-		// pop an element
129
-		idx := stack[len(stack)-1]
130
-		stack = stack[0 : len(stack)-1]
131
-
132
-		heapEntry := &nh.heap[idx]
133
-
134
-		if len(heapEntry.Tasks) >= minTasks {
135
-			continue
136
-		}
137
-
138
-		if meetsConstraints(heapEntry) {
139
-			// meet constraints, update results
140
-			bestNode = heapEntry.Node
141
-			minTasks = len(heapEntry.Tasks)
142
-		} else {
143
-			// otherwise, push 2 children to stack for further search
144
-			if 2*idx+1 < len(nh.heap) {
145
-				stack = append(stack, 2*idx+1)
146
-			}
147
-			if 2*idx+2 < len(nh.heap) {
148
-				stack = append(stack, 2*idx+2)
149
-			}
150
-		}
151
-	}
152
-	return bestNode, minTasks
153
-}
... ...
@@ -5,15 +5,18 @@ import "github.com/docker/swarmkit/api"
5 5
 // NodeInfo contains a node and some additional metadata.
6 6
 type NodeInfo struct {
7 7
 	*api.Node
8
-	Tasks              map[string]*api.Task
9
-	AvailableResources api.Resources
8
+	Tasks                             map[string]*api.Task
9
+	DesiredRunningTasksCount          int
10
+	DesiredRunningTasksCountByService map[string]int
11
+	AvailableResources                api.Resources
10 12
 }
11 13
 
12 14
 func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api.Resources) NodeInfo {
13 15
 	nodeInfo := NodeInfo{
14
-		Node:               n,
15
-		Tasks:              make(map[string]*api.Task),
16
-		AvailableResources: availableResources,
16
+		Node:  n,
17
+		Tasks: make(map[string]*api.Task),
18
+		DesiredRunningTasksCountByService: make(map[string]int),
19
+		AvailableResources:                availableResources,
17 20
 	}
18 21
 
19 22
 	for _, t := range tasks {
... ...
@@ -22,15 +25,23 @@ func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api
22 22
 	return nodeInfo
23 23
 }
24 24
 
25
+// addTask removes a task from nodeInfo if it's tracked there, and returns true
26
+// if nodeInfo was modified.
25 27
 func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
26 28
 	if nodeInfo.Tasks == nil {
27 29
 		return false
28 30
 	}
29
-	if _, ok := nodeInfo.Tasks[t.ID]; !ok {
31
+	oldTask, ok := nodeInfo.Tasks[t.ID]
32
+	if !ok {
30 33
 		return false
31 34
 	}
32 35
 
33 36
 	delete(nodeInfo.Tasks, t.ID)
37
+	if oldTask.DesiredState == api.TaskStateRunning {
38
+		nodeInfo.DesiredRunningTasksCount--
39
+		nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]--
40
+	}
41
+
34 42
 	reservations := taskReservations(t.Spec)
35 43
 	nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
36 44
 	nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
... ...
@@ -38,19 +49,43 @@ func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
38 38
 	return true
39 39
 }
40 40
 
41
+// addTask adds or updates a task on nodeInfo, and returns true if nodeInfo was
42
+// modified.
41 43
 func (nodeInfo *NodeInfo) addTask(t *api.Task) bool {
42 44
 	if nodeInfo.Tasks == nil {
43 45
 		nodeInfo.Tasks = make(map[string]*api.Task)
44 46
 	}
45
-	if _, ok := nodeInfo.Tasks[t.ID]; !ok {
46
-		nodeInfo.Tasks[t.ID] = t
47
-		reservations := taskReservations(t.Spec)
48
-		nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
49
-		nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
50
-		return true
47
+	if nodeInfo.DesiredRunningTasksCountByService == nil {
48
+		nodeInfo.DesiredRunningTasksCountByService = make(map[string]int)
49
+	}
50
+
51
+	oldTask, ok := nodeInfo.Tasks[t.ID]
52
+	if ok {
53
+		if t.DesiredState == api.TaskStateRunning && oldTask.DesiredState != api.TaskStateRunning {
54
+			nodeInfo.Tasks[t.ID] = t
55
+			nodeInfo.DesiredRunningTasksCount++
56
+			nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]++
57
+			return true
58
+		} else if t.DesiredState != api.TaskStateRunning && oldTask.DesiredState == api.TaskStateRunning {
59
+			nodeInfo.Tasks[t.ID] = t
60
+			nodeInfo.DesiredRunningTasksCount--
61
+			nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]--
62
+			return true
63
+		}
64
+		return false
65
+	}
66
+
67
+	nodeInfo.Tasks[t.ID] = t
68
+	reservations := taskReservations(t.Spec)
69
+	nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
70
+	nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
71
+
72
+	if t.DesiredState == api.TaskStateRunning {
73
+		nodeInfo.DesiredRunningTasksCount++
74
+		nodeInfo.DesiredRunningTasksCountByService[t.ServiceID]++
51 75
 	}
52 76
 
53
-	return false
77
+	return true
54 78
 }
55 79
 
56 80
 func taskReservations(spec api.TaskSpec) (reservations api.Resources) {
57 81
new file mode 100644
... ...
@@ -0,0 +1,115 @@
0
+package scheduler
1
+
2
+import (
3
+	"container/heap"
4
+	"errors"
5
+)
6
+
7
+var errNodeNotFound = errors.New("node not found in scheduler dataset")
8
+
9
+type nodeSet struct {
10
+	nodes map[string]NodeInfo // map from node id to node info
11
+}
12
+
13
+func (ns *nodeSet) alloc(n int) {
14
+	ns.nodes = make(map[string]NodeInfo, n)
15
+}
16
+
17
+// nodeInfo returns the NodeInfo struct for a given node identified by its ID.
18
+func (ns *nodeSet) nodeInfo(nodeID string) (NodeInfo, error) {
19
+	node, ok := ns.nodes[nodeID]
20
+	if ok {
21
+		return node, nil
22
+	}
23
+	return NodeInfo{}, errNodeNotFound
24
+}
25
+
26
+// addOrUpdateNode sets the number of tasks for a given node. It adds the node
27
+// to the set if it wasn't already tracked.
28
+func (ns *nodeSet) addOrUpdateNode(n NodeInfo) {
29
+	ns.nodes[n.ID] = n
30
+}
31
+
32
+// updateNode sets the number of tasks for a given node. It ignores the update
33
+// if the node isn't already tracked in the set.
34
+func (ns *nodeSet) updateNode(n NodeInfo) {
35
+	_, ok := ns.nodes[n.ID]
36
+	if ok {
37
+		ns.nodes[n.ID] = n
38
+	}
39
+}
40
+
41
+func (ns *nodeSet) remove(nodeID string) {
42
+	delete(ns.nodes, nodeID)
43
+}
44
+
45
+type nodeMaxHeap struct {
46
+	nodes    []NodeInfo
47
+	lessFunc func(*NodeInfo, *NodeInfo) bool
48
+	length   int
49
+}
50
+
51
+func (h nodeMaxHeap) Len() int {
52
+	return h.length
53
+}
54
+
55
+func (h nodeMaxHeap) Swap(i, j int) {
56
+	h.nodes[i], h.nodes[j] = h.nodes[j], h.nodes[i]
57
+}
58
+
59
+func (h nodeMaxHeap) Less(i, j int) bool {
60
+	// reversed to make a max-heap
61
+	return h.lessFunc(&h.nodes[j], &h.nodes[i])
62
+}
63
+
64
+func (h *nodeMaxHeap) Push(x interface{}) {
65
+	h.nodes = append(h.nodes, x.(NodeInfo))
66
+	h.length++
67
+}
68
+
69
+func (h *nodeMaxHeap) Pop() interface{} {
70
+	h.length--
71
+	// return value is never used
72
+	return nil
73
+}
74
+
75
+// findBestNodes returns n nodes (or < n if fewer nodes are available) that
76
+// rank best (lowest) according to the sorting function.
77
+func (ns *nodeSet) findBestNodes(n int, meetsConstraints func(*NodeInfo) bool, nodeLess func(*NodeInfo, *NodeInfo) bool) []NodeInfo {
78
+	if n == 0 {
79
+		return []NodeInfo{}
80
+	}
81
+
82
+	nodeHeap := nodeMaxHeap{lessFunc: nodeLess}
83
+
84
+	// TODO(aaronl): Is is possible to avoid checking constraints on every
85
+	// node? Perhaps we should try to schedule with n*2 nodes that weren't
86
+	// prescreened, and repeat the selection if there weren't enough nodes
87
+	// meeting the constraints.
88
+	for _, node := range ns.nodes {
89
+		// If there are fewer then n nodes in the heap, we add this
90
+		// node if it meets the constraints. Otherwise, the heap has
91
+		// n nodes, and if this node is better than the worst node in
92
+		// the heap, we replace the worst node and then fix the heap.
93
+		if nodeHeap.Len() < n {
94
+			if meetsConstraints(&node) {
95
+				heap.Push(&nodeHeap, node)
96
+			}
97
+		} else if nodeLess(&node, &nodeHeap.nodes[0]) {
98
+			if meetsConstraints(&node) {
99
+				nodeHeap.nodes[0] = node
100
+				heap.Fix(&nodeHeap, 0)
101
+			}
102
+		}
103
+	}
104
+
105
+	// Popping every element orders the nodes from best to worst. The
106
+	// first pop gets the worst node (since this a max-heap), and puts it
107
+	// at position n-1. Then the next pop puts the next-worst at n-2, and
108
+	// so on.
109
+	for nodeHeap.Len() > 0 {
110
+		heap.Pop(&nodeHeap)
111
+	}
112
+
113
+	return nodeHeap.nodes
114
+}
... ...
@@ -1,7 +1,6 @@
1 1
 package scheduler
2 2
 
3 3
 import (
4
-	"container/heap"
5 4
 	"container/list"
6 5
 	"time"
7 6
 
... ...
@@ -24,7 +23,7 @@ type Scheduler struct {
24 24
 	unassignedTasks *list.List
25 25
 	// preassignedTasks already have NodeID, need resource validation
26 26
 	preassignedTasks map[string]*api.Task
27
-	nodeHeap         nodeHeap
27
+	nodeSet          nodeSet
28 28
 	allTasks         map[string]*api.Task
29 29
 	pipeline         *Pipeline
30 30
 
... ...
@@ -32,11 +31,6 @@ type Scheduler struct {
32 32
 	stopChan chan struct{}
33 33
 	// doneChan is closed when the state machine terminates
34 34
 	doneChan chan struct{}
35
-
36
-	// This currently exists only for benchmarking. It tells the scheduler
37
-	// scan the whole heap instead of taking the minimum-valued node
38
-	// blindly.
39
-	scanAllNodes bool
40 35
 }
41 36
 
42 37
 // New creates a new scheduler.
... ...
@@ -83,7 +77,7 @@ func (s *Scheduler) setupTasksList(tx store.ReadTx) error {
83 83
 		tasksByNode[t.NodeID][t.ID] = t
84 84
 	}
85 85
 
86
-	if err := s.buildNodeHeap(tx, tasksByNode); err != nil {
86
+	if err := s.buildNodeSet(tx, tasksByNode); err != nil {
87 87
 		return err
88 88
 	}
89 89
 
... ...
@@ -152,7 +146,7 @@ func (s *Scheduler) Run(ctx context.Context) error {
152 152
 				s.createOrUpdateNode(v.Node)
153 153
 				pendingChanges++
154 154
 			case state.EventDeleteNode:
155
-				s.nodeHeap.remove(v.Node.ID)
155
+				s.nodeSet.remove(v.Node.ID)
156 156
 			case state.EventCommit:
157 157
 				if commitDebounceTimer != nil {
158 158
 					if time.Since(debouncingStarted) > maxLatency {
... ...
@@ -210,9 +204,9 @@ func (s *Scheduler) createTask(ctx context.Context, t *api.Task) int {
210 210
 		return 0
211 211
 	}
212 212
 
213
-	nodeInfo, err := s.nodeHeap.nodeInfo(t.NodeID)
213
+	nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
214 214
 	if err == nil && nodeInfo.addTask(t) {
215
-		s.nodeHeap.updateNode(nodeInfo)
215
+		s.nodeSet.updateNode(nodeInfo)
216 216
 	}
217 217
 
218 218
 	return 0
... ...
@@ -257,9 +251,9 @@ func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
257 257
 	}
258 258
 
259 259
 	s.allTasks[t.ID] = t
260
-	nodeInfo, err := s.nodeHeap.nodeInfo(t.NodeID)
260
+	nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
261 261
 	if err == nil && nodeInfo.addTask(t) {
262
-		s.nodeHeap.updateNode(nodeInfo)
262
+		s.nodeSet.updateNode(nodeInfo)
263 263
 	}
264 264
 
265 265
 	return 0
... ...
@@ -268,14 +262,14 @@ func (s *Scheduler) updateTask(ctx context.Context, t *api.Task) int {
268 268
 func (s *Scheduler) deleteTask(ctx context.Context, t *api.Task) {
269 269
 	delete(s.allTasks, t.ID)
270 270
 	delete(s.preassignedTasks, t.ID)
271
-	nodeInfo, err := s.nodeHeap.nodeInfo(t.NodeID)
271
+	nodeInfo, err := s.nodeSet.nodeInfo(t.NodeID)
272 272
 	if err == nil && nodeInfo.removeTask(t) {
273
-		s.nodeHeap.updateNode(nodeInfo)
273
+		s.nodeSet.updateNode(nodeInfo)
274 274
 	}
275 275
 }
276 276
 
277 277
 func (s *Scheduler) createOrUpdateNode(n *api.Node) {
278
-	nodeInfo, _ := s.nodeHeap.nodeInfo(n.ID)
278
+	nodeInfo, _ := s.nodeSet.nodeInfo(n.ID)
279 279
 	var resources api.Resources
280 280
 	if n.Description != nil && n.Description.Resources != nil {
281 281
 		resources = *n.Description.Resources
... ...
@@ -288,7 +282,7 @@ func (s *Scheduler) createOrUpdateNode(n *api.Node) {
288 288
 	}
289 289
 	nodeInfo.Node = n
290 290
 	nodeInfo.AvailableResources = resources
291
-	s.nodeHeap.addOrUpdateNode(nodeInfo)
291
+	s.nodeSet.addOrUpdateNode(nodeInfo)
292 292
 }
293 293
 
294 294
 func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
... ...
@@ -308,44 +302,60 @@ func (s *Scheduler) processPreassignedTasks(ctx context.Context) {
308 308
 	}
309 309
 	for _, decision := range failed {
310 310
 		s.allTasks[decision.old.ID] = decision.old
311
-		nodeInfo, err := s.nodeHeap.nodeInfo(decision.new.NodeID)
311
+		nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID)
312 312
 		if err == nil && nodeInfo.removeTask(decision.new) {
313
-			s.nodeHeap.updateNode(nodeInfo)
313
+			s.nodeSet.updateNode(nodeInfo)
314 314
 		}
315 315
 	}
316 316
 }
317 317
 
318 318
 // tick attempts to schedule the queue.
319 319
 func (s *Scheduler) tick(ctx context.Context) {
320
+	tasksByCommonSpec := make(map[string]map[string]*api.Task)
320 321
 	schedulingDecisions := make(map[string]schedulingDecision, s.unassignedTasks.Len())
321 322
 
322 323
 	var next *list.Element
323 324
 	for e := s.unassignedTasks.Front(); e != nil; e = next {
324 325
 		next = e.Next()
325
-		id := e.Value.(*api.Task).ID
326
-		if _, ok := schedulingDecisions[id]; ok {
327
-			s.unassignedTasks.Remove(e)
328
-			continue
329
-		}
330 326
 		t := s.allTasks[e.Value.(*api.Task).ID]
331 327
 		if t == nil || t.NodeID != "" {
332 328
 			// task deleted or already assigned
333 329
 			s.unassignedTasks.Remove(e)
334 330
 			continue
335 331
 		}
336
-		if newT := s.scheduleTask(ctx, t); newT != nil {
337
-			schedulingDecisions[id] = schedulingDecision{old: t, new: newT}
338
-			s.unassignedTasks.Remove(e)
332
+
333
+		// Group common tasks with common specs by marshalling the spec
334
+		// into taskKey and using it as a map key.
335
+		// TODO(aaronl): Once specs are versioned, this will allow a
336
+		// much more efficient fast path.
337
+		fieldsToMarshal := api.Task{
338
+			ServiceID: t.ServiceID,
339
+			Spec:      t.Spec,
339 340
 		}
341
+		marshalled, err := fieldsToMarshal.Marshal()
342
+		if err != nil {
343
+			panic(err)
344
+		}
345
+		taskGroupKey := string(marshalled)
346
+
347
+		if tasksByCommonSpec[taskGroupKey] == nil {
348
+			tasksByCommonSpec[taskGroupKey] = make(map[string]*api.Task)
349
+		}
350
+		tasksByCommonSpec[taskGroupKey][t.ID] = t
351
+		s.unassignedTasks.Remove(e)
352
+	}
353
+
354
+	for _, taskGroup := range tasksByCommonSpec {
355
+		s.scheduleTaskGroup(ctx, taskGroup, schedulingDecisions)
340 356
 	}
341 357
 
342 358
 	_, failed := s.applySchedulingDecisions(ctx, schedulingDecisions)
343 359
 	for _, decision := range failed {
344 360
 		s.allTasks[decision.old.ID] = decision.old
345 361
 
346
-		nodeInfo, err := s.nodeHeap.nodeInfo(decision.new.NodeID)
362
+		nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID)
347 363
 		if err == nil && nodeInfo.removeTask(decision.new) {
348
-			s.nodeHeap.updateNode(nodeInfo)
364
+			s.nodeSet.updateNode(nodeInfo)
349 365
 		}
350 366
 
351 367
 		// enqueue task for next scheduling attempt
... ...
@@ -401,11 +411,11 @@ func (s *Scheduler) applySchedulingDecisions(ctx context.Context, schedulingDeci
401 401
 	return
402 402
 }
403 403
 
404
-// taskFitNode checks if a node has enough resource to accommodate a task
404
+// taskFitNode checks if a node has enough resources to accommodate a task.
405 405
 func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string) *api.Task {
406
-	nodeInfo, err := s.nodeHeap.nodeInfo(nodeID)
406
+	nodeInfo, err := s.nodeSet.nodeInfo(nodeID)
407 407
 	if err != nil {
408
-		// node does not exist in heap (it may have been deleted)
408
+		// node does not exist in set (it may have been deleted)
409 409
 		return nil
410 410
 	}
411 411
 	s.pipeline.SetTask(t)
... ...
@@ -422,57 +432,118 @@ func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string)
422 422
 	s.allTasks[t.ID] = &newT
423 423
 
424 424
 	if nodeInfo.addTask(&newT) {
425
-		s.nodeHeap.updateNode(nodeInfo)
425
+		s.nodeSet.updateNode(nodeInfo)
426 426
 	}
427 427
 	return &newT
428 428
 }
429 429
 
430
-// scheduleTask schedules a single task.
431
-func (s *Scheduler) scheduleTask(ctx context.Context, t *api.Task) *api.Task {
430
+// scheduleTaskGroup schedules a batch of tasks that are part of the same
431
+// service and share the same version of the spec.
432
+func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) {
433
+	// Pick at task at random from taskGroup to use for constraint
434
+	// evaluation. It doesn't matter which one we pick because all the
435
+	// tasks in the group are equal in terms of the fields the constraint
436
+	// filters consider.
437
+	var t *api.Task
438
+	for _, t = range taskGroup {
439
+		break
440
+	}
441
+
432 442
 	s.pipeline.SetTask(t)
433
-	n, _ := s.nodeHeap.findMin(s.pipeline.Process, s.scanAllNodes)
434
-	if n == nil {
435
-		log.G(ctx).WithField("task.id", t.ID).Debug("No suitable node available for task")
436
-		return nil
443
+
444
+	nodeLess := func(a *NodeInfo, b *NodeInfo) bool {
445
+		tasksByServiceA := a.DesiredRunningTasksCountByService[t.ServiceID]
446
+		tasksByServiceB := b.DesiredRunningTasksCountByService[t.ServiceID]
447
+
448
+		if tasksByServiceA < tasksByServiceB {
449
+			return true
450
+		}
451
+		if tasksByServiceA > tasksByServiceB {
452
+			return false
453
+		}
454
+
455
+		// Total number of tasks breaks ties.
456
+		return a.DesiredRunningTasksCount < b.DesiredRunningTasksCount
437 457
 	}
438 458
 
439
-	log.G(ctx).WithField("task.id", t.ID).Debugf("Assigning to node %s", n.ID)
440
-	newT := *t
441
-	newT.NodeID = n.ID
442
-	newT.Status = api.TaskStatus{
443
-		State:     api.TaskStateAssigned,
444
-		Timestamp: ptypes.MustTimestampProto(time.Now()),
445
-		Message:   "scheduler assigned task to node",
459
+	nodes := s.nodeSet.findBestNodes(len(taskGroup), s.pipeline.Process, nodeLess)
460
+	if len(nodes) == 0 {
461
+		for _, t := range taskGroup {
462
+			log.G(ctx).WithField("task.id", t.ID).Debug("no suitable node available for task")
463
+			s.enqueue(t)
464
+		}
465
+		return
446 466
 	}
447
-	s.allTasks[t.ID] = &newT
448 467
 
449
-	nodeInfo, err := s.nodeHeap.nodeInfo(n.ID)
450
-	if err == nil && nodeInfo.addTask(&newT) {
451
-		s.nodeHeap.updateNode(nodeInfo)
468
+	failedConstraints := make(map[int]bool) // key is index in nodes slice
469
+	nodeIter := 0
470
+	for taskID, t := range taskGroup {
471
+		n := &nodes[nodeIter%len(nodes)]
472
+
473
+		log.G(ctx).WithField("task.id", t.ID).Debugf("assigning to node %s", n.ID)
474
+		newT := *t
475
+		newT.NodeID = n.ID
476
+		newT.Status = api.TaskStatus{
477
+			State:     api.TaskStateAssigned,
478
+			Timestamp: ptypes.MustTimestampProto(time.Now()),
479
+			Message:   "scheduler assigned task to node",
480
+		}
481
+		s.allTasks[t.ID] = &newT
482
+
483
+		nodeInfo, err := s.nodeSet.nodeInfo(n.ID)
484
+		if err == nil && nodeInfo.addTask(&newT) {
485
+			s.nodeSet.updateNode(nodeInfo)
486
+			nodes[nodeIter%len(nodes)] = nodeInfo
487
+		}
488
+
489
+		schedulingDecisions[taskID] = schedulingDecision{old: t, new: &newT}
490
+		delete(taskGroup, taskID)
491
+
492
+		if nodeIter+1 < len(nodes) {
493
+			// First pass fills the nodes until they have the same
494
+			// number of tasks from this service.
495
+			nextNode := nodes[(nodeIter+1)%len(nodes)]
496
+			if nodeLess(&nextNode, &nodeInfo) {
497
+				nodeIter++
498
+				continue
499
+			}
500
+		} else {
501
+			// In later passes, we just assign one task at a time
502
+			// to each node that still meets the constraints.
503
+			nodeIter++
504
+		}
505
+
506
+		origNodeIter := nodeIter
507
+		for failedConstraints[nodeIter%len(nodes)] || !s.pipeline.Process(&nodes[nodeIter%len(nodes)]) {
508
+			failedConstraints[nodeIter%len(nodes)] = true
509
+			nodeIter++
510
+			if nodeIter-origNodeIter == len(nodes) {
511
+				// None of the nodes meet the constraints anymore.
512
+				for _, t := range taskGroup {
513
+					log.G(ctx).WithField("task.id", t.ID).Debug("no suitable node available for task")
514
+					s.enqueue(t)
515
+				}
516
+				return
517
+			}
518
+		}
452 519
 	}
453
-	return &newT
454 520
 }
455 521
 
456
-func (s *Scheduler) buildNodeHeap(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
522
+func (s *Scheduler) buildNodeSet(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
457 523
 	nodes, err := store.FindNodes(tx, store.All)
458 524
 	if err != nil {
459 525
 		return err
460 526
 	}
461 527
 
462
-	s.nodeHeap.alloc(len(nodes))
528
+	s.nodeSet.alloc(len(nodes))
463 529
 
464
-	i := 0
465 530
 	for _, n := range nodes {
466 531
 		var resources api.Resources
467 532
 		if n.Description != nil && n.Description.Resources != nil {
468 533
 			resources = *n.Description.Resources
469 534
 		}
470
-		s.nodeHeap.heap = append(s.nodeHeap.heap, newNodeInfo(n, tasksByNode[n.ID], resources))
471
-		s.nodeHeap.index[n.ID] = i
472
-		i++
535
+		s.nodeSet.addOrUpdateNode(newNodeInfo(n, tasksByNode[n.ID], resources))
473 536
 	}
474 537
 
475
-	heap.Init(&s.nodeHeap)
476
-
477 538
 	return nil
478 539
 }
... ...
@@ -74,6 +74,9 @@ func (c *Cluster) Tick() {
74 74
 		m.tick++
75 75
 		if m.tick > c.heartbeatTicks {
76 76
 			m.active = false
77
+			if m.Conn != nil {
78
+				m.Conn.Close()
79
+			}
77 80
 		}
78 81
 	}
79 82
 }
... ...
@@ -26,6 +26,7 @@ import (
26 26
 	"github.com/docker/swarmkit/api"
27 27
 	"github.com/docker/swarmkit/ca"
28 28
 	"github.com/docker/swarmkit/log"
29
+	"github.com/docker/swarmkit/manager/raftselector"
29 30
 	"github.com/docker/swarmkit/manager/state/raft/membership"
30 31
 	"github.com/docker/swarmkit/manager/state/store"
31 32
 	"github.com/docker/swarmkit/manager/state/watch"
... ...
@@ -82,7 +83,7 @@ type Node struct {
82 82
 	Server         *grpc.Server
83 83
 	Ctx            context.Context
84 84
 	cancel         func()
85
-	tlsCredentials credentials.TransportAuthenticator
85
+	tlsCredentials credentials.TransportCredentials
86 86
 
87 87
 	Address  string
88 88
 	StateDir string
... ...
@@ -152,7 +153,7 @@ type NewNodeOptions struct {
152 152
 	// SendTimeout is the timeout on the sending messages to other raft
153 153
 	// nodes. Leave this as 0 to get the default value.
154 154
 	SendTimeout    time.Duration
155
-	TLSCredentials credentials.TransportAuthenticator
155
+	TLSCredentials credentials.TransportCredentials
156 156
 }
157 157
 
158 158
 func init() {
... ...
@@ -176,7 +177,7 @@ func NewNode(ctx context.Context, opts NewNodeOptions) *Node {
176 176
 	n := &Node{
177 177
 		Ctx:            ctx,
178 178
 		cancel:         cancel,
179
-		cluster:        membership.NewCluster(cfg.ElectionTick),
179
+		cluster:        membership.NewCluster(2 * cfg.ElectionTick),
180 180
 		tlsCredentials: opts.TLSCredentials,
181 181
 		raftStore:      raftStore,
182 182
 		Address:        opts.Addr,
... ...
@@ -395,39 +396,55 @@ func (n *Node) Run(ctx context.Context) error {
395 395
 				n.confState = rd.Snapshot.Metadata.ConfState
396 396
 			}
397 397
 
398
-			// Process committed entries
399
-			for _, entry := range rd.CommittedEntries {
400
-				if err := n.processCommitted(entry); err != nil {
401
-					n.Config.Logger.Error(err)
402
-				}
403
-			}
398
+			// If we cease to be the leader, we must cancel any
399
+			// proposals that are currently waiting for a quorum to
400
+			// acknowledge them. It is still possible for these to
401
+			// become committed, but if that happens we will apply
402
+			// them as any follower would.
404 403
 
405
-			// Trigger a snapshot every once in awhile
406
-			if n.snapshotInProgress == nil &&
407
-				raftConfig.SnapshotInterval > 0 &&
408
-				n.appliedIndex-n.snapshotIndex >= raftConfig.SnapshotInterval {
409
-				n.doSnapshot(&raftConfig)
410
-			}
404
+			// It is important that we cancel these proposals before
405
+			// calling processCommitted, so processCommitted does
406
+			// not deadlock.
411 407
 
412
-			// If we cease to be the leader, we must cancel
413
-			// any proposals that are currently waiting for
414
-			// a quorum to acknowledge them. It is still
415
-			// possible for these to become committed, but
416
-			// if that happens we will apply them as any
417
-			// follower would.
418 408
 			if rd.SoftState != nil {
419 409
 				if wasLeader && rd.SoftState.RaftState != raft.StateLeader {
420 410
 					wasLeader = false
421
-					n.wait.cancelAll()
422 411
 					if atomic.LoadUint32(&n.signalledLeadership) == 1 {
423 412
 						atomic.StoreUint32(&n.signalledLeadership, 0)
424 413
 						n.leadershipBroadcast.Publish(IsFollower)
425 414
 					}
415
+
416
+					// It is important that we set n.signalledLeadership to 0
417
+					// before calling n.wait.cancelAll. When a new raft
418
+					// request is registered, it checks n.signalledLeadership
419
+					// afterwards, and cancels the registration if it is 0.
420
+					// If cancelAll was called first, this call might run
421
+					// before the new request registers, but
422
+					// signalledLeadership would be set after the check.
423
+					// Setting signalledLeadership before calling cancelAll
424
+					// ensures that if a new request is registered during
425
+					// this transition, it will either be cancelled by
426
+					// cancelAll, or by its own check of signalledLeadership.
427
+					n.wait.cancelAll()
426 428
 				} else if !wasLeader && rd.SoftState.RaftState == raft.StateLeader {
427 429
 					wasLeader = true
428 430
 				}
429 431
 			}
430 432
 
433
+			// Process committed entries
434
+			for _, entry := range rd.CommittedEntries {
435
+				if err := n.processCommitted(entry); err != nil {
436
+					n.Config.Logger.Error(err)
437
+				}
438
+			}
439
+
440
+			// Trigger a snapshot every once in awhile
441
+			if n.snapshotInProgress == nil &&
442
+				raftConfig.SnapshotInterval > 0 &&
443
+				n.appliedIndex-n.snapshotIndex >= raftConfig.SnapshotInterval {
444
+				n.doSnapshot(&raftConfig)
445
+			}
446
+
431 447
 			if wasLeader && atomic.LoadUint32(&n.signalledLeadership) != 1 {
432 448
 				// If all the entries in the log have become
433 449
 				// committed, broadcast our leadership status.
... ...
@@ -539,11 +556,11 @@ func (n *Node) Leader() (uint64, error) {
539 539
 	defer n.stopMu.RUnlock()
540 540
 
541 541
 	if !n.IsMember() {
542
-		return 0, ErrNoRaftMember
542
+		return raft.None, ErrNoRaftMember
543 543
 	}
544 544
 	leader := n.leader()
545
-	if leader == 0 {
546
-		return 0, ErrNoClusterLeader
545
+	if leader == raft.None {
546
+		return raft.None, ErrNoClusterLeader
547 547
 	}
548 548
 
549 549
 	return leader, nil
... ...
@@ -658,6 +675,12 @@ func (n *Node) checkHealth(ctx context.Context, addr string, timeout time.Durati
658 658
 		return err
659 659
 	}
660 660
 
661
+	if timeout != 0 {
662
+		tctx, cancel := context.WithTimeout(ctx, timeout)
663
+		defer cancel()
664
+		ctx = tctx
665
+	}
666
+
661 667
 	client := api.NewHealthClient(conn)
662 668
 	defer conn.Close()
663 669
 
... ...
@@ -828,25 +851,54 @@ func (n *Node) ResolveAddress(ctx context.Context, msg *api.ResolveAddressReques
828 828
 	return &api.ResolveAddressResponse{Addr: member.Addr}, nil
829 829
 }
830 830
 
831
-// LeaderAddr returns address of current cluster leader.
832
-// With this method Node satisfies raftpicker.AddrSelector interface.
833
-func (n *Node) LeaderAddr() (string, error) {
834
-	ctx, cancel := context.WithTimeout(n.Ctx, 10*time.Second)
835
-	defer cancel()
836
-	if err := WaitForLeader(ctx, n); err != nil {
837
-		return "", ErrNoClusterLeader
831
+func (n *Node) getLeaderConn() (*grpc.ClientConn, error) {
832
+	leader, err := n.Leader()
833
+	if err != nil {
834
+		return nil, err
838 835
 	}
839
-	n.stopMu.RLock()
840
-	defer n.stopMu.RUnlock()
841
-	if !n.IsMember() {
842
-		return "", ErrNoRaftMember
836
+
837
+	if leader == n.Config.ID {
838
+		return nil, raftselector.ErrIsLeader
843 839
 	}
844
-	ms := n.cluster.Members()
845
-	l := ms[n.leader()]
840
+	l := n.cluster.GetMember(leader)
846 841
 	if l == nil {
847
-		return "", ErrNoClusterLeader
842
+		return nil, fmt.Errorf("no leader found")
843
+	}
844
+	if !n.cluster.Active(leader) {
845
+		return nil, fmt.Errorf("leader marked as inactive")
846
+	}
847
+	if l.Conn == nil {
848
+		return nil, fmt.Errorf("no connection to leader in member list")
849
+	}
850
+	return l.Conn, nil
851
+}
852
+
853
+// LeaderConn returns current connection to cluster leader or raftselector.ErrIsLeader
854
+// if current machine is leader.
855
+func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) {
856
+	cc, err := n.getLeaderConn()
857
+	if err == nil {
858
+		return cc, nil
859
+	}
860
+	if err == raftselector.ErrIsLeader {
861
+		return nil, err
862
+	}
863
+	ticker := time.NewTicker(1 * time.Second)
864
+	defer ticker.Stop()
865
+	for {
866
+		select {
867
+		case <-ticker.C:
868
+			cc, err := n.getLeaderConn()
869
+			if err == nil {
870
+				return cc, nil
871
+			}
872
+			if err == raftselector.ErrIsLeader {
873
+				return nil, err
874
+			}
875
+		case <-ctx.Done():
876
+			return nil, ctx.Err()
877
+		}
848 878
 	}
849
-	return l.Addr, nil
850 879
 }
851 880
 
852 881
 // registerNode registers a new node on the cluster memberlist
... ...
@@ -943,7 +995,7 @@ func (n *Node) GetMemberlist() map[uint64]*api.RaftMember {
943 943
 	members := n.cluster.Members()
944 944
 	leaderID, err := n.Leader()
945 945
 	if err != nil {
946
-		leaderID = 0
946
+		leaderID = raft.None
947 947
 	}
948 948
 
949 949
 	for id, member := range members {
... ...
@@ -1163,7 +1215,11 @@ func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRa
1163 1163
 
1164 1164
 	r.ID = n.reqIDGen.Next()
1165 1165
 
1166
-	ch := n.wait.register(r.ID, cb)
1166
+	// This must be derived from the context which is cancelled by stop()
1167
+	// to avoid a deadlock on shutdown.
1168
+	waitCtx, cancel := context.WithCancel(n.Ctx)
1169
+
1170
+	ch := n.wait.register(r.ID, cb, cancel)
1167 1171
 
1168 1172
 	// Do this check after calling register to avoid a race.
1169 1173
 	if atomic.LoadUint32(&n.signalledLeadership) != 1 {
... ...
@@ -1182,24 +1238,19 @@ func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRa
1182 1182
 		return nil, ErrRequestTooLarge
1183 1183
 	}
1184 1184
 
1185
-	// This must use the context which is cancelled by stop() to avoid a
1186
-	// deadlock on shutdown.
1187
-	err = n.Propose(n.Ctx, data)
1185
+	err = n.Propose(waitCtx, data)
1188 1186
 	if err != nil {
1189 1187
 		n.wait.cancel(r.ID)
1190 1188
 		return nil, err
1191 1189
 	}
1192 1190
 
1193 1191
 	select {
1194
-	case x, ok := <-ch:
1195
-		if ok {
1196
-			res := x.(*applyResult)
1197
-			return res.resp, res.err
1198
-		}
1199
-		return nil, ErrLostLeadership
1200
-	case <-n.Ctx.Done():
1192
+	case x := <-ch:
1193
+		res := x.(*applyResult)
1194
+		return res.resp, res.err
1195
+	case <-waitCtx.Done():
1201 1196
 		n.wait.cancel(r.ID)
1202
-		return nil, ErrStopped
1197
+		return nil, ErrLostLeadership
1203 1198
 	case <-ctx.Done():
1204 1199
 		n.wait.cancel(r.ID)
1205 1200
 		return nil, ctx.Err()
... ...
@@ -1211,10 +1262,12 @@ func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRa
1211 1211
 // until the change is performed or there is an error.
1212 1212
 func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
1213 1213
 	cc.ID = n.reqIDGen.Next()
1214
-	ch := n.wait.register(cc.ID, nil)
1214
+
1215
+	ctx, cancel := context.WithCancel(ctx)
1216
+	ch := n.wait.register(cc.ID, nil, cancel)
1215 1217
 
1216 1218
 	if err := n.ProposeConfChange(ctx, cc); err != nil {
1217
-		n.wait.trigger(cc.ID, nil)
1219
+		n.wait.cancel(cc.ID)
1218 1220
 		return err
1219 1221
 	}
1220 1222
 
... ...
@@ -1228,7 +1281,7 @@ func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error {
1228 1228
 		}
1229 1229
 		return nil
1230 1230
 	case <-ctx.Done():
1231
-		n.wait.trigger(cc.ID, nil)
1231
+		n.wait.cancel(cc.ID)
1232 1232
 		return ctx.Err()
1233 1233
 	case <-n.Ctx.Done():
1234 1234
 		return ErrStopped
... ...
@@ -1271,6 +1324,11 @@ func (n *Node) processEntry(entry raftpb.Entry) error {
1271 1271
 		// position and cancelling the transaction. Create a new
1272 1272
 		// transaction to commit the data.
1273 1273
 
1274
+		// It should not be possible for processInternalRaftRequest
1275
+		// to be running in this situation, but out of caution we
1276
+		// cancel any current invocations to avoid a deadlock.
1277
+		n.wait.cancelAll()
1278
+
1274 1279
 		err := n.memoryStore.ApplyStoreActions(r.Action)
1275 1280
 		if err != nil {
1276 1281
 			log.G(context.Background()).Errorf("error applying actions from raft: %v", err)
... ...
@@ -13,7 +13,7 @@ import (
13 13
 )
14 14
 
15 15
 // dial returns a grpc client connection
16
-func dial(addr string, protocol string, creds credentials.TransportAuthenticator, timeout time.Duration) (*grpc.ClientConn, error) {
16
+func dial(addr string, protocol string, creds credentials.TransportCredentials, timeout time.Duration) (*grpc.ClientConn, error) {
17 17
 	grpcOptions := []grpc.DialOption{
18 18
 		grpc.WithBackoffMaxDelay(2 * time.Second),
19 19
 		grpc.WithTransportCredentials(creds),
... ...
@@ -10,6 +10,8 @@ type waitItem struct {
10 10
 	ch chan interface{}
11 11
 	// callback which is called synchronously when the wait is triggered
12 12
 	cb func()
13
+	// callback which is called to cancel a waiter
14
+	cancel func()
13 15
 }
14 16
 
15 17
 type wait struct {
... ...
@@ -21,13 +23,13 @@ func newWait() *wait {
21 21
 	return &wait{m: make(map[uint64]waitItem)}
22 22
 }
23 23
 
24
-func (w *wait) register(id uint64, cb func()) <-chan interface{} {
24
+func (w *wait) register(id uint64, cb func(), cancel func()) <-chan interface{} {
25 25
 	w.l.Lock()
26 26
 	defer w.l.Unlock()
27 27
 	_, ok := w.m[id]
28 28
 	if !ok {
29 29
 		ch := make(chan interface{}, 1)
30
-		w.m[id] = waitItem{ch: ch, cb: cb}
30
+		w.m[id] = waitItem{ch: ch, cb: cb, cancel: cancel}
31 31
 		return ch
32 32
 	}
33 33
 	panic(fmt.Sprintf("duplicate id %x", id))
... ...
@@ -43,7 +45,6 @@ func (w *wait) trigger(id uint64, x interface{}) bool {
43 43
 			waitItem.cb()
44 44
 		}
45 45
 		waitItem.ch <- x
46
-		close(waitItem.ch)
47 46
 		return true
48 47
 	}
49 48
 	return false
... ...
@@ -54,8 +55,8 @@ func (w *wait) cancel(id uint64) {
54 54
 	waitItem, ok := w.m[id]
55 55
 	delete(w.m, id)
56 56
 	w.l.Unlock()
57
-	if ok {
58
-		close(waitItem.ch)
57
+	if ok && waitItem.cancel != nil {
58
+		waitItem.cancel()
59 59
 	}
60 60
 }
61 61
 
... ...
@@ -65,6 +66,8 @@ func (w *wait) cancelAll() {
65 65
 
66 66
 	for id, waitItem := range w.m {
67 67
 		delete(w.m, id)
68
-		close(waitItem.ch)
68
+		if waitItem.cancel != nil {
69
+			waitItem.cancel()
70
+		}
69 71
 	}
70 72
 }
... ...
@@ -6,6 +6,29 @@ import (
6 6
 	"github.com/docker/go-events"
7 7
 )
8 8
 
9
+// dropErrClosed is a sink that suppresses ErrSinkClosed from Write, to avoid
10
+// debug log messages that may be confusing. It is possible that the queue
11
+// will try to write an event to its destination channel while the queue is
12
+// being removed from the broadcaster. Since the channel is closed before the
13
+// queue, there is a narrow window when this is possible. In some event-based
14
+// dropping events when a sink is removed from a broadcaster is a problem, but
15
+// for the usage in this watch package that's the expected behavior.
16
+type dropErrClosed struct {
17
+	sink events.Sink
18
+}
19
+
20
+func (s dropErrClosed) Write(event events.Event) error {
21
+	err := s.sink.Write(event)
22
+	if err == events.ErrSinkClosed {
23
+		return nil
24
+	}
25
+	return err
26
+}
27
+
28
+func (s dropErrClosed) Close() error {
29
+	return s.sink.Close()
30
+}
31
+
9 32
 // Queue is the structure used to publish events and watch for them.
10 33
 type Queue struct {
11 34
 	mu          sync.Mutex
... ...
@@ -35,7 +58,7 @@ func (q *Queue) Watch() (eventq chan events.Event, cancel func()) {
35 35
 // close the channel.
36 36
 func (q *Queue) CallbackWatch(matcher events.Matcher) (eventq chan events.Event, cancel func()) {
37 37
 	ch := events.NewChannel(0)
38
-	sink := events.Sink(events.NewQueue(ch))
38
+	sink := events.Sink(events.NewQueue(dropErrClosed{sink: ch}))
39 39
 
40 40
 	if matcher != nil {
41 41
 		sink = events.NewFilter(sink, matcher)
... ...
@@ -33,7 +33,9 @@ var _ = math.Inf
33 33
 
34 34
 // This is a compile-time assertion to ensure that this generated file
35 35
 // is compatible with the proto package it is being compiled against.
36
-const _ = proto.GoGoProtoPackageIsVersion1
36
+// A compilation error at this line likely means your copy of the
37
+// proto package needs to be updated.
38
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
37 39
 
38 40
 type TLSAuthorization struct {
39 41
 	// Roles contains the acceptable TLS OU roles for the handler.
... ...
@@ -96,11 +98,12 @@ func valueToGoStringPlugin(v interface{}, typ string) string {
96 96
 	pv := reflect.Indirect(rv).Interface()
97 97
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
98 98
 }
99
-func extensionToGoStringPlugin(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
99
+func extensionToGoStringPlugin(m github_com_gogo_protobuf_proto.Message) string {
100
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
100 101
 	if e == nil {
101 102
 		return "nil"
102 103
 	}
103
-	s := "map[int32]proto.Extension{"
104
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
104 105
 	keys := make([]int, 0, len(e))
105 106
 	for k := range e {
106 107
 		keys = append(keys, int(k))
... ...
@@ -110,7 +113,7 @@ func extensionToGoStringPlugin(e map[int32]github_com_gogo_protobuf_proto.Extens
110 110
 	for _, k := range keys {
111 111
 		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
112 112
 	}
113
-	s += strings.Join(ss, ",") + "}"
113
+	s += strings.Join(ss, ",") + "})"
114 114
 	return s
115 115
 }
116 116
 func (m *TLSAuthorization) Marshal() (data []byte, err error) {
... ...
@@ -443,6 +446,8 @@ var (
443 443
 	ErrIntOverflowPlugin   = fmt.Errorf("proto: integer overflow")
444 444
 )
445 445
 
446
+func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
447
+
446 448
 var fileDescriptorPlugin = []byte{
447 449
 	// 259 bytes of a gzipped FileDescriptorProto
448 450
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,