Browse code

[1.13.x] Vendoring swarmkit @c7df892

Signed-off-by: Alessandro Boch <aboch@docker.com>

Alessandro Boch authored on 2017/02/15 14:08:59
Showing 6 changed files
... ...
@@ -101,7 +101,7 @@ github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1
101 101
 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
102 102
 
103 103
 # cluster
104
-github.com/docker/swarmkit 1c7f003d75f091d5f7051ed982594420e4515f77
104
+github.com/docker/swarmkit c7df892262aa0bec0a3e52ea76219b7b364ded38
105 105
 github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
106 106
 github.com/gogo/protobuf v0.3
107 107
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
... ...
@@ -289,8 +289,9 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
289 289
 			if a.taskAllocateVote(networkVoter, t.ID) {
290 290
 				// If the task is not attached to any network, network
291 291
 				// allocator's job is done. Immediately cast a vote so
292
-				// that the task can be moved to ALLOCATED state as
292
+				// that the task can be moved to the PENDING state as
293 293
 				// soon as possible.
294
+				updateTaskStatus(t, api.TaskStatePending, allocatedStatusMessage)
294 295
 				allocatedTasks = append(allocatedTasks, t)
295 296
 			}
296 297
 			continue
... ...
@@ -467,7 +468,7 @@ func taskDead(t *api.Task) bool {
467 467
 }
468 468
 
469 469
 // taskReadyForNetworkVote checks if the task is ready for a network
470
-// vote to move it to ALLOCATED state.
470
+// vote to move it to PENDING state.
471 471
 func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool {
472 472
 	// Task is ready for vote if the following is true:
473 473
 	//
... ...
@@ -272,7 +272,8 @@ func (pa *portAllocator) portsAllocatedInHostPublishMode(s *api.Service) bool {
272 272
 
273 273
 	if s.Spec.Endpoint != nil {
274 274
 		for _, portConfig := range s.Spec.Endpoint.Ports {
275
-			if portConfig.PublishMode == api.PublishModeHost {
275
+			if portConfig.PublishMode == api.PublishModeHost &&
276
+				portConfig.PublishedPort != 0 {
276 277
 				if portStates.delState(portConfig) == nil {
277 278
 					return false
278 279
 				}
... ...
@@ -200,8 +200,6 @@ func (k *KeyManager) Run(ctx context.Context) error {
200 200
 	} else {
201 201
 		k.keyRing.lClock = cluster.EncryptionKeyLamportClock
202 202
 		k.keyRing.keys = cluster.NetworkBootstrapKeys
203
-
204
-		k.rotateKey(ctx)
205 203
 	}
206 204
 
207 205
 	ticker := time.NewTicker(k.config.RotationInterval)
... ...
@@ -504,7 +504,7 @@ func (g *Orchestrator) removeTasks(ctx context.Context, batch *store.Batch, task
504 504
 }
505 505
 
506 506
 func isTaskRunning(t *api.Task) bool {
507
-	return t != nil && t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning
507
+	return t != nil && t.DesiredState <= api.TaskStateRunning
508 508
 }
509 509
 
510 510
 func isTaskCompleted(t *api.Task, restartPolicy api.RestartPolicy_RestartCondition) bool {
... ...
@@ -131,11 +131,13 @@ func (tr *TaskReaper) tick() {
131 131
 	}
132 132
 
133 133
 	defer func() {
134
-		tr.dirty = make(map[instanceTuple]struct{})
135 134
 		tr.orphaned = nil
136 135
 	}()
137 136
 
138
-	deleteTasks := tr.orphaned
137
+	deleteTasks := make(map[string]struct{})
138
+	for _, tID := range tr.orphaned {
139
+		deleteTasks[tID] = struct{}{}
140
+	}
139 141
 	tr.store.View(func(tx store.ReadTx) {
140 142
 		for dirty := range tr.dirty {
141 143
 			service := store.GetService(tx, dirty.serviceID)
... ...
@@ -180,13 +182,15 @@ func (tr *TaskReaper) tick() {
180 180
 			// instead of sorting the whole slice.
181 181
 			sort.Sort(tasksByTimestamp(historicTasks))
182 182
 
183
+			runningTasks := 0
183 184
 			for _, t := range historicTasks {
184
-				if t.DesiredState <= api.TaskStateRunning {
185
+				if t.DesiredState <= api.TaskStateRunning || t.Status.State <= api.TaskStateRunning {
185 186
 					// Don't delete running tasks
187
+					runningTasks++
186 188
 					continue
187 189
 				}
188 190
 
189
-				deleteTasks = append(deleteTasks, t.ID)
191
+				deleteTasks[t.ID] = struct{}{}
190 192
 
191 193
 				taskHistory++
192 194
 				if int64(len(historicTasks)) <= taskHistory {
... ...
@@ -194,12 +198,15 @@ func (tr *TaskReaper) tick() {
194 194
 				}
195 195
 			}
196 196
 
197
+			if runningTasks <= 1 {
198
+				delete(tr.dirty, dirty)
199
+			}
197 200
 		}
198 201
 	})
199 202
 
200 203
 	if len(deleteTasks) > 0 {
201 204
 		tr.store.Batch(func(batch *store.Batch) error {
202
-			for _, taskID := range deleteTasks {
205
+			for taskID := range deleteTasks {
203 206
 				batch.Update(func(tx store.Tx) error {
204 207
 					return store.DeleteTask(tx, taskID)
205 208
 				})