Synchronous service create and service update

Change "service create" and "service update" to wait until the creation
or update finishes, when --detach=false is specified. Show progress bars
for the overall operation and for each individual task (when there are a
small enough number of tasks), unless "-q" / "--quiet" is specified.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>

Aaron Lehmann authored on 2017/02/17 10:05:36
Showing 13 changed files
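
For illustration of the behavior described above, two hypothetical invocations (the service name and images are placeholders, not part of this change): with --detach=false the command blocks, prints an overall progress bar plus one bar per task, and returns once the service has converged and stayed stable for the monitor period; adding -q/--quiet waits without printing progress.

    $ docker service create --detach=false --name web --replicas=3 nginx:alpine
    $ docker service update --detach=false --quiet --image nginx:1.11-alpine web

If --detach is not passed at all, both commands keep today's detached behavior and print the notice (shown in the diffs below) that the default will change in a future release.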
... ...
@@ -7,6 +7,7 @@ import (
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/cli/command"
 	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
 	"golang.org/x/net/context"
 )
 
... ...
@@ -22,7 +23,7 @@ func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command {
 			if len(args) > 1 {
 				opts.args = args[1:]
 			}
-			return runCreate(dockerCli, opts)
+			return runCreate(dockerCli, cmd.Flags(), opts)
 		},
 	}
 	flags := cmd.Flags()
... ...
@@ -58,7 +59,7 @@ func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command {
 	return cmd
 }
 
-func runCreate(dockerCli *command.DockerCli, opts *serviceOptions) error {
+func runCreate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *serviceOptions) error {
 	apiClient := dockerCli.Client()
 	createOpts := types.ServiceCreateOptions{}
 
... ...
@@ -104,5 +105,14 @@ func runCreate(dockerCli *command.DockerCli, opts *serviceOptions) error {
 	}
 
 	fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID)
-	return nil
+
+	if opts.detach {
+		if !flags.Changed("detach") {
+			fmt.Fprintln(dockerCli.Err(), "Since --detach=false was not specified, tasks will be created in the background.\n"+
+				"In a future release, --detach=false will become the default.")
+		}
+		return nil
+	}
+
+	return waitOnService(ctx, dockerCli, response.ID, opts)
 }
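
The Changed("detach") check above is what lets the CLI warn only when the user relied on the default rather than passing --detach explicitly. A minimal, self-contained sketch of that pflag behavior (the flag set name and parsed arguments are invented for illustration):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
        detach := flags.BoolP("detach", "d", true, "exit immediately instead of waiting")

        // Hypothetical command line: the user passed --detach=true explicitly.
        if err := flags.Parse([]string{"--detach=true"}); err != nil {
            panic(err)
        }

        // The value is true either way; Changed reports whether the user set
        // the flag, which is what runCreate/runUpdate use to decide whether
        // to print the notice about the default changing.
        fmt.Println(*detach, flags.Changed("detach")) // true true
    }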
new file mode 100644
... ...
@@ -0,0 +1,39 @@
+package service
+
+import (
+	"io"
+
+	"github.com/docker/docker/cli/command"
+	"github.com/docker/docker/cli/command/service/progress"
+	"github.com/docker/docker/pkg/jsonmessage"
+	"golang.org/x/net/context"
+)
+
+// waitOnService waits for the service to converge. It outputs a progress bar,
+// if appropriate based on the CLI flags.
+func waitOnService(ctx context.Context, dockerCli *command.DockerCli, serviceID string, opts *serviceOptions) error {
+	errChan := make(chan error, 1)
+	pipeReader, pipeWriter := io.Pipe()
+
+	go func() {
+		errChan <- progress.ServiceProgress(ctx, dockerCli.Client(), serviceID, pipeWriter)
+	}()
+
+	if opts.quiet {
+		go func() {
+			for {
+				var buf [1024]byte
+				if _, err := pipeReader.Read(buf[:]); err != nil {
+					return
+				}
+			}
+		}()
+		return <-errChan
+	}
+
+	err := jsonmessage.DisplayJSONMessagesToStream(pipeReader, dockerCli.Out(), nil)
+	if err == nil {
+		err = <-errChan
+	}
+	return err
+}
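
waitOnService connects ServiceProgress to the terminal through an in-memory pipe: a goroutine produces progress output into pipeWriter, and the CLI either renders it or, under --quiet, merely drains it so the writer never blocks. A stripped-down sketch of that producer/consumer shape using only the standard library (the messages are invented; the real producer is progress.ServiceProgress and the real consumer is jsonmessage):

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    func main() {
        errChan := make(chan error, 1)
        pipeReader, pipeWriter := io.Pipe()

        // Producer: stands in for progress.ServiceProgress, which writes
        // progress records to the pipe and closes it when it returns.
        go func() {
            defer pipeWriter.Close()
            for i := 1; i <= 3; i++ {
                fmt.Fprintf(pipeWriter, "step %d\n", i)
            }
            errChan <- nil
        }()

        // Consumer: stands in for DisplayJSONMessagesToStream; in quiet mode
        // the CLI instead discards the reads in a background goroutine.
        if _, err := io.Copy(os.Stdout, pipeReader); err != nil {
            panic(err)
        }
        fmt.Println("converged:", <-errChan)
    }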
... ...
@@ -333,6 +333,9 @@ func convertExtraHostsToSwarmHosts(extraHosts []string) []string {
 }
 
 type serviceOptions struct {
+	detach bool
+	quiet  bool
+
 	name            string
 	labels          opts.ListOpts
 	containerLabels opts.ListOpts
... ...
@@ -496,6 +499,9 @@ func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) {
 // addServiceFlags adds all flags that are common to both `create` and `update`.
 // Any flags that are not common are added separately in the individual command
 func addServiceFlags(flags *pflag.FlagSet, opts *serviceOptions) {
+	flags.BoolVarP(&opts.detach, "detach", "d", true, "Exit immediately instead of waiting for the service to converge")
+	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress progress output")
+
 	flags.StringVarP(&opts.workdir, flagWorkdir, "w", "", "Working directory inside the container")
 	flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID (format: <name|uid>[:<group|gid>])")
 	flags.StringVar(&opts.hostname, flagHostname, "", "Container hostname")
new file mode 100644
... ...
@@ -0,0 +1,409 @@
+package progress
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/signal"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/stringid"
+	"golang.org/x/net/context"
+)
+
+var (
+	numberedStates = map[swarm.TaskState]int64{
+		swarm.TaskStateNew:       1,
+		swarm.TaskStateAllocated: 2,
+		swarm.TaskStatePending:   3,
+		swarm.TaskStateAssigned:  4,
+		swarm.TaskStateAccepted:  5,
+		swarm.TaskStatePreparing: 6,
+		swarm.TaskStateReady:     7,
+		swarm.TaskStateStarting:  8,
+		swarm.TaskStateRunning:   9,
+	}
+
+	longestState int
+)
+
+const (
+	maxProgress     = 9
+	maxProgressBars = 20
+)
+
+type progressUpdater interface {
+	update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]swarm.Node, rollback bool) (bool, error)
+}
+
+func init() {
+	for state := range numberedStates {
+		if len(state) > longestState {
+			longestState = len(state)
+		}
+	}
+}
+
+func stateToProgress(state swarm.TaskState, rollback bool) int64 {
+	if !rollback {
+		return numberedStates[state]
+	}
+	return int64(len(numberedStates)) - numberedStates[state]
+}
+
+// ServiceProgress outputs progress information for convergence of a service.
+func ServiceProgress(ctx context.Context, client client.APIClient, serviceID string, progressWriter io.WriteCloser) error {
+	defer progressWriter.Close()
+
+	progressOut := streamformatter.NewJSONStreamFormatter().NewProgressOutput(progressWriter, false)
+
+	sigint := make(chan os.Signal, 1)
+	signal.Notify(sigint, os.Interrupt)
+	defer signal.Stop(sigint)
+
+	taskFilter := filters.NewArgs()
+	taskFilter.Add("service", serviceID)
+	taskFilter.Add("_up-to-date", "true")
+
+	getUpToDateTasks := func() ([]swarm.Task, error) {
+		return client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter})
+	}
+
+	var (
+		updater     progressUpdater
+		converged   bool
+		convergedAt time.Time
+		monitor     = 5 * time.Second
+		rollback    bool
+	)
+
+	for {
+		service, _, err := client.ServiceInspectWithRaw(ctx, serviceID)
+		if err != nil {
+			return err
+		}
+
+		if service.Spec.UpdateConfig != nil && service.Spec.UpdateConfig.Monitor != 0 {
+			monitor = service.Spec.UpdateConfig.Monitor
+		}
+
+		if updater == nil {
+			updater, err = initializeUpdater(service, progressOut)
+			if err != nil {
+				return err
+			}
+		}
+
+		if service.UpdateStatus != nil {
+			switch service.UpdateStatus.State {
+			case swarm.UpdateStateUpdating:
+				rollback = false
+			case swarm.UpdateStateCompleted:
+				if !converged {
+					return nil
+				}
+			case swarm.UpdateStatePaused:
+				return fmt.Errorf("service update paused: %s", service.UpdateStatus.Message)
+			case swarm.UpdateStateRollbackStarted:
+				if !rollback && service.UpdateStatus.Message != "" {
+					progressOut.WriteProgress(progress.Progress{
+						ID:     "rollback",
+						Action: service.UpdateStatus.Message,
+					})
+				}
+				rollback = true
+			case swarm.UpdateStateRollbackPaused:
+				return fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message)
+			case swarm.UpdateStateRollbackCompleted:
+				if !converged {
+					return fmt.Errorf("service rolled back: %s", service.UpdateStatus.Message)
+				}
+			}
+		}
+		if converged && time.Since(convergedAt) >= monitor {
+			return nil
+		}
+
+		tasks, err := getUpToDateTasks()
+		if err != nil {
+			return err
+		}
+
+		activeNodes, err := getActiveNodes(ctx, client)
+		if err != nil {
+			return err
+		}
+
+		converged, err = updater.update(service, tasks, activeNodes, rollback)
+		if err != nil {
+			return err
+		}
+		if converged {
+			if convergedAt.IsZero() {
+				convergedAt = time.Now()
+			}
+			wait := monitor - time.Since(convergedAt)
+			if wait >= 0 {
+				progressOut.WriteProgress(progress.Progress{
+					// Ideally this would have no ID, but
+					// the progress rendering code behaves
+					// poorly on an "action" with no ID. It
+					// returns the cursor to the beginning
+					// of the line, so the first character
+					// may be difficult to read. Then the
+					// output is overwritten by the shell
+					// prompt when the command finishes.
+					ID:     "verify",
+					Action: fmt.Sprintf("Waiting %d seconds to verify that tasks are stable...", wait/time.Second+1),
+				})
+			}
+		} else {
+			if !convergedAt.IsZero() {
+				progressOut.WriteProgress(progress.Progress{
+					ID:     "verify",
+					Action: "Detected task failure",
+				})
+			}
+			convergedAt = time.Time{}
+		}
+
+		select {
+		case <-time.After(200 * time.Millisecond):
+		case <-sigint:
+			if !converged {
+				progress.Message(progressOut, "", "Operation continuing in background.")
+				progress.Messagef(progressOut, "", "Use `docker service ps %s` to check progress.", serviceID)
+			}
+			return nil
+		}
+	}
+}
+
+func getActiveNodes(ctx context.Context, client client.APIClient) (map[string]swarm.Node, error) {
+	nodes, err := client.NodeList(ctx, types.NodeListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	activeNodes := make(map[string]swarm.Node)
+	for _, n := range nodes {
+		if n.Status.State != swarm.NodeStateDown {
+			activeNodes[n.ID] = n
+		}
+	}
+	return activeNodes, nil
+}
+
+func initializeUpdater(service swarm.Service, progressOut progress.Output) (progressUpdater, error) {
+	if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
+		return &replicatedProgressUpdater{
+			progressOut: progressOut,
+		}, nil
+	}
+	if service.Spec.Mode.Global != nil {
+		return &globalProgressUpdater{
+			progressOut: progressOut,
+		}, nil
+	}
+	return nil, errors.New("unrecognized service mode")
+}
+
+func writeOverallProgress(progressOut progress.Output, numerator, denominator int, rollback bool) {
+	if rollback {
+		progressOut.WriteProgress(progress.Progress{
+			ID:     "overall progress",
+			Action: fmt.Sprintf("rolling back update: %d out of %d tasks", numerator, denominator),
+		})
+		return
+	}
+	progressOut.WriteProgress(progress.Progress{
+		ID:     "overall progress",
+		Action: fmt.Sprintf("%d out of %d tasks", numerator, denominator),
+	})
+}
+
+type replicatedProgressUpdater struct {
+	progressOut progress.Output
+
+	// used for mapping slots to a contiguous space
+	// this also causes progress bars to appear in order
+	slotMap map[int]int
+
+	initialized bool
+	done        bool
+}
+
+func (u *replicatedProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]swarm.Node, rollback bool) (bool, error) {
+	if service.Spec.Mode.Replicated == nil || service.Spec.Mode.Replicated.Replicas == nil {
+		return false, errors.New("no replica count")
+	}
+	replicas := *service.Spec.Mode.Replicated.Replicas
+
+	if !u.initialized {
+		u.slotMap = make(map[int]int)
+
+		// Draw progress bars in order
+		writeOverallProgress(u.progressOut, 0, int(replicas), rollback)
+
+		if replicas <= maxProgressBars {
+			for i := uint64(1); i <= replicas; i++ {
+				progress.Update(u.progressOut, fmt.Sprintf("%d/%d", i, replicas), " ")
+			}
+		}
+		u.initialized = true
+	}
+
+	// If there are multiple tasks with the same slot number, favor the one
+	// with the *lowest* desired state. This can happen in restart
+	// scenarios.
+	tasksBySlot := make(map[int]swarm.Task)
+	for _, task := range tasks {
+		if numberedStates[task.DesiredState] == 0 {
+			continue
+		}
+		if existingTask, ok := tasksBySlot[task.Slot]; ok {
+			if numberedStates[existingTask.DesiredState] <= numberedStates[task.DesiredState] {
+				continue
+			}
+		}
+		if _, nodeActive := activeNodes[task.NodeID]; nodeActive {
+			tasksBySlot[task.Slot] = task
+		}
+	}
+
+	// If we had reached a converged state, check if we are still converged.
+	if u.done {
+		for _, task := range tasksBySlot {
+			if task.Status.State != swarm.TaskStateRunning {
+				u.done = false
+				break
+			}
+		}
+	}
+
+	running := uint64(0)
+
+	for _, task := range tasksBySlot {
+		mappedSlot := u.slotMap[task.Slot]
+		if mappedSlot == 0 {
+			mappedSlot = len(u.slotMap) + 1
+			u.slotMap[task.Slot] = mappedSlot
+		}
+
+		if !u.done && replicas <= maxProgressBars && uint64(mappedSlot) <= replicas {
+			u.progressOut.WriteProgress(progress.Progress{
+				ID:         fmt.Sprintf("%d/%d", mappedSlot, replicas),
+				Action:     fmt.Sprintf("%-[1]*s", longestState, task.Status.State),
+				Current:    stateToProgress(task.Status.State, rollback),
+				Total:      maxProgress,
+				HideCounts: true,
+			})
+		}
+		if task.Status.State == swarm.TaskStateRunning {
+			running++
+		}
+	}
+
+	if !u.done {
+		writeOverallProgress(u.progressOut, int(running), int(replicas), rollback)
+
+		if running == replicas {
+			u.done = true
+		}
+	}
+
+	return running == replicas, nil
+}
+
+type globalProgressUpdater struct {
+	progressOut progress.Output
+
+	initialized bool
+	done        bool
+}
+
+func (u *globalProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]swarm.Node, rollback bool) (bool, error) {
+	// If there are multiple tasks with the same node ID, favor the one
+	// with the *lowest* desired state. This can happen in restart
+	// scenarios.
+	tasksByNode := make(map[string]swarm.Task)
+	for _, task := range tasks {
+		if numberedStates[task.DesiredState] == 0 {
+			continue
+		}
+		if existingTask, ok := tasksByNode[task.NodeID]; ok {
+			if numberedStates[existingTask.DesiredState] <= numberedStates[task.DesiredState] {
+				continue
+			}
+		}
+		tasksByNode[task.NodeID] = task
+	}
+
+	// We don't have perfect knowledge of how many nodes meet the
+	// constraints for this service. But the orchestrator creates tasks
+	// for all eligible nodes at the same time, so we should see all those
+	// nodes represented among the up-to-date tasks.
+	nodeCount := len(tasksByNode)
+
+	if !u.initialized {
+		if nodeCount == 0 {
+			// Two possibilities: either the orchestrator hasn't created
+			// the tasks yet, or the service doesn't meet constraints for
+			// any node. Either way, we wait.
+			u.progressOut.WriteProgress(progress.Progress{
+				ID:     "overall progress",
+				Action: "waiting for new tasks",
+			})
+			return false, nil
+		}
+
+		writeOverallProgress(u.progressOut, 0, nodeCount, rollback)
+		u.initialized = true
+	}
+
+	// If we had reached a converged state, check if we are still converged.
+	if u.done {
+		for _, task := range tasksByNode {
+			if task.Status.State != swarm.TaskStateRunning {
+				u.done = false
+				break
+			}
+		}
+	}
+
+	running := 0
+
+	for _, task := range tasksByNode {
+		if node, nodeActive := activeNodes[task.NodeID]; nodeActive {
+			if !u.done && nodeCount <= maxProgressBars {
+				u.progressOut.WriteProgress(progress.Progress{
+					ID:         stringid.TruncateID(node.ID),
+					Action:     fmt.Sprintf("%-[1]*s", longestState, task.Status.State),
+					Current:    stateToProgress(task.Status.State, rollback),
+					Total:      maxProgress,
+					HideCounts: true,
+				})
+			}
+			if task.Status.State == swarm.TaskStateRunning {
+				running++
+			}
+		}
+	}
+
+	if !u.done {
+		writeOverallProgress(u.progressOut, running, nodeCount, rollback)
+
+		if running == nodeCount {
+			u.done = true
+		}
+	}
+
+	return running == nodeCount, nil
+}
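
The per-task bars above are driven by numberedStates and stateToProgress: each lifecycle state maps to a step on a 9-step scale, and during a rollback the scale is inverted so the bars appear to run backwards toward the old state. A simplified, self-contained illustration of that mapping (it mirrors the unexported logic with plain strings rather than reusing the package):

    package main

    import "fmt"

    func main() {
        // Mirrors the state-to-step mapping used for the per-task bars.
        numberedStates := map[string]int64{
            "new": 1, "allocated": 2, "pending": 3,
            "assigned": 4, "accepted": 5, "preparing": 6,
            "ready": 7, "starting": 8, "running": 9,
        }
        const maxProgress = 9

        stateToProgress := func(state string, rollback bool) int64 {
            if !rollback {
                return numberedStates[state]
            }
            // During a rollback the bar runs in reverse.
            return int64(len(numberedStates)) - numberedStates[state]
        }

        for _, state := range []string{"new", "preparing", "running"} {
            fmt.Printf("%-9s  forward %d/%d  rollback %d/%d\n",
                state,
                stateToProgress(state, false), maxProgress,
                stateToProgress(state, true), maxProgress)
        }
        // forward:  new=1/9, preparing=6/9, running=9/9
        // rollback: new=8/9, preparing=3/9, running=0/9
    }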
... ...
@@ -31,7 +31,7 @@ func newUpdateCommand(dockerCli *command.DockerCli) *cobra.Command {
 		Short: "Update a service",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return runUpdate(dockerCli, cmd.Flags(), args[0])
+			return runUpdate(dockerCli, cmd.Flags(), serviceOpts, args[0])
 		},
 	}
 
... ...
@@ -93,7 +93,7 @@ func newListOptsVar() *opts.ListOpts {
 	return opts.NewListOptsRef(&[]string{}, nil)
 }
 
-func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, serviceID string) error {
+func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, opts *serviceOptions, serviceID string) error {
 	apiClient := dockerCli.Client()
 	ctx := context.Background()
 
... ...
@@ -195,7 +195,16 @@ func runUpdate(dockerCli *command.DockerCli, flags *pflag.FlagSet, serviceID str
 	}
 
 	fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID)
-	return nil
+
+	if opts.detach {
+		if !flags.Changed("detach") {
+			fmt.Fprintln(dockerCli.Err(), "Since --detach=false was not specified, tasks will be updated in the background.\n"+
+				"In a future release, --detach=false will become the default.")
+		}
+		return nil
+	}
+
+	return waitOnService(ctx, dockerCli, serviceID, opts)
 }
 
 func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
... ...
@@ -23,6 +23,8 @@ Create a new service
 Options:
       --constraint list                    Placement constraints (default [])
       --container-label list               Container labels (default [])
+  -d, --detach                             Exit immediately instead of waiting for the service to converge
+                                           (default true)
       --dns list                           Set custom DNS servers (default [])
       --dns-option list                    Set DNS options (default [])
       --dns-search list                    Set custom DNS search domains (default [])
... ...
@@ -26,6 +26,8 @@ Options:
       --constraint-rm list                 Remove a constraint (default [])
       --container-label-add list           Add or update a container label (default [])
       --container-label-rm list            Remove a container label by its key (default [])
+  -d, --detach                             Exit immediately instead of waiting for the service to converge
+                                           (default true)
       --dns-add list                       Add or update a custom DNS server (default [])
       --dns-option-add list                Add or update a DNS option (default [])
       --dns-option-rm list                 Remove a DNS option (default [])
... ...
@@ -16,7 +16,7 @@ import (
 
 func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) {
 	d := s.AddDaemon(c, true, true)
-	out, err := d.Cmd("service", "create", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach=true", "--mount", "type=volume,source=foo,target=/foo,volume-nocopy", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	id := strings.TrimSpace(out)
 
... ...
@@ -123,7 +123,7 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) {
 
 func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) {
 	d := s.AddDaemon(c, true, true)
-	out, err := d.Cmd("service", "create", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null")
+	out, err := d.Cmd("service", "create", "--detach=true", "--mount", "type=tmpfs,target=/foo,tmpfs-size=1MB", "busybox", "sh", "-c", "mount | grep foo; tail -f /dev/null")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	id := strings.TrimSpace(out)
 
... ...
@@ -31,7 +31,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) {
 	c.Check(err, check.IsNil)
 
 	serviceName := "healthServiceRun"
-	out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top")
+	out, err := d.Cmd("service", "create", "--detach=true", "--name", serviceName, imageName, "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	id := strings.TrimSpace(out)
 
... ...
@@ -92,7 +92,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) {
 	c.Check(err, check.IsNil)
 
 	serviceName := "healthServiceStart"
-	out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top")
+	out, err := d.Cmd("service", "create", "--detach=true", "--name", serviceName, imageName, "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	id := strings.TrimSpace(out)
 
... ...
@@ -1611,13 +1611,13 @@ func (s *DockerSwarmSuite) TestSwarmServicePsMultipleServiceIDs(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	name1 := "top1"
-	out, err := d.Cmd("service", "create", "--name", name1, "--replicas=3", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach=true", "--name", name1, "--replicas=3", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 	id1 := strings.TrimSpace(out)
 
 	name2 := "top2"
-	out, err = d.Cmd("service", "create", "--name", name2, "--replicas=3", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach=true", "--name", name2, "--replicas=3", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 	id2 := strings.TrimSpace(out)
... ...
@@ -1680,7 +1680,7 @@ func (s *DockerSwarmSuite) TestSwarmServicePsMultipleServiceIDs(c *check.C) {
 func (s *DockerSwarmSuite) TestSwarmPublishDuplicatePorts(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
-	out, err := d.Cmd("service", "create", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach=true", "--publish", "5005:80", "--publish", "5006:80", "--publish", "80", "--publish", "80", "busybox", "top")
 	c.Assert(err, check.IsNil, check.Commentf(out))
 	id := strings.TrimSpace(out)
 
... ...
@@ -35,6 +35,8 @@ type JSONProgress struct {
 	Current    int64 `json:"current,omitempty"`
 	Total      int64 `json:"total,omitempty"`
 	Start      int64 `json:"start,omitempty"`
+	// If true, don't show xB/yB
+	HideCounts bool `json:"hidecounts,omitempty"`
 }
 
 func (p *JSONProgress) String() string {
... ...
@@ -71,11 +73,13 @@ func (p *JSONProgress) String() string {
 		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
 	}
 
-	numbersBox = fmt.Sprintf("%8v/%v", current, total)
+	if !p.HideCounts {
+		numbersBox = fmt.Sprintf("%8v/%v", current, total)
 
-	if p.Current > p.Total {
-		// remove total display if the reported current is wonky.
-		numbersBox = fmt.Sprintf("%8v", current)
+		if p.Current > p.Total {
+			// remove total display if the reported current is wonky.
+			numbersBox = fmt.Sprintf("%8v", current)
+		}
 	}
 
 	if p.Current > 0 && p.Start > 0 && percentage < 50 {
... ...
@@ -16,6 +16,9 @@ type Progress struct {
 	Current int64
 	Total   int64
 
+	// If true, don't show xB/yB
+	HideCounts bool
+
 	// Aux contains extra information not presented to the user, such as
 	// digests for push signing.
 	Aux interface{}
... ...
@@ -125,7 +125,7 @@ func (out *progressOutput) WriteProgress(prog progress.Progress) error {
 	if prog.Message != "" {
 		formatted = out.sf.FormatStatus(prog.ID, prog.Message)
 	} else {
-		jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total}
+		jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts}
 		formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux)
 	}
 	_, err := out.out.Write(formatted)
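
The new HideCounts field travels from progress.Progress through streamformatter into jsonmessage.JSONProgress, where it suppresses the xB/yB counter that makes sense for byte transfers but not for task-state bars. A short sketch of emitting such a record with the constructors touched above (writing straight to os.Stdout is only for illustration; the CLI routes this through the pipe shown earlier):

    package main

    import (
        "os"

        "github.com/docker/docker/pkg/progress"
        "github.com/docker/docker/pkg/streamformatter"
    )

    func main() {
        // Same constructor ServiceProgress uses: a progress.Output that
        // emits JSON progress messages to the given writer.
        progressOut := streamformatter.NewJSONStreamFormatter().NewProgressOutput(os.Stdout, false)

        // One per-task bar, e.g. task 1 of 3 in the "running" state; because
        // HideCounts is set, the rendered bar omits the current/total counter.
        progressOut.WriteProgress(progress.Progress{
            ID:         "1/3",
            Action:     "running",
            Current:    9,
            Total:      9,
            HideCounts: true,
        })
    }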