Refactored the API to more easily accept new endpoints. Added REST,
client, and CLI endpoints for getting logs from a specific task. All
that is needed after this commit to enable arbitrary service log
selectors is a REST endpoint and handler.
Task logs can now be retrieved by passing a task ID to the CLI in place of
a service ID.
Signed-off-by: Drew Erny <drew.erny@docker.com>
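With this change, the experimental `docker service logs` command accepts
either kind of target, for example (IDs are illustrative):

    $ docker service logs mysvc          # logs from every task of a service
    $ docker service logs 8wsx2gdkq2pv   # logs from one specific task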
@@ -21,7 +21,7 @@ type Backend interface {
     CreateService(types.ServiceSpec, string) (*basictypes.ServiceCreateResponse, error)
     UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions) (*basictypes.ServiceUpdateResponse, error)
     RemoveService(string) error
-    ServiceLogs(context.Context, string, *backend.ContainerLogsConfig, chan struct{}) error
+    ServiceLogs(context.Context, *backend.LogSelector, *backend.ContainerLogsConfig, chan struct{}) error
     GetNodes(basictypes.NodeListOptions) ([]types.Node, error)
     GetNode(string) (types.Node, error)
     UpdateNode(string, uint64, types.NodeSpec) error
@@ -43,6 +43,7 @@ func (sr *swarmRouter) initRoutes() {
         router.NewPostRoute("/nodes/{id}/update", sr.updateNode),
         router.NewGetRoute("/tasks", sr.getTasks),
         router.NewGetRoute("/tasks/{id}", sr.getTask),
+        router.Experimental(router.Cancellable(router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs))),
         router.NewGetRoute("/secrets", sr.getSecrets),
         router.NewPostRoute("/secrets/create", sr.createSecret),
         router.NewDeleteRoute("/secrets/{id}", sr.removeSecret),
@@ -13,7 +13,6 @@ import (
     "github.com/docker/docker/api/types/backend"
     "github.com/docker/docker/api/types/filters"
     types "github.com/docker/docker/api/types/swarm"
-    "github.com/docker/docker/pkg/stdcopy"
     "golang.org/x/net/context"
 )
 
@@ -215,54 +214,28 @@ func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter,
     return nil
 }
 
-func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (sr *swarmRouter) getTaskLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
     if err := httputils.ParseForm(r); err != nil {
         return err
     }
 
-    // Args are validated before the stream starts because when it starts we're
-    // sending HTTP 200 by writing an empty chunk of data to tell the client that
-    // daemon is going to stream. By sending this initial HTTP 200 we can't report
-    // any error after the stream starts (i.e. container not found, wrong parameters)
-    // with the appropriate status code.
-    stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
-    if !(stdout || stderr) {
-        return fmt.Errorf("Bad parameters: you must choose at least one stream")
-    }
-
-    serviceName := vars["id"]
-    logsConfig := &backend.ContainerLogsConfig{
-        ContainerLogsOptions: basictypes.ContainerLogsOptions{
-            Follow:     httputils.BoolValue(r, "follow"),
-            Timestamps: httputils.BoolValue(r, "timestamps"),
-            Since:      r.Form.Get("since"),
-            Tail:       r.Form.Get("tail"),
-            ShowStdout: stdout,
-            ShowStderr: stderr,
-            Details:    httputils.BoolValue(r, "details"),
-        },
-        OutStream: w,
-    }
-
-    if logsConfig.Details {
-        return fmt.Errorf("Bad parameters: details is not currently supported")
-    }
-
-    chStarted := make(chan struct{})
-    if err := sr.backend.ServiceLogs(ctx, serviceName, logsConfig, chStarted); err != nil {
-        select {
-        case <-chStarted:
-            // The client may be expecting all of the data we're sending to
-            // be multiplexed, so send it through OutStream, which will
-            // have been set up to handle that if needed.
-            stdwriter := stdcopy.NewStdWriter(w, stdcopy.Systemerr)
-            fmt.Fprintf(stdwriter, "Error grabbing service logs: %v\n", err)
-        default:
-            return err
-        }
-    }
+    // make a selector to pass to the helper function
+    selector := &backend.LogSelector{
+        Tasks: []string{vars["id"]},
+    }
+    return sr.swarmLogs(ctx, w, r, selector)
+}
 
-    return nil
+func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+    if err := httputils.ParseForm(r); err != nil {
+        return err
+    }
+
+    // make a selector to pass to the helper function
+    selector := &backend.LogSelector{
+        Services: []string{vars["id"]},
+    }
+    return sr.swarmLogs(ctx, w, r, selector)
 }
 
 func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
new file mode 100644 |
@@ -0,0 +1,55 @@
+package swarm
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/docker/docker/api/server/httputils"
+    basictypes "github.com/docker/docker/api/types"
+    "github.com/docker/docker/api/types/backend"
+    "github.com/docker/docker/pkg/stdcopy"
+    "golang.org/x/net/context"
+)
+
+// swarmLogs takes an http response, request, and selector, and writes the logs
+// specified by the selector to the response
+func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, selector *backend.LogSelector) error {
+    // Args are validated before the stream starts because when it starts we're
+    // sending HTTP 200 by writing an empty chunk of data to tell the client that
+    // daemon is going to stream. By sending this initial HTTP 200 we can't report
+    // any error after the stream starts (i.e. container not found, wrong parameters)
+    // with the appropriate status code.
+    stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr")
+    if !(stdout || stderr) {
+        return fmt.Errorf("Bad parameters: you must choose at least one stream")
+    }
+
+    logsConfig := &backend.ContainerLogsConfig{
+        ContainerLogsOptions: basictypes.ContainerLogsOptions{
+            Follow:     httputils.BoolValue(r, "follow"),
+            Timestamps: httputils.BoolValue(r, "timestamps"),
+            Since:      r.Form.Get("since"),
+            Tail:       r.Form.Get("tail"),
+            ShowStdout: stdout,
+            ShowStderr: stderr,
+            Details:    httputils.BoolValue(r, "details"),
+        },
+        OutStream: w,
+    }
+
+    chStarted := make(chan struct{})
+    if err := sr.backend.ServiceLogs(ctx, selector, logsConfig, chStarted); err != nil {
+        select {
+        case <-chStarted:
+            // The client may be expecting all of the data we're sending to
+            // be multiplexed, so send it through OutStream, which will
+            // have been set up to handle that if needed.
+            stdwriter := stdcopy.NewStdWriter(w, stdcopy.Systemerr)
+            fmt.Fprintf(stdwriter, "Error grabbing service logs: %v\n", err)
+        default:
+            return err
+        }
+    }
+
+    return nil
+}
@@ -7948,6 +7948,86 @@ paths:
           required: true
           type: "string"
       tags: ["Task"]
+  /tasks/{id}/logs:
+    get:
+      summary: "Get task logs"
+      description: |
+        Get `stdout` and `stderr` logs from a task.
+
+        **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+      operationId: "TaskLogs"
+      produces:
+        - "application/vnd.docker.raw-stream"
+        - "application/json"
+      responses:
+        101:
+          description: "logs returned as a stream"
+          schema:
+            type: "string"
+            format: "binary"
+        200:
+          description: "logs returned as a string in response body"
+          schema:
+            type: "string"
+        404:
+          description: "no such task"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such task: c2ada9df5af8"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          description: "ID of the task"
+          type: "string"
+        - name: "details"
+          in: "query"
+          description: "Show extra details provided to logs."
+          type: "boolean"
+          default: false
+        - name: "follow"
+          in: "query"
+          description: |
+            Return the logs as a stream.
+
+            This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+          type: "boolean"
+          default: false
+        - name: "stdout"
+          in: "query"
+          description: "Return logs from `stdout`"
+          type: "boolean"
+          default: false
+        - name: "stderr"
+          in: "query"
+          description: "Return logs from `stderr`"
+          type: "boolean"
+          default: false
+        - name: "since"
+          in: "query"
+          description: "Only return logs since this time, as a UNIX timestamp"
+          type: "integer"
+          default: 0
+        - name: "timestamps"
+          in: "query"
+          description: "Add timestamps to every log line"
+          type: "boolean"
+          default: false
+        - name: "tail"
+          in: "query"
+          description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+          type: "string"
+          default: "all"
   /secrets:
     get:
      summary: "List secrets"
@@ -32,6 +32,16 @@ type ContainerLogsConfig struct {
     OutStream io.Writer
 }
 
+// LogSelector is a list of services and tasks that should be returned as part
+// of a log stream. It is similar to swarmapi.LogSelector, with the difference
+// that the names don't have to be resolved to IDs; this is mostly to avoid
+// accidents later where a swarmapi LogSelector might have been incorrectly
+// used verbatim (and to avoid the handler having to import swarmapi types).
+type LogSelector struct {
+    Services []string
+    Tasks    []string
+}
+
 // ContainerStatsConfig holds information for configuring the runtime
 // behavior of a backend.ContainerStats() call.
 type ContainerStatsConfig struct {
@@ -30,9 +30,14 @@ type logsOptions struct {
     timestamps bool
     tail       string
 
-    service string
+    target string
 }
 
+// TODO(dperny): the whole CLI for this is kind of a mess IMHOIRL and it needs
+// to be refactored aggressively. There may be changes to the implementation of
+// details, which will need to be reflected in this code. The refactoring
+// should be put off until we make those changes, though, because I think the
+// decisions made WRT details will impact the design of the CLI.
 func newLogsCommand(dockerCli *command.DockerCli) *cobra.Command {
     var opts logsOptions
 
@@ -41,16 +46,16 @@ func newLogsCommand(dockerCli *command.DockerCli) *cobra.Command {
         Short: "Fetch the logs of a service",
         Args:  cli.ExactArgs(1),
         RunE: func(cmd *cobra.Command, args []string) error {
-            opts.service = args[0]
+            opts.target = args[0]
             return runLogs(dockerCli, &opts)
         },
         Tags: map[string]string{"experimental": ""},
     }
 
     flags := cmd.Flags()
-    flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names")
+    flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names in output")
     flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate output")
-    flags.BoolVar(&opts.noTaskIDs, "no-task-ids", false, "Do not include task IDs")
+    flags.BoolVar(&opts.noTaskIDs, "no-task-ids", false, "Do not include task IDs in output")
     flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output")
     flags.StringVar(&opts.since, "since", "", "Show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)")
     flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps")
@@ -70,28 +75,44 @@ func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
         Tail:       opts.tail,
     }
 
-    client := dockerCli.Client()
+    cli := dockerCli.Client()
 
-    service, _, err := client.ServiceInspectWithRaw(ctx, opts.service)
-    if err != nil {
-        return err
-    }
+    var (
+        maxLength    = 1
+        responseBody io.ReadCloser
+    )
 
-    responseBody, err := client.ServiceLogs(ctx, opts.service, options)
+    service, _, err := cli.ServiceInspectWithRaw(ctx, opts.target)
     if err != nil {
-        return err
+        // if it's any error other than service not found, it's Real
+        if !client.IsErrServiceNotFound(err) {
+            return err
+        }
+        task, _, err := cli.TaskInspectWithRaw(ctx, opts.target)
+        if err != nil {
+            if client.IsErrTaskNotFound(err) {
+                // if the task ALSO isn't found, rewrite the error to be clear
+                // that we looked for services AND tasks
+                err = fmt.Errorf("No such task or service")
+            }
+            return err
+        }
+        maxLength = getMaxLength(task.Slot)
+        responseBody, err = cli.TaskLogs(ctx, opts.target, options)
+    } else {
+        responseBody, err = cli.ServiceLogs(ctx, opts.target, options)
+        if err != nil {
+            return err
+        }
+        if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
+            // if replicas are initialized, figure out if we need to pad them
+            replicas := *service.Spec.Mode.Replicated.Replicas
+            maxLength = getMaxLength(int(replicas))
+        }
     }
     defer responseBody.Close()
 
-    var replicas uint64
-    padding := 1
-    if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
-        // if replicas are initialized, figure out if we need to pad them
-        replicas = *service.Spec.Mode.Replicated.Replicas
-        padding = len(strconv.FormatUint(replicas, 10))
-    }
-
-    taskFormatter := newTaskFormatter(client, opts, padding)
+    taskFormatter := newTaskFormatter(cli, opts, maxLength)
 
     stdout := &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: dockerCli.Out()}
     stderr := &logWriter{ctx: ctx, opts: opts, f: taskFormatter, w: dockerCli.Err()}
@@ -101,6 +122,11 @@ func runLogs(dockerCli *command.DockerCli, opts *logsOptions) error {
     return err
 }
 
+// getMaxLength gets the maximum length of the number in base 10
+func getMaxLength(i int) int {
+    return len(strconv.FormatInt(int64(i), 10))
+}
+
 type taskFormatter struct {
     client client.APIClient
     opts   *logsOptions
@@ -148,7 +174,8 @@ func (f *taskFormatter) format(ctx context.Context, logCtx logContext) (string,
             taskName += fmt.Sprintf(".%s", stringid.TruncateID(task.ID))
         }
     }
-    padding := strings.Repeat(" ", f.padding-len(strconv.FormatInt(int64(task.Slot), 10)))
+
+    padding := strings.Repeat(" ", f.padding-getMaxLength(task.Slot))
     formatted := fmt.Sprintf("%s@%s%s", taskName, nodeName, padding)
     f.cache[logCtx] = formatted
     return formatted, nil
@@ -128,6 +128,7 @@ type ServiceAPIClient interface {
     ServiceRemove(ctx context.Context, serviceID string) error
     ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
     ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+    TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
     TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
     TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
 }
new file mode 100644 |
@@ -0,0 +1,52 @@
+package client
+
+import (
+    "io"
+    "net/url"
+    "time"
+
+    "golang.org/x/net/context"
+
+    "github.com/docker/docker/api/types"
+    timetypes "github.com/docker/docker/api/types/time"
+)
+
+// TaskLogs returns the logs generated by a task in an io.ReadCloser.
+// It's up to the caller to close the stream.
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+    query := url.Values{}
+    if options.ShowStdout {
+        query.Set("stdout", "1")
+    }
+
+    if options.ShowStderr {
+        query.Set("stderr", "1")
+    }
+
+    if options.Since != "" {
+        ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+        if err != nil {
+            return nil, err
+        }
+        query.Set("since", ts)
+    }
+
+    if options.Timestamps {
+        query.Set("timestamps", "1")
+    }
+
+    if options.Details {
+        query.Set("details", "1")
+    }
+
+    if options.Follow {
+        query.Set("follow", "1")
+    }
+    query.Set("tail", options.Tail)
+
+    resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
+    if err != nil {
+        return nil, err
+    }
+    return resp.body, nil
+}
@@ -303,24 +303,32 @@ func (c *Cluster) RemoveService(input string) error {
 }
 
 // ServiceLogs collects service logs and writes them back to `config.OutStream`
-func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend.ContainerLogsConfig, started chan struct{}) error {
+func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *backend.ContainerLogsConfig, started chan struct{}) error {
     c.mu.RLock()
+    defer func() {
+        select {
+        case <-started:
+            // if we've started streaming logs, we are no longer holding the
+            // lock and do not have to release it
+            return
+        default:
+            // before we start, though, we're holding this lock and it needs to
+            // be released
+            c.mu.RUnlock()
+        }
+    }()
     state := c.currentNodeState()
     if !state.IsActiveManager() {
-        c.mu.RUnlock()
         return c.errNoManager(state)
     }
 
-    service, err := getService(ctx, state.controlClient, input)
+    swarmSelector, tty, err := convertSelector(ctx, state.controlClient, selector)
     if err != nil {
-        c.mu.RUnlock()
-        return err
-    }
-    container := service.Spec.Task.GetContainer()
-    if container == nil {
-        return errors.New("service logs only supported for container tasks")
+        return errors.Wrap(err, "error making log selector")
     }
-    if container.TTY {
+
+    // TODO(dperny): this goes away when we support TTY logs, which is in the works
+    if tty {
         return errors.New("service logs not supported on tasks with a TTY attached")
     }
 
@@ -335,7 +343,7 @@ func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend
 
     // Get tail value squared away - the number of previous log lines we look at
     var tail int64
-    if config.Tail == "all" {
+    if config.Tail == "all" || config.Tail == "" {
         // tail of 0 means send all logs on the swarmkit side
         tail = 0
     } else {
@@ -372,9 +380,7 @@ func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend
     }
 
     stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{
-        Selector: &swarmapi.LogSelector{
-            ServiceIDs: []string{service.ID},
-        },
+        Selector: swarmSelector,
         Options: &swarmapi.LogSubscriptionOptions{
             Follow:  config.Follow,
             Streams: stdStreams,
@@ -383,20 +389,26 @@ func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend
         },
     })
     if err != nil {
-        c.mu.RUnlock()
         return err
     }
 
     wf := ioutils.NewWriteFlusher(config.OutStream)
     defer wf.Close()
+
+    // Release the lock before starting the stream.
+    //
+    // This feels like it could be racy, because we would double-unlock if we
+    // somehow returned right after we unlocked but before we closed, but I do
+    // not think such a thing is possible. I wish it were possible to atomically
+    // close and unlock, but that might be overkill. Programming is hard.
+    c.mu.RUnlock()
     close(started)
+
     wf.Flush()
 
     outStream := stdcopy.NewStdWriter(wf, stdcopy.Stdout)
     errStream := stdcopy.NewStdWriter(wf, stdcopy.Stderr)
 
-    // Release the lock before starting the stream.
-    c.mu.RUnlock()
     for {
         // Check the context before doing anything.
         select {
@@ -442,6 +454,43 @@ func (c *Cluster) ServiceLogs(ctx context.Context, input string, config *backend
     }
 }
 
+// convertSelector takes a backend.LogSelector, which contains raw names that
+// may or may not be valid, and converts it to an api.LogSelector proto. It
+// also returns a boolean that is true if any of the services use a TTY (false
+// otherwise), and an error if something fails.
+func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, bool, error) {
+    // if ANY tasks use a TTY, don't mux streams
+    var tty bool
+    // don't rely on swarmkit to resolve IDs, do it ourselves
+    swarmSelector := &swarmapi.LogSelector{}
+    for _, s := range selector.Services {
+        service, err := getService(ctx, cc, s)
+        if err != nil {
+            return nil, false, err
+        }
+        c := service.Spec.Task.GetContainer()
+        if c == nil {
+            return nil, false, errors.New("logs only supported on container tasks")
+        }
+        // set TTY true if we have a TTY service, or if it's already true
+        tty = tty || c.TTY
+        swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID)
+    }
+    for _, t := range selector.Tasks {
+        task, err := getTask(ctx, cc, t)
+        if err != nil {
+            return nil, false, err
+        }
+        c := task.Spec.GetContainer()
+        if c == nil {
+            return nil, false, errors.New("logs only supported on container tasks")
+        }
+        tty = tty || c.TTY
+        swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID)
+    }
+    return swarmSelector, tty, nil
+}
+
 // imageWithDigestString takes an image such as name or name:tag
 // and returns the image pinned to a digest, such as name@sha256:34234
 func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) {
@@ -12,6 +12,7 @@ import (
 
     "github.com/docker/docker/integration-cli/checker"
     "github.com/docker/docker/integration-cli/daemon"
+    icmd "github.com/docker/docker/pkg/testutil/cmd"
     "github.com/go-check/check"
 )
 
@@ -56,10 +57,10 @@ func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) {
 // output.
 func countLogLines(d *daemon.Swarm, name string) func(*check.C) (interface{}, check.CommentInterface) {
     return func(c *check.C) (interface{}, check.CommentInterface) {
-        out, err := d.Cmd("service", "logs", "-t", name)
-        c.Assert(err, checker.IsNil)
-        lines := strings.Split(strings.TrimSpace(out), "\n")
-        return len(lines), check.Commentf("output, %q", string(out))
+        result := icmd.RunCmd(d.Command("service", "logs", "-t", name))
+        result.Assert(c, icmd.Expected{})
+        lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n")
+        return len(lines), check.Commentf("output, %q", string(result.Stdout()))
     }
 }
 
@@ -70,7 +71,7 @@ func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *check.C) {
     name := "TestServiceLogsCompleteness"
 
     // make a service that prints 6 lines
-    out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000")
+    out, err := d.Cmd("service", "create", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000")
     c.Assert(err, checker.IsNil)
     c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -79,22 +80,15 @@ func (s *DockerSwarmSuite) TestServiceLogsCompleteness(c *check.C) {
     // and make sure we have all the log lines
     waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6)
 
-    args := []string{"service", "logs", name}
-    cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...)
-    r, w := io.Pipe()
-    cmd.Stdout = w
-    cmd.Stderr = w
-    c.Assert(cmd.Start(), checker.IsNil)
+    out, err = d.Cmd("service", "logs", name)
+    c.Assert(err, checker.IsNil)
+    lines := strings.Split(strings.TrimSpace(out), "\n")
 
-    reader := bufio.NewReader(r)
     // I have heard anecdotal reports that logs may come back from the engine
     // mis-ordered. If this test fails, consider the possibility that that
     // might be occurring.
-    for i := 1; i <= 6; i++ {
-        msg := &logMessage{}
-        msg.data, _, msg.err = reader.ReadLine()
-        c.Assert(msg.err, checker.IsNil)
-        c.Assert(string(msg.data), checker.Contains, fmt.Sprintf("log test %v", i))
+    for i, line := range lines {
+        c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i))
     }
 }
 
@@ -113,21 +107,13 @@ func (s *DockerSwarmSuite) TestServiceLogsTail(c *check.C) {
     waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
     waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6)
 
-    args := []string{"service", "logs", "--tail=2", name}
-    cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...)
-    r, w := io.Pipe()
-    cmd.Stdout = w
-    cmd.Stderr = w
-    c.Assert(cmd.Start(), checker.IsNil)
+    out, err = d.Cmd("service", "logs", "--tail=2", name)
+    c.Assert(err, checker.IsNil)
+    lines := strings.Split(strings.TrimSpace(out), "\n")
 
-    reader := bufio.NewReader(r)
-    // see TestServiceLogsCompleteness for comments about logs being well-
-    // ordered, if this flakes
-    for i := 5; i <= 6; i++ {
-        msg := &logMessage{}
-        msg.data, _, msg.err = reader.ReadLine()
-        c.Assert(msg.err, checker.IsNil)
-        c.Assert(string(msg.data), checker.Contains, fmt.Sprintf("log test %v", i))
+    for i, line := range lines {
+        // doing i+5 is hacky but not too fragile; it's good enough. If it
+        // flakes, something else is wrong.
+        c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i+5))
     }
 }
 
@@ -213,3 +199,58 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) {
 
     c.Assert(cmd.Process.Kill(), checker.IsNil)
 }
+
+func (s *DockerSwarmSuite) TestServiceLogsTaskLogs(c *check.C) {
+    testRequires(c, ExperimentalDaemon)
+
+    d := s.AddDaemon(c, true, true)
+
+    name := "TestServiceLogsTaskLogs"
+    replicas := 2
+
+    result := icmd.RunCmd(d.Command(
+        // create a service with the name
+        "service", "create", "--name", name,
+        // which has some number of replicas
+        fmt.Sprintf("--replicas=%v", replicas),
+        // which has the task ID as an environment variable, templated in
+        "--env", "TASK={{.Task.ID}}",
+        // and runs this command to print exactly 6 log lines
+        "busybox", "sh", "-c", "for line in $(seq 0 5); do echo $TASK log test $line; done; sleep 100000",
+    ))
+    result.Assert(c, icmd.Expected{})
+    // ^^ verify that we get no error
+    // then verify that we have an id in stdout
+    id := strings.TrimSpace(result.Stdout())
+    c.Assert(id, checker.Not(checker.Equals), "")
+    // so, right here, we're basically inspecting by id and returning only
+    // the ID. if they don't match, the service doesn't exist.
+    result = icmd.RunCmd(d.Command("service", "inspect", "--format=\"{{.ID}}\"", id))
+    result.Assert(c, icmd.Expected{Out: id})
+
+    // make sure the tasks have been deployed
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas)
+    waitAndAssert(c, defaultReconciliationTimeout, countLogLines(d, name), checker.Equals, 6*replicas)
+
+    // get the task ids
+    result = icmd.RunCmd(d.Command("service", "ps", "-q", name))
+    result.Assert(c, icmd.Expected{})
+    // make sure we have two tasks
+    taskIDs := strings.Split(strings.TrimSpace(result.Stdout()), "\n")
+    c.Assert(taskIDs, checker.HasLen, replicas)
+
+    for _, taskID := range taskIDs {
+        c.Logf("checking task %v", taskID)
+        result := icmd.RunCmd(d.Command("service", "logs", taskID))
+        result.Assert(c, icmd.Expected{})
+        lines := strings.Split(strings.TrimSpace(result.Stdout()), "\n")
+
+        c.Logf("checking messages for %v", taskID)
+        for i, line := range lines {
+            // make sure the message is in order
+            c.Assert(line, checker.Contains, fmt.Sprintf("log test %v", i))
+            // make sure it contains the task id
+            c.Assert(line, checker.Contains, taskID)
+        }
+    }
+}
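
For reference, a minimal sketch (not part of this commit) of consuming the new
TaskLogs client method from Go. It assumes an experimental daemon reachable
through the standard environment variables; the task ID is illustrative.
Because the handler rejects tasks with a TTY attached, the returned stream is
always multiplexed, hence the stdcopy demux at the end:

    package main

    import (
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "github.com/docker/docker/pkg/stdcopy"
        "golang.org/x/net/context"
    )

    func main() {
        // NewEnvClient configures the client from DOCKER_HOST and friends.
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }

        // Fetch the logs of a single task; the task ID here is illustrative.
        rc, err := cli.TaskLogs(context.Background(), "8wsx2gdkq2pv", types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Tail:       "all",
        })
        if err != nil {
            panic(err)
        }
        defer rc.Close()

        // The stream is multiplexed (no TTY), so demux it onto our own
        // stdout and stderr.
        if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
            panic(err)
        }
    }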