Signed-off-by: Antonio Murdaca <me@runcom.ninja>
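This patch converts the engine job handlers from the custom engine.Status return type to Go's standard error: job.Errorf(...) becomes fmt.Errorf(...), job.Error(err) becomes a plain return err, and engine.StatusOK becomes return nil. As a rough before/after sketch of the pattern applied throughout (illustrative only, not lifted from the patch; the Handler definition mentioned below is an assumption about the accompanying engine change):

// Before: a handler reported failure through job helpers and a status value.
func inspect(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("usage: %s NAME", job.Name)
	}
	return engine.StatusOK
}

// After: a handler is an ordinary error-returning function,
// presumably matching something like: type Handler func(*engine.Job) error
func inspect(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("usage: %s NAME", job.Name)
	}
	return nil
}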
@@ -1097,7 +1097,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter
 		select {
 		case <-finished:
 		case <-closeNotifier.CloseNotify():
-			log.Infof("Client disconnected, cancelling job: %v", job)
+			log.Infof("Client disconnected, cancelling job: %s", job.Name)
 			job.Cancel()
 		}
 	}()
@@ -1581,9 +1581,9 @@ type Server interface {
 
 // ServeApi loops through all of the protocols sent in to docker and spawns
 // off a go routine to setup a serving http.Server for each.
-func ServeApi(job *engine.Job) engine.Status {
+func ServeApi(job *engine.Job) error {
 	if len(job.Args) == 0 {
-		return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+		return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
 	}
 	var (
 		protoAddrs = job.Args
@@ -1594,7 +1594,7 @@ func ServeApi(job *engine.Job) engine.Status {
 	for _, protoAddr := range protoAddrs {
 		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
 		if len(protoAddrParts) != 2 {
-			return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
+			return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name)
 		}
 		go func() {
 			log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1])
@@ -1618,9 +1618,9 @@ func ServeApi(job *engine.Job) engine.Status {
 	for i := 0; i < len(protoAddrs); i++ {
 		err := <-chErrors
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
-	return engine.StatusOK
+	return nil
 }
@@ -90,7 +90,7 @@ func serveFd(addr string, job *engine.Job) error {
 }
 
 // Called through eng.Job("acceptconnections")
-func AcceptConnections(job *engine.Job) engine.Status {
+func AcceptConnections(job *engine.Job) error {
 	// Tell the init daemon we are accepting requests
 	go systemd.SdNotify("READY=1")
 
@@ -99,5 +99,5 @@ func AcceptConnections(job *engine.Job) engine.Status {
 		close(activationLock)
 	}
 
-	return engine.StatusOK
+	return nil
 }
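For callers nothing structural changes: handlers are still registered on the engine and run as jobs, and running a job still reports failure as an error. A minimal caller-side sketch under the new convention (the job name reuse and the logrus fallback are illustrative, not part of this patch):

eng := engine.New()
if err := eng.Register("acceptconnections", AcceptConnections); err != nil {
	log.Fatal(err)
}

// Running the job now returns the handler's error directly.
if err := eng.Job("acceptconnections").Run(); err != nil {
	log.Fatalf("acceptconnections: %v", err)
}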
@@ -63,7 +63,7 @@ func TesthttpError(t *testing.T) {
 func TestGetVersion(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("version", func(job *engine.Job) engine.Status {
+	eng.Register("version", func(job *engine.Job) error {
 		called = true
 		v := &engine.Env{}
 		v.SetJson("Version", "42.1")
@@ -72,9 +72,9 @@ func TestGetVersion(t *testing.T) {
 		v.Set("Os", "Linux")
 		v.Set("Arch", "x86_64")
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/version", nil, eng, t)
 	if !called {
@@ -92,15 +92,15 @@ func TestGetVersion(t *testing.T) {
 func TestGetInfo(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("info", func(job *engine.Job) engine.Status {
+	eng.Register("info", func(job *engine.Job) error {
 		called = true
 		v := &engine.Env{}
 		v.SetInt("Containers", 1)
 		v.SetInt("Images", 42000)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/info", nil, eng, t)
 	if !called {
@@ -119,13 +119,13 @@ func TestGetInfo(t *testing.T) {
 func TestGetImagesJSON(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		called = true
 		v := createEnvFromGetImagesJSONStruct(sampleImage)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/images/json", nil, eng, t)
 	if !called {
@@ -145,9 +145,9 @@ func TestGetImagesJSON(t *testing.T) {
 func TestGetImagesJSONFilter(t *testing.T) {
 	eng := engine.New()
 	filter := "nothing"
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		filter = job.Getenv("filter")
-		return engine.StatusOK
+		return nil
 	})
 	serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t)
 	if filter != "aaaa" {
@@ -158,9 +158,9 @@ func TestGetImagesJSONFilter(t *testing.T) {
 func TestGetImagesJSONFilters(t *testing.T) {
 	eng := engine.New()
 	filter := "nothing"
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		filter = job.Getenv("filters")
-		return engine.StatusOK
+		return nil
 	})
 	serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t)
 	if filter != "nnnn" {
@@ -171,9 +171,9 @@ func TestGetImagesJSONFilters(t *testing.T) {
 func TestGetImagesJSONAll(t *testing.T) {
 	eng := engine.New()
 	allFilter := "-1"
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		allFilter = job.Getenv("all")
-		return engine.StatusOK
+		return nil
 	})
 	serveRequest("GET", "/images/json?all=1", nil, eng, t)
 	if allFilter != "1" {
@@ -184,14 +184,14 @@ func TestGetImagesJSONAll(t *testing.T) {
 func TestGetImagesJSONLegacyFormat(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("images", func(job *engine.Job) engine.Status {
+	eng.Register("images", func(job *engine.Job) error {
 		called = true
 		outsLegacy := engine.NewTable("Created", 0)
 		outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage))
 		if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t)
 	if !called {
@@ -219,7 +219,7 @@ func TestGetContainersByName(t *testing.T) {
 	eng := engine.New()
 	name := "container_name"
 	var called bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("container_inspect", func(job *engine.Job) error {
 		called = true
 		if job.Args[0] != name {
 			t.Errorf("name != '%s': %#v", name, job.Args[0])
@@ -232,9 +232,9 @@ func TestGetContainersByName(t *testing.T) {
 		v := &engine.Env{}
 		v.SetBool("dirty", true)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t)
 	if !called {
@@ -253,7 +253,7 @@ func TestGetContainersByName(t *testing.T) {
 func TestGetEvents(t *testing.T) {
 	eng := engine.New()
 	var called bool
-	eng.Register("events", func(job *engine.Job) engine.Status {
+	eng.Register("events", func(job *engine.Job) error {
 		called = true
 		since := job.Getenv("since")
 		if since != "1" {
@@ -267,9 +267,9 @@ func TestGetEvents(t *testing.T) {
 		v.Set("since", since)
 		v.Set("until", until)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
 	if !called {
@@ -295,7 +295,7 @@ func TestLogs(t *testing.T) {
 	eng := engine.New()
 	var inspect bool
 	var logs bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("container_inspect", func(job *engine.Job) error {
 		inspect = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -303,10 +303,10 @@ func TestLogs(t *testing.T) {
 		if job.Args[0] != "test" {
 			t.Fatalf("Container name %s, must be test", job.Args[0])
 		}
-		return engine.StatusOK
+		return nil
 	})
 	expected := "logs"
-	eng.Register("logs", func(job *engine.Job) engine.Status {
+	eng.Register("logs", func(job *engine.Job) error {
 		logs = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -331,7 +331,7 @@ func TestLogs(t *testing.T) {
 			t.Fatalf("timestamps %s, must be 1", timestamps)
 		}
 		job.Stdout.Write([]byte(expected))
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
 	if r.Code != http.StatusOK {
@@ -353,7 +353,7 @@ func TestLogsNoStreams(t *testing.T) {
 	eng := engine.New()
 	var inspect bool
 	var logs bool
-	eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("container_inspect", func(job *engine.Job) error {
 		inspect = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -361,11 +361,11 @@ func TestLogsNoStreams(t *testing.T) {
 		if job.Args[0] != "test" {
 			t.Fatalf("Container name %s, must be test", job.Args[0])
 		}
-		return engine.StatusOK
+		return nil
 	})
-	eng.Register("logs", func(job *engine.Job) engine.Status {
+	eng.Register("logs", func(job *engine.Job) error {
 		logs = true
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
 	if r.Code != http.StatusBadRequest {
@@ -388,7 +388,7 @@ func TestGetImagesHistory(t *testing.T) {
 	eng := engine.New()
 	imageName := "docker-test-image"
 	var called bool
-	eng.Register("history", func(job *engine.Job) engine.Status {
+	eng.Register("history", func(job *engine.Job) error {
 		called = true
 		if len(job.Args) == 0 {
 			t.Fatal("Job arguments is empty")
@@ -398,9 +398,9 @@ func TestGetImagesHistory(t *testing.T) {
 		}
 		v := &engine.Env{}
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
 	if !called {
@@ -418,7 +418,7 @@ func TestGetImagesByName(t *testing.T) {
 	eng := engine.New()
 	name := "image_name"
 	var called bool
-	eng.Register("image_inspect", func(job *engine.Job) engine.Status {
+	eng.Register("image_inspect", func(job *engine.Job) error {
 		called = true
 		if job.Args[0] != name {
 			t.Fatalf("name != '%s': %#v", name, job.Args[0])
@@ -431,9 +431,9 @@ func TestGetImagesByName(t *testing.T) {
 		v := &engine.Env{}
 		v.SetBool("dirty", true)
 		if _, err := v.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t)
 	if !called {
@@ -455,7 +455,7 @@ func TestDeleteContainers(t *testing.T) {
 	eng := engine.New()
 	name := "foo"
 	var called bool
-	eng.Register("rm", func(job *engine.Job) engine.Status {
+	eng.Register("rm", func(job *engine.Job) error {
 		called = true
 		if len(job.Args) == 0 {
 			t.Fatalf("Job arguments is empty")
@@ -463,7 +463,7 @@ func TestDeleteContainers(t *testing.T) {
 		if job.Args[0] != name {
 			t.Fatalf("name != '%s': %#v", name, job.Args[0])
 		}
-		return engine.StatusOK
+		return nil
 	})
 	r := serveRequest("DELETE", "/containers/"+name, nil, eng, t)
 	if !called {
@@ -3,6 +3,7 @@ package builder
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -44,9 +45,9 @@ func (b *BuilderJob) Install() {
 	b.Engine.Register("build_config", b.CmdBuildConfig)
 }
 
-func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
+func (b *BuilderJob) CmdBuild(job *engine.Job) error {
 	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
+		return fmt.Errorf("Usage: %s\n", job.Name)
 	}
 	var (
 		dockerfileName = job.Getenv("dockerfile")
@@ -73,11 +74,11 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 	repoName, tag = parsers.ParseRepositoryTag(repoName)
 	if repoName != "" {
 		if err := registry.ValidateRepositoryName(repoName); err != nil {
-			return job.Error(err)
+			return err
 		}
 		if len(tag) > 0 {
 			if err := graph.ValidateTagName(tag); err != nil {
-				return job.Error(err)
+				return err
 			}
 		}
 	}
@@ -90,28 +91,28 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 		}
 		root, err := ioutil.TempDir("", "docker-build-git")
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer os.RemoveAll(root)
 
 		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
-			return job.Errorf("Error trying to use git: %s (%s)", err, output)
+			return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
 		}
 
 		c, err := archive.Tar(root, archive.Uncompressed)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		context = c
 	} else if urlutil.IsURL(remoteURL) {
 		f, err := utils.Download(remoteURL)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer f.Body.Close()
 		dockerFile, err := ioutil.ReadAll(f.Body)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		// When we're downloading just a Dockerfile put it in
@@ -120,7 +121,7 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 
 		c, err := archive.Generate(dockerfileName, string(dockerFile))
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		context = c
 	}
@@ -158,18 +159,18 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) engine.Status {
 
 	id, err := builder.Run(context)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if repoName != "" {
 		b.Daemon.Repositories().Set(repoName, tag, id, true)
 	}
-	return engine.StatusOK
+	return nil
 }
 
-func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
+func (b *BuilderJob) CmdBuildConfig(job *engine.Job) error {
 	if len(job.Args) != 0 {
-		return job.Errorf("Usage: %s\n", job.Name)
+		return fmt.Errorf("Usage: %s\n", job.Name)
 	}
 
 	var (
@@ -178,18 +179,18 @@ func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
 	)
 
 	if err := job.GetenvJson("config", &newConfig); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	// ensure that the commands are valid
 	for _, n := range ast.Children {
 		if !validCommitCommands[n.Value] {
-			return job.Errorf("%s is not a valid change command", n.Value)
+			return fmt.Errorf("%s is not a valid change command", n.Value)
 		}
 	}
 
@@ -204,12 +205,12 @@ func (b *BuilderJob) CmdBuildConfig(job *engine.Job) engine.Status {
 
 	for i, n := range ast.Children {
 		if err := builder.dispatch(i, n); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
 	if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
@@ -57,7 +57,7 @@ func daemon(eng *engine.Engine) error {
 }
 
 // builtins jobs independent of any subsystem
-func dockerVersion(job *engine.Job) engine.Status {
+func dockerVersion(job *engine.Job) error {
 	v := &engine.Env{}
 	v.SetJson("Version", dockerversion.VERSION)
 	v.SetJson("ApiVersion", api.APIVERSION)
@@ -69,7 +69,7 @@ func dockerVersion(job *engine.Job) engine.Status {
 		v.Set("KernelVersion", kernelVersion.String())
 	}
 	if _, err := v.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
@@ -2,6 +2,7 @@ package daemon
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"os"
 	"sync"
@@ -14,9 +15,9 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerAttach(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
 	}
 
 	var (
@@ -30,7 +31,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	//logs
@@ -108,7 +109,7 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status {
 			container.WaitStop(-1 * time.Second)
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 func (daemon *Daemon) Attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
@@ -1,37 +1,39 @@
 package daemon
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerChanges(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s CONTAINER", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER", job.Name)
 	}
 	name := job.Args[0]
 
-	container, error := daemon.Get(name)
-	if error != nil {
-		return job.Error(error)
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
 	}
 
 	outs := engine.NewTable("", 0)
 	changes, err := container.Changes()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	for _, change := range changes {
 		out := &engine.Env{}
 		if err := out.Import(change); err != nil {
-			return job.Error(err)
+			return err
 		}
 		outs.Add(out)
 	}
 
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
@@ -3,21 +3,22 @@ package daemon
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerCommit(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
 	}
 	name := job.Args[0]
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	var (
@@ -33,22 +34,22 @@ func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status {
 	buildConfigJob.Setenv("config", job.Getenv("config"))
 
 	if err := buildConfigJob.Run(); err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if err := runconfig.Merge(&newConfig, config); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	job.Printf("%s\n", img.ID)
-	return engine.StatusOK
+	return nil
 }
 
 // Commit creates a new filesystem image from the current state of a container.
@@ -1,14 +1,15 @@
 package daemon
 
 import (
+	"fmt"
 	"io"
 
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerCopy(job *engine.Job) error {
 	if len(job.Args) != 2 {
-		return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
 	}
 
 	var (
@@ -18,17 +19,17 @@ func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	data, err := container.Copy(resource)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer data.Close()
 
 	if _, err := io.Copy(job.Stdout, data); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"strings"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/image"
@@ -12,36 +13,36 @@ import (
 	"github.com/docker/libcontainer/label"
 )
 
-func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerCreate(job *engine.Job) error {
 	var name string
 	if len(job.Args) == 1 {
 		name = job.Args[0]
 	} else if len(job.Args) > 1 {
-		return job.Errorf("Usage: %s", job.Name)
+		return fmt.Errorf("Usage: %s", job.Name)
 	}
 
 	config := runconfig.ContainerConfigFromJob(job)
 	hostConfig := runconfig.ContainerHostConfigFromJob(job)
 
 	if len(hostConfig.LxcConf) > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") {
-		return job.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
+		return fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name())
 	}
 	if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 {
-		return job.Errorf("Minimum memory limit allowed is 4MB")
+		return fmt.Errorf("Minimum memory limit allowed is 4MB")
 	}
 	if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit {
-		job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
+		log.Printf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
 		hostConfig.Memory = 0
 	}
 	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit {
-		job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
+		log.Printf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
 		hostConfig.MemorySwap = -1
 	}
 	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
-		return job.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
+		return fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n")
 	}
 	if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 {
-		return job.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
+		return fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n")
 	}
 
 	container, buildWarnings, err := daemon.Create(config, hostConfig, name)
@@ -51,22 +52,22 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) engine.Status {
 			if tag == "" {
 				tag = graph.DEFAULTTAG
 			}
-			return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
+			return fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag)
 		}
-		return job.Error(err)
+		return err
 	}
 	if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled {
-		job.Errorf("IPv4 forwarding is disabled.\n")
+		log.Printf("IPv4 forwarding is disabled.\n")
 	}
 	container.LogEvent("create")
 
 	job.Printf("%s\n", container.ID)
 
 	for _, warning := range buildWarnings {
-		job.Errorf("%s\n", warning)
+		log.Printf("%s\n", warning)
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 // Create creates a new container from the given configuration with a given name.
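One pattern in ContainerCreate above differs from the rest: the job.Errorf calls that were never returned were warnings, not failures, and they are now sent to the daemon log through logrus (imported as log) rather than being written through the job. A condensed sketch of that substitution (trimmed from the hunk above, for illustration only):

// before: warning emitted through the job's error helper, without returning
job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
hostConfig.Memory = 0

// after: warning goes to the daemon log; the create request itself still proceeds
log.Printf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
hostConfig.Memory = 0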
@@ -9,9 +9,9 @@ import (
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerRm(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
 	}
 	name := job.Args[0]
 	removeVolume := job.GetenvBool("removeVolume")
@@ -20,21 +20,23 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if removeLink {
 		name, err := GetFullContainerName(name)
 		if err != nil {
-			job.Error(err)
+			return err
+			// TODO: why was just job.Error(err) without return if the function cannot continue w/o container name?
+			//job.Error(err)
 		}
 		parent, n := path.Split(name)
 		if parent == "/" {
-			return job.Errorf("Conflict, cannot remove the default name of the container")
+			return fmt.Errorf("Conflict, cannot remove the default name of the container")
 		}
 		pe := daemon.ContainerGraph().Get(parent)
 		if pe == nil {
-			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+			return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
 		}
 		parentContainer, _ := daemon.Get(pe.ID())
 
@@ -43,9 +45,9 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 		}
 
 		if err := daemon.ContainerGraph().Delete(name); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	}
 
 	if container != nil {
@@ -55,21 +57,21 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status {
 		if container.IsRunning() {
 			if forceRemove {
 				if err := container.Kill(); err != nil {
-					return job.Errorf("Could not kill running container, cannot remove - %v", err)
+					return fmt.Errorf("Could not kill running container, cannot remove - %v", err)
 				}
 			} else {
-				return job.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
+				return fmt.Errorf("Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f")
 			}
 		}
 		if err := daemon.Rm(container); err != nil {
-			return job.Errorf("Cannot destroy container %s: %s", name, err)
+			return fmt.Errorf("Cannot destroy container %s: %s", name, err)
 		}
 		container.LogEvent("destroy")
 		if removeVolume {
 			daemon.DeleteVolumes(container.VolumePaths())
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 func (daemon *Daemon) DeleteVolumes(volumeIDs map[string]struct{}) {
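The removeLink branch above also fixes a latent bug that the new TODO comment calls out: the old code called job.Error(err) without returning, so the function kept running with a name it could not resolve. Reduced to its core (illustrative, mirroring the hunk above):

name, err := GetFullContainerName(name)
if err != nil {
	// before: job.Error(err) with no return, so execution fell through
	// after: fail fast, since nothing below works without the full name
	return err
}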
@@ -111,25 +111,25 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
 	return container, nil
 }
 
-func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
+func (d *Daemon) ContainerExecCreate(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s [options] container command [args]", job.Name)
+		return fmt.Errorf("Usage: %s [options] container command [args]", job.Name)
 	}
 
 	if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) {
-		return job.Error(lxc.ErrExec)
+		return lxc.ErrExec
 	}
 
 	var name = job.Args[0]
 
 	container, err := d.getActiveContainer(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	config, err := runconfig.ExecConfigFromJob(job)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd)
@@ -157,12 +157,12 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) engine.Status {
 
 	job.Printf("%s\n", execConfig.ID)
 
-	return engine.StatusOK
+	return nil
 }
 
-func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
+func (d *Daemon) ContainerExecStart(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s [options] exec", job.Name)
+		return fmt.Errorf("Usage: %s [options] exec", job.Name)
 	}
 
 	var (
@@ -173,7 +173,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 
 	execConfig, err := d.getExecConfig(execName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	func() {
@@ -185,7 +185,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 		execConfig.Running = true
 	}()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	log.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
@@ -236,14 +236,14 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) engine.Status {
 	select {
 	case err := <-attachErr:
 		if err != nil {
-			return job.Errorf("attach failed with error: %s", err)
+			return fmt.Errorf("attach failed with error: %s", err)
 		}
 		break
 	case err := <-execErr:
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
@@ -1,33 +1,34 @@
 package daemon
 
 import (
+	"fmt"
 	"io"
 
 	"github.com/docker/docker/engine"
 )
 
-func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerExport(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s container_id", job.Name)
+		return fmt.Errorf("Usage: %s container_id", job.Name)
 	}
 	name := job.Args[0]
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	data, err := container.Export()
 	if err != nil {
-		return job.Errorf("%s: %s", name, err)
+		return fmt.Errorf("%s: %s", name, err)
 	}
 	defer data.Close()
 
 	// Stream the entire contents of the container (basically a volatile snapshot)
 	if _, err := io.Copy(job.Stdout, data); err != nil {
-		return job.Errorf("%s: %s", name, err)
+		return fmt.Errorf("%s: %s", name, err)
 	}
 	// FIXME: factor job-specific LogEvent to engine.Job.Run()
 	container.LogEvent("export")
-	return engine.StatusOK
+	return nil
 }
@@ -12,21 +12,21 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) ImageDelete(job *engine.Job) engine.Status {
+func (daemon *Daemon) ImageDelete(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE", job.Name)
 	}
 	imgs := engine.NewTable("", 0)
 	if err := daemon.DeleteImage(job.Eng, job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
-		return job.Error(err)
+		return err
 	}
 	if len(imgs.Data) == 0 {
-		return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
+		return fmt.Errorf("Conflict, %s wasn't deleted", job.Args[0])
 	}
 	if _, err := imgs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // FIXME: make this private and use the job instead
@@ -15,7 +15,7 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
+func (daemon *Daemon) CmdInfo(job *engine.Job) error {
 	images, _ := daemon.Graph().Map()
 	var imgcount int
 	if images == nil {
@@ -54,16 +54,16 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 	cjob := job.Eng.Job("subscribers_count")
 	env, _ := cjob.Stdout.AddEnv()
 	if err := cjob.Run(); err != nil {
-		return job.Error(err)
+		return err
 	}
 	registryJob := job.Eng.Job("registry_config")
 	registryEnv, _ := registryJob.Stdout.AddEnv()
 	if err := registryJob.Run(); err != nil {
-		return job.Error(err)
+		return err
 	}
 	registryConfig := registry.ServiceConfig{}
 	if err := registryEnv.GetJson("config", &registryConfig); err != nil {
-		return job.Error(err)
+		return err
 	}
 	v := &engine.Env{}
 	v.SetJson("ID", daemon.ID)
@@ -104,7 +104,7 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) engine.Status {
 	}
 	v.SetList("Labels", daemon.Config().Labels)
 	if _, err := v.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
@@ -8,14 +8,14 @@ import (
 	"github.com/docker/docker/runconfig"
 )
 
-func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerInspect(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	container.Lock()
@@ -26,10 +26,10 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
 			HostConfig *runconfig.HostConfig
 		}{container, container.hostConfig})
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		job.Stdout.Write(b)
-		return engine.StatusOK
+		return nil
 	}
 
 	out := &engine.Env{}
@@ -75,25 +75,25 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
 
 	container.hostConfig.Links = nil
 	if _, err := out.WriteTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
-func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerExecInspect(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s ID", job.Name)
+		return fmt.Errorf("usage: %s ID", job.Name)
 	}
 	id := job.Args[0]
 	eConfig, err := daemon.getExecConfig(id)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	b, err := json.Marshal(*eConfig)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	job.Stdout.Write(b)
-	return engine.StatusOK
+	return nil
 }
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
 	"syscall"
@@ -13,9 +14,9 @@ import (
 // If no signal is given (sig 0), then Kill with SIGKILL and wait
 // for the container to exit.
 // If a signal is given, then just send it to the container and return.
-func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerKill(job *engine.Job) error {
 	if n := len(job.Args); n < 1 || n > 2 {
-		return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
 	}
 	var (
 		name = job.Args[0]
@@ -34,27 +35,27 @@ func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status {
 		}
 
 		if sig == 0 {
-			return job.Errorf("Invalid signal: %s", job.Args[1])
+			return fmt.Errorf("Invalid signal: %s", job.Args[1])
 		}
 	}
 
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
 	if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
		if err := container.Kill(); err != nil {
-			return job.Errorf("Cannot kill container %s: %s", name, err)
+			return fmt.Errorf("Cannot kill container %s: %s", name, err)
 		}
 		container.LogEvent("kill")
 	} else {
 		// Otherwise, just send the requested signal
 		if err := container.KillSig(int(sig)); err != nil {
-			return job.Errorf("Cannot kill container %s: %s", name, err)
+			return fmt.Errorf("Cannot kill container %s: %s", name, err)
 		}
 		// FIXME: Add event for signals
 	}
-	return engine.StatusOK
+	return nil
 }
@@ -20,7 +20,7 @@ func (daemon *Daemon) List() []*Container {
 	return daemon.containers.List()
 }
 
-func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
+func (daemon *Daemon) Containers(job *engine.Job) error {
 	var (
 		foundBefore bool
 		displayed   int
@@ -36,13 +36,13 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 
 	psFilters, err := filters.FromParam(job.Getenv("filters"))
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if i, ok := psFilters["exited"]; ok {
 		for _, value := range i {
 			code, err := strconv.Atoi(value)
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 			filt_exited = append(filt_exited, code)
 		}
@@ -65,14 +65,14 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 	if before != "" {
 		beforeCont, err = daemon.Get(before)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
 	if since != "" {
 		sinceCont, err = daemon.Get(since)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
@@ -170,14 +170,14 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status {
 	for _, container := range daemon.List() {
 		if err := writeCont(container); err != nil {
 			if err != errLast {
-				return job.Error(err)
+				return err
 			}
 			break
 		}
 	}
 	outs.ReverseSort()
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
@@ -16,9 +16,9 @@ import (
 	"github.com/docker/docker/pkg/timeutils"
 )
 
-func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
+func (daemon *Daemon) ContainerLogs(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("Usage: %s CONTAINER\n", job.Name)
+		return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
 	}
 
 	var (
@@ -32,7 +32,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 		format string
 	)
 	if !(stdout || stderr) {
-		return job.Errorf("You must choose at least one stream")
+		return fmt.Errorf("You must choose at least one stream")
 	}
 	if times {
 		format = timeutils.RFC3339NanoFixed
@@ -42,10 +42,10 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 	}
 	container, err := daemon.Get(name)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if container.LogDriverType() != "json-file" {
-		return job.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
+		return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver")
 	}
 	cLog, err := container.ReadLog("json")
 	if err != nil && os.IsNotExist(err) {
@@ -83,7 +83,7 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 		f := cLog.(*os.File)
 		ls, err := tailfile.TailFile(f, lines)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		tmp := bytes.NewBuffer([]byte{})
 		for _, l := range ls {
@@ -148,5 +148,5 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status {
 		}
 
 	}
-	return engine.StatusOK
+	return nil
 }
@@ -83,7 +83,7 @@ var (
 	ipAllocator = ipallocator.New()
 )
 
-func InitDriver(job *engine.Job) engine.Status {
+func InitDriver(job *engine.Job) error {
 	var (
 		networkv4 *net.IPNet
 		networkv6 *net.IPNet
@@ -117,17 +117,17 @@ func InitDriver(job *engine.Job) engine.Status {
 		// No Bridge existent, create one
 		// If we're not using the default bridge, fail without trying to create it
 		if !usingDefaultBridge {
-			return job.Error(err)
+			return err
 		}
 
 		// If the iface is not found, try to create it
 		if err := configureBridge(bridgeIP, bridgeIPv6, enableIPv6); err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		if fixedCIDRv6 != "" {
@@ -144,10 +144,10 @@ func InitDriver(job *engine.Job) engine.Status {
 		networkv4 = addrv4.(*net.IPNet)
 		bip, _, err := net.ParseCIDR(bridgeIP)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		if !networkv4.IP.Equal(bip) {
-			return job.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
+			return fmt.Errorf("Bridge ip (%s) does not match existing bridge configuration %s", networkv4.IP, bip)
 		}
 	}
 
@@ -157,12 +157,12 @@ func InitDriver(job *engine.Job) engine.Status {
 	// the bridge init for IPv6 here, else we will error out below if --ipv6=true
 	if len(addrsv6) == 0 && enableIPv6 {
 		if err := setupIPv6Bridge(bridgeIPv6); err != nil {
-			return job.Error(err)
+			return err
 		}
 		// Recheck addresses now that IPv6 is setup on the bridge
 		addrv4, addrsv6, err = networkdriver.GetIfaceAddr(bridgeIface)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
@@ -172,7 +172,7 @@ func InitDriver(job *engine.Job) engine.Status {
 	if enableIPv6 {
 		bip6, _, err := net.ParseCIDR(bridgeIPv6)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		found := false
 		for _, addrv6 := range addrsv6 {
@@ -183,7 +183,7 @@ func InitDriver(job *engine.Job) engine.Status {
 			}
 		}
 		if !found {
-			return job.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
+			return fmt.Errorf("Bridge IPv6 does not match existing bridge configuration %s", bip6)
 		}
 	}
 
@@ -191,7 +191,7 @@ func InitDriver(job *engine.Job) engine.Status {
 
 	if enableIPv6 {
 		if len(addrsv6) == 0 {
-			return job.Error(errors.New("IPv6 enabled but no IPv6 detected"))
+			return errors.New("IPv6 enabled but no IPv6 detected")
 		}
 		bridgeIPv6Addr = networkv6.IP
 	}
@@ -199,7 +199,7 @@ func InitDriver(job *engine.Job) engine.Status {
 	// Configure iptables for link support
 	if enableIPTables {
 		if err := setupIPTables(addrv4, icc, ipMasq); err != nil {
-			return job.Error(err)
+			return err
 		}
 
 	}
@@ -207,33 +207,33 @@ func InitDriver(job *engine.Job) engine.Status {
 	if ipForward {
 		// Enable IPv4 forwarding
 		if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil {
-			job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
+			log.Warnf("WARNING: unable to enable IPv4 forwarding: %s\n", err)
 		}
 
 		if fixedCIDRv6 != "" {
 			// Enable IPv6 forwarding
 			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/default/forwarding", []byte{'1', '\n'}, 0644); err != nil {
-				job.Logf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
+				log.Warnf("WARNING: unable to enable IPv6 default forwarding: %s\n", err)
 			}
 			if err := ioutil.WriteFile("/proc/sys/net/ipv6/conf/all/forwarding", []byte{'1', '\n'}, 0644); err != nil {
-				job.Logf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
+				log.Warnf("WARNING: unable to enable IPv6 all forwarding: %s\n", err)
 			}
 		}
 	}
 
 	// We can always try removing the iptables
 	if err := iptables.RemoveExistingChain("DOCKER", iptables.Nat); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if enableIPTables {
 		_, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Nat)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		portmapper.SetIptablesChain(chain)
 	}
@@ -242,22 +242,22 @@ func InitDriver(job *engine.Job) engine.Status {
 	if fixedCIDR != "" {
 		_, subnet, err := net.ParseCIDR(fixedCIDR)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		log.Debugf("Subnet: %v", subnet)
 		if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
 
 	if fixedCIDRv6 != "" {
 		_, subnet, err := net.ParseCIDR(fixedCIDRv6)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		log.Debugf("Subnet: %v", subnet)
 		if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil {
-			return job.Error(err)
+			return err
 		}
 		globalIPv6Network = subnet
 	}
@@ -275,10 +275,10 @@ func InitDriver(job *engine.Job) engine.Status {
 		"link": LinkContainers,
 	} {
 		if err := job.Eng.Register(name, f); err != nil {
-			return job.Error(err)
+			return err
 		}
 	}
-	return engine.StatusOK
+	return nil
 }
 
 func setupIPTables(addr net.Addr, icc, ipmasq bool) error {
@@ -499,7 +499,7 @@ func linkLocalIPv6FromMac(mac string) (string, error) {
 }
 
 // Allocate a network interface
-func Allocate(job *engine.Job) engine.Status {
+func Allocate(job *engine.Job) error {
 	var (
 		ip  net.IP
 		mac net.HardwareAddr
@@ -512,7 +512,7 @@ func Allocate(job *engine.Job) engine.Status {
 
 	ip, err = ipAllocator.RequestIP(bridgeIPv4Network, requestedIP)
 	if err != nil {
-		return job.Error(err)
+		return err
|
| 516 | 516 |
} |
| 517 | 517 |
|
| 518 | 518 |
// If no explicit mac address was given, generate a random one. |
| ... | ... |
@@ -534,7 +534,7 @@ func Allocate(job *engine.Job) engine.Status {
|
| 534 | 534 |
globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, requestedIPv6) |
| 535 | 535 |
if err != nil {
|
| 536 | 536 |
log.Errorf("Allocator: RequestIP v6: %v", err)
|
| 537 |
- return job.Error(err) |
|
| 537 |
+ return err |
|
| 538 | 538 |
} |
| 539 | 539 |
log.Infof("Allocated IPv6 %s", globalIPv6)
|
| 540 | 540 |
} |
| ... | ... |
@@ -552,7 +552,7 @@ func Allocate(job *engine.Job) engine.Status {
|
| 552 | 552 |
// If linklocal IPv6 |
| 553 | 553 |
localIPv6Net, err := linkLocalIPv6FromMac(mac.String()) |
| 554 | 554 |
if err != nil {
|
| 555 |
- return job.Error(err) |
|
| 555 |
+ return err |
|
| 556 | 556 |
} |
| 557 | 557 |
localIPv6, _, _ := net.ParseCIDR(localIPv6Net) |
| 558 | 558 |
out.Set("LinkLocalIPv6", localIPv6.String())
|
| ... | ... |
@@ -572,18 +572,18 @@ func Allocate(job *engine.Job) engine.Status {
|
| 572 | 572 |
|
| 573 | 573 |
out.WriteTo(job.Stdout) |
| 574 | 574 |
|
| 575 |
- return engine.StatusOK |
|
| 575 |
+ return nil |
|
| 576 | 576 |
} |
| 577 | 577 |
|
| 578 | 578 |
// Release an interface for a select ip |
| 579 |
-func Release(job *engine.Job) engine.Status {
|
|
| 579 |
+func Release(job *engine.Job) error {
|
|
| 580 | 580 |
var ( |
| 581 | 581 |
id = job.Args[0] |
| 582 | 582 |
containerInterface = currentInterfaces.Get(id) |
| 583 | 583 |
) |
| 584 | 584 |
|
| 585 | 585 |
if containerInterface == nil {
|
| 586 |
- return job.Errorf("No network information to release for %s", id)
|
|
| 586 |
+ return fmt.Errorf("No network information to release for %s", id)
|
|
| 587 | 587 |
} |
| 588 | 588 |
|
| 589 | 589 |
for _, nat := range containerInterface.PortMappings {
|
| ... | ... |
@@ -600,11 +600,11 @@ func Release(job *engine.Job) engine.Status {
|
| 600 | 600 |
log.Infof("Unable to release IPv6 %s", err)
|
| 601 | 601 |
} |
| 602 | 602 |
} |
| 603 |
- return engine.StatusOK |
|
| 603 |
+ return nil |
|
| 604 | 604 |
} |
| 605 | 605 |
|
| 606 | 606 |
// Allocate an external port and map it to the interface |
| 607 |
-func AllocatePort(job *engine.Job) engine.Status {
|
|
| 607 |
+func AllocatePort(job *engine.Job) error {
|
|
| 608 | 608 |
var ( |
| 609 | 609 |
err error |
| 610 | 610 |
|
| ... | ... |
@@ -620,7 +620,7 @@ func AllocatePort(job *engine.Job) engine.Status {
|
| 620 | 620 |
if hostIP != "" {
|
| 621 | 621 |
ip = net.ParseIP(hostIP) |
| 622 | 622 |
if ip == nil {
|
| 623 |
- return job.Errorf("Bad parameter: invalid host ip %s", hostIP)
|
|
| 623 |
+ return fmt.Errorf("Bad parameter: invalid host ip %s", hostIP)
|
|
| 624 | 624 |
} |
| 625 | 625 |
} |
| 626 | 626 |
|
| ... | ... |
@@ -632,7 +632,7 @@ func AllocatePort(job *engine.Job) engine.Status {
|
| 632 | 632 |
case "udp": |
| 633 | 633 |
container = &net.UDPAddr{IP: network.IP, Port: containerPort}
|
| 634 | 634 |
default: |
| 635 |
- return job.Errorf("unsupported address type %s", proto)
|
|
| 635 |
+ return fmt.Errorf("unsupported address type %s", proto)
|
|
| 636 | 636 |
} |
| 637 | 637 |
|
| 638 | 638 |
// |
| ... | ... |
@@ -650,14 +650,14 @@ func AllocatePort(job *engine.Job) engine.Status {
|
| 650 | 650 |
// There is no point in immediately retrying to map an explicitly |
| 651 | 651 |
// chosen port. |
| 652 | 652 |
if hostPort != 0 {
|
| 653 |
- job.Logf("Failed to allocate and map port %d: %s", hostPort, err)
|
|
| 653 |
+ log.Warnf("Failed to allocate and map port %d: %s", hostPort, err)
|
|
| 654 | 654 |
break |
| 655 | 655 |
} |
| 656 |
- job.Logf("Failed to allocate and map port: %s, retry: %d", err, i+1)
|
|
| 656 |
+ log.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
|
|
| 657 | 657 |
} |
| 658 | 658 |
|
| 659 | 659 |
if err != nil {
|
| 660 |
- return job.Error(err) |
|
| 660 |
+ return err |
|
| 661 | 661 |
} |
| 662 | 662 |
|
| 663 | 663 |
network.PortMappings = append(network.PortMappings, host) |
| ... | ... |
@@ -672,13 +672,13 @@ func AllocatePort(job *engine.Job) engine.Status {
|
| 672 | 672 |
out.SetInt("HostPort", netAddr.Port)
|
| 673 | 673 |
} |
| 674 | 674 |
if _, err := out.WriteTo(job.Stdout); err != nil {
|
| 675 |
- return job.Error(err) |
|
| 675 |
+ return err |
|
| 676 | 676 |
} |
| 677 | 677 |
|
| 678 |
- return engine.StatusOK |
|
| 678 |
+ return nil |
|
| 679 | 679 |
} |
| 680 | 680 |
|
| 681 |
-func LinkContainers(job *engine.Job) engine.Status {
|
|
| 681 |
+func LinkContainers(job *engine.Job) error {
|
|
| 682 | 682 |
var ( |
| 683 | 683 |
action = job.Args[0] |
| 684 | 684 |
nfAction iptables.Action |
| ... | ... |
@@ -696,24 +696,24 @@ func LinkContainers(job *engine.Job) engine.Status {
|
| 696 | 696 |
case "-D": |
| 697 | 697 |
nfAction = iptables.Delete |
| 698 | 698 |
default: |
| 699 |
- return job.Errorf("Invalid action '%s' specified", action)
|
|
| 699 |
+ return fmt.Errorf("Invalid action '%s' specified", action)
|
|
| 700 | 700 |
} |
| 701 | 701 |
|
| 702 | 702 |
ip1 := net.ParseIP(parentIP) |
| 703 | 703 |
if ip1 == nil {
|
| 704 |
- return job.Errorf("Parent IP '%s' is invalid", parentIP)
|
|
| 704 |
+ return fmt.Errorf("Parent IP '%s' is invalid", parentIP)
|
|
| 705 | 705 |
} |
| 706 | 706 |
ip2 := net.ParseIP(childIP) |
| 707 | 707 |
if ip2 == nil {
|
| 708 |
- return job.Errorf("Child IP '%s' is invalid", childIP)
|
|
| 708 |
+ return fmt.Errorf("Child IP '%s' is invalid", childIP)
|
|
| 709 | 709 |
} |
| 710 | 710 |
|
| 711 | 711 |
chain := iptables.Chain{Name: "DOCKER", Bridge: bridgeIface}
|
| 712 | 712 |
for _, p := range ports {
|
| 713 | 713 |
port := nat.Port(p) |
| 714 | 714 |
if err := chain.Link(nfAction, ip1, ip2, port.Int(), port.Proto()); !ignoreErrors && err != nil {
|
| 715 |
- return job.Error(err) |
|
| 715 |
+ return err |
|
| 716 | 716 |
} |
| 717 | 717 |
} |
| 718 |
- return engine.StatusOK |
|
| 718 |
+ return nil |
|
| 719 | 719 |
} |
| ... | ... |
@@ -60,22 +60,22 @@ func TestAllocatePortDetection(t *testing.T) {
|
| 60 | 60 |
|
| 61 | 61 |
// Init driver |
| 62 | 62 |
job := eng.Job("initdriver")
|
| 63 |
- if res := InitDriver(job); res != engine.StatusOK {
|
|
| 63 |
+ if res := InitDriver(job); res != nil {
|
|
| 64 | 64 |
t.Fatal("Failed to initialize network driver")
|
| 65 | 65 |
} |
| 66 | 66 |
|
| 67 | 67 |
// Allocate interface |
| 68 | 68 |
job = eng.Job("allocate_interface", "container_id")
|
| 69 |
- if res := Allocate(job); res != engine.StatusOK {
|
|
| 69 |
+ if res := Allocate(job); res != nil {
|
|
| 70 | 70 |
t.Fatal("Failed to allocate network interface")
|
| 71 | 71 |
} |
| 72 | 72 |
|
| 73 | 73 |
// Allocate same port twice, expect failure on second call |
| 74 | 74 |
job = newPortAllocationJob(eng, freePort) |
| 75 |
- if res := AllocatePort(job); res != engine.StatusOK {
|
|
| 75 |
+ if res := AllocatePort(job); res != nil {
|
|
| 76 | 76 |
t.Fatal("Failed to find a free port to allocate")
|
| 77 | 77 |
} |
| 78 |
- if res := AllocatePort(job); res == engine.StatusOK {
|
|
| 78 |
+ if res := AllocatePort(job); res == nil {
|
|
| 79 | 79 |
t.Fatal("Duplicate port allocation granted by AllocatePort")
|
| 80 | 80 |
} |
| 81 | 81 |
} |
| ... | ... |
@@ -88,19 +88,19 @@ func TestHostnameFormatChecking(t *testing.T) {
|
| 88 | 88 |
|
| 89 | 89 |
// Init driver |
| 90 | 90 |
job := eng.Job("initdriver")
|
| 91 |
- if res := InitDriver(job); res != engine.StatusOK {
|
|
| 91 |
+ if res := InitDriver(job); res != nil {
|
|
| 92 | 92 |
t.Fatal("Failed to initialize network driver")
|
| 93 | 93 |
} |
| 94 | 94 |
|
| 95 | 95 |
// Allocate interface |
| 96 | 96 |
job = eng.Job("allocate_interface", "container_id")
|
| 97 |
- if res := Allocate(job); res != engine.StatusOK {
|
|
| 97 |
+ if res := Allocate(job); res != nil {
|
|
| 98 | 98 |
t.Fatal("Failed to allocate network interface")
|
| 99 | 99 |
} |
| 100 | 100 |
|
| 101 | 101 |
// Allocate port with invalid HostIP, expect failure with Bad Request http status |
| 102 | 102 |
job = newPortAllocationJobWithInvalidHostIP(eng, freePort) |
| 103 |
- if res := AllocatePort(job); res == engine.StatusOK {
|
|
| 103 |
+ if res := AllocatePort(job); res == nil {
|
|
| 104 | 104 |
t.Fatal("Failed to check invalid HostIP")
|
| 105 | 105 |
} |
| 106 | 106 |
} |
| ... | ... |
@@ -129,11 +129,11 @@ func newInterfaceAllocation(t *testing.T, input engine.Env) (output engine.Env) |
| 129 | 129 |
<-done |
| 130 | 130 |
|
| 131 | 131 |
if input.Exists("expectFail") && input.GetBool("expectFail") {
|
| 132 |
- if res == engine.StatusOK {
|
|
| 132 |
+ if res == nil {
|
|
| 133 | 133 |
t.Fatal("Doesn't fail to allocate network interface")
|
| 134 | 134 |
} |
| 135 | 135 |
} else {
|
| 136 |
- if res != engine.StatusOK {
|
|
| 136 |
+ if res != nil {
|
|
| 137 | 137 |
t.Fatal("Failed to allocate network interface")
|
| 138 | 138 |
} |
| 139 | 139 |
} |
| ... | ... |
@@ -244,13 +244,13 @@ func TestLinkContainers(t *testing.T) {
|
| 244 | 244 |
|
| 245 | 245 |
// Init driver |
| 246 | 246 |
job := eng.Job("initdriver")
|
| 247 |
- if res := InitDriver(job); res != engine.StatusOK {
|
|
| 247 |
+ if res := InitDriver(job); res != nil {
|
|
| 248 | 248 |
t.Fatal("Failed to initialize network driver")
|
| 249 | 249 |
} |
| 250 | 250 |
|
| 251 | 251 |
// Allocate interface |
| 252 | 252 |
job = eng.Job("allocate_interface", "container_id")
|
| 253 |
- if res := Allocate(job); res != engine.StatusOK {
|
|
| 253 |
+ if res := Allocate(job); res != nil {
|
|
| 254 | 254 |
t.Fatal("Failed to allocate network interface")
|
| 255 | 255 |
} |
| 256 | 256 |
|
| ... | ... |
@@ -267,7 +267,7 @@ func TestLinkContainers(t *testing.T) {
|
| 267 | 267 |
t.Fatal(err) |
| 268 | 268 |
} |
| 269 | 269 |
|
| 270 |
- if res := LinkContainers(job); res != engine.StatusOK {
|
|
| 270 |
+ if res := LinkContainers(job); res != nil {
|
|
| 271 | 271 |
t.Fatalf("LinkContainers failed")
|
| 272 | 272 |
} |
| 273 | 273 |
|
| ... | ... |
@@ -1,37 +1,39 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 5 |
+ |
|
| 4 | 6 |
"github.com/docker/docker/engine" |
| 5 | 7 |
) |
| 6 | 8 |
|
| 7 |
-func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status {
|
|
| 9 |
+func (daemon *Daemon) ContainerPause(job *engine.Job) error {
|
|
| 8 | 10 |
if len(job.Args) != 1 {
|
| 9 |
- return job.Errorf("Usage: %s CONTAINER", job.Name)
|
|
| 11 |
+ return fmt.Errorf("Usage: %s CONTAINER", job.Name)
|
|
| 10 | 12 |
} |
| 11 | 13 |
name := job.Args[0] |
| 12 | 14 |
container, err := daemon.Get(name) |
| 13 | 15 |
if err != nil {
|
| 14 |
- return job.Error(err) |
|
| 16 |
+ return err |
|
| 15 | 17 |
} |
| 16 | 18 |
if err := container.Pause(); err != nil {
|
| 17 |
- return job.Errorf("Cannot pause container %s: %s", name, err)
|
|
| 19 |
+ return fmt.Errorf("Cannot pause container %s: %s", name, err)
|
|
| 18 | 20 |
} |
| 19 | 21 |
container.LogEvent("pause")
|
| 20 |
- return engine.StatusOK |
|
| 22 |
+ return nil |
|
| 21 | 23 |
} |
| 22 | 24 |
|
| 23 |
-func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status {
|
|
| 25 |
+func (daemon *Daemon) ContainerUnpause(job *engine.Job) error {
|
|
| 24 | 26 |
if n := len(job.Args); n < 1 || n > 2 {
|
| 25 |
- return job.Errorf("Usage: %s CONTAINER", job.Name)
|
|
| 27 |
+ return fmt.Errorf("Usage: %s CONTAINER", job.Name)
|
|
| 26 | 28 |
} |
| 27 | 29 |
name := job.Args[0] |
| 28 | 30 |
container, err := daemon.Get(name) |
| 29 | 31 |
if err != nil {
|
| 30 |
- return job.Error(err) |
|
| 32 |
+ return err |
|
| 31 | 33 |
} |
| 32 | 34 |
if err := container.Unpause(); err != nil {
|
| 33 |
- return job.Errorf("Cannot unpause container %s: %s", name, err)
|
|
| 35 |
+ return fmt.Errorf("Cannot unpause container %s: %s", name, err)
|
|
| 34 | 36 |
} |
| 35 | 37 |
container.LogEvent("unpause")
|
| 36 |
- return engine.StatusOK |
|
| 38 |
+ return nil |
|
| 37 | 39 |
} |
| ... | ... |
@@ -1,17 +1,21 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 |
-import "github.com/docker/docker/engine" |
|
| 3 |
+import ( |
|
| 4 |
+ "fmt" |
|
| 4 | 5 |
|
| 5 |
-func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
|
|
| 6 |
+ "github.com/docker/docker/engine" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+func (daemon *Daemon) ContainerRename(job *engine.Job) error {
|
|
| 6 | 10 |
if len(job.Args) != 2 {
|
| 7 |
- return job.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
|
|
| 11 |
+ return fmt.Errorf("usage: %s OLD_NAME NEW_NAME", job.Name)
|
|
| 8 | 12 |
} |
| 9 | 13 |
oldName := job.Args[0] |
| 10 | 14 |
newName := job.Args[1] |
| 11 | 15 |
|
| 12 | 16 |
container, err := daemon.Get(oldName) |
| 13 | 17 |
if err != nil {
|
| 14 |
- return job.Error(err) |
|
| 18 |
+ return err |
|
| 15 | 19 |
} |
| 16 | 20 |
|
| 17 | 21 |
oldName = container.Name |
| ... | ... |
@@ -19,7 +23,7 @@ func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
|
| 19 | 19 |
container.Lock() |
| 20 | 20 |
defer container.Unlock() |
| 21 | 21 |
if newName, err = daemon.reserveName(container.ID, newName); err != nil {
|
| 22 |
- return job.Errorf("Error when allocating new name: %s", err)
|
|
| 22 |
+ return fmt.Errorf("Error when allocating new name: %s", err)
|
|
| 23 | 23 |
} |
| 24 | 24 |
|
| 25 | 25 |
container.Name = newName |
| ... | ... |
@@ -32,13 +36,13 @@ func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status {
|
| 32 | 32 |
|
| 33 | 33 |
if err := daemon.containerGraph.Delete(oldName); err != nil {
|
| 34 | 34 |
undo() |
| 35 |
- return job.Errorf("Failed to delete container %q: %v", oldName, err)
|
|
| 35 |
+ return fmt.Errorf("Failed to delete container %q: %v", oldName, err)
|
|
| 36 | 36 |
} |
| 37 | 37 |
|
| 38 | 38 |
if err := container.toDisk(); err != nil {
|
| 39 | 39 |
undo() |
| 40 |
- return job.Error(err) |
|
| 40 |
+ return err |
|
| 41 | 41 |
} |
| 42 | 42 |
|
| 43 |
- return engine.StatusOK |
|
| 43 |
+ return nil |
|
| 44 | 44 |
} |
| ... | ... |
@@ -1,53 +1,54 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 4 | 5 |
"strconv" |
| 5 | 6 |
|
| 6 | 7 |
"github.com/docker/docker/engine" |
| 7 | 8 |
) |
| 8 | 9 |
|
| 9 |
-func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status {
|
|
| 10 |
+func (daemon *Daemon) ContainerResize(job *engine.Job) error {
|
|
| 10 | 11 |
if len(job.Args) != 3 {
|
| 11 |
- return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
|
|
| 12 |
+ return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
|
|
| 12 | 13 |
} |
| 13 | 14 |
name := job.Args[0] |
| 14 | 15 |
height, err := strconv.Atoi(job.Args[1]) |
| 15 | 16 |
if err != nil {
|
| 16 |
- return job.Error(err) |
|
| 17 |
+ return err |
|
| 17 | 18 |
} |
| 18 | 19 |
width, err := strconv.Atoi(job.Args[2]) |
| 19 | 20 |
if err != nil {
|
| 20 |
- return job.Error(err) |
|
| 21 |
+ return err |
|
| 21 | 22 |
} |
| 22 | 23 |
container, err := daemon.Get(name) |
| 23 | 24 |
if err != nil {
|
| 24 |
- return job.Error(err) |
|
| 25 |
+ return err |
|
| 25 | 26 |
} |
| 26 | 27 |
if err := container.Resize(height, width); err != nil {
|
| 27 |
- return job.Error(err) |
|
| 28 |
+ return err |
|
| 28 | 29 |
} |
| 29 |
- return engine.StatusOK |
|
| 30 |
+ return nil |
|
| 30 | 31 |
} |
| 31 | 32 |
|
| 32 |
-func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status {
|
|
| 33 |
+func (daemon *Daemon) ContainerExecResize(job *engine.Job) error {
|
|
| 33 | 34 |
if len(job.Args) != 3 {
|
| 34 |
- return job.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name)
|
|
| 35 |
+ return fmt.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name)
|
|
| 35 | 36 |
} |
| 36 | 37 |
name := job.Args[0] |
| 37 | 38 |
height, err := strconv.Atoi(job.Args[1]) |
| 38 | 39 |
if err != nil {
|
| 39 |
- return job.Error(err) |
|
| 40 |
+ return err |
|
| 40 | 41 |
} |
| 41 | 42 |
width, err := strconv.Atoi(job.Args[2]) |
| 42 | 43 |
if err != nil {
|
| 43 |
- return job.Error(err) |
|
| 44 |
+ return err |
|
| 44 | 45 |
} |
| 45 | 46 |
execConfig, err := daemon.getExecConfig(name) |
| 46 | 47 |
if err != nil {
|
| 47 |
- return job.Error(err) |
|
| 48 |
+ return err |
|
| 48 | 49 |
} |
| 49 | 50 |
if err := execConfig.Resize(height, width); err != nil {
|
| 50 |
- return job.Error(err) |
|
| 51 |
+ return err |
|
| 51 | 52 |
} |
| 52 |
- return engine.StatusOK |
|
| 53 |
+ return nil |
|
| 53 | 54 |
} |
| ... | ... |
@@ -1,12 +1,14 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 5 |
+ |
|
| 4 | 6 |
"github.com/docker/docker/engine" |
| 5 | 7 |
) |
| 6 | 8 |
|
| 7 |
-func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
|
|
| 9 |
+func (daemon *Daemon) ContainerRestart(job *engine.Job) error {
|
|
| 8 | 10 |
if len(job.Args) != 1 {
|
| 9 |
- return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 11 |
+ return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 10 | 12 |
} |
| 11 | 13 |
var ( |
| 12 | 14 |
name = job.Args[0] |
| ... | ... |
@@ -17,11 +19,11 @@ func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status {
|
| 17 | 17 |
} |
| 18 | 18 |
container, err := daemon.Get(name) |
| 19 | 19 |
if err != nil {
|
| 20 |
- return job.Error(err) |
|
| 20 |
+ return err |
|
| 21 | 21 |
} |
| 22 | 22 |
if err := container.Restart(int(t)); err != nil {
|
| 23 |
- return job.Errorf("Cannot restart container %s: %s\n", name, err)
|
|
| 23 |
+ return fmt.Errorf("Cannot restart container %s: %s\n", name, err)
|
|
| 24 | 24 |
} |
| 25 | 25 |
container.LogEvent("restart")
|
| 26 |
- return engine.StatusOK |
|
| 26 |
+ return nil |
|
| 27 | 27 |
} |
| ... | ... |
@@ -1,13 +1,15 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 5 |
+ |
|
| 4 | 6 |
"github.com/docker/docker/engine" |
| 5 | 7 |
"github.com/docker/docker/runconfig" |
| 6 | 8 |
) |
| 7 | 9 |
|
| 8 |
-func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
|
|
| 10 |
+func (daemon *Daemon) ContainerStart(job *engine.Job) error {
|
|
| 9 | 11 |
if len(job.Args) < 1 {
|
| 10 |
- return job.Errorf("Usage: %s container_id", job.Name)
|
|
| 12 |
+ return fmt.Errorf("Usage: %s container_id", job.Name)
|
|
| 11 | 13 |
} |
| 12 | 14 |
var ( |
| 13 | 15 |
name = job.Args[0] |
| ... | ... |
@@ -15,15 +17,15 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
|
| 15 | 15 |
|
| 16 | 16 |
container, err := daemon.Get(name) |
| 17 | 17 |
if err != nil {
|
| 18 |
- return job.Error(err) |
|
| 18 |
+ return err |
|
| 19 | 19 |
} |
| 20 | 20 |
|
| 21 | 21 |
if container.IsPaused() {
|
| 22 |
- return job.Errorf("Cannot start a paused container, try unpause instead.")
|
|
| 22 |
+ return fmt.Errorf("Cannot start a paused container, try unpause instead.")
|
|
| 23 | 23 |
} |
| 24 | 24 |
|
| 25 | 25 |
if container.IsRunning() {
|
| 26 |
- return job.Errorf("Container already started")
|
|
| 26 |
+ return fmt.Errorf("Container already started")
|
|
| 27 | 27 |
} |
| 28 | 28 |
|
| 29 | 29 |
// If no environment was set, then no hostconfig was passed. |
| ... | ... |
@@ -32,15 +34,15 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status {
|
| 32 | 32 |
if len(job.Environ()) > 0 {
|
| 33 | 33 |
hostConfig := runconfig.ContainerHostConfigFromJob(job) |
| 34 | 34 |
if err := daemon.setHostConfig(container, hostConfig); err != nil {
|
| 35 |
- return job.Error(err) |
|
| 35 |
+ return err |
|
| 36 | 36 |
} |
| 37 | 37 |
} |
| 38 | 38 |
if err := container.Start(); err != nil {
|
| 39 | 39 |
container.LogEvent("die")
|
| 40 |
- return job.Errorf("Cannot start container %s: %s", name, err)
|
|
| 40 |
+ return fmt.Errorf("Cannot start container %s: %s", name, err)
|
|
| 41 | 41 |
} |
| 42 | 42 |
|
| 43 |
- return engine.StatusOK |
|
| 43 |
+ return nil |
|
| 44 | 44 |
} |
| 45 | 45 |
|
| 46 | 46 |
func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
|
| ... | ... |
@@ -10,10 +10,10 @@ import ( |
| 10 | 10 |
"github.com/docker/libcontainer/cgroups" |
| 11 | 11 |
) |
| 12 | 12 |
|
| 13 |
-func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status {
|
|
| 13 |
+func (daemon *Daemon) ContainerStats(job *engine.Job) error {
|
|
| 14 | 14 |
updates, err := daemon.SubscribeToContainerStats(job.Args[0]) |
| 15 | 15 |
if err != nil {
|
| 16 |
- return job.Error(err) |
|
| 16 |
+ return err |
|
| 17 | 17 |
} |
| 18 | 18 |
enc := json.NewEncoder(job.Stdout) |
| 19 | 19 |
for v := range updates {
|
| ... | ... |
@@ -25,10 +25,10 @@ func (daemon *Daemon) ContainerStats(job *engine.Job) engine.Status {
|
| 25 | 25 |
if err := enc.Encode(ss); err != nil {
|
| 26 | 26 |
// TODO: handle the specific broken pipe |
| 27 | 27 |
daemon.UnsubscribeToContainerStats(job.Args[0], updates) |
| 28 |
- return job.Error(err) |
|
| 28 |
+ return err |
|
| 29 | 29 |
} |
| 30 | 30 |
} |
| 31 |
- return engine.StatusOK |
|
| 31 |
+ return nil |
|
| 32 | 32 |
} |
| 33 | 33 |
|
| 34 | 34 |
// convertToAPITypes converts the libcontainer.Stats to the api specific |
| ... | ... |
@@ -1,12 +1,14 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 5 |
+ |
|
| 4 | 6 |
"github.com/docker/docker/engine" |
| 5 | 7 |
) |
| 6 | 8 |
|
| 7 |
-func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
|
|
| 9 |
+func (daemon *Daemon) ContainerStop(job *engine.Job) error {
|
|
| 8 | 10 |
if len(job.Args) != 1 {
|
| 9 |
- return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 11 |
+ return fmt.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 10 | 12 |
} |
| 11 | 13 |
var ( |
| 12 | 14 |
name = job.Args[0] |
| ... | ... |
@@ -17,14 +19,14 @@ func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status {
|
| 17 | 17 |
} |
| 18 | 18 |
container, err := daemon.Get(name) |
| 19 | 19 |
if err != nil {
|
| 20 |
- return job.Error(err) |
|
| 20 |
+ return err |
|
| 21 | 21 |
} |
| 22 | 22 |
if !container.IsRunning() {
|
| 23 |
- return job.Errorf("Container already stopped")
|
|
| 23 |
+ return fmt.Errorf("Container already stopped")
|
|
| 24 | 24 |
} |
| 25 | 25 |
if err := container.Stop(int(t)); err != nil {
|
| 26 |
- return job.Errorf("Cannot stop container %s: %s\n", name, err)
|
|
| 26 |
+ return fmt.Errorf("Cannot stop container %s: %s\n", name, err)
|
|
| 27 | 27 |
} |
| 28 | 28 |
container.LogEvent("stop")
|
| 29 |
- return engine.StatusOK |
|
| 29 |
+ return nil |
|
| 30 | 30 |
} |
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 4 | 5 |
"os/exec" |
| 5 | 6 |
"strconv" |
| 6 | 7 |
"strings" |
| ... | ... |
@@ -8,9 +9,9 @@ import ( |
| 8 | 8 |
"github.com/docker/docker/engine" |
| 9 | 9 |
) |
| 10 | 10 |
|
| 11 |
-func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
|
|
| 11 |
+func (daemon *Daemon) ContainerTop(job *engine.Job) error {
|
|
| 12 | 12 |
if len(job.Args) != 1 && len(job.Args) != 2 {
|
| 13 |
- return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
|
|
| 13 |
+ return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
|
|
| 14 | 14 |
} |
| 15 | 15 |
var ( |
| 16 | 16 |
name = job.Args[0] |
| ... | ... |
@@ -23,18 +24,18 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
|
| 23 | 23 |
|
| 24 | 24 |
container, err := daemon.Get(name) |
| 25 | 25 |
if err != nil {
|
| 26 |
- return job.Error(err) |
|
| 26 |
+ return err |
|
| 27 | 27 |
} |
| 28 | 28 |
if !container.IsRunning() {
|
| 29 |
- return job.Errorf("Container %s is not running", name)
|
|
| 29 |
+ return fmt.Errorf("Container %s is not running", name)
|
|
| 30 | 30 |
} |
| 31 | 31 |
pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) |
| 32 | 32 |
if err != nil {
|
| 33 |
- return job.Error(err) |
|
| 33 |
+ return err |
|
| 34 | 34 |
} |
| 35 | 35 |
output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output()
|
| 36 | 36 |
if err != nil {
|
| 37 |
- return job.Errorf("Error running ps: %s", err)
|
|
| 37 |
+ return fmt.Errorf("Error running ps: %s", err)
|
|
| 38 | 38 |
} |
| 39 | 39 |
|
| 40 | 40 |
lines := strings.Split(string(output), "\n") |
| ... | ... |
@@ -49,7 +50,7 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
|
| 49 | 49 |
} |
| 50 | 50 |
} |
| 51 | 51 |
if pidIndex == -1 {
|
| 52 |
- return job.Errorf("Couldn't find PID field in ps output")
|
|
| 52 |
+ return fmt.Errorf("Couldn't find PID field in ps output")
|
|
| 53 | 53 |
} |
| 54 | 54 |
|
| 55 | 55 |
processes := [][]string{}
|
| ... | ... |
@@ -60,7 +61,7 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
|
| 60 | 60 |
fields := strings.Fields(line) |
| 61 | 61 |
p, err := strconv.Atoi(fields[pidIndex]) |
| 62 | 62 |
if err != nil {
|
| 63 |
- return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
|
|
| 63 |
+ return fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
|
|
| 64 | 64 |
} |
| 65 | 65 |
|
| 66 | 66 |
for _, pid := range pids {
|
| ... | ... |
@@ -75,5 +76,5 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status {
|
| 75 | 75 |
} |
| 76 | 76 |
out.SetJson("Processes", processes)
|
| 77 | 77 |
out.WriteTo(job.Stdout) |
| 78 |
- return engine.StatusOK |
|
| 78 |
+ return nil |
|
| 79 | 79 |
} |
| ... | ... |
@@ -1,21 +1,22 @@ |
| 1 | 1 |
package daemon |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 4 | 5 |
"time" |
| 5 | 6 |
|
| 6 | 7 |
"github.com/docker/docker/engine" |
| 7 | 8 |
) |
| 8 | 9 |
|
| 9 |
-func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status {
|
|
| 10 |
+func (daemon *Daemon) ContainerWait(job *engine.Job) error {
|
|
| 10 | 11 |
if len(job.Args) != 1 {
|
| 11 |
- return job.Errorf("Usage: %s", job.Name)
|
|
| 12 |
+ return fmt.Errorf("Usage: %s", job.Name)
|
|
| 12 | 13 |
} |
| 13 | 14 |
name := job.Args[0] |
| 14 | 15 |
container, err := daemon.Get(name) |
| 15 | 16 |
if err != nil {
|
| 16 |
- return job.Errorf("%s: %v", job.Name, err)
|
|
| 17 |
+ return fmt.Errorf("%s: %v", job.Name, err)
|
|
| 17 | 18 |
} |
| 18 | 19 |
status, _ := container.WaitStop(-1 * time.Second) |
| 19 | 20 |
job.Printf("%d\n", status)
|
| 20 |
- return engine.StatusOK |
|
| 21 |
+ return nil |
|
| 21 | 22 |
} |
| ... | ... |
@@ -21,7 +21,7 @@ type Installer interface {
|
| 21 | 21 |
Install(*Engine) error |
| 22 | 22 |
} |
| 23 | 23 |
|
| 24 |
-type Handler func(*Job) Status |
|
| 24 |
+type Handler func(*Job) error |
|
| 25 | 25 |
|
| 26 | 26 |
var globalHandlers map[string]Handler |
| 27 | 27 |
|
| ... | ... |
@@ -84,11 +84,11 @@ func New() *Engine {
|
| 84 | 84 |
Stdin: os.Stdin, |
| 85 | 85 |
Logging: true, |
| 86 | 86 |
} |
| 87 |
- eng.Register("commands", func(job *Job) Status {
|
|
| 87 |
+ eng.Register("commands", func(job *Job) error {
|
|
| 88 | 88 |
for _, name := range eng.commands() {
|
| 89 | 89 |
job.Printf("%s\n", name)
|
| 90 | 90 |
} |
| 91 |
- return StatusOK |
|
| 91 |
+ return nil |
|
| 92 | 92 |
}) |
| 93 | 93 |
// Copy existing global handlers |
| 94 | 94 |
for k, v := range globalHandlers {
|
| ... | ... |
@@ -45,9 +45,9 @@ func TestJob(t *testing.T) {
|
| 45 | 45 |
t.Fatalf("job1.handler should be empty")
|
| 46 | 46 |
} |
| 47 | 47 |
|
| 48 |
- h := func(j *Job) Status {
|
|
| 48 |
+ h := func(j *Job) error {
|
|
| 49 | 49 |
j.Printf("%s\n", j.Name)
|
| 50 |
- return 42 |
|
| 50 |
+ return nil |
|
| 51 | 51 |
} |
| 52 | 52 |
|
| 53 | 53 |
eng.Register("dummy2", h)
|
| ... | ... |
@@ -58,7 +58,7 @@ func TestJob(t *testing.T) {
|
| 58 | 58 |
t.Fatalf("job2.handler shouldn't be nil")
|
| 59 | 59 |
} |
| 60 | 60 |
|
| 61 |
- if job2.handler(job2) != 42 {
|
|
| 61 |
+ if job2.handler(job2) != nil {
|
|
| 62 | 62 |
t.Fatalf("handler dummy2 was not found in job2")
|
| 63 | 63 |
} |
| 64 | 64 |
} |
| ... | ... |
@@ -76,7 +76,7 @@ func TestEngineShutdown(t *testing.T) {
|
| 76 | 76 |
|
| 77 | 77 |
func TestEngineCommands(t *testing.T) {
|
| 78 | 78 |
eng := New() |
| 79 |
- handler := func(job *Job) Status { return StatusOK }
|
|
| 79 |
+ handler := func(job *Job) error { return nil }
|
|
| 80 | 80 |
eng.Register("foo", handler)
|
| 81 | 81 |
eng.Register("bar", handler)
|
| 82 | 82 |
eng.Register("echo", handler)
|
| ... | ... |
@@ -105,9 +105,9 @@ func TestParseJob(t *testing.T) {
|
| 105 | 105 |
eng := New() |
| 106 | 106 |
// Verify that the resulting job calls to the right place |
| 107 | 107 |
var called bool |
| 108 |
- eng.Register("echo", func(job *Job) Status {
|
|
| 108 |
+ eng.Register("echo", func(job *Job) error {
|
|
| 109 | 109 |
called = true |
| 110 |
- return StatusOK |
|
| 110 |
+ return nil |
|
| 111 | 111 |
}) |
| 112 | 112 |
input := "echo DEBUG=1 hello world VERBOSITY=42" |
| 113 | 113 |
job, err := eng.ParseJob(input) |
| ... | ... |
@@ -140,9 +140,9 @@ func TestParseJob(t *testing.T) {
|
| 140 | 140 |
func TestCatchallEmptyName(t *testing.T) {
|
| 141 | 141 |
eng := New() |
| 142 | 142 |
var called bool |
| 143 |
- eng.RegisterCatchall(func(job *Job) Status {
|
|
| 143 |
+ eng.RegisterCatchall(func(job *Job) error {
|
|
| 144 | 144 |
called = true |
| 145 |
- return StatusOK |
|
| 145 |
+ return nil |
|
| 146 | 146 |
}) |
| 147 | 147 |
err := eng.Job("").Run()
|
| 148 | 148 |
if err == nil {
|
| ... | ... |
@@ -164,7 +164,7 @@ func TestNestedJobSharedOutput(t *testing.T) {
|
| 164 | 164 |
wrapOutput bool |
| 165 | 165 |
) |
| 166 | 166 |
|
| 167 |
- outerHandler = func(job *Job) Status {
|
|
| 167 |
+ outerHandler = func(job *Job) error {
|
|
| 168 | 168 |
job.Stdout.Write([]byte("outer1"))
|
| 169 | 169 |
|
| 170 | 170 |
innerJob := job.Eng.Job("innerJob")
|
| ... | ... |
@@ -184,13 +184,13 @@ func TestNestedJobSharedOutput(t *testing.T) {
|
| 184 | 184 |
// closed output. |
| 185 | 185 |
job.Stdout.Write([]byte(" outer2"))
|
| 186 | 186 |
|
| 187 |
- return StatusOK |
|
| 187 |
+ return nil |
|
| 188 | 188 |
} |
| 189 | 189 |
|
| 190 |
- innerHandler = func(job *Job) Status {
|
|
| 190 |
+ innerHandler = func(job *Job) error {
|
|
| 191 | 191 |
job.Stdout.Write([]byte(" inner"))
|
| 192 | 192 |
|
| 193 |
- return StatusOK |
|
| 193 |
+ return nil |
|
| 194 | 194 |
} |
| 195 | 195 |
|
| 196 | 196 |
eng := New() |
| ... | ... |
@@ -32,7 +32,7 @@ type Job struct {
|
| 32 | 32 |
Stderr *Output |
| 33 | 33 |
Stdin *Input |
| 34 | 34 |
handler Handler |
| 35 |
- status Status |
|
| 35 |
+ err error |
|
| 36 | 36 |
end time.Time |
| 37 | 37 |
closeIO bool |
| 38 | 38 |
|
| ... | ... |
@@ -43,17 +43,8 @@ type Job struct {
|
| 43 | 43 |
cancelOnce sync.Once |
| 44 | 44 |
} |
| 45 | 45 |
|
| 46 |
-type Status int |
|
| 47 |
- |
|
| 48 |
-const ( |
|
| 49 |
- StatusOK Status = 0 |
|
| 50 |
- StatusErr Status = 1 |
|
| 51 |
- StatusNotFound Status = 127 |
|
| 52 |
-) |
|
| 53 |
- |
|
| 54 | 46 |
// Run executes the job and blocks until the job completes. |
| 55 |
-// If the job returns a failure status, an error is returned |
|
| 56 |
-// which includes the status. |
|
| 47 |
+// If the job fails it returns an error |
|
| 57 | 48 |
func (job *Job) Run() error {
|
| 58 | 49 |
if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") {
|
| 59 | 50 |
return fmt.Errorf("engine is shutdown")
|
| ... | ... |
@@ -78,16 +69,16 @@ func (job *Job) Run() error {
|
| 78 | 78 |
if job.Eng.Logging {
|
| 79 | 79 |
log.Infof("+job %s", job.CallString())
|
| 80 | 80 |
defer func() {
|
| 81 |
- log.Infof("-job %s%s", job.CallString(), job.StatusString())
|
|
| 81 |
+ // what if err is nil? |
|
| 82 |
+ log.Infof("-job %s%s", job.CallString(), job.err)
|
|
| 82 | 83 |
}() |
| 83 | 84 |
} |
| 84 | 85 |
var errorMessage = bytes.NewBuffer(nil) |
| 85 | 86 |
job.Stderr.Add(errorMessage) |
| 86 | 87 |
if job.handler == nil {
|
| 87 |
- job.Errorf("%s: command not found", job.Name)
|
|
| 88 |
- job.status = 127 |
|
| 88 |
+ job.err = fmt.Errorf("%s: command not found", job.Name)
|
|
| 89 | 89 |
} else {
|
| 90 |
- job.status = job.handler(job) |
|
| 90 |
+ job.err = job.handler(job) |
|
| 91 | 91 |
job.end = time.Now() |
| 92 | 92 |
} |
| 93 | 93 |
if job.closeIO {
|
| ... | ... |
@@ -102,36 +93,14 @@ func (job *Job) Run() error {
|
| 102 | 102 |
return err |
| 103 | 103 |
} |
| 104 | 104 |
} |
| 105 |
- if job.status != 0 {
|
|
| 106 |
- return fmt.Errorf("%s", Tail(errorMessage, 1))
|
|
| 107 |
- } |
|
| 108 | 105 |
|
| 109 |
- return nil |
|
| 106 |
+ return job.err |
|
| 110 | 107 |
} |
| 111 | 108 |
|
| 112 | 109 |
func (job *Job) CallString() string {
|
| 113 | 110 |
return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", "))
|
| 114 | 111 |
} |
| 115 | 112 |
|
| 116 |
-func (job *Job) StatusString() string {
|
|
| 117 |
- // If the job hasn't completed, status string is empty |
|
| 118 |
- if job.end.IsZero() {
|
|
| 119 |
- return "" |
|
| 120 |
- } |
|
| 121 |
- var okerr string |
|
| 122 |
- if job.status == StatusOK {
|
|
| 123 |
- okerr = "OK" |
|
| 124 |
- } else {
|
|
| 125 |
- okerr = "ERR" |
|
| 126 |
- } |
|
| 127 |
- return fmt.Sprintf(" = %s (%d)", okerr, job.status)
|
|
| 128 |
-} |
|
| 129 |
- |
|
| 130 |
-// String returns a human-readable description of `job` |
|
| 131 |
-func (job *Job) String() string {
|
|
| 132 |
- return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString())
|
|
| 133 |
-} |
|
| 134 |
- |
|
| 135 | 113 |
func (job *Job) Env() *Env {
|
| 136 | 114 |
return job.env |
| 137 | 115 |
} |
| ... | ... |
@@ -235,23 +204,6 @@ func (job *Job) Printf(format string, args ...interface{}) (n int, err error) {
|
| 235 | 235 |
return fmt.Fprintf(job.Stdout, format, args...) |
| 236 | 236 |
} |
| 237 | 237 |
|
| 238 |
-func (job *Job) Errorf(format string, args ...interface{}) Status {
|
|
| 239 |
- if format[len(format)-1] != '\n' {
|
|
| 240 |
- format = format + "\n" |
|
| 241 |
- } |
|
| 242 |
- fmt.Fprintf(job.Stderr, format, args...) |
|
| 243 |
- return StatusErr |
|
| 244 |
-} |
|
| 245 |
- |
|
| 246 |
-func (job *Job) Error(err error) Status {
|
|
| 247 |
- fmt.Fprintf(job.Stderr, "%s\n", err) |
|
| 248 |
- return StatusErr |
|
| 249 |
-} |
|
| 250 |
- |
|
| 251 |
-func (job *Job) StatusCode() int {
|
|
| 252 |
- return int(job.status) |
|
| 253 |
-} |
|
| 254 |
- |
|
| 255 | 238 |
func (job *Job) SetCloseIO(val bool) {
|
| 256 | 239 |
job.closeIO = val |
| 257 | 240 |
} |
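
For reference (not part of the diff above): a minimal sketch of what a job handler and its caller look like under the new error-based contract, using only the engine API already shown in this patch (New, Register, Job, Run, Printf, Args, Name). The "greet" job, its argument check, and the printed messages are hypothetical examples, not code from this change.

package main

import (
	"fmt"

	"github.com/docker/docker/engine"
)

func main() {
	eng := engine.New()

	// A handler now returns a plain error; nil means the job succeeded.
	eng.Register("greet", func(job *engine.Job) error {
		if len(job.Args) != 1 {
			return fmt.Errorf("usage: %s NAME", job.Name)
		}
		job.Printf("hello %s\n", job.Args[0])
		return nil
	})

	// Run surfaces the handler's error value directly, instead of
	// converting a non-zero engine.Status into a synthesized error.
	if err := eng.Job("greet").Run(); err != nil {
		fmt.Println("job failed:", err)
	}
	if err := eng.Job("greet", "world").Run(); err != nil {
		fmt.Println("unexpected failure:", err)
	}
}

Under the old Status-based contract the same failure would have been reported through job.Errorf and Stderr and then mapped to StatusErr; with this change the handler's error value is what callers of Run receive.
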
| ... | ... |
@@ -2,43 +2,35 @@ package engine |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"bytes" |
| 5 |
+ "errors" |
|
| 5 | 6 |
"fmt" |
| 6 | 7 |
"testing" |
| 7 | 8 |
) |
| 8 | 9 |
|
| 9 |
-func TestJobStatusOK(t *testing.T) {
|
|
| 10 |
+func TestJobOK(t *testing.T) {
|
|
| 10 | 11 |
eng := New() |
| 11 |
- eng.Register("return_ok", func(job *Job) Status { return StatusOK })
|
|
| 12 |
+ eng.Register("return_ok", func(job *Job) error { return nil })
|
|
| 12 | 13 |
err := eng.Job("return_ok").Run()
|
| 13 | 14 |
if err != nil {
|
| 14 | 15 |
t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err)
|
| 15 | 16 |
} |
| 16 | 17 |
} |
| 17 | 18 |
|
| 18 |
-func TestJobStatusErr(t *testing.T) {
|
|
| 19 |
+func TestJobErr(t *testing.T) {
|
|
| 19 | 20 |
eng := New() |
| 20 |
- eng.Register("return_err", func(job *Job) Status { return StatusErr })
|
|
| 21 |
+ eng.Register("return_err", func(job *Job) error { return errors.New("return_err") })
|
|
| 21 | 22 |
err := eng.Job("return_err").Run()
|
| 22 | 23 |
if err == nil {
|
| 23 |
- t.Fatalf("When a job returns StatusErr, Run() should return an error")
|
|
| 24 |
- } |
|
| 25 |
-} |
|
| 26 |
- |
|
| 27 |
-func TestJobStatusNotFound(t *testing.T) {
|
|
| 28 |
- eng := New() |
|
| 29 |
- eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound })
|
|
| 30 |
- err := eng.Job("return_not_found").Run()
|
|
| 31 |
- if err == nil {
|
|
| 32 |
- t.Fatalf("When a job returns StatusNotFound, Run() should return an error")
|
|
| 24 |
+ t.Fatalf("When a job returns error, Run() should return an error")
|
|
| 33 | 25 |
} |
| 34 | 26 |
} |
| 35 | 27 |
|
| 36 | 28 |
func TestJobStdoutString(t *testing.T) {
|
| 37 | 29 |
eng := New() |
| 38 | 30 |
// FIXME: test multiple combinations of output and status |
| 39 |
- eng.Register("say_something_in_stdout", func(job *Job) Status {
|
|
| 31 |
+ eng.Register("say_something_in_stdout", func(job *Job) error {
|
|
| 40 | 32 |
job.Printf("Hello world\n")
|
| 41 |
- return StatusOK |
|
| 33 |
+ return nil |
|
| 42 | 34 |
}) |
| 43 | 35 |
|
| 44 | 36 |
job := eng.Job("say_something_in_stdout")
|
| ... | ... |
@@ -53,23 +45,3 @@ func TestJobStdoutString(t *testing.T) {
|
| 53 | 53 |
t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
|
| 54 | 54 |
} |
| 55 | 55 |
} |
| 56 |
- |
|
| 57 |
-func TestJobStderrString(t *testing.T) {
|
|
| 58 |
- eng := New() |
|
| 59 |
- // FIXME: test multiple combinations of output and status |
|
| 60 |
- eng.Register("say_something_in_stderr", func(job *Job) Status {
|
|
| 61 |
- job.Errorf("Something might happen\nHere it comes!\nOh no...\nSomething happened\n")
|
|
| 62 |
- return StatusOK |
|
| 63 |
- }) |
|
| 64 |
- |
|
| 65 |
- job := eng.Job("say_something_in_stderr")
|
|
| 66 |
- var outputBuffer = bytes.NewBuffer(nil) |
|
| 67 |
- job.Stderr.Add(outputBuffer) |
|
| 68 |
- if err := job.Run(); err != nil {
|
|
| 69 |
- t.Fatal(err) |
|
| 70 |
- } |
|
| 71 |
- var output = Tail(outputBuffer, 1) |
|
| 72 |
- if expectedOutput := "Something happened"; output != expectedOutput {
|
|
| 73 |
- t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output)
|
|
| 74 |
- } |
|
| 75 |
-} |
| ... | ... |
@@ -19,9 +19,9 @@ func TestShutdownEmpty(t *testing.T) {
|
| 19 | 19 |
func TestShutdownAfterRun(t *testing.T) {
|
| 20 | 20 |
eng := New() |
| 21 | 21 |
var called bool |
| 22 |
- eng.Register("foo", func(job *Job) Status {
|
|
| 22 |
+ eng.Register("foo", func(job *Job) error {
|
|
| 23 | 23 |
called = true |
| 24 |
- return StatusOK |
|
| 24 |
+ return nil |
|
| 25 | 25 |
}) |
| 26 | 26 |
if err := eng.Job("foo").Run(); err != nil {
|
| 27 | 27 |
t.Fatal(err) |
| ... | ... |
@@ -42,10 +42,10 @@ func TestShutdownDuringRun(t *testing.T) {
|
| 42 | 42 |
) |
| 43 | 43 |
eng := New() |
| 44 | 44 |
var completed bool |
| 45 |
- eng.Register("foo", func(job *Job) Status {
|
|
| 45 |
+ eng.Register("foo", func(job *Job) error {
|
|
| 46 | 46 |
time.Sleep(jobDelay) |
| 47 | 47 |
completed = true |
| 48 |
- return StatusOK |
|
| 48 |
+ return nil |
|
| 49 | 49 |
}) |
| 50 | 50 |
go eng.Job("foo").Run()
|
| 51 | 51 |
time.Sleep(50 * time.Millisecond) |
| ... | ... |
@@ -3,6 +3,7 @@ package events |
| 3 | 3 |
import ( |
| 4 | 4 |
"bytes" |
| 5 | 5 |
"encoding/json" |
| 6 |
+ "fmt" |
|
| 6 | 7 |
"io" |
| 7 | 8 |
"strings" |
| 8 | 9 |
"sync" |
| ... | ... |
@@ -45,7 +46,7 @@ func (e *Events) Install(eng *engine.Engine) error {
|
| 45 | 45 |
return nil |
| 46 | 46 |
} |
| 47 | 47 |
|
| 48 |
-func (e *Events) Get(job *engine.Job) engine.Status {
|
|
| 48 |
+func (e *Events) Get(job *engine.Job) error {
|
|
| 49 | 49 |
var ( |
| 50 | 50 |
since = job.GetenvInt64("since")
|
| 51 | 51 |
until = job.GetenvInt64("until")
|
| ... | ... |
@@ -54,7 +55,7 @@ func (e *Events) Get(job *engine.Job) engine.Status {
|
| 54 | 54 |
|
| 55 | 55 |
eventFilters, err := filters.FromParam(job.Getenv("filters"))
|
| 56 | 56 |
if err != nil {
|
| 57 |
- return job.Error(err) |
|
| 57 |
+ return err |
|
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 | 60 |
// If no until, disable timeout |
| ... | ... |
@@ -71,7 +72,7 @@ func (e *Events) Get(job *engine.Job) engine.Status {
|
| 71 | 71 |
// Resend every event in the [since, until] time interval. |
| 72 | 72 |
if since != 0 {
|
| 73 | 73 |
if err := e.writeCurrent(job, since, until, eventFilters); err != nil {
|
| 74 |
- return job.Error(err) |
|
| 74 |
+ return err |
|
| 75 | 75 |
} |
| 76 | 76 |
} |
| 77 | 77 |
|
| ... | ... |
@@ -79,31 +80,31 @@ func (e *Events) Get(job *engine.Job) engine.Status {
|
| 79 | 79 |
select {
|
| 80 | 80 |
case event, ok := <-listener: |
| 81 | 81 |
if !ok {
|
| 82 |
- return engine.StatusOK |
|
| 82 |
+ return nil |
|
| 83 | 83 |
} |
| 84 | 84 |
if err := writeEvent(job, event, eventFilters); err != nil {
|
| 85 |
- return job.Error(err) |
|
| 85 |
+ return err |
|
| 86 | 86 |
} |
| 87 | 87 |
case <-timeout.C: |
| 88 |
- return engine.StatusOK |
|
| 88 |
+ return nil |
|
| 89 | 89 |
} |
| 90 | 90 |
} |
| 91 | 91 |
} |
| 92 | 92 |
|
| 93 |
-func (e *Events) Log(job *engine.Job) engine.Status {
|
|
| 93 |
+func (e *Events) Log(job *engine.Job) error {
|
|
| 94 | 94 |
if len(job.Args) != 3 {
|
| 95 |
- return job.Errorf("usage: %s ACTION ID FROM", job.Name)
|
|
| 95 |
+ return fmt.Errorf("usage: %s ACTION ID FROM", job.Name)
|
|
| 96 | 96 |
} |
| 97 | 97 |
// not waiting for receivers |
| 98 | 98 |
go e.log(job.Args[0], job.Args[1], job.Args[2]) |
| 99 |
- return engine.StatusOK |
|
| 99 |
+ return nil |
|
| 100 | 100 |
} |
| 101 | 101 |
|
| 102 |
-func (e *Events) SubscribersCount(job *engine.Job) engine.Status {
|
|
| 102 |
+func (e *Events) SubscribersCount(job *engine.Job) error {
|
|
| 103 | 103 |
ret := &engine.Env{}
|
| 104 | 104 |
ret.SetInt("count", e.subscribersCount())
|
| 105 | 105 |
ret.WriteTo(job.Stdout) |
| 106 |
- return engine.StatusOK |
|
| 106 |
+ return nil |
|
| 107 | 107 |
} |
| 108 | 108 |
|
| 109 | 109 |
func writeEvent(job *engine.Job, event *utils.JSONMessage, eventFilters filters.Args) error {
|
| ... | ... |
@@ -2,6 +2,7 @@ package graph |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 | 4 |
"encoding/json" |
| 5 |
+ "fmt" |
|
| 5 | 6 |
"io" |
| 6 | 7 |
"io/ioutil" |
| 7 | 8 |
"os" |
| ... | ... |
@@ -19,14 +20,14 @@ import ( |
| 19 | 19 |
// uncompressed tar ball. |
| 20 | 20 |
// name is the set of tags to export. |
| 21 | 21 |
// out is the writer where the images are written to. |
| 22 |
-func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
|
| 22 |
+func (s *TagStore) CmdImageExport(job *engine.Job) error {
|
|
| 23 | 23 |
if len(job.Args) < 1 {
|
| 24 |
- return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
|
|
| 24 |
+ return fmt.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
|
|
| 25 | 25 |
} |
| 26 | 26 |
// get image json |
| 27 | 27 |
tempdir, err := ioutil.TempDir("", "docker-export-")
|
| 28 | 28 |
if err != nil {
|
| 29 |
- return job.Error(err) |
|
| 29 |
+ return err |
|
| 30 | 30 |
} |
| 31 | 31 |
defer os.RemoveAll(tempdir) |
| 32 | 32 |
|
| ... | ... |
@@ -48,13 +49,13 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
| 48 | 48 |
for tag, id := range rootRepo {
|
| 49 | 49 |
addKey(name, tag, id) |
| 50 | 50 |
if err := s.exportImage(job.Eng, id, tempdir); err != nil {
|
| 51 |
- return job.Error(err) |
|
| 51 |
+ return err |
|
| 52 | 52 |
} |
| 53 | 53 |
} |
| 54 | 54 |
} else {
|
| 55 | 55 |
img, err := s.LookupImage(name) |
| 56 | 56 |
if err != nil {
|
| 57 |
- return job.Error(err) |
|
| 57 |
+ return err |
|
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 | 60 |
if img != nil {
|
| ... | ... |
@@ -67,13 +68,13 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
| 67 | 67 |
addKey(repoName, repoTag, img.ID) |
| 68 | 68 |
} |
| 69 | 69 |
if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
|
| 70 |
- return job.Error(err) |
|
| 70 |
+ return err |
|
| 71 | 71 |
} |
| 72 | 72 |
|
| 73 | 73 |
} else {
|
| 74 | 74 |
// this must be an ID that didn't get looked up just right? |
| 75 | 75 |
if err := s.exportImage(job.Eng, name, tempdir); err != nil {
|
| 76 |
- return job.Error(err) |
|
| 76 |
+ return err |
|
| 77 | 77 |
} |
| 78 | 78 |
} |
| 79 | 79 |
} |
| ... | ... |
@@ -83,7 +84,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
| 83 | 83 |
if len(rootRepoMap) > 0 {
|
| 84 | 84 |
rootRepoJson, _ := json.Marshal(rootRepoMap) |
| 85 | 85 |
if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
|
| 86 |
- return job.Error(err) |
|
| 86 |
+ return err |
|
| 87 | 87 |
} |
| 88 | 88 |
} else {
|
| 89 | 89 |
log.Debugf("There were no repositories to write")
|
| ... | ... |
@@ -91,15 +92,15 @@ func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
|
| 91 | 91 |
|
| 92 | 92 |
fs, err := archive.Tar(tempdir, archive.Uncompressed) |
| 93 | 93 |
if err != nil {
|
| 94 |
- return job.Error(err) |
|
| 94 |
+ return err |
|
| 95 | 95 |
} |
| 96 | 96 |
defer fs.Close() |
| 97 | 97 |
|
| 98 | 98 |
if _, err := io.Copy(job.Stdout, fs); err != nil {
|
| 99 |
- return job.Error(err) |
|
| 99 |
+ return err |
|
| 100 | 100 |
} |
| 101 | 101 |
log.Debugf("End export job: %s", job.Name)
|
| 102 |
- return engine.StatusOK |
|
| 102 |
+ return nil |
|
| 103 | 103 |
} |
| 104 | 104 |
|
| 105 | 105 |
// FIXME: this should be a top-level function, not a class method |
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package graph |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 4 | 5 |
"strings" |
| 5 | 6 |
|
| 6 | 7 |
"github.com/docker/docker/engine" |
| ... | ... |
@@ -8,14 +9,14 @@ import ( |
| 8 | 8 |
"github.com/docker/docker/utils" |
| 9 | 9 |
) |
| 10 | 10 |
|
| 11 |
-func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
|
|
| 11 |
+func (s *TagStore) CmdHistory(job *engine.Job) error {
|
|
| 12 | 12 |
if n := len(job.Args); n != 1 {
|
| 13 |
- return job.Errorf("Usage: %s IMAGE", job.Name)
|
|
| 13 |
+ return fmt.Errorf("Usage: %s IMAGE", job.Name)
|
|
| 14 | 14 |
} |
| 15 | 15 |
name := job.Args[0] |
| 16 | 16 |
foundImage, err := s.LookupImage(name) |
| 17 | 17 |
if err != nil {
|
| 18 |
- return job.Error(err) |
|
| 18 |
+ return err |
|
| 19 | 19 |
} |
| 20 | 20 |
|
| 21 | 21 |
lookupMap := make(map[string][]string) |
| ... | ... |
@@ -41,7 +42,7 @@ func (s *TagStore) CmdHistory(job *engine.Job) engine.Status {
|
| 41 | 41 |
return nil |
| 42 | 42 |
}) |
| 43 | 43 |
if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
| 44 |
- return job.Error(err) |
|
| 44 |
+ return err |
|
| 45 | 45 |
} |
| 46 |
- return engine.StatusOK |
|
| 46 |
+ return nil |
|
| 47 | 47 |
} |
| ... | ... |
@@ -3,6 +3,7 @@ package graph |
| 3 | 3 |
import ( |
| 4 | 4 |
"bytes" |
| 5 | 5 |
"encoding/json" |
| 6 |
+ "fmt" |
|
| 6 | 7 |
"net/http" |
| 7 | 8 |
"net/url" |
| 8 | 9 |
|
| ... | ... |
@@ -14,9 +15,9 @@ import ( |
| 14 | 14 |
"github.com/docker/docker/utils" |
| 15 | 15 |
) |
| 16 | 16 |
|
| 17 |
-func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
|
|
| 17 |
+func (s *TagStore) CmdImport(job *engine.Job) error {
|
|
| 18 | 18 |
if n := len(job.Args); n != 2 && n != 3 {
|
| 19 |
- return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
|
|
| 19 |
+ return fmt.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
|
|
| 20 | 20 |
} |
| 21 | 21 |
var ( |
| 22 | 22 |
src = job.Args[0] |
| ... | ... |
@@ -37,7 +38,7 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
|
| 37 | 37 |
} else {
|
| 38 | 38 |
u, err := url.Parse(src) |
| 39 | 39 |
if err != nil {
|
| 40 |
- return job.Error(err) |
|
| 40 |
+ return err |
|
| 41 | 41 |
} |
| 42 | 42 |
if u.Scheme == "" {
|
| 43 | 43 |
u.Scheme = "http" |
| ... | ... |
@@ -47,7 +48,7 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
|
| 47 | 47 |
job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
|
| 48 | 48 |
resp, err = utils.Download(u.String()) |
| 49 | 49 |
if err != nil {
|
| 50 |
- return job.Error(err) |
|
| 50 |
+ return err |
|
| 51 | 51 |
} |
| 52 | 52 |
progressReader := progressreader.New(progressreader.Config{
|
| 53 | 53 |
In: resp.Body, |
| ... | ... |
@@ -69,20 +70,20 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
|
| 69 | 69 |
buildConfigJob.Setenv("config", job.Getenv("config"))
|
| 70 | 70 |
|
| 71 | 71 |
if err := buildConfigJob.Run(); err != nil {
|
| 72 |
- return job.Error(err) |
|
| 72 |
+ return err |
|
| 73 | 73 |
} |
| 74 | 74 |
if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil {
|
| 75 |
- return job.Error(err) |
|
| 75 |
+ return err |
|
| 76 | 76 |
} |
| 77 | 77 |
|
| 78 | 78 |
img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, &newConfig) |
| 79 | 79 |
if err != nil {
|
| 80 |
- return job.Error(err) |
|
| 80 |
+ return err |
|
| 81 | 81 |
} |
| 82 | 82 |
// Optionally register the image at REPO/TAG |
| 83 | 83 |
if repo != "" {
|
| 84 | 84 |
if err := s.Set(repo, tag, img.ID, true); err != nil {
|
| 85 |
- return job.Error(err) |
|
| 85 |
+ return err |
|
| 86 | 86 |
} |
| 87 | 87 |
} |
| 88 | 88 |
job.Stdout.Write(sf.FormatStatus("", img.ID))
|
| ... | ... |
@@ -93,5 +94,5 @@ func (s *TagStore) CmdImport(job *engine.Job) engine.Status {
|
| 93 | 93 |
if err = job.Eng.Job("log", "import", logID, "").Run(); err != nil {
|
| 94 | 94 |
log.Errorf("Error logging event 'import' for %s: %s", logID, err)
|
| 95 | 95 |
} |
| 96 |
- return engine.StatusOK |
|
| 96 |
+ return nil |
|
| 97 | 97 |
} |
| ... | ... |
@@ -1,6 +1,7 @@ |
| 1 | 1 |
package graph |
| 2 | 2 |
|
| 3 | 3 |
import ( |
| 4 |
+ "fmt" |
|
| 4 | 5 |
"log" |
| 5 | 6 |
"path" |
| 6 | 7 |
"strings" |
| ... | ... |
@@ -16,7 +17,7 @@ var acceptedImageFilterTags = map[string]struct{}{
|
| 16 | 16 |
"label": {},
|
| 17 | 17 |
} |
| 18 | 18 |
|
| 19 |
-func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
|
|
| 19 |
+func (s *TagStore) CmdImages(job *engine.Job) error {
|
|
| 20 | 20 |
var ( |
| 21 | 21 |
allImages map[string]*image.Image |
| 22 | 22 |
err error |
| ... | ... |
@@ -26,11 +27,11 @@ func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
|
| 26 | 26 |
|
| 27 | 27 |
imageFilters, err := filters.FromParam(job.Getenv("filters"))
|
| 28 | 28 |
if err != nil {
|
| 29 |
- return job.Error(err) |
|
| 29 |
+ return err |
|
| 30 | 30 |
} |
| 31 | 31 |
for name := range imageFilters {
|
| 32 | 32 |
if _, ok := acceptedImageFilterTags[name]; !ok {
|
| 33 |
- return job.Errorf("Invalid filter '%s'", name)
|
|
| 33 |
+ return fmt.Errorf("Invalid filter '%s'", name)
|
|
| 34 | 34 |
} |
| 35 | 35 |
} |
| 36 | 36 |
|
| ... | ... |
@@ -50,7 +51,7 @@ func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
|
| 50 | 50 |
allImages, err = s.graph.Heads() |
| 51 | 51 |
} |
| 52 | 52 |
if err != nil {
|
| 53 |
- return job.Error(err) |
|
| 53 |
+ return err |
|
| 54 | 54 |
} |
| 55 | 55 |
lookup := make(map[string]*engine.Env) |
| 56 | 56 |
s.Lock() |
| ... | ... |
@@ -133,7 +134,7 @@ func (s *TagStore) CmdImages(job *engine.Job) engine.Status {
|
| 133 | 133 |
|
| 134 | 134 |
outs.ReverseSort() |
| 135 | 135 |
if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
| 136 |
- return job.Error(err) |
|
| 136 |
+ return err |
|
| 137 | 137 |
} |
| 138 |
- return engine.StatusOK |
|
| 138 |
+ return nil |
|
| 139 | 139 |
} |
| ... | ... |
@@ -18,10 +18,10 @@ import (
 
 // Loads a set of images into the repository. This is the complementary of ImageExport.
 // The input stream is an uncompressed tar ball containing images and metadata.
-func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
+func (s *TagStore) CmdLoad(job *engine.Job) error {
 	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer os.RemoveAll(tmpImageDir)
 
| ... | ... |
@@ -30,11 +30,11 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
 	)
 
 	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
-		return job.Error(err)
+		return err
 	}
 	images, err := s.graph.Map()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	excludes := make([]string, len(images))
 	i := 0
| ... | ... |
@@ -43,18 +43,18 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
 		i++
 	}
 	if err := chrootarchive.Untar(job.Stdin, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	dirs, err := ioutil.ReadDir(repoDir)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	for _, d := range dirs {
 		if d.IsDir() {
 			if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
-				return job.Error(err)
+				return err
 			}
 		}
 	}
| ... | ... |
@@ -63,21 +63,21 @@ func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
 	if err == nil {
 		repositories := map[string]Repository{}
 		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
-			return job.Error(err)
+			return err
 		}
 
 		for imageName, tagMap := range repositories {
 			for tag, address := range tagMap {
 				if err := s.Set(imageName, tag, address, true); err != nil {
-					return job.Error(err)
+					return err
 				}
 			}
 		}
 	} else if !os.IsNotExist(err) {
-		return job.Error(err)
+		return err
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
| ... | ... |
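Call sites should not need to change: Job.Run already returns an error (the pull and push hunks below rely on this for the "log" job), so the handler's error now reaches the caller directly instead of being reduced to a status code first. A minimal, hypothetical call site, assuming "load" is the registered job name:

// Hypothetical caller; eng is an existing *engine.Engine and tarball an
// io.Reader carrying the exported images.
func loadImages(eng *engine.Engine, tarball io.Reader) error {
	job := eng.Job("load")
	if err := job.Stdin.Add(tarball); err != nil {
		return err
	}
	// A non-nil error returned here is now exactly what CmdLoad returned.
	return job.Run()
}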
@@ -3,9 +3,11 @@
 package graph
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
-	return job.Errorf("CmdLoad is not supported on this platform")
+func (s *TagStore) CmdLoad(job *engine.Job) error {
+	return fmt.Errorf("CmdLoad is not supported on this platform")
 }
| ... | ... |
@@ -20,9 +20,9 @@ import (
 	"github.com/docker/docker/utils"
 )
 
-func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
+func (s *TagStore) CmdPull(job *engine.Job) error {
 	if n := len(job.Args); n != 1 && n != 2 {
-		return job.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name)
 	}
 
 	var (
| ... | ... |
@@ -36,7 +36,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 	// Resolve the Repository name from fqn to RepositoryInfo
 	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if len(job.Args) > 1 {
| ... | ... |
@@ -52,21 +52,21 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 			// Another pull of the same repository is already taking place; just wait for it to finish
 			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
 			<-c
-			return engine.StatusOK
+			return nil
 		}
-		return job.Error(err)
+		return err
 	}
 	defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag))
 
 	log.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
 	endpoint, err := repoInfo.GetEndpoint()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	logName := repoInfo.LocalName
| ... | ... |
@@ -87,7 +87,7 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 		if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
 			log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
 		}
-		return engine.StatusOK
+		return nil
 	} else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable {
 		log.Errorf("Error from V2 registry: %s", err)
 	}
| ... | ... |
@@ -97,14 +97,14 @@ func (s *TagStore) CmdPull(job *engine.Job) engine.Status {
 
 	log.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
 	if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
 		log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
 func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
| ... | ... |
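The deduplication around poolAdd/poolRemove is untouched; only its success path changes from engine.StatusOK to nil while a second client waits on the channel. The pool itself lives elsewhere in the graph package; a generic sketch of the pattern (names and types are illustrative, not the actual implementation, and it only needs the sync package) looks like this:

// A minimal "wait for the in-flight operation" pool; purely illustrative.
type pullPool struct {
	mu       sync.Mutex
	inflight map[string]chan struct{}
}

// add returns (ch, true) when the caller should perform the pull itself, or
// (ch, false) when another pull is running and the caller should wait on ch.
func (p *pullPool) add(key string) (chan struct{}, bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.inflight[key]; ok {
		return c, false
	}
	c := make(chan struct{})
	p.inflight[key] = c
	return c, true
}

func (p *pullPool) remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.inflight[key]; ok {
		close(c) // wake every waiter
		delete(p.inflight, key)
	}
}

A waiter would then do c, first := p.add(key); if !first { <-c; return nil }, which mirrors the shape of the hunk above.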
@@ -492,9 +492,9 @@ func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *
 }
 
 // FIXME: Allow to interrupt current push when new push of same image is done.
-func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
+func (s *TagStore) CmdPush(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s IMAGE", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE", job.Name)
 	}
 	var (
 		localName = job.Args[0]
| ... | ... |
@@ -506,7 +506,7 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
 	// Resolve the Repository name from fqn to RepositoryInfo
 	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	tag := job.Getenv("tag")
| ... | ... |
@@ -514,18 +514,18 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
 	job.GetenvJson("metaHeaders", &metaHeaders)
 
 	if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil {
-		return job.Error(err)
+		return err
 	}
 	defer s.poolRemove("push", repoInfo.LocalName)
 
 	endpoint, err := repoInfo.GetEndpoint()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	reposLen := 1
| ... | ... |
@@ -536,23 +536,23 @@ func (s *TagStore) CmdPush(job *engine.Job) engine.Status {
 	// If it fails, try to get the repository
 	localRepo, exists := s.Repositories[repoInfo.LocalName]
 	if !exists {
-		return job.Errorf("Repository does not exist: %s", repoInfo.LocalName)
+		return fmt.Errorf("Repository does not exist: %s", repoInfo.LocalName)
 	}
 
 	if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 {
 		err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf)
 		if err == nil {
-			return engine.StatusOK
+			return nil
 		}
 
 		if err != ErrV2RegistryUnavailable {
-			return job.Errorf("Error pushing to registry: %s", err)
+			return fmt.Errorf("Error pushing to registry: %s", err)
 		}
 	}
 
 	if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 
 }
| ... | ... |
@@ -55,36 +55,36 @@ func (s *TagStore) Install(eng *engine.Engine) error {
 // That is a requirement of the current registry client implementation,
 // because a re-encoded json might invalidate the image checksum at
 // the next upload, even with functionaly identical content.
-func (s *TagStore) CmdSet(job *engine.Job) engine.Status {
+func (s *TagStore) CmdSet(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	var (
 		imgJSON = []byte(job.Getenv("json"))
 		layer   = job.Stdin
 	)
 	if len(imgJSON) == 0 {
-		return job.Errorf("mandatory key 'json' is not set")
+		return fmt.Errorf("mandatory key 'json' is not set")
 	}
 	// We have to pass an *image.Image object, even though it will be completely
 	// ignored in favor of the redundant json data.
 	// FIXME: the current prototype of Graph.Register is stupid and redundant.
 	img, err := image.NewImgJSON(imgJSON)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if err := s.graph.Register(img, layer); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // CmdGet returns information about an image.
 // If the image doesn't exist, an empty object is returned, to allow
 // checking for an image's existence.
-func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
+func (s *TagStore) CmdGet(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	res := &engine.Env{}
| ... | ... |
@@ -92,7 +92,7 @@ func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
 	// Note: if the image doesn't exist, LookupImage returns
 	// nil, nil.
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	if img != nil {
 		// We don't directly expose all fields of the Image objects,
| ... | ... |
@@ -116,23 +116,23 @@ func (s *TagStore) CmdGet(job *engine.Job) engine.Status {
 		res.SetJson("Parent", img.Parent)
 	}
 	res.WriteTo(job.Stdout)
-	return engine.StatusOK
+	return nil
 }
 
 // CmdLookup return an image encoded in JSON
-func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
+func (s *TagStore) CmdLookup(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	if image, err := s.LookupImage(name); err == nil && image != nil {
 		if job.GetenvBool("raw") {
 			b, err := image.RawJson()
 			if err != nil {
-				return job.Error(err)
+				return err
 			}
 			job.Stdout.Write(b)
-			return engine.StatusOK
+			return nil
 		}
 
 		out := &engine.Env{}
| ... | ... |
@@ -150,32 +150,32 @@ func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
 		out.SetInt64("Size", image.Size)
 		out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
 		if _, err = out.WriteTo(job.Stdout); err != nil {
-			return job.Error(err)
+			return err
 		}
-		return engine.StatusOK
+		return nil
 	}
-	return job.Errorf("No such image: %s", name)
+	return fmt.Errorf("No such image: %s", name)
 }
 
 // CmdTarLayer return the tarLayer of the image
-func (s *TagStore) CmdTarLayer(job *engine.Job) engine.Status {
+func (s *TagStore) CmdTarLayer(job *engine.Job) error {
 	if len(job.Args) != 1 {
-		return job.Errorf("usage: %s NAME", job.Name)
+		return fmt.Errorf("usage: %s NAME", job.Name)
 	}
 	name := job.Args[0]
 	if image, err := s.LookupImage(name); err == nil && image != nil {
 		fs, err := image.TarLayer()
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		defer fs.Close()
 
 		written, err := io.Copy(job.Stdout, fs)
 		if err != nil {
-			return job.Error(err)
+			return err
 		}
 		log.Debugf("rendered layer for %s of [%d] size", image.ID, written)
-		return engine.StatusOK
+		return nil
 	}
-	return job.Errorf("No such image: %s", name)
+	return fmt.Errorf("No such image: %s", name)
 }
| ... | ... |
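These TagStore commands are registered through (*TagStore).Install, which is not part of this excerpt; with the new signature the methods satisfy the engine's handler type directly, assuming engine.Handler is redefined as func(*engine.Job) error elsewhere in the patch. A sketch with an illustrative job name:

// Illustrative only: the real registration and job names live in
// graph/service.go and the engine package.
func installExample(eng *engine.Engine, s *TagStore) error {
	// CmdGet now has the signature func(*engine.Job) error, so it can be
	// passed to Register without any adapter.
	return eng.Register("image_get", s.CmdGet)
}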
@@ -1,19 +1,18 @@
 package graph
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/engine"
 )
 
-func (s *TagStore) CmdTag(job *engine.Job) engine.Status {
+func (s *TagStore) CmdTag(job *engine.Job) error {
 	if len(job.Args) != 2 && len(job.Args) != 3 {
-		return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
+		return fmt.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
 	}
 	var tag string
 	if len(job.Args) == 3 {
 		tag = job.Args[2]
 	}
-	if err := s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
-		return job.Error(err)
-	}
-	return engine.StatusOK
+	return s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force"))
 }
| ... | ... |
@@ -1,16 +1,17 @@
 package graph
 
 import (
+	"fmt"
 	"strings"
 
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/image"
 )
 
-func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
+func (s *TagStore) CmdViz(job *engine.Job) error {
 	images, _ := s.graph.Map()
 	if images == nil {
-		return engine.StatusOK
+		return nil
 	}
 	job.Stdout.Write([]byte("digraph docker {\n"))
 
| ... | ... |
@@ -21,7 +22,7 @@ func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
 	for _, image := range images {
 		parentImage, err = image.GetParent()
 		if err != nil {
-			return job.Errorf("Error while getting parent image: %v", err)
+			return fmt.Errorf("Error while getting parent image: %v", err)
 		}
 		if parentImage != nil {
 			job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
| ... | ... |
@@ -34,5 +35,5 @@ func (s *TagStore) CmdViz(job *engine.Job) engine.Status {
 		job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
 	}
 	job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
-	return engine.StatusOK
+	return nil
 }
| ... | ... |
@@ -1,6 +1,8 @@
 package registry
 
 import (
+	"fmt"
+
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/engine"
 )
| ... | ... |
@@ -38,7 +40,7 @@ func (s *Service) Install(eng *engine.Engine) error {
 // Auth contacts the public registry with the provided credentials,
 // and returns OK if authentication was sucessful.
 // It can be used to verify the validity of a client's credentials.
-func (s *Service) Auth(job *engine.Job) engine.Status {
+func (s *Service) Auth(job *engine.Job) error {
 	var (
 		authConfig = new(AuthConfig)
 		endpoint   *Endpoint
| ... | ... |
@@ -56,25 +58,25 @@ func (s *Service) Auth(job *engine.Job) engine.Status {
 	}
 
 	if index, err = ResolveIndexInfo(job, addr); err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	if endpoint, err = NewEndpoint(index); err != nil {
 		log.Errorf("unable to get new registry endpoint: %s", err)
-		return job.Error(err)
+		return err
 	}
 
 	authConfig.ServerAddress = endpoint.String()
 
 	if status, err = Login(authConfig, endpoint, HTTPRequestFactory(nil)); err != nil {
 		log.Errorf("unable to login against registry endpoint %s: %s", endpoint, err)
-		return job.Error(err)
+		return err
 	}
 
 	log.Infof("successful registry login for endpoint %s: %s", endpoint, status)
 	job.Printf("%s\n", status)
 
-	return engine.StatusOK
+	return nil
 }
 
 // Search queries the public registry for images matching the specified
| ... | ... |
@@ -93,9 +95,9 @@ func (s *Service) Auth(job *engine.Job) engine.Status {
 // Results are sent as a collection of structured messages (using engine.Table).
 // Each result is sent as a separate message.
 // Results are ordered by number of stars on the public registry.
-func (s *Service) Search(job *engine.Job) engine.Status {
+func (s *Service) Search(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s TERM", job.Name)
+		return fmt.Errorf("Usage: %s TERM", job.Name)
 	}
 	var (
 		term = job.Args[0]
| ... | ... |
@@ -107,20 +109,20 @@ func (s *Service) Search(job *engine.Job) engine.Status {
 
 	repoInfo, err := ResolveRepositoryInfo(job, term)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	// *TODO: Search multiple indexes.
 	endpoint, err := repoInfo.GetEndpoint()
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	r, err := NewSession(authConfig, HTTPRequestFactory(metaHeaders), endpoint, true)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	results, err := r.SearchRepositories(repoInfo.GetSearchTerm())
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	outs := engine.NewTable("star_count", 0)
 	for _, result := range results.Results {
| ... | ... |
@@ -130,31 +132,31 @@ func (s *Service) Search(job *engine.Job) engine.Status {
 	}
 	outs.ReverseSort()
 	if _, err := outs.WriteListTo(job.Stdout); err != nil {
-		return job.Error(err)
+		return err
 	}
-	return engine.StatusOK
+	return nil
 }
 
 // ResolveRepository splits a repository name into its components
 // and configuration of the associated registry.
-func (s *Service) ResolveRepository(job *engine.Job) engine.Status {
+func (s *Service) ResolveRepository(job *engine.Job) error {
 	var (
 		reposName = job.Args[0]
 	)
 
 	repoInfo, err := s.Config.NewRepositoryInfo(reposName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	out := engine.Env{}
 	err = out.SetJson("repository", repoInfo)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	out.WriteTo(job.Stdout)
 
-	return engine.StatusOK
+	return nil
 }
 
 // Convenience wrapper for calling resolve_repository Job from a running job.
| ... | ... |
@@ -175,24 +177,24 @@ func ResolveRepositoryInfo(jobContext *engine.Job, reposName string) (*Repositor
 }
 
 // ResolveIndex takes indexName and returns index info
-func (s *Service) ResolveIndex(job *engine.Job) engine.Status {
+func (s *Service) ResolveIndex(job *engine.Job) error {
 	var (
 		indexName = job.Args[0]
 	)
 
 	index, err := s.Config.NewIndexInfo(indexName)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 
 	out := engine.Env{}
 	err = out.SetJson("index", index)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	out.WriteTo(job.Stdout)
 
-	return engine.StatusOK
+	return nil
 }
 
 // Convenience wrapper for calling resolve_index Job from a running job.
| ... | ... |
@@ -213,13 +215,13 @@ func ResolveIndexInfo(jobContext *engine.Job, indexName string) (*IndexInfo, err
 }
 
 // GetRegistryConfig returns current registry configuration.
-func (s *Service) GetRegistryConfig(job *engine.Job) engine.Status {
+func (s *Service) GetRegistryConfig(job *engine.Job) error {
 	out := engine.Env{}
 	err := out.SetJson("config", s.Config)
 	if err != nil {
-		return job.Error(err)
+		return err
 	}
 	out.WriteTo(job.Stdout)
 
-	return engine.StatusOK
+	return nil
 }
| ... | ... |
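All of the registry service jobs above share one shape: build an engine.Env, serialize it to job.Stdout, and finish. After this change the failure path is a plain error return and the success path is nil. Condensed into a hypothetical handler:

// Hypothetical handler showing the common shape of the registry jobs above.
func (s *Service) cmdExample(job *engine.Job) error {
	out := engine.Env{}
	if err := out.SetJson("config", s.Config); err != nil {
		return err
	}
	// As in the handlers above, the WriteTo error is deliberately ignored.
	out.WriteTo(job.Stdout)
	return nil
}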
@@ -21,9 +21,9 @@ func (t *TrustStore) Install(eng *engine.Engine) error {
 	return nil
 }
 
-func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
+func (t *TrustStore) CmdCheckKey(job *engine.Job) error {
 	if n := len(job.Args); n != 1 {
-		return job.Errorf("Usage: %s NAMESPACE", job.Name)
+		return fmt.Errorf("Usage: %s NAMESPACE", job.Name)
 	}
 	var (
 		namespace = job.Args[0]
| ... | ... |
@@ -31,11 +31,11 @@ func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
 	)
 
 	if keyBytes == "" {
-		return job.Errorf("Missing PublicKey")
+		return fmt.Errorf("Missing PublicKey")
 	}
 	pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes))
 	if err != nil {
-		return job.Errorf("Error unmarshalling public key: %s", err)
+		return fmt.Errorf("Error unmarshalling public key: %s", err)
 	}
 
 	permission := uint16(job.GetenvInt("Permission"))
| ... | ... |
@@ -47,13 +47,13 @@ func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
 	defer t.RUnlock()
 	if t.graph == nil {
 		job.Stdout.Write([]byte("no graph"))
-		return engine.StatusOK
+		return nil
 	}
 
 	// Check if any expired grants
 	verified, err := t.graph.Verify(pk, namespace, permission)
 	if err != nil {
-		return job.Errorf("Error verifying key to namespace: %s", namespace)
+		return fmt.Errorf("Error verifying key to namespace: %s", namespace)
 	}
 	if !verified {
 		log.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID())
| ... | ... |
@@ -64,11 +64,11 @@ func (t *TrustStore) CmdCheckKey(job *engine.Job) engine.Status {
 		job.Stdout.Write([]byte("verified"))
 	}
 
-	return engine.StatusOK
+	return nil
 }
 
-func (t *TrustStore) CmdUpdateBase(job *engine.Job) engine.Status {
+func (t *TrustStore) CmdUpdateBase(job *engine.Job) error {
 	t.fetch()
 
-	return engine.StatusOK
+	return nil
 }
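None of this compiles unless the engine side of the patch changes as well: engine.Handler presumably becomes func(*engine.Job) error and Job.Run hands the handler's error straight back to the caller instead of translating a Status. A rough sketch of that assumption (the real Job.Run also manages streams and job lifecycle, omitted here):

// Assumed engine-side shape; not taken from this excerpt.
type Handler func(*Job) error

func (job *Job) Run() error {
	if job.handler == nil {
		return fmt.Errorf("%s: command not found", job.Name)
	}
	// Whatever the handler returns is what callers of Run observe.
	return job.handler(job)
}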