Closes #9311. Handles container ID/name collisions in daemon functionality, as described in #8069.
Michael Crosby authored on 2015/02/07 07:01:28... | ... |
@@ -863,6 +863,12 @@ func (cli *DockerCli) CmdInspect(args ...string) error { |
863 | 863 |
for _, name := range cmd.Args() { |
864 | 864 |
obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) |
865 | 865 |
if err != nil { |
866 |
+ if strings.Contains(err.Error(), "Too many") { |
|
867 |
+ fmt.Fprintf(cli.err, "Error: %s", err.Error()) |
|
868 |
+ status = 1 |
|
869 |
+ continue |
|
870 |
+ } |
|
871 |
+ |
|
866 | 872 |
obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) |
867 | 873 |
if err != nil { |
868 | 874 |
if strings.Contains(err.Error(), "No such") { |
... | ... |
@@ -1114,7 +1114,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp |
1114 | 1114 |
w.Header().Set("Content-Type", "application/x-tar") |
1115 | 1115 |
if err := job.Run(); err != nil { |
1116 | 1116 |
log.Errorf("%s", err.Error()) |
1117 |
- if strings.Contains(strings.ToLower(err.Error()), "no such container") { |
|
1117 |
+ if strings.Contains(strings.ToLower(err.Error()), "no such id") { |
|
1118 | 1118 |
w.WriteHeader(http.StatusNotFound) |
1119 | 1119 |
} else if strings.Contains(err.Error(), "no such file or directory") { |
1120 | 1120 |
return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) |
... | ... |
@@ -87,9 +87,9 @@ func (b *Builder) commit(id string, autoCmd []string, comment string) error { |
87 | 87 |
} |
88 | 88 |
defer container.Unmount() |
89 | 89 |
} |
90 |
- container := b.Daemon.Get(id) |
|
91 |
- if container == nil { |
|
92 |
- return fmt.Errorf("An error occured while creating the container") |
|
90 |
+ container, err := b.Daemon.Get(id) |
|
91 |
+ if err != nil { |
|
92 |
+ return err |
|
93 | 93 |
} |
94 | 94 |
|
95 | 95 |
// Note: Actually copy the struct |
... | ... |
@@ -710,7 +710,11 @@ func fixPermissions(source, destination string, uid, gid int, destExisted bool) |
710 | 710 |
|
711 | 711 |
func (b *Builder) clearTmp() { |
712 | 712 |
for c := range b.TmpContainers { |
713 |
- tmp := b.Daemon.Get(c) |
|
713 |
+ tmp, err := b.Daemon.Get(c) |
|
714 |
+ if err != nil { |
|
715 |
+ fmt.Fprint(b.OutStream, err.Error()) |
|
716 |
+ } |
|
717 |
+ |
|
714 | 718 |
if err := b.Daemon.Destroy(tmp); err != nil { |
715 | 719 |
fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) |
716 | 720 |
return |
... | ... |
@@ -28,9 +28,9 @@ func (daemon *Daemon) ContainerAttach(job *engine.Job) engine.Status { |
28 | 28 |
stderr = job.GetenvBool("stderr") |
29 | 29 |
) |
30 | 30 |
|
31 |
- container := daemon.Get(name) |
|
32 |
- if container == nil { |
|
33 |
- return job.Errorf("No such container: %s", name) |
|
31 |
+ container, err := daemon.Get(name) |
|
32 |
+ if err != nil { |
|
33 |
+ return job.Error(err) |
|
34 | 34 |
} |
35 | 35 |
|
36 | 36 |
//logs |
... | ... |
@@ -9,24 +9,29 @@ func (daemon *Daemon) ContainerChanges(job *engine.Job) engine.Status { |
9 | 9 |
return job.Errorf("Usage: %s CONTAINER", job.Name) |
10 | 10 |
} |
11 | 11 |
name := job.Args[0] |
12 |
- if container := daemon.Get(name); container != nil { |
|
13 |
- outs := engine.NewTable("", 0) |
|
14 |
- changes, err := container.Changes() |
|
15 |
- if err != nil { |
|
16 |
- return job.Error(err) |
|
17 |
- } |
|
18 |
- for _, change := range changes { |
|
19 |
- out := &engine.Env{} |
|
20 |
- if err := out.Import(change); err != nil { |
|
21 |
- return job.Error(err) |
|
22 |
- } |
|
23 |
- outs.Add(out) |
|
24 |
- } |
|
25 |
- if _, err := outs.WriteListTo(job.Stdout); err != nil { |
|
12 |
+ |
|
13 |
+ container, error := daemon.Get(name) |
|
14 |
+ if error != nil { |
|
15 |
+ return job.Error(error) |
|
16 |
+ } |
|
17 |
+ |
|
18 |
+ outs := engine.NewTable("", 0) |
|
19 |
+ changes, err := container.Changes() |
|
20 |
+ if err != nil { |
|
21 |
+ return job.Error(err) |
|
22 |
+ } |
|
23 |
+ |
|
24 |
+ for _, change := range changes { |
|
25 |
+ out := &engine.Env{} |
|
26 |
+ if err := out.Import(change); err != nil { |
|
26 | 27 |
return job.Error(err) |
27 | 28 |
} |
28 |
- } else { |
|
29 |
- return job.Errorf("No such container: %s", name) |
|
29 |
+ outs.Add(out) |
|
30 | 30 |
} |
31 |
+ |
|
32 |
+ if _, err := outs.WriteListTo(job.Stdout); err != nil { |
|
33 |
+ return job.Error(err) |
|
34 |
+ } |
|
35 |
+ |
|
31 | 36 |
return engine.StatusOK |
32 | 37 |
} |
... | ... |
@@ -12,9 +12,9 @@ func (daemon *Daemon) ContainerCommit(job *engine.Job) engine.Status { |
12 | 12 |
} |
13 | 13 |
name := job.Args[0] |
14 | 14 |
|
15 |
- container := daemon.Get(name) |
|
16 |
- if container == nil { |
|
17 |
- return job.Errorf("No such container: %s", name) |
|
15 |
+ container, err := daemon.Get(name) |
|
16 |
+ if err != nil { |
|
17 |
+ return job.Error(err) |
|
18 | 18 |
} |
19 | 19 |
|
20 | 20 |
var ( |
... | ... |
@@ -1126,7 +1126,12 @@ func (container *Container) updateParentsHosts() error { |
1126 | 1126 |
if ref.ParentID == "0" { |
1127 | 1127 |
continue |
1128 | 1128 |
} |
1129 |
- c := container.daemon.Get(ref.ParentID) |
|
1129 |
+ |
|
1130 |
+ c, err := container.daemon.Get(ref.ParentID) |
|
1131 |
+ if err != nil { |
|
1132 |
+ log.Error(err) |
|
1133 |
+ } |
|
1134 |
+ |
|
1130 | 1135 |
if c != nil && !container.daemon.config.DisableNetwork && container.hostConfig.NetworkMode.IsPrivate() { |
1131 | 1136 |
log.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress) |
1132 | 1137 |
if err := etchosts.Update(c.HostsPath, container.NetworkSettings.IPAddress, ref.Name); err != nil { |
... | ... |
@@ -1395,9 +1400,9 @@ func (container *Container) GetMountLabel() string { |
1395 | 1395 |
|
1396 | 1396 |
func (container *Container) getIpcContainer() (*Container, error) { |
1397 | 1397 |
containerID := container.hostConfig.IpcMode.Container() |
1398 |
- c := container.daemon.Get(containerID) |
|
1399 |
- if c == nil { |
|
1400 |
- return nil, fmt.Errorf("no such container to join IPC: %s", containerID) |
|
1398 |
+ c, err := container.daemon.Get(containerID) |
|
1399 |
+ if err != nil { |
|
1400 |
+ return nil, err |
|
1401 | 1401 |
} |
1402 | 1402 |
if !c.IsRunning() { |
1403 | 1403 |
return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) |
... | ... |
@@ -1412,9 +1417,9 @@ func (container *Container) getNetworkedContainer() (*Container, error) { |
1412 | 1412 |
if len(parts) != 2 { |
1413 | 1413 |
return nil, fmt.Errorf("no container specified to join network") |
1414 | 1414 |
} |
1415 |
- nc := container.daemon.Get(parts[1]) |
|
1416 |
- if nc == nil { |
|
1417 |
- return nil, fmt.Errorf("no such container to join network: %s", parts[1]) |
|
1415 |
+ nc, err := container.daemon.Get(parts[1]) |
|
1416 |
+ if err != nil { |
|
1417 |
+ return nil, err |
|
1418 | 1418 |
} |
1419 | 1419 |
if !nc.IsRunning() { |
1420 | 1420 |
return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) |
... | ... |
@@ -16,18 +16,19 @@ func (daemon *Daemon) ContainerCopy(job *engine.Job) engine.Status { |
16 | 16 |
resource = job.Args[1] |
17 | 17 |
) |
18 | 18 |
|
19 |
- if container := daemon.Get(name); container != nil { |
|
19 |
+ container, err := daemon.Get(name) |
|
20 |
+ if err != nil { |
|
21 |
+ return job.Error(err) |
|
22 |
+ } |
|
20 | 23 |
|
21 |
- data, err := container.Copy(resource) |
|
22 |
- if err != nil { |
|
23 |
- return job.Error(err) |
|
24 |
- } |
|
25 |
- defer data.Close() |
|
24 |
+ data, err := container.Copy(resource) |
|
25 |
+ if err != nil { |
|
26 |
+ return job.Error(err) |
|
27 |
+ } |
|
28 |
+ defer data.Close() |
|
26 | 29 |
|
27 |
- if _, err := io.Copy(job.Stdout, data); err != nil { |
|
28 |
- return job.Error(err) |
|
29 |
- } |
|
30 |
- return engine.StatusOK |
|
30 |
+ if _, err := io.Copy(job.Stdout, data); err != nil { |
|
31 |
+ return job.Error(err) |
|
31 | 32 |
} |
32 |
- return job.Errorf("No such container: %s", name) |
|
33 |
+ return engine.StatusOK |
|
33 | 34 |
} |
... | ... |
@@ -132,9 +132,9 @@ func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode run |
132 | 132 |
return label.DisableSecOpt(), nil |
133 | 133 |
} |
134 | 134 |
if ipcContainer := ipcMode.Container(); ipcContainer != "" { |
135 |
- c := daemon.Get(ipcContainer) |
|
136 |
- if c == nil { |
|
137 |
- return nil, fmt.Errorf("no such container to join IPC: %s", ipcContainer) |
|
135 |
+ c, err := daemon.Get(ipcContainer) |
|
136 |
+ if err != nil { |
|
137 |
+ return nil, err |
|
138 | 138 |
} |
139 | 139 |
if !c.IsRunning() { |
140 | 140 |
return nil, fmt.Errorf("cannot join IPC of a non running container: %s", ipcContainer) |
... | ... |
@@ -155,28 +155,39 @@ func (daemon *Daemon) Install(eng *engine.Engine) error { |
155 | 155 |
return nil |
156 | 156 |
} |
157 | 157 |
|
158 |
-// Get looks for a container by the specified ID or name, and returns it. |
|
159 |
-// If the container is not found, or if an error occurs, nil is returned. |
|
160 |
-func (daemon *Daemon) Get(name string) *Container { |
|
161 |
- id, err := daemon.idIndex.Get(name) |
|
162 |
- if err == nil { |
|
163 |
- return daemon.containers.Get(id) |
|
158 |
+// Get looks for a container with the provided prefix |
|
159 |
+func (daemon *Daemon) Get(prefix string) (*Container, error) { |
|
160 |
+ if containerByID := daemon.containers.Get(prefix); containerByID != nil { |
|
161 |
+ |
|
162 |
+ // prefix is an exact match to a full container ID |
|
163 |
+ return containerByID, nil |
|
164 | 164 |
} |
165 | 165 |
|
166 |
- if c, _ := daemon.GetByName(name); c != nil { |
|
167 |
- return c |
|
166 |
+ // Either GetByName finds an entity matching prefix exactly, or it doesn't. |
|
167 |
+ // Check value of containerByName and ignore any errors |
|
168 |
+ containerByName, _ := daemon.GetByName(prefix) |
|
169 |
+ containerId, indexError := daemon.idIndex.Get(prefix) |
|
170 |
+ |
|
171 |
+ if containerByName != nil { |
|
172 |
+ |
|
173 |
+ // prefix is an exact match to a full container Name |
|
174 |
+ return containerByName, nil |
|
168 | 175 |
} |
169 | 176 |
|
170 |
- if err == truncindex.ErrDuplicateID { |
|
171 |
- log.Errorf("Short ID %s is ambiguous: please retry with more characters or use the full ID.\n", name) |
|
177 |
+ if containerId != "" { |
|
178 |
+ |
|
179 |
+ // prefix is a fuzzy match to a container ID |
|
180 |
+ return daemon.containers.Get(containerId), nil |
|
172 | 181 |
} |
173 |
- return nil |
|
182 |
+ |
|
183 |
+ return nil, indexError |
|
174 | 184 |
} |
175 | 185 |
|
176 | 186 |
// Exists returns a true if a container of the specified ID or name exists, |
177 | 187 |
// false otherwise. |
178 | 188 |
func (daemon *Daemon) Exists(id string) bool { |
179 |
- return daemon.Get(id) != nil |
|
189 |
+ c, _ := daemon.Get(id) |
|
190 |
+ return c != nil |
|
180 | 191 |
} |
181 | 192 |
|
182 | 193 |
func (daemon *Daemon) containerRoot(id string) string { |
... | ... |
@@ -715,9 +726,9 @@ func (daemon *Daemon) Children(name string) (map[string]*Container, error) { |
715 | 715 |
children := make(map[string]*Container) |
716 | 716 |
|
717 | 717 |
err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { |
718 |
- c := daemon.Get(e.ID()) |
|
719 |
- if c == nil { |
|
720 |
- return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p) |
|
718 |
+ c, err := daemon.Get(e.ID()) |
|
719 |
+ if err != nil { |
|
720 |
+ return err |
|
721 | 721 |
} |
722 | 722 |
children[p] = c |
723 | 723 |
return nil |
... | ... |
@@ -754,7 +765,10 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig. |
754 | 754 |
if err != nil { |
755 | 755 |
return err |
756 | 756 |
} |
757 |
- child := daemon.Get(parts["name"]) |
|
757 |
+ child, err := daemon.Get(parts["name"]) |
|
758 |
+ if err != nil { |
|
759 |
+ return err |
|
760 |
+ } |
|
758 | 761 |
if child == nil { |
759 | 762 |
return fmt.Errorf("Could not get container for %s", parts["name"]) |
760 | 763 |
} |
... | ... |
@@ -1100,18 +1114,18 @@ func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) { |
1100 | 1100 |
} |
1101 | 1101 |
|
1102 | 1102 |
func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) { |
1103 |
- c := daemon.Get(name) |
|
1104 |
- if c == nil { |
|
1105 |
- return nil, fmt.Errorf("no such container") |
|
1103 |
+ c, err := daemon.Get(name) |
|
1104 |
+ if err != nil { |
|
1105 |
+ return nil, err |
|
1106 | 1106 |
} |
1107 | 1107 |
ch := daemon.statsCollector.collect(c) |
1108 | 1108 |
return ch, nil |
1109 | 1109 |
} |
1110 | 1110 |
|
1111 | 1111 |
func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error { |
1112 |
- c := daemon.Get(name) |
|
1113 |
- if c == nil { |
|
1114 |
- return fmt.Errorf("no such container") |
|
1112 |
+ c, err := daemon.Get(name) |
|
1113 |
+ if err != nil { |
|
1114 |
+ return err |
|
1115 | 1115 |
} |
1116 | 1116 |
daemon.statsCollector.unsubscribe(c, ch) |
1117 | 1117 |
return nil |
1118 | 1118 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,101 @@ |
0 |
+package daemon |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "github.com/docker/docker/pkg/graphdb" |
|
4 |
+ "github.com/docker/docker/pkg/truncindex" |
|
5 |
+ "os" |
|
6 |
+ "path" |
|
7 |
+ "testing" |
|
8 |
+) |
|
9 |
+ |
|
10 |
+// |
|
11 |
+// https://github.com/docker/docker/issues/8069 |
|
12 |
+// |
|
13 |
+ |
|
14 |
+func TestGet(t *testing.T) { |
|
15 |
+ c1 := &Container{ |
|
16 |
+ ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", |
|
17 |
+ Name: "tender_bardeen", |
|
18 |
+ } |
|
19 |
+ c2 := &Container{ |
|
20 |
+ ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", |
|
21 |
+ Name: "drunk_hawking", |
|
22 |
+ } |
|
23 |
+ c3 := &Container{ |
|
24 |
+ ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", |
|
25 |
+ Name: "3cdbd1aa", |
|
26 |
+ } |
|
27 |
+ c4 := &Container{ |
|
28 |
+ ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", |
|
29 |
+ Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", |
|
30 |
+ } |
|
31 |
+ c5 := &Container{ |
|
32 |
+ ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", |
|
33 |
+ Name: "d22d69a2b896", |
|
34 |
+ } |
|
35 |
+ |
|
36 |
+ store := &contStore{ |
|
37 |
+ s: map[string]*Container{ |
|
38 |
+ c1.ID: c1, |
|
39 |
+ c2.ID: c2, |
|
40 |
+ c3.ID: c3, |
|
41 |
+ c4.ID: c4, |
|
42 |
+ c5.ID: c5, |
|
43 |
+ }, |
|
44 |
+ } |
|
45 |
+ |
|
46 |
+ index := truncindex.NewTruncIndex([]string{}) |
|
47 |
+ index.Add(c1.ID) |
|
48 |
+ index.Add(c2.ID) |
|
49 |
+ index.Add(c3.ID) |
|
50 |
+ index.Add(c4.ID) |
|
51 |
+ index.Add(c5.ID) |
|
52 |
+ |
|
53 |
+ daemonTestDbPath := path.Join(os.TempDir(), "daemon_test.db") |
|
54 |
+ graph, err := graphdb.NewSqliteConn(daemonTestDbPath) |
|
55 |
+ if err != nil { |
|
56 |
+ t.Fatalf("Failed to create daemon test sqlite database at %s", daemonTestDbPath) |
|
57 |
+ } |
|
58 |
+ graph.Set(c1.Name, c1.ID) |
|
59 |
+ graph.Set(c2.Name, c2.ID) |
|
60 |
+ graph.Set(c3.Name, c3.ID) |
|
61 |
+ graph.Set(c4.Name, c4.ID) |
|
62 |
+ graph.Set(c5.Name, c5.ID) |
|
63 |
+ |
|
64 |
+ daemon := &Daemon{ |
|
65 |
+ containers: store, |
|
66 |
+ idIndex: index, |
|
67 |
+ containerGraph: graph, |
|
68 |
+ } |
|
69 |
+ |
|
70 |
+ if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { |
|
71 |
+ t.Fatal("Should explicitly match full container IDs") |
|
72 |
+ } |
|
73 |
+ |
|
74 |
+ if container, _ := daemon.Get("75fb0b8009"); container != c4 { |
|
75 |
+ t.Fatal("Should match a partial ID") |
|
76 |
+ } |
|
77 |
+ |
|
78 |
+ if container, _ := daemon.Get("drunk_hawking"); container != c2 { |
|
79 |
+ t.Fatal("Should match a full name") |
|
80 |
+ } |
|
81 |
+ |
|
82 |
+ // c3.Name is a partial match for both c3.ID and c2.ID |
|
83 |
+ if c, _ := daemon.Get("3cdbd1aa"); c != c3 { |
|
84 |
+ t.Fatal("Should match a full name even though it collides with another container's ID") |
|
85 |
+ } |
|
86 |
+ |
|
87 |
+ if container, _ := daemon.Get("d22d69a2b896"); container != c5 { |
|
88 |
+ t.Fatal("Should match a container where the provided prefix is an exact match to its name, and is also a prefix for its ID") |
|
89 |
+ } |
|
90 |
+ |
|
91 |
+ if _, err := daemon.Get("3cdbd1"); err == nil { |
|
92 |
+ t.Fatal("Should return an error when provided a prefix that partially matches multiple container IDs") |
|
93 |
+ } |
|
94 |
+ |
|
95 |
+ if _, err := daemon.Get("nothing"); err == nil { |
|
96 |
+ t.Fatal("Should return an error when provided a prefix that is neither a name nor a partial match to an ID") |
|
97 |
+ } |
|
98 |
+ |
|
99 |
+ os.Remove(daemonTestDbPath) |
|
100 |
+} |
... | ... |
@@ -17,10 +17,10 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { |
17 | 17 |
removeVolume := job.GetenvBool("removeVolume") |
18 | 18 |
removeLink := job.GetenvBool("removeLink") |
19 | 19 |
forceRemove := job.GetenvBool("forceRemove") |
20 |
- container := daemon.Get(name) |
|
21 | 20 |
|
22 |
- if container == nil { |
|
23 |
- return job.Errorf("No such container: %s", name) |
|
21 |
+ container, err := daemon.Get(name) |
|
22 |
+ if err != nil { |
|
23 |
+ return job.Error(err) |
|
24 | 24 |
} |
25 | 25 |
|
26 | 26 |
if removeLink { |
... | ... |
@@ -36,7 +36,7 @@ func (daemon *Daemon) ContainerRm(job *engine.Job) engine.Status { |
36 | 36 |
if pe == nil { |
37 | 37 |
return job.Errorf("Cannot get parent %s for name %s", parent, name) |
38 | 38 |
} |
39 |
- parentContainer := daemon.Get(pe.ID()) |
|
39 |
+ parentContainer, _ := daemon.Get(pe.ID()) |
|
40 | 40 |
|
41 | 41 |
if parentContainer != nil { |
42 | 42 |
parentContainer.DisableLink(n) |
... | ... |
@@ -97,10 +97,9 @@ func (d *Daemon) unregisterExecCommand(execConfig *execConfig) { |
97 | 97 |
} |
98 | 98 |
|
99 | 99 |
func (d *Daemon) getActiveContainer(name string) (*Container, error) { |
100 |
- container := d.Get(name) |
|
101 |
- |
|
102 |
- if container == nil { |
|
103 |
- return nil, fmt.Errorf("No such container: %s", name) |
|
100 |
+ container, err := d.Get(name) |
|
101 |
+ if err != nil { |
|
102 |
+ return nil, err |
|
104 | 103 |
} |
105 | 104 |
|
106 | 105 |
if !container.IsRunning() { |
... | ... |
@@ -11,20 +11,23 @@ func (daemon *Daemon) ContainerExport(job *engine.Job) engine.Status { |
11 | 11 |
return job.Errorf("Usage: %s container_id", job.Name) |
12 | 12 |
} |
13 | 13 |
name := job.Args[0] |
14 |
- if container := daemon.Get(name); container != nil { |
|
15 |
- data, err := container.Export() |
|
16 |
- if err != nil { |
|
17 |
- return job.Errorf("%s: %s", name, err) |
|
18 |
- } |
|
19 |
- defer data.Close() |
|
20 | 14 |
|
21 |
- // Stream the entire contents of the container (basically a volatile snapshot) |
|
22 |
- if _, err := io.Copy(job.Stdout, data); err != nil { |
|
23 |
- return job.Errorf("%s: %s", name, err) |
|
24 |
- } |
|
25 |
- // FIXME: factor job-specific LogEvent to engine.Job.Run() |
|
26 |
- container.LogEvent("export") |
|
27 |
- return engine.StatusOK |
|
15 |
+ container, err := daemon.Get(name) |
|
16 |
+ if err != nil { |
|
17 |
+ return job.Error(err) |
|
28 | 18 |
} |
29 |
- return job.Errorf("No such container: %s", name) |
|
19 |
+ |
|
20 |
+ data, err := container.Export() |
|
21 |
+ if err != nil { |
|
22 |
+ return job.Errorf("%s: %s", name, err) |
|
23 |
+ } |
|
24 |
+ defer data.Close() |
|
25 |
+ |
|
26 |
+ // Stream the entire contents of the container (basically a volatile snapshot) |
|
27 |
+ if _, err := io.Copy(job.Stdout, data); err != nil { |
|
28 |
+ return job.Errorf("%s: %s", name, err) |
|
29 |
+ } |
|
30 |
+ // FIXME: factor job-specific LogEvent to engine.Job.Run() |
|
31 |
+ container.LogEvent("export") |
|
32 |
+ return engine.StatusOK |
|
30 | 33 |
} |
... | ... |
@@ -13,60 +13,62 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status { |
13 | 13 |
return job.Errorf("usage: %s NAME", job.Name) |
14 | 14 |
} |
15 | 15 |
name := job.Args[0] |
16 |
- if container := daemon.Get(name); container != nil { |
|
17 |
- container.Lock() |
|
18 |
- defer container.Unlock() |
|
19 |
- if job.GetenvBool("raw") { |
|
20 |
- b, err := json.Marshal(&struct { |
|
21 |
- *Container |
|
22 |
- HostConfig *runconfig.HostConfig |
|
23 |
- }{container, container.hostConfig}) |
|
24 |
- if err != nil { |
|
25 |
- return job.Error(err) |
|
26 |
- } |
|
27 |
- job.Stdout.Write(b) |
|
28 |
- return engine.StatusOK |
|
16 |
+ container, err := daemon.Get(name) |
|
17 |
+ if err != nil { |
|
18 |
+ return job.Error(err) |
|
19 |
+ } |
|
20 |
+ |
|
21 |
+ container.Lock() |
|
22 |
+ defer container.Unlock() |
|
23 |
+ if job.GetenvBool("raw") { |
|
24 |
+ b, err := json.Marshal(&struct { |
|
25 |
+ *Container |
|
26 |
+ HostConfig *runconfig.HostConfig |
|
27 |
+ }{container, container.hostConfig}) |
|
28 |
+ if err != nil { |
|
29 |
+ return job.Error(err) |
|
29 | 30 |
} |
31 |
+ job.Stdout.Write(b) |
|
32 |
+ return engine.StatusOK |
|
33 |
+ } |
|
30 | 34 |
|
31 |
- out := &engine.Env{} |
|
32 |
- out.SetJson("Id", container.ID) |
|
33 |
- out.SetAuto("Created", container.Created) |
|
34 |
- out.SetJson("Path", container.Path) |
|
35 |
- out.SetList("Args", container.Args) |
|
36 |
- out.SetJson("Config", container.Config) |
|
37 |
- out.SetJson("State", container.State) |
|
38 |
- out.Set("Image", container.ImageID) |
|
39 |
- out.SetJson("NetworkSettings", container.NetworkSettings) |
|
40 |
- out.Set("ResolvConfPath", container.ResolvConfPath) |
|
41 |
- out.Set("HostnamePath", container.HostnamePath) |
|
42 |
- out.Set("HostsPath", container.HostsPath) |
|
43 |
- out.SetJson("Name", container.Name) |
|
44 |
- out.SetInt("RestartCount", container.RestartCount) |
|
45 |
- out.Set("Driver", container.Driver) |
|
46 |
- out.Set("ExecDriver", container.ExecDriver) |
|
47 |
- out.Set("MountLabel", container.MountLabel) |
|
48 |
- out.Set("ProcessLabel", container.ProcessLabel) |
|
49 |
- out.SetJson("Volumes", container.Volumes) |
|
50 |
- out.SetJson("VolumesRW", container.VolumesRW) |
|
51 |
- out.SetJson("AppArmorProfile", container.AppArmorProfile) |
|
35 |
+ out := &engine.Env{} |
|
36 |
+ out.SetJson("Id", container.ID) |
|
37 |
+ out.SetAuto("Created", container.Created) |
|
38 |
+ out.SetJson("Path", container.Path) |
|
39 |
+ out.SetList("Args", container.Args) |
|
40 |
+ out.SetJson("Config", container.Config) |
|
41 |
+ out.SetJson("State", container.State) |
|
42 |
+ out.Set("Image", container.ImageID) |
|
43 |
+ out.SetJson("NetworkSettings", container.NetworkSettings) |
|
44 |
+ out.Set("ResolvConfPath", container.ResolvConfPath) |
|
45 |
+ out.Set("HostnamePath", container.HostnamePath) |
|
46 |
+ out.Set("HostsPath", container.HostsPath) |
|
47 |
+ out.SetJson("Name", container.Name) |
|
48 |
+ out.SetInt("RestartCount", container.RestartCount) |
|
49 |
+ out.Set("Driver", container.Driver) |
|
50 |
+ out.Set("ExecDriver", container.ExecDriver) |
|
51 |
+ out.Set("MountLabel", container.MountLabel) |
|
52 |
+ out.Set("ProcessLabel", container.ProcessLabel) |
|
53 |
+ out.SetJson("Volumes", container.Volumes) |
|
54 |
+ out.SetJson("VolumesRW", container.VolumesRW) |
|
55 |
+ out.SetJson("AppArmorProfile", container.AppArmorProfile) |
|
52 | 56 |
|
53 |
- out.SetList("ExecIDs", container.GetExecIDs()) |
|
57 |
+ out.SetList("ExecIDs", container.GetExecIDs()) |
|
54 | 58 |
|
55 |
- if children, err := daemon.Children(container.Name); err == nil { |
|
56 |
- for linkAlias, child := range children { |
|
57 |
- container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) |
|
58 |
- } |
|
59 |
+ if children, err := daemon.Children(container.Name); err == nil { |
|
60 |
+ for linkAlias, child := range children { |
|
61 |
+ container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) |
|
59 | 62 |
} |
63 |
+ } |
|
60 | 64 |
|
61 |
- out.SetJson("HostConfig", container.hostConfig) |
|
65 |
+ out.SetJson("HostConfig", container.hostConfig) |
|
62 | 66 |
|
63 |
- container.hostConfig.Links = nil |
|
64 |
- if _, err := out.WriteTo(job.Stdout); err != nil { |
|
65 |
- return job.Error(err) |
|
66 |
- } |
|
67 |
- return engine.StatusOK |
|
67 |
+ container.hostConfig.Links = nil |
|
68 |
+ if _, err := out.WriteTo(job.Stdout); err != nil { |
|
69 |
+ return job.Error(err) |
|
68 | 70 |
} |
69 |
- return job.Errorf("No such container: %s", name) |
|
71 |
+ return engine.StatusOK |
|
70 | 72 |
} |
71 | 73 |
|
72 | 74 |
func (daemon *Daemon) ContainerExecInspect(job *engine.Job) engine.Status { |
... | ... |
@@ -38,22 +38,23 @@ func (daemon *Daemon) ContainerKill(job *engine.Job) engine.Status { |
38 | 38 |
} |
39 | 39 |
} |
40 | 40 |
|
41 |
- if container := daemon.Get(name); container != nil { |
|
42 |
- // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) |
|
43 |
- if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { |
|
44 |
- if err := container.Kill(); err != nil { |
|
45 |
- return job.Errorf("Cannot kill container %s: %s", name, err) |
|
46 |
- } |
|
47 |
- container.LogEvent("kill") |
|
48 |
- } else { |
|
49 |
- // Otherwise, just send the requested signal |
|
50 |
- if err := container.KillSig(int(sig)); err != nil { |
|
51 |
- return job.Errorf("Cannot kill container %s: %s", name, err) |
|
52 |
- } |
|
53 |
- // FIXME: Add event for signals |
|
41 |
+ container, err := daemon.Get(name) |
|
42 |
+ if err != nil { |
|
43 |
+ return job.Error(err) |
|
44 |
+ } |
|
45 |
+ |
|
46 |
+ // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) |
|
47 |
+ if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { |
|
48 |
+ if err := container.Kill(); err != nil { |
|
49 |
+ return job.Errorf("Cannot kill container %s: %s", name, err) |
|
54 | 50 |
} |
51 |
+ container.LogEvent("kill") |
|
55 | 52 |
} else { |
56 |
- return job.Errorf("No such container: %s", name) |
|
53 |
+ // Otherwise, just send the requested signal |
|
54 |
+ if err := container.KillSig(int(sig)); err != nil { |
|
55 |
+ return job.Errorf("Cannot kill container %s: %s", name, err) |
|
56 |
+ } |
|
57 |
+ // FIXME: Add event for signals |
|
57 | 58 |
} |
58 | 59 |
return engine.StatusOK |
59 | 60 |
} |
... | ... |
@@ -63,16 +63,16 @@ func (daemon *Daemon) Containers(job *engine.Job) engine.Status { |
63 | 63 |
|
64 | 64 |
var beforeCont, sinceCont *Container |
65 | 65 |
if before != "" { |
66 |
- beforeCont = daemon.Get(before) |
|
67 |
- if beforeCont == nil { |
|
68 |
- return job.Error(fmt.Errorf("Could not find container with name or id %s", before)) |
|
66 |
+ beforeCont, err = daemon.Get(before) |
|
67 |
+ if err != nil { |
|
68 |
+ return job.Error(err) |
|
69 | 69 |
} |
70 | 70 |
} |
71 | 71 |
|
72 | 72 |
if since != "" { |
73 |
- sinceCont = daemon.Get(since) |
|
74 |
- if sinceCont == nil { |
|
75 |
- return job.Error(fmt.Errorf("Could not find container with name or id %s", since)) |
|
73 |
+ sinceCont, err = daemon.Get(since) |
|
74 |
+ if err != nil { |
|
75 |
+ return job.Error(err) |
|
76 | 76 |
} |
77 | 77 |
} |
78 | 78 |
|
... | ... |
@@ -40,9 +40,9 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) engine.Status { |
40 | 40 |
if tail == "" { |
41 | 41 |
tail = "all" |
42 | 42 |
} |
43 |
- container := daemon.Get(name) |
|
44 |
- if container == nil { |
|
45 |
- return job.Errorf("No such container: %s", name) |
|
43 |
+ container, err := daemon.Get(name) |
|
44 |
+ if err != nil { |
|
45 |
+ return job.Error(err) |
|
46 | 46 |
} |
47 | 47 |
cLog, err := container.ReadLog("json") |
48 | 48 |
if err != nil && os.IsNotExist(err) { |
... | ... |
@@ -9,9 +9,9 @@ func (daemon *Daemon) ContainerPause(job *engine.Job) engine.Status { |
9 | 9 |
return job.Errorf("Usage: %s CONTAINER", job.Name) |
10 | 10 |
} |
11 | 11 |
name := job.Args[0] |
12 |
- container := daemon.Get(name) |
|
13 |
- if container == nil { |
|
14 |
- return job.Errorf("No such container: %s", name) |
|
12 |
+ container, err := daemon.Get(name) |
|
13 |
+ if err != nil { |
|
14 |
+ return job.Error(err) |
|
15 | 15 |
} |
16 | 16 |
if err := container.Pause(); err != nil { |
17 | 17 |
return job.Errorf("Cannot pause container %s: %s", name, err) |
... | ... |
@@ -25,9 +25,9 @@ func (daemon *Daemon) ContainerUnpause(job *engine.Job) engine.Status { |
25 | 25 |
return job.Errorf("Usage: %s CONTAINER", job.Name) |
26 | 26 |
} |
27 | 27 |
name := job.Args[0] |
28 |
- container := daemon.Get(name) |
|
29 |
- if container == nil { |
|
30 |
- return job.Errorf("No such container: %s", name) |
|
28 |
+ container, err := daemon.Get(name) |
|
29 |
+ if err != nil { |
|
30 |
+ return job.Error(err) |
|
31 | 31 |
} |
32 | 32 |
if err := container.Unpause(); err != nil { |
33 | 33 |
return job.Errorf("Cannot unpause container %s: %s", name, err) |
... | ... |
@@ -11,9 +11,9 @@ func (daemon *Daemon) ContainerRename(job *engine.Job) engine.Status { |
11 | 11 |
oldName := job.Args[0] |
12 | 12 |
newName := job.Args[1] |
13 | 13 |
|
14 |
- container := daemon.Get(oldName) |
|
15 |
- if container == nil { |
|
16 |
- return job.Errorf("No such container: %s", oldName) |
|
14 |
+ container, err := daemon.Get(oldName) |
|
15 |
+ if err != nil { |
|
16 |
+ return job.Error(err) |
|
17 | 17 |
} |
18 | 18 |
|
19 | 19 |
oldName = container.Name |
... | ... |
@@ -19,14 +19,14 @@ func (daemon *Daemon) ContainerResize(job *engine.Job) engine.Status { |
19 | 19 |
if err != nil { |
20 | 20 |
return job.Error(err) |
21 | 21 |
} |
22 |
- |
|
23 |
- if container := daemon.Get(name); container != nil { |
|
24 |
- if err := container.Resize(height, width); err != nil { |
|
25 |
- return job.Error(err) |
|
26 |
- } |
|
27 |
- return engine.StatusOK |
|
22 |
+ container, err := daemon.Get(name) |
|
23 |
+ if err != nil { |
|
24 |
+ return job.Error(err) |
|
28 | 25 |
} |
29 |
- return job.Errorf("No such container: %s", name) |
|
26 |
+ if err := container.Resize(height, width); err != nil { |
|
27 |
+ return job.Error(err) |
|
28 |
+ } |
|
29 |
+ return engine.StatusOK |
|
30 | 30 |
} |
31 | 31 |
|
32 | 32 |
func (daemon *Daemon) ContainerExecResize(job *engine.Job) engine.Status { |
... | ... |
@@ -15,13 +15,13 @@ func (daemon *Daemon) ContainerRestart(job *engine.Job) engine.Status { |
15 | 15 |
if job.EnvExists("t") { |
16 | 16 |
t = job.GetenvInt("t") |
17 | 17 |
} |
18 |
- if container := daemon.Get(name); container != nil { |
|
19 |
- if err := container.Restart(int(t)); err != nil { |
|
20 |
- return job.Errorf("Cannot restart container %s: %s\n", name, err) |
|
21 |
- } |
|
22 |
- container.LogEvent("restart") |
|
23 |
- } else { |
|
24 |
- return job.Errorf("No such container: %s\n", name) |
|
18 |
+ container, err := daemon.Get(name) |
|
19 |
+ if err != nil { |
|
20 |
+ return job.Error(err) |
|
25 | 21 |
} |
22 |
+ if err := container.Restart(int(t)); err != nil { |
|
23 |
+ return job.Errorf("Cannot restart container %s: %s\n", name, err) |
|
24 |
+ } |
|
25 |
+ container.LogEvent("restart") |
|
26 | 26 |
return engine.StatusOK |
27 | 27 |
} |
... | ... |
@@ -14,12 +14,12 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) engine.Status { |
14 | 14 |
return job.Errorf("Usage: %s container_id", job.Name) |
15 | 15 |
} |
16 | 16 |
var ( |
17 |
- name = job.Args[0] |
|
18 |
- container = daemon.Get(name) |
|
17 |
+ name = job.Args[0] |
|
19 | 18 |
) |
20 | 19 |
|
21 |
- if container == nil { |
|
22 |
- return job.Errorf("No such container: %s", name) |
|
20 |
+ container, err := daemon.Get(name) |
|
21 |
+ if err != nil { |
|
22 |
+ return job.Error(err) |
|
23 | 23 |
} |
24 | 24 |
|
25 | 25 |
if container.IsPaused() { |
... | ... |
@@ -15,16 +15,16 @@ func (daemon *Daemon) ContainerStop(job *engine.Job) engine.Status { |
15 | 15 |
if job.EnvExists("t") { |
16 | 16 |
t = job.GetenvInt("t") |
17 | 17 |
} |
18 |
- if container := daemon.Get(name); container != nil { |
|
19 |
- if !container.IsRunning() { |
|
20 |
- return job.Errorf("Container already stopped") |
|
21 |
- } |
|
22 |
- if err := container.Stop(int(t)); err != nil { |
|
23 |
- return job.Errorf("Cannot stop container %s: %s\n", name, err) |
|
24 |
- } |
|
25 |
- container.LogEvent("stop") |
|
26 |
- } else { |
|
27 |
- return job.Errorf("No such container: %s\n", name) |
|
18 |
+ container, err := daemon.Get(name) |
|
19 |
+ if err != nil { |
|
20 |
+ return job.Error(err) |
|
28 | 21 |
} |
22 |
+ if !container.IsRunning() { |
|
23 |
+ return job.Errorf("Container already stopped") |
|
24 |
+ } |
|
25 |
+ if err := container.Stop(int(t)); err != nil { |
|
26 |
+ return job.Errorf("Cannot stop container %s: %s\n", name, err) |
|
27 |
+ } |
|
28 |
+ container.LogEvent("stop") |
|
29 | 29 |
return engine.StatusOK |
30 | 30 |
} |
... | ... |
@@ -21,59 +21,59 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) engine.Status { |
21 | 21 |
psArgs = job.Args[1] |
22 | 22 |
} |
23 | 23 |
|
24 |
- if container := daemon.Get(name); container != nil { |
|
25 |
- if !container.IsRunning() { |
|
26 |
- return job.Errorf("Container %s is not running", name) |
|
27 |
- } |
|
28 |
- pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) |
|
29 |
- if err != nil { |
|
30 |
- return job.Error(err) |
|
31 |
- } |
|
32 |
- output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() |
|
33 |
- if err != nil { |
|
34 |
- return job.Errorf("Error running ps: %s", err) |
|
35 |
- } |
|
24 |
+ container, err := daemon.Get(name) |
|
25 |
+ if err != nil { |
|
26 |
+ return job.Error(err) |
|
27 |
+ } |
|
28 |
+ if !container.IsRunning() { |
|
29 |
+ return job.Errorf("Container %s is not running", name) |
|
30 |
+ } |
|
31 |
+ pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) |
|
32 |
+ if err != nil { |
|
33 |
+ return job.Error(err) |
|
34 |
+ } |
|
35 |
+ output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() |
|
36 |
+ if err != nil { |
|
37 |
+ return job.Errorf("Error running ps: %s", err) |
|
38 |
+ } |
|
36 | 39 |
|
37 |
- lines := strings.Split(string(output), "\n") |
|
38 |
- header := strings.Fields(lines[0]) |
|
39 |
- out := &engine.Env{} |
|
40 |
- out.SetList("Titles", header) |
|
40 |
+ lines := strings.Split(string(output), "\n") |
|
41 |
+ header := strings.Fields(lines[0]) |
|
42 |
+ out := &engine.Env{} |
|
43 |
+ out.SetList("Titles", header) |
|
41 | 44 |
|
42 |
- pidIndex := -1 |
|
43 |
- for i, name := range header { |
|
44 |
- if name == "PID" { |
|
45 |
- pidIndex = i |
|
46 |
- } |
|
47 |
- } |
|
48 |
- if pidIndex == -1 { |
|
49 |
- return job.Errorf("Couldn't find PID field in ps output") |
|
45 |
+ pidIndex := -1 |
|
46 |
+ for i, name := range header { |
|
47 |
+ if name == "PID" { |
|
48 |
+ pidIndex = i |
|
50 | 49 |
} |
50 |
+ } |
|
51 |
+ if pidIndex == -1 { |
|
52 |
+ return job.Errorf("Couldn't find PID field in ps output") |
|
53 |
+ } |
|
51 | 54 |
|
52 |
- processes := [][]string{} |
|
53 |
- for _, line := range lines[1:] { |
|
54 |
- if len(line) == 0 { |
|
55 |
- continue |
|
56 |
- } |
|
57 |
- fields := strings.Fields(line) |
|
58 |
- p, err := strconv.Atoi(fields[pidIndex]) |
|
59 |
- if err != nil { |
|
60 |
- return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) |
|
61 |
- } |
|
55 |
+ processes := [][]string{} |
|
56 |
+ for _, line := range lines[1:] { |
|
57 |
+ if len(line) == 0 { |
|
58 |
+ continue |
|
59 |
+ } |
|
60 |
+ fields := strings.Fields(line) |
|
61 |
+ p, err := strconv.Atoi(fields[pidIndex]) |
|
62 |
+ if err != nil { |
|
63 |
+ return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) |
|
64 |
+ } |
|
62 | 65 |
|
63 |
- for _, pid := range pids { |
|
64 |
- if pid == p { |
|
65 |
- // Make sure number of fields equals number of header titles |
|
66 |
- // merging "overhanging" fields |
|
67 |
- process := fields[:len(header)-1] |
|
68 |
- process = append(process, strings.Join(fields[len(header)-1:], " ")) |
|
69 |
- processes = append(processes, process) |
|
70 |
- } |
|
66 |
+ for _, pid := range pids { |
|
67 |
+ if pid == p { |
|
68 |
+ // Make sure number of fields equals number of header titles |
|
69 |
+ // merging "overhanging" fields |
|
70 |
+ process := fields[:len(header)-1] |
|
71 |
+ process = append(process, strings.Join(fields[len(header)-1:], " ")) |
|
72 |
+ processes = append(processes, process) |
|
71 | 73 |
} |
72 | 74 |
} |
73 |
- out.SetJson("Processes", processes) |
|
74 |
- out.WriteTo(job.Stdout) |
|
75 |
- return engine.StatusOK |
|
76 |
- |
|
77 | 75 |
} |
78 |
- return job.Errorf("No such container: %s", name) |
|
76 |
+ out.SetJson("Processes", processes) |
|
77 |
+ out.WriteTo(job.Stdout) |
|
78 |
+ return engine.StatusOK |
|
79 | 79 |
} |
... | ... |
@@ -266,9 +266,9 @@ func (container *Container) applyVolumesFrom() error { |
266 | 266 |
continue |
267 | 267 |
} |
268 | 268 |
|
269 |
- c := container.daemon.Get(id) |
|
270 |
- if c == nil { |
|
271 |
- return fmt.Errorf("container %s not found, impossible to mount its volumes", id) |
|
269 |
+ c, err := container.daemon.Get(id) |
|
270 |
+ if err != nil { |
|
271 |
+ return err |
|
272 | 272 |
} |
273 | 273 |
|
274 | 274 |
var ( |
... | ... |
@@ -11,10 +11,11 @@ func (daemon *Daemon) ContainerWait(job *engine.Job) engine.Status { |
11 | 11 |
return job.Errorf("Usage: %s", job.Name) |
12 | 12 |
} |
13 | 13 |
name := job.Args[0] |
14 |
- if container := daemon.Get(name); container != nil { |
|
15 |
- status, _ := container.WaitStop(-1 * time.Second) |
|
16 |
- job.Printf("%d\n", status) |
|
17 |
- return engine.StatusOK |
|
14 |
+ container, err := daemon.Get(name) |
|
15 |
+ if err != nil { |
|
16 |
+ return job.Errorf("%s: %s", job.Name, err.Error()) |
|
18 | 17 |
} |
19 |
- return job.Errorf("%s: No such container: %s", job.Name, name) |
|
18 |
+ status, _ := container.WaitStop(-1 * time.Second) |
|
19 |
+ job.Printf("%d\n", status) |
|
20 |
+ return engine.StatusOK |
|
20 | 21 |
} |
... | ... |
@@ -325,7 +325,7 @@ func TestPostCreateNull(t *testing.T) { |
325 | 325 |
|
326 | 326 |
containerAssertExists(eng, containerID, t) |
327 | 327 |
|
328 |
- c := daemon.Get(containerID) |
|
328 |
+ c, _ := daemon.Get(containerID) |
|
329 | 329 |
if c.Config.Cpuset != "" { |
330 | 330 |
t.Fatalf("Cpuset should have been empty - instead its:" + c.Config.Cpuset) |
331 | 331 |
} |
... | ... |
@@ -282,12 +282,12 @@ func TestDaemonCreate(t *testing.T) { |
282 | 282 |
} |
283 | 283 |
|
284 | 284 |
// Make sure we can get the container with Get() |
285 |
- if daemon.Get(container.ID) == nil { |
|
285 |
+ if _, err := daemon.Get(container.ID); err != nil { |
|
286 | 286 |
t.Errorf("Unable to get newly created container") |
287 | 287 |
} |
288 | 288 |
|
289 | 289 |
// Make sure it is the right container |
290 |
- if daemon.Get(container.ID) != container { |
|
290 |
+ if c, _ := daemon.Get(container.ID); c != container { |
|
291 | 291 |
t.Errorf("Get() returned the wrong container") |
292 | 292 |
} |
293 | 293 |
|
... | ... |
@@ -383,8 +383,8 @@ func TestDestroy(t *testing.T) { |
383 | 383 |
} |
384 | 384 |
|
385 | 385 |
// Make sure daemon.Get() refuses to return the unexisting container |
386 |
- if daemon.Get(container.ID) != nil { |
|
387 |
- t.Errorf("Unable to get newly created container") |
|
386 |
+ if c, _ := daemon.Get(container.ID); c != nil { |
|
387 |
+ t.Errorf("Got a container that should not exist") |
|
388 | 388 |
} |
389 | 389 |
|
390 | 390 |
// Test double destroy |
... | ... |
@@ -407,16 +407,16 @@ func TestGet(t *testing.T) { |
407 | 407 |
container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) |
408 | 408 |
defer daemon.Destroy(container3) |
409 | 409 |
|
410 |
- if daemon.Get(container1.ID) != container1 { |
|
411 |
- t.Errorf("Get(test1) returned %v while expecting %v", daemon.Get(container1.ID), container1) |
|
410 |
+ if c, _ := daemon.Get(container1.ID); c != container1 { |
|
411 |
+ t.Errorf("Get(test1) returned %v while expecting %v", c, container1) |
|
412 | 412 |
} |
413 | 413 |
|
414 |
- if daemon.Get(container2.ID) != container2 { |
|
415 |
- t.Errorf("Get(test2) returned %v while expecting %v", daemon.Get(container2.ID), container2) |
|
414 |
+ if c, _ := daemon.Get(container2.ID); c != container2 { |
|
415 |
+ t.Errorf("Get(test2) returned %v while expecting %v", c, container2) |
|
416 | 416 |
} |
417 | 417 |
|
418 |
- if daemon.Get(container3.ID) != container3 { |
|
419 |
- t.Errorf("Get(test3) returned %v while expecting %v", daemon.Get(container3.ID), container3) |
|
418 |
+ if c, _ := daemon.Get(container3.ID); c != container3 { |
|
419 |
+ t.Errorf("Get(test3) returned %v while expecting %v", c, container3) |
|
420 | 420 |
} |
421 | 421 |
|
422 | 422 |
} |
... | ... |
@@ -485,9 +485,9 @@ func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daem |
485 | 485 |
t.Fatal(err) |
486 | 486 |
} |
487 | 487 |
|
488 |
- container := daemon.Get(id) |
|
489 |
- if container == nil { |
|
490 |
- t.Fatalf("Couldn't fetch test container %s", id) |
|
488 |
+ container, err := daemon.Get(id) |
|
489 |
+ if err != nil { |
|
490 |
+ t.Fatal(err) |
|
491 | 491 |
} |
492 | 492 |
|
493 | 493 |
setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { |
... | ... |
@@ -646,8 +646,8 @@ func TestRestore(t *testing.T) { |
646 | 646 |
if runningCount != 0 { |
647 | 647 |
t.Fatalf("Expected 0 container alive, %d found", runningCount) |
648 | 648 |
} |
649 |
- container3 := daemon2.Get(container1.ID) |
|
650 |
- if container3 == nil { |
|
649 |
+ container3, err := daemon2.Get(container1.ID) |
|
650 |
+ if err != nil { |
|
651 | 651 |
t.Fatal("Unable to Get container") |
652 | 652 |
} |
653 | 653 |
if err := container3.Run(); err != nil { |
... | ... |
@@ -666,16 +666,21 @@ func TestDefaultContainerName(t *testing.T) { |
666 | 666 |
t.Fatal(err) |
667 | 667 |
} |
668 | 668 |
|
669 |
- container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name")) |
|
669 |
+ container, err := daemon.Get(createNamedTestContainer(eng, config, t, "some_name")) |
|
670 |
+ if err != nil { |
|
671 |
+ t.Fatal(err) |
|
672 |
+ } |
|
670 | 673 |
containerID := container.ID |
671 | 674 |
|
672 | 675 |
if container.Name != "/some_name" { |
673 | 676 |
t.Fatalf("Expect /some_name got %s", container.Name) |
674 | 677 |
} |
675 | 678 |
|
676 |
- if c := daemon.Get("/some_name"); c == nil { |
|
679 |
+ c, err := daemon.Get("/some_name") |
|
680 |
+ if err != nil { |
|
677 | 681 |
t.Fatalf("Couldn't retrieve test container as /some_name") |
678 |
- } else if c.ID != containerID { |
|
682 |
+ } |
|
683 |
+ if c.ID != containerID { |
|
679 | 684 |
t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) |
680 | 685 |
} |
681 | 686 |
} |
... | ... |
@@ -690,14 +695,17 @@ func TestRandomContainerName(t *testing.T) { |
690 | 690 |
t.Fatal(err) |
691 | 691 |
} |
692 | 692 |
|
693 |
- container := daemon.Get(createTestContainer(eng, config, t)) |
|
693 |
+ container, err := daemon.Get(createTestContainer(eng, config, t)) |
|
694 |
+ if err != nil { |
|
695 |
+ t.Fatal(err) |
|
696 |
+ } |
|
694 | 697 |
containerID := container.ID |
695 | 698 |
|
696 | 699 |
if container.Name == "" { |
697 | 700 |
t.Fatalf("Expected not empty container name") |
698 | 701 |
} |
699 | 702 |
|
700 |
- if c := daemon.Get(container.Name); c == nil { |
|
703 |
+ if c, err := daemon.Get(container.Name); err != nil { |
|
701 | 704 |
log.Fatalf("Could not lookup container %s by its name", container.Name) |
702 | 705 |
} else if c.ID != containerID { |
703 | 706 |
log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) |
... | ... |
@@ -737,13 +745,16 @@ func TestContainerNameValidation(t *testing.T) { |
737 | 737 |
t.Fatal(err) |
738 | 738 |
} |
739 | 739 |
|
740 |
- container := daemon.Get(engine.Tail(outputBuffer, 1)) |
|
740 |
+ container, err := daemon.Get(engine.Tail(outputBuffer, 1)) |
|
741 |
+ if err != nil { |
|
742 |
+ t.Fatal(err) |
|
743 |
+ } |
|
741 | 744 |
|
742 | 745 |
if container.Name != "/"+test.Name { |
743 | 746 |
t.Fatalf("Expect /%s got %s", test.Name, container.Name) |
744 | 747 |
} |
745 | 748 |
|
746 |
- if c := daemon.Get("/" + test.Name); c == nil { |
|
749 |
+ if c, err := daemon.Get("/" + test.Name); err != nil { |
|
747 | 750 |
t.Fatalf("Couldn't retrieve test container as /%s", test.Name) |
748 | 751 |
} else if c.ID != container.ID { |
749 | 752 |
t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) |
... | ... |
@@ -762,7 +773,10 @@ func TestLinkChildContainer(t *testing.T) { |
762 | 762 |
t.Fatal(err) |
763 | 763 |
} |
764 | 764 |
|
765 |
- container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) |
|
765 |
+ container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) |
|
766 |
+ if err != nil { |
|
767 |
+ t.Fatal(err) |
|
768 |
+ } |
|
766 | 769 |
|
767 | 770 |
webapp, err := daemon.GetByName("/webapp") |
768 | 771 |
if err != nil { |
... | ... |
@@ -778,7 +792,10 @@ func TestLinkChildContainer(t *testing.T) { |
778 | 778 |
t.Fatal(err) |
779 | 779 |
} |
780 | 780 |
|
781 |
- childContainer := daemon.Get(createTestContainer(eng, config, t)) |
|
781 |
+ childContainer, err := daemon.Get(createTestContainer(eng, config, t)) |
|
782 |
+ if err != nil { |
|
783 |
+ t.Fatal(err) |
|
784 |
+ } |
|
782 | 785 |
|
783 | 786 |
if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { |
784 | 787 |
t.Fatal(err) |
... | ... |
@@ -804,7 +821,10 @@ func TestGetAllChildren(t *testing.T) { |
804 | 804 |
t.Fatal(err) |
805 | 805 |
} |
806 | 806 |
|
807 |
- container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) |
|
807 |
+ container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) |
|
808 |
+ if err != nil { |
|
809 |
+ t.Fatal(err) |
|
810 |
+ } |
|
808 | 811 |
|
809 | 812 |
webapp, err := daemon.GetByName("/webapp") |
810 | 813 |
if err != nil { |
... | ... |
@@ -820,7 +840,10 @@ func TestGetAllChildren(t *testing.T) { |
820 | 820 |
t.Fatal(err) |
821 | 821 |
} |
822 | 822 |
|
823 |
- childContainer := daemon.Get(createTestContainer(eng, config, t)) |
|
823 |
+ childContainer, err := daemon.Get(createTestContainer(eng, config, t)) |
|
824 |
+ if err != nil { |
|
825 |
+ t.Fatal(err) |
|
826 |
+ } |
|
824 | 827 |
|
825 | 828 |
if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { |
826 | 829 |
t.Fatal(err) |
... | ... |
@@ -117,7 +117,7 @@ func containerAssertExists(eng *engine.Engine, id string, t Fataler) { |
117 | 117 |
|
118 | 118 |
func containerAssertNotExists(eng *engine.Engine, id string, t Fataler) { |
119 | 119 |
daemon := mkDaemonFromEngine(eng, t) |
120 |
- if c := daemon.Get(id); c != nil { |
|
120 |
+ if c, _ := daemon.Get(id); c != nil { |
|
121 | 121 |
t.Fatal(fmt.Errorf("Container %s should not exist", id)) |
122 | 122 |
} |
123 | 123 |
} |
... | ... |
@@ -142,9 +142,9 @@ func assertHttpError(r *httptest.ResponseRecorder, t Fataler) { |
142 | 142 |
|
143 | 143 |
func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container { |
144 | 144 |
daemon := mkDaemonFromEngine(eng, t) |
145 |
- c := daemon.Get(id) |
|
146 |
- if c == nil { |
|
147 |
- t.Fatal(fmt.Errorf("No such container: %s", id)) |
|
145 |
+ c, err := daemon.Get(id) |
|
146 |
+ if err != nil { |
|
147 |
+ t.Fatal(err) |
|
148 | 148 |
} |
149 | 149 |
return c |
150 | 150 |
} |
... | ... |
@@ -10,10 +10,8 @@ import ( |
10 | 10 |
) |
11 | 11 |
|
12 | 12 |
var ( |
13 |
- // ErrNoID is thrown when attempting to use empty prefixes |
|
14 |
- ErrNoID = errors.New("prefix can't be empty") |
|
15 |
- // ErrDuplicateID is thrown when a duplicated id was found |
|
16 |
- ErrDuplicateID = errors.New("multiple IDs were found") |
|
13 |
+ ErrEmptyPrefix = errors.New("Prefix can't be empty") |
|
14 |
+ ErrAmbiguousPrefix = errors.New("Multiple IDs found with provided prefix") |
|
17 | 15 |
) |
18 | 16 |
|
19 | 17 |
func init() { |
... | ... |
@@ -47,7 +45,7 @@ func (idx *TruncIndex) addID(id string) error { |
47 | 47 |
return fmt.Errorf("illegal character: ' '") |
48 | 48 |
} |
49 | 49 |
if id == "" { |
50 |
- return ErrNoID |
|
50 |
+ return ErrEmptyPrefix |
|
51 | 51 |
} |
52 | 52 |
if _, exists := idx.ids[id]; exists { |
53 | 53 |
return fmt.Errorf("id already exists: '%s'", id) |
... | ... |
@@ -87,26 +85,26 @@ func (idx *TruncIndex) Delete(id string) error { |
87 | 87 |
// Get retrieves an ID from the TruncIndex. If there are multiple IDs |
88 | 88 |
// with the given prefix, an error is thrown. |
89 | 89 |
func (idx *TruncIndex) Get(s string) (string, error) { |
90 |
- idx.RLock() |
|
91 |
- defer idx.RUnlock() |
|
90 |
+ if s == "" { |
|
91 |
+ return "", ErrEmptyPrefix |
|
92 |
+ } |
|
92 | 93 |
var ( |
93 | 94 |
id string |
94 | 95 |
) |
95 |
- if s == "" { |
|
96 |
- return "", ErrNoID |
|
97 |
- } |
|
98 | 96 |
subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { |
99 | 97 |
if id != "" { |
100 | 98 |
// we haven't found the ID if there are two or more IDs |
101 | 99 |
id = "" |
102 |
- return ErrDuplicateID |
|
100 |
+ return ErrAmbiguousPrefix |
|
103 | 101 |
} |
104 | 102 |
id = string(prefix) |
105 | 103 |
return nil |
106 | 104 |
} |
107 | 105 |
|
106 |
+ idx.RLock() |
|
107 |
+ defer idx.RUnlock() |
|
108 | 108 |
if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { |
109 |
- return "", fmt.Errorf("no such id: %s", s) |
|
109 |
+ return "", err |
|
110 | 110 |
} |
111 | 111 |
if id != "" { |
112 | 112 |
return id, nil |
... | ... |
@@ -59,6 +59,11 @@ func TestTruncIndex(t *testing.T) { |
59 | 59 |
assertIndexGet(t, index, id[:4], "", true) |
60 | 60 |
assertIndexGet(t, index, id[:1], "", true) |
61 | 61 |
|
62 |
+ // An ambiguous id prefix should return an error |
|
63 |
+ if _, err := index.Get(id[:4]); err == nil || err != ErrAmbiguousPrefix { |
|
64 |
+ t.Fatal("An ambiguous id prefix should return an error") |
|
65 |
+ } |
|
66 |
+ |
|
62 | 67 |
// 7 characters should NOT conflict |
63 | 68 |
assertIndexGet(t, index, id[:7], id, false) |
64 | 69 |
assertIndexGet(t, index, id2[:7], id2, false) |