When the containerd 1.0 runtime changes were made, we inadvertently
removed the functionality where any running containers are killed on
startup when not using live-restore.
This change restores that behavior.
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
... | ... |
@@ -247,6 +247,11 @@ func (daemon *Daemon) restore() error { |
247 | 247 |
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID) |
248 | 248 |
return |
249 | 249 |
} |
250 |
+ } else if !daemon.configStore.LiveRestoreEnabled { |
|
251 |
+ if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) { |
|
252 |
+ logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container") |
|
253 |
+ return |
|
254 |
+ } |
|
250 | 255 |
} |
251 | 256 |
|
252 | 257 |
if c.IsRunning() || c.IsPaused() { |
... | ... |
@@ -317,24 +322,24 @@ func (daemon *Daemon) restore() error { |
317 | 317 |
activeSandboxes[c.NetworkSettings.SandboxID] = options |
318 | 318 |
mapLock.Unlock() |
319 | 319 |
} |
320 |
- } else { |
|
321 |
- // get list of containers we need to restart |
|
322 |
- |
|
323 |
- // Do not autostart containers which |
|
324 |
- // has endpoints in a swarm scope |
|
325 |
- // network yet since the cluster is |
|
326 |
- // not initialized yet. We will start |
|
327 |
- // it after the cluster is |
|
328 |
- // initialized. |
|
329 |
- if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { |
|
330 |
- mapLock.Lock() |
|
331 |
- restartContainers[c] = make(chan struct{}) |
|
332 |
- mapLock.Unlock() |
|
333 |
- } else if c.HostConfig != nil && c.HostConfig.AutoRemove { |
|
334 |
- mapLock.Lock() |
|
335 |
- removeContainers[c.ID] = c |
|
336 |
- mapLock.Unlock() |
|
337 |
- } |
|
320 |
+ } |
|
321 |
+ |
|
322 |
+ // get list of containers we need to restart |
|
323 |
+ |
|
324 |
+ // Do not autostart containers which |
|
325 |
+ // has endpoints in a swarm scope |
|
326 |
+ // network yet since the cluster is |
|
327 |
+ // not initialized yet. We will start |
|
328 |
+ // it after the cluster is |
|
329 |
+ // initialized. |
|
330 |
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint { |
|
331 |
+ mapLock.Lock() |
|
332 |
+ restartContainers[c] = make(chan struct{}) |
|
333 |
+ mapLock.Unlock() |
|
334 |
+ } else if c.HostConfig != nil && c.HostConfig.AutoRemove { |
|
335 |
+ mapLock.Lock() |
|
336 |
+ removeContainers[c.ID] = c |
|
337 |
+ mapLock.Unlock() |
|
338 | 338 |
} |
339 | 339 |
|
340 | 340 |
c.Lock() |
341 | 341 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,112 @@ |
0 |
+package container |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "context" |
|
4 |
+ "fmt" |
|
5 |
+ "testing" |
|
6 |
+ "time" |
|
7 |
+ |
|
8 |
+ "github.com/docker/docker/api/types" |
|
9 |
+ "github.com/docker/docker/api/types/container" |
|
10 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
11 |
+) |
|
12 |
+ |
|
13 |
+func TestDaemonRestartKillContainers(t *testing.T) { |
|
14 |
+ type testCase struct { |
|
15 |
+ desc string |
|
16 |
+ config *container.Config |
|
17 |
+ hostConfig *container.HostConfig |
|
18 |
+ |
|
19 |
+ xRunning bool |
|
20 |
+ xRunningLiveRestore bool |
|
21 |
+ } |
|
22 |
+ |
|
23 |
+ for _, c := range []testCase{ |
|
24 |
+ { |
|
25 |
+ desc: "container without restart policy", |
|
26 |
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}}, |
|
27 |
+ xRunningLiveRestore: true, |
|
28 |
+ }, |
|
29 |
+ { |
|
30 |
+ desc: "container with restart=always", |
|
31 |
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}}, |
|
32 |
+ hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}}, |
|
33 |
+ xRunning: true, |
|
34 |
+ xRunningLiveRestore: true, |
|
35 |
+ }, |
|
36 |
+ } { |
|
37 |
+ for _, liveRestoreEnabled := range []bool{false, true} { |
|
38 |
+ for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){ |
|
39 |
+ "kill-daemon": func(t *testing.T, d *daemon.Daemon) { |
|
40 |
+ if err := d.Kill(); err != nil { |
|
41 |
+ t.Fatal(err) |
|
42 |
+ } |
|
43 |
+ }, |
|
44 |
+ "stop-daemon": func(t *testing.T, d *daemon.Daemon) { |
|
45 |
+ d.Stop(t) |
|
46 |
+ }, |
|
47 |
+ } { |
|
48 |
+ t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, c.desc, fnName), func(t *testing.T) { |
|
49 |
+ c := c |
|
50 |
+ liveRestoreEnabled := liveRestoreEnabled |
|
51 |
+ stopDaemon := stopDaemon |
|
52 |
+ |
|
53 |
+ t.Parallel() |
|
54 |
+ |
|
55 |
+ d := daemon.New(t, "", "dockerd", daemon.Config{}) |
|
56 |
+ client, err := d.NewClient() |
|
57 |
+ if err != nil { |
|
58 |
+ t.Fatal(err) |
|
59 |
+ } |
|
60 |
+ |
|
61 |
+ var args []string |
|
62 |
+ if liveRestoreEnabled { |
|
63 |
+ args = []string{"--live-restore"} |
|
64 |
+ } |
|
65 |
+ |
|
66 |
+ d.StartWithBusybox(t, args...) |
|
67 |
+ defer d.Stop(t) |
|
68 |
+ ctx := context.Background() |
|
69 |
+ |
|
70 |
+ resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, "") |
|
71 |
+ if err != nil { |
|
72 |
+ t.Fatal(err) |
|
73 |
+ } |
|
74 |
+ defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true}) |
|
75 |
+ |
|
76 |
+ if err := client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil { |
|
77 |
+ t.Fatal(err) |
|
78 |
+ } |
|
79 |
+ |
|
80 |
+ stopDaemon(t, d) |
|
81 |
+ d.Start(t, args...) |
|
82 |
+ |
|
83 |
+ expected := c.xRunning |
|
84 |
+ if liveRestoreEnabled { |
|
85 |
+ expected = c.xRunningLiveRestore |
|
86 |
+ } |
|
87 |
+ |
|
88 |
+ var running bool |
|
89 |
+ for i := 0; i < 30; i++ { |
|
90 |
+ inspect, err := client.ContainerInspect(ctx, resp.ID) |
|
91 |
+ if err != nil { |
|
92 |
+ t.Fatal(err) |
|
93 |
+ } |
|
94 |
+ |
|
95 |
+ running = inspect.State.Running |
|
96 |
+ if running == expected { |
|
97 |
+ break |
|
98 |
+ } |
|
99 |
+ time.Sleep(2 * time.Second) |
|
100 |
+ |
|
101 |
+ } |
|
102 |
+ |
|
103 |
+ if running != expected { |
|
104 |
+ t.Fatalf("got unexpected running state, expected %v, got: %v", expected, running) |
|
105 |
+ } |
|
106 |
+ // TODO(cpuguy83): test pause states... this seems to be rather undefined currently |
|
107 |
+ }) |
|
108 |
+ } |
|
109 |
+ } |
|
110 |
+ } |
|
111 |
+} |