+ runtime: Add -r flag to dockerd in order to restart previously running container....
Guillaume J. Charmes authored on 2013/04/27 06:02:01... | ... |
@@ -993,11 +993,11 @@ func (srv *Server) CmdRun(stdin io.ReadCloser, stdout rcli.DockerConn, args ...s |
993 | 993 |
return nil |
994 | 994 |
} |
995 | 995 |
|
996 |
-func NewServer() (*Server, error) { |
|
996 |
+func NewServer(autoRestart bool) (*Server, error) { |
|
997 | 997 |
if runtime.GOARCH != "amd64" { |
998 | 998 |
log.Fatalf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH) |
999 | 999 |
} |
1000 |
- runtime, err := NewRuntime() |
|
1000 |
+ runtime, err := NewRuntime(autoRestart) |
|
1001 | 1001 |
if err != nil { |
1002 | 1002 |
return nil, err |
1003 | 1003 |
} |
... | ... |
@@ -28,6 +28,7 @@ func main() { |
28 | 28 |
// FIXME: Switch d and D ? (to be more sshd like) |
29 | 29 |
flDaemon := flag.Bool("d", false, "Daemon mode") |
30 | 30 |
flDebug := flag.Bool("D", false, "Debug mode") |
31 |
+ flAutoRestart := flag.Bool("r", false, "Restart previously running containers") |
|
31 | 32 |
bridgeName := flag.String("b", "", "Attach containers to a pre-existing network bridge") |
32 | 33 |
pidfile := flag.String("p", "/var/run/docker.pid", "File containing process PID") |
33 | 34 |
flag.Parse() |
... | ... |
@@ -45,7 +46,7 @@ func main() { |
45 | 45 |
flag.Usage() |
46 | 46 |
return |
47 | 47 |
} |
48 |
- if err := daemon(*pidfile); err != nil { |
|
48 |
+ if err := daemon(*pidfile, *flAutoRestart); err != nil { |
|
49 | 49 |
log.Fatal(err) |
50 | 50 |
} |
51 | 51 |
} else { |
... | ... |
@@ -82,7 +83,7 @@ func removePidFile(pidfile string) { |
82 | 82 |
} |
83 | 83 |
} |
84 | 84 |
|
85 |
-func daemon(pidfile string) error { |
|
85 |
+func daemon(pidfile string, autoRestart bool) error { |
|
86 | 86 |
if err := createPidFile(pidfile); err != nil { |
87 | 87 |
log.Fatal(err) |
88 | 88 |
} |
... | ... |
@@ -97,7 +98,7 @@ func daemon(pidfile string) error { |
97 | 97 |
os.Exit(0) |
98 | 98 |
}() |
99 | 99 |
|
100 |
- service, err := docker.NewServer() |
|
100 |
+ service, err := docker.NewServer(autoRestart) |
|
101 | 101 |
if err != nil { |
102 | 102 |
return err |
103 | 103 |
} |
... | ... |
@@ -31,6 +31,7 @@ type Runtime struct { |
31 | 31 |
idIndex *TruncIndex |
32 | 32 |
capabilities *Capabilities |
33 | 33 |
kernelVersion *KernelVersionInfo |
34 |
+ autoRestart bool |
|
34 | 35 |
} |
35 | 36 |
|
36 | 37 |
var sysInitPath string |
... | ... |
@@ -167,23 +168,6 @@ func (runtime *Runtime) Register(container *Container) error { |
167 | 167 |
// init the wait lock |
168 | 168 |
container.waitLock = make(chan struct{}) |
169 | 169 |
|
170 |
- // FIXME: if the container is supposed to be running but is not, auto restart it? |
|
171 |
- // if so, then we need to restart monitor and init a new lock |
|
172 |
- // If the container is supposed to be running, make sure of it |
|
173 |
- if container.State.Running { |
|
174 |
- if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil { |
|
175 |
- return err |
|
176 |
- } else { |
|
177 |
- if !strings.Contains(string(output), "RUNNING") { |
|
178 |
- Debugf("Container %s was supposed to be running be is not.", container.Id) |
|
179 |
- container.State.setStopped(-127) |
|
180 |
- if err := container.ToDisk(); err != nil { |
|
181 |
- return err |
|
182 |
- } |
|
183 |
- } |
|
184 |
- } |
|
185 |
- } |
|
186 |
- |
|
187 | 170 |
// Even if not running, we init the lock (prevents races in start/stop/kill) |
188 | 171 |
container.State.initLock() |
189 | 172 |
|
... | ... |
@@ -202,11 +186,43 @@ func (runtime *Runtime) Register(container *Container) error { |
202 | 202 |
runtime.containers.PushBack(container) |
203 | 203 |
runtime.idIndex.Add(container.Id) |
204 | 204 |
|
205 |
+ // When we actually restart, Start() does the monitoring. |
|
206 |
+ // However, when we simply 'reattach', we have to restart a monitor |
|
207 |
+ nomonitor := false |
|
208 |
+ |
|
209 |
+ // FIXME: if the container is supposed to be running but is not, auto restart it? |
|
210 |
+ // if so, then we need to restart monitor and init a new lock |
|
211 |
+ // If the container is supposed to be running, make sure of it |
|
212 |
+ if container.State.Running { |
|
213 |
+ if output, err := exec.Command("lxc-info", "-n", container.Id).CombinedOutput(); err != nil { |
|
214 |
+ return err |
|
215 |
+ } else { |
|
216 |
+ if !strings.Contains(string(output), "RUNNING") { |
|
217 |
+ Debugf("Container %s was supposed to be running but is not.", container.Id) |
|
218 |
+ if runtime.autoRestart { |
|
219 |
+ Debugf("Restarting") |
|
220 |
+ container.State.Ghost = false |
|
221 |
+ container.State.setStopped(0) |
|
222 |
+ if err := container.Start(); err != nil { |
|
223 |
+ return err |
|
224 |
+ } |
|
225 |
+ nomonitor = true |
|
226 |
+ } else { |
|
227 |
+ Debugf("Marking as stopped") |
|
228 |
+ container.State.setStopped(-127) |
|
229 |
+ if err := container.ToDisk(); err != nil { |
|
230 |
+ return err |
|
231 |
+ } |
|
232 |
+ } |
|
233 |
+ } |
|
234 |
+ } |
|
235 |
+ } |
|
236 |
+ |
|
205 | 237 |
// If the container is not running or just has been flagged not running |
206 | 238 |
// then close the wait lock chan (will be reset upon start) |
207 | 239 |
if !container.State.Running { |
208 | 240 |
close(container.waitLock) |
209 |
- } else { |
|
241 |
+ } else if !nomonitor { |
|
210 | 242 |
container.allocateNetwork() |
211 | 243 |
go container.monitor() |
212 | 244 |
} |
... | ... |
@@ -292,8 +308,8 @@ func (runtime *Runtime) restore() error { |
292 | 292 |
} |
293 | 293 |
|
294 | 294 |
// FIXME: harmonize with NewGraph() |
295 |
-func NewRuntime() (*Runtime, error) { |
|
296 |
- runtime, err := NewRuntimeFromDirectory("/var/lib/docker") |
|
295 |
+func NewRuntime(autoRestart bool) (*Runtime, error) { |
|
296 |
+ runtime, err := NewRuntimeFromDirectory("/var/lib/docker", autoRestart) |
|
297 | 297 |
if err != nil { |
298 | 298 |
return nil, err |
299 | 299 |
} |
... | ... |
@@ -314,19 +330,19 @@ func NewRuntime() (*Runtime, error) { |
314 | 314 |
_, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) |
315 | 315 |
runtime.capabilities.MemoryLimit = err1 == nil && err2 == nil |
316 | 316 |
if !runtime.capabilities.MemoryLimit { |
317 |
- log.Printf("WARNING: Your kernel does not support cgroup memory limit.") |
|
317 |
+ log.Printf("WARNING: Your kernel does not support cgroup memory limit.") |
|
318 | 318 |
} |
319 | 319 |
|
320 | 320 |
_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) |
321 | 321 |
runtime.capabilities.SwapLimit = err == nil |
322 | 322 |
if !runtime.capabilities.SwapLimit { |
323 |
- log.Printf("WARNING: Your kernel does not support cgroup swap limit.") |
|
323 |
+ log.Printf("WARNING: Your kernel does not support cgroup swap limit.") |
|
324 | 324 |
} |
325 | 325 |
} |
326 | 326 |
return runtime, nil |
327 | 327 |
} |
328 | 328 |
|
329 |
-func NewRuntimeFromDirectory(root string) (*Runtime, error) { |
|
329 |
+func NewRuntimeFromDirectory(root string, autoRestart bool) (*Runtime, error) { |
|
330 | 330 |
runtimeRepo := path.Join(root, "containers") |
331 | 331 |
|
332 | 332 |
if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) { |
... | ... |
@@ -363,6 +379,7 @@ func NewRuntimeFromDirectory(root string) (*Runtime, error) { |
363 | 363 |
authConfig: authConfig, |
364 | 364 |
idIndex: NewTruncIndex(), |
365 | 365 |
capabilities: &Capabilities{}, |
366 |
+ autoRestart: autoRestart, |
|
366 | 367 |
} |
367 | 368 |
|
368 | 369 |
if err := runtime.restore(); err != nil { |
... | ... |
@@ -63,7 +63,7 @@ func init() { |
63 | 63 |
NetworkBridgeIface = "testdockbr0" |
64 | 64 |
|
65 | 65 |
// Make it our Store root |
66 |
- runtime, err := NewRuntimeFromDirectory(unitTestStoreBase) |
|
66 |
+ runtime, err := NewRuntimeFromDirectory(unitTestStoreBase, false) |
|
67 | 67 |
if err != nil { |
68 | 68 |
panic(err) |
69 | 69 |
} |
... | ... |
@@ -89,7 +89,7 @@ func newTestRuntime() (*Runtime, error) { |
89 | 89 |
return nil, err |
90 | 90 |
} |
91 | 91 |
|
92 |
- runtime, err := NewRuntimeFromDirectory(root) |
|
92 |
+ runtime, err := NewRuntimeFromDirectory(root, false) |
|
93 | 93 |
if err != nil { |
94 | 94 |
return nil, err |
95 | 95 |
} |
... | ... |
@@ -320,7 +320,7 @@ func TestRestore(t *testing.T) { |
320 | 320 |
t.Fatal(err) |
321 | 321 |
} |
322 | 322 |
|
323 |
- runtime1, err := NewRuntimeFromDirectory(root) |
|
323 |
+ runtime1, err := NewRuntimeFromDirectory(root, false) |
|
324 | 324 |
if err != nil { |
325 | 325 |
t.Fatal(err) |
326 | 326 |
} |
... | ... |
@@ -379,7 +379,7 @@ func TestRestore(t *testing.T) { |
379 | 379 |
|
380 | 380 |
// Here we are simulating a docker restart - that is, reloading all containers |
381 | 381 |
// from scratch |
382 |
- runtime2, err := NewRuntimeFromDirectory(root) |
|
382 |
+ runtime2, err := NewRuntimeFromDirectory(root, false) |
|
383 | 383 |
if err != nil { |
384 | 384 |
t.Fatal(err) |
385 | 385 |
} |