Browse code

golint fixes for daemon/ package

- some method names were changed to have a 'Locking' suffix, as the
downcased versions already existed; the existing functions simply
wrapped the downcased versions with locks.
- deleting unused functions
- package comment
- magic numbers replaced by golang constants
- comments all over

Signed-off-by: Morgan Bauer <mbauer@us.ibm.com>

Morgan Bauer authored on 2015/07/31 06:01:53
Showing 49 changed files
... ...
@@ -3,7 +3,7 @@
3 3
 package server
4 4
 
5 5
 func (s *Server) registerSubRouter() {
6
-	httpHandler := s.daemon.NetworkApiRouter()
6
+	httpHandler := s.daemon.NetworkAPIRouter()
7 7
 
8 8
 	subrouter := s.router.PathPrefix("/v{version:[0-9.]+}/networks").Subrouter()
9 9
 	subrouter.Methods("GET", "POST", "PUT", "DELETE").HandlerFunc(httpHandler)
... ...
@@ -29,7 +29,7 @@ func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, err
29 29
 		res = res[1:]
30 30
 	}
31 31
 
32
-	return container.Copy(res)
32
+	return container.copy(res)
33 33
 }
34 34
 
35 35
 // ContainerStatPath stats the filesystem resource at the specified path in the
... ...
@@ -142,7 +142,7 @@ func (container *Container) StatPath(path string) (stat *types.ContainerPathStat
142 142
 	defer container.Unmount()
143 143
 
144 144
 	err = container.mountVolumes()
145
-	defer container.UnmountVolumes(true)
145
+	defer container.unmountVolumes(true)
146 146
 	if err != nil {
147 147
 		return nil, err
148 148
 	}
... ...
@@ -177,7 +177,7 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
177 177
 	defer func() {
178 178
 		if err != nil {
179 179
 			// unmount any volumes
180
-			container.UnmountVolumes(true)
180
+			container.unmountVolumes(true)
181 181
 			// unmount the container's rootfs
182 182
 			container.Unmount()
183 183
 		}
... ...
@@ -212,13 +212,13 @@ func (container *Container) ArchivePath(path string) (content io.ReadCloser, sta
212 212
 
213 213
 	content = ioutils.NewReadCloserWrapper(data, func() error {
214 214
 		err := data.Close()
215
-		container.UnmountVolumes(true)
215
+		container.unmountVolumes(true)
216 216
 		container.Unmount()
217 217
 		container.Unlock()
218 218
 		return err
219 219
 	})
220 220
 
221
-	container.LogEvent("archive-path")
221
+	container.logEvent("archive-path")
222 222
 
223 223
 	return content, stat, nil
224 224
 }
... ...
@@ -239,7 +239,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
239 239
 	defer container.Unmount()
240 240
 
241 241
 	err = container.mountVolumes()
242
-	defer container.UnmountVolumes(true)
242
+	defer container.unmountVolumes(true)
243 243
 	if err != nil {
244 244
 		return err
245 245
 	}
... ...
@@ -288,7 +288,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
288 288
 	}
289 289
 
290 290
 	if !toVolume && container.hostConfig.ReadonlyRootfs {
291
-		return ErrContainerRootfsReadonly
291
+		return ErrRootFSReadOnly
292 292
 	}
293 293
 
294 294
 	options := &archive.TarOptions{
... ...
@@ -302,7 +302,7 @@ func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool,
302 302
 		return err
303 303
 	}
304 304
 
305
-	container.LogEvent("extract-to-dir")
305
+	container.logEvent("extract-to-dir")
306 306
 
307 307
 	return nil
308 308
 }
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"github.com/docker/docker/pkg/stdcopy"
7 7
 )
8 8
 
9
+// ContainerAttachWithLogsConfig holds the streams to use when connecting to a container to view logs.
9 10
 type ContainerAttachWithLogsConfig struct {
10 11
 	InStream                       io.ReadCloser
11 12
 	OutStream                      io.Writer
... ...
@@ -13,6 +14,7 @@ type ContainerAttachWithLogsConfig struct {
13 13
 	Logs, Stream                   bool
14 14
 }
15 15
 
16
+// ContainerAttachWithLogs attaches to logs according to the config passed in. See ContainerAttachWithLogsConfig.
16 17
 func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *ContainerAttachWithLogsConfig) error {
17 18
 	var errStream io.Writer
18 19
 
... ...
@@ -36,15 +38,18 @@ func (daemon *Daemon) ContainerAttachWithLogs(container *Container, c *Container
36 36
 		stderr = errStream
37 37
 	}
38 38
 
39
-	return container.AttachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
39
+	return container.attachWithLogs(stdin, stdout, stderr, c.Logs, c.Stream)
40 40
 }
41 41
 
42
+// ContainerWsAttachWithLogsConfig holds the streams to use when
43
+// attaching over a websocket; all stream data is delegated to the websocket to handle.
42 44
 type ContainerWsAttachWithLogsConfig struct {
43 45
 	InStream             io.ReadCloser
44 46
 	OutStream, ErrStream io.Writer
45 47
 	Logs, Stream         bool
46 48
 }
47 49
 
50
+// ContainerWsAttachWithLogs attaches to the container over a websocket connection. See ContainerWsAttachWithLogsConfig.
48 51
 func (daemon *Daemon) ContainerWsAttachWithLogs(container *Container, c *ContainerWsAttachWithLogsConfig) error {
49
-	return container.AttachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
52
+	return container.attachWithLogs(c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
50 53
 }
... ...
@@ -9,5 +9,5 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) {
9 9
 		return nil, err
10 10
 	}
11 11
 
12
-	return container.Changes()
12
+	return container.changes()
13 13
 }
... ...
@@ -5,6 +5,8 @@ import (
5 5
 	"github.com/docker/docker/runconfig"
6 6
 )
7 7
 
8
+// ContainerCommitConfig contains build configs for commit operation,
9
+// and is used when making a commit with the current state of the container.
8 10
 type ContainerCommitConfig struct {
9 11
 	Pause   bool
10 12
 	Repo    string
... ...
@@ -15,14 +17,14 @@ type ContainerCommitConfig struct {
15 15
 }
16 16
 
17 17
 // Commit creates a new filesystem image from the current state of a container.
18
-// The image can optionally be tagged into a repository
18
+// The image can optionally be tagged into a repository.
19 19
 func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*image.Image, error) {
20
-	if c.Pause && !container.IsPaused() {
21
-		container.Pause()
22
-		defer container.Unpause()
20
+	if c.Pause && !container.isPaused() {
21
+		container.pause()
22
+		defer container.unpause()
23 23
 	}
24 24
 
25
-	rwTar, err := container.ExportRw()
25
+	rwTar, err := container.exportContainerRw()
26 26
 	if err != nil {
27 27
 		return nil, err
28 28
 	}
... ...
@@ -55,6 +57,6 @@ func (daemon *Daemon) Commit(container *Container, c *ContainerCommitConfig) (*i
55 55
 			return img, err
56 56
 		}
57 57
 	}
58
-	container.LogEvent("commit")
58
+	container.logEvent("commit")
59 59
 	return img, nil
60 60
 }
... ...
@@ -18,8 +18,8 @@ type CommonConfig struct {
18 18
 	Bridge         bridgeConfig // Bridge holds bridge network specific configuration.
19 19
 	Context        map[string][]string
20 20
 	DisableBridge  bool
21
-	Dns            []string
22
-	DnsSearch      []string
21
+	DNS            []string
22
+	DNSSearch      []string
23 23
 	ExecDriver     string
24 24
 	ExecOptions    []string
25 25
 	ExecRoot       string
... ...
@@ -50,8 +50,8 @@ func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string)
50 50
 	cmd.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, defaultExec, usageFn("Exec driver to use"))
51 51
 	cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU"))
52 52
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
53
-	cmd.Var(opts.NewListOptsRef(&config.Dns, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
54
-	cmd.Var(opts.NewListOptsRef(&config.DnsSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
53
+	cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
54
+	cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
55 55
 	cmd.Var(opts.NewListOptsRef(&config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon"))
56 56
 	cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs"))
57 57
 	cmd.Var(opts.NewMapOpts(config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options"))
... ...
@@ -34,13 +34,12 @@ import (
34 34
 )
35 35
 
36 36
 var (
37
-	ErrNotATTY                 = errors.New("The PTY is not a file")
38
-	ErrNoTTY                   = errors.New("No PTY found")
39
-	ErrContainerStart          = errors.New("The container failed to start. Unknown error")
40
-	ErrContainerStartTimeout   = errors.New("The container failed to start due to timed out.")
41
-	ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only")
37
+	// ErrRootFSReadOnly is returned when a container
38
+	// rootfs is marked readonly.
39
+	ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
42 40
 )
43 41
 
42
+// ErrContainerNotRunning is the error returned when a requested container is not running; it holds the container's id.
44 43
 type ErrContainerNotRunning struct {
45 44
 	id string
46 45
 }
... ...
@@ -49,48 +48,49 @@ func (e ErrContainerNotRunning) Error() string {
49 49
 	return fmt.Sprintf("Container %s is not running", e.id)
50 50
 }
51 51
 
52
-type StreamConfig struct {
52
+type streamConfig struct {
53 53
 	stdout    *broadcastwriter.BroadcastWriter
54 54
 	stderr    *broadcastwriter.BroadcastWriter
55 55
 	stdin     io.ReadCloser
56 56
 	stdinPipe io.WriteCloser
57 57
 }
58 58
 
59
-// CommonContainer holds the settings for a container which are applicable
60
-// across all platforms supported by the daemon.
59
+// CommonContainer holds the fields for a container which are
60
+// applicable across all platforms supported by the daemon.
61 61
 type CommonContainer struct {
62
-	StreamConfig
63
-
64
-	*State `json:"State"` // Needed for remote api version <= 1.11
65
-	root   string         // Path to the "home" of the container, including metadata.
66
-	basefs string         // Path to the graphdriver mountpoint
67
-
68
-	ID                       string
69
-	Created                  time.Time
70
-	Path                     string
71
-	Args                     []string
72
-	Config                   *runconfig.Config
73
-	ImageID                  string `json:"Image"`
74
-	NetworkSettings          *network.Settings
75
-	LogPath                  string
76
-	Name                     string
77
-	Driver                   string
78
-	ExecDriver               string
79
-	MountLabel, ProcessLabel string
80
-	RestartCount             int
81
-	HasBeenStartedBefore     bool
82
-	HasBeenManuallyStopped   bool // used for unless-stopped restart policy
83
-	hostConfig               *runconfig.HostConfig
84
-	command                  *execdriver.Command
85
-	monitor                  *containerMonitor
86
-	execCommands             *execStore
87
-	daemon                   *Daemon
62
+	streamConfig
63
+	// State is embedded so that Container exposes its state methods directly.
64
+	*State          `json:"State"` // Needed for remote api version <= 1.11
65
+	root            string         // Path to the "home" of the container, including metadata.
66
+	basefs          string         // Path to the graphdriver mountpoint
67
+	ID              string
68
+	Created         time.Time
69
+	Path            string
70
+	Args            []string
71
+	Config          *runconfig.Config
72
+	ImageID         string `json:"Image"`
73
+	NetworkSettings *network.Settings
74
+	LogPath         string
75
+	Name            string
76
+	Driver          string
77
+	ExecDriver      string
78
+	// MountLabel contains the options for the 'mount' command
79
+	MountLabel             string
80
+	ProcessLabel           string
81
+	RestartCount           int
82
+	HasBeenStartedBefore   bool
83
+	HasBeenManuallyStopped bool // used for unless-stopped restart policy
84
+	hostConfig             *runconfig.HostConfig
85
+	command                *execdriver.Command
86
+	monitor                *containerMonitor
87
+	execCommands           *execStore
88
+	daemon                 *Daemon
88 89
 	// logDriver for closing
89 90
 	logDriver logger.Logger
90 91
 	logCopier *logger.Copier
91 92
 }
92 93
 
93
-func (container *Container) FromDisk() error {
94
+func (container *Container) fromDisk() error {
94 95
 	pth, err := container.jsonPath()
95 96
 	if err != nil {
96 97
 		return err
... ...
@@ -131,10 +131,10 @@ func (container *Container) toDisk() error {
131 131
 		return err
132 132
 	}
133 133
 
134
-	return container.WriteHostConfig()
134
+	return container.writeHostConfig()
135 135
 }
136 136
 
137
-func (container *Container) ToDisk() error {
137
+func (container *Container) toDiskLocking() error {
138 138
 	container.Lock()
139 139
 	err := container.toDisk()
140 140
 	container.Unlock()
... ...
@@ -165,7 +165,7 @@ func (container *Container) readHostConfig() error {
165 165
 	return json.NewDecoder(f).Decode(&container.hostConfig)
166 166
 }
167 167
 
168
-func (container *Container) WriteHostConfig() error {
168
+func (container *Container) writeHostConfig() error {
169 169
 	data, err := json.Marshal(container.hostConfig)
170 170
 	if err != nil {
171 171
 		return err
... ...
@@ -179,7 +179,7 @@ func (container *Container) WriteHostConfig() error {
179 179
 	return ioutil.WriteFile(pth, data, 0666)
180 180
 }
181 181
 
182
-func (container *Container) LogEvent(action string) {
182
+func (container *Container) logEvent(action string) {
183 183
 	d := container.daemon
184 184
 	d.EventsService.Log(
185 185
 		action,
... ...
@@ -188,7 +188,7 @@ func (container *Container) LogEvent(action string) {
188 188
 	)
189 189
 }
190 190
 
191
-// Evaluates `path` in the scope of the container's basefs, with proper path
191
+// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path
192 192
 // sanitisation. Symlinks are all scoped to the basefs of the container, as
193 193
 // though the container's basefs was `/`.
194 194
 //
... ...
@@ -221,18 +221,18 @@ func (container *Container) GetResourcePath(path string) (string, error) {
221 221
 //       if no component of the returned path changes (such as a component
222 222
 //       symlinking to a different path) between using this method and using the
223 223
 //       path. See symlink.FollowSymlinkInScope for more details.
224
-func (container *Container) GetRootResourcePath(path string) (string, error) {
224
+func (container *Container) getRootResourcePath(path string) (string, error) {
225 225
 	// IMPORTANT - These are paths on the OS where the daemon is running, hence
226 226
 	// any filepath operations must be done in an OS agnostic way.
227 227
 	cleanPath := filepath.Join(string(os.PathSeparator), path)
228 228
 	return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
229 229
 }
230 230
 
231
-func (container *Container) ExportRw() (archive.Archive, error) {
231
+func (container *Container) exportContainerRw() (archive.Archive, error) {
232 232
 	if container.daemon == nil {
233 233
 		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
234 234
 	}
235
-	archive, err := container.daemon.Diff(container)
235
+	archive, err := container.daemon.diff(container)
236 236
 	if err != nil {
237 237
 		return nil, err
238 238
 	}
... ...
@@ -243,6 +243,10 @@ func (container *Container) ExportRw() (archive.Archive, error) {
243 243
 		nil
244 244
 }
245 245
 
246
+// Start prepares the container to run by setting up everything the
247
+// container needs, such as storage and networking, as well as links
248
+// between containers. The container is left waiting for a signal to
249
+// begin running.
246 250
 func (container *Container) Start() (err error) {
247 251
 	container.Lock()
248 252
 	defer container.Unlock()
... ...
@@ -266,7 +270,7 @@ func (container *Container) Start() (err error) {
266 266
 			}
267 267
 			container.toDisk()
268 268
 			container.cleanup()
269
-			container.LogEvent("die")
269
+			container.logEvent("die")
270 270
 		}
271 271
 	}()
272 272
 
... ...
@@ -302,7 +306,7 @@ func (container *Container) Start() (err error) {
302 302
 	return container.waitForStart()
303 303
 }
304 304
 
305
-func (container *Container) Run() error {
305
+func (container *Container) run() error {
306 306
 	if err := container.Start(); err != nil {
307 307
 		return err
308 308
 	}
... ...
@@ -311,7 +315,7 @@ func (container *Container) Run() error {
311 311
 	return nil
312 312
 }
313 313
 
314
-func (container *Container) Output() (output []byte, err error) {
314
+func (container *Container) output() (output []byte, err error) {
315 315
 	pipe := container.StdoutPipe()
316 316
 	defer pipe.Close()
317 317
 	if err := container.Start(); err != nil {
... ...
@@ -322,7 +326,7 @@ func (container *Container) Output() (output []byte, err error) {
322 322
 	return output, err
323 323
 }
324 324
 
325
-// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data
325
+// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
326 326
 // to the standard input of the container's active process.
327 327
 // Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
328 328
 // which can be used to retrieve the standard output (and error) generated
... ...
@@ -330,17 +334,17 @@ func (container *Container) Output() (output []byte, err error) {
330 330
 // copied and delivered to all StdoutPipe and StderrPipe consumers, using
331 331
 // a kind of "broadcaster".
332 332
 
333
-func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {
333
+func (streamConfig *streamConfig) StdinPipe() io.WriteCloser {
334 334
 	return streamConfig.stdinPipe
335 335
 }
336 336
 
337
-func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
337
+func (streamConfig *streamConfig) StdoutPipe() io.ReadCloser {
338 338
 	reader, writer := io.Pipe()
339 339
 	streamConfig.stdout.AddWriter(writer)
340 340
 	return ioutils.NewBufReader(reader)
341 341
 }
342 342
 
343
-func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
343
+func (streamConfig *streamConfig) StderrPipe() io.ReadCloser {
344 344
 	reader, writer := io.Pipe()
345 345
 	streamConfig.stderr.AddWriter(writer)
346 346
 	return ioutils.NewBufReader(reader)
... ...
@@ -353,7 +357,7 @@ func (container *Container) isNetworkAllocated() bool {
353 353
 // cleanup releases any network resources allocated to the container along with any rules
354 354
 // around how containers are linked together.  It also unmounts the container's root filesystem.
355 355
 func (container *Container) cleanup() {
356
-	container.ReleaseNetwork()
356
+	container.releaseNetwork()
357 357
 
358 358
 	if err := container.Unmount(); err != nil {
359 359
 		logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
... ...
@@ -363,10 +367,15 @@ func (container *Container) cleanup() {
363 363
 		container.daemon.unregisterExecCommand(eConfig)
364 364
 	}
365 365
 
366
-	container.UnmountVolumes(false)
366
+	container.unmountVolumes(false)
367 367
 }
368 368
 
369
-func (container *Container) KillSig(sig int) error {
369
+// killSig sends the container the given signal. This wrapper for the
370
+// host specific kill command prepares the container before attempting
371
+// to send the signal. An error is returned if the container is paused
372
+// or not running, or if there is a problem returned from the
373
+// underlying kill command.
374
+func (container *Container) killSig(sig int) error {
370 375
 	logrus.Debugf("Sending %d to %s", sig, container.ID)
371 376
 	container.Lock()
372 377
 	defer container.Unlock()
... ...
@@ -391,24 +400,24 @@ func (container *Container) KillSig(sig int) error {
391 391
 		return nil
392 392
 	}
393 393
 
394
-	if err := container.daemon.Kill(container, sig); err != nil {
394
+	if err := container.daemon.kill(container, sig); err != nil {
395 395
 		return err
396 396
 	}
397
-	container.LogEvent("kill")
397
+	container.logEvent("kill")
398 398
 	return nil
399 399
 }
400 400
 
401
-// Wrapper aroung KillSig() suppressing "no such process" error.
401
+// Wrapper around killSig() suppressing "no such process" error.
402 402
 func (container *Container) killPossiblyDeadProcess(sig int) error {
403
-	err := container.KillSig(sig)
403
+	err := container.killSig(sig)
404 404
 	if err == syscall.ESRCH {
405
-		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
405
+		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig)
406 406
 		return nil
407 407
 	}
408 408
 	return err
409 409
 }
410 410
 
411
-func (container *Container) Pause() error {
411
+func (container *Container) pause() error {
412 412
 	container.Lock()
413 413
 	defer container.Unlock()
414 414
 
... ...
@@ -426,11 +435,11 @@ func (container *Container) Pause() error {
426 426
 		return err
427 427
 	}
428 428
 	container.Paused = true
429
-	container.LogEvent("pause")
429
+	container.logEvent("pause")
430 430
 	return nil
431 431
 }
432 432
 
433
-func (container *Container) Unpause() error {
433
+func (container *Container) unpause() error {
434 434
 	container.Lock()
435 435
 	defer container.Unlock()
436 436
 
... ...
@@ -448,17 +457,18 @@ func (container *Container) Unpause() error {
448 448
 		return err
449 449
 	}
450 450
 	container.Paused = false
451
-	container.LogEvent("unpause")
451
+	container.logEvent("unpause")
452 452
 	return nil
453 453
 }
454 454
 
455
+// Kill forcefully terminates a container.
455 456
 func (container *Container) Kill() error {
456 457
 	if !container.IsRunning() {
457 458
 		return ErrContainerNotRunning{container.ID}
458 459
 	}
459 460
 
460 461
 	// 1. Send SIGKILL
461
-	if err := container.killPossiblyDeadProcess(9); err != nil {
462
+	if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
462 463
 		// While normally we might "return err" here we're not going to
463 464
 		// because if we can't stop the container by this point then
464 465
 		// its probably because its already stopped. Meaning, between
... ...
@@ -487,15 +497,20 @@ func (container *Container) Kill() error {
487 487
 	return nil
488 488
 }
489 489
 
490
+// Stop halts a container by sending SIGTERM, waiting for the given
491
+// duration in seconds, and then sending SIGKILL and waiting for the
492
+// process to exit. If a negative duration is given, Stop will wait
493
+// for SIGTERM forever. If the container is not running Stop returns
494
+// immediately.
490 495
 func (container *Container) Stop(seconds int) error {
491 496
 	if !container.IsRunning() {
492 497
 		return nil
493 498
 	}
494 499
 
495 500
 	// 1. Send a SIGTERM
496
-	if err := container.killPossiblyDeadProcess(15); err != nil {
501
+	if err := container.killPossiblyDeadProcess(int(syscall.SIGTERM)); err != nil {
497 502
 		logrus.Infof("Failed to send SIGTERM to the process, force killing")
498
-		if err := container.killPossiblyDeadProcess(9); err != nil {
503
+		if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil {
499 504
 			return err
500 505
 		}
501 506
 	}
... ...
@@ -510,10 +525,14 @@ func (container *Container) Stop(seconds int) error {
510 510
 		}
511 511
 	}
512 512
 
513
-	container.LogEvent("stop")
513
+	container.logEvent("stop")
514 514
 	return nil
515 515
 }
516 516
 
517
+// Restart attempts to gracefully stop and then start the
518
+// container. When stopping, wait for the given duration in seconds to
519
+// gracefully stop, before forcefully terminating the container. If
520
+// given a negative duration, wait forever for a graceful stop.
517 521
 func (container *Container) Restart(seconds int) error {
518 522
 	// Avoid unnecessarily unmounting and then directly mounting
519 523
 	// the container when the container stops and then starts
... ...
@@ -530,10 +549,12 @@ func (container *Container) Restart(seconds int) error {
530 530
 		return err
531 531
 	}
532 532
 
533
-	container.LogEvent("restart")
533
+	container.logEvent("restart")
534 534
 	return nil
535 535
 }
536 536
 
537
+// Resize changes the TTY of the process running inside the container
538
+// to the given height and width. The container must be running.
537 539
 func (container *Container) Resize(h, w int) error {
538 540
 	if !container.IsRunning() {
539 541
 		return ErrContainerNotRunning{container.ID}
... ...
@@ -541,11 +562,11 @@ func (container *Container) Resize(h, w int) error {
541 541
 	if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
542 542
 		return err
543 543
 	}
544
-	container.LogEvent("resize")
544
+	container.logEvent("resize")
545 545
 	return nil
546 546
 }
547 547
 
548
-func (container *Container) Export() (archive.Archive, error) {
548
+func (container *Container) export() (archive.Archive, error) {
549 549
 	if err := container.Mount(); err != nil {
550 550
 		return nil, err
551 551
 	}
... ...
@@ -560,46 +581,45 @@ func (container *Container) Export() (archive.Archive, error) {
560 560
 		container.Unmount()
561 561
 		return err
562 562
 	})
563
-	container.LogEvent("export")
563
+	container.logEvent("export")
564 564
 	return arch, err
565 565
 }
566 566
 
567
+// Mount sets container.basefs
567 568
 func (container *Container) Mount() error {
568 569
 	return container.daemon.Mount(container)
569 570
 }
570 571
 
571 572
 func (container *Container) changes() ([]archive.Change, error) {
572
-	return container.daemon.Changes(container)
573
-}
574
-
575
-func (container *Container) Changes() ([]archive.Change, error) {
576 573
 	container.Lock()
577 574
 	defer container.Unlock()
578
-	return container.changes()
575
+	return container.daemon.changes(container)
579 576
 }
580 577
 
581
-func (container *Container) GetImage() (*image.Image, error) {
578
+func (container *Container) getImage() (*image.Image, error) {
582 579
 	if container.daemon == nil {
583 580
 		return nil, fmt.Errorf("Can't get image of unregistered container")
584 581
 	}
585 582
 	return container.daemon.graph.Get(container.ImageID)
586 583
 }
587 584
 
585
+// Unmount asks the daemon to release the layered filesystems that are
586
+// mounted by the container.
588 587
 func (container *Container) Unmount() error {
589
-	return container.daemon.Unmount(container)
588
+	return container.daemon.unmount(container)
590 589
 }
591 590
 
592 591
 func (container *Container) hostConfigPath() (string, error) {
593
-	return container.GetRootResourcePath("hostconfig.json")
592
+	return container.getRootResourcePath("hostconfig.json")
594 593
 }
595 594
 
596 595
 func (container *Container) jsonPath() (string, error) {
597
-	return container.GetRootResourcePath("config.json")
596
+	return container.getRootResourcePath("config.json")
598 597
 }
599 598
 
600 599
 // This method must be exported to be used from the lxc template
601 600
 // This directory is only usable when the container is running
602
-func (container *Container) RootfsPath() string {
601
+func (container *Container) rootfsPath() string {
603 602
 	return container.basefs
604 603
 }
605 604
 
... ...
@@ -610,7 +630,7 @@ func validateID(id string) error {
610 610
 	return nil
611 611
 }
612 612
 
613
-func (container *Container) Copy(resource string) (rc io.ReadCloser, err error) {
613
+func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
614 614
 	container.Lock()
615 615
 
616 616
 	defer func() {
... ...
@@ -629,7 +649,7 @@ func (container *Container) Copy(resource string) (rc io.ReadCloser, err error)
629 629
 	defer func() {
630 630
 		if err != nil {
631 631
 			// unmount any volumes
632
-			container.UnmountVolumes(true)
632
+			container.unmountVolumes(true)
633 633
 			// unmount the container's rootfs
634 634
 			container.Unmount()
635 635
 		}
... ...
@@ -666,17 +686,17 @@ func (container *Container) Copy(resource string) (rc io.ReadCloser, err error)
666 666
 
667 667
 	reader := ioutils.NewReadCloserWrapper(archive, func() error {
668 668
 		err := archive.Close()
669
-		container.UnmountVolumes(true)
669
+		container.unmountVolumes(true)
670 670
 		container.Unmount()
671 671
 		container.Unlock()
672 672
 		return err
673 673
 	})
674
-	container.LogEvent("copy")
674
+	container.logEvent("copy")
675 675
 	return reader, nil
676 676
 }
677 677
 
678 678
 // Returns true if the container exposes a certain port
679
-func (container *Container) Exposes(p nat.Port) bool {
679
+func (container *Container) exposes(p nat.Port) bool {
680 680
 	_, exists := container.Config.ExposedPorts[p]
681 681
 	return exists
682 682
 }
... ...
@@ -718,7 +738,7 @@ func (container *Container) getLogger() (logger.Logger, error) {
718 718
 
719 719
 	// Set logging file for "json-logger"
720 720
 	if cfg.Type == jsonfilelog.Name {
721
-		ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
721
+		ctx.LogPath, err = container.getRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
722 722
 		if err != nil {
723 723
 			return nil, err
724 724
 		}
... ...
@@ -764,7 +784,7 @@ func (container *Container) waitForStart() error {
764 764
 	return nil
765 765
 }
766 766
 
767
-func (container *Container) GetProcessLabel() string {
767
+func (container *Container) getProcessLabel() string {
768 768
 	// even if we have a process label return "" if we are running
769 769
 	// in privileged mode
770 770
 	if container.hostConfig.Privileged {
... ...
@@ -773,31 +793,22 @@ func (container *Container) GetProcessLabel() string {
773 773
 	return container.ProcessLabel
774 774
 }
775 775
 
776
-func (container *Container) GetMountLabel() string {
776
+func (container *Container) getMountLabel() string {
777 777
 	if container.hostConfig.Privileged {
778 778
 		return ""
779 779
 	}
780 780
 	return container.MountLabel
781 781
 }
782 782
 
783
-func (container *Container) Stats() (*execdriver.ResourceStats, error) {
784
-	return container.daemon.Stats(container)
785
-}
786
-
787
-func (c *Container) LogDriverType() string {
788
-	c.Lock()
789
-	defer c.Unlock()
790
-	if c.hostConfig.LogConfig.Type == "" {
791
-		return c.daemon.defaultLogConfig.Type
792
-	}
793
-	return c.hostConfig.LogConfig.Type
783
+func (container *Container) stats() (*execdriver.ResourceStats, error) {
784
+	return container.daemon.stats(container)
794 785
 }
795 786
 
796
-func (container *Container) GetExecIDs() []string {
787
+func (container *Container) getExecIDs() []string {
797 788
 	return container.execCommands.List()
798 789
 }
799 790
 
800
-func (container *Container) Exec(execConfig *execConfig) error {
791
+func (container *Container) exec(ExecConfig *ExecConfig) error {
801 792
 	container.Lock()
802 793
 	defer container.Unlock()
803 794
 
... ...
@@ -810,16 +821,16 @@ func (container *Container) Exec(execConfig *execConfig) error {
810 810
 				c.Close()
811 811
 			}
812 812
 		}
813
-		close(execConfig.waitStart)
813
+		close(ExecConfig.waitStart)
814 814
 	}
815 815
 
816 816
 	// We use a callback here instead of a goroutine and an chan for
817 817
 	// synchronization purposes
818
-	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })
818
+	cErr := promise.Go(func() error { return container.monitorExec(ExecConfig, callback) })
819 819
 
820 820
 	// Exec should not return until the process is actually running
821 821
 	select {
822
-	case <-execConfig.waitStart:
822
+	case <-ExecConfig.waitStart:
823 823
 	case err := <-cErr:
824 824
 		return err
825 825
 	}
... ...
@@ -827,46 +838,48 @@ func (container *Container) Exec(execConfig *execConfig) error {
827 827
 	return nil
828 828
 }
829 829
 
830
-func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
830
+func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.StartCallback) error {
831 831
 	var (
832 832
 		err      error
833 833
 		exitCode int
834 834
 	)
835
-	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
836
-	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
835
+	pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
836
+	exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
837 837
 	if err != nil {
838 838
 		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
839 839
 	}
840 840
 	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
841
-	if execConfig.OpenStdin {
842
-		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
841
+	if ExecConfig.OpenStdin {
842
+		if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
843 843
 			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
844 844
 		}
845 845
 	}
846
-	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
846
+	if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
847 847
 		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
848 848
 	}
849
-	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
849
+	if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
850 850
 		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
851 851
 	}
852
-	if execConfig.ProcessConfig.Terminal != nil {
853
-		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
852
+	if ExecConfig.ProcessConfig.Terminal != nil {
853
+		if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
854 854
 			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
855 855
 		}
856 856
 	}
857 857
 	// remove the exec command from the container's store only and not the
858 858
 	// daemon's store so that the exec command can be inspected.
859
-	container.execCommands.Delete(execConfig.ID)
859
+	container.execCommands.Delete(ExecConfig.ID)
860 860
 	return err
861 861
 }
862 862
 
863
-func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
864
-	return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
863
+// Attach connects to the container's TTY, delegating to standard
864
+// streams or websockets depending on the configuration.
865
+func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
866
+	return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
865 867
 }
866 868
 
867
-func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
869
+func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
868 870
 	if logs {
869
-		logDriver, err := c.getLogger()
871
+		logDriver, err := container.getLogger()
870 872
 		if err != nil {
871 873
 			return err
872 874
 		}
... ...
@@ -896,7 +909,7 @@ func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer
896 896
 		}
897 897
 	}
898 898
 
899
-	c.LogEvent("attach")
899
+	container.logEvent("attach")
900 900
 
901 901
 	//stream
902 902
 	if stream {
... ...
@@ -910,17 +923,17 @@ func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer
910 910
 			}()
911 911
 			stdinPipe = r
912 912
 		}
913
-		<-c.Attach(stdinPipe, stdout, stderr)
913
+		<-container.Attach(stdinPipe, stdout, stderr)
914 914
 		// If we are in stdinonce mode, wait for the process to end
915 915
 		// otherwise, simply return
916
-		if c.Config.StdinOnce && !c.Config.Tty {
917
-			c.WaitStop(-1 * time.Second)
916
+		if container.Config.StdinOnce && !container.Config.Tty {
917
+			container.WaitStop(-1 * time.Second)
918 918
 		}
919 919
 	}
920 920
 	return nil
921 921
 }
922 922
 
923
-func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
923
+func attach(streamConfig *streamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
924 924
 	var (
925 925
 		cStdout, cStderr io.ReadCloser
926 926
 		cStdin           io.WriteCloser
... ...
@@ -18,7 +18,9 @@ import (
18 18
 	"github.com/docker/docker/daemon/execdriver"
19 19
 	"github.com/docker/docker/daemon/links"
20 20
 	"github.com/docker/docker/daemon/network"
21
+	"github.com/docker/docker/pkg/archive"
21 22
 	"github.com/docker/docker/pkg/directory"
23
+	"github.com/docker/docker/pkg/ioutils"
22 24
 	"github.com/docker/docker/pkg/nat"
23 25
 	"github.com/docker/docker/pkg/stringid"
24 26
 	"github.com/docker/docker/pkg/system"
... ...
@@ -35,8 +37,13 @@ import (
35 35
 	"github.com/opencontainers/runc/libcontainer/label"
36 36
 )
37 37
 
38
+// DefaultPathEnv is a Unix-style list of directories to search for
39
+// executables. Each directory is separated from the next by a colon
40
+// (':') character.
38 41
 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
39 42
 
43
+// Container holds the fields specific to Unix implementations. See
44
+// CommonContainer for standard fields common to all containers.
40 45
 type Container struct {
41 46
 	CommonContainer
42 47
 
... ...
@@ -47,15 +54,15 @@ type Container struct {
47 47
 	HostsPath       string
48 48
 	MountPoints     map[string]*mountPoint
49 49
 	ResolvConfPath  string
50
-	UpdateDns       bool
51
-	Volumes         map[string]string // Deprecated since 1.7, kept for backwards compatibility
52
-	VolumesRW       map[string]bool   // Deprecated since 1.7, kept for backwards compatibility
50
+
51
+	Volumes   map[string]string // Deprecated since 1.7, kept for backwards compatibility
52
+	VolumesRW map[string]bool   // Deprecated since 1.7, kept for backwards compatibility
53 53
 }
54 54
 
55 55
 func killProcessDirectly(container *Container) error {
56 56
 	if _, err := container.WaitStop(10 * time.Second); err != nil {
57 57
 		// Ensure that we don't kill ourselves
58
-		if pid := container.GetPid(); pid != 0 {
58
+		if pid := container.getPID(); pid != 0 {
59 59
 			logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
60 60
 			if err := syscall.Kill(pid, 9); err != nil {
61 61
 				if err != syscall.ESRCH {
... ...
@@ -73,7 +80,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) {
73 73
 		env    []string
74 74
 		daemon = container.daemon
75 75
 	)
76
-	children, err := daemon.Children(container.Name)
76
+	children, err := daemon.children(container.Name)
77 77
 	if err != nil {
78 78
 		return nil, err
79 79
 	}
... ...
@@ -231,7 +238,7 @@ func populateCommand(c *Container, env []string) error {
231 231
 	for _, ul := range ulimits {
232 232
 		ulIdx[ul.Name] = ul
233 233
 	}
234
-	for name, ul := range c.daemon.config.Ulimits {
234
+	for name, ul := range c.daemon.configStore.Ulimits {
235 235
 		if _, exists := ulIdx[name]; !exists {
236 236
 			ulimits = append(ulimits, ul)
237 237
 		}
... ...
@@ -277,7 +284,7 @@ func populateCommand(c *Container, env []string) error {
277 277
 
278 278
 	c.command = &execdriver.Command{
279 279
 		ID:                 c.ID,
280
-		Rootfs:             c.RootfsPath(),
280
+		Rootfs:             c.rootfsPath(),
281 281
 		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
282 282
 		InitPath:           "/.dockerinit",
283 283
 		WorkingDir:         c.Config.WorkingDir,
... ...
@@ -292,8 +299,8 @@ func populateCommand(c *Container, env []string) error {
292 292
 		CapDrop:            c.hostConfig.CapDrop.Slice(),
293 293
 		GroupAdd:           c.hostConfig.GroupAdd,
294 294
 		ProcessConfig:      processConfig,
295
-		ProcessLabel:       c.GetProcessLabel(),
296
-		MountLabel:         c.GetMountLabel(),
295
+		ProcessLabel:       c.getProcessLabel(),
296
+		MountLabel:         c.getMountLabel(),
297 297
 		LxcConfig:          lxcConfig,
298 298
 		AppArmorProfile:    c.AppArmorProfile,
299 299
 		CgroupParent:       c.hostConfig.CgroupParent,
... ...
@@ -321,8 +328,8 @@ func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Devi
321 321
 	return append(devs, userDevices...)
322 322
 }
323 323
 
324
-// GetSize, return real size, virtual size
325
-func (container *Container) GetSize() (int64, int64) {
324
+// GetSize returns the real size & virtual size of the container.
325
+func (container *Container) getSize() (int64, int64) {
326 326
 	var (
327 327
 		sizeRw, sizeRootfs int64
328 328
 		err                error
... ...
@@ -373,7 +380,7 @@ func (container *Container) trySetNetworkMount(destination string, path string)
373 373
 }
374 374
 
375 375
 func (container *Container) buildHostnameFile() error {
376
-	hostnamePath, err := container.GetRootResourcePath("hostname")
376
+	hostnamePath, err := container.getRootResourcePath("hostname")
377 377
 	if err != nil {
378 378
 		return err
379 379
 	}
... ...
@@ -400,13 +407,13 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
400 400
 		joinOptions = append(joinOptions, libnetwork.JoinOptionUseDefaultSandbox())
401 401
 	}
402 402
 
403
-	container.HostsPath, err = container.GetRootResourcePath("hosts")
403
+	container.HostsPath, err = container.getRootResourcePath("hosts")
404 404
 	if err != nil {
405 405
 		return nil, err
406 406
 	}
407 407
 	joinOptions = append(joinOptions, libnetwork.JoinOptionHostsPath(container.HostsPath))
408 408
 
409
-	container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
409
+	container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
410 410
 	if err != nil {
411 411
 		return nil, err
412 412
 	}
... ...
@@ -414,8 +421,8 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
414 414
 
415 415
 	if len(container.hostConfig.DNS) > 0 {
416 416
 		dns = container.hostConfig.DNS
417
-	} else if len(container.daemon.config.Dns) > 0 {
418
-		dns = container.daemon.config.Dns
417
+	} else if len(container.daemon.configStore.DNS) > 0 {
418
+		dns = container.daemon.configStore.DNS
419 419
 	}
420 420
 
421 421
 	for _, d := range dns {
... ...
@@ -424,8 +431,8 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
424 424
 
425 425
 	if len(container.hostConfig.DNSSearch) > 0 {
426 426
 		dnsSearch = container.hostConfig.DNSSearch
427
-	} else if len(container.daemon.config.DnsSearch) > 0 {
428
-		dnsSearch = container.daemon.config.DnsSearch
427
+	} else if len(container.daemon.configStore.DNSSearch) > 0 {
428
+		dnsSearch = container.daemon.configStore.DNSSearch
429 429
 	}
430 430
 
431 431
 	for _, ds := range dnsSearch {
... ...
@@ -445,7 +452,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
445 445
 
446 446
 	var childEndpoints, parentEndpoints []string
447 447
 
448
-	children, err := container.daemon.Children(container.Name)
448
+	children, err := container.daemon.children(container.Name)
449 449
 	if err != nil {
450 450
 		return nil, err
451 451
 	}
... ...
@@ -470,7 +477,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
470 470
 		joinOptions = append(joinOptions, libnetwork.JoinOptionExtraHost(parts[0], parts[1]))
471 471
 	}
472 472
 
473
-	refs := container.daemon.ContainerGraph().RefPaths(container.ID)
473
+	refs := container.daemon.containerGraph().RefPaths(container.ID)
474 474
 	for _, ref := range refs {
475 475
 		if ref.ParentID == "0" {
476 476
 			continue
... ...
@@ -481,7 +488,7 @@ func (container *Container) buildJoinOptions() ([]libnetwork.EndpointOption, err
481 481
 			logrus.Error(err)
482 482
 		}
483 483
 
484
-		if c != nil && !container.daemon.config.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
484
+		if c != nil && !container.daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
485 485
 			logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, container.NetworkSettings.IPAddress)
486 486
 			joinOptions = append(joinOptions, libnetwork.JoinOptionParentUpdate(c.NetworkSettings.EndpointID, ref.Name, container.NetworkSettings.IPAddress))
487 487
 			if c.NetworkSettings.EndpointID != "" {
... ...
@@ -642,7 +649,7 @@ func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libne
642 642
 	}
643 643
 
644 644
 	if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
645
-		networkSettings.Bridge = container.daemon.config.Bridge.Iface
645
+		networkSettings.Bridge = container.daemon.configStore.Bridge.Iface
646 646
 	}
647 647
 
648 648
 	container.NetworkSettings = networkSettings
... ...
@@ -651,7 +658,7 @@ func (container *Container) updateNetworkSettings(n libnetwork.Network, ep libne
651 651
 
652 652
 // UpdateNetwork is used to update the container's network (e.g. when linked containers
653 653
 // get removed/unlinked).
654
-func (container *Container) UpdateNetwork() error {
654
+func (container *Container) updateNetwork() error {
655 655
 	n, err := container.daemon.netController.NetworkByID(container.NetworkSettings.NetworkID)
656 656
 	if err != nil {
657 657
 		return fmt.Errorf("error locating network id %s: %v", container.NetworkSettings.NetworkID, err)
... ...
@@ -803,7 +810,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string)
803 803
 		return false
804 804
 	}
805 805
 
806
-	if container.daemon.config.DisableBridge {
806
+	if container.daemon.configStore.DisableBridge {
807 807
 		return false
808 808
 	}
809 809
 
... ...
@@ -816,7 +823,7 @@ func (container *Container) secondaryNetworkRequired(primaryNetworkType string)
816 816
 	return false
817 817
 }
818 818
 
819
-func (container *Container) AllocateNetwork() error {
819
+func (container *Container) allocateNetwork() error {
820 820
 	mode := container.hostConfig.NetworkMode
821 821
 	controller := container.daemon.netController
822 822
 	if container.Config.NetworkDisabled || mode.IsContainer() {
... ...
@@ -837,7 +844,7 @@ func (container *Container) AllocateNetwork() error {
837 837
 		return fmt.Errorf("conflicting options: publishing a service and network mode")
838 838
 	}
839 839
 
840
-	if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.config.DisableBridge {
840
+	if runconfig.NetworkMode(networkDriver).IsBridge() && container.daemon.configStore.DisableBridge {
841 841
 		container.Config.NetworkDisabled = true
842 842
 		return nil
843 843
 	}
... ...
@@ -861,7 +868,7 @@ func (container *Container) AllocateNetwork() error {
861 861
 		return err
862 862
 	}
863 863
 
864
-	return container.WriteHostConfig()
864
+	return container.writeHostConfig()
865 865
 }
866 866
 
867 867
 func (container *Container) configureNetwork(networkName, service, networkDriver string, canCreateNetwork bool) error {
... ...
@@ -945,13 +952,28 @@ func (container *Container) initializeNetworking() error {
945 945
 
946 946
 	}
947 947
 
948
-	if err := container.AllocateNetwork(); err != nil {
948
+	if err := container.allocateNetwork(); err != nil {
949 949
 		return err
950 950
 	}
951 951
 
952 952
 	return container.buildHostnameFile()
953 953
 }
954 954
 
955
+func (container *Container) exportRw() (archive.Archive, error) {
956
+	if container.daemon == nil {
957
+		return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID)
958
+	}
959
+	archive, err := container.daemon.diff(container)
960
+	if err != nil {
961
+		return nil, err
962
+	}
963
+	return ioutils.NewReadCloserWrapper(archive, func() error {
964
+			err := archive.Close()
965
+			return err
966
+		}),
967
+		nil
968
+}
969
+
955 970
 func (container *Container) getIpcContainer() (*Container, error) {
956 971
 	containerID := container.hostConfig.IpcMode.Container()
957 972
 	c, err := container.daemon.Get(containerID)
... ...
@@ -1013,7 +1035,7 @@ func (container *Container) getNetworkedContainer() (*Container, error) {
1013 1013
 	}
1014 1014
 }
1015 1015
 
1016
-func (container *Container) ReleaseNetwork() {
1016
+func (container *Container) releaseNetwork() {
1017 1017
 	if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
1018 1018
 		return
1019 1019
 	}
... ...
@@ -1060,7 +1082,7 @@ func (container *Container) ReleaseNetwork() {
1060 1060
 	}
1061 1061
 }
1062 1062
 
1063
-func (container *Container) UnmountVolumes(forceSyscall bool) error {
1063
+func (container *Container) unmountVolumes(forceSyscall bool) error {
1064 1064
 	var volumeMounts []mountPoint
1065 1065
 
1066 1066
 	for _, mntPoint := range container.MountPoints {
... ...
@@ -7,12 +7,15 @@ import (
7 7
 	"strings"
8 8
 
9 9
 	"github.com/docker/docker/daemon/execdriver"
10
+	"github.com/docker/docker/pkg/archive"
10 11
 )
11 12
 
12
-// This is deliberately empty on Windows as the default path will be set by
13
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
13 14
 // the container. Docker has no context of what the default path should be.
14 15
 const DefaultPathEnv = ""
15 16
 
17
+// Container holds fields specific to the Windows implementation. See
18
+// CommonContainer for standard fields common to all containers.
16 19
 type Container struct {
17 20
 	CommonContainer
18 21
 
... ...
@@ -23,14 +26,6 @@ func killProcessDirectly(container *Container) error {
23 23
 	return nil
24 24
 }
25 25
 
26
-func (container *Container) setupContainerDns() error {
27
-	return nil
28
-}
29
-
30
-func (container *Container) updateParentsHosts() error {
31
-	return nil
32
-}
33
-
34 26
 func (container *Container) setupLinkedContainers() ([]string, error) {
35 27
 	return nil, nil
36 28
 }
... ...
@@ -60,7 +55,7 @@ func populateCommand(c *Container, env []string) error {
60 60
 		if !c.Config.NetworkDisabled {
61 61
 			en.Interface = &execdriver.NetworkInterface{
62 62
 				MacAddress:   c.Config.MacAddress,
63
-				Bridge:       c.daemon.config.Bridge.VirtualSwitchName,
63
+				Bridge:       c.daemon.configStore.Bridge.VirtualSwitchName,
64 64
 				PortBindings: c.hostConfig.PortBindings,
65 65
 
66 66
 				// TODO Windows. Include IPAddress. There already is a
... ...
@@ -118,7 +113,7 @@ func populateCommand(c *Container, env []string) error {
118 118
 	// TODO Windows: Factor out remainder of unused fields.
119 119
 	c.command = &execdriver.Command{
120 120
 		ID:             c.ID,
121
-		Rootfs:         c.RootfsPath(),
121
+		Rootfs:         c.rootfsPath(),
122 122
 		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
123 123
 		InitPath:       "/.dockerinit",
124 124
 		WorkingDir:     c.Config.WorkingDir,
... ...
@@ -128,8 +123,8 @@ func populateCommand(c *Container, env []string) error {
128 128
 		CapAdd:         c.hostConfig.CapAdd.Slice(),
129 129
 		CapDrop:        c.hostConfig.CapDrop.Slice(),
130 130
 		ProcessConfig:  processConfig,
131
-		ProcessLabel:   c.GetProcessLabel(),
132
-		MountLabel:     c.GetMountLabel(),
131
+		ProcessLabel:   c.getProcessLabel(),
132
+		MountLabel:     c.getMountLabel(),
133 133
 		FirstStart:     !c.HasBeenStartedBefore,
134 134
 		LayerFolder:    layerFolder,
135 135
 		LayerPaths:     layerPaths,
... ...
@@ -138,28 +133,33 @@ func populateCommand(c *Container, env []string) error {
138 138
 	return nil
139 139
 }
140 140
 
141
-// GetSize, return real size, virtual size
142
-func (container *Container) GetSize() (int64, int64) {
141
+// GetSize returns real size & virtual size
142
+func (container *Container) getSize() (int64, int64) {
143 143
 	// TODO Windows
144 144
 	return 0, 0
145 145
 }
146 146
 
147
-func (container *Container) AllocateNetwork() error {
147
+// allocateNetwork is a no-op on Windows.
148
+func (container *Container) allocateNetwork() error {
148 149
 	return nil
149 150
 }
150 151
 
151
-func (container *Container) UpdateNetwork() error {
152
-	return nil
152
+func (container *Container) exportRw() (archive.Archive, error) {
153
+	if container.IsRunning() {
154
+		return nil, fmt.Errorf("Cannot export a running container.")
155
+	}
156
+	// TODO Windows. Implementation (different to Linux)
157
+	return nil, nil
153 158
 }
154 159
 
155
-func (container *Container) ReleaseNetwork() {
160
+func (container *Container) updateNetwork() error {
161
+	return nil
156 162
 }
157 163
 
158
-func (container *Container) RestoreNetwork() error {
159
-	return nil
164
+func (container *Container) releaseNetwork() {
160 165
 }
161 166
 
162
-func (container *Container) UnmountVolumes(forceSyscall bool) error {
167
+func (container *Container) unmountVolumes(forceSyscall bool) error {
163 168
 	return nil
164 169
 }
165 170
 
... ...
@@ -13,6 +13,7 @@ import (
13 13
 	"github.com/opencontainers/runc/libcontainer/label"
14 14
 )
15 15
 
16
+// ContainerCreate takes configs and creates a container.
16 17
 func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig, adjustCPUShares bool) (*Container, []string, error) {
17 18
 	if config == nil {
18 19
 		return nil, nil, fmt.Errorf("Config cannot be empty in order to create a container")
... ...
@@ -70,7 +71,7 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
70 70
 		hostConfig = &runconfig.HostConfig{}
71 71
 	}
72 72
 	if hostConfig.SecurityOpt == nil {
73
-		hostConfig.SecurityOpt, err = daemon.GenerateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
73
+		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
74 74
 		if err != nil {
75 75
 			return nil, nil, err
76 76
 		}
... ...
@@ -104,15 +105,15 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos
104 104
 		return nil, nil, err
105 105
 	}
106 106
 
107
-	if err := container.ToDisk(); err != nil {
107
+	if err := container.toDiskLocking(); err != nil {
108 108
 		logrus.Errorf("Error saving new container to disk: %v", err)
109 109
 		return nil, nil, err
110 110
 	}
111
-	container.LogEvent("create")
111
+	container.logEvent("create")
112 112
 	return container, warnings, nil
113 113
 }
114 114
 
115
-func (daemon *Daemon) GenerateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
115
+func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
116 116
 	if ipcMode.IsHost() || pidMode.IsHost() {
117 117
 		return label.DisableSecOpt(), nil
118 118
 	}
... ...
@@ -1,3 +1,8 @@
1
+// Package daemon exposes the functions that occur on the host server
2
+// that the Docker daemon is running.
3
+//
4
+// In implementing the various functions of the daemon, there is often
5
+// a method-specific struct for configuring the runtime behavior.
1 6
 package daemon
2 7
 
3 8
 import (
... ...
@@ -19,6 +24,7 @@ import (
19 19
 	"github.com/docker/docker/daemon/execdriver"
20 20
 	"github.com/docker/docker/daemon/execdriver/execdrivers"
21 21
 	"github.com/docker/docker/daemon/graphdriver"
22
+	// register vfs
22 23
 	_ "github.com/docker/docker/daemon/graphdriver/vfs"
23 24
 	"github.com/docker/docker/daemon/logger"
24 25
 	"github.com/docker/docker/daemon/network"
... ...
@@ -47,7 +53,7 @@ var (
47 47
 	validContainerNameChars   = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
48 48
 	validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
49 49
 
50
-	ErrSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
50
+	errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
51 51
 )
52 52
 
53 53
 type contStore struct {
... ...
@@ -81,10 +87,11 @@ func (c *contStore) List() []*Container {
81 81
 		containers.Add(cont)
82 82
 	}
83 83
 	c.Unlock()
84
-	containers.Sort()
84
+	containers.sort()
85 85
 	return *containers
86 86
 }
87 87
 
88
+// Daemon holds information about the Docker daemon.
88 89
 type Daemon struct {
89 90
 	ID               string
90 91
 	repository       string
... ...
@@ -94,8 +101,8 @@ type Daemon struct {
94 94
 	graph            *graph.Graph
95 95
 	repositories     *graph.TagStore
96 96
 	idIndex          *truncindex.TruncIndex
97
-	config           *Config
98
-	containerGraph   *graphdb.Database
97
+	configStore      *Config
98
+	containerGraphDB *graphdb.Database
99 99
 	driver           graphdriver.Driver
100 100
 	execDriver       execdriver.Driver
101 101
 	statsCollector   *statsCollector
... ...
@@ -127,11 +134,11 @@ func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
127 127
 		return containerByName, nil
128 128
 	}
129 129
 
130
-	containerId, indexError := daemon.idIndex.Get(prefixOrName)
130
+	containerID, indexError := daemon.idIndex.Get(prefixOrName)
131 131
 	if indexError != nil {
132 132
 		return nil, indexError
133 133
 	}
134
-	return daemon.containers.Get(containerId), nil
134
+	return daemon.containers.Get(containerID), nil
135 135
 }
136 136
 
137 137
 // Exists returns a true if a container of the specified ID or name exists,
... ...
@@ -150,7 +157,7 @@ func (daemon *Daemon) containerRoot(id string) string {
150 150
 func (daemon *Daemon) load(id string) (*Container, error) {
151 151
 	container := daemon.newBaseContainer(id)
152 152
 
153
-	if err := container.FromDisk(); err != nil {
153
+	if err := container.fromDisk(); err != nil {
154 154
 		return nil, err
155 155
 	}
156 156
 
... ...
@@ -200,8 +207,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
200 200
 	if container.IsRunning() {
201 201
 		logrus.Debugf("killing old running container %s", container.ID)
202 202
 		// Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
203
-		container.SetStopped(&execdriver.ExitStatus{ExitCode: 137})
204
-
203
+		container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
205 204
 		// use the current driver and ensure that the container is dead x.x
206 205
 		cmd := &execdriver.Command{
207 206
 			ID: container.ID,
... ...
@@ -211,7 +217,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err
211 211
 		if err := container.Unmount(); err != nil {
212 212
 			logrus.Debugf("unmount error %s", err)
213 213
 		}
214
-		if err := container.ToDisk(); err != nil {
214
+		if err := container.toDiskLocking(); err != nil {
215 215
 			logrus.Errorf("Error saving stopped state to disk: %v", err)
216 216
 		}
217 217
 	}
... ...
@@ -235,7 +241,7 @@ func (daemon *Daemon) ensureName(container *Container) error {
235 235
 		}
236 236
 		container.Name = name
237 237
 
238
-		if err := container.ToDisk(); err != nil {
238
+		if err := container.toDiskLocking(); err != nil {
239 239
 			logrus.Errorf("Error saving container name to disk: %v", err)
240 240
 		}
241 241
 	}
... ...
@@ -283,7 +289,7 @@ func (daemon *Daemon) restore() error {
283 283
 		}
284 284
 	}
285 285
 
286
-	if entities := daemon.containerGraph.List("/", -1); entities != nil {
286
+	if entities := daemon.containerGraphDB.List("/", -1); entities != nil {
287 287
 		for _, p := range entities.Paths() {
288 288
 			if !debug && logrus.GetLevel() == logrus.InfoLevel {
289 289
 				fmt.Print(".")
... ...
@@ -318,7 +324,7 @@ func (daemon *Daemon) restore() error {
318 318
 
319 319
 			// check the restart policy on the containers and restart any container with
320 320
 			// the restart policy of "always"
321
-			if daemon.config.AutoRestart && container.shouldRestart() {
321
+			if daemon.configStore.AutoRestart && container.shouldRestart() {
322 322
 				logrus.Debugf("Starting container %s", container.ID)
323 323
 
324 324
 				if err := container.Start(); err != nil {
... ...
@@ -351,7 +357,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image.
351 351
 	return nil
352 352
 }
353 353
 
354
-func (daemon *Daemon) generateIdAndName(name string) (string, string, error) {
354
+func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
355 355
 	var (
356 356
 		err error
357 357
 		id  = stringid.GenerateNonCryptoID()
... ...
@@ -380,7 +386,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
380 380
 		name = "/" + name
381 381
 	}
382 382
 
383
-	if _, err := daemon.containerGraph.Set(name, id); err != nil {
383
+	if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
384 384
 		if !graphdb.IsNonUniqueNameError(err) {
385 385
 			return "", err
386 386
 		}
... ...
@@ -392,7 +398,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) {
392 392
 			}
393 393
 
394 394
 			// Remove name and continue starting the container
395
-			if err := daemon.containerGraph.Delete(name); err != nil {
395
+			if err := daemon.containerGraphDB.Delete(name); err != nil {
396 396
 				return "", err
397 397
 			}
398 398
 		} else {
... ...
@@ -413,7 +419,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
413 413
 			name = "/" + name
414 414
 		}
415 415
 
416
-		if _, err := daemon.containerGraph.Set(name, id); err != nil {
416
+		if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
417 417
 			if !graphdb.IsNonUniqueNameError(err) {
418 418
 				return "", err
419 419
 			}
... ...
@@ -423,7 +429,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) {
423 423
 	}
424 424
 
425 425
 	name = "/" + stringid.TruncateID(id)
426
-	if _, err := daemon.containerGraph.Set(name, id); err != nil {
426
+	if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
427 427
 		return "", err
428 428
 	}
429 429
 	return name, nil
... ...
@@ -460,7 +466,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
460 460
 		id  string
461 461
 		err error
462 462
 	)
463
-	id, name, err = daemon.generateIdAndName(name)
463
+	id, name, err = daemon.generateIDAndName(name)
464 464
 	if err != nil {
465 465
 		return nil, err
466 466
 	}
... ...
@@ -483,6 +489,9 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
483 483
 	return &base, err
484 484
 }
485 485
 
486
+// GetFullContainerName returns the container name normalized to begin
487
+// with a "/" prefix, as names are stored in the container graph. An
488
+// empty name returns an error.
486 489
 func GetFullContainerName(name string) (string, error) {
487 490
 	if name == "" {
488 491
 		return "", fmt.Errorf("Container name cannot be empty")
... ...
@@ -493,12 +502,13 @@ func GetFullContainerName(name string) (string, error) {
493 493
 	return name, nil
494 494
 }
495 495
 
496
+// GetByName returns a container given a name.
496 497
 func (daemon *Daemon) GetByName(name string) (*Container, error) {
497 498
 	fullName, err := GetFullContainerName(name)
498 499
 	if err != nil {
499 500
 		return nil, err
500 501
 	}
501
-	entity := daemon.containerGraph.Get(fullName)
502
+	entity := daemon.containerGraphDB.Get(fullName)
502 503
 	if entity == nil {
503 504
 		return nil, fmt.Errorf("Could not find entity for %s", name)
504 505
 	}
... ...
@@ -509,14 +519,17 @@ func (daemon *Daemon) GetByName(name string) (*Container, error) {
509 509
 	return e, nil
510 510
 }
511 511
 
512
-func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
512
+// children returns all child containers of the container with the
513
+// given name. The containers are returned as a map from the container
514
+// name to a pointer to Container.
515
+func (daemon *Daemon) children(name string) (map[string]*Container, error) {
513 516
 	name, err := GetFullContainerName(name)
514 517
 	if err != nil {
515 518
 		return nil, err
516 519
 	}
517 520
 	children := make(map[string]*Container)
518 521
 
519
-	err = daemon.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error {
522
+	err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
520 523
 		c, err := daemon.Get(e.ID())
521 524
 		if err != nil {
522 525
 			return err
... ...
@@ -531,24 +544,28 @@ func (daemon *Daemon) Children(name string) (map[string]*Container, error) {
531 531
 	return children, nil
532 532
 }
533 533
 
534
-func (daemon *Daemon) Parents(name string) ([]string, error) {
534
+// parents returns the names of the parent containers of the container
535
+// with the given name.
536
+func (daemon *Daemon) parents(name string) ([]string, error) {
535 537
 	name, err := GetFullContainerName(name)
536 538
 	if err != nil {
537 539
 		return nil, err
538 540
 	}
539 541
 
540
-	return daemon.containerGraph.Parents(name)
542
+	return daemon.containerGraphDB.Parents(name)
541 543
 }
542 544
 
543
-func (daemon *Daemon) RegisterLink(parent, child *Container, alias string) error {
545
+func (daemon *Daemon) registerLink(parent, child *Container, alias string) error {
544 546
 	fullName := filepath.Join(parent.Name, alias)
545
-	if !daemon.containerGraph.Exists(fullName) {
546
-		_, err := daemon.containerGraph.Set(fullName, child.ID)
547
+	if !daemon.containerGraphDB.Exists(fullName) {
548
+		_, err := daemon.containerGraphDB.Set(fullName, child.ID)
547 549
 		return err
548 550
 	}
549 551
 	return nil
550 552
 }
551 553
 
554
+// NewDaemon sets up everything for the daemon to be able to service
555
+// requests from the webserver.
552 556
 func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemon, err error) {
553 557
 	setDefaultMtu(config)
554 558
 
... ...
@@ -562,7 +579,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
562 562
 
563 563
 	// Verify the platform is supported as a daemon
564 564
 	if !platformSupported {
565
-		return nil, ErrSystemNotSupported
565
+		return nil, errSystemNotSupported
566 566
 	}
567 567
 
568 568
 	// Validate platform-specific requirements
... ...
@@ -705,7 +722,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
705 705
 		return nil, err
706 706
 	}
707 707
 
708
-	d.containerGraph = graph
708
+	d.containerGraphDB = graph
709 709
 
710 710
 	var sysInitPath string
711 711
 	if config.ExecDriver == "lxc" {
... ...
@@ -735,7 +752,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
735 735
 	d.graph = g
736 736
 	d.repositories = repositories
737 737
 	d.idIndex = truncindex.NewTruncIndex([]string{})
738
-	d.config = config
738
+	d.configStore = config
739 739
 	d.sysInitPath = sysInitPath
740 740
 	d.execDriver = ed
741 741
 	d.statsCollector = newStatsCollector(1 * time.Second)
... ...
@@ -753,6 +770,7 @@ func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
753 753
 	return d, nil
754 754
 }
755 755
 
756
+// Shutdown stops the daemon.
756 757
 func (daemon *Daemon) Shutdown() error {
757 758
 	daemon.shutdown = true
758 759
 	if daemon.containers != nil {
... ...
@@ -767,7 +785,7 @@ func (daemon *Daemon) Shutdown() error {
767 767
 				go func() {
768 768
 					defer group.Done()
769 769
 					// TODO(windows): Handle docker restart with paused containers
770
-					if c.IsPaused() {
770
+					if c.isPaused() {
771 771
 						// To terminate a process in freezer cgroup, we should send
772 772
 						// SIGTERM to this process then unfreeze it, and the process will
773 773
 						// force to terminate immediately.
... ...
@@ -777,11 +795,11 @@ func (daemon *Daemon) Shutdown() error {
777 777
 							logrus.Warnf("System does not support SIGTERM")
778 778
 							return
779 779
 						}
780
-						if err := daemon.Kill(c, int(sig)); err != nil {
780
+						if err := daemon.kill(c, int(sig)); err != nil {
781 781
 							logrus.Debugf("sending SIGTERM to container %s with error: %v", c.ID, err)
782 782
 							return
783 783
 						}
784
-						if err := c.Unpause(); err != nil {
784
+						if err := c.unpause(); err != nil {
785 785
 							logrus.Debugf("Failed to unpause container %s with error: %v", c.ID, err)
786 786
 							return
787 787
 						}
... ...
@@ -792,7 +810,7 @@ func (daemon *Daemon) Shutdown() error {
792 792
 								logrus.Warnf("System does not support SIGKILL")
793 793
 								return
794 794
 							}
795
-							daemon.Kill(c, int(sig))
795
+							daemon.kill(c, int(sig))
796 796
 						}
797 797
 					} else {
798 798
 						// If container failed to exit in 10 seconds of SIGTERM, then using the force
... ...
@@ -813,8 +831,8 @@ func (daemon *Daemon) Shutdown() error {
813 813
 		}
814 814
 	}
815 815
 
816
-	if daemon.containerGraph != nil {
817
-		if err := daemon.containerGraph.Close(); err != nil {
816
+	if daemon.containerGraphDB != nil {
817
+		if err := daemon.containerGraphDB.Close(); err != nil {
818 818
 			logrus.Errorf("Error during container graph.Close(): %v", err)
819 819
 		}
820 820
 	}
... ...
@@ -828,8 +846,10 @@ func (daemon *Daemon) Shutdown() error {
828 828
 	return nil
829 829
 }
830 830
 
831
+// Mount sets container.basefs
832
+// (is it not set coming in? why is it unset?)
831 833
 func (daemon *Daemon) Mount(container *Container) error {
832
-	dir, err := daemon.driver.Get(container.ID, container.GetMountLabel())
834
+	dir, err := daemon.driver.Get(container.ID, container.getMountLabel())
833 835
 	if err != nil {
834 836
 		return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, daemon.driver, err)
835 837
 	}
... ...
@@ -848,24 +868,24 @@ func (daemon *Daemon) Mount(container *Container) error {
848 848
 	return nil
849 849
 }
850 850
 
851
-func (daemon *Daemon) Unmount(container *Container) error {
851
+func (daemon *Daemon) unmount(container *Container) error {
852 852
 	daemon.driver.Put(container.ID)
853 853
 	return nil
854 854
 }
855 855
 
856
-func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
856
+func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
857 857
 	return daemon.execDriver.Run(c.command, pipes, startCallback)
858 858
 }
859 859
 
860
-func (daemon *Daemon) Kill(c *Container, sig int) error {
860
+func (daemon *Daemon) kill(c *Container, sig int) error {
861 861
 	return daemon.execDriver.Kill(c.command, sig)
862 862
 }
863 863
 
864
-func (daemon *Daemon) Stats(c *Container) (*execdriver.ResourceStats, error) {
864
+func (daemon *Daemon) stats(c *Container) (*execdriver.ResourceStats, error) {
865 865
 	return daemon.execDriver.Stats(c.ID)
866 866
 }
867 867
 
868
-func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{}, error) {
868
+func (daemon *Daemon) subscribeToContainerStats(name string) (chan interface{}, error) {
869 869
 	c, err := daemon.Get(name)
870 870
 	if err != nil {
871 871
 		return nil, err
... ...
@@ -874,7 +894,7 @@ func (daemon *Daemon) SubscribeToContainerStats(name string) (chan interface{},
874 874
 	return ch, nil
875 875
 }
876 876
 
877
-func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface{}) error {
877
+func (daemon *Daemon) unsubscribeToContainerStats(name string, ch chan interface{}) error {
878 878
 	c, err := daemon.Get(name)
879 879
 	if err != nil {
880 880
 		return err
... ...
@@ -883,12 +903,12 @@ func (daemon *Daemon) UnsubscribeToContainerStats(name string, ch chan interface
883 883
 	return nil
884 884
 }
885 885
 
886
-func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) {
886
+func (daemon *Daemon) changes(container *Container) ([]archive.Change, error) {
887 887
 	initID := fmt.Sprintf("%s-init", container.ID)
888 888
 	return daemon.driver.Changes(container.ID, initID)
889 889
 }
890 890
 
891
-func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) {
891
+func (daemon *Daemon) diff(container *Container) (archive.Archive, error) {
892 892
 	initID := fmt.Sprintf("%s-init", container.ID)
893 893
 	return daemon.driver.Diff(container.ID, initID)
894 894
 }
... ...
@@ -923,6 +943,8 @@ func (daemon *Daemon) createRootfs(container *Container) error {
923 923
 	return nil
924 924
 }
925 925
 
926
+// Graph needs to be removed.
927
+//
926 928
 // FIXME: this is a convenience function for integration tests
927 929
 // which need direct access to daemon.graph.
928 930
 // Once the tests switch to using engine and jobs, this method
... ...
@@ -931,30 +953,39 @@ func (daemon *Daemon) Graph() *graph.Graph {
931 931
 	return daemon.graph
932 932
 }
933 933
 
934
+// Repositories returns all repositories.
934 935
 func (daemon *Daemon) Repositories() *graph.TagStore {
935 936
 	return daemon.repositories
936 937
 }
937 938
 
938
-func (daemon *Daemon) Config() *Config {
939
-	return daemon.config
939
+func (daemon *Daemon) config() *Config {
940
+	return daemon.configStore
940 941
 }
941 942
 
942
-func (daemon *Daemon) SystemInitPath() string {
943
+func (daemon *Daemon) systemInitPath() string {
943 944
 	return daemon.sysInitPath
944 945
 }
945 946
 
947
+// GraphDriver returns the currently used driver for processing
948
+// container layers.
946 949
 func (daemon *Daemon) GraphDriver() graphdriver.Driver {
947 950
 	return daemon.driver
948 951
 }
949 952
 
953
+// ExecutionDriver returns the currently used driver for creating and
954
+// starting execs in a container.
950 955
 func (daemon *Daemon) ExecutionDriver() execdriver.Driver {
951 956
 	return daemon.execDriver
952 957
 }
953 958
 
954
-func (daemon *Daemon) ContainerGraph() *graphdb.Database {
955
-	return daemon.containerGraph
959
+func (daemon *Daemon) containerGraph() *graphdb.Database {
960
+	return daemon.containerGraphDB
956 961
 }
957 962
 
963
+// ImageGetCached returns the earliest created image that is a child
964
+// of the image with imgID, that had the same config when it was
965
+// created. nil is returned if a child cannot be found. An error is
966
+// returned if the parent image cannot be found.
958 967
 func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
959 968
 	// Retrieve all images
960 969
 	images := daemon.Graph().Map()
... ...
@@ -1010,7 +1041,7 @@ func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
1010 1010
 	container.Lock()
1011 1011
 	defer container.Unlock()
1012 1012
 	// Register any links from the host config before starting the container
1013
-	if err := daemon.RegisterLinks(container, hostConfig); err != nil {
1013
+	if err := daemon.registerLinks(container, hostConfig); err != nil {
1014 1014
 		return err
1015 1015
 	}
1016 1016
 
... ...
@@ -3,5 +3,6 @@
3 3
 package daemon
4 4
 
5 5
 import (
6
+	// register the btrfs graphdriver
6 7
 	_ "github.com/docker/docker/daemon/graphdriver/btrfs"
7 8
 )
... ...
@@ -3,5 +3,6 @@
3 3
 package daemon
4 4
 
5 5
 import (
6
+	// register the devmapper graphdriver
6 7
 	_ "github.com/docker/docker/daemon/graphdriver/devmapper"
7 8
 )
... ...
@@ -3,5 +3,6 @@
3 3
 package daemon
4 4
 
5 5
 import (
6
+	// register the overlay graphdriver
6 7
 	_ "github.com/docker/docker/daemon/graphdriver/overlay"
7 8
 )
... ...
@@ -86,9 +86,9 @@ func TestGet(t *testing.T) {
86 86
 	graph.Set(c5.Name, c5.ID)
87 87
 
88 88
 	daemon := &Daemon{
89
-		containers:     store,
90
-		idIndex:        index,
91
-		containerGraph: graph,
89
+		containers:       store,
90
+		idIndex:          index,
91
+		containerGraphDB: graph,
92 92
 	}
93 93
 
94 94
 	if container, _ := daemon.Get("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
... ...
@@ -130,15 +130,15 @@ func TestLoadWithVolume(t *testing.T) {
130 130
 	}
131 131
 	defer os.RemoveAll(tmp)
132 132
 
133
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
134
-	containerPath := filepath.Join(tmp, containerId)
133
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
134
+	containerPath := filepath.Join(tmp, containerID)
135 135
 	if err := os.MkdirAll(containerPath, 0755); err != nil {
136 136
 		t.Fatal(err)
137 137
 	}
138 138
 
139
-	hostVolumeId := stringid.GenerateNonCryptoID()
140
-	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
141
-	volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
139
+	hostVolumeID := stringid.GenerateNonCryptoID()
140
+	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID)
141
+	volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
142 142
 
143 143
 	if err := os.MkdirAll(vfsPath, 0755); err != nil {
144 144
 		t.Fatal(err)
... ...
@@ -187,7 +187,7 @@ func TestLoadWithVolume(t *testing.T) {
187 187
 	}
188 188
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
189 189
 
190
-	c, err := daemon.load(containerId)
190
+	c, err := daemon.load(containerID)
191 191
 	if err != nil {
192 192
 		t.Fatal(err)
193 193
 	}
... ...
@@ -202,8 +202,8 @@ func TestLoadWithVolume(t *testing.T) {
202 202
 	}
203 203
 
204 204
 	m := c.MountPoints["/vol1"]
205
-	if m.Name != hostVolumeId {
206
-		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name)
205
+	if m.Name != hostVolumeID {
206
+		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name)
207 207
 	}
208 208
 
209 209
 	if m.Destination != "/vol1" {
... ...
@@ -235,8 +235,8 @@ func TestLoadWithBindMount(t *testing.T) {
235 235
 	}
236 236
 	defer os.RemoveAll(tmp)
237 237
 
238
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
239
-	containerPath := filepath.Join(tmp, containerId)
238
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
239
+	containerPath := filepath.Join(tmp, containerID)
240 240
 	if err = os.MkdirAll(containerPath, 0755); err != nil {
241 241
 		t.Fatal(err)
242 242
 	}
... ...
@@ -275,7 +275,7 @@ func TestLoadWithBindMount(t *testing.T) {
275 275
 	}
276 276
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
277 277
 
278
-	c, err := daemon.load(containerId)
278
+	c, err := daemon.load(containerID)
279 279
 	if err != nil {
280 280
 		t.Fatal(err)
281 281
 	}
... ...
@@ -314,14 +314,14 @@ func TestLoadWithVolume17RC(t *testing.T) {
314 314
 	}
315 315
 	defer os.RemoveAll(tmp)
316 316
 
317
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
318
-	containerPath := filepath.Join(tmp, containerId)
317
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
318
+	containerPath := filepath.Join(tmp, containerID)
319 319
 	if err := os.MkdirAll(containerPath, 0755); err != nil {
320 320
 		t.Fatal(err)
321 321
 	}
322 322
 
323
-	hostVolumeId := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101"
324
-	volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
323
+	hostVolumeID := "6a3c03fc4a4e588561a543cc3bdd50089e27bd11bbb0e551e19bf735e2514101"
324
+	volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
325 325
 
326 326
 	if err := os.MkdirAll(volumePath, 0755); err != nil {
327 327
 		t.Fatal(err)
... ...
@@ -366,7 +366,7 @@ func TestLoadWithVolume17RC(t *testing.T) {
366 366
 	}
367 367
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
368 368
 
369
-	c, err := daemon.load(containerId)
369
+	c, err := daemon.load(containerID)
370 370
 	if err != nil {
371 371
 		t.Fatal(err)
372 372
 	}
... ...
@@ -381,8 +381,8 @@ func TestLoadWithVolume17RC(t *testing.T) {
381 381
 	}
382 382
 
383 383
 	m := c.MountPoints["/vol1"]
384
-	if m.Name != hostVolumeId {
385
-		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeId, m.Name)
384
+	if m.Name != hostVolumeID {
385
+		t.Fatalf("Expected mount name to be %s, was %s\n", hostVolumeID, m.Name)
386 386
 	}
387 387
 
388 388
 	if m.Destination != "/vol1" {
... ...
@@ -414,15 +414,15 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
414 414
 	}
415 415
 	defer os.RemoveAll(tmp)
416 416
 
417
-	containerId := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
418
-	containerPath := filepath.Join(tmp, containerId)
417
+	containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
418
+	containerPath := filepath.Join(tmp, containerID)
419 419
 	if err := os.MkdirAll(containerPath, 0755); err != nil {
420 420
 		t.Fatal(err)
421 421
 	}
422 422
 
423
-	hostVolumeId := stringid.GenerateNonCryptoID()
424
-	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeId)
425
-	volumePath := filepath.Join(tmp, "volumes", hostVolumeId)
423
+	hostVolumeID := stringid.GenerateNonCryptoID()
424
+	vfsPath := filepath.Join(tmp, "vfs", "dir", hostVolumeID)
425
+	volumePath := filepath.Join(tmp, "volumes", hostVolumeID)
426 426
 
427 427
 	if err := os.MkdirAll(vfsPath, 0755); err != nil {
428 428
 		t.Fatal(err)
... ...
@@ -471,7 +471,7 @@ func TestRemoveLocalVolumesFollowingSymlinks(t *testing.T) {
471 471
 	}
472 472
 	defer volumedrivers.Unregister(volume.DefaultDriverName)
473 473
 
474
-	c, err := daemon.load(containerId)
474
+	c, err := daemon.load(containerID)
475 475
 	if err != nil {
476 476
 		t.Fatal(err)
477 477
 	}
... ...
@@ -63,7 +63,7 @@ func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error
63 63
 	return err
64 64
 }
65 65
 
66
-func CheckKernelVersion(k, major, minor int) bool {
66
+func checkKernelVersion(k, major, minor int) bool {
67 67
 	if v, err := kernel.GetKernelVersion(); err != nil {
68 68
 		logrus.Warnf("%s", err)
69 69
 	} else {
... ...
@@ -82,7 +82,7 @@ func checkKernel() error {
82 82
 	// without actually causing a kernel panic, so we need this workaround until
83 83
 	// the circumstances of pre-3.10 crashes are clearer.
84 84
 	// For details see https://github.com/docker/docker/issues/407
85
-	if !CheckKernelVersion(3, 10, 0) {
85
+	if !checkKernelVersion(3, 10, 0) {
86 86
 		v, _ := kernel.GetKernelVersion()
87 87
 		if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
88 88
 			logrus.Warnf("Your Linux kernel version %s can be unstable running docker. Please upgrade your kernel to 3.10.0.", v.String())
... ...
@@ -161,7 +161,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostC
161 161
 		logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
162 162
 		hostConfig.KernelMemory = 0
163 163
 	}
164
-	if hostConfig.KernelMemory > 0 && !CheckKernelVersion(4, 0, 0) {
164
+	if hostConfig.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) {
165 165
 		warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
166 166
 		logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
167 167
 	}
... ...
@@ -194,7 +194,6 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *runconfig.HostC
194 194
 	if hostConfig.BlkioWeight > 0 && (hostConfig.BlkioWeight < 10 || hostConfig.BlkioWeight > 1000) {
195 195
 		return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000.")
196 196
 	}
197
-
198 197
 	if hostConfig.OomKillDisable && !sysInfo.OomKillDisable {
199 198
 		hostConfig.OomKillDisable = false
200 199
 		return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
... ...
@@ -494,11 +493,14 @@ func setupInitLayer(initLayer string) error {
494 494
 	return nil
495 495
 }
496 496
 
497
-func (daemon *Daemon) NetworkApiRouter() func(w http.ResponseWriter, req *http.Request) {
497
+// NetworkAPIRouter implements a feature for server-experimental,
498
+// directly calling into libnetwork.
499
+func (daemon *Daemon) NetworkAPIRouter() func(w http.ResponseWriter, req *http.Request) {
498 500
 	return nwapi.NewHTTPHandler(daemon.netController)
499 501
 }
500 502
 
501
-func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
503
+// registerLinks writes the links to a file.
504
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
502 505
 	if hostConfig == nil || hostConfig.Links == nil {
503 506
 		return nil
504 507
 	}
... ...
@@ -523,7 +525,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
523 523
 		if child.hostConfig.NetworkMode.IsHost() {
524 524
 			return runconfig.ErrConflictHostNetworkAndLinks
525 525
 		}
526
-		if err := daemon.RegisterLink(container, child, alias); err != nil {
526
+		if err := daemon.registerLink(container, child, alias); err != nil {
527 527
 			return err
528 528
 		}
529 529
 	}
... ...
@@ -531,7 +533,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
531 531
 	// After we load all the links into the daemon
532 532
 	// set them to nil on the hostconfig
533 533
 	hostConfig.Links = nil
534
-	if err := container.WriteHostConfig(); err != nil {
534
+	if err := container.writeHostConfig(); err != nil {
535 535
 		return err
536 536
 	}
537 537
 
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"syscall"
7 7
 
8 8
 	"github.com/docker/docker/daemon/graphdriver"
9
+	// register the windows graph driver
9 10
 	_ "github.com/docker/docker/daemon/graphdriver/windows"
10 11
 	"github.com/docker/docker/pkg/parsers"
11 12
 	"github.com/docker/docker/runconfig"
... ...
@@ -13,7 +14,7 @@ import (
13 13
 )
14 14
 
15 15
 const (
16
-	DefaultVirtualSwitch = "Virtual Switch"
16
+	defaultVirtualSwitch = "Virtual Switch"
17 17
 	platformSupported    = true
18 18
 )
19 19
 
... ...
@@ -91,12 +92,14 @@ func isBridgeNetworkDisabled(config *Config) bool {
91 91
 func initNetworkController(config *Config) (libnetwork.NetworkController, error) {
92 92
 	// Set the name of the virtual switch if not specified by -b on daemon start
93 93
 	if config.Bridge.VirtualSwitchName == "" {
94
-		config.Bridge.VirtualSwitchName = DefaultVirtualSwitch
94
+		config.Bridge.VirtualSwitchName = defaultVirtualSwitch
95 95
 	}
96 96
 	return nil, nil
97 97
 }
98 98
 
99
-func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
99
+// registerLinks sets up links between containers and writes the
100
+// configuration out for persistence.
101
+func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
100 102
 	// TODO Windows. Factored out for network modes. There may be more
101 103
 	// refactoring required here.
102 104
 
... ...
@@ -114,7 +117,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
114 114
 			//An error from daemon.Get() means this name could not be found
115 115
 			return fmt.Errorf("Could not get container for %s", name)
116 116
 		}
117
-		if err := daemon.RegisterLink(container, child, alias); err != nil {
117
+		if err := daemon.registerLink(container, child, alias); err != nil {
118 118
 			return err
119 119
 		}
120 120
 	}
... ...
@@ -122,7 +125,7 @@ func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.
122 122
 	// After we load all the links into the daemon
123 123
 	// set them to nil on the hostconfig
124 124
 	hostConfig.Links = nil
125
-	if err := container.WriteHostConfig(); err != nil {
125
+	if err := container.writeHostConfig(); err != nil {
126 126
 		return err
127 127
 	}
128 128
 	return nil
... ...
@@ -3,5 +3,6 @@
3 3
 package daemon
4 4
 
5 5
 import (
6
+	// register the zfs driver
6 7
 	_ "github.com/docker/docker/daemon/graphdriver/zfs"
7 8
 )
... ...
@@ -8,10 +8,15 @@ import (
8 8
 	"github.com/Sirupsen/logrus"
9 9
 )
10 10
 
11
+// ContainerRmConfig is a holder for passing in runtime config.
11 12
 type ContainerRmConfig struct {
12 13
 	ForceRemove, RemoveVolume, RemoveLink bool
13 14
 }
14 15
 
16
+// ContainerRm removes the container id from the filesystem. An error
17
+// is returned if the container is not found, or if the remove
18
+// fails. If the remove succeeds, the container name is released, and
19
+// network links are removed.
15 20
 func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error {
16 21
 	container, err := daemon.Get(name)
17 22
 	if err != nil {
... ...
@@ -27,18 +32,18 @@ func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
27 27
 		if parent == "/" {
28 28
 			return fmt.Errorf("Conflict, cannot remove the default name of the container")
29 29
 		}
30
-		pe := daemon.ContainerGraph().Get(parent)
30
+		pe := daemon.containerGraph().Get(parent)
31 31
 		if pe == nil {
32 32
 			return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
33 33
 		}
34 34
 
35
-		if err := daemon.ContainerGraph().Delete(name); err != nil {
35
+		if err := daemon.containerGraph().Delete(name); err != nil {
36 36
 			return err
37 37
 		}
38 38
 
39 39
 		parentContainer, _ := daemon.Get(pe.ID())
40 40
 		if parentContainer != nil {
41
-			if err := parentContainer.UpdateNetwork(); err != nil {
41
+			if err := parentContainer.updateNetwork(); err != nil {
42 42
 				logrus.Debugf("Could not update network to remove link %s: %v", n, err)
43 43
 			}
44 44
 		}
... ...
@@ -75,23 +80,23 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
75 75
 	}
76 76
 
77 77
 	// Container state RemovalInProgress should be used to avoid races.
78
-	if err = container.SetRemovalInProgress(); err != nil {
78
+	if err = container.setRemovalInProgress(); err != nil {
79 79
 		return fmt.Errorf("Failed to set container state to RemovalInProgress: %s", err)
80 80
 	}
81 81
 
82
-	defer container.ResetRemovalInProgress()
82
+	defer container.resetRemovalInProgress()
83 83
 
84 84
 	if err = container.Stop(3); err != nil {
85 85
 		return err
86 86
 	}
87 87
 
88 88
 	// Mark container dead. We don't want anybody to be restarting it.
89
-	container.SetDead()
89
+	container.setDead()
90 90
 
91 91
 	// Save container state to disk. So that if error happens before
92 92
 	// container meta file got removed from disk, then a restart of
93 93
 	// docker should not make a dead container alive.
94
-	if err := container.ToDisk(); err != nil {
94
+	if err := container.toDiskLocking(); err != nil {
95 95
 		logrus.Errorf("Error saving dying container to disk: %v", err)
96 96
 	}
97 97
 
... ...
@@ -102,11 +107,11 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
102 102
 			daemon.idIndex.Delete(container.ID)
103 103
 			daemon.containers.Delete(container.ID)
104 104
 			os.RemoveAll(container.root)
105
-			container.LogEvent("destroy")
105
+			container.logEvent("destroy")
106 106
 		}
107 107
 	}()
108 108
 
109
-	if _, err := daemon.containerGraph.Purge(container.ID); err != nil {
109
+	if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
110 110
 		logrus.Debugf("Unable to remove container from link graph: %s", err)
111 111
 	}
112 112
 
... ...
@@ -131,7 +136,7 @@ func (daemon *Daemon) rm(container *Container, forceRemove bool) (err error) {
131 131
 	daemon.idIndex.Delete(container.ID)
132 132
 	daemon.containers.Delete(container.ID)
133 133
 
134
-	container.LogEvent("destroy")
134
+	container.logEvent("destroy")
135 135
 	return nil
136 136
 }
137 137
 
... ...
@@ -17,13 +17,16 @@ import (
17 17
 	"github.com/docker/docker/runconfig"
18 18
 )
19 19
 
20
-type execConfig struct {
20
+// ExecConfig holds the configurations for execs. The Daemon keeps
21
+// track of both running and finished execs so that they can be
22
+// examined both during and after completion.
23
+type ExecConfig struct {
21 24
 	sync.Mutex
22 25
 	ID            string
23 26
 	Running       bool
24 27
 	ExitCode      int
25 28
 	ProcessConfig *execdriver.ProcessConfig
26
-	StreamConfig
29
+	streamConfig
27 30
 	OpenStdin  bool
28 31
 	OpenStderr bool
29 32
 	OpenStdout bool
... ...
@@ -35,21 +38,21 @@ type execConfig struct {
35 35
 }
36 36
 
37 37
 type execStore struct {
38
-	s map[string]*execConfig
38
+	s map[string]*ExecConfig
39 39
 	sync.RWMutex
40 40
 }
41 41
 
42 42
 func newExecStore() *execStore {
43
-	return &execStore{s: make(map[string]*execConfig, 0)}
43
+	return &execStore{s: make(map[string]*ExecConfig, 0)}
44 44
 }
45 45
 
46
-func (e *execStore) Add(id string, execConfig *execConfig) {
46
+func (e *execStore) Add(id string, ExecConfig *ExecConfig) {
47 47
 	e.Lock()
48
-	e.s[id] = execConfig
48
+	e.s[id] = ExecConfig
49 49
 	e.Unlock()
50 50
 }
51 51
 
52
-func (e *execStore) Get(id string) *execConfig {
52
+func (e *execStore) Get(id string) *ExecConfig {
53 53
 	e.RLock()
54 54
 	res := e.s[id]
55 55
 	e.RUnlock()
... ...
@@ -72,24 +75,24 @@ func (e *execStore) List() []string {
72 72
 	return IDs
73 73
 }
74 74
 
75
-func (execConfig *execConfig) Resize(h, w int) error {
75
+func (ExecConfig *ExecConfig) resize(h, w int) error {
76 76
 	select {
77
-	case <-execConfig.waitStart:
77
+	case <-ExecConfig.waitStart:
78 78
 	case <-time.After(time.Second):
79
-		return fmt.Errorf("Exec %s is not running, so it can not be resized.", execConfig.ID)
79
+		return fmt.Errorf("Exec %s is not running, so it can not be resized.", ExecConfig.ID)
80 80
 	}
81
-	return execConfig.ProcessConfig.Terminal.Resize(h, w)
81
+	return ExecConfig.ProcessConfig.Terminal.Resize(h, w)
82 82
 }
83 83
 
84
-func (d *Daemon) registerExecCommand(execConfig *execConfig) {
84
+func (d *Daemon) registerExecCommand(ExecConfig *ExecConfig) {
85 85
 	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
86
-	execConfig.Container.execCommands.Add(execConfig.ID, execConfig)
86
+	ExecConfig.Container.execCommands.Add(ExecConfig.ID, ExecConfig)
87 87
 	// Storing execs in daemon for easy access via remote API.
88
-	d.execCommands.Add(execConfig.ID, execConfig)
88
+	d.execCommands.Add(ExecConfig.ID, ExecConfig)
89 89
 }
90 90
 
91
-func (d *Daemon) getExecConfig(name string) (*execConfig, error) {
92
-	execConfig := d.execCommands.Get(name)
91
+func (d *Daemon) getExecConfig(name string) (*ExecConfig, error) {
92
+	ExecConfig := d.execCommands.Get(name)
93 93
 
94 94
 	// If the exec is found but its container is not in the daemon's list of
95 95
 	// containers then it must have been delete, in which case instead of
... ...
@@ -97,20 +100,20 @@ func (d *Daemon) getExecConfig(name string) (*execConfig, error) {
97 97
 	// the user sees the same error now that they will after the
98 98
 	// 5 minute clean-up loop is run which erases old/dead execs.
99 99
 
100
-	if execConfig != nil && d.containers.Get(execConfig.Container.ID) != nil {
100
+	if ExecConfig != nil && d.containers.Get(ExecConfig.Container.ID) != nil {
101 101
 
102
-		if !execConfig.Container.IsRunning() {
103
-			return nil, fmt.Errorf("Container %s is not running", execConfig.Container.ID)
102
+		if !ExecConfig.Container.IsRunning() {
103
+			return nil, fmt.Errorf("Container %s is not running", ExecConfig.Container.ID)
104 104
 		}
105
-		return execConfig, nil
105
+		return ExecConfig, nil
106 106
 	}
107 107
 
108 108
 	return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name)
109 109
 }
110 110
 
111
-func (d *Daemon) unregisterExecCommand(execConfig *execConfig) {
112
-	execConfig.Container.execCommands.Delete(execConfig.ID)
113
-	d.execCommands.Delete(execConfig.ID)
111
+func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
112
+	ExecConfig.Container.execCommands.Delete(ExecConfig.ID)
113
+	d.execCommands.Delete(ExecConfig.ID)
114 114
 }
115 115
 
116 116
 func (d *Daemon) getActiveContainer(name string) (*Container, error) {
... ...
@@ -122,12 +125,13 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) {
122 122
 	if !container.IsRunning() {
123 123
 		return nil, fmt.Errorf("Container %s is not running", name)
124 124
 	}
125
-	if container.IsPaused() {
125
+	if container.isPaused() {
126 126
 		return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name)
127 127
 	}
128 128
 	return container, nil
129 129
 }
130 130
 
131
+// ContainerExecCreate sets up an exec in a running container.
131 132
 func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
132 133
 	// Not all drivers support Exec (LXC for example)
133 134
 	if err := checkExecSupport(d.execDriver.Name()); err != nil {
... ...
@@ -155,55 +159,56 @@ func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, erro
155 155
 		Privileged: config.Privileged,
156 156
 	}
157 157
 
158
-	execConfig := &execConfig{
158
+	ExecConfig := &ExecConfig{
159 159
 		ID:            stringid.GenerateNonCryptoID(),
160 160
 		OpenStdin:     config.AttachStdin,
161 161
 		OpenStdout:    config.AttachStdout,
162 162
 		OpenStderr:    config.AttachStderr,
163
-		StreamConfig:  StreamConfig{},
163
+		streamConfig:  streamConfig{},
164 164
 		ProcessConfig: processConfig,
165 165
 		Container:     container,
166 166
 		Running:       false,
167 167
 		waitStart:     make(chan struct{}),
168 168
 	}
169 169
 
170
-	d.registerExecCommand(execConfig)
170
+	d.registerExecCommand(ExecConfig)
171 171
 
172
-	container.LogEvent("exec_create: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))
173
-
174
-	return execConfig.ID, nil
172
+	container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
175 173
 
174
+	return ExecConfig.ID, nil
176 175
 }
177 176
 
177
+// ContainerExecStart starts a previously set up exec instance. The
178
+// std streams are set up.
178 179
 func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
179 180
 	var (
180 181
 		cStdin           io.ReadCloser
181 182
 		cStdout, cStderr io.Writer
182 183
 	)
183 184
 
184
-	execConfig, err := d.getExecConfig(execName)
185
+	ExecConfig, err := d.getExecConfig(execName)
185 186
 	if err != nil {
186 187
 		return err
187 188
 	}
188 189
 
189 190
 	func() {
190
-		execConfig.Lock()
191
-		defer execConfig.Unlock()
192
-		if execConfig.Running {
191
+		ExecConfig.Lock()
192
+		defer ExecConfig.Unlock()
193
+		if ExecConfig.Running {
193 194
 			err = fmt.Errorf("Error: Exec command %s is already running", execName)
194 195
 		}
195
-		execConfig.Running = true
196
+		ExecConfig.Running = true
196 197
 	}()
197 198
 	if err != nil {
198 199
 		return err
199 200
 	}
200 201
 
201
-	logrus.Debugf("starting exec command %s in container %s", execConfig.ID, execConfig.Container.ID)
202
-	container := execConfig.Container
202
+	logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
203
+	container := ExecConfig.Container
203 204
 
204
-	container.LogEvent("exec_start: " + execConfig.ProcessConfig.Entrypoint + " " + strings.Join(execConfig.ProcessConfig.Arguments, " "))
205
+	container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))
205 206
 
206
-	if execConfig.OpenStdin {
207
+	if ExecConfig.OpenStdin {
207 208
 		r, w := io.Pipe()
208 209
 		go func() {
209 210
 			defer w.Close()
... ...
@@ -212,32 +217,32 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
212 212
 		}()
213 213
 		cStdin = r
214 214
 	}
215
-	if execConfig.OpenStdout {
215
+	if ExecConfig.OpenStdout {
216 216
 		cStdout = stdout
217 217
 	}
218
-	if execConfig.OpenStderr {
218
+	if ExecConfig.OpenStderr {
219 219
 		cStderr = stderr
220 220
 	}
221 221
 
222
-	execConfig.StreamConfig.stderr = broadcastwriter.New()
223
-	execConfig.StreamConfig.stdout = broadcastwriter.New()
222
+	ExecConfig.streamConfig.stderr = broadcastwriter.New()
223
+	ExecConfig.streamConfig.stdout = broadcastwriter.New()
224 224
 	// Attach to stdin
225
-	if execConfig.OpenStdin {
226
-		execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdinPipe = io.Pipe()
225
+	if ExecConfig.OpenStdin {
226
+		ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdinPipe = io.Pipe()
227 227
 	} else {
228
-		execConfig.StreamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
228
+		ExecConfig.streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
229 229
 	}
230 230
 
231
-	attachErr := attach(&execConfig.StreamConfig, execConfig.OpenStdin, true, execConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
231
+	attachErr := attach(&ExecConfig.streamConfig, ExecConfig.OpenStdin, true, ExecConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)
232 232
 
233 233
 	execErr := make(chan error)
234 234
 
235
-	// Note, the execConfig data will be removed when the container
235
+	// Note, the ExecConfig data will be removed when the container
236 236
 	// itself is deleted.  This allows us to query it (for things like
237 237
 	// the exitStatus) even after the cmd is done running.
238 238
 
239 239
 	go func() {
240
-		if err := container.Exec(execConfig); err != nil {
240
+		if err := container.exec(ExecConfig); err != nil {
241 241
 			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
242 242
 		}
243 243
 	}()
... ...
@@ -260,16 +265,17 @@ func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout
260 260
 	}
261 261
 }
262 262
 
263
-func (d *Daemon) Exec(c *Container, execConfig *execConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
264
-	exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, startCallback)
263
+// Exec calls the underlying exec driver to run
264
+func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
265
+	exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, startCallback)
265 266
 
266 267
 	// On err, make sure we don't leave ExitCode at zero
267 268
 	if err != nil && exitStatus == 0 {
268 269
 		exitStatus = 128
269 270
 	}
270 271
 
271
-	execConfig.ExitCode = exitStatus
272
-	execConfig.Running = false
272
+	ExecConfig.ExitCode = exitStatus
273
+	ExecConfig.Running = false
273 274
 
274 275
 	return exitStatus, err
275 276
 }
... ...
@@ -5,13 +5,15 @@ import (
5 5
 	"io"
6 6
 )
7 7
 
8
+// ContainerExport writes the contents of the container to the given
9
+// writer. An error is returned if the container cannot be found.
8 10
 func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
9 11
 	container, err := daemon.Get(name)
10 12
 	if err != nil {
11 13
 		return err
12 14
 	}
13 15
 
14
-	data, err := container.Export()
16
+	data, err := container.export()
15 17
 	if err != nil {
16 18
 		return fmt.Errorf("%s: %s", name, err)
17 19
 	}
... ...
@@ -22,10 +22,11 @@ func (history *History) Swap(i, j int) {
22 22
 	containers[i], containers[j] = containers[j], containers[i]
23 23
 }
24 24
 
25
+// Add the given container to history.
25 26
 func (history *History) Add(container *Container) {
26 27
 	*history = append(*history, container)
27 28
 }
28 29
 
29
-func (history *History) Sort() {
30
+func (history *History) sort() {
30 31
 	sort.Sort(history)
31 32
 }
... ...
@@ -13,6 +13,7 @@ import (
13 13
 	"github.com/docker/docker/utils"
14 14
 )
15 15
 
16
+// ImageDelete removes the image from the filesystem.
16 17
 // FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
17 18
 func (daemon *Daemon) ImageDelete(name string, force, noprune bool) ([]types.ImageDelete, error) {
18 19
 	list := []types.ImageDelete{}
... ...
@@ -17,6 +17,7 @@ import (
17 17
 	"github.com/docker/docker/utils"
18 18
 )
19 19
 
20
+// SystemInfo returns information about the host server the daemon is running on.
20 21
 func (daemon *Daemon) SystemInfo() (*types.Info, error) {
21 22
 	images := daemon.Graph().Map()
22 23
 	var imgcount int
... ...
@@ -50,11 +51,14 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
50 50
 		logrus.Errorf("Could not read system memory info: %v", err)
51 51
 	}
52 52
 
53
-	// if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION)
53
+	// if we still have the original dockerinit binary from before
54
+	// we copied it locally, let's return the path to that, since
55
+	// that's more intuitive (the copied path is trivial to derive
56
+	// by hand given VERSION)
54 57
 	initPath := utils.DockerInitPath("")
55 58
 	if initPath == "" {
56 59
 		// if that fails, we'll just return the path from the daemon
57
-		initPath = daemon.SystemInitPath()
60
+		initPath = daemon.systemInitPath()
58 61
 	}
59 62
 
60 63
 	sysInfo := sysinfo.New(false)
... ...
@@ -83,8 +87,8 @@ func (daemon *Daemon) SystemInfo() (*types.Info, error) {
83 83
 		InitPath:           initPath,
84 84
 		NCPU:               runtime.NumCPU(),
85 85
 		MemTotal:           meminfo.MemTotal,
86
-		DockerRootDir:      daemon.Config().Root,
87
-		Labels:             daemon.Config().Labels,
86
+		DockerRootDir:      daemon.config().Root,
87
+		Labels:             daemon.config().Labels,
88 88
 		ExperimentalBuild:  utils.ExperimentalBuild(),
89 89
 	}
90 90
 
... ...
@@ -7,6 +7,9 @@ import (
7 7
 	"github.com/docker/docker/api/types"
8 8
 )
9 9
 
10
+// ContainerInspect returns low-level information about a
11
+// container. Returns an error if the container cannot be found, or if
12
+// there is an error getting the data.
10 13
 func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) {
11 14
 	container, err := daemon.Get(name)
12 15
 	if err != nil {
... ...
@@ -30,7 +33,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
30 30
 	// make a copy to play with
31 31
 	hostConfig := *container.hostConfig
32 32
 
33
-	if children, err := daemon.Children(container.Name); err == nil {
33
+	if children, err := daemon.children(container.Name); err == nil {
34 34
 		for linkAlias, child := range children {
35 35
 			hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
36 36
 		}
... ...
@@ -73,7 +76,7 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
73 73
 		ExecDriver:      container.ExecDriver,
74 74
 		MountLabel:      container.MountLabel,
75 75
 		ProcessLabel:    container.ProcessLabel,
76
-		ExecIDs:         container.GetExecIDs(),
76
+		ExecIDs:         container.getExecIDs(),
77 77
 		HostConfig:      &hostConfig,
78 78
 	}
79 79
 
... ...
@@ -90,7 +93,9 @@ func (daemon *Daemon) getInspectData(container *Container) (*types.ContainerJSON
90 90
 	return contJSONBase, nil
91 91
 }
92 92
 
93
-func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
93
+// ContainerExecInspect returns low-level information about the exec
94
+// command. An error is returned if the exec cannot be found.
95
+func (daemon *Daemon) ContainerExecInspect(id string) (*ExecConfig, error) {
94 96
 	eConfig, err := daemon.getExecConfig(id)
95 97
 	if err != nil {
96 98
 		return nil, err
... ...
@@ -98,6 +103,8 @@ func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) {
98 98
 	return eConfig, nil
99 99
 }
100 100
 
101
+// VolumeInspect looks up a volume by name. An error is returned if
102
+// the volume cannot be found.
101 103
 func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) {
102 104
 	v, err := daemon.volumes.Get(name)
103 105
 	if err != nil {
... ...
@@ -14,6 +14,7 @@ func setPlatformSpecificContainerFields(container *Container, contJSONBase *type
14 14
 	return contJSONBase
15 15
 }
16 16
 
17
+// ContainerInspectPre120 is for backwards compatibility with pre v1.20 clients.
17 18
 func (daemon *Daemon) ContainerInspectPre120(name string) (*types.ContainerJSONPre120, error) {
18 19
 	container, err := daemon.Get(name)
19 20
 	if err != nil {
... ...
@@ -19,7 +19,7 @@ func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
19 19
 		}
20 20
 	} else {
21 21
 		// Otherwise, just send the requested signal
22
-		if err := container.KillSig(int(sig)); err != nil {
22
+		if err := container.killSig(int(sig)); err != nil {
23 23
 			return err
24 24
 		}
25 25
 	}
... ...
@@ -17,15 +17,24 @@ func (daemon *Daemon) List() []*Container {
17 17
 	return daemon.containers.List()
18 18
 }
19 19
 
20
+// ContainersConfig is a struct for configuring the command to list
21
+// containers.
20 22
 type ContainersConfig struct {
21
-	All     bool
22
-	Since   string
23
-	Before  string
24
-	Limit   int
25
-	Size    bool
23
+	// if true show all containers, otherwise only running containers.
24
+	All bool
25
+	// show all containers created after this container id
26
+	Since string
27
+	// show all containers created before this container id
28
+	Before string
29
+	// number of containers to return at most
30
+	Limit int
31
+	// if true include the sizes of the containers
32
+	Size bool
33
+	// return only containers that match filters
26 34
 	Filters string
27 35
 }
28 36
 
37
+// Containers returns a list of all the containers.
29 38
 func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container, error) {
30 39
 	var (
31 40
 		foundBefore bool
... ...
@@ -62,7 +71,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
62 62
 		}
63 63
 	}
64 64
 	names := map[string][]string{}
65
-	daemon.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
65
+	daemon.containerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
66 66
 		names[e.ID()] = append(names[e.ID()], p)
67 67
 		return nil
68 68
 	}, 1)
... ...
@@ -195,7 +204,7 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
195 195
 		}
196 196
 
197 197
 		if config.Size {
198
-			sizeRw, sizeRootFs := container.GetSize()
198
+			sizeRw, sizeRootFs := container.getSize()
199 199
 			newC.SizeRw = sizeRw
200 200
 			newC.SizeRootFs = sizeRootFs
201 201
 		}
... ...
@@ -215,6 +224,8 @@ func (daemon *Daemon) Containers(config *ContainersConfig) ([]*types.Container,
215 215
 	return containers, nil
216 216
 }
217 217
 
218
+// Volumes lists known volumes, using the filter to restrict the range
219
+// of volumes returned.
218 220
 func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) {
219 221
 	var volumesOut []*types.Volume
220 222
 	volFilters, err := filters.FromParam(filter)
... ...
@@ -1,8 +1,8 @@
1 1
 package daemon
2 2
 
3
-// Importing packages here only to make sure their init gets called and
4
-// therefore they register themselves to the logdriver factory.
5 3
 import (
4
+	// Importing packages here only to make sure their init gets called and
5
+	// therefore they register themselves to the logdriver factory.
6 6
 	_ "github.com/docker/docker/daemon/logger/fluentd"
7 7
 	_ "github.com/docker/docker/daemon/logger/gelf"
8 8
 	_ "github.com/docker/docker/daemon/logger/journald"
... ...
@@ -1,7 +1,7 @@
1 1
 package daemon
2 2
 
3
-// Importing packages here only to make sure their init gets called and
4
-// therefore they register themselves to the logdriver factory.
5 3
 import (
4
+	// Importing packages here only to make sure their init gets called and
5
+	// therefore they register themselves to the logdriver factory.
6 6
 	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
7 7
 )
... ...
@@ -11,15 +11,25 @@ import (
11 11
 	"github.com/docker/docker/pkg/stdcopy"
12 12
 )
13 13
 
14
+// ContainerLogsConfig holds configs for logging operations. Exists
15
+// for users of the daemon to to pass it a logging configuration.
14 16
 type ContainerLogsConfig struct {
15
-	Follow, Timestamps   bool
16
-	Tail                 string
17
-	Since                time.Time
17
+	// if true stream log output
18
+	Follow bool
19
+	// if true include timestamps for each line of log output
20
+	Timestamps bool
21
+	// return that many lines of log output from the end
22
+	Tail string
23
+	// filter logs by returning on those entries after this time
24
+	Since time.Time
25
+	// whether or not to show stdout and stderr as well as log entries.
18 26
 	UseStdout, UseStderr bool
19 27
 	OutStream            io.Writer
20 28
 	Stop                 <-chan bool
21 29
 }
22 30
 
31
+// ContainerLogs hooks up a container's stdout and stderr streams
32
+// configured with the given struct.
23 33
 func (daemon *Daemon) ContainerLogs(container *Container, config *ContainerLogsConfig) error {
24 34
 	if !(config.UseStdout || config.UseStderr) {
25 35
 		return fmt.Errorf("You must choose at least one stream")
... ...
@@ -138,11 +138,11 @@ func (m *containerMonitor) Start() error {
138 138
 
139 139
 		pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
140 140
 
141
-		m.container.LogEvent("start")
141
+		m.container.logEvent("start")
142 142
 
143 143
 		m.lastStartTime = time.Now()
144 144
 
145
-		if exitStatus, err = m.container.daemon.Run(m.container, pipes, m.callback); err != nil {
145
+		if exitStatus, err = m.container.daemon.run(m.container, pipes, m.callback); err != nil {
146 146
 			// if we receive an internal error from the initial start of a container then lets
147 147
 			// return it instead of entering the restart loop
148 148
 			if m.container.RestartCount == 0 {
... ...
@@ -161,11 +161,11 @@ func (m *containerMonitor) Start() error {
161 161
 		m.resetMonitor(err == nil && exitStatus.ExitCode == 0)
162 162
 
163 163
 		if m.shouldRestart(exitStatus.ExitCode) {
164
-			m.container.SetRestarting(&exitStatus)
164
+			m.container.setRestarting(&exitStatus)
165 165
 			if exitStatus.OOMKilled {
166
-				m.container.LogEvent("oom")
166
+				m.container.logEvent("oom")
167 167
 			}
168
-			m.container.LogEvent("die")
168
+			m.container.logEvent("die")
169 169
 			m.resetContainer(true)
170 170
 
171 171
 			// sleep with a small time increment between each restart to help avoid issues cased by quickly
... ...
@@ -180,9 +180,9 @@ func (m *containerMonitor) Start() error {
180 180
 			continue
181 181
 		}
182 182
 		if exitStatus.OOMKilled {
183
-			m.container.LogEvent("oom")
183
+			m.container.logEvent("oom")
184 184
 		}
185
-		m.container.LogEvent("die")
185
+		m.container.logEvent("die")
186 186
 		m.resetContainer(true)
187 187
 		return err
188 188
 	}
... ...
@@ -270,7 +270,7 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
270 270
 		close(m.startSignal)
271 271
 	}
272 272
 
273
-	if err := m.container.ToDisk(); err != nil {
273
+	if err := m.container.toDiskLocking(); err != nil {
274 274
 		logrus.Errorf("Error saving container to disk: %v", err)
275 275
 	}
276 276
 }
... ...
@@ -9,7 +9,7 @@ func (daemon *Daemon) ContainerPause(name string) error {
9 9
 		return err
10 10
 	}
11 11
 
12
-	if err := container.Pause(); err != nil {
12
+	if err := container.pause(); err != nil {
13 13
 		return fmt.Errorf("Cannot pause container %s: %s", name, err)
14 14
 	}
15 15
 
... ...
@@ -4,6 +4,9 @@ import (
4 4
 	"fmt"
5 5
 )
6 6
 
7
+// ContainerRename changes the name of a container, using the oldName
8
+// to find the container. An error is returned if newName is already
9
+// reserved.
7 10
 func (daemon *Daemon) ContainerRename(oldName, newName string) error {
8 11
 	if oldName == "" || newName == "" {
9 12
 		return fmt.Errorf("usage: docker rename OLD_NAME NEW_NAME")
... ...
@@ -27,10 +30,10 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
27 27
 	undo := func() {
28 28
 		container.Name = oldName
29 29
 		daemon.reserveName(container.ID, oldName)
30
-		daemon.containerGraph.Delete(newName)
30
+		daemon.containerGraphDB.Delete(newName)
31 31
 	}
32 32
 
33
-	if err := daemon.containerGraph.Delete(oldName); err != nil {
33
+	if err := daemon.containerGraphDB.Delete(oldName); err != nil {
34 34
 		undo()
35 35
 		return fmt.Errorf("Failed to delete container %q: %v", oldName, err)
36 36
 	}
... ...
@@ -40,6 +43,6 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
40 40
 		return err
41 41
 	}
42 42
 
43
-	container.LogEvent("rename")
43
+	container.logEvent("rename")
44 44
 	return nil
45 45
 }
... ...
@@ -1,5 +1,7 @@
1 1
 package daemon
2 2
 
3
+// ContainerResize changes the size of the TTY of the process running
4
+// in the container with the given name to the given height and width.
3 5
 func (daemon *Daemon) ContainerResize(name string, height, width int) error {
4 6
 	container, err := daemon.Get(name)
5 7
 	if err != nil {
... ...
@@ -9,11 +11,14 @@ func (daemon *Daemon) ContainerResize(name string, height, width int) error {
9 9
 	return container.Resize(height, width)
10 10
 }
11 11
 
12
+// ContainerExecResize changes the size of the TTY of the process
13
+// running in the exec with the given name to the given height and
14
+// width.
12 15
 func (daemon *Daemon) ContainerExecResize(name string, height, width int) error {
13
-	execConfig, err := daemon.getExecConfig(name)
16
+	ExecConfig, err := daemon.getExecConfig(name)
14 17
 	if err != nil {
15 18
 		return err
16 19
 	}
17 20
 
18
-	return execConfig.Resize(height, width)
21
+	return ExecConfig.resize(height, width)
19 22
 }
... ...
@@ -2,6 +2,12 @@ package daemon
2 2
 
3 3
 import "fmt"
4 4
 
5
+// ContainerRestart stops and starts a container. It attempts to
6
+// gracefully stop the container within the given timeout, forcefully
7
+// stopping it if the timeout is exceeded. If given a negative
8
+// timeout, ContainerRestart will wait forever until a graceful
9
+// stop. Returns an error if the container cannot be found, or if
10
+// there is an underlying error at any stage of the restart.
5 11
 func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
6 12
 	container, err := daemon.Get(name)
7 13
 	if err != nil {
... ...
@@ -7,13 +7,14 @@ import (
7 7
 	"github.com/docker/docker/runconfig"
8 8
 )
9 9
 
10
+// ContainerStart starts a container.
10 11
 func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error {
11 12
 	container, err := daemon.Get(name)
12 13
 	if err != nil {
13 14
 		return err
14 15
 	}
15 16
 
16
-	if container.IsPaused() {
17
+	if container.isPaused() {
17 18
 		return fmt.Errorf("Cannot start a paused container, try unpause instead.")
18 19
 	}
19 20
 
... ...
@@ -9,8 +9,13 @@ import (
9 9
 	"github.com/docker/docker/pkg/units"
10 10
 )
11 11
 
12
+// State holds the current container state, and has methods to get and
13
+// set the state. Container has an embed, which allows all of the
14
+// functions defined against State to run against Container.
12 15
 type State struct {
13 16
 	sync.Mutex
17
+	// FIXME: Why do we have both paused and running if a
18
+	// container cannot be paused and running at the same time?
14 19
 	Running           bool
15 20
 	Paused            bool
16 21
 	Restarting        bool
... ...
@@ -25,6 +30,7 @@ type State struct {
25 25
 	waitChan          chan struct{}
26 26
 }
27 27
 
28
+// NewState creates a default state object with a fresh channel for state changes.
28 29
 func NewState() *State {
29 30
 	return &State{
30 31
 		waitChan: make(chan struct{}),
... ...
@@ -111,10 +117,11 @@ func wait(waitChan <-chan struct{}, timeout time.Duration) error {
111 111
 	}
112 112
 }
113 113
 
114
-// WaitRunning waits until state is running. If state already running it returns
115
-// immediately. If you want wait forever you must supply negative timeout.
116
-// Returns pid, that was passed to SetRunning
117
-func (s *State) WaitRunning(timeout time.Duration) (int, error) {
114
+// waitRunning waits until state is running. If state is already
115
+// running it returns immediately. If you want wait forever you must
116
+// supply negative timeout. Returns pid, that was passed to
117
+// setRunningLocking.
118
+func (s *State) waitRunning(timeout time.Duration) (int, error) {
118 119
 	s.Lock()
119 120
 	if s.Running {
120 121
 		pid := s.Pid
... ...
@@ -126,12 +133,12 @@ func (s *State) WaitRunning(timeout time.Duration) (int, error) {
126 126
 	if err := wait(waitChan, timeout); err != nil {
127 127
 		return -1, err
128 128
 	}
129
-	return s.GetPid(), nil
129
+	return s.getPID(), nil
130 130
 }
131 131
 
132 132
 // WaitStop waits until state is stopped. If state already stopped it returns
133 133
 // immediately. If you want wait forever you must supply negative timeout.
134
-// Returns exit code, that was passed to SetStopped
134
+// Returns exit code, that was passed to setStoppedLocking
135 135
 func (s *State) WaitStop(timeout time.Duration) (int, error) {
136 136
 	s.Lock()
137 137
 	if !s.Running {
... ...
@@ -144,9 +151,10 @@ func (s *State) WaitStop(timeout time.Duration) (int, error) {
144 144
 	if err := wait(waitChan, timeout); err != nil {
145 145
 		return -1, err
146 146
 	}
147
-	return s.GetExitCode(), nil
147
+	return s.getExitCode(), nil
148 148
 }
149 149
 
150
+// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
150 151
 func (s *State) IsRunning() bool {
151 152
 	s.Lock()
152 153
 	res := s.Running
... ...
@@ -154,21 +162,22 @@ func (s *State) IsRunning() bool {
154 154
 	return res
155 155
 }
156 156
 
157
-func (s *State) GetPid() int {
157
+// GetPID holds the process id of a container.
158
+func (s *State) getPID() int {
158 159
 	s.Lock()
159 160
 	res := s.Pid
160 161
 	s.Unlock()
161 162
 	return res
162 163
 }
163 164
 
164
-func (s *State) GetExitCode() int {
165
+func (s *State) getExitCode() int {
165 166
 	s.Lock()
166 167
 	res := s.ExitCode
167 168
 	s.Unlock()
168 169
 	return res
169 170
 }
170 171
 
171
-func (s *State) SetRunning(pid int) {
172
+func (s *State) setRunningLocking(pid int) {
172 173
 	s.Lock()
173 174
 	s.setRunning(pid)
174 175
 	s.Unlock()
... ...
@@ -186,7 +195,7 @@ func (s *State) setRunning(pid int) {
186 186
 	s.waitChan = make(chan struct{})
187 187
 }
188 188
 
189
-func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) {
189
+func (s *State) setStoppedLocking(exitStatus *execdriver.ExitStatus) {
190 190
 	s.Lock()
191 191
 	s.setStopped(exitStatus)
192 192
 	s.Unlock()
... ...
@@ -203,9 +212,9 @@ func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
203 203
 	s.waitChan = make(chan struct{})
204 204
 }
205 205
 
206
-// SetRestarting is when docker handles the auto restart of containers when they are
206
+// setRestarting is when docker handles the auto restart of containers when they are
207 207
 // in the middle of a stop and being restarted again
208
-func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
208
+func (s *State) setRestartingLocking(exitStatus *execdriver.ExitStatus) {
209 209
 	s.Lock()
210 210
 	s.setRestarting(exitStatus)
211 211
 	s.Unlock()
... ...
@@ -231,33 +240,14 @@ func (s *State) setError(err error) {
231 231
 	s.Error = err.Error()
232 232
 }
233 233
 
234
-func (s *State) IsRestarting() bool {
235
-	s.Lock()
236
-	res := s.Restarting
237
-	s.Unlock()
238
-	return res
239
-}
240
-
241
-func (s *State) SetPaused() {
242
-	s.Lock()
243
-	s.Paused = true
244
-	s.Unlock()
245
-}
246
-
247
-func (s *State) SetUnpaused() {
248
-	s.Lock()
249
-	s.Paused = false
250
-	s.Unlock()
251
-}
252
-
253
-func (s *State) IsPaused() bool {
234
+func (s *State) isPaused() bool {
254 235
 	s.Lock()
255 236
 	res := s.Paused
256 237
 	s.Unlock()
257 238
 	return res
258 239
 }
259 240
 
260
-func (s *State) SetRemovalInProgress() error {
241
+func (s *State) setRemovalInProgress() error {
261 242
 	s.Lock()
262 243
 	defer s.Unlock()
263 244
 	if s.removalInProgress {
... ...
@@ -267,13 +257,13 @@ func (s *State) SetRemovalInProgress() error {
267 267
 	return nil
268 268
 }
269 269
 
270
-func (s *State) ResetRemovalInProgress() {
270
+func (s *State) resetRemovalInProgress() {
271 271
 	s.Lock()
272 272
 	s.removalInProgress = false
273 273
 	s.Unlock()
274 274
 }
275 275
 
276
-func (s *State) SetDead() {
276
+func (s *State) setDead() {
277 277
 	s.Lock()
278 278
 	s.Dead = true
279 279
 	s.Unlock()
... ...
@@ -14,11 +14,12 @@ func TestStateRunStop(t *testing.T) {
14 14
 		started := make(chan struct{})
15 15
 		var pid int64
16 16
 		go func() {
17
-			runPid, _ := s.WaitRunning(-1 * time.Second)
17
+			runPid, _ := s.waitRunning(-1 * time.Second)
18 18
 			atomic.StoreInt64(&pid, int64(runPid))
19 19
 			close(started)
20 20
 		}()
21
-		s.SetRunning(i + 100)
21
+		s.setRunningLocking(i + 100)
22
+
22 23
 		if !s.IsRunning() {
23 24
 			t.Fatal("State not running")
24 25
 		}
... ...
@@ -38,8 +39,8 @@ func TestStateRunStop(t *testing.T) {
38 38
 		if runPid != i+100 {
39 39
 			t.Fatalf("Pid %v, expected %v", runPid, i+100)
40 40
 		}
41
-		if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 {
42
-			t.Fatalf("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
41
+		if pid, err := s.waitRunning(-1 * time.Second); err != nil || pid != i+100 {
42
+			t.Fatalf("waitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
43 43
 		}
44 44
 
45 45
 		stopped := make(chan struct{})
... ...
@@ -49,7 +50,7 @@ func TestStateRunStop(t *testing.T) {
49 49
 			atomic.StoreInt64(&exit, int64(exitCode))
50 50
 			close(stopped)
51 51
 		}()
52
-		s.SetStopped(&execdriver.ExitStatus{ExitCode: i})
52
+		s.setStoppedLocking(&execdriver.ExitStatus{ExitCode: i})
53 53
 		if s.IsRunning() {
54 54
 			t.Fatal("State is running")
55 55
 		}
... ...
@@ -79,7 +80,7 @@ func TestStateTimeoutWait(t *testing.T) {
79 79
 	s := NewState()
80 80
 	started := make(chan struct{})
81 81
 	go func() {
82
-		s.WaitRunning(100 * time.Millisecond)
82
+		s.waitRunning(100 * time.Millisecond)
83 83
 		close(started)
84 84
 	}()
85 85
 	select {
... ...
@@ -88,10 +89,12 @@ func TestStateTimeoutWait(t *testing.T) {
88 88
 	case <-started:
89 89
 		t.Log("Start callback fired")
90 90
 	}
91
-	s.SetRunning(42)
91
+
92
+	s.setRunningLocking(42)
93
+
92 94
 	stopped := make(chan struct{})
93 95
 	go func() {
94
-		s.WaitRunning(100 * time.Millisecond)
96
+		s.waitRunning(100 * time.Millisecond)
95 97
 		close(stopped)
96 98
 	}()
97 99
 	select {
... ...
@@ -10,14 +10,18 @@ import (
10 10
 	"github.com/opencontainers/runc/libcontainer"
11 11
 )
12 12
 
13
+// ContainerStatsConfig holds information for configuring the runtime
14
+// behavior of a daemon.ContainerStats() call.
13 15
 type ContainerStatsConfig struct {
14 16
 	Stream    bool
15 17
 	OutStream io.Writer
16 18
 	Stop      <-chan bool
17 19
 }
18 20
 
21
+// ContainerStats writes information about the container to the stream
22
+// given in the config object.
19 23
 func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig) error {
20
-	updates, err := daemon.SubscribeToContainerStats(name)
24
+	updates, err := daemon.subscribeToContainerStats(name)
21 25
 	if err != nil {
22 26
 		return err
23 27
 	}
... ...
@@ -26,7 +30,7 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig)
26 26
 		config.OutStream.Write(nil)
27 27
 	}
28 28
 
29
-	var preCpuStats types.CPUStats
29
+	var preCPUStats types.CPUStats
30 30
 	getStat := func(v interface{}) *types.Stats {
31 31
 		update := v.(*execdriver.ResourceStats)
32 32
 		// Retrieve the nw statistics from libnetwork and inject them in the Stats
... ...
@@ -34,17 +38,17 @@ func (daemon *Daemon) ContainerStats(name string, config *ContainerStatsConfig)
34 34
 			update.Stats.Interfaces = nwStats
35 35
 		}
36 36
 		ss := convertStatsToAPITypes(update.Stats)
37
-		ss.PreCPUStats = preCpuStats
37
+		ss.PreCPUStats = preCPUStats
38 38
 		ss.MemoryStats.Limit = uint64(update.MemoryLimit)
39 39
 		ss.Read = update.Read
40 40
 		ss.CPUStats.SystemUsage = update.SystemUsage
41
-		preCpuStats = ss.CPUStats
41
+		preCPUStats = ss.CPUStats
42 42
 		return ss
43 43
 	}
44 44
 
45 45
 	enc := json.NewEncoder(config.OutStream)
46 46
 
47
-	defer daemon.UnsubscribeToContainerStats(name, updates)
47
+	defer daemon.unsubscribeToContainerStats(name, updates)
48 48
 
49 49
 	noStreamFirstFrame := true
50 50
 	for {
... ...
@@ -23,10 +23,10 @@ import (
23 23
 // and will start processing stats when they are started.
24 24
 func newStatsCollector(interval time.Duration) *statsCollector {
25 25
 	s := &statsCollector{
26
-		interval:   interval,
27
-		publishers: make(map[*Container]*pubsub.Publisher),
28
-		clockTicks: uint64(system.GetClockTicks()),
29
-		bufReader:  bufio.NewReaderSize(nil, 128),
26
+		interval:            interval,
27
+		publishers:          make(map[*Container]*pubsub.Publisher),
28
+		clockTicksPerSecond: uint64(system.GetClockTicks()),
29
+		bufReader:           bufio.NewReaderSize(nil, 128),
30 30
 	}
31 31
 	go s.run()
32 32
 	return s
... ...
@@ -34,11 +34,11 @@ func newStatsCollector(interval time.Duration) *statsCollector {
34 34
 
35 35
 // statsCollector manages and provides container resource stats
36 36
 type statsCollector struct {
37
-	m          sync.Mutex
38
-	interval   time.Duration
39
-	clockTicks uint64
40
-	publishers map[*Container]*pubsub.Publisher
41
-	bufReader  *bufio.Reader
37
+	m                   sync.Mutex
38
+	interval            time.Duration
39
+	clockTicksPerSecond uint64
40
+	publishers          map[*Container]*pubsub.Publisher
41
+	bufReader           *bufio.Reader
42 42
 }
43 43
 
44 44
 // collect registers the container with the collector and adds it to
... ...
@@ -89,7 +89,7 @@ func (s *statsCollector) run() {
89 89
 	var pairs []publishersPair
90 90
 
91 91
 	for range time.Tick(s.interval) {
92
-		systemUsage, err := s.getSystemCpuUsage()
92
+		systemUsage, err := s.getSystemCPUUsage()
93 93
 		if err != nil {
94 94
 			logrus.Errorf("collecting system cpu usage: %v", err)
95 95
 			continue
... ...
@@ -107,7 +107,7 @@ func (s *statsCollector) run() {
107 107
 		s.m.Unlock()
108 108
 
109 109
 		for _, pair := range pairs {
110
-			stats, err := pair.container.Stats()
110
+			stats, err := pair.container.stats()
111 111
 			if err != nil {
112 112
 				if err != execdriver.ErrNotRunning {
113 113
 					logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err)
... ...
@@ -120,11 +120,17 @@ func (s *statsCollector) run() {
120 120
 	}
121 121
 }
122 122
 
123
-const nanoSeconds = 1e9
123
+const nanoSecondsPerSecond = 1e9
124 124
 
125
-// getSystemCpuUSage returns the host system's cpu usage in nanoseconds
126
-// for the system to match the cgroup readings are returned in the same format.
127
-func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
125
+// getSystemCPUUsage returns the host system's cpu usage in
126
+// nanoseconds. An error is returned if the format of the underlying
127
+// file does not match.
128
+//
129
+// Uses /proc/stat defined by POSIX. Looks for the cpu
130
+// statistics line and then sums up the first seven fields
131
+// provided. See `man 5 proc` for details on specific field
132
+// information.
133
+func (s *statsCollector) getSystemCPUUsage() (uint64, error) {
128 134
 	var line string
129 135
 	f, err := os.Open("/proc/stat")
130 136
 	if err != nil {
... ...
@@ -147,15 +153,16 @@ func (s *statsCollector) getSystemCpuUsage() (uint64, error) {
147 147
 			if len(parts) < 8 {
148 148
 				return 0, fmt.Errorf("invalid number of cpu fields")
149 149
 			}
150
-			var sum uint64
150
+			var totalClockTicks uint64
151 151
 			for _, i := range parts[1:8] {
152 152
 				v, err := strconv.ParseUint(i, 10, 64)
153 153
 				if err != nil {
154 154
 					return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err)
155 155
 				}
156
-				sum += v
156
+				totalClockTicks += v
157 157
 			}
158
-			return (sum * nanoSeconds) / s.clockTicks, nil
158
+			return (totalClockTicks * nanoSecondsPerSecond) /
159
+				s.clockTicksPerSecond, nil
159 160
 		}
160 161
 	}
161 162
 	return 0, fmt.Errorf("invalid stat format")
... ...
@@ -2,6 +2,12 @@ package daemon
2 2
 
3 3
 import "fmt"
4 4
 
5
+// ContainerStop looks for the given container and terminates it,
6
+// waiting the given number of seconds before forcefully killing the
7
+// container. If a negative number of seconds is given, ContainerStop
8
+// will wait for a graceful termination. An error is returned if the
9
+// container is not found, is already stopped, or if there is a
10
+// problem stopping the container.
5 11
 func (daemon *Daemon) ContainerStop(name string, seconds int) error {
6 12
 	container, err := daemon.Get(name)
7 13
 	if err != nil {
... ...
@@ -11,6 +11,11 @@ import (
11 11
 	"github.com/docker/docker/api/types"
12 12
 )
13 13
 
14
+// ContainerTop lists the processes running inside of the given
15
+// container by calling ps with the given args, or with the flags
16
+// "-ef" if no args are given.  An error is returned if the container
17
+// is not found, or is not running, or if there are any problems
18
+// running ps, or parsing the output.
14 19
 func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
15 20
 	if psArgs == "" {
16 21
 		psArgs = "-ef"
... ...
@@ -50,6 +55,7 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
50 50
 		return nil, fmt.Errorf("Couldn't find PID field in ps output")
51 51
 	}
52 52
 
53
+	// loop through the output and extract the PID from each line
53 54
 	for _, line := range lines[1:] {
54 55
 		if len(line) == 0 {
55 56
 			continue
... ...
@@ -70,6 +76,6 @@ func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.Container
70 70
 			}
71 71
 		}
72 72
 	}
73
-	container.LogEvent("top")
73
+	container.logEvent("top")
74 74
 	return procList, nil
75 75
 }
... ...
@@ -6,6 +6,7 @@ import (
6 6
 	"github.com/docker/docker/api/types"
7 7
 )
8 8
 
9
+// ContainerTop is not supported on Windows and returns an error.
9 10
 func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) {
10 11
 	return nil, fmt.Errorf("Top is not supported on Windows")
11 12
 }
... ...
@@ -9,7 +9,7 @@ func (daemon *Daemon) ContainerUnpause(name string) error {
9 9
 		return err
10 10
 	}
11 11
 
12
-	if err := container.Unpause(); err != nil {
12
+	if err := container.unpause(); err != nil {
13 13
 		return fmt.Errorf("Cannot unpause container %s: %s", name, err)
14 14
 	}
15 15
 
... ...
@@ -5,7 +5,7 @@ import "testing"
5 5
 func TestParseVolumeFrom(t *testing.T) {
6 6
 	cases := []struct {
7 7
 		spec    string
8
-		expId   string
8
+		expID   string
9 9
 		expMode string
10 10
 		fail    bool
11 11
 	}{
... ...
@@ -25,8 +25,8 @@ func TestParseVolumeFrom(t *testing.T) {
25 25
 			continue
26 26
 		}
27 27
 
28
-		if id != c.expId {
29
-			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expId, id, c.spec)
28
+		if id != c.expID {
29
+			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec)
30 30
 		}
31 31
 		if mode != c.expMode {
32 32
 			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
... ...
@@ -249,7 +249,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *Container) error {
249 249
 			}
250 250
 		}
251 251
 
252
-		return container.ToDisk()
252
+		return container.toDiskLocking()
253 253
 	}
254 254
 
255 255
 	return nil
... ...
@@ -2,6 +2,11 @@ package daemon
2 2
 
3 3
 import "time"
4 4
 
5
+// ContainerWait stops processing until the given container is
6
+// stopped. If the container is not found, an error is returned. On a
7
+// successful stop, the exit code of the container is returned. On a
8
+// timeout, an error is returned. If you want to wait forever, supply
9
+// a negative duration for the timeout.
5 10
 func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
6 11
 	container, err := daemon.Get(name)
7 12
 	if err != nil {
... ...
@@ -18,6 +18,7 @@ packages=(
18 18
 	builder/parser
19 19
 	builder/parser/dumper
20 20
 	cliconfig
21
+	daemon
21 22
 	daemon/events
22 23
 	daemon/execdriver
23 24
 	daemon/execdriver/execdrivers