Implements stats for lxc driver

Moves the container setup helpers (InitContainer, getEnv, SetupCgroups) and the stats collection code into the shared execdriver package so the native and lxc drivers use the same path. The lxc driver now tracks its active containers, saves libcontainer state (init pid and cgroup paths) when a container starts, reports OOM kills, and implements Stats instead of returning an error. The docs drop the note that stats only work with the libcontainer exec-driver.
@@ -2,13 +2,14 @@ package execdriver
 
 import (
 	"errors"
+	"github.com/docker/docker/daemon/execdriver/native/template"
+	"github.com/docker/libcontainer"
+	"github.com/docker/libcontainer/devices"
 	"io"
 	"os"
 	"os/exec"
+	"strings"
 	"time"
-
-	"github.com/docker/libcontainer"
-	"github.com/docker/libcontainer/devices"
 )
 
 // Context is a generic key value pair that allows
@@ -156,3 +157,71 @@ type Command struct {
 	LxcConfig       []string `json:"lxc_config"`
 	AppArmorProfile string   `json:"apparmor_profile"`
 }
+
+func InitContainer(c *Command) *libcontainer.Config {
+	container := template.New()
+
+	container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
+	container.Tty = c.ProcessConfig.Tty
+	container.User = c.ProcessConfig.User
+	container.WorkingDir = c.WorkingDir
+	container.Env = c.ProcessConfig.Env
+	container.Cgroups.Name = c.ID
+	container.Cgroups.AllowedDevices = c.AllowedDevices
+	container.MountConfig.DeviceNodes = c.AutoCreatedDevices
+	container.RootFs = c.Rootfs
+	container.MountConfig.ReadonlyFs = c.ReadonlyRootfs
+
+	// check to see if we are running in ramdisk to disable pivot root
+	container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+	container.RestrictSys = true
+	return container
+}
+
+func getEnv(key string, env []string) string {
+	for _, pair := range env {
+		parts := strings.Split(pair, "=")
+		if parts[0] == key {
+			return parts[1]
+		}
+	}
+	return ""
+}
+
+func SetupCgroups(container *libcontainer.Config, c *Command) error {
+	if c.Resources != nil {
+		container.Cgroups.CpuShares = c.Resources.CpuShares
+		container.Cgroups.Memory = c.Resources.Memory
+		container.Cgroups.MemoryReservation = c.Resources.Memory
+		container.Cgroups.MemorySwap = c.Resources.MemorySwap
+		container.Cgroups.CpusetCpus = c.Resources.Cpuset
+	}
+
+	return nil
+}
+
+func Stats(stateFile string, containerMemoryLimit int64, machineMemory int64) (*ResourceStats, error) {
+	state, err := libcontainer.GetState(stateFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, ErrNotRunning
+		}
+		return nil, err
+	}
+	now := time.Now()
+	stats, err := libcontainer.GetStats(nil, state)
+	if err != nil {
+		return nil, err
+	}
+	// if the container does not have any memory limit specified set the
+	// limit to the machines memory
+	memoryLimit := containerMemoryLimit
+	if memoryLimit == 0 {
+		memoryLimit = machineMemory
+	}
+	return &ResourceStats{
+		Read:           now,
+		ContainerStats: stats,
+		MemoryLimit:    memoryLimit,
+	}, nil
+}
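For context, a minimal sketch (not part of the patch) of how an exec driver can consume the helpers added in the hunk above. The driver type, its memoryLimits map, and the stateDir helper are illustrative stand-ins; InitContainer, SetupCgroups, and Stats have the signatures introduced above, and the concrete lxc and native call sites appear in the later hunks.

```go
package mydriver

import (
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/libcontainer"
)

// driver is a hypothetical exec driver used only for illustration.
type driver struct {
	memoryLimits  map[string]int64 // per-container memory limit, 0 means unlimited
	machineMemory int64            // total host memory, used as the fallback limit
}

// stateDir is an assumed helper returning the directory where this driver
// saved the container's libcontainer state.
func (d *driver) stateDir(id string) string {
	return "/var/lib/docker/execdriver/" + id
}

// prepare builds a libcontainer config from an execdriver.Command using the
// shared helpers: InitContainer fills in hostname, env, rootfs and devices,
// and SetupCgroups copies the CPU/memory limits.
func (d *driver) prepare(c *execdriver.Command) (*libcontainer.Config, error) {
	container := execdriver.InitContainer(c)
	if err := execdriver.SetupCgroups(container, c); err != nil {
		return nil, err
	}
	return container, nil
}

// Stats delegates to the shared execdriver.Stats helper, which loads the
// saved libcontainer state, collects cgroup stats, and substitutes the
// machine's memory when the container has no memory limit of its own.
func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
	return execdriver.Stats(d.stateDir(id), d.memoryLimits[id], d.machineMemory)
}
```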
@@ -12,17 +12,19 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
 
-	"github.com/kr/pty"
-
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
+	sysinfo "github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/term"
 	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/docker/libcontainer/mount/nodes"
+	"github.com/kr/pty"
 )
 
 const DriverName = "lxc"
@@ -30,10 +32,18 @@ const DriverName = "lxc"
 var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver")
 
 type driver struct {
-	root       string // root path for the driver to use
-	initPath   string
-	apparmor   bool
-	sharedRoot bool
+	root             string // root path for the driver to use
+	initPath         string
+	apparmor         bool
+	sharedRoot       bool
+	activeContainers map[string]*activeContainer
+	machineMemory    int64
+	sync.Mutex
+}
+
+type activeContainer struct {
+	container *libcontainer.Config
+	cmd       *exec.Cmd
 }
 
 func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
@@ -41,12 +51,17 @@ func NewDriver(root, initPath string, apparmor bool) (*driver, error) {
 	if err := linkLxcStart(root); err != nil {
 		return nil, err
 	}
-
+	meminfo, err := sysinfo.ReadMemInfo()
+	if err != nil {
+		return nil, err
+	}
 	return &driver{
-		apparmor:   apparmor,
-		root:       root,
-		initPath:   initPath,
-		sharedRoot: rootIsShared(),
+		apparmor:         apparmor,
+		root:             root,
+		initPath:         initPath,
+		sharedRoot:       rootIsShared(),
+		activeContainers: make(map[string]*activeContainer),
+		machineMemory:    meminfo.MemTotal,
 	}, nil
 }
 
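As a side note, the new machineMemory field comes from pkg/system's meminfo reader; a standalone sketch of the same call (the printed value is host-dependent, and the byte unit reflects the package's conversion of /proc/meminfo):

```go
package main

import (
	"fmt"

	sysinfo "github.com/docker/docker/pkg/system"
)

func main() {
	// ReadMemInfo parses /proc/meminfo on Linux.
	meminfo, err := sysinfo.ReadMemInfo()
	if err != nil {
		panic(err)
	}
	// In the driver above this value is stored as machineMemory and used as
	// the fallback memory limit when a container has no limit of its own.
	fmt.Printf("total host memory: %d bytes\n", meminfo.MemTotal)
}
```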
@@ -57,8 +72,9 @@ func (d *driver) Name() string {
 
 func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
 	var (
-		term execdriver.Terminal
-		err  error
+		term     execdriver.Terminal
+		err      error
+		dataPath = d.containerDir(c.ID)
 	)
 
 	if c.ProcessConfig.Tty {
@@ -67,6 +83,16 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
 	}
 	c.ProcessConfig.Terminal = term
+	container, err := d.createContainer(c)
+	if err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+	d.Lock()
+	d.activeContainers[c.ID] = &activeContainer{
+		container: container,
+		cmd:       &c.ProcessConfig.Cmd,
+	}
+	d.Unlock()
 
 	c.Mounts = append(c.Mounts, execdriver.Mount{
 		Source: d.initPath,
@@ -186,25 +212,89 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
 		close(waitLock)
 	}()
 
-	// Poll lxc for RUNNING status
-	pid, err := d.waitForStart(c, waitLock)
-	if err != nil {
+	terminate := func(terr error) (execdriver.ExitStatus, error) {
 		if c.ProcessConfig.Process != nil {
 			c.ProcessConfig.Process.Kill()
 			c.ProcessConfig.Wait()
 		}
-		return execdriver.ExitStatus{ExitCode: -1}, err
+		return execdriver.ExitStatus{ExitCode: -1}, terr
+	}
+	// Poll lxc for RUNNING status
+	pid, err := d.waitForStart(c, waitLock)
+	if err != nil {
+		return terminate(err)
+	}
+
+	cgroupPaths, err := cgroupPaths(c.ID)
+	if err != nil {
+		return terminate(err)
+	}
+
+	state := &libcontainer.State{
+		InitPid:     pid,
+		CgroupPaths: cgroupPaths,
+	}
+
+	if err := libcontainer.SaveState(dataPath, state); err != nil {
+		return terminate(err)
 	}
 
 	c.ContainerPid = pid
 
 	if startCallback != nil {
+		log.Debugf("Invoking startCallback")
 		startCallback(&c.ProcessConfig, pid)
 	}
+	oomKill := false
+	oomKillNotification, err := libcontainer.NotifyOnOOM(state)
+	if err == nil {
+		_, oomKill = <-oomKillNotification
+		log.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
+
+	} else {
+		log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
+	}
 
 	<-waitLock
 
-	return execdriver.ExitStatus{ExitCode: getExitCode(c)}, waitErr
+	// check oom error
+	exitCode := getExitCode(c)
+	if oomKill {
+		exitCode = 137
+	}
+	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
+}
+
+// createContainer populates and configures the container type with the
+// data provided by the execdriver.Command
+func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
+	container := execdriver.InitContainer(c)
+	if err := execdriver.SetupCgroups(container, c); err != nil {
+		return nil, err
+	}
+	return container, nil
+}
+
+// Return an map of susbystem -> container cgroup
+func cgroupPaths(containerId string) (map[string]string, error) {
+	subsystems, err := cgroups.GetAllSubsystems()
+	if err != nil {
+		return nil, err
+	}
+	log.Debugf("subsystems: %s", subsystems)
+	paths := make(map[string]string)
+	for _, subsystem := range subsystems {
+		cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
+		log.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir)
+		if err != nil {
+			//unsupported subystem
+			continue
+		}
+		path := filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId)
+		paths[subsystem] = path
+	}
+
+	return paths, nil
 }
 
 /// Return the exit code of the process
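A brief aside on the hard-coded exit code in the hunk above: 137 is the conventional status for a process killed by SIGKILL (128 + signal number 9), which is how the kernel's OOM killer terminates a container, so the exit code stays meaningful even without inspecting the OOMKilled flag. A tiny, self-contained illustration of the arithmetic (not part of the patch):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Shells and container runtimes report "killed by signal N" as 128 + N.
	oomExit := 128 + int(syscall.SIGKILL) // SIGKILL = 9, so this is 137
	fmt.Println(oomExit)                  // matches the exitCode = 137 set in the hunk above
}
```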
@@ -348,18 +438,26 @@ func (d *driver) Info(id string) execdriver.Info {
 	}
 }
 
-func (d *driver) GetPidsForContainer(id string) ([]int, error) {
-	pids := []int{}
-
-	// cpu is chosen because it is the only non optional subsystem in cgroups
-	subsystem := "cpu"
+func findCgroupRootAndDir(subsystem string) (string, string, error) {
 	cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
 	if err != nil {
-		return pids, err
+		return "", "", err
 	}
 
 	cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
 	if err != nil {
+		return "", "", err
+	}
+	return cgroupRoot, cgroupDir, nil
+}
+
+func (d *driver) GetPidsForContainer(id string) ([]int, error) {
+	pids := []int{}
+
+	// cpu is chosen because it is the only non optional subsystem in cgroups
+	subsystem := "cpu"
+	cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
+	if err != nil {
 		return pids, err
 	}
 
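To make the path construction concrete, a small hypothetical example of what cgroupPaths ends up producing for one subsystem. The mountpoint and cgroup directory are assumed values; on a real host they come from cgroups.FindCgroupMountpoint and cgroups.GetThisCgroupDir via findCgroupRootAndDir above.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Assumed values for illustration only.
	cgroupRoot := "/sys/fs/cgroup/memory" // mountpoint of the memory subsystem
	cgroupDir := "/"                      // cgroup of the docker daemon itself
	containerId := "1234abcd"

	// Same join as cgroupPaths: lxc places containers under an "lxc" directory.
	fmt.Println(filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId))
	// Output: /sys/fs/cgroup/memory/lxc/1234abcd
}
```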
@@ -418,8 +516,12 @@ func rootIsShared() bool {
 	return true
 }
 
+func (d *driver) containerDir(containerId string) string {
+	return path.Join(d.root, "containers", containerId)
+}
+
 func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
-	root := path.Join(d.root, "containers", c.ID, "config.lxc")
+	root := path.Join(d.containerDir(c.ID), "config.lxc")
 
 	fo, err := os.Create(root)
 	if err != nil {
| 537 | 537 |
} |
| 538 | 538 |
|
| 539 | 539 |
func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
|
| 540 |
- return nil, fmt.Errorf("container stats are not supported with LXC")
|
|
| 541 |
- |
|
| 540 |
+ return execdriver.Stats(d.containerDir(id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory) |
|
| 542 | 541 |
} |
| ... | ... |
@@ -4,12 +4,10 @@ package native
 
 import (
 	"fmt"
-	"os"
 	"os/exec"
 	"path/filepath"
 
 	"github.com/docker/docker/daemon/execdriver"
-	"github.com/docker/docker/daemon/execdriver/native/template"
 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/apparmor"
 	"github.com/docker/libcontainer/devices"
| 20 | 20 |
// createContainer populates and configures the container type with the |
| 21 | 21 |
// data provided by the execdriver.Command |
| 22 | 22 |
func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
|
| 23 |
- container := template.New() |
|
| 24 |
- |
|
| 25 |
- container.Hostname = getEnv("HOSTNAME", c.ProcessConfig.Env)
|
|
| 26 |
- container.Tty = c.ProcessConfig.Tty |
|
| 27 |
- container.User = c.ProcessConfig.User |
|
| 28 |
- container.WorkingDir = c.WorkingDir |
|
| 29 |
- container.Env = c.ProcessConfig.Env |
|
| 30 |
- container.Cgroups.Name = c.ID |
|
| 31 |
- container.Cgroups.AllowedDevices = c.AllowedDevices |
|
| 32 |
- container.MountConfig.DeviceNodes = c.AutoCreatedDevices |
|
| 33 |
- container.RootFs = c.Rootfs |
|
| 34 |
- container.MountConfig.ReadonlyFs = c.ReadonlyRootfs |
|
| 35 |
- |
|
| 36 |
- // check to see if we are running in ramdisk to disable pivot root |
|
| 37 |
- container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
|
|
| 38 |
- container.RestrictSys = true |
|
| 23 |
+ container := execdriver.InitContainer(c) |
|
| 39 | 24 |
|
| 40 | 25 |
if err := d.createIpc(container, c); err != nil {
|
| 41 | 26 |
return nil, err |
| ... | ... |
@@ -63,7 +46,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e |
| 63 | 63 |
container.AppArmorProfile = c.AppArmorProfile |
| 64 | 64 |
} |
| 65 | 65 |
|
| 66 |
- if err := d.setupCgroups(container, c); err != nil {
|
|
| 66 |
+ if err := execdriver.SetupCgroups(container, c); err != nil {
|
|
| 67 | 67 |
return nil, err |
| 68 | 68 |
} |
| 69 | 69 |
|
| ... | ... |
@@ -189,18 +172,6 @@ func (d *driver) setCapabilities(container *libcontainer.Config, c *execdriver.C
 	return err
 }
 
-func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error {
-	if c.Resources != nil {
-		container.Cgroups.CpuShares = c.Resources.CpuShares
-		container.Cgroups.Memory = c.Resources.Memory
-		container.Cgroups.MemoryReservation = c.Resources.Memory
-		container.Cgroups.MemorySwap = c.Resources.MemorySwap
-		container.Cgroups.CpusetCpus = c.Resources.Cpuset
-	}
-
-	return nil
-}
-
 func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Command) error {
 	for _, m := range c.Mounts {
 		container.MountConfig.Mounts = append(container.MountConfig.Mounts, &mount.Mount{
@@ -11,10 +11,8 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"strings"
 	"sync"
 	"syscall"
-	"time"
 
 	log "github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/execdriver"
@@ -291,40 +289,7 @@ func (d *driver) Clean(id string) error {
 }
 
 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
-	c := d.activeContainers[id]
-	state, err := libcontainer.GetState(filepath.Join(d.root, id))
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, execdriver.ErrNotRunning
-		}
-		return nil, err
-	}
-	now := time.Now()
-	stats, err := libcontainer.GetStats(nil, state)
-	if err != nil {
-		return nil, err
-	}
-	memoryLimit := c.container.Cgroups.Memory
-	// if the container does not have any memory limit specified set the
-	// limit to the machines memory
-	if memoryLimit == 0 {
-		memoryLimit = d.machineMemory
-	}
-	return &execdriver.ResourceStats{
-		Read:           now,
-		ContainerStats: stats,
-		MemoryLimit:    memoryLimit,
-	}, nil
-}
-
-func getEnv(key string, env []string) string {
-	for _, pair := range env {
-		parts := strings.Split(pair, "=")
-		if parts[0] == key {
-			return parts[1]
-		}
-	}
-	return ""
+	return execdriver.Stats(filepath.Join(d.root, id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory)
 }
 
 type TtyConsole struct {
@@ -13,8 +13,6 @@ CONTAINER [CONTAINER...]
 
 Display a live stream of one or more containers' resource usage statistics
 
-Note: this functionality currently only works when using the *libcontainer* exec-driver.
-
 # OPTIONS
 **--help**
   Print usage statement
@@ -524,8 +524,6 @@ Status Codes:
 
 This endpoint returns a live stream of a container's resource usage statistics.
 
-> **Note**: this functionality currently only works when using the *libcontainer* exec-driver.
-
 **Example request**:
 
         GET /containers/redis1/stats HTTP/1.1
@@ -2037,8 +2037,6 @@ more details on finding shared images from the command line.
 
       --help=false       Print usage
 
-> **Note**: this functionality currently only works when using the *libcontainer* exec-driver.
-
 Running `docker stats` on multiple containers
 
     $ sudo docker stats redis1 redis2
@@ -35,7 +35,7 @@ func TestEventsContainerFailStartDie(t *testing.T) {
 
 	out, _, _ := dockerCmd(t, "images", "-q")
 	image := strings.Split(out, "\n")[0]
-	eventsCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testeventdie", image, "blerg")
+	eventsCmd := exec.Command(dockerBinary, "run", "--name", "testeventdie", image, "blerg")
 	_, _, err := runCommandWithOutput(eventsCmd)
 	if err == nil {
 		t.Fatalf("Container run with command blerg should have failed, but it did not")