libcontainerd/client_windows.go
 package libcontainerd
 
 import (
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
 	"syscall"
 
 	"golang.org/x/net/context"
 
 	"github.com/Microsoft/hcsshim"
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/pkg/sysinfo"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
 type client struct {
 	clientCommon
 
 	// Platform specific properties below here (none presently on Windows)
 }
 
 // Win32 error codes that are used for various workarounds
 // These really should be ALL_CAPS to match golang's syscall library and standard
 // Win32 error conventions, but golint insists on CamelCase.
 const (
 	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
 	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
 	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
 	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
 )
 
 // defaultOwner is a tag passed to HCS to allow it to differentiate between
 // container creator management stacks. We hard-code "docker" here.
 const defaultOwner = "docker"
 
 // Create is the entrypoint to create a container from a spec, and if
 // successfully created, to start it too. The table below shows the fields
 // required for the HCS JSON calling parameters; fields that are not
 // populated are omitted.
 // +-----------------+--------------------------------------------+---------------------------------------------------+
 // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
 // +-----------------+--------------------------------------------+---------------------------------------------------+
 // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
 // | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
 // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
 // | SandboxPath     |                                            | %root%\windowsfilter                              |
 // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
 // +-----------------+--------------------------------------------+---------------------------------------------------+
 //
 // Isolation=Process example:
 //
 // {
 //	"SystemType": "Container",
 //	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
 //	"Owner": "docker",
 //	"IsDummy": false,
 //	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
 //	"IgnoreFlushesDuringBoot": true,
 //	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
 //	"Layers": [{
 //		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
 //		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
 //	}],
 //	"HostName": "5e0055c814a6",
 //	"MappedDirectories": [],
 //	"HvPartition": false,
 //	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
 //	"Servicing": false
 //}
 //
 // Isolation=Hyper-V example:
 //
 //{
 //	"SystemType": "Container",
 //	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
 //	"Owner": "docker",
 //	"IsDummy": false,
 //	"IgnoreFlushesDuringBoot": true,
 //	"Layers": [{
 //		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
 //		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
 //	}],
 //	"HostName": "475c2c58933b",
 //	"MappedDirectories": [],
 //	"SandboxPath": "C:\\\\control\\\\windowsfilter",
 //	"HvPartition": true,
 //	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
 //	"HvRuntime": {
 //		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
 //	},
 //	"Servicing": false
 //}
 func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	logrus.Debugln("libcontainerd: client.Create() with spec", spec)
 
 	configuration := &hcsshim.ContainerConfig{
 		SystemType: "Container",
 		Name:       containerID,
 		Owner:      defaultOwner,
 		IgnoreFlushesDuringBoot: false,
 		HostName:                spec.Hostname,
 		HvPartition:             false,
 	}
 
 	if spec.Windows.Resources != nil {
 		if spec.Windows.Resources.CPU != nil {
 			if spec.Windows.Resources.CPU.Count != nil {
 				// This check is being done here rather than in adaptContainerSettings
 				// because we don't want to update the HostConfig in case this container
 				// is moved to a host with more CPUs than this one.
 				cpuCount := *spec.Windows.Resources.CPU.Count
 				hostCPUCount := uint64(sysinfo.NumCPU())
 				if cpuCount > hostCPUCount {
 					logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
 					cpuCount = hostCPUCount
 				}
 				configuration.ProcessorCount = uint32(cpuCount)
 			}
 			if spec.Windows.Resources.CPU.Shares != nil {
 				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
 			}
 			if spec.Windows.Resources.CPU.Percent != nil {
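 				// For example, a requested CPU percent of 50 maps to a
 				// ProcessorMaximum of 5000.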
 				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000
 			}
 		}
 		if spec.Windows.Resources.Memory != nil {
 			if spec.Windows.Resources.Memory.Limit != nil {
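 				// The OCI spec expresses the limit in bytes; HCS expects megabytes.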
 				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
 			}
 		}
 		if spec.Windows.Resources.Storage != nil {
 			if spec.Windows.Resources.Storage.Bps != nil {
 				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
 			}
 			if spec.Windows.Resources.Storage.Iops != nil {
 				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
 			}
 		}
 	}
 
 	var layerOpt *LayerOption
 	for _, option := range options {
 		if s, ok := option.(*ServicingOption); ok {
 			configuration.Servicing = s.IsServicing
 			continue
 		}
 		if f, ok := option.(*FlushOption); ok {
 			configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot
 			continue
 		}
 		if h, ok := option.(*HyperVIsolationOption); ok {
 			configuration.HvPartition = h.IsHyperV
 			configuration.SandboxPath = h.SandboxPath
 			continue
 		}
 		if l, ok := option.(*LayerOption); ok {
 			layerOpt = l
 		}
 		if n, ok := option.(*NetworkEndpointsOption); ok {
 			configuration.EndpointList = n.Endpoints
 			configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
 			continue
 		}
 		if c, ok := option.(*CredentialsOption); ok {
 			configuration.Credentials = c.Credentials
 			continue
 		}
 	}
 
 	// We must have a layer option with at least one path
 	if layerOpt == nil || layerOpt.LayerPaths == nil {
 		return fmt.Errorf("no layer option or paths were supplied to the runtime")
 	}
 
 	if configuration.HvPartition {
 		// Find the upper-most utility VM image, since the utility VM does not
 		// use layering in RS1.
 		// TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable.
 		var uvmImagePath string
 		for _, path := range layerOpt.LayerPaths {
 			fullPath := filepath.Join(path, "UtilityVM")
 			_, err := os.Stat(fullPath)
 			if err == nil {
 				uvmImagePath = fullPath
 				break
 			}
 			if !os.IsNotExist(err) {
 				return err
 			}
 		}
 		if uvmImagePath == "" {
 			return errors.New("utility VM image could not be found")
 		}
 		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
 	} else {
 		configuration.VolumePath = spec.Root.Path
 	}
 
 	configuration.LayerFolderPath = layerOpt.LayerFolderPath
 
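 	// Each layer is identified to HCS by a GUID derived from the name of its
 	// folder on disk.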
 	for _, layerPath := range layerOpt.LayerPaths {
 		_, filename := filepath.Split(layerPath)
 		g, err := hcsshim.NameToGuid(filename)
 		if err != nil {
 			return err
 		}
 		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
 			ID:   g.ToString(),
 			Path: layerPath,
 		})
 	}
 
 	// Add the mounts (volumes, bind mounts, etc.) to the structure
 	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
 	for i, mount := range spec.Mounts {
 		mds[i] = hcsshim.MappedDir{
 			HostPath:      mount.Source,
 			ContainerPath: mount.Destination,
 			ReadOnly:      false,
 		}
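 		// Mark the directory read-only if the OCI mount carries the "ro" option.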
 		for _, o := range mount.Options {
 			if strings.ToLower(o) == "ro" {
 				mds[i].ReadOnly = true
 			}
 		}
 	}
 	configuration.MappedDirectories = mds
 
 	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
 	if err != nil {
 		return err
 	}
 
 	// Construct a container object for calling start on it.
 	container := &container{
 		containerCommon: containerCommon{
 			process: process{
 				processCommon: processCommon{
 					containerID:  containerID,
 					client:       clnt,
 					friendlyName: InitFriendlyName,
 				},
 				commandLine: strings.Join(spec.Process.Args, " "),
 			},
 			processes: make(map[string]*process),
 		},
 		ociSpec:      spec,
 		hcsContainer: hcsContainer,
 	}
 
 	container.options = options
 	for _, option := range options {
 		if err := option.Apply(container); err != nil {
 			logrus.Errorf("libcontainerd: %v", err)
 		}
 	}
 
 	// Call start, and if it fails, delete the container from our
 	// internal structure; start will keep HCS in sync by deleting the
 	// container there.
 	logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
 	if err := container.start(attachStdio); err != nil {
 		clnt.deleteContainer(containerID)
 		return err
 	}
 
 	logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
 	return nil
 }
 
 // AddProcess is the handler for adding a process to an already running
 // container. It's called through docker exec. It returns the system pid of the
 // exec'd process.
 func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	container, err := clnt.getContainer(containerID)
 	if err != nil {
 		return -1, err
 	}
 	// Note we always tell HCS to create stdout as it's required regardless of
 	// the '-i' or '-t' options, so that docker can always grab the output
 	// through logs. We also tell HCS to always create stdin, even if it's not
 	// used - it will be closed shortly. Stderr is only created if we're not
 	// using -t.
 	createProcessParms := hcsshim.ProcessConfig{
 		EmulateConsole:   procToAdd.Terminal,
 		CreateStdInPipe:  true,
 		CreateStdOutPipe: true,
 		CreateStdErrPipe: !procToAdd.Terminal,
 	}
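 	// ConsoleSize is stored height-first: index 0 is the height and index 1
 	// is the width of the console.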
 	createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height)
 	createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width)
 
 	// Take working directory from the process to add if it is defined,
 	// otherwise take from the first process.
 	if procToAdd.Cwd != "" {
 		createProcessParms.WorkingDirectory = procToAdd.Cwd
 	} else {
 		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
 	}
 
 	// Configure the environment for the process
 	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
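 	// Windows processes are given a single command-line string rather than
 	// an argv array, so join the arguments with spaces.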
 	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
 	createProcessParms.User = procToAdd.User.Username
 
 	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)
 
 	// Start the command running in the container.
 	var stdout, stderr io.ReadCloser
 	var stdin io.WriteCloser
 	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
 	if err != nil {
 		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
 		return -1, err
 	}
 
 	pid := newProcess.Pid()
 
 	stdin, stdout, stderr, err = newProcess.Stdio()
 	if err != nil {
 		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
 		return -1, err
 	}
 
 	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
 	iopipe.Stdin = createStdInCloser(stdin, newProcess)
 
 	// Convert io.ReadClosers to io.Readers
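 	// autoClosingReader is assumed to close the underlying pipe once it has
 	// been fully drained, so callers only ever see a plain reader.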
 	if stdout != nil {
 		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
 	}
 	if stderr != nil {
 		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
 	}
 
 	proc := &process{
 		processCommon: processCommon{
 			containerID:  containerID,
 			friendlyName: processFriendlyName,
 			client:       clnt,
 			systemPid:    uint32(pid),
 		},
 		commandLine: createProcessParms.CommandLine,
 		hcsProcess:  newProcess,
 	}
 
 	// Add the process to the container's list of processes
 	container.processes[processFriendlyName] = proc
 
 	// Tell the engine to attach streams back to the client
 	if err := attachStdio(*iopipe); err != nil {
 		return -1, err
 	}
 
 	// Spin up a goroutine waiting for exit to handle cleanup
 	go container.waitExit(proc, false)
 
 	return pid, nil
 }
 
 // Signal handles `docker stop` on Windows. While Linux has support for
 // the full range of signals, signals aren't really implemented on Windows.
 // We fake supporting regular stop and -9 to force kill.
 func (clnt *client) Signal(containerID string, sig int) error {
 	var (
 		cont *container
 		err  error
 	)
 
 	// Get the container as we need it to get the container handle.
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	if cont, err = clnt.getContainer(containerID); err != nil {
 		return err
 	}
 
 	cont.manualStopRequested = true
 
 	logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)
 
 	if syscall.Signal(sig) == syscall.SIGKILL {
 		// Terminate the compute system
 		if err := cont.hcsContainer.Terminate(); err != nil {
 			if !hcsshim.IsPending(err) {
 				logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
 			}
 		}
 	} else {
 		// Terminate Process
 		if err := cont.hcsProcess.Kill(); err != nil && !hcsshim.IsAlreadyStopped(err) {
 			// ignore errors
 			logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err)
 		}
 	}
 
 	return nil
 }
 
 // SignalProcess handles signalling a process in a container. While Linux has
 // support for the full range of signals, signals aren't really implemented on
 // Windows. We try to terminate the specified process regardless of which
 // signal is requested.
 func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	cont, err := clnt.getContainer(containerID)
 	if err != nil {
 		return err
 	}
 
 	for _, p := range cont.processes {
 		if p.friendlyName == processFriendlyName {
 			return p.hcsProcess.Kill()
 		}
 	}
 
 	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
 }
 
 // Resize handles a CLI event to resize an interactive docker run or docker exec
 // window.
 func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
 	// Get the libcontainerd container object
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	cont, err := clnt.getContainer(containerID)
 	if err != nil {
 		return err
 	}
 
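 	// hcsshim expects 16-bit console dimensions, passed width-first.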
 	h, w := uint16(height), uint16(width)
 
 	if processFriendlyName == InitFriendlyName {
 		logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid)
 		return cont.process.hcsProcess.ResizeConsole(w, h)
 	}
 
 	for _, p := range cont.processes {
 		if p.friendlyName == processFriendlyName {
 			logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid)
 			return p.hcsProcess.ResizeConsole(w, h)
 		}
 	}
 
 	return fmt.Errorf("Resize could not find containerID %s to resize", containerID)
 
 }
 
 // Pause handles pause requests for containers
 func (clnt *client) Pause(containerID string) error {
 	unlockContainer := true
 	// Get the libcontainerd container object
 	clnt.lock(containerID)
 	defer func() {
 		if unlockContainer {
 			clnt.unlock(containerID)
 		}
 	}()
 	container, err := clnt.getContainer(containerID)
 	if err != nil {
 		return err
 	}
 
 	for _, option := range container.options {
 		if h, ok := option.(*HyperVIsolationOption); ok {
 			if !h.IsHyperV {
 				return errors.New("cannot pause Windows Server Containers")
 			}
 			break
 		}
 	}
 
 	err = container.hcsContainer.Pause()
 	if err != nil {
 		return err
 	}
 
 	// Unlock container before calling back into the daemon
 	unlockContainer = false
 	clnt.unlock(containerID)
 
 	return clnt.backend.StateChanged(containerID, StateInfo{
 		CommonStateInfo: CommonStateInfo{
 			State: StatePause,
 		}})
 }
 
 // Resume handles resume requests for containers
 func (clnt *client) Resume(containerID string) error {
 	unlockContainer := true
 	// Get the libcontainerd container object
 	clnt.lock(containerID)
 	defer func() {
 		if unlockContainer {
 			clnt.unlock(containerID)
 		}
 	}()
 	container, err := clnt.getContainer(containerID)
 	if err != nil {
 		return err
 	}
 
 	// This should never happen, since Windows Server Containers cannot be paused
 	for _, option := range container.options {
 		if h, ok := option.(*HyperVIsolationOption); ok {
 			if !h.IsHyperV {
 				return errors.New("cannot resume Windows Server Containers")
 			}
 			break
 		}
 	}
 
 	err = container.hcsContainer.Resume()
 	if err != nil {
 		return err
 	}
 
 	// Unlock container before calling back into the daemon
 	unlockContainer = false
 	clnt.unlock(containerID)
 
 	return clnt.backend.StateChanged(containerID, StateInfo{
 		CommonStateInfo: CommonStateInfo{
 			State: StateResume,
 		}})
 }
 
 // Stats handles stats requests for containers
 func (clnt *client) Stats(containerID string) (*Stats, error) {
 	// Get the libcontainerd container object
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	container, err := clnt.getContainer(containerID)
 	if err != nil {
 		return nil, err
 	}
 	s, err := container.hcsContainer.Statistics()
 	if err != nil {
 		return nil, err
 	}
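 	// Stats converts directly from the hcsshim statistics structure, so no
 	// per-field copying is needed.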
 	st := Stats(s)
 	return &st, nil
 }
 
 // Restore is the handler for restoring a container
 func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
 	// TODO Windows: Implement this. For now, just tell the backend the container exited.
 	logrus.Debugf("libcontainerd: Restore(%s)", containerID)
 	return clnt.backend.StateChanged(containerID, StateInfo{
 		CommonStateInfo: CommonStateInfo{
 			State:    StateExit,
 			ExitCode: 1 << 31,
 		}})
 }
 
 // GetPidsForContainer returns a list of process IDs running in a container.
 // Although implemented, this is not used on Windows.
 func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
 	var pids []int
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	cont, err := clnt.getContainer(containerID)
 	if err != nil {
 		return nil, err
 	}
 
 	// Add the first process
 	pids = append(pids, int(cont.containerCommon.systemPid))
 	// And add all the exec'd processes
 	for _, p := range cont.processes {
 		pids = append(pids, int(p.processCommon.systemPid))
 	}
 	return pids, nil
 }
 
 // Summary returns a summary of the processes running in a container.
 // This is present in Windows to support docker top. On Linux, the
 // engine shells out to ps to get process information. On Windows, as
 // the containers could be Hyper-V containers, they would not be
 // visible on the container host. However, libcontainerd does have
 // that information.
 func (clnt *client) Summary(containerID string) ([]Summary, error) {
 
 	// Get the libcontainerd container object
 	clnt.lock(containerID)
 	defer clnt.unlock(containerID)
 	container, err := clnt.getContainer(containerID)
 	if err != nil {
 		return nil, err
 	}
 	p, err := container.hcsContainer.ProcessList()
 	if err != nil {
 		return nil, err
 	}
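 	// Convert each hcsshim process list entry to a libcontainerd Summary.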
 	pl := make([]Summary, len(p))
 	for i := range p {
 		pl[i] = Summary(p[i])
 	}
 	return pl, nil
 }
 
 // UpdateResources updates resources for a running container.
 func (clnt *client) UpdateResources(containerID string, resources Resources) error {
 	// Updating resources isn't supported on Windows, but we return nil
 	// rather than an error so that container update requests don't fail
 	return nil
 }
 
 func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
 	return errors.New("Windows: Containers do not support checkpoints")
 }
 
 func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
 	return errors.New("Windows: Containers do not support checkpoints")
 }
 
 func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
 	return nil, errors.New("Windows: Containers do not support checkpoints")
 }
 
 func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
 	return &ServerVersion{}, nil
 }