
Merge pull request #31273 from fabiokung/consistent-ro-view

No container locks on `docker ps`

Aaron Lehmann authored on 2017/06/24 07:28:55
Showing 32 changed files
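
Editor's note (not part of the original diff): previously `docker ps` took every container's lock while building its listing. This PR instead checkpoints each container's state into an in-memory transactional store (hashicorp/go-memdb) whenever it changes, and list queries read immutable Snapshot copies from that store. An abridged recombination of the new read path, pieced together from the reduceContainers and filterByNameIDMatches hunks below (not verbatim daemon code):

	// take a consistent, lock-free view of all containers
	view := daemon.containersReplica.Snapshot(daemon.nameIndex)
	all, err := view.All() // read-only container.Snapshot values; no container locks
	if err != nil {
		return nil, err
	}
	sort.Sort(byCreatedDescending(all)) // newest first, nanosecond resolution
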
... ...
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"io"
... ...
@@ -14,8 +15,6 @@ import (
 	"syscall"
 	"time"
 
-	"golang.org/x/net/context"
-
 	"github.com/Sirupsen/logrus"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
... ...
@@ -45,7 +44,7 @@ import (
 	"github.com/docker/libnetwork/options"
 	"github.com/docker/libnetwork/types"
 	agentexec "github.com/docker/swarmkit/agent/exec"
-	"github.com/opencontainers/selinux/go-selinux/label"
+	"golang.org/x/net/context"
 )
 
 const configFileName = "config.v2.json"
... ...
@@ -152,41 +151,51 @@ func (container *Container) FromDisk() error {
 		container.Platform = runtime.GOOS
 	}
 
-	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
-		return err
-	}
 	return container.readHostConfig()
 }
 
-// ToDisk saves the container configuration on disk.
-func (container *Container) ToDisk() error {
+// toDisk saves the container configuration on disk and returns a deep copy.
+func (container *Container) toDisk() (*Container, error) {
+	var (
+		buf      bytes.Buffer
+		deepCopy Container
+	)
 	pth, err := container.ConfigPath()
 	if err != nil {
-		return err
+		return nil, err
 	}
 
-	jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0644)
+	// Save container settings
+	f, err := ioutils.NewAtomicFileWriter(pth, 0644)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	defer jsonSource.Close()
+	defer f.Close()
 
-	enc := json.NewEncoder(jsonSource)
+	w := io.MultiWriter(&buf, f)
+	if err := json.NewEncoder(w).Encode(container); err != nil {
+		return nil, err
+	}
 
-	// Save container settings
-	if err := enc.Encode(container); err != nil {
-		return err
+	if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil {
+		return nil, err
+	}
+	deepCopy.HostConfig, err = container.WriteHostConfig()
+	if err != nil {
+		return nil, err
 	}
 
-	return container.WriteHostConfig()
+	return &deepCopy, nil
 }
 
-// ToDiskLocking saves the container configuration on disk in a thread safe way.
-func (container *Container) ToDiskLocking() error {
-	container.Lock()
-	err := container.ToDisk()
-	container.Unlock()
-	return err
+// CheckpointTo makes the Container's current state visible to queries, and persists state.
+// Callers must hold a Container lock.
+func (container *Container) CheckpointTo(store ViewDB) error {
+	deepCopy, err := container.toDisk()
+	if err != nil {
+		return err
+	}
+	return store.Save(deepCopy)
 }
 
 // readHostConfig reads the host configuration from disk for the container.
... ...
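
Editor's note: CheckpointTo establishes the pattern used throughout the rest of this PR — mutate container state under the lock, then checkpoint, which both persists to disk and publishes a deep copy to the read-only store. A minimal sketch (the Dead mutation is illustrative; the restore() hunk in daemon.go below does exactly this):

	c.Lock()
	c.Dead = true // any state mutation goes here
	if err := c.CheckpointTo(daemon.containersReplica); err != nil {
		logrus.Errorf("Failed to update container %s state: %v", c.ID, err)
	}
	c.Unlock()
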
@@ -218,20 +227,34 @@ func (container *Container) readHostConfig() error {
 	return nil
 }
 
-// WriteHostConfig saves the host configuration on disk for the container.
-func (container *Container) WriteHostConfig() error {
+// WriteHostConfig saves the host configuration on disk for the container,
+// and returns a deep copy of the saved object. Callers must hold a Container lock.
+func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) {
+	var (
+		buf      bytes.Buffer
+		deepCopy containertypes.HostConfig
+	)
+
 	pth, err := container.HostConfigPath()
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	f, err := ioutils.NewAtomicFileWriter(pth, 0644)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer f.Close()
 
-	return json.NewEncoder(f).Encode(&container.HostConfig)
+	w := io.MultiWriter(&buf, f)
+	if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil {
+		return nil, err
+	}
+
+	if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil {
+		return nil, err
+	}
+	return &deepCopy, nil
 }
 
 // SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir
... ...
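
Editor's note: both toDisk and WriteHostConfig above use the same encode-once idiom — an io.MultiWriter sends the JSON to the file and to a buffer in one pass, and decoding the buffer yields an independent deep copy for the read-only store. A self-contained illustration of the trick; HostConfig here is a stand-in struct, not the real Docker type:

	package main

	import (
		"bytes"
		"encoding/json"
		"fmt"
		"io"
		"os"
	)

	type HostConfig struct {
		NetworkMode string
		Links       []string
	}

	// writeAndCopy encodes cfg once through an io.MultiWriter so the same
	// bytes land in f and in buf, then decodes buf into a fresh value.
	func writeAndCopy(cfg *HostConfig, f io.Writer) (*HostConfig, error) {
		var (
			buf      bytes.Buffer
			deepCopy HostConfig
		)
		w := io.MultiWriter(&buf, f)
		if err := json.NewEncoder(w).Encode(cfg); err != nil {
			return nil, err
		}
		if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil {
			return nil, err
		}
		return &deepCopy, nil
	}

	func main() {
		orig := &HostConfig{NetworkMode: "bridge", Links: []string{"db"}}
		cp, err := writeAndCopy(orig, os.Stdout)
		if err != nil {
			panic(err)
		}
		cp.Links[0] = "cache"                   // mutate only the copy
		fmt.Println(orig.Links[0], cp.Links[0]) // "db cache": original untouched
	}
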
@@ -10,6 +10,7 @@ import (
 	"strings"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/pkg/chrootarchive"
... ...
@@ -261,11 +262,8 @@ func (container *Container) ConfigMounts() []Mount {
 	return mounts
 }
 
-// UpdateContainer updates configuration of a container.
+// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container.
 func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
-	container.Lock()
-	defer container.Unlock()
-
 	// update resources of container
 	resources := hostConfig.Resources
 	cResources := &container.HostConfig.Resources
... ...
@@ -334,11 +332,6 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
 		container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
 	}
 
-	if err := container.ToDisk(); err != nil {
-		logrus.Errorf("Error saving updated container: %v", err)
-		return err
-	}
-
 	return nil
 }
 
... ...
@@ -462,3 +455,21 @@ func cleanResourcePath(path string) string {
 func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
 	return false
 }
+
+// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock.
+func (container *Container) GetMountPoints() []types.MountPoint {
+	mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
+	for _, m := range container.MountPoints {
+		mountPoints = append(mountPoints, types.MountPoint{
+			Type:        m.Type,
+			Name:        m.Name,
+			Source:      m.Path(),
+			Destination: m.Destination,
+			Driver:      m.Driver,
+			Mode:        m.Mode,
+			RW:          m.RW,
+			Propagation: m.Propagation,
+		})
+	}
+	return mountPoints
+}
... ...
@@ -7,6 +7,7 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/pkg/system"
 )
... ...
@@ -125,11 +126,8 @@ func (container *Container) TmpfsMounts() ([]Mount, error) {
 	return mounts, nil
 }
 
-// UpdateContainer updates configuration of a container
+// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container.
 func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error {
-	container.Lock()
-	defer container.Unlock()
-
 	resources := hostConfig.Resources
 	if resources.CPUShares != 0 ||
 		resources.Memory != 0 ||
... ...
@@ -194,3 +192,19 @@ func (container *Container) BuildHostnameFile() error {
 func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
 	return true
 }
+
+// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock.
+func (container *Container) GetMountPoints() []types.MountPoint {
+	mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
+	for _, m := range container.MountPoints {
+		mountPoints = append(mountPoints, types.MountPoint{
+			Type:        m.Type,
+			Name:        m.Name,
+			Source:      m.Path(),
+			Destination: m.Destination,
+			Driver:      m.Driver,
+			RW:          m.RW,
+		})
+	}
+	return mountPoints
+}
... ...
@@ -13,9 +13,8 @@ type Health struct {
 
 // String returns a human-readable description of the health-check state
 func (s *Health) String() string {
-	// This happens when the container is being shutdown and the monitor has stopped
-	// or the monitor has yet to be setup.
-	if s.stop == nil {
+	// This happens when the monitor has yet to be setup.
+	if s.Status == "" {
 		return types.Unhealthy
 	}
 
... ...
@@ -44,6 +43,8 @@ func (s *Health) CloseMonitorChannel() {
 		logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
 		close(s.stop)
 		s.stop = nil
+		// unhealthy when the monitor has stopped for compatibility reasons
+		s.Status = types.Unhealthy
 		logrus.Debug("CloseMonitorChannel done")
 	}
 }
new file mode 100644
... ...
@@ -0,0 +1,288 @@
+package container
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/pkg/registrar"
+	"github.com/docker/go-connections/nat"
+	"github.com/hashicorp/go-memdb"
+)
+
+const (
+	memdbTable   = "containers"
+	memdbIDIndex = "id"
+)
+
+// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a
+// versioned ACID in-memory store.
+type Snapshot struct {
+	types.Container
+
+	// additional info queries need to filter on
+	// preserve nanosec resolution for queries
+	CreatedAt    time.Time
+	StartedAt    time.Time
+	Name         string
+	Pid          int
+	ExitCode     int
+	Running      bool
+	Paused       bool
+	Managed      bool
+	ExposedPorts nat.PortSet
+	PortBindings nat.PortSet
+	Health       string
+	HostConfig   struct {
+		Isolation string
+	}
+}
+
+// ViewDB provides an in-memory transactional (ACID) container Store
+type ViewDB interface {
+	Snapshot(nameIndex *registrar.Registrar) View
+	Save(*Container) error
+	Delete(*Container) error
+}
+
+// View can be used by readers to avoid locking
+type View interface {
+	All() ([]Snapshot, error)
+	Get(id string) (*Snapshot, error)
+}
+
+var schema = &memdb.DBSchema{
+	Tables: map[string]*memdb.TableSchema{
+		memdbTable: {
+			Name: memdbTable,
+			Indexes: map[string]*memdb.IndexSchema{
+				memdbIDIndex: {
+					Name:    memdbIDIndex,
+					Unique:  true,
+					Indexer: &containerByIDIndexer{},
+				},
+			},
+		},
+	},
+}
+
+type memDB struct {
+	store *memdb.MemDB
+}
+
+// NewViewDB provides the default implementation, with the default schema
+func NewViewDB() (ViewDB, error) {
+	store, err := memdb.NewMemDB(schema)
+	if err != nil {
+		return nil, err
+	}
+	return &memDB{store: store}, nil
+}
+
+// Snapshot provides a consistent read-only View of the database
+func (db *memDB) Snapshot(index *registrar.Registrar) View {
+	return &memdbView{
+		txn:       db.store.Txn(false),
+		nameIndex: index.GetAll(),
+	}
+}
+
+// Save atomically updates the in-memory store state for a Container.
+// Only read only (deep) copies of containers may be passed in.
+func (db *memDB) Save(c *Container) error {
+	txn := db.store.Txn(true)
+	defer txn.Commit()
+	return txn.Insert(memdbTable, c)
+}
+
+// Delete removes an item by ID
+func (db *memDB) Delete(c *Container) error {
+	txn := db.store.Txn(true)
+	defer txn.Commit()
+	return txn.Delete(memdbTable, NewBaseContainer(c.ID, c.Root))
+}
+
+type memdbView struct {
+	txn       *memdb.Txn
+	nameIndex map[string][]string
+}
+
+// All returns all items in this snapshot. Returned objects must never be modified.
+func (v *memdbView) All() ([]Snapshot, error) {
+	var all []Snapshot
+	iter, err := v.txn.Get(memdbTable, memdbIDIndex)
+	if err != nil {
+		return nil, err
+	}
+	for {
+		item := iter.Next()
+		if item == nil {
+			break
+		}
+		snapshot := v.transform(item.(*Container))
+		all = append(all, *snapshot)
+	}
+	return all, nil
+}
+
+// Get returns an item by id. Returned objects must never be modified.
+func (v *memdbView) Get(id string) (*Snapshot, error) {
+	s, err := v.txn.First(memdbTable, memdbIDIndex, id)
+	if err != nil {
+		return nil, err
+	}
+	return v.transform(s.(*Container)), nil
+}
+
+// transform maps a (deep) copied Container object to what queries need.
+// A lock on the Container is not held because these are immutable deep copies.
+func (v *memdbView) transform(container *Container) *Snapshot {
+	snapshot := &Snapshot{
+		Container: types.Container{
+			ID:      container.ID,
+			Names:   v.nameIndex[container.ID],
+			ImageID: container.ImageID.String(),
+			Ports:   []types.Port{},
+			Mounts:  container.GetMountPoints(),
+			State:   container.State.StateString(),
+			Status:  container.State.String(),
+			Created: container.Created.Unix(),
+		},
+		CreatedAt:    container.Created,
+		StartedAt:    container.StartedAt,
+		Name:         container.Name,
+		Pid:          container.Pid,
+		Managed:      container.Managed,
+		ExposedPorts: make(nat.PortSet),
+		PortBindings: make(nat.PortSet),
+		Health:       container.HealthString(),
+		Running:      container.Running,
+		Paused:       container.Paused,
+		ExitCode:     container.ExitCode(),
+	}
+
+	if snapshot.Names == nil {
+		// Dead containers will often have no name, so make sure the response isn't null
+		snapshot.Names = []string{}
+	}
+
+	if container.HostConfig != nil {
+		snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
+		snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
+		for binding := range container.HostConfig.PortBindings {
+			snapshot.PortBindings[binding] = struct{}{}
+		}
+	}
+
+	if container.Config != nil {
+		snapshot.Image = container.Config.Image
+		snapshot.Labels = container.Config.Labels
+		for exposed := range container.Config.ExposedPorts {
+			snapshot.ExposedPorts[exposed] = struct{}{}
+		}
+	}
+
+	if len(container.Args) > 0 {
+		args := []string{}
+		for _, arg := range container.Args {
+			if strings.Contains(arg, " ") {
+				args = append(args, fmt.Sprintf("'%s'", arg))
+			} else {
+				args = append(args, arg)
+			}
+		}
+		argsAsString := strings.Join(args, " ")
+		snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
+	} else {
+		snapshot.Command = container.Path
+	}
+
+	snapshot.Ports = []types.Port{}
+	networks := make(map[string]*network.EndpointSettings)
+	if container.NetworkSettings != nil {
+		for name, netw := range container.NetworkSettings.Networks {
+			if netw == nil || netw.EndpointSettings == nil {
+				continue
+			}
+			networks[name] = &network.EndpointSettings{
+				EndpointID:          netw.EndpointID,
+				Gateway:             netw.Gateway,
+				IPAddress:           netw.IPAddress,
+				IPPrefixLen:         netw.IPPrefixLen,
+				IPv6Gateway:         netw.IPv6Gateway,
+				GlobalIPv6Address:   netw.GlobalIPv6Address,
+				GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
+				MacAddress:          netw.MacAddress,
+				NetworkID:           netw.NetworkID,
+			}
+			if netw.IPAMConfig != nil {
+				networks[name].IPAMConfig = &network.EndpointIPAMConfig{
+					IPv4Address: netw.IPAMConfig.IPv4Address,
+					IPv6Address: netw.IPAMConfig.IPv6Address,
+				}
+			}
+		}
+		for port, bindings := range container.NetworkSettings.Ports {
+			p, err := nat.ParsePort(port.Port())
+			if err != nil {
+				logrus.Warnf("invalid port map %+v", err)
+				continue
+			}
+			if len(bindings) == 0 {
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					Type:        port.Proto(),
+				})
+				continue
+			}
+			for _, binding := range bindings {
+				h, err := nat.ParsePort(binding.HostPort)
+				if err != nil {
+					logrus.Warnf("invalid host port map %+v", err)
+					continue
+				}
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					PublicPort:  uint16(h),
+					Type:        port.Proto(),
+					IP:          binding.HostIP,
+				})
+			}
+		}
+	}
+	snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
+
+	return snapshot
+}
+
+// containerByIDIndexer is used to extract the ID field from Container types.
+// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct.
+type containerByIDIndexer struct{}
+
+// FromObject implements the memdb.SingleIndexer interface for Container objects
+func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+	c, ok := obj.(*Container)
+	if !ok {
+		return false, nil, fmt.Errorf("%T is not a Container", obj)
+	}
+	// Add the null character as a terminator
+	v := c.ID + "\x00"
+	return true, []byte(v), nil
+}
+
+// FromArgs implements the memdb.Indexer interface
+func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
+}
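
Editor's note: for readers new to hashicorp/go-memdb, its transactions are MVCC over immutable radix trees, so a read transaction is a stable snapshot that later commits cannot disturb — the property the Snapshot/View pair above relies on. A minimal, self-contained demonstration (the Item table is illustrative, not part of this PR):

	package main

	import (
		"fmt"

		memdb "github.com/hashicorp/go-memdb"
	)

	type Item struct {
		ID string
	}

	func main() {
		schema := &memdb.DBSchema{
			Tables: map[string]*memdb.TableSchema{
				"items": {
					Name: "items",
					Indexes: map[string]*memdb.IndexSchema{
						"id": {
							Name:    "id",
							Unique:  true,
							Indexer: &memdb.StringFieldIndex{Field: "ID"},
						},
					},
				},
			},
		}
		db, err := memdb.NewMemDB(schema)
		if err != nil {
			panic(err)
		}

		write := db.Txn(true)
		if err := write.Insert("items", &Item{ID: "a"}); err != nil {
			panic(err)
		}
		write.Commit()

		// a read transaction is a consistent snapshot of the DB as of now
		read := db.Txn(false)

		// commit another item after the snapshot was taken
		write = db.Txn(true)
		if err := write.Insert("items", &Item{ID: "b"}); err != nil {
			panic(err)
		}
		write.Commit()

		raw, _ := read.First("items", "id", "b")
		fmt.Println(raw == nil) // true: the snapshot predates item "b"
	}
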
new file mode 100644
... ...
@@ -0,0 +1,106 @@
+package container
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/pkg/registrar"
+	"github.com/pborman/uuid"
+)
+
+var root string
+
+func TestMain(m *testing.M) {
+	var err error
+	root, err = ioutil.TempDir("", "docker-container-test-")
+	if err != nil {
+		panic(err)
+	}
+	defer os.RemoveAll(root)
+
+	os.Exit(m.Run())
+}
+
+func newContainer(t *testing.T) *Container {
+	var (
+		id    = uuid.New()
+		cRoot = filepath.Join(root, id)
+	)
+	if err := os.MkdirAll(cRoot, 0755); err != nil {
+		t.Fatal(err)
+	}
+	c := NewBaseContainer(id, cRoot)
+	c.HostConfig = &containertypes.HostConfig{}
+	return c
+}
+
+func TestViewSaveDelete(t *testing.T) {
+	db, err := NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
+	c := newContainer(t)
+	if err := c.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	if err := db.Delete(c); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestViewAll(t *testing.T) {
+	var (
+		db, _ = NewViewDB()
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+		two   = newContainer(t)
+	)
+	one.Pid = 10
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	two.Pid = 20
+	if err := two.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+
+	all, err := db.Snapshot(names).All()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if l := len(all); l != 2 {
+		t.Fatalf("expected 2 items, got %d", l)
+	}
+	byID := make(map[string]Snapshot)
+	for i := range all {
+		byID[all[i].ID] = all[i]
+	}
+	if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
+		t.Fatalf("expected something different for id=%s: %v", one.ID, s)
+	}
+	if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
+		t.Fatalf("expected something different for id=%s: %v", two.ID, s)
+	}
+}
+
+func TestViewGet(t *testing.T) {
+	var (
+		db, _ = NewViewDB()
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+	)
+	one.ImageID = "some-image-123"
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	s, err := db.Snapshot(names).Get(one.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if s == nil || s.ImageID != "some-image-123" {
+		t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
+	}
+}
... ...
@@ -18,6 +18,7 @@ import (
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/go-connections/nat"
+	"github.com/opencontainers/selinux/go-selinux/label"
 )
 
 // GetContainer looks for a container using the provided information, which could be
... ...
@@ -90,6 +91,9 @@ func (daemon *Daemon) load(id string) (*container.Container, error) {
 	if err := container.FromDisk(); err != nil {
 		return nil, err
 	}
+	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
+		return nil, err
+	}
 
 	if container.ID != id {
 		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
... ...
@@ -99,7 +103,7 @@ func (daemon *Daemon) load(id string) (*container.Container, error) {
 }
 
 // Register makes a container object usable by the daemon as <container.ID>
-func (daemon *Daemon) Register(c *container.Container) {
+func (daemon *Daemon) Register(c *container.Container) error {
 	// Attach to stdout and stderr
 	if c.Config.OpenStdin {
 		c.StreamConfig.NewInputPipes()
... ...
@@ -107,8 +111,14 @@ func (daemon *Daemon) Register(c *container.Container) {
 		c.StreamConfig.NewNopInputPipe()
 	}
 
+	// once in the memory store it is visible to other goroutines
+	// grab a Lock until it has been checkpointed to avoid races
+	c.Lock()
+	defer c.Unlock()
+
 	daemon.containers.Add(c.ID, c)
 	daemon.idIndex.Add(c.ID)
+	return c.CheckpointTo(daemon.containersReplica)
 }
 
 func (daemon *Daemon) newContainer(name string, platform string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) {
... ...
@@ -212,7 +222,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
 
 	runconfig.SetDefaultNetModeIfBlank(hostConfig)
 	container.HostConfig = hostConfig
-	return container.ToDisk()
+	return container.CheckpointTo(daemon.containersReplica)
 }
 
 // verifyContainerSettings performs validation of the hostconfig and config
... ...
@@ -301,7 +311,7 @@ func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostCon
 			return nil, fmt.Errorf("maximum retry count cannot be negative")
 		}
 	case "":
-	// do nothing
+		// do nothing
 	default:
 		return nil, fmt.Errorf("invalid restart policy '%s'", p.Name)
 	}
... ...
@@ -44,6 +44,7 @@ func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []str
 
 	return nil
 }
+
 func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) {
 	var (
 		sboxOptions []libnetwork.SandboxOption
... ...
@@ -568,7 +569,7 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) error {
 
 	}
 
-	if err := container.WriteHostConfig(); err != nil {
+	if _, err := container.WriteHostConfig(); err != nil {
 		return err
 	}
 	networkActions.WithValues("allocate").UpdateSince(start)
... ...
@@ -1005,10 +1006,8 @@ func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName
 			return err
 		}
 	}
-	if err := container.ToDisk(); err != nil {
-		return fmt.Errorf("Error saving container to disk: %v", err)
-	}
-	return nil
+
+	return container.CheckpointTo(daemon.containersReplica)
 }
 
 // DisconnectFromNetwork disconnects container from network n.
... ...
@@ -1044,16 +1043,16 @@ func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, netw
 		return err
 	}
 
-	if err := container.ToDisk(); err != nil {
-		return fmt.Errorf("Error saving container to disk: %v", err)
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+		return err
 	}
 
 	if n != nil {
-		attributes := map[string]string{
+		daemon.LogNetworkEventWithAttributes(n, "disconnect", map[string]string{
 			"container": container.ID,
-		}
-		daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes)
+		})
 	}
+
 	return nil
 }
 
... ...
@@ -167,12 +167,9 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (
 	runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
 
 	daemon.updateContainerNetworkSettings(container, endpointsConfigs)
-
-	if err := container.ToDisk(); err != nil {
-		logrus.Errorf("Error saving new container to disk: %v", err)
+	if err := daemon.Register(container); err != nil {
 		return nil, err
 	}
-	daemon.Register(container)
 	stateCtr.set(container.ID, "stopped")
 	daemon.LogContainerEvent(container, "create")
 	return container, nil
... ...
@@ -83,6 +83,7 @@ type Daemon struct {
 	ID                    string
 	repository            string
 	containers            container.Store
+	containersReplica     container.ViewDB
 	execCommands          *exec.Store
 	downloadManager       *xfer.LayerDownloadManager
 	uploadManager         *xfer.LayerUploadManager
... ...
@@ -182,17 +183,20 @@ func (daemon *Daemon) restore() error {
 	activeSandboxes := make(map[string]interface{})
 	for id, c := range containers {
 		if err := daemon.registerName(c); err != nil {
-			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
+			logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
 			delete(containers, id)
 			continue
 		}
-		daemon.Register(c)
-
 		// verify that all volumes are valid and have been migrated from the pre-1.7 layout
 		if err := daemon.verifyVolumesInfo(c); err != nil {
 			// don't skip the container due to error
 			logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
 		}
+		if err := daemon.Register(c); err != nil {
+			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
+			delete(containers, id)
+			continue
+		}
 
 		// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
 		// We should rewrite it to use the daemon defaults.
... ...
@@ -212,7 +216,7 @@ func (daemon *Daemon) restore() error {
 		go func(c *container.Container) {
 			defer wg.Done()
 			daemon.backportMountSpec(c)
-			if err := c.ToDiskLocking(); err != nil {
+			if err := daemon.checkpointAndSave(c); err != nil {
 				logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
 			}
 
... ...
@@ -271,6 +275,7 @@ func (daemon *Daemon) restore() error {
 				}
 			}
 
+			c.Lock()
 			if c.RemovalInProgress {
 				// We probably crashed in the middle of a removal, reset
 				// the flag.
... ...
@@ -281,10 +286,13 @@ func (daemon *Daemon) restore() error {
 				// be removed. So we put the container in the "dead"
 				// state and leave further processing up to them.
 				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
-				c.ResetRemovalInProgress()
-				c.SetDead()
-				c.ToDisk()
+				c.RemovalInProgress = false
+				c.Dead = true
+				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+					logrus.Errorf("Failed to update container %s state: %v", c.ID, err)
+				}
 			}
+			c.Unlock()
 		}(c)
 	}
 	wg.Wait()
... ...
@@ -755,6 +763,9 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe
 	d.ID = trustKey.PublicKey().KeyID()
 	d.repository = daemonRepo
 	d.containers = container.NewMemoryStore()
+	if d.containersReplica, err = container.NewViewDB(); err != nil {
+		return nil, err
+	}
 	d.execCommands = exec.NewStore()
 	d.trustKey = trustKey
 	d.idIndex = truncindex.NewTruncIndex([]string{})
... ...
@@ -1222,3 +1233,13 @@ func CreateDaemonRoot(config *config.Config) error {
 	}
 	return setupDaemonRoot(config, realRoot, idMappings.RootPair())
 }
+
+// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
+func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+		return fmt.Errorf("Error saving container state: %v", err)
+	}
+	return nil
+}
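
Editor's note on the two helpers: CheckpointTo assumes the caller already holds the container lock, while checkpointAndSave acquires and releases it for you. A call site that holds no lock would simply do (hypothetical snippet, not from this diff):

	if err := daemon.checkpointAndSave(c); err != nil {
		logrus.Errorf("could not save container %s: %v", c.ID, err)
	}
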
... ...
@@ -1146,7 +1146,8 @@ func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *
 
 	// After we load all the links into the daemon
 	// set them to nil on the hostconfig
-	return container.WriteHostConfig()
+	_, err := container.WriteHostConfig()
+	return err
 }
 
 // conditionalMountOnStart is a platform specific helper function during the
... ...
@@ -284,7 +284,11 @@ func TestMigratePre17Volumes(t *testing.T) {
 	}
 	volumedrivers.Register(drv, volume.DefaultDriverName)
 
-	daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore}
+	daemon := &Daemon{
+		root:       rootDir,
+		repository: containerRoot,
+		volumes:    volStore,
+	}
 	err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600)
 	if err != nil {
 		t.Fatal(err)
... ...
@@ -103,14 +103,16 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	}
 
 	// Mark container dead. We don't want anybody to be restarting it.
-	container.SetDead()
+	container.Lock()
+	container.Dead = true
 
 	// Save container state to disk. So that if error happens before
 	// container meta file got removed from disk, then a restart of
 	// docker should not make a dead container alive.
-	if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) {
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
 		logrus.Errorf("Error saving dying container to disk: %v", err)
 	}
+	container.Unlock()
 
 	// When container creation fails and `RWLayer` has not been created yet, we
 	// do not call `ReleaseRWLayer`
... ...
@@ -131,6 +133,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	selinuxFreeLxcContexts(container.ProcessLabel)
 	daemon.idIndex.Delete(container.ID)
 	daemon.containers.Delete(container.ID)
+	daemon.containersReplica.Delete(container)
 	if e := daemon.removeMountPoints(container, removeVolume); e != nil {
 		logrus.Error(e)
 	}
... ...
@@ -167,6 +167,13 @@ func handleProbeResult(d *Daemon, c *container.Container, result *types.Healthch
 		// Else we're starting or healthy. Stay in that state.
 	}
 
+	// replicate Health status changes
+	if err := c.CheckpointTo(d.containersReplica); err != nil {
+		// queries will be inconsistent until the next probe runs or other state mutations
+		// checkpoint the container
+		logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err)
+	}
+
 	if oldStatus != h.Status {
 		d.LogContainerEvent(c, "health_status: "+h.Status)
 	}
... ...
@@ -29,7 +29,13 @@ func TestNoneHealthcheck(t *testing.T) {
 		},
 		State: &container.State{},
 	}
-	daemon := &Daemon{}
+	store, err := container.NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
+	daemon := &Daemon{
+		containersReplica: store,
+	}
 
 	daemon.initHealthMonitor(c)
 	if c.State.Health != nil {
... ...
@@ -62,8 +68,15 @@ func TestHealthStates(t *testing.T) {
 			Image: "image_name",
 		},
 	}
+
+	store, err := container.NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	daemon := &Daemon{
-		EventsService: e,
+		EventsService:     e,
+		containersReplica: store,
 	}
 
 	c.Config.Healthcheck = &containertypes.HealthConfig{
... ...
@@ -51,7 +51,7 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co
 		}
 	}
 
-	mountPoints := addMountPoints(container)
+	mountPoints := container.GetMountPoints()
 	networkSettings := &types.NetworkSettings{
 		NetworkSettingsBase: types.NetworkSettingsBase{
 			Bridge:                 container.NetworkSettings.Bridge,
... ...
@@ -104,7 +104,7 @@ func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
 		return nil, err
 	}
 
-	mountPoints := addMountPoints(container)
+	mountPoints := container.GetMountPoints()
 	config := &v1p20.ContainerConfig{
 		Config:          container.Config,
 		MacAddress:      container.Config.MacAddress,
... ...
@@ -18,20 +18,6 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON,
 	return &v1p19.ContainerJSON{}, nil
 }
 
-func addMountPoints(container *container.Container) []types.MountPoint {
-	mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
-	for _, m := range container.MountPoints {
-		mountPoints = append(mountPoints, types.MountPoint{
-			Name:        m.Name,
-			Source:      m.Path(),
-			Destination: m.Destination,
-			Driver:      m.Driver,
-			RW:          m.RW,
-		})
-	}
-	return mountPoints
-}
-
 func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
 	return &backend.ExecProcessConfig{
 		Tty:        e.Tty,
... ...
@@ -64,23 +64,6 @@ func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON,
 	}, nil
 }
 
-func addMountPoints(container *container.Container) []types.MountPoint {
-	mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
-	for _, m := range container.MountPoints {
-		mountPoints = append(mountPoints, types.MountPoint{
-			Type:        m.Type,
-			Name:        m.Name,
-			Source:      m.Path(),
-			Destination: m.Destination,
-			Driver:      m.Driver,
-			Mode:        m.Mode,
-			RW:          m.RW,
-			Propagation: m.Propagation,
-		})
-	}
-	return mountPoints
-}
-
 func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
 	return &backend.ExecProcessConfig{
 		Tty:        e.Tty,
... ...
@@ -12,21 +12,6 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON
 	return contJSONBase
 }
 
-func addMountPoints(container *container.Container) []types.MountPoint {
-	mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
-	for _, m := range container.MountPoints {
-		mountPoints = append(mountPoints, types.MountPoint{
-			Type:        m.Type,
-			Name:        m.Name,
-			Source:      m.Path(),
-			Destination: m.Destination,
-			Driver:      m.Driver,
-			RW:          m.RW,
-		})
-	}
-	return mountPoints
-}
-
 // containerInspectPre120 get containers for pre 1.20 APIs.
 func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) {
 	return daemon.ContainerInspectCurrent(name, false)
... ...
@@ -10,7 +10,6 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	networktypes "github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/volume"
... ...
@@ -47,7 +46,7 @@ type iterationAction int
 
 // containerReducer represents a reducer for a container.
 // Returns the object to serialize by the api.
-type containerReducer func(*container.Container, *listContext) (*types.Container, error)
+type containerReducer func(*container.Snapshot, *listContext) (*types.Container, error)
 
 const (
 	// includeContainer is the action to include a container in the reducer.
... ...
@@ -83,9 +82,9 @@ type listContext struct {
 	exitAllowed []int
 
 	// beforeFilter is a filter to ignore containers that appear before the one given
-	beforeFilter *container.Container
+	beforeFilter *container.Snapshot
 	// sinceFilter is a filter to stop the filtering when the iterator arrives at the given container
-	sinceFilter *container.Container
+	sinceFilter *container.Snapshot
 
 	// taskFilter tells if we should filter based on whether a container is part of a task
 	taskFilter bool
... ...
@@ -101,21 +100,21 @@ type listContext struct {
 	*types.ContainerListOptions
 }
 
-// byContainerCreated is a temporary type used to sort a list of containers by creation time.
-type byContainerCreated []*container.Container
+// byCreatedDescending is a temporary type used to sort a list of containers by creation time.
+type byCreatedDescending []container.Snapshot
 
-func (r byContainerCreated) Len() int      { return len(r) }
-func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
-func (r byContainerCreated) Less(i, j int) bool {
-	return r[i].Created.UnixNano() < r[j].Created.UnixNano()
+func (r byCreatedDescending) Len() int      { return len(r) }
+func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+func (r byCreatedDescending) Less(i, j int) bool {
+	return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano()
 }
 
 // Containers returns the list of containers to show given the user's filtering.
 func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {
-	return daemon.reduceContainers(config, daemon.transformContainer)
+	return daemon.reduceContainers(config, daemon.refreshImage)
}
 
-func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container {
+func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
 	idSearch := false
 	names := ctx.filters.Get("name")
 	ids := ctx.filters.Get("id")
... ...
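
Editor's note on the comparator change above: byCreatedDescending bakes the reversal into Less (it compares r[j] against r[i]), so sort.Sort(byCreatedDescending(...)) replaces the old sort.Sort(sort.Reverse(byContainerCreated(...))). A self-contained check with a stand-in type:

	package main

	import (
		"fmt"
		"sort"
		"time"
	)

	// stand-in for container.Snapshot, with just the field the comparator uses
	type snap struct{ CreatedAt time.Time }

	type byCreatedDescending []snap

	func (r byCreatedDescending) Len() int      { return len(r) }
	func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
	func (r byCreatedDescending) Less(i, j int) bool {
		return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano()
	}

	func main() {
		t0 := time.Now()
		s := []snap{{t0}, {t0.Add(2 * time.Second)}, {t0.Add(time.Second)}}
		sort.Sort(byCreatedDescending(s))
		for _, x := range s {
			fmt.Println(x.CreatedAt.Sub(t0)) // 2s, 1s, 0s: newest first
		}
	}
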
@@ -123,7 +122,9 @@ func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Conta
 		// if name or ID filters are not in use, return to
 		// standard behavior of walking the entire container
 		// list from the daemon's in-memory store
-		return daemon.List()
+		all, err := view.All()
+		sort.Sort(byCreatedDescending(all))
+		return all, err
 	}
 
 	// idSearch will determine if we limit name matching to the IDs
... ...
@@ -158,38 +159,46 @@ func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Conta
 		}
 	}
 
-	cntrs := make([]*container.Container, 0, len(matches))
+	cntrs := make([]container.Snapshot, 0, len(matches))
 	for id := range matches {
-		if c := daemon.containers.Get(id); c != nil {
-			cntrs = append(cntrs, c)
+		c, err := view.Get(id)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
+			cntrs = append(cntrs, *c)
 		}
 	}
 
 	// Restore sort-order after filtering
 	// Created gives us nanosec resolution for sorting
-	sort.Sort(sort.Reverse(byContainerCreated(cntrs)))
+	sort.Sort(byCreatedDescending(cntrs))
 
-	return cntrs
+	return cntrs, nil
 }
 
 // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer.
 func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) {
 	var (
+		view       = daemon.containersReplica.Snapshot(daemon.nameIndex)
 		containers = []*types.Container{}
 	)
 
-	ctx, err := daemon.foldFilter(config)
+	ctx, err := daemon.foldFilter(view, config)
 	if err != nil {
 		return nil, err
 	}
 
 	// fastpath to only look at a subset of containers if specific name
 	// or ID matches were provided by the user--otherwise we potentially
-	// end up locking and querying many more containers than intended
-	containerList := daemon.filterByNameIDMatches(ctx)
+	// end up querying many more containers than intended
+	containerList, err := daemon.filterByNameIDMatches(view, ctx)
+	if err != nil {
+		return nil, err
+	}
 
-	for _, container := range containerList {
-		t, err := daemon.reducePsContainer(container, ctx, reducer)
+	for i := range containerList {
+		t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer)
 		if err != nil {
 			if err != errStopIteration {
 				return nil, err
... ...
@@ -206,23 +215,17 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc
 }
 
 // reducePsContainer is the basic representation for a container as expected by the ps command.
-func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
-	container.Lock()
-
+func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *listContext, reducer containerReducer) (*types.Container, error) {
 	// filter containers to return
-	action := includeContainerInList(container, ctx)
-	switch action {
+	switch includeContainerInList(container, ctx) {
 	case excludeContainer:
-		container.Unlock()
 		return nil, nil
 	case stopIteration:
-		container.Unlock()
 		return nil, errStopIteration
 	}
 
 	// transform internal container struct into api structs
 	newC, err := reducer(container, ctx)
-	container.Unlock()
 	if err != nil {
 		return nil, err
 	}
... ...
@@ -237,7 +240,7 @@ func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *lis
 }
 
 // foldFilter generates the container filter based on the user's filtering options.
-func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) {
+func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) {
 	psFilters := config.Filters
 
 	if err := psFilters.Validate(acceptedPsFilterTags); err != nil {
... ...
@@ -294,10 +297,10 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte
 		return nil, err
 	}
 
-	var beforeContFilter, sinceContFilter *container.Container
+	var beforeContFilter, sinceContFilter *container.Snapshot
 
 	err = psFilters.WalkValues("before", func(value string) error {
-		beforeContFilter, err = daemon.GetContainer(value)
+		beforeContFilter, err = view.Get(value)
 		return err
 	})
 	if err != nil {
... ...
@@ -305,7 +308,7 @@ func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listConte
 	}
 
 	err = psFilters.WalkValues("since", func(value string) error {
-		sinceContFilter, err = daemon.GetContainer(value)
+		sinceContFilter, err = view.Get(value)
 		return err
 	})
 	if err != nil {
... ...
@@ -383,7 +386,7 @@ func portOp(key string, filter map[nat.Port]bool) func(value string) error {
 
 // includeContainerInList decides whether a container should be included in the output or not based on the filter.
 // It also decides if the iteration should be stopped or not.
-func includeContainerInList(container *container.Container, ctx *listContext) iterationAction {
+func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction {
 	// Do not include container if it's in the list before the filter container.
 	// Set the filter container to nil to include the rest of containers after this one.
 	if ctx.beforeFilter != nil {
... ...
@@ -422,7 +425,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 	}
 
 	// Do not include container if any of the labels don't match
-	if !ctx.filters.MatchKVList("label", container.Config.Labels) {
+	if !ctx.filters.MatchKVList("label", container.Labels) {
 		return excludeContainer
 	}
 
... ...
@@ -440,7 +443,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 	if len(ctx.exitAllowed) > 0 {
 		shouldSkip := true
 		for _, code := range ctx.exitAllowed {
-			if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() {
+			if code == container.ExitCode && !container.Running && !container.StartedAt.IsZero() {
 				shouldSkip = false
 				break
 			}
... ...
@@ -451,28 +454,34 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 	}
 
 	// Do not include container if its status doesn't match the filter
-	if !ctx.filters.Match("status", container.State.StateString()) {
+	if !ctx.filters.Match("status", container.State) {
 		return excludeContainer
 	}
 
 	// Do not include container if its health doesn't match the filter
-	if !ctx.filters.ExactMatch("health", container.State.HealthString()) {
+	if !ctx.filters.ExactMatch("health", container.Health) {
 		return excludeContainer
 	}
 
 	if ctx.filters.Include("volume") {
-		volumesByName := make(map[string]*volume.MountPoint)
-		for _, m := range container.MountPoints {
+		volumesByName := make(map[string]types.MountPoint)
+		for _, m := range container.Mounts {
 			if m.Name != "" {
 				volumesByName[m.Name] = m
 			} else {
 				volumesByName[m.Source] = m
 			}
 		}
+		volumesByDestination := make(map[string]types.MountPoint)
+		for _, m := range container.Mounts {
+			if m.Destination != "" {
+				volumesByDestination[m.Destination] = m
+			}
+		}
 
 		volumeExist := fmt.Errorf("volume mounted in container")
 		err := ctx.filters.WalkValues("volume", func(value string) error {
-			if _, exist := container.MountPoints[value]; exist {
+			if _, exist := volumesByDestination[value]; exist {
 				return volumeExist
 			}
 			if _, exist := volumesByName[value]; exist {
... ...
@@ -489,19 +498,25 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 		if len(ctx.images) == 0 {
 			return excludeContainer
 		}
-		if !ctx.images[container.ImageID] {
+		if !ctx.images[image.ID(container.ImageID)] {
 			return excludeContainer
 		}
 	}
 
-	networkExist := fmt.Errorf("container part of network")
+	var (
+		networkExist = errors.New("container part of network")
+		noNetworks   = errors.New("container is not part of any networks")
+	)
 	if ctx.filters.Include("network") {
 		err := ctx.filters.WalkValues("network", func(value string) error {
+			if container.NetworkSettings == nil {
+				return noNetworks
+			}
 			if _, ok := container.NetworkSettings.Networks[value]; ok {
 				return networkExist
 			}
 			for _, nw := range container.NetworkSettings.Networks {
-				if nw.EndpointSettings == nil {
+				if nw == nil {
 					continue
 				}
 				if strings.HasPrefix(nw.NetworkID, value) {
... ...
@@ -518,7 +533,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 	if len(ctx.publish) > 0 {
 		shouldSkip := true
 		for port := range ctx.publish {
-			if _, ok := container.HostConfig.PortBindings[port]; ok {
+			if _, ok := container.PortBindings[port]; ok {
 				shouldSkip = false
 				break
 			}
... ...
@@ -531,7 +546,7 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 	if len(ctx.expose) > 0 {
 		shouldSkip := true
 		for port := range ctx.expose {
-			if _, ok := container.Config.ExposedPorts[port]; ok {
+			if _, ok := container.ExposedPorts[port]; ok {
 				shouldSkip = false
 				break
 			}
... ...
@@ -544,106 +559,22 @@ func includeContainerInList(container *container.Container, ctx *listContext) it
 	return includeContainer
 }
 
-// transformContainer generates the container type expected by the docker ps command.
-func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) {
-	newC := &types.Container{
-		ID:      container.ID,
-		Names:   ctx.names[container.ID],
-		ImageID: container.ImageID.String(),
-	}
-	if newC.Names == nil {
-		// Dead containers will often have no name, so make sure the response isn't null
-		newC.Names = []string{}
-	}
-
-	image := container.Config.Image // if possible keep the original ref
-	if image != container.ImageID.String() {
+// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't
+func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) {
+	c := s.Container
+	image := s.Image // keep the original ref if still valid (hasn't changed)
+	if image != s.ImageID {
 		id, _, err := daemon.GetImageIDAndPlatform(image)
 		if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE {
 			return nil, err
 		}
-		if err != nil || id != container.ImageID {
-			image = container.ImageID.String()
+		if err != nil || id.String() != s.ImageID {
+			// ref changed, we need to use original ID
+			image = s.ImageID
 		}
 	}
-	newC.Image = image
-
-	if len(container.Args) > 0 {
-		args := []string{}
-		for _, arg := range container.Args {
-			if strings.Contains(arg, " ") {
-				args = append(args, fmt.Sprintf("'%s'", arg))
-			} else {
-				args = append(args, arg)
-			}
-		}
-		argsAsString := strings.Join(args, " ")
-
-		newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
-	} else {
-		newC.Command = container.Path
-	}
-	newC.Created = container.Created.Unix()
-	newC.State = container.State.StateString()
-	newC.Status = container.State.String()
-	newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
-	// copy networks to avoid races
-	networks := make(map[string]*networktypes.EndpointSettings)
-	for name, network := range container.NetworkSettings.Networks {
-		if network == nil || network.EndpointSettings == nil {
-			continue
-		}
-		networks[name] = &networktypes.EndpointSettings{
-			EndpointID:          network.EndpointID,
-			Gateway:             network.Gateway,
-			IPAddress:           network.IPAddress,
-			IPPrefixLen:         network.IPPrefixLen,
-			IPv6Gateway:         network.IPv6Gateway,
-			GlobalIPv6Address:   network.GlobalIPv6Address,
-			GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen,
-			MacAddress:          network.MacAddress,
-			NetworkID:           network.NetworkID,
-		}
-		if network.IPAMConfig != nil {
-			networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{
-				IPv4Address: network.IPAMConfig.IPv4Address,
-				IPv6Address: network.IPAMConfig.IPv6Address,
-			}
-		}
-	}
-	newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
-
-	newC.Ports = []types.Port{}
-	for port, bindings := range container.NetworkSettings.Ports {
-		p, err := nat.ParsePort(port.Port())
-		if err != nil {
-			return nil, err
-		}
-		if len(bindings) == 0 {
-			newC.Ports = append(newC.Ports, types.Port{
-				PrivatePort: uint16(p),
-				Type:        port.Proto(),
-			})
-			continue
-		}
-		for _, binding := range bindings {
-			h, err := nat.ParsePort(binding.HostPort)
-			if err != nil {
-				return nil, err
-			}
-			newC.Ports = append(newC.Ports, types.Port{
-				PrivatePort: uint16(p),
-				PublicPort:  uint16(h),
-				Type:        port.Proto(),
-				IP:          binding.HostIP,
-			})
-		}
-	}
-
-	newC.Labels = container.Config.Labels
-	newC.Mounts = addMountPoints(container)
-
-	return newC, nil
+	c.Image = image
+	return &c, nil
 }
 
 // Volumes lists known volumes, using the filter to restrict the range
... ...
@@ -6,6 +6,6 @@ import "github.com/docker/docker/container"
 
 // excludeByIsolation is a platform specific helper function to support PS
 // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix.
-func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction {
+func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction {
 	return includeContainer
 }
... ...
@@ -8,7 +8,7 @@ import (
 
 // excludeByIsolation is a platform specific helper function to support PS
 // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix.
-func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction {
+func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction {
 	i := strings.ToLower(string(container.HostConfig.Isolation))
 	if i == "" {
 		i = "default"
... ...
@@ -39,6 +39,9 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 			return errors.New("Received StateOOM from libcontainerd on Windows. This should never happen.")
 		}
 		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+			return err
+		}
 		daemon.LogContainerEvent(c, "oom")
 	case libcontainerd.StateExit:
 
... ...
@@ -90,7 +93,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
90 90
 		daemon.setStateCounter(c)
91 91
 
92 92
 		defer c.Unlock()
93
-		if err := c.ToDisk(); err != nil {
93
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
94 94
 			return err
95 95
 		}
96 96
 		return daemon.postRunProcessing(c, e)
... ...
@@ -119,30 +122,30 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		c.HasBeenStartedBefore = true
 		daemon.setStateCounter(c)
 
-		if err := c.ToDisk(); err != nil {
+		daemon.initHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			c.Reset(false)
 			return err
 		}
-		daemon.initHealthMonitor(c)
 
 		daemon.LogContainerEvent(c, "start")
 	case libcontainerd.StatePause:
 		// Container is already locked in this case
 		c.Paused = true
 		daemon.setStateCounter(c)
-		if err := c.ToDisk(); err != nil {
+		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
-		daemon.updateHealthMonitor(c)
 		daemon.LogContainerEvent(c, "pause")
 	case libcontainerd.StateResume:
 		// Container is already locked in this case
 		c.Paused = false
 		daemon.setStateCounter(c)
-		if err := c.ToDisk(); err != nil {
+		daemon.updateHealthMonitor(c)
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
-		daemon.updateHealthMonitor(c)
 		daemon.LogContainerEvent(c, "unpause")
 	}
 	return nil
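Each state transition now follows the same discipline: mutate the locked container, bring the health monitor in line, then publish via `CheckpointTo` before emitting the event, so a failed checkpoint never announces a state the view cannot see. A condensed sketch of that shared pattern (not a real daemon method; the container lock is assumed held, as in the StatePause/StateResume cases):

// publishStateChange condenses the pattern the cases above repeat: monitors
// are updated before the state is made visible, and the event fires only
// after CheckpointTo has both persisted and published the new state.
func (daemon *Daemon) publishStateChange(c *container.Container, mutate func(), event string) error {
	mutate()                      // e.g. c.Paused = true
	daemon.setStateCounter(c)     // metrics reflect the in-memory change
	daemon.updateHealthMonitor(c) // monitors next, before the state is published
	if err := c.CheckpointTo(daemon.containersReplica); err != nil {
		return err // no event is emitted for a state that was never published
	}
	daemon.LogContainerEvent(c, event)
	return nil
}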
... ...
@@ -30,10 +30,6 @@ func (daemon *Daemon) registerName(container *container.Container) error {
 			return err
 		}
 		container.Name = name
-
-		if err := container.ToDiskLocking(); err != nil {
-			logrus.Errorf("Error saving container name to disk: %v", err)
-		}
 	}
 	return daemon.nameIndex.Reserve(container.Name, container.ID)
 }
... ...
@@ -82,7 +82,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		daemon.nameIndex.Release(oldName + k)
 	}
 	daemon.releaseName(oldName)
-	if err = container.ToDisk(); err != nil {
+	if err = container.CheckpointTo(daemon.containersReplica); err != nil {
 		return err
 	}
 
... ...
@@ -99,7 +99,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		if err != nil {
 			container.Name = oldName
 			container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint
-			if e := container.ToDisk(); e != nil {
+			if e := container.CheckpointTo(daemon.containersReplica); e != nil {
 				logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
 			}
 		}
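The rollback path matters here: if the rename fails after the checkpoint above, the view would otherwise keep advertising the abandoned new name. An illustrative helper capturing that restore-then-checkpoint pattern (not part of this diff; assumes the daemon package's logrus import):

// rollbackRename is an illustrative sketch of the deferred cleanup above:
// restore the old fields, then checkpoint again so `docker ps` never shows
// a name the daemon has rolled back.
func rollbackRename(daemon *Daemon, c *container.Container, oldName string, oldAnon bool) {
	c.Name = oldName
	c.NetworkSettings.IsAnonymousEndpoint = oldAnon
	if e := c.CheckpointTo(daemon.containersReplica); e != nil {
		logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", c.ID, e)
	}
}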
... ...
@@ -52,7 +52,7 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds i
 		container.HostConfig.AutoRemove = autoRemove
 		// containerStop will write HostConfig to disk, we shall restore AutoRemove
 		// in disk too
-		if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
+		if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil {
 			logrus.Errorf("Write container to disk error: %v", toDiskErr)
 		}
 
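`daemon.checkpointAndSave` is the replacement for the removed `ToDiskLocking`: it takes the container lock itself, giving callers that do not already hold it the same one-shot persist-and-publish behaviour. Its definition is outside the hunks shown here; a plausible shape, for orientation only:

// checkpointAndSave wraps CheckpointTo with its own locking, mirroring the
// old ToDiskLocking helper. Plausible sketch of a method defined elsewhere
// in this PR; assumes the daemon package's "fmt" import.
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
	container.Lock()
	defer container.Unlock()
	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
		return fmt.Errorf("Error saving container state: %v", err)
	}
	return nil
}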
... ...
@@ -58,7 +58,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 				// if user has change the network mode on starting, clean up the
 				// old networks. It is a deprecated feature and has been removed in Docker 1.12
 				container.NetworkSettings.Networks = nil
-				if err := container.ToDisk(); err != nil {
+				if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 					return err
 				}
 			}
... ...
@@ -86,11 +86,6 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 	return daemon.containerStart(container, checkpoint, checkpointDir, true)
 }
 
-// Start starts a container
-func (daemon *Daemon) Start(container *container.Container) error {
-	return daemon.containerStart(container, "", "", true)
-}
-
 // containerStart prepares the container to run by setting up everything the
 // container needs, such as storage and networking, as well as links
 // between containers. The container is left waiting for a signal to
... ...
@@ -117,8 +112,9 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
 			if container.ExitCode() == 0 {
 				container.SetExitCode(128)
 			}
-			container.ToDisk()
-
+			if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+				logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
+			}
 			container.Reset(false)
 
 			daemon.Cleanup(container)
... ...
@@ -9,13 +9,14 @@ import (
 	"github.com/docker/docker/libcontainerd"
 )
 
+// getLibcontainerdCreateOptions callers must hold a lock on the container
 func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
 	createOptions := []libcontainerd.CreateOption{}
 
 	// Ensure a runtime has been assigned to this container
 	if container.HostConfig.Runtime == "" {
 		container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
-		container.ToDisk()
+		container.CheckpointTo(daemon.containersReplica)
 	}
 
 	rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime)
... ...
@@ -38,7 +38,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 		if restoreConfig {
 			container.Lock()
 			container.HostConfig = &backupHostConfig
-			container.ToDisk()
+			container.CheckpointTo(daemon.containersReplica)
 			container.Unlock()
 		}
 	}()
... ...
@@ -47,10 +47,18 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 		return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\"."))
 	}
 
+	container.Lock()
 	if err := container.UpdateContainer(hostConfig); err != nil {
 		restoreConfig = true
+		container.Unlock()
 		return errCannotUpdate(container.ID, err)
 	}
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+		restoreConfig = true
+		container.Unlock()
+		return errCannotUpdate(container.ID, err)
+	}
+	container.Unlock()
 
 	// if Restart Policy changed, we need to update container monitor
 	if hostConfig.RestartPolicy.Name != "" {
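The explicit `Lock`/`Unlock` pair added above enforces the invariant that matters for a lock-free `docker ps`: the mutation and the checkpoint share one critical section, so no reader of the view ever observes a half-applied HostConfig. An illustrative helper capturing that invariant (not in this diff; assumes the imports ctr "github.com/docker/docker/container" and containertypes "github.com/docker/docker/api/types/container"):

// applyUpdate sketches the ordering the hunk above enforces: mutate, then
// checkpoint, inside one critical section. CheckpointTo's store parameter
// is the ViewDB the daemon queries on list requests.
func applyUpdate(c *ctr.Container, store ctr.ViewDB, hostConfig *containertypes.HostConfig) error {
	c.Lock()
	defer c.Unlock()
	if err := c.UpdateContainer(hostConfig); err != nil {
		return err
	}
	// Publishing inside the same lock keeps disk, view, and memory consistent.
	return c.CheckpointTo(store)
}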
... ...
@@ -137,6 +137,9 @@ func migrateVolume(id, vfs string) error {
 // verifyVolumesInfo ports volumes configured for the containers pre docker 1.7.
 // It reads the container configuration and creates valid mount points for the old volumes.
 func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
+	container.Lock()
+	defer container.Unlock()
+
 	// Inspect old structures only when we're upgrading from old versions
 	// to versions >= 1.7 and the MountPoints has not been populated with volumes data.
 	type volumes struct {
... ...
@@ -177,7 +180,6 @@ func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
 				container.MountPoints[destination] = &m
 			}
 		}
-		return container.ToDisk()
 	}
 	return nil
 }
... ...
@@ -2684,7 +2684,7 @@ func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) {
 	`)
 
 	configPath := filepath.Join(d.Root, "containers", id, "config.v2.json")
-	err = ioutil.WriteFile(configPath, config, 600)
+	c.Assert(ioutil.WriteFile(configPath, config, 600), checker.IsNil)
 	d.Start(c)
 
 	out, err = d.Cmd("inspect", "--type=container", "--format={{ json .Mounts }}", id)
... ...
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"regexp"
 	"strings"
 
 	"github.com/docker/docker/api/types"
... ...
@@ -50,14 +51,17 @@ func getPausedContainers(t testingT, dockerBinary string) []string {
 	return strings.Fields(result.Combined())
 }
 
+var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`)
+
 func deleteAllContainers(t testingT, dockerBinary string) {
 	containers := getAllContainers(t, dockerBinary)
 	if len(containers) > 0 {
 		result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, containers...)...)
 		if result.Error != nil {
 			// If the error is "No such container: ..." this means the container doesn't exists anymore,
-			// we can safely ignore that one.
-			if strings.Contains(result.Stderr(), "No such container") {
+			// or if it is "... removal of container ... is already in progress" it will be removed eventually.
+			// We can safely ignore those.
+			if strings.Contains(result.Stderr(), "No such container") || alreadyExists.MatchString(result.Stderr()) {
 				return
 			}
 			t.Fatalf("error removing containers %v : %v (%s)", containers, result.Error, result.Combined())
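The new `alreadyExists` pattern lets test cleanup tolerate the removal races the lock-free paths make more likely: a container whose removal is already in progress will disappear on its own, so `rm -fv` failing on it is benign. An illustrative test of what the pattern does and does not accept (not part of the diff; assumes it sits in the same package as the helper, with "testing" imported):

// TestRemovalInProgressPattern sanity-checks the cleanup regexp above.
func TestRemovalInProgressPattern(t *testing.T) {
	msg := "Error response from daemon: removal of container abc123 is already in progress"
	if !alreadyExists.MatchString(msg) {
		t.Fatal("in-progress-removal errors should be ignored by deleteAllContainers")
	}
	// "No such container" errors are handled by the separate strings.Contains check.
	if alreadyExists.MatchString("Error response from daemon: No such container: abc123") {
		t.Fatal("pattern should not swallow unrelated daemon errors")
	}
}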