save deep copies of Container in the replica store

Reuse existing structures and rely on JSON serialization to deep-copy
Container objects.

Also consolidate all "save" operations onto container.CheckpointTo, which
now both saves the serialized JSON to disk and replicates state to the
ACID in-memory store.

Signed-off-by: Fabio Kung <fabio.kung@gmail.com>

Fabio Kung authored on 2017/03/28 02:18:53
Showing 18 changed files
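
The core trick here, deep copying via JSON serialization, means marshalling a struct and decoding the bytes into a fresh value so the copy shares no pointers with the original. A minimal standalone sketch of the idea, not code from this commit (the Config type and deepCopy helper below are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Labels map[string]string
}

// deepCopy clones cfg by round-tripping it through JSON; the decoded value
// shares no maps, slices, or pointers with the original.
func deepCopy(cfg *Config) (*Config, error) {
	data, err := json.Marshal(cfg)
	if err != nil {
		return nil, err
	}
	out := &Config{}
	if err := json.Unmarshal(data, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	orig := &Config{Labels: map[string]string{"role": "web"}}
	copied, _ := deepCopy(orig)
	copied.Labels["role"] = "db"                            // mutating the copy...
	fmt.Println(orig.Labels["role"], copied.Labels["role"]) // ...prints "web db"
}

In the diff below, Save gets the same effect without an explicit round-trip: CheckpointTo first writes the container's JSON to disk (config.v2.json), and Save re-reads it into a fresh Container before inserting it into the store.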
... ...
@@ -159,7 +159,7 @@ func (container *Container) FromDisk() error {
 }
 
 // ToDisk saves the container configuration on disk.
-func (container *Container) ToDisk() error {
 	pth, err := container.ConfigPath()
 	if err != nil {
 		return err
+func (container *Container) toDisk() error {
... ...
@@ -181,27 +181,13 @@ func (container *Container) ToDisk() error {
 	return container.WriteHostConfig()
 }
 
-// ToDiskLocking saves the container configuration on disk in a thread safe way.
-func (container *Container) ToDiskLocking() error {
-	container.Lock()
-	err := container.ToDisk()
-	container.Unlock()
-	return err
-}
-
-// CheckpointTo makes the Container's current state visible to queries.
+// CheckpointTo makes the Container's current state visible to queries, and persists state.
 // Callers must hold a Container lock.
 func (container *Container) CheckpointTo(store ViewDB) error {
-	return store.Save(container.snapshot())
-}
-
-// CheckpointAndSaveToDisk is equivalent to calling CheckpointTo and ToDisk.
-// Callers must hold a Container lock.
-func (container *Container) CheckpointAndSaveToDisk(store ViewDB) error {
-	if err := container.CheckpointTo(store); err != nil {
+	if err := container.toDisk(); err != nil {
 		return err
 	}
-	return container.ToDisk()
+	return store.Save(container)
 }
 
 // readHostConfig reads the host configuration from disk for the container.
deleted file mode 100644
... ...
@@ -1,153 +0,0 @@
-package container
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/network"
-	"github.com/docker/go-connections/nat"
-)
-
-// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a
-// versioned ACID in-memory store. Pointers are avoided here to make sure all values are copied into the store.
-type Snapshot struct {
-	ID           string `json:"Id"`
-	Name         string
-	Pid          int
-	Managed      bool
-	Image        string
-	ImageID      string
-	Command      string
-	Ports        []types.Port
-	ExposedPorts nat.PortSet
-	PublishPorts nat.PortSet
-	Labels       map[string]string
-	State        string
-	Status       string
-	Health       string
-	HostConfig   struct {
-		NetworkMode string
-		Isolation   string
-	}
-	NetworkSettings types.SummaryNetworkSettings
-	Mounts          []types.MountPoint
-	Created         time.Time
-	StartedAt       time.Time
-	Running         bool
-	Paused          bool
-	ExitCode        int
-}
-
-// Snapshot provides a read only view of a Container. Callers must hold a Lock on the container object.
-func (container *Container) snapshot() *Snapshot {
-	snapshot := &Snapshot{
-		ID:           container.ID,
-		Name:         container.Name,
-		Pid:          container.Pid,
-		Managed:      container.Managed,
-		ImageID:      container.ImageID.String(),
-		Ports:        []types.Port{},
-		ExposedPorts: make(nat.PortSet),
-		PublishPorts: make(nat.PortSet),
-		State:        container.State.StateString(),
-		Status:       container.State.String(),
-		Health:       container.State.HealthString(),
-		Mounts:       container.GetMountPoints(),
-		Created:      container.Created,
-		StartedAt:    container.StartedAt,
-		Running:      container.Running,
-		Paused:       container.Paused,
-		ExitCode:     container.ExitCode(),
-	}
-
-	if container.HostConfig != nil {
-		snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
-		snapshot.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
-		for publish := range container.HostConfig.PortBindings {
-			snapshot.PublishPorts[publish] = struct{}{}
-		}
-	}
-
-	if container.Config != nil {
-		snapshot.Image = container.Config.Image
-		snapshot.Labels = container.Config.Labels
-		for exposed := range container.Config.ExposedPorts {
-			snapshot.ExposedPorts[exposed] = struct{}{}
-		}
-	}
-
-	if len(container.Args) > 0 {
-		args := []string{}
-		for _, arg := range container.Args {
-			if strings.Contains(arg, " ") {
-				args = append(args, fmt.Sprintf("'%s'", arg))
-			} else {
-				args = append(args, arg)
-			}
-		}
-		argsAsString := strings.Join(args, " ")
-		snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
-	} else {
-		snapshot.Command = container.Path
-	}
-
-	if container.NetworkSettings != nil {
-		networks := make(map[string]*network.EndpointSettings)
-		for name, netw := range container.NetworkSettings.Networks {
-			if netw == nil || netw.EndpointSettings == nil {
-				continue
-			}
-			networks[name] = &network.EndpointSettings{
-				EndpointID:          netw.EndpointID,
-				Gateway:             netw.Gateway,
-				IPAddress:           netw.IPAddress,
-				IPPrefixLen:         netw.IPPrefixLen,
-				IPv6Gateway:         netw.IPv6Gateway,
-				GlobalIPv6Address:   netw.GlobalIPv6Address,
-				GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
-				MacAddress:          netw.MacAddress,
-				NetworkID:           netw.NetworkID,
-			}
-			if netw.IPAMConfig != nil {
-				networks[name].IPAMConfig = &network.EndpointIPAMConfig{
-					IPv4Address: netw.IPAMConfig.IPv4Address,
-					IPv6Address: netw.IPAMConfig.IPv6Address,
-				}
-			}
-		}
-		snapshot.NetworkSettings = types.SummaryNetworkSettings{Networks: networks}
-		for port, bindings := range container.NetworkSettings.Ports {
-			p, err := nat.ParsePort(port.Port())
-			if err != nil {
-				logrus.Warnf("invalid port map %+v", err)
-				continue
-			}
-			if len(bindings) == 0 {
-				snapshot.Ports = append(snapshot.Ports, types.Port{
-					PrivatePort: uint16(p),
-					Type:        port.Proto(),
-				})
-				continue
-			}
-			for _, binding := range bindings {
-				h, err := nat.ParsePort(binding.HostPort)
-				if err != nil {
-					logrus.Warnf("invalid host port map %+v", err)
-					continue
-				}
-				snapshot.Ports = append(snapshot.Ports, types.Port{
-					PrivatePort: uint16(p),
-					PublicPort:  uint16(h),
-					Type:        port.Proto(),
-					IP:          binding.HostIP,
-				})
-			}
-		}
-
-	}
-
-	return snapshot
-}
... ...
@@ -1,18 +1,51 @@
 package container
 
-import "github.com/hashicorp/go-memdb"
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/pkg/registrar"
+	"github.com/docker/go-connections/nat"
+	"github.com/hashicorp/go-memdb"
+)
 
 const (
 	memdbTable   = "containers"
-	memdbIDField = "ID"
 	memdbIDIndex = "id"
 )
 
+// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a
+// versioned ACID in-memory store.
+type Snapshot struct {
+	types.Container
+
+	// additional info queries need to filter on
+	// preserve nanosec resolution for queries
+	CreatedAt    time.Time
+	StartedAt    time.Time
+	Name         string
+	Pid          int
+	ExitCode     int
+	Running      bool
+	Paused       bool
+	Managed      bool
+	ExposedPorts nat.PortSet
+	PortBindings nat.PortSet
+	Health       string
+	HostConfig   struct {
+		Isolation string
+	}
+}
+
 // ViewDB provides an in-memory transactional (ACID) container Store
 type ViewDB interface {
-	Snapshot() View
-	Save(snapshot *Snapshot) error
-	Delete(id string) error
+	Snapshot(nameIndex *registrar.Registrar) View
+	Save(*Container) error
+	Delete(*Container) error
 }
 
 // View can be used by readers to avoid locking
... ...
@@ -29,7 +62,7 @@ var schema = &memdb.DBSchema{
 				memdbIDIndex: {
 					Name:    memdbIDIndex,
 					Unique:  true,
-					Indexer: &memdb.StringFieldIndex{Field: memdbIDField},
+					Indexer: &containerByIDIndexer{},
 				},
 			},
 		},
... ...
@@ -50,29 +83,38 @@ func NewViewDB() (ViewDB, error) {
 }
 
 // Snapshot provides a consistent read-only View of the database
-func (db *memDB) Snapshot() View {
-	return &memdbView{db.store.Txn(false)}
+func (db *memDB) Snapshot(index *registrar.Registrar) View {
+	return &memdbView{
+		txn:       db.store.Txn(false),
+		nameIndex: index.GetAll(),
+	}
 }
 
-// Save atomically updates the in-memory store
-func (db *memDB) Save(snapshot *Snapshot) error {
+// Save atomically updates the in-memory store from the current on-disk state of a Container.
+func (db *memDB) Save(c *Container) error {
 	txn := db.store.Txn(true)
 	defer txn.Commit()
-	return txn.Insert(memdbTable, snapshot)
+	deepCopy := NewBaseContainer(c.ID, c.Root)
+	err := deepCopy.FromDisk() // TODO: deal with reserveLabel
+	if err != nil {
+		return err
+	}
+	return txn.Insert(memdbTable, deepCopy)
 }
 
 // Delete removes an item by ID
-func (db *memDB) Delete(id string) error {
+func (db *memDB) Delete(c *Container) error {
 	txn := db.store.Txn(true)
 	defer txn.Commit()
-	return txn.Delete(memdbTable, &Snapshot{ID: id})
+	return txn.Delete(memdbTable, NewBaseContainer(c.ID, c.Root))
 }
 
 type memdbView struct {
-	txn *memdb.Txn
+	txn       *memdb.Txn
+	nameIndex map[string][]string
 }
 
-// All returns a all items in this snapshot
+// All returns all items in this snapshot. Returned objects must never be modified.
 func (v *memdbView) All() ([]Snapshot, error) {
 	var all []Snapshot
 	iter, err := v.txn.Get(memdbTable, memdbIDIndex)
... ...
@@ -84,18 +126,167 @@ func (v *memdbView) All() ([]Snapshot, error) {
 		if item == nil {
 			break
 		}
-		snapshot := *(item.(*Snapshot)) // force a copy
-		all = append(all, snapshot)
+		snapshot := v.transform(item.(*Container))
+		all = append(all, *snapshot)
 	}
 	return all, nil
 }
 
-//Get returns an item by id
+// Get returns an item by id. Returned objects must never be modified.
 func (v *memdbView) Get(id string) (*Snapshot, error) {
 	s, err := v.txn.First(memdbTable, memdbIDIndex, id)
 	if err != nil {
 		return nil, err
 	}
-	snapshot := *(s.(*Snapshot)) // force a copy
-	return &snapshot, nil
+	return v.transform(s.(*Container)), nil
+}
+
+// transform maps a (deep) copied Container object to what queries need.
+// A lock on the Container is not held because these are immutable deep copies.
+func (v *memdbView) transform(container *Container) *Snapshot {
+	snapshot := &Snapshot{
+		Container: types.Container{
+			ID:      container.ID,
+			Names:   v.nameIndex[container.ID],
+			ImageID: container.ImageID.String(),
+			Ports:   []types.Port{},
+			Mounts:  container.GetMountPoints(),
+			State:   container.State.StateString(),
+			Status:  container.State.String(),
+			Created: container.Created.Unix(),
+		},
+		CreatedAt:    container.Created,
+		StartedAt:    container.StartedAt,
+		Name:         container.Name,
+		Pid:          container.Pid,
+		Managed:      container.Managed,
+		ExposedPorts: make(nat.PortSet),
+		PortBindings: make(nat.PortSet),
+		Health:       container.HealthString(),
+		Running:      container.Running,
+		Paused:       container.Paused,
+		ExitCode:     container.ExitCode(),
+	}
+
+	if snapshot.Names == nil {
+		// Dead containers will often have no name, so make sure the response isn't null
+		snapshot.Names = []string{}
+	}
+
+	if container.HostConfig != nil {
+		snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
+		snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
+		for binding := range container.HostConfig.PortBindings {
+			snapshot.PortBindings[binding] = struct{}{}
+		}
+	}
+
+	if container.Config != nil {
+		snapshot.Image = container.Config.Image
+		snapshot.Labels = container.Config.Labels
+		for exposed := range container.Config.ExposedPorts {
+			snapshot.ExposedPorts[exposed] = struct{}{}
+		}
+	}
+
+	if len(container.Args) > 0 {
+		args := []string{}
+		for _, arg := range container.Args {
+			if strings.Contains(arg, " ") {
+				args = append(args, fmt.Sprintf("'%s'", arg))
+			} else {
+				args = append(args, arg)
+			}
+		}
+		argsAsString := strings.Join(args, " ")
+		snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
+	} else {
+		snapshot.Command = container.Path
+	}
+
+	snapshot.Ports = []types.Port{}
+	networks := make(map[string]*network.EndpointSettings)
+	if container.NetworkSettings != nil {
+		for name, netw := range container.NetworkSettings.Networks {
+			if netw == nil || netw.EndpointSettings == nil {
+				continue
+			}
+			networks[name] = &network.EndpointSettings{
+				EndpointID:          netw.EndpointID,
+				Gateway:             netw.Gateway,
+				IPAddress:           netw.IPAddress,
+				IPPrefixLen:         netw.IPPrefixLen,
+				IPv6Gateway:         netw.IPv6Gateway,
+				GlobalIPv6Address:   netw.GlobalIPv6Address,
+				GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
+				MacAddress:          netw.MacAddress,
+				NetworkID:           netw.NetworkID,
+			}
+			if netw.IPAMConfig != nil {
+				networks[name].IPAMConfig = &network.EndpointIPAMConfig{
+					IPv4Address: netw.IPAMConfig.IPv4Address,
+					IPv6Address: netw.IPAMConfig.IPv6Address,
+				}
+			}
+		}
+		for port, bindings := range container.NetworkSettings.Ports {
+			p, err := nat.ParsePort(port.Port())
+			if err != nil {
+				logrus.Warnf("invalid port map %+v", err)
+				continue
+			}
+			if len(bindings) == 0 {
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					Type:        port.Proto(),
+				})
+				continue
+			}
+			for _, binding := range bindings {
+				h, err := nat.ParsePort(binding.HostPort)
+				if err != nil {
+					logrus.Warnf("invalid host port map %+v", err)
+					continue
+				}
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					PublicPort:  uint16(h),
+					Type:        port.Proto(),
+					IP:          binding.HostIP,
+				})
+			}
+		}
+	}
+	snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
+
+	return snapshot
+}
+
+// containerByIDIndexer is used to extract the ID field from Container types.
+// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct.
+type containerByIDIndexer struct{}
+
+// FromObject implements the memdb.SingleIndexer interface for Container objects
+func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+	c, ok := obj.(*Container)
+	if !ok {
+		return false, nil, fmt.Errorf("%T is not a Container", obj)
+	}
+	// Add the null character as a terminator
+	v := c.ID + "\x00"
+	return true, []byte(v), nil
+}
+
+// FromArgs implements the memdb.Indexer interface
+func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
 }
... ...
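The replica store above sits on hashicorp/go-memdb, whose transactions give writers atomic commits and readers lock-free, consistent snapshots. As a standalone reference, a minimal sketch of that pattern, using an illustrative table and struct rather than Docker's (the real schema needs the custom containerByIDIndexer shown above because ID lives in an embedded struct):

package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type Item struct {
	ID  string
	Pid int
}

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"items": {
				Name: "items",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	// Writer: a write transaction, committed atomically.
	w := db.Txn(true)
	if err := w.Insert("items", &Item{ID: "c1", Pid: 10}); err != nil {
		panic(err)
	}
	w.Commit()

	// Reader: a read transaction is a stable snapshot; no locks needed.
	read := db.Txn(false)

	// A write committed after the snapshot was taken...
	w2 := db.Txn(true)
	if err := w2.Insert("items", &Item{ID: "c1", Pid: 99}); err != nil {
		panic(err)
	}
	w2.Commit()

	// ...is invisible to the earlier snapshot.
	raw, err := read.First("items", "id", "c1")
	if err != nil {
		panic(err)
	}
	fmt.Println(raw.(*Item).Pid) // still 10
}

This is why View.All and View.Get can hand out Snapshots without taking any container lock: each query runs against an immutable copy.
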
@@ -1,29 +1,73 @@
 package container
 
-import "testing"
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
 
-func TestViewSave(t *testing.T) {
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/pkg/registrar"
+	"github.com/pborman/uuid"
+)
+
+var root string
+
+func TestMain(m *testing.M) {
+	var err error
+	root, err = ioutil.TempDir("", "docker-container-test-")
+	if err != nil {
+		panic(err)
+	}
+	defer os.RemoveAll(root)
+
+	os.Exit(m.Run())
+}
+
+func newContainer(t *testing.T) *Container {
+	var (
+		id    = uuid.New()
+		cRoot = filepath.Join(root, id)
+	)
+	if err := os.MkdirAll(cRoot, 0755); err != nil {
+		t.Fatal(err)
+	}
+	c := NewBaseContainer(id, cRoot)
+	c.HostConfig = &containertypes.HostConfig{}
+	return c
+}
+
+func TestViewSaveDelete(t *testing.T) {
 	db, err := NewViewDB()
 	if err != nil {
 		t.Fatal(err)
 	}
-	c := NewBaseContainer("id", "root")
+	c := newContainer(t)
 	if err := c.CheckpointTo(db); err != nil {
 		t.Fatal(err)
 	}
+	if err := db.Delete(c); err != nil {
+		t.Fatal(err)
+	}
 }
 
 func TestViewAll(t *testing.T) {
 	var (
 		db, _ = NewViewDB()
-		one   = NewBaseContainer("id1", "root1")
-		two   = NewBaseContainer("id2", "root2")
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+		two   = newContainer(t)
 	)
 	one.Pid = 10
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
 	two.Pid = 20
-	one.CheckpointTo(db)
-	two.CheckpointTo(db)
-	all, err := db.Snapshot().All()
+	if err := two.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+
+	all, err := db.Snapshot(names).All()
 	if err != nil {
 		t.Fatal(err)
 	}
... ...
@@ -34,24 +78,29 @@ func TestViewAll(t *testing.T) {
 	for i := range all {
 		byID[all[i].ID] = all[i]
 	}
-	if s, ok := byID["id1"]; !ok || s.Pid != 10 {
-		t.Fatalf("expected something different with for id1: %v", s)
+	if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
+		t.Fatalf("expected something different for id=%s: %v", one.ID, s)
 	}
-	if s, ok := byID["id2"]; !ok || s.Pid != 20 {
-		t.Fatalf("expected something different with for id1: %v", s)
+	if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
+		t.Fatalf("expected something different for id=%s: %v", two.ID, s)
 	}
 }
 
 func TestViewGet(t *testing.T) {
-	db, _ := NewViewDB()
-	one := NewBaseContainer("id", "root")
+	var (
+		db, _ = NewViewDB()
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+	)
 	one.ImageID = "some-image-123"
-	one.CheckpointTo(db)
-	s, err := db.Snapshot().Get("id")
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	s, err := db.Snapshot(names).Get(one.ID)
 	if err != nil {
 		t.Fatal(err)
 	}
 	if s == nil || s.ImageID != "some-image-123" {
-		t.Fatalf("expected something different. Got: %v", s)
+		t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
 	}
 }
... ...
@@ -218,7 +218,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *
 
 	runconfig.SetDefaultNetModeIfBlank(hostConfig)
 	container.HostConfig = hostConfig
-	return container.CheckpointAndSaveToDisk(daemon.containersReplica)
+	return container.CheckpointTo(daemon.containersReplica)
 }
 
 // verifyContainerSettings performs validation of the hostconfig and config
... ...
@@ -45,10 +45,11 @@ func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []str
 	return nil
 }
 
+// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
 func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
 	container.Lock()
 	defer container.Unlock()
-	if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 		return fmt.Errorf("Error saving container state: %v", err)
 	}
 	return nil
... ...
@@ -167,11 +167,6 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (
 	runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
 
 	daemon.updateContainerNetworkSettings(container, endpointsConfigs)
-
-	if err := container.ToDisk(); err != nil {
-		logrus.Errorf("Error saving new container to disk: %v", err)
-		return nil, err
-	}
 	if err := daemon.Register(container); err != nil {
 		return nil, err
 	}
... ...
@@ -217,7 +217,7 @@ func (daemon *Daemon) restore() error {
 		go func(c *container.Container) {
 			defer wg.Done()
 			daemon.backportMountSpec(c)
-			if err := c.ToDiskLocking(); err != nil {
+			if err := daemon.checkpointAndSave(c); err != nil {
 				logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
 			}
 
... ...
@@ -289,7 +289,9 @@ func (daemon *Daemon) restore() error {
 				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
 				c.RemovalInProgress = false
 				c.Dead = true
-				c.ToDisk()
+				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+					logrus.Errorf("Failed to update container %s state: %v", c.ID, err)
+				}
 			}
 			c.Unlock()
 		}(c)
... ...
@@ -274,6 +274,10 @@ func TestMigratePre17Volumes(t *testing.T) {
 		}
 	`)
 
+	viewDB, err := container.NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
 	volStore, err := store.New(volumeRoot)
 	if err != nil {
 		t.Fatal(err)
... ...
@@ -284,7 +288,12 @@ func TestMigratePre17Volumes(t *testing.T) {
 	}
 	volumedrivers.Register(drv, volume.DefaultDriverName)
 
-	daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore}
+	daemon := &Daemon{
+		root:              rootDir,
+		repository:        containerRoot,
+		containersReplica: viewDB,
+		volumes:           volStore,
+	}
 	err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600)
 	if err != nil {
 		t.Fatal(err)
... ...
@@ -105,15 +105,11 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	// Mark container dead. We don't want anybody to be restarting it.
 	container.Lock()
 	container.Dead = true
-	if err = container.CheckpointTo(daemon.containersReplica); err != nil {
-		container.Unlock()
-		return err
-	}
 
 	// Save container state to disk. So that if error happens before
 	// container meta file got removed from disk, then a restart of
 	// docker should not make a dead container alive.
-	if err := container.ToDisk(); err != nil && !os.IsNotExist(err) {
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
 		logrus.Errorf("Error saving dying container to disk: %v", err)
 	}
 	container.Unlock()
... ...
@@ -137,7 +133,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo
 	selinuxFreeLxcContexts(container.ProcessLabel)
 	daemon.idIndex.Delete(container.ID)
 	daemon.containers.Delete(container.ID)
-	daemon.containersReplica.Delete(container.ID)
+	daemon.containersReplica.Delete(container)
 	if e := daemon.removeMountPoints(container, removeVolume); e != nil {
 		logrus.Error(e)
 	}
... ...
@@ -100,18 +100,18 @@ type listContext struct {
 	*types.ContainerListOptions
 }
 
-// byContainerCreated is a temporary type used to sort a list of containers by creation time.
-type byContainerCreated []container.Snapshot
+// byCreatedDescending is a temporary type used to sort a list of containers by creation time.
+type byCreatedDescending []container.Snapshot
 
-func (r byContainerCreated) Len() int      { return len(r) }
-func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
-func (r byContainerCreated) Less(i, j int) bool {
-	return r[i].Created.UnixNano() < r[j].Created.UnixNano()
+func (r byCreatedDescending) Len() int      { return len(r) }
+func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+func (r byCreatedDescending) Less(i, j int) bool {
+	return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano()
 }
 
 // Containers returns the list of containers to show given the user's filtering.
 func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {
-	return daemon.reduceContainers(config, daemon.transformContainer)
+	return daemon.reduceContainers(config, daemon.refreshImage)
 }
 
 func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
... ...
@@ -123,7 +123,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex
 		// standard behavior of walking the entire container
 		// list from the daemon's in-memory store
 		all, err := view.All()
-		sort.Sort(sort.Reverse(byContainerCreated(all)))
+		sort.Sort(byCreatedDescending(all))
 		return all, err
 	}
 
... ...
@@ -172,7 +172,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex
 
 	// Restore sort-order after filtering
 	// Created gives us nanosec resolution for sorting
-	sort.Sort(sort.Reverse(byContainerCreated(cntrs)))
+	sort.Sort(byCreatedDescending(cntrs))
 
 	return cntrs, nil
 }
... ...
@@ -180,7 +180,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex
 // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer.
 func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) {
 	var (
-		view       = daemon.containersReplica.Snapshot()
+		view       = daemon.containersReplica.Snapshot(daemon.nameIndex)
 		containers = []*types.Container{}
 	)
 
... ...
@@ -503,9 +503,15 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite
 		}
 	}
 
-	networkExist := fmt.Errorf("container part of network")
+	var (
+		networkExist = errors.New("container part of network")
+		noNetworks   = errors.New("container is not part of any networks")
+	)
 	if ctx.filters.Include("network") {
 		err := ctx.filters.WalkValues("network", func(value string) error {
+			if container.NetworkSettings == nil {
+				return noNetworks
+			}
 			if _, ok := container.NetworkSettings.Networks[value]; ok {
 				return networkExist
 			}
... ...
@@ -527,7 +533,7 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite
 	if len(ctx.publish) > 0 {
 		shouldSkip := true
 		for port := range ctx.publish {
-			if _, ok := container.PublishPorts[port]; ok {
+			if _, ok := container.PortBindings[port]; ok {
 				shouldSkip = false
 				break
 			}
... ...
@@ -553,40 +559,22 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite
 	return includeContainer
 }
 
-// transformContainer generates the container type expected by the docker ps command.
-func (daemon *Daemon) transformContainer(container *container.Snapshot, ctx *listContext) (*types.Container, error) {
-	newC := &types.Container{
-		ID:              container.ID,
-		Names:           ctx.names[container.ID],
-		ImageID:         container.ImageID,
-		Command:         container.Command,
-		Created:         container.Created.Unix(),
-		State:           container.State,
-		Status:          container.Status,
-		NetworkSettings: &container.NetworkSettings,
-		Ports:           container.Ports,
-		Labels:          container.Labels,
-		Mounts:          container.Mounts,
-	}
-	if newC.Names == nil {
-		// Dead containers will often have no name, so make sure the response isn't null
-		newC.Names = []string{}
-	}
-	newC.HostConfig.NetworkMode = container.HostConfig.NetworkMode
-
-	image := container.Image // if possible keep the original ref
-	if image != container.ImageID {
+// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't
+func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) {
+	c := s.Container
+	image := s.Image // keep the original ref if still valid (hasn't changed)
+	if image != s.ImageID {
 		id, _, err := daemon.GetImageIDAndPlatform(image)
 		if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE {
 			return nil, err
 		}
-		if err != nil || id.String() != container.ImageID {
-			image = container.ImageID
+		if err != nil || id.String() != s.ImageID {
+			// ref changed, we need to use original ID
+			image = s.ImageID
 		}
 	}
-	newC.Image = image
-
-	return newC, nil
+	c.Image = image
+	return &c, nil
 }
 
 // Volumes lists known volumes, using the filter to restrict the range
... ...
@@ -90,7 +90,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		daemon.setStateCounter(c)
 
 		defer c.Unlock()
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
 		return daemon.postRunProcessing(c, e)
... ...
@@ -119,7 +119,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		c.HasBeenStartedBefore = true
 		daemon.setStateCounter(c)
 
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			c.Reset(false)
 			return err
 		}
... ...
@@ -130,7 +130,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		// Container is already locked in this case
 		c.Paused = true
 		daemon.setStateCounter(c)
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
 		daemon.updateHealthMonitor(c)
... ...
@@ -139,7 +139,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		// Container is already locked in this case
 		c.Paused = false
 		daemon.setStateCounter(c)
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
 		daemon.updateHealthMonitor(c)
... ...
@@ -82,7 +82,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		daemon.nameIndex.Release(oldName + k)
 	}
 	daemon.releaseName(oldName)
-	if err = container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+	if err = container.CheckpointTo(daemon.containersReplica); err != nil {
 		return err
 	}
 
... ...
@@ -99,7 +99,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		if err != nil {
 			container.Name = oldName
 			container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint
-			if e := container.CheckpointAndSaveToDisk(daemon.containersReplica); e != nil {
+			if e := container.CheckpointTo(daemon.containersReplica); e != nil {
 				logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
 			}
 		}
... ...
@@ -52,7 +52,7 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds i
 		container.HostConfig.AutoRemove = autoRemove
 		// containerStop will write HostConfig to disk, we shall restore AutoRemove
 		// in disk too
-		if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
+		if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil {
 			logrus.Errorf("Write container to disk error: %v", toDiskErr)
 		}
 
... ...
@@ -58,7 +58,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos
 				// if user has change the network mode on starting, clean up the
 				// old networks. It is a deprecated feature and has been removed in Docker 1.12
 				container.NetworkSettings.Networks = nil
-				if err := container.ToDisk(); err != nil {
+				if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 					return err
 				}
 			}
... ...
@@ -117,7 +117,7 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint
 			if container.ExitCode() == 0 {
 				container.SetExitCode(128)
 			}
-			if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+			if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 				logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
 			}
 			container.Reset(false)
... ...
@@ -9,13 +9,14 @@ import (
 	"github.com/docker/docker/libcontainerd"
 )
 
+// getLibcontainerdCreateOptions callers must hold a lock on the container
 func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
 	createOptions := []libcontainerd.CreateOption{}
 
 	// Ensure a runtime has been assigned to this container
 	if container.HostConfig.Runtime == "" {
 		container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
-		container.ToDisk()
+		container.CheckpointTo(daemon.containersReplica)
 	}
 
 	rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime)
... ...
@@ -38,7 +38,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) erro
 		if restoreConfig {
 			container.Lock()
 			container.HostConfig = &backupHostConfig
-			container.CheckpointAndSaveToDisk(daemon.containersReplica)
+			container.CheckpointTo(daemon.containersReplica)
 			container.Unlock()
 		}
 	}()
... ...
@@ -180,7 +180,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
 				container.MountPoints[destination] = &m
 			}
 		}
-		return container.ToDisk()
+		return container.CheckpointTo(daemon.containersReplica)
 	}
 	return nil
 }