Signed-off-by: John Howard <jhoward@microsoft.com>
Adds the graphdriver for Linux containers on Windows
1 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,496 @@ |
0 |
+// +build windows |
|
1 |
+ |
|
2 |
+package lcow |
|
3 |
+ |
|
4 |
+// Maintainer: @jhowardmsft |
|
5 |
+// Graph-driver for Linux Containers On Windows (LCOW) |
|
6 |
+ |
|
7 |
+import ( |
|
8 |
+ "encoding/json" |
|
9 |
+ "fmt" |
|
10 |
+ "io" |
|
11 |
+ "io/ioutil" |
|
12 |
+ "os" |
|
13 |
+ "path/filepath" |
|
14 |
+ "strings" |
|
15 |
+ "sync" |
|
16 |
+ "time" |
|
17 |
+ |
|
18 |
+ "github.com/Sirupsen/logrus" |
|
19 |
+ "github.com/docker/docker/daemon/graphdriver" |
|
20 |
+ "github.com/docker/docker/pkg/archive" |
|
21 |
+ "github.com/docker/docker/pkg/idtools" |
|
22 |
+ "github.com/docker/docker/pkg/system" |
|
23 |
+ "github.com/jhowardmsft/opengcs/gogcs/client" |
|
24 |
+) |
|
25 |
+ |
|
26 |
// init registers the LCOW graph driver with the graphdriver registry
// under the name "lcow", making it selectable by the daemon at startup.
func init() {
	graphdriver.Register("lcow", InitLCOW)
}
|
30 |
+ |
|
31 |
const (
	// sandboxFilename is the name of the file, stored inside a layer's
	// directory, containing that layer's sandbox (read-write layer).
	sandboxFilename = "sandbox.vhdx"
)
|
35 |
+ |
|
36 |
// cacheType is our internal structure representing an item in our local cache
// of things that have been mounted.
type cacheType struct {
	uvmPath   string // Path inside the utility VM ("/mnt/<id>")
	hostPath  string // Path of the VHD on the host
	refCount  int    // How many times it has been mounted (Get minus Put)
	isSandbox bool   // True if this is a read-write sandbox VHD
}
|
44 |
+ |
|
45 |
// Driver represents an LCOW graph driver.
type Driver struct {
	// homeDir is the host path under which we're storing everything.
	homeDir string
	// cachedSandboxFile is the location of the local default-sized cached sandbox.
	cachedSandboxFile string
	// options are the graphdriver options we were initialised with.
	options []string
	// JJH LIFETIME TODO - Remove this and move up to daemon. For now, a global service utility-VM.
	config client.Config

	// it is safe for windows to use a cache here because it does not support
	// restoring containers when the daemon dies.

	// cacheMu is the mutex protecting adds/updates/deletes to our cache.
	cacheMu sync.Mutex
	// cache is the cache of all the IDs we've mounted/unmounted, keyed by layer id.
	cache map[string]cacheType
}
|
64 |
+ |
|
65 |
+// InitLCOW returns a new LCOW storage driver. |
|
66 |
+func InitLCOW(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { |
|
67 |
+ title := "lcowdriver: init:" |
|
68 |
+ logrus.Debugf("%s %s", title, home) |
|
69 |
+ |
|
70 |
+ d := &Driver{ |
|
71 |
+ homeDir: home, |
|
72 |
+ options: options, |
|
73 |
+ cachedSandboxFile: filepath.Join(home, "cache", sandboxFilename), |
|
74 |
+ cache: make(map[string]cacheType), |
|
75 |
+ } |
|
76 |
+ |
|
77 |
+ if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { |
|
78 |
+ return nil, fmt.Errorf("%s failed to create '%s': %v", title, home, err) |
|
79 |
+ } |
|
80 |
+ |
|
81 |
+ // Cache directory for blank sandbox so don't have to pull it from the service VM each time |
|
82 |
+ if err := idtools.MkdirAllAs(filepath.Dir(d.cachedSandboxFile), 0700, 0, 0); err != nil { |
|
83 |
+ return nil, fmt.Errorf("%s failed to create '%s': %v", title, home, err) |
|
84 |
+ } |
|
85 |
+ |
|
86 |
+ return d, nil |
|
87 |
+} |
|
88 |
+ |
|
89 |
+// startUvm starts the service utility VM if it isn't running. |
|
90 |
+// TODO @jhowardmsft. This will change before RS3 ships as we move to a model of one |
|
91 |
+// service VM globally to a service VM per container (or offline operation). However, |
|
92 |
+// for the initial bring-up of LCOW, this is acceptable. |
|
93 |
+func (d *Driver) startUvm(context string) error { |
|
94 |
+ // Nothing to do if it's already running |
|
95 |
+ if d.config.Uvm != nil { |
|
96 |
+ return nil |
|
97 |
+ } |
|
98 |
+ |
|
99 |
+ // So we need to start it. Generate a default configuration |
|
100 |
+ if err := d.config.GenerateDefault(d.options); err != nil { |
|
101 |
+ return fmt.Errorf("failed to generate default gogcs configuration (%s): %s", context, err) |
|
102 |
+ } |
|
103 |
+ |
|
104 |
+ d.config.Name = "LinuxServiceVM" // TODO @jhowardmsft - This requires an in-flight platform change. Can't hard code it to this longer term |
|
105 |
+ if err := d.config.Create(); err != nil { |
|
106 |
+ return fmt.Errorf("failed to start utility VM (%s): %s", context, err) |
|
107 |
+ } |
|
108 |
+ return nil |
|
109 |
+} |
|
110 |
+ |
|
111 |
+// terminateUvm terminates the service utility VM if its running. |
|
112 |
+func (d *Driver) terminateUvm(context string) error { |
|
113 |
+ // Nothing to do if it's not running |
|
114 |
+ if d.config.Uvm == nil { |
|
115 |
+ return nil |
|
116 |
+ } |
|
117 |
+ |
|
118 |
+ // FIXME: @jhowardmsft |
|
119 |
+ // This isn't thread-safe yet, but will change anyway with the lifetime |
|
120 |
+ // changes and multiple instances. Defering that work for now. |
|
121 |
+ uvm := d.config.Uvm |
|
122 |
+ d.config.Uvm = nil |
|
123 |
+ |
|
124 |
+ if err := uvm.Terminate(); err != nil { |
|
125 |
+ return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err) |
|
126 |
+ } |
|
127 |
+ |
|
128 |
+ if err := uvm.WaitTimeout(time.Duration(d.config.UvmTimeoutSeconds) * time.Second); err != nil { |
|
129 |
+ return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err) |
|
130 |
+ } |
|
131 |
+ |
|
132 |
+ return nil |
|
133 |
+} |
|
134 |
+ |
|
135 |
// String returns the string representation of the driver. This must match
// the name the graph driver was registered with in init ("lcow").
func (d *Driver) String() string {
	return "lcow"
}
|
140 |
+ |
|
141 |
// Status returns the status of the driver as key/value pairs. Currently
// only the driver name is reported, with no additional detail.
func (d *Driver) Status() [][2]string {
	return [][2]string{
		{"LCOW", ""},
	}
}
|
147 |
+ |
|
148 |
+// Exists returns true if the given id is registered with this driver. |
|
149 |
+func (d *Driver) Exists(id string) bool { |
|
150 |
+ _, err := os.Lstat(d.dir(id)) |
|
151 |
+ logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil) |
|
152 |
+ return err == nil |
|
153 |
+} |
|
154 |
+ |
|
155 |
+// CreateReadWrite creates a layer that is writable for use as a container |
|
156 |
+// file system. That equates to creating a sandbox VHDx. |
|
157 |
+func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { |
|
158 |
+ logrus.Debugf("lcowdriver: createreadwrite: id %s", id) |
|
159 |
+ |
|
160 |
+ if err := d.startUvm("createreadwrite"); err != nil { |
|
161 |
+ return err |
|
162 |
+ } |
|
163 |
+ |
|
164 |
+ if err := d.Create(id, parent, opts); err != nil { |
|
165 |
+ return err |
|
166 |
+ } |
|
167 |
+ |
|
168 |
+ return d.config.CreateSandbox(filepath.Join(d.dir(id), sandboxFilename), client.DefaultSandboxSizeMB, d.cachedSandboxFile) |
|
169 |
+} |
|
170 |
+ |
|
171 |
+// Create creates a new read-only layer with the given id. |
|
172 |
+func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { |
|
173 |
+ logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent) |
|
174 |
+ |
|
175 |
+ parentChain, err := d.getLayerChain(parent) |
|
176 |
+ if err != nil { |
|
177 |
+ return err |
|
178 |
+ } |
|
179 |
+ |
|
180 |
+ var layerChain []string |
|
181 |
+ if parent != "" { |
|
182 |
+ if !d.Exists(parent) { |
|
183 |
+ return fmt.Errorf("lcowdriver: cannot create read-only layer with missing parent %s", parent) |
|
184 |
+ } |
|
185 |
+ layerChain = []string{d.dir(parent)} |
|
186 |
+ } |
|
187 |
+ layerChain = append(layerChain, parentChain...) |
|
188 |
+ |
|
189 |
+ // Make sure layers are created with the correct ACL so that VMs can access them. |
|
190 |
+ layerPath := d.dir(id) |
|
191 |
+ logrus.Debugf("lcowdriver: create: id %s: creating layerPath %s", id, layerPath) |
|
192 |
+ if err := system.MkdirAllWithACL(layerPath, 755, system.SddlNtvmAdministratorsLocalSystem); err != nil { |
|
193 |
+ return err |
|
194 |
+ } |
|
195 |
+ |
|
196 |
+ if err := d.setLayerChain(id, layerChain); err != nil { |
|
197 |
+ if err2 := os.RemoveAll(layerPath); err2 != nil { |
|
198 |
+ logrus.Warnf("Failed to remove layer %s: %s", layerPath, err2) |
|
199 |
+ } |
|
200 |
+ return err |
|
201 |
+ } |
|
202 |
+ logrus.Debugf("lcowdriver: createreadwrite: id %s: success", id) |
|
203 |
+ |
|
204 |
+ return nil |
|
205 |
+} |
|
206 |
+ |
|
207 |
+// Remove unmounts and removes the dir information. |
|
208 |
+func (d *Driver) Remove(id string) error { |
|
209 |
+ logrus.Debugf("lcowdriver: remove: id %s", id) |
|
210 |
+ tmpID := fmt.Sprintf("%s-removing", id) |
|
211 |
+ tmpLayerPath := d.dir(tmpID) |
|
212 |
+ layerPath := d.dir(id) |
|
213 |
+ |
|
214 |
+ logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath) |
|
215 |
+ if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { |
|
216 |
+ return err |
|
217 |
+ } |
|
218 |
+ |
|
219 |
+ if err := os.RemoveAll(tmpLayerPath); err != nil { |
|
220 |
+ return err |
|
221 |
+ } |
|
222 |
+ |
|
223 |
+ logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath) |
|
224 |
+ return nil |
|
225 |
+} |
|
226 |
+ |
|
227 |
// Get returns the rootfs path for the id. It is reference counted and
// effectively can be thought of as a "mount the layer into the utility
// vm if it isn't already". mountLabel is accepted for interface
// compatibility; all the real work happens in getEx.
func (d *Driver) Get(id, mountLabel string) (string, error) {
	dir, _, _, err := d.getEx(id)
	return dir, err
}
|
234 |
+ |
|
235 |
// getEx is Get, but also returns the cache-entry and the size of the VHD.
// It increments the reference count for id, inserting a new cache entry
// (refCount 1) on first use, and hot-adds a sandbox VHD to the utility VM
// the first time it is requested.
func (d *Driver) getEx(id string) (string, cacheType, int64, error) {
	title := "lcowdriver: getEx"
	logrus.Debugf("%s %s", title, id)

	if err := d.startUvm(fmt.Sprintf("getex %s", id)); err != nil {
		logrus.Debugf("%s failed to start utility vm: %s", title, err)
		return "", cacheType{}, 0, err
	}

	// Work out what we are working on: the VHD file, its size, and whether
	// it is a read-write sandbox or a read-only layer.
	vhdFilename, vhdSize, isSandbox, err := client.LayerVhdDetails(d.dir(id))
	if err != nil {
		logrus.Debugf("%s failed to get LayerVhdDetails from %s: %s", title, d.dir(id), err)
		return "", cacheType{}, 0, fmt.Errorf("%s failed to open layer or sandbox VHD to open in %s: %s", title, d.dir(id), err)
	}
	logrus.Debugf("%s %s, size %d, isSandbox %t", title, vhdFilename, vhdSize, isSandbox)

	// Decide under the lock whether a hot-add is needed, but perform the
	// (slow) hot-add itself after releasing the lock.
	hotAddRequired := false
	d.cacheMu.Lock()
	var cacheEntry cacheType
	if _, ok := d.cache[id]; !ok {
		// The item is not currently in the cache.
		//
		// Sandboxes need hot-adding in the case that there is a single global utility VM
		// This will change for multiple instances with the lifetime changes.
		if isSandbox {
			hotAddRequired = true
		}
		d.cache[id] = cacheType{
			uvmPath:   fmt.Sprintf("/mnt/%s", id),
			refCount:  1,
			isSandbox: isSandbox,
			hostPath:  vhdFilename,
		}
	} else {
		// Increment the reference counter in the cache.
		cacheEntry = d.cache[id]
		cacheEntry.refCount++
		d.cache[id] = cacheEntry
	}

	// Re-read so cacheEntry reflects the stored state on both branches.
	cacheEntry = d.cache[id]
	logrus.Debugf("%s %s: isSandbox %t, refCount %d", title, id, cacheEntry.isSandbox, cacheEntry.refCount)
	d.cacheMu.Unlock()

	if hotAddRequired {
		logrus.Debugf("%s %s: Hot-Adding %s", title, id, vhdFilename)
		if err := d.config.HotAddVhd(vhdFilename, cacheEntry.uvmPath); err != nil {
			// NOTE(review): the cache entry added above is NOT rolled back
			// on hot-add failure - confirm whether that is intentional.
			return "", cacheType{}, 0, fmt.Errorf("%s hot add %s failed: %s", title, vhdFilename, err)
		}
	}

	logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), cacheEntry, vhdSize)
	return d.dir(id), cacheEntry, vhdSize, nil
}
|
291 |
+ |
|
292 |
// Put does the reverse of get. If there are no more references to
// the layer, it unmounts it from the utility VM (hot-removing sandbox
// VHDs) and deletes its cache entry; otherwise it just decrements the
// reference count.
func (d *Driver) Put(id string) error {
	title := "lcowdriver: put"
	logrus.Debugf("%s %s", title, id)

	if err := d.startUvm(fmt.Sprintf("put %s", id)); err != nil {
		return err
	}

	d.cacheMu.Lock()
	// Bad-news if unmounting something that isn't in the cache.
	entry, ok := d.cache[id]
	if !ok {
		d.cacheMu.Unlock()
		return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id)
	}

	// Are we just decrementing the reference count?
	if entry.refCount > 1 {
		entry.refCount--
		d.cache[id] = entry
		logrus.Debugf("%s %s: refCount decremented to %d", title, id, entry.refCount)
		d.cacheMu.Unlock()
		return nil
	}

	// No more references, so tear it down if previously hot-added.
	// Note: the lock is deliberately held across the hot-remove so the
	// entry cannot be re-mounted concurrently while it's being removed.
	if entry.isSandbox {
		logrus.Debugf("%s %s: Hot-Removing %s", title, id, entry.hostPath)
		if err := d.config.HotRemoveVhd(entry.hostPath); err != nil {
			d.cacheMu.Unlock()
			return fmt.Errorf("%s failed to hot-remove %s from service utility VM: %s", title, entry.hostPath, err)
		}
	}

	// @jhowardmsft TEMPORARY FIX WHILE WAITING FOR HOT-REMOVE TO BE FIXED IN PLATFORM
	//d.terminateUvm(fmt.Sprintf("put %s", id))

	// Remove from the cache map.
	delete(d.cache, id)
	d.cacheMu.Unlock()

	logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, entry.hostPath, entry.uvmPath)
	return nil
}
|
338 |
+ |
|
339 |
+// Cleanup ensures the information the driver stores is properly removed. |
|
340 |
+// We use this opportunity to cleanup any -removing folders which may be |
|
341 |
+// still left if the daemon was killed while it was removing a layer. |
|
342 |
+func (d *Driver) Cleanup() error { |
|
343 |
+ title := "lcowdriver: cleanup" |
|
344 |
+ logrus.Debugf(title) |
|
345 |
+ |
|
346 |
+ d.cacheMu.Lock() |
|
347 |
+ for k, v := range d.cache { |
|
348 |
+ logrus.Debugf("%s cache entry: %s: %+v", title, k, v) |
|
349 |
+ if v.refCount > 0 { |
|
350 |
+ logrus.Warnf("%s leaked %s: %+v", title, k, v) |
|
351 |
+ } |
|
352 |
+ } |
|
353 |
+ d.cacheMu.Unlock() |
|
354 |
+ |
|
355 |
+ items, err := ioutil.ReadDir(d.homeDir) |
|
356 |
+ if err != nil { |
|
357 |
+ if os.IsNotExist(err) { |
|
358 |
+ return nil |
|
359 |
+ } |
|
360 |
+ return err |
|
361 |
+ } |
|
362 |
+ |
|
363 |
+ // Note we don't return an error below - it's possible the files |
|
364 |
+ // are locked. However, next time around after the daemon exits, |
|
365 |
+ // we likely will be able to to cleanup successfully. Instead we log |
|
366 |
+ // warnings if there are errors. |
|
367 |
+ for _, item := range items { |
|
368 |
+ if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { |
|
369 |
+ if err := os.RemoveAll(filepath.Join(d.homeDir, item.Name())); err != nil { |
|
370 |
+ logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err) |
|
371 |
+ } else { |
|
372 |
+ logrus.Infof("%s cleaned up %s", title, item.Name()) |
|
373 |
+ } |
|
374 |
+ } |
|
375 |
+ } |
|
376 |
+ return nil |
|
377 |
+} |
|
378 |
+ |
|
379 |
+// Diff takes a layer (and it's parent layer which may be null, but |
|
380 |
+// is ignored by this implementation below) and returns a reader for |
|
381 |
+// a tarstream representing the layers contents. The id could be |
|
382 |
+// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics |
|
383 |
+// of this function dictate that the layer is already mounted. |
|
384 |
+func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { |
|
385 |
+ title := "lcowdriver: diff:" |
|
386 |
+ logrus.Debugf("%s id %s", title, id) |
|
387 |
+ |
|
388 |
+ if err := d.startUvm(fmt.Sprintf("diff %s", id)); err != nil { |
|
389 |
+ return nil, err |
|
390 |
+ } |
|
391 |
+ |
|
392 |
+ d.cacheMu.Lock() |
|
393 |
+ if _, ok := d.cache[id]; !ok { |
|
394 |
+ d.cacheMu.Unlock() |
|
395 |
+ return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id) |
|
396 |
+ } |
|
397 |
+ cacheEntry := d.cache[id] |
|
398 |
+ d.cacheMu.Unlock() |
|
399 |
+ |
|
400 |
+ // Stat to get size |
|
401 |
+ fileInfo, err := os.Stat(cacheEntry.hostPath) |
|
402 |
+ if err != nil { |
|
403 |
+ return nil, fmt.Errorf("%s failed to stat %s: %s", title, cacheEntry.hostPath, err) |
|
404 |
+ } |
|
405 |
+ |
|
406 |
+ // Then obtain the tar stream for it |
|
407 |
+ logrus.Debugf("%s %s, size %d, isSandbox %t", title, cacheEntry.hostPath, fileInfo.Size(), cacheEntry.isSandbox) |
|
408 |
+ tarReadCloser, err := d.config.VhdToTar(cacheEntry.hostPath, cacheEntry.uvmPath, cacheEntry.isSandbox, fileInfo.Size()) |
|
409 |
+ if err != nil { |
|
410 |
+ return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err) |
|
411 |
+ } |
|
412 |
+ logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent) |
|
413 |
+ return tarReadCloser, nil |
|
414 |
+} |
|
415 |
+ |
|
416 |
+// ApplyDiff extracts the changeset from the given diff into the |
|
417 |
+// layer with the specified id and parent, returning the size of the |
|
418 |
+// new layer in bytes. The layer should not be mounted when calling |
|
419 |
+// this function. Another way of describing this is that ApplyDiff writes |
|
420 |
+// to a new layer (a VHD in LCOW) the contents of a tarstream it's given. |
|
421 |
+func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { |
|
422 |
+ logrus.Debugf("lcowdriver: applydiff: id %s", id) |
|
423 |
+ |
|
424 |
+ if err := d.startUvm(fmt.Sprintf("applydiff %s", id)); err != nil { |
|
425 |
+ return 0, err |
|
426 |
+ } |
|
427 |
+ |
|
428 |
+ return d.config.TarToVhd(filepath.Join(d.homeDir, id, "layer.vhd"), diff) |
|
429 |
+} |
|
430 |
+ |
|
431 |
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should not be mounted when calling this function.
//
// Not yet implemented: currently always returns (nil, nil).
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
	logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent)
	// TODO @gupta-ak. Needs implementation with assistance from service VM
	return nil, nil
}
|
439 |
+ |
|
440 |
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
//
// Not yet implemented: currently always returns (0, nil).
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
	logrus.Debugf("lcowdriver: diffsize: id %s", id)
	// TODO @gupta-ak. Needs implementation with assistance from service VM
	return 0, nil
}
|
448 |
+ |
|
449 |
+// GetMetadata returns custom driver information. |
|
450 |
+func (d *Driver) GetMetadata(id string) (map[string]string, error) { |
|
451 |
+ logrus.Debugf("lcowdriver: getmetadata: id %s", id) |
|
452 |
+ m := make(map[string]string) |
|
453 |
+ m["dir"] = d.dir(id) |
|
454 |
+ return m, nil |
|
455 |
+} |
|
456 |
+ |
|
457 |
// dir returns the absolute path to the layer on the host. filepath.Base
// strips any directory components from id, keeping the result under homeDir.
func (d *Driver) dir(id string) string {
	return filepath.Join(d.homeDir, filepath.Base(id))
}
|
461 |
+ |
|
462 |
+// getLayerChain returns the layer chain information. |
|
463 |
+func (d *Driver) getLayerChain(id string) ([]string, error) { |
|
464 |
+ jPath := filepath.Join(d.dir(id), "layerchain.json") |
|
465 |
+ logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath) |
|
466 |
+ content, err := ioutil.ReadFile(jPath) |
|
467 |
+ if os.IsNotExist(err) { |
|
468 |
+ return nil, nil |
|
469 |
+ } else if err != nil { |
|
470 |
+ return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err) |
|
471 |
+ } |
|
472 |
+ |
|
473 |
+ var layerChain []string |
|
474 |
+ err = json.Unmarshal(content, &layerChain) |
|
475 |
+ if err != nil { |
|
476 |
+ return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshall layerchain file %s: %s", id, jPath, err) |
|
477 |
+ } |
|
478 |
+ return layerChain, nil |
|
479 |
+} |
|
480 |
+ |
|
481 |
+// setLayerChain stores the layer chain information on disk. |
|
482 |
+func (d *Driver) setLayerChain(id string, chain []string) error { |
|
483 |
+ content, err := json.Marshal(&chain) |
|
484 |
+ if err != nil { |
|
485 |
+ return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshall layerchain json: %s", id, err) |
|
486 |
+ } |
|
487 |
+ |
|
488 |
+ jPath := filepath.Join(d.dir(id), "layerchain.json") |
|
489 |
+ logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath) |
|
490 |
+ err = ioutil.WriteFile(jPath, content, 0600) |
|
491 |
+ if err != nil { |
|
492 |
+ return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err) |
|
493 |
+ } |
|
494 |
+ return nil |
|
495 |
+} |