Move plugins to shared distribution stack with images.
Create immutable plugin config that matches schema2 requirements.
Ensure data being pushed is same as pulled/created.
Store distribution artifacts in a blobstore.
Run init layer setup for every plugin start.
Fix breakouts from unsafe file accesses.
Add support for `docker plugin install --alias`
Uses normalized references for default names to avoid collisions when using default hosts/tags.
Some refactoring of the plugin manager to support the change, such as removing the singleton manager and adding a manager config struct.
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Signed-off-by: Derek McGowan <derek@mcgstyle.net>
... | ... |
@@ -5,6 +5,7 @@ import ( |
5 | 5 |
"net/http" |
6 | 6 |
|
7 | 7 |
enginetypes "github.com/docker/docker/api/types" |
8 |
+ "github.com/docker/docker/reference" |
|
8 | 9 |
"golang.org/x/net/context" |
9 | 10 |
) |
10 | 11 |
|
... | ... |
@@ -13,11 +14,11 @@ type Backend interface { |
13 | 13 |
Disable(name string, config *enginetypes.PluginDisableConfig) error |
14 | 14 |
Enable(name string, config *enginetypes.PluginEnableConfig) error |
15 | 15 |
List() ([]enginetypes.Plugin, error) |
16 |
- Inspect(name string) (enginetypes.Plugin, error) |
|
16 |
+ Inspect(name string) (*enginetypes.Plugin, error) |
|
17 | 17 |
Remove(name string, config *enginetypes.PluginRmConfig) error |
18 | 18 |
Set(name string, args []string) error |
19 |
- Privileges(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) |
|
20 |
- Pull(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges) error |
|
21 |
- Push(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) error |
|
22 |
- CreateFromContext(ctx context.Context, tarCtx io.Reader, options *enginetypes.PluginCreateOptions) error |
|
19 |
+ Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) |
|
20 |
+ Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error |
|
21 |
+ Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error |
|
22 |
+ CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error |
|
23 | 23 |
} |
... | ... |
@@ -30,8 +30,8 @@ func (r *pluginRouter) initRoutes() { |
30 | 30 |
router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), |
31 | 31 |
router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? |
32 | 32 |
router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), |
33 |
- router.NewPostRoute("/plugins/pull", r.pullPlugin), |
|
34 |
- router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin), |
|
33 |
+ router.Cancellable(router.NewPostRoute("/plugins/pull", r.pullPlugin)), |
|
34 |
+ router.Cancellable(router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin)), |
|
35 | 35 |
router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), |
36 | 36 |
router.NewPostRoute("/plugins/create", r.createPlugin), |
37 | 37 |
} |
... | ... |
@@ -7,8 +7,13 @@ import ( |
7 | 7 |
"strconv" |
8 | 8 |
"strings" |
9 | 9 |
|
10 |
+ distreference "github.com/docker/distribution/reference" |
|
10 | 11 |
"github.com/docker/docker/api/server/httputils" |
11 | 12 |
"github.com/docker/docker/api/types" |
13 |
+ "github.com/docker/docker/pkg/ioutils" |
|
14 |
+ "github.com/docker/docker/pkg/streamformatter" |
|
15 |
+ "github.com/docker/docker/reference" |
|
16 |
+ "github.com/pkg/errors" |
|
12 | 17 |
"golang.org/x/net/context" |
13 | 18 |
) |
14 | 19 |
|
... | ... |
@@ -34,6 +39,48 @@ func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) |
34 | 34 |
return metaHeaders, authConfig |
35 | 35 |
} |
36 | 36 |
|
37 |
+// parseRemoteRef parses the remote reference into a reference.Named |
|
38 |
+// returning the tag associated with the reference. In the case the |
|
39 |
+// given reference string includes both digest and tag, the returned |
|
40 |
+// reference will have the digest without the tag, but the tag will |
|
41 |
+// be returned. |
|
42 |
+func parseRemoteRef(remote string) (reference.Named, string, error) { |
|
43 |
+ // Parse remote reference, supporting remotes with name and tag |
|
44 |
+ // NOTE: Using distribution reference to handle references |
|
45 |
+ // containing both a name and digest |
|
46 |
+ remoteRef, err := distreference.ParseNamed(remote) |
|
47 |
+ if err != nil { |
|
48 |
+ return nil, "", err |
|
49 |
+ } |
|
50 |
+ |
|
51 |
+ var tag string |
|
52 |
+ if t, ok := remoteRef.(distreference.Tagged); ok { |
|
53 |
+ tag = t.Tag() |
|
54 |
+ } |
|
55 |
+ |
|
56 |
+ // Convert distribution reference to docker reference |
|
57 |
+ // TODO: remove when docker reference changes reconciled upstream |
|
58 |
+ ref, err := reference.WithName(remoteRef.Name()) |
|
59 |
+ if err != nil { |
|
60 |
+ return nil, "", err |
|
61 |
+ } |
|
62 |
+ if d, ok := remoteRef.(distreference.Digested); ok { |
|
63 |
+ ref, err = reference.WithDigest(ref, d.Digest()) |
|
64 |
+ if err != nil { |
|
65 |
+ return nil, "", err |
|
66 |
+ } |
|
67 |
+ } else if tag != "" { |
|
68 |
+ ref, err = reference.WithTag(ref, tag) |
|
69 |
+ if err != nil { |
|
70 |
+ return nil, "", err |
|
71 |
+ } |
|
72 |
+ } else { |
|
73 |
+ ref = reference.WithDefaultTag(ref) |
|
74 |
+ } |
|
75 |
+ |
|
76 |
+ return ref, tag, nil |
|
77 |
+} |
|
78 |
+ |
|
37 | 79 |
func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { |
38 | 80 |
if err := httputils.ParseForm(r); err != nil { |
39 | 81 |
return err |
... | ... |
@@ -41,7 +88,12 @@ func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter |
41 | 41 |
|
42 | 42 |
metaHeaders, authConfig := parseHeaders(r.Header) |
43 | 43 |
|
44 |
- privileges, err := pr.backend.Privileges(r.FormValue("name"), metaHeaders, authConfig) |
|
44 |
+ ref, _, err := parseRemoteRef(r.FormValue("remote")) |
|
45 |
+ if err != nil { |
|
46 |
+ return err |
|
47 |
+ } |
|
48 |
+ |
|
49 |
+ privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) |
|
45 | 50 |
if err != nil { |
46 | 51 |
return err |
47 | 52 |
} |
... | ... |
@@ -50,20 +102,66 @@ func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter |
50 | 50 |
|
51 | 51 |
func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { |
52 | 52 |
if err := httputils.ParseForm(r); err != nil { |
53 |
- return err |
|
53 |
+ return errors.Wrap(err, "failed to parse form") |
|
54 | 54 |
} |
55 | 55 |
|
56 | 56 |
var privileges types.PluginPrivileges |
57 |
- if err := json.NewDecoder(r.Body).Decode(&privileges); err != nil { |
|
58 |
- return err |
|
57 |
+ dec := json.NewDecoder(r.Body) |
|
58 |
+ if err := dec.Decode(&privileges); err != nil { |
|
59 |
+ return errors.Wrap(err, "failed to parse privileges") |
|
60 |
+ } |
|
61 |
+ if dec.More() { |
|
62 |
+ return errors.New("invalid privileges") |
|
59 | 63 |
} |
60 | 64 |
|
61 | 65 |
metaHeaders, authConfig := parseHeaders(r.Header) |
62 | 66 |
|
63 |
- if err := pr.backend.Pull(r.FormValue("name"), metaHeaders, authConfig, privileges); err != nil { |
|
67 |
+ ref, tag, err := parseRemoteRef(r.FormValue("remote")) |
|
68 |
+ if err != nil { |
|
64 | 69 |
return err |
65 | 70 |
} |
66 |
- w.WriteHeader(http.StatusCreated) |
|
71 |
+ |
|
72 |
+ name := r.FormValue("name") |
|
73 |
+ if name == "" { |
|
74 |
+ if _, ok := ref.(reference.Canonical); ok { |
|
75 |
+ trimmed := reference.TrimNamed(ref) |
|
76 |
+ if tag != "" { |
|
77 |
+ nt, err := reference.WithTag(trimmed, tag) |
|
78 |
+ if err != nil { |
|
79 |
+ return err |
|
80 |
+ } |
|
81 |
+ name = nt.String() |
|
82 |
+ } else { |
|
83 |
+ name = reference.WithDefaultTag(trimmed).String() |
|
84 |
+ } |
|
85 |
+ } else { |
|
86 |
+ name = ref.String() |
|
87 |
+ } |
|
88 |
+ } else { |
|
89 |
+ localRef, err := reference.ParseNamed(name) |
|
90 |
+ if err != nil { |
|
91 |
+ return err |
|
92 |
+ } |
|
93 |
+ if _, ok := localRef.(reference.Canonical); ok { |
|
94 |
+ return errors.New("cannot use digest in plugin tag") |
|
95 |
+ } |
|
96 |
+ if distreference.IsNameOnly(localRef) { |
|
97 |
+ // TODO: log change in name to out stream |
|
98 |
+ name = reference.WithDefaultTag(localRef).String() |
|
99 |
+ } |
|
100 |
+ } |
|
101 |
+ w.Header().Set("Docker-Plugin-Name", name) |
|
102 |
+ |
|
103 |
+ w.Header().Set("Content-Type", "application/json") |
|
104 |
+ output := ioutils.NewWriteFlusher(w) |
|
105 |
+ |
|
106 |
+ if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { |
|
107 |
+ if !output.Flushed() { |
|
108 |
+ return err |
|
109 |
+ } |
|
110 |
+ output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) |
|
111 |
+ } |
|
112 |
+ |
|
67 | 113 |
return nil |
68 | 114 |
} |
69 | 115 |
|
... | ... |
@@ -125,12 +223,21 @@ func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, |
125 | 125 |
|
126 | 126 |
func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { |
127 | 127 |
if err := httputils.ParseForm(r); err != nil { |
128 |
- return err |
|
128 |
+ return errors.Wrap(err, "failed to parse form") |
|
129 | 129 |
} |
130 | 130 |
|
131 | 131 |
metaHeaders, authConfig := parseHeaders(r.Header) |
132 | 132 |
|
133 |
- return pr.backend.Push(vars["name"], metaHeaders, authConfig) |
|
133 |
+ w.Header().Set("Content-Type", "application/json") |
|
134 |
+ output := ioutils.NewWriteFlusher(w) |
|
135 |
+ |
|
136 |
+ if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { |
|
137 |
+ if !output.Flushed() { |
|
138 |
+ return err |
|
139 |
+ } |
|
140 |
+ output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) |
|
141 |
+ } |
|
142 |
+ return nil |
|
134 | 143 |
} |
135 | 144 |
|
136 | 145 |
func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { |
... | ... |
@@ -1347,16 +1347,13 @@ definitions: |
1347 | 1347 |
Plugin: |
1348 | 1348 |
description: "A plugin for the Engine API" |
1349 | 1349 |
type: "object" |
1350 |
- required: [Settings, Enabled, Config, Name, Tag] |
|
1350 |
+ required: [Settings, Enabled, Config, Name] |
|
1351 | 1351 |
properties: |
1352 | 1352 |
Id: |
1353 | 1353 |
type: "string" |
1354 | 1354 |
Name: |
1355 | 1355 |
type: "string" |
1356 | 1356 |
x-nullable: false |
1357 |
- Tag: |
|
1358 |
- type: "string" |
|
1359 |
- x-nullable: false |
|
1360 | 1357 |
Enabled: |
1361 | 1358 |
description: "True when the plugin is running. False when the plugin is not running, only installed." |
1362 | 1359 |
type: "boolean" |
... | ... |
@@ -1392,7 +1389,7 @@ definitions: |
1392 | 1392 |
- Documentation |
1393 | 1393 |
- Interface |
1394 | 1394 |
- Entrypoint |
1395 |
- - Workdir |
|
1395 |
+ - WorkDir |
|
1396 | 1396 |
- Network |
1397 | 1397 |
- Linux |
1398 | 1398 |
- PropagatedMount |
... | ... |
@@ -1423,7 +1420,7 @@ definitions: |
1423 | 1423 |
type: "array" |
1424 | 1424 |
items: |
1425 | 1425 |
type: "string" |
1426 |
- Workdir: |
|
1426 |
+ WorkDir: |
|
1427 | 1427 |
type: "string" |
1428 | 1428 |
x-nullable: false |
1429 | 1429 |
User: |
... | ... |
@@ -1490,6 +1487,15 @@ definitions: |
1490 | 1490 |
type: "array" |
1491 | 1491 |
items: |
1492 | 1492 |
type: "string" |
1493 |
+ rootfs: |
|
1494 |
+ type: "object" |
|
1495 |
+ properties: |
|
1496 |
+ type: |
|
1497 |
+ type: "string" |
|
1498 |
+ diff_ids: |
|
1499 |
+ type: "array" |
|
1500 |
+ items: |
|
1501 |
+ type: "string" |
|
1493 | 1502 |
example: |
1494 | 1503 |
Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" |
1495 | 1504 |
Name: "tiborvass/no-remove" |
... | ... |
@@ -1528,7 +1534,7 @@ definitions: |
1528 | 1528 |
Entrypoint: |
1529 | 1529 |
- "plugin-no-remove" |
1530 | 1530 |
- "/data" |
1531 |
- Workdir: "" |
|
1531 |
+ WorkDir: "" |
|
1532 | 1532 |
User: {} |
1533 | 1533 |
Network: |
1534 | 1534 |
Type: "host" |
... | ... |
@@ -6397,7 +6403,7 @@ paths: |
6397 | 6397 |
Entrypoint: |
6398 | 6398 |
- "plugin-no-remove" |
6399 | 6399 |
- "/data" |
6400 |
- Workdir: "" |
|
6400 |
+ WorkDir: "" |
|
6401 | 6401 |
User: {} |
6402 | 6402 |
Network: |
6403 | 6403 |
Type: "host" |
... | ... |
@@ -6503,14 +6509,22 @@ paths: |
6503 | 6503 |
schema: |
6504 | 6504 |
$ref: "#/definitions/ErrorResponse" |
6505 | 6505 |
parameters: |
6506 |
- - name: "name" |
|
6506 |
+ - name: "remote" |
|
6507 | 6507 |
in: "query" |
6508 | 6508 |
description: | |
6509 |
- The plugin to install. |
|
6509 |
+ Remote reference for plugin to install. |
|
6510 | 6510 |
|
6511 | 6511 |
The `:latest` tag is optional, and is used as the default if omitted. |
6512 | 6512 |
required: true |
6513 | 6513 |
type: "string" |
6514 |
+ - name: "name" |
|
6515 |
+ in: "query" |
|
6516 |
+ description: | |
|
6517 |
+ Local name for the pulled plugin. |
|
6518 |
+ |
|
6519 |
+ The `:latest` tag is optional, and is used as the default if omitted. |
|
6520 |
+ required: false |
|
6521 |
+ type: "string" |
|
6514 | 6522 |
- name: "X-Registry-Auth" |
6515 | 6523 |
in: "header" |
6516 | 6524 |
description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" |
... | ... |
@@ -350,6 +350,7 @@ type PluginInstallOptions struct { |
350 | 350 |
Disabled bool |
351 | 351 |
AcceptAllPermissions bool |
352 | 352 |
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry |
353 |
+ RemoteRef string // RemoteRef is the plugin name on the registry |
|
353 | 354 |
PrivilegeFunc RequestPrivilegeFunc |
354 | 355 |
AcceptPermissionsFunc func(PluginPrivileges) (bool, error) |
355 | 356 |
Args []string |
... | ... |
@@ -25,10 +25,6 @@ type Plugin struct { |
25 | 25 |
// settings |
26 | 26 |
// Required: true |
27 | 27 |
Settings PluginSettings `json:"Settings"` |
28 |
- |
|
29 |
- // tag |
|
30 |
- // Required: true |
|
31 |
- Tag string `json:"Tag"` |
|
32 | 28 |
} |
33 | 29 |
|
34 | 30 |
// PluginConfig The config of a plugin. |
... | ... |
@@ -78,9 +74,12 @@ type PluginConfig struct { |
78 | 78 |
// user |
79 | 79 |
User PluginConfigUser `json:"User,omitempty"` |
80 | 80 |
|
81 |
- // workdir |
|
81 |
+ // work dir |
|
82 | 82 |
// Required: true |
83 |
- Workdir string `json:"Workdir"` |
|
83 |
+ WorkDir string `json:"WorkDir"` |
|
84 |
+ |
|
85 |
+ // rootfs |
|
86 |
+ Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` |
|
84 | 87 |
} |
85 | 88 |
|
86 | 89 |
// PluginConfigArgs plugin config args |
... | ... |
@@ -143,6 +142,17 @@ type PluginConfigNetwork struct { |
143 | 143 |
Type string `json:"Type"` |
144 | 144 |
} |
145 | 145 |
|
146 |
+// PluginConfigRootfs plugin config rootfs |
|
147 |
+// swagger:model PluginConfigRootfs |
|
148 |
+type PluginConfigRootfs struct { |
|
149 |
+ |
|
150 |
+ // diff ids |
|
151 |
+ DiffIds []string `json:"diff_ids"` |
|
152 |
+ |
|
153 |
+ // type |
|
154 |
+ Type string `json:"type,omitempty"` |
|
155 |
+} |
|
156 |
+ |
|
146 | 157 |
// PluginConfigUser plugin config user |
147 | 158 |
// swagger:model PluginConfigUser |
148 | 159 |
type PluginConfigUser struct { |
... | ... |
@@ -64,8 +64,8 @@ func newCreateCommand(dockerCli *command.DockerCli) *cobra.Command { |
64 | 64 |
options := pluginCreateOptions{} |
65 | 65 |
|
66 | 66 |
cmd := &cobra.Command{ |
67 |
- Use: "create [OPTIONS] PLUGIN[:tag] PATH-TO-ROOTFS(rootfs + config.json)", |
|
68 |
- Short: "Create a plugin from a rootfs and config", |
|
67 |
+ Use: "create [OPTIONS] PLUGIN PLUGIN-DATA-DIR", |
|
68 |
+ Short: "Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory.", |
|
69 | 69 |
Args: cli.RequiresMinArgs(2), |
70 | 70 |
RunE: func(cmd *cobra.Command, args []string) error { |
71 | 71 |
options.repoName = args[0] |
... | ... |
@@ -6,7 +6,6 @@ import ( |
6 | 6 |
"github.com/docker/docker/api/types" |
7 | 7 |
"github.com/docker/docker/cli" |
8 | 8 |
"github.com/docker/docker/cli/command" |
9 |
- "github.com/docker/docker/reference" |
|
10 | 9 |
"github.com/spf13/cobra" |
11 | 10 |
"golang.org/x/net/context" |
12 | 11 |
) |
... | ... |
@@ -29,18 +28,7 @@ func newDisableCommand(dockerCli *command.DockerCli) *cobra.Command { |
29 | 29 |
} |
30 | 30 |
|
31 | 31 |
func runDisable(dockerCli *command.DockerCli, name string, force bool) error { |
32 |
- named, err := reference.ParseNamed(name) // FIXME: validate |
|
33 |
- if err != nil { |
|
34 |
- return err |
|
35 |
- } |
|
36 |
- if reference.IsNameOnly(named) { |
|
37 |
- named = reference.WithDefaultTag(named) |
|
38 |
- } |
|
39 |
- ref, ok := named.(reference.NamedTagged) |
|
40 |
- if !ok { |
|
41 |
- return fmt.Errorf("invalid name: %s", named.String()) |
|
42 |
- } |
|
43 |
- if err := dockerCli.Client().PluginDisable(context.Background(), ref.String(), types.PluginDisableOptions{Force: force}); err != nil { |
|
32 |
+ if err := dockerCli.Client().PluginDisable(context.Background(), name, types.PluginDisableOptions{Force: force}); err != nil { |
|
44 | 33 |
return err |
45 | 34 |
} |
46 | 35 |
fmt.Fprintln(dockerCli.Out(), name) |
... | ... |
@@ -6,7 +6,6 @@ import ( |
6 | 6 |
"github.com/docker/docker/api/types" |
7 | 7 |
"github.com/docker/docker/cli" |
8 | 8 |
"github.com/docker/docker/cli/command" |
9 |
- "github.com/docker/docker/reference" |
|
10 | 9 |
"github.com/spf13/cobra" |
11 | 10 |
"golang.org/x/net/context" |
12 | 11 |
) |
... | ... |
@@ -36,23 +35,11 @@ func newEnableCommand(dockerCli *command.DockerCli) *cobra.Command { |
36 | 36 |
|
37 | 37 |
func runEnable(dockerCli *command.DockerCli, opts *enableOpts) error { |
38 | 38 |
name := opts.name |
39 |
- |
|
40 |
- named, err := reference.ParseNamed(name) // FIXME: validate |
|
41 |
- if err != nil { |
|
42 |
- return err |
|
43 |
- } |
|
44 |
- if reference.IsNameOnly(named) { |
|
45 |
- named = reference.WithDefaultTag(named) |
|
46 |
- } |
|
47 |
- ref, ok := named.(reference.NamedTagged) |
|
48 |
- if !ok { |
|
49 |
- return fmt.Errorf("invalid name: %s", named.String()) |
|
50 |
- } |
|
51 | 39 |
if opts.timeout < 0 { |
52 | 40 |
return fmt.Errorf("negative timeout %d is invalid", opts.timeout) |
53 | 41 |
} |
54 | 42 |
|
55 |
- if err := dockerCli.Client().PluginEnable(context.Background(), ref.String(), types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { |
|
43 |
+ if err := dockerCli.Client().PluginEnable(context.Background(), name, types.PluginEnableOptions{Timeout: opts.timeout}); err != nil { |
|
56 | 44 |
return err |
57 | 45 |
} |
58 | 46 |
fmt.Fprintln(dockerCli.Out(), name) |
... | ... |
@@ -2,12 +2,16 @@ package plugin |
2 | 2 |
|
3 | 3 |
import ( |
4 | 4 |
"bufio" |
5 |
+ "errors" |
|
5 | 6 |
"fmt" |
6 | 7 |
"strings" |
7 | 8 |
|
9 |
+ distreference "github.com/docker/distribution/reference" |
|
8 | 10 |
"github.com/docker/docker/api/types" |
11 |
+ registrytypes "github.com/docker/docker/api/types/registry" |
|
9 | 12 |
"github.com/docker/docker/cli" |
10 | 13 |
"github.com/docker/docker/cli/command" |
14 |
+ "github.com/docker/docker/pkg/jsonmessage" |
|
11 | 15 |
"github.com/docker/docker/reference" |
12 | 16 |
"github.com/docker/docker/registry" |
13 | 17 |
"github.com/spf13/cobra" |
... | ... |
@@ -16,6 +20,7 @@ import ( |
16 | 16 |
|
17 | 17 |
type pluginOptions struct { |
18 | 18 |
name string |
19 |
+ alias string |
|
19 | 20 |
grantPerms bool |
20 | 21 |
disable bool |
21 | 22 |
args []string |
... | ... |
@@ -39,41 +44,67 @@ func newInstallCommand(dockerCli *command.DockerCli) *cobra.Command { |
39 | 39 |
flags := cmd.Flags() |
40 | 40 |
flags.BoolVar(&options.grantPerms, "grant-all-permissions", false, "Grant all permissions necessary to run the plugin") |
41 | 41 |
flags.BoolVar(&options.disable, "disable", false, "Do not enable the plugin on install") |
42 |
+ flags.StringVar(&options.alias, "alias", "", "Local name for plugin") |
|
42 | 43 |
|
43 | 44 |
return cmd |
44 | 45 |
} |
45 | 46 |
|
46 |
-func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { |
|
47 |
- named, err := reference.ParseNamed(opts.name) // FIXME: validate |
|
47 |
+func getRepoIndexFromUnnormalizedRef(ref distreference.Named) (*registrytypes.IndexInfo, error) { |
|
48 |
+ named, err := reference.ParseNamed(ref.Name()) |
|
48 | 49 |
if err != nil { |
49 |
- return err |
|
50 |
+ return nil, err |
|
50 | 51 |
} |
51 |
- if reference.IsNameOnly(named) { |
|
52 |
- named = reference.WithDefaultTag(named) |
|
52 |
+ |
|
53 |
+ repoInfo, err := registry.ParseRepositoryInfo(named) |
|
54 |
+ if err != nil { |
|
55 |
+ return nil, err |
|
53 | 56 |
} |
54 |
- ref, ok := named.(reference.NamedTagged) |
|
55 |
- if !ok { |
|
56 |
- return fmt.Errorf("invalid name: %s", named.String()) |
|
57 |
+ |
|
58 |
+ return repoInfo.Index, nil |
|
59 |
+} |
|
60 |
+ |
|
61 |
+func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { |
|
62 |
+ // Parse name using distribution reference package to support name |
|
63 |
+ // containing both tag and digest. Names with both tag and digest |
|
64 |
+ // will be treated by the daemon as a pull by digest with |
|
65 |
+ // an alias for the tag (if no alias is provided). |
|
66 |
+ ref, err := distreference.ParseNamed(opts.name) |
|
67 |
+ if err != nil { |
|
68 |
+ return err |
|
57 | 69 |
} |
58 | 70 |
|
59 |
- ctx := context.Background() |
|
71 |
+ alias := "" |
|
72 |
+ if opts.alias != "" { |
|
73 |
+ aref, err := reference.ParseNamed(opts.alias) |
|
74 |
+ if err != nil { |
|
75 |
+ return err |
|
76 |
+ } |
|
77 |
+ aref = reference.WithDefaultTag(aref) |
|
78 |
+ if _, ok := aref.(reference.NamedTagged); !ok { |
|
79 |
+ return fmt.Errorf("invalid name: %s", opts.alias) |
|
80 |
+ } |
|
81 |
+ alias = aref.String() |
|
82 |
+ } |
|
60 | 83 |
|
61 |
- repoInfo, err := registry.ParseRepositoryInfo(named) |
|
84 |
+ index, err := getRepoIndexFromUnnormalizedRef(ref) |
|
62 | 85 |
if err != nil { |
63 | 86 |
return err |
64 | 87 |
} |
65 | 88 |
|
66 |
- authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index) |
|
89 |
+ ctx := context.Background() |
|
90 |
+ |
|
91 |
+ authConfig := command.ResolveAuthConfig(ctx, dockerCli, index) |
|
67 | 92 |
|
68 | 93 |
encodedAuth, err := command.EncodeAuthToBase64(authConfig) |
69 | 94 |
if err != nil { |
70 | 95 |
return err |
71 | 96 |
} |
72 | 97 |
|
73 |
- registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "plugin install") |
|
98 |
+ registryAuthFunc := command.RegistryAuthenticationPrivilegedFunc(dockerCli, index, "plugin install") |
|
74 | 99 |
|
75 | 100 |
options := types.PluginInstallOptions{ |
76 | 101 |
RegistryAuth: encodedAuth, |
102 |
+ RemoteRef: ref.String(), |
|
77 | 103 |
Disabled: opts.disable, |
78 | 104 |
AcceptAllPermissions: opts.grantPerms, |
79 | 105 |
AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.name), |
... | ... |
@@ -81,10 +112,19 @@ func runInstall(dockerCli *command.DockerCli, opts pluginOptions) error { |
81 | 81 |
PrivilegeFunc: registryAuthFunc, |
82 | 82 |
Args: opts.args, |
83 | 83 |
} |
84 |
- if err := dockerCli.Client().PluginInstall(ctx, ref.String(), options); err != nil { |
|
84 |
+ |
|
85 |
+ responseBody, err := dockerCli.Client().PluginInstall(ctx, alias, options) |
|
86 |
+ if err != nil { |
|
87 |
+ if strings.Contains(err.Error(), "target is image") { |
|
88 |
+ return errors.New(err.Error() + " - Use `docker image pull`") |
|
89 |
+ } |
|
90 |
+ return err |
|
91 |
+ } |
|
92 |
+ defer responseBody.Close() |
|
93 |
+ if err := jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil); err != nil { |
|
85 | 94 |
return err |
86 | 95 |
} |
87 |
- fmt.Fprintln(dockerCli.Out(), opts.name) |
|
96 |
+ fmt.Fprintf(dockerCli.Out(), "Installed plugin %s\n", opts.name) // todo: return proper values from the API for this result |
|
88 | 97 |
return nil |
89 | 98 |
} |
90 | 99 |
|
... | ... |
@@ -44,7 +44,7 @@ func runList(dockerCli *command.DockerCli, opts listOptions) error { |
44 | 44 |
} |
45 | 45 |
|
46 | 46 |
w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) |
47 |
- fmt.Fprintf(w, "ID \tNAME \tTAG \tDESCRIPTION\tENABLED") |
|
47 |
+ fmt.Fprintf(w, "ID \tNAME \tDESCRIPTION\tENABLED") |
|
48 | 48 |
fmt.Fprintf(w, "\n") |
49 | 49 |
|
50 | 50 |
for _, p := range plugins { |
... | ... |
@@ -56,7 +56,7 @@ func runList(dockerCli *command.DockerCli, opts listOptions) error { |
56 | 56 |
desc = stringutils.Ellipsis(desc, 45) |
57 | 57 |
} |
58 | 58 |
|
59 |
- fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%v\n", id, p.Name, p.Tag, desc, p.Enabled) |
|
59 |
+ fmt.Fprintf(w, "%s\t%s\t%s\t%v\n", id, p.Name, desc, p.Enabled) |
|
60 | 60 |
} |
61 | 61 |
w.Flush() |
62 | 62 |
return nil |
... | ... |
@@ -7,6 +7,7 @@ import ( |
7 | 7 |
|
8 | 8 |
"github.com/docker/docker/cli" |
9 | 9 |
"github.com/docker/docker/cli/command" |
10 |
+ "github.com/docker/docker/pkg/jsonmessage" |
|
10 | 11 |
"github.com/docker/docker/reference" |
11 | 12 |
"github.com/docker/docker/registry" |
12 | 13 |
"github.com/spf13/cobra" |
... | ... |
@@ -49,5 +50,10 @@ func runPush(dockerCli *command.DockerCli, name string) error { |
49 | 49 |
if err != nil { |
50 | 50 |
return err |
51 | 51 |
} |
52 |
- return dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth) |
|
52 |
+ responseBody, err := dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth) |
|
53 |
+ if err != nil { |
|
54 |
+ return err |
|
55 |
+ } |
|
56 |
+ defer responseBody.Close() |
|
57 |
+ return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil) |
|
53 | 58 |
} |
... | ... |
@@ -6,7 +6,6 @@ import ( |
6 | 6 |
"github.com/docker/docker/api/types" |
7 | 7 |
"github.com/docker/docker/cli" |
8 | 8 |
"github.com/docker/docker/cli/command" |
9 |
- "github.com/docker/docker/reference" |
|
10 | 9 |
"github.com/spf13/cobra" |
11 | 10 |
"golang.org/x/net/context" |
12 | 11 |
) |
... | ... |
@@ -41,21 +40,8 @@ func runRemove(dockerCli *command.DockerCli, opts *rmOptions) error { |
41 | 41 |
|
42 | 42 |
var errs cli.Errors |
43 | 43 |
for _, name := range opts.plugins { |
44 |
- named, err := reference.ParseNamed(name) // FIXME: validate |
|
45 |
- if err != nil { |
|
46 |
- errs = append(errs, err) |
|
47 |
- continue |
|
48 |
- } |
|
49 |
- if reference.IsNameOnly(named) { |
|
50 |
- named = reference.WithDefaultTag(named) |
|
51 |
- } |
|
52 |
- ref, ok := named.(reference.NamedTagged) |
|
53 |
- if !ok { |
|
54 |
- errs = append(errs, fmt.Errorf("invalid name: %s", named.String())) |
|
55 |
- continue |
|
56 |
- } |
|
57 | 44 |
// TODO: pass names to api instead of making multiple api calls |
58 |
- if err := dockerCli.Client().PluginRemove(ctx, ref.String(), types.PluginRemoveOptions{Force: opts.force}); err != nil { |
|
45 |
+ if err := dockerCli.Client().PluginRemove(ctx, name, types.PluginRemoveOptions{Force: opts.force}); err != nil { |
|
59 | 46 |
errs = append(errs, err) |
60 | 47 |
continue |
61 | 48 |
} |
... | ... |
@@ -1,13 +1,10 @@ |
1 | 1 |
package plugin |
2 | 2 |
|
3 | 3 |
import ( |
4 |
- "fmt" |
|
5 |
- |
|
6 | 4 |
"golang.org/x/net/context" |
7 | 5 |
|
8 | 6 |
"github.com/docker/docker/cli" |
9 | 7 |
"github.com/docker/docker/cli/command" |
10 |
- "github.com/docker/docker/reference" |
|
11 | 8 |
"github.com/spf13/cobra" |
12 | 9 |
) |
13 | 10 |
|
... | ... |
@@ -17,24 +14,9 @@ func newSetCommand(dockerCli *command.DockerCli) *cobra.Command { |
17 | 17 |
Short: "Change settings for a plugin", |
18 | 18 |
Args: cli.RequiresMinArgs(2), |
19 | 19 |
RunE: func(cmd *cobra.Command, args []string) error { |
20 |
- return runSet(dockerCli, args[0], args[1:]) |
|
20 |
+ return dockerCli.Client().PluginSet(context.Background(), args[0], args[1:]) |
|
21 | 21 |
}, |
22 | 22 |
} |
23 | 23 |
|
24 | 24 |
return cmd |
25 | 25 |
} |
26 |
- |
|
27 |
-func runSet(dockerCli *command.DockerCli, name string, args []string) error { |
|
28 |
- named, err := reference.ParseNamed(name) // FIXME: validate |
|
29 |
- if err != nil { |
|
30 |
- return err |
|
31 |
- } |
|
32 |
- if reference.IsNameOnly(named) { |
|
33 |
- named = reference.WithDefaultTag(named) |
|
34 |
- } |
|
35 |
- ref, ok := named.(reference.NamedTagged) |
|
36 |
- if !ok { |
|
37 |
- return fmt.Errorf("invalid name: %s", named.String()) |
|
38 |
- } |
|
39 |
- return dockerCli.Client().PluginSet(context.Background(), ref.String(), args) |
|
40 |
-} |
... | ... |
@@ -111,8 +111,8 @@ type PluginAPIClient interface { |
111 | 111 |
PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error |
112 | 112 |
PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error |
113 | 113 |
PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error |
114 |
- PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error |
|
115 |
- PluginPush(ctx context.Context, name string, registryAuth string) error |
|
114 |
+ PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) |
|
115 |
+ PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) |
|
116 | 116 |
PluginSet(ctx context.Context, name string, args []string) error |
117 | 117 |
PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) |
118 | 118 |
PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error |
... | ... |
@@ -2,73 +2,96 @@ package client |
2 | 2 |
|
3 | 3 |
import ( |
4 | 4 |
"encoding/json" |
5 |
+ "io" |
|
5 | 6 |
"net/http" |
6 | 7 |
"net/url" |
7 | 8 |
|
9 |
+ "github.com/docker/distribution/reference" |
|
8 | 10 |
"github.com/docker/docker/api/types" |
11 |
+ "github.com/pkg/errors" |
|
9 | 12 |
"golang.org/x/net/context" |
10 | 13 |
) |
11 | 14 |
|
12 | 15 |
// PluginInstall installs a plugin |
13 |
-func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (err error) { |
|
14 |
- // FIXME(vdemeester) name is a ref, we might want to parse/validate it here. |
|
16 |
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { |
|
15 | 17 |
query := url.Values{} |
16 |
- query.Set("name", name) |
|
18 |
+ if _, err := reference.ParseNamed(options.RemoteRef); err != nil { |
|
19 |
+ return nil, errors.Wrap(err, "invalid remote reference") |
|
20 |
+ } |
|
21 |
+ query.Set("remote", options.RemoteRef) |
|
22 |
+ |
|
17 | 23 |
resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) |
18 | 24 |
if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { |
25 |
+ // todo: do inspect before to check existing name before checking privileges |
|
19 | 26 |
newAuthHeader, privilegeErr := options.PrivilegeFunc() |
20 | 27 |
if privilegeErr != nil { |
21 | 28 |
ensureReaderClosed(resp) |
22 |
- return privilegeErr |
|
29 |
+ return nil, privilegeErr |
|
23 | 30 |
} |
24 | 31 |
options.RegistryAuth = newAuthHeader |
25 | 32 |
resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) |
26 | 33 |
} |
27 | 34 |
if err != nil { |
28 | 35 |
ensureReaderClosed(resp) |
29 |
- return err |
|
36 |
+ return nil, err |
|
30 | 37 |
} |
31 | 38 |
|
32 | 39 |
var privileges types.PluginPrivileges |
33 | 40 |
if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { |
34 | 41 |
ensureReaderClosed(resp) |
35 |
- return err |
|
42 |
+ return nil, err |
|
36 | 43 |
} |
37 | 44 |
ensureReaderClosed(resp) |
38 | 45 |
|
39 | 46 |
if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { |
40 | 47 |
accept, err := options.AcceptPermissionsFunc(privileges) |
41 | 48 |
if err != nil { |
42 |
- return err |
|
49 |
+ return nil, err |
|
43 | 50 |
} |
44 | 51 |
if !accept { |
45 |
- return pluginPermissionDenied{name} |
|
52 |
+ return nil, pluginPermissionDenied{options.RemoteRef} |
|
46 | 53 |
} |
47 | 54 |
} |
48 | 55 |
|
49 |
- _, err = cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) |
|
56 |
+ // set name for plugin pull, if empty should default to remote reference |
|
57 |
+ query.Set("name", name) |
|
58 |
+ |
|
59 |
+ resp, err = cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) |
|
50 | 60 |
if err != nil { |
51 |
- return err |
|
61 |
+ return nil, err |
|
52 | 62 |
} |
53 | 63 |
|
54 |
- defer func() { |
|
64 |
+ name = resp.header.Get("Docker-Plugin-Name") |
|
65 |
+ |
|
66 |
+ pr, pw := io.Pipe() |
|
67 |
+ go func() { // todo: the client should probably be designed more around the actual api |
|
68 |
+ _, err := io.Copy(pw, resp.body) |
|
55 | 69 |
if err != nil { |
56 |
- delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) |
|
57 |
- ensureReaderClosed(delResp) |
|
70 |
+ pw.CloseWithError(err) |
|
71 |
+ return |
|
58 | 72 |
} |
59 |
- }() |
|
60 |
- |
|
61 |
- if len(options.Args) > 0 { |
|
62 |
- if err := cli.PluginSet(ctx, name, options.Args); err != nil { |
|
63 |
- return err |
|
73 |
+ defer func() { |
|
74 |
+ if err != nil { |
|
75 |
+ delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) |
|
76 |
+ ensureReaderClosed(delResp) |
|
77 |
+ } |
|
78 |
+ }() |
|
79 |
+ if len(options.Args) > 0 { |
|
80 |
+ if err := cli.PluginSet(ctx, name, options.Args); err != nil { |
|
81 |
+ pw.CloseWithError(err) |
|
82 |
+ return |
|
83 |
+ } |
|
64 | 84 |
} |
65 |
- } |
|
66 | 85 |
|
67 |
- if options.Disabled { |
|
68 |
- return nil |
|
69 |
- } |
|
86 |
+ if options.Disabled { |
|
87 |
+ pw.Close() |
|
88 |
+ return |
|
89 |
+ } |
|
70 | 90 |
|
71 |
- return cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) |
|
91 |
+ err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) |
|
92 |
+ pw.CloseWithError(err) |
|
93 |
+ }() |
|
94 |
+ return pr, nil |
|
72 | 95 |
} |
73 | 96 |
|
74 | 97 |
func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { |
... | ... |
@@ -1,13 +1,17 @@ |
1 | 1 |
package client |
2 | 2 |
|
3 | 3 |
import ( |
4 |
+ "io" |
|
5 |
+ |
|
4 | 6 |
"golang.org/x/net/context" |
5 | 7 |
) |
6 | 8 |
|
7 | 9 |
// PluginPush pushes a plugin to a registry |
8 |
-func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) error { |
|
10 |
+func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { |
|
9 | 11 |
headers := map[string][]string{"X-Registry-Auth": {registryAuth}} |
10 | 12 |
resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) |
11 |
- ensureReaderClosed(resp) |
|
12 |
- return err |
|
13 |
+ if err != nil { |
|
14 |
+ return nil, err |
|
15 |
+ } |
|
16 |
+ return resp.body, nil |
|
13 | 17 |
} |
... | ... |
@@ -16,7 +16,7 @@ func TestPluginPushError(t *testing.T) { |
16 | 16 |
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), |
17 | 17 |
} |
18 | 18 |
|
19 |
- err := client.PluginPush(context.Background(), "plugin_name", "") |
|
19 |
+ _, err := client.PluginPush(context.Background(), "plugin_name", "") |
|
20 | 20 |
if err == nil || err.Error() != "Error response from daemon: Server error" { |
21 | 21 |
t.Fatalf("expected a Server Error, got %v", err) |
22 | 22 |
} |
... | ... |
@@ -44,7 +44,7 @@ func TestPluginPush(t *testing.T) { |
44 | 44 |
}), |
45 | 45 |
} |
46 | 46 |
|
47 |
- err := client.PluginPush(context.Background(), "plugin_name", "authtoken") |
|
47 |
+ _, err := client.PluginPush(context.Background(), "plugin_name", "authtoken") |
|
48 | 48 |
if err != nil { |
49 | 49 |
t.Fatal(err) |
50 | 50 |
} |
... | ... |
@@ -42,7 +42,6 @@ import ( |
42 | 42 |
"github.com/docker/docker/pkg/plugingetter" |
43 | 43 |
"github.com/docker/docker/pkg/signal" |
44 | 44 |
"github.com/docker/docker/pkg/system" |
45 |
- "github.com/docker/docker/plugin" |
|
46 | 45 |
"github.com/docker/docker/registry" |
47 | 46 |
"github.com/docker/docker/runconfig" |
48 | 47 |
"github.com/docker/go-connections/tlsconfig" |
... | ... |
@@ -471,7 +470,7 @@ func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { |
471 | 471 |
volume.NewRouter(d), |
472 | 472 |
build.NewRouter(dockerfile.NewBuildManager(d)), |
473 | 473 |
swarmrouter.NewRouter(c), |
474 |
- pluginrouter.NewRouter(plugin.GetManager()), |
|
474 |
+ pluginrouter.NewRouter(d.PluginManager()), |
|
475 | 475 |
} |
476 | 476 |
|
477 | 477 |
if d.NetworkControllerEnabled() { |
... | ... |
@@ -13,6 +13,7 @@ import ( |
13 | 13 |
"github.com/docker/docker/api/types/network" |
14 | 14 |
swarmtypes "github.com/docker/docker/api/types/swarm" |
15 | 15 |
clustertypes "github.com/docker/docker/daemon/cluster/provider" |
16 |
+ "github.com/docker/docker/plugin" |
|
16 | 17 |
"github.com/docker/docker/reference" |
17 | 18 |
"github.com/docker/libnetwork" |
18 | 19 |
"github.com/docker/libnetwork/cluster" |
... | ... |
@@ -54,4 +55,5 @@ type Backend interface { |
54 | 54 |
WaitForDetachment(context.Context, string, string, string, string) error |
55 | 55 |
GetRepository(context.Context, reference.NamedTagged, *types.AuthConfig) (distribution.Repository, bool, error) |
56 | 56 |
LookupImage(name string) (*types.ImageInspect, error) |
57 |
+ PluginManager() *plugin.Manager |
|
57 | 58 |
} |
... | ... |
@@ -8,7 +8,6 @@ import ( |
8 | 8 |
"github.com/docker/docker/api/types/network" |
9 | 9 |
executorpkg "github.com/docker/docker/daemon/cluster/executor" |
10 | 10 |
clustertypes "github.com/docker/docker/daemon/cluster/provider" |
11 |
- "github.com/docker/docker/plugin" |
|
12 | 11 |
networktypes "github.com/docker/libnetwork/types" |
13 | 12 |
"github.com/docker/swarmkit/agent/exec" |
14 | 13 |
"github.com/docker/swarmkit/agent/secrets" |
... | ... |
@@ -54,7 +53,7 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { |
54 | 54 |
addPlugins("Authorization", info.Plugins.Authorization) |
55 | 55 |
|
56 | 56 |
// add v2 plugins |
57 |
- v2Plugins, err := plugin.GetManager().List() |
|
57 |
+ v2Plugins, err := e.backend.PluginManager().List() |
|
58 | 58 |
if err == nil { |
59 | 59 |
for _, plgn := range v2Plugins { |
60 | 60 |
for _, typ := range plgn.Config.Interface.Types { |
... | ... |
@@ -67,13 +66,9 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { |
67 | 67 |
} else if typ.Capability == "networkdriver" { |
68 | 68 |
plgnTyp = "Network" |
69 | 69 |
} |
70 |
- plgnName := plgn.Name |
|
71 |
- if plgn.Tag != "" { |
|
72 |
- plgnName += ":" + plgn.Tag |
|
73 |
- } |
|
74 | 70 |
plugins[api.PluginDescription{ |
75 | 71 |
Type: plgnTyp, |
76 |
- Name: plgnName, |
|
72 |
+ Name: plgn.Name, |
|
77 | 73 |
}] = struct{}{} |
78 | 74 |
} |
79 | 75 |
} |
... | ... |
@@ -8,7 +8,6 @@ package daemon |
8 | 8 |
import ( |
9 | 9 |
"encoding/json" |
10 | 10 |
"fmt" |
11 |
- "io" |
|
12 | 11 |
"io/ioutil" |
13 | 12 |
"net" |
14 | 13 |
"os" |
... | ... |
@@ -17,7 +16,6 @@ import ( |
17 | 17 |
"runtime" |
18 | 18 |
"strings" |
19 | 19 |
"sync" |
20 |
- "syscall" |
|
21 | 20 |
"time" |
22 | 21 |
|
23 | 22 |
"github.com/Sirupsen/logrus" |
... | ... |
@@ -28,6 +26,7 @@ import ( |
28 | 28 |
"github.com/docker/docker/container" |
29 | 29 |
"github.com/docker/docker/daemon/events" |
30 | 30 |
"github.com/docker/docker/daemon/exec" |
31 |
+ "github.com/docker/docker/daemon/initlayer" |
|
31 | 32 |
"github.com/docker/docker/dockerversion" |
32 | 33 |
"github.com/docker/docker/plugin" |
33 | 34 |
"github.com/docker/libnetwork/cluster" |
... | ... |
@@ -42,14 +41,11 @@ import ( |
42 | 42 |
"github.com/docker/docker/pkg/fileutils" |
43 | 43 |
"github.com/docker/docker/pkg/idtools" |
44 | 44 |
"github.com/docker/docker/pkg/plugingetter" |
45 |
- "github.com/docker/docker/pkg/progress" |
|
46 | 45 |
"github.com/docker/docker/pkg/registrar" |
47 | 46 |
"github.com/docker/docker/pkg/signal" |
48 |
- "github.com/docker/docker/pkg/streamformatter" |
|
49 | 47 |
"github.com/docker/docker/pkg/sysinfo" |
50 | 48 |
"github.com/docker/docker/pkg/system" |
51 | 49 |
"github.com/docker/docker/pkg/truncindex" |
52 |
- pluginstore "github.com/docker/docker/plugin/store" |
|
53 | 50 |
"github.com/docker/docker/reference" |
54 | 51 |
"github.com/docker/docker/registry" |
55 | 52 |
"github.com/docker/docker/runconfig" |
... | ... |
@@ -59,6 +55,7 @@ import ( |
59 | 59 |
"github.com/docker/libnetwork" |
60 | 60 |
nwconfig "github.com/docker/libnetwork/config" |
61 | 61 |
"github.com/docker/libtrust" |
62 |
+ "github.com/pkg/errors" |
|
62 | 63 |
) |
63 | 64 |
|
64 | 65 |
var ( |
... | ... |
@@ -99,7 +96,8 @@ type Daemon struct { |
99 | 99 |
gidMaps []idtools.IDMap |
100 | 100 |
layerStore layer.Store |
101 | 101 |
imageStore image.Store |
102 |
- PluginStore *pluginstore.Store |
|
102 |
+ PluginStore *plugin.Store // todo: remove |
|
103 |
+ pluginManager *plugin.Manager |
|
103 | 104 |
nameIndex *registrar.Registrar |
104 | 105 |
linkIndex *linkIndex |
105 | 106 |
containerd libcontainerd.Client |
... | ... |
@@ -554,10 +552,19 @@ func NewDaemon(config *Config, registryService registry.Service, containerdRemot |
554 | 554 |
} |
555 | 555 |
|
556 | 556 |
d.RegistryService = registryService |
557 |
- d.PluginStore = pluginstore.NewStore(config.Root) |
|
557 |
+ d.PluginStore = plugin.NewStore(config.Root) // todo: remove |
|
558 | 558 |
// Plugin system initialization should happen before restore. Do not change order. |
559 |
- if err := d.pluginInit(config, containerdRemote); err != nil { |
|
560 |
- return nil, err |
|
559 |
+ d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ |
|
560 |
+ Root: filepath.Join(config.Root, "plugins"), |
|
561 |
+ ExecRoot: "/run/docker/plugins", // possibly needs fixing |
|
562 |
+ Store: d.PluginStore, |
|
563 |
+ Executor: containerdRemote, |
|
564 |
+ RegistryService: registryService, |
|
565 |
+ LiveRestoreEnabled: config.LiveRestoreEnabled, |
|
566 |
+ LogPluginEvent: d.LogPluginEvent, // todo: make private |
|
567 |
+ }) |
|
568 |
+ if err != nil { |
|
569 |
+ return nil, errors.Wrap(err, "couldn't create plugin manager") |
|
561 | 570 |
} |
562 | 571 |
|
563 | 572 |
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{ |
... | ... |
@@ -895,36 +902,6 @@ func (daemon *Daemon) V6Subnets() []net.IPNet { |
895 | 895 |
return subnets |
896 | 896 |
} |
897 | 897 |
|
898 |
-func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { |
|
899 |
- progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) |
|
900 |
- operationCancelled := false |
|
901 |
- |
|
902 |
- for prog := range progressChan { |
|
903 |
- if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { |
|
904 |
- // don't log broken pipe errors as this is the normal case when a client aborts |
|
905 |
- if isBrokenPipe(err) { |
|
906 |
- logrus.Info("Pull session cancelled") |
|
907 |
- } else { |
|
908 |
- logrus.Errorf("error writing progress to client: %v", err) |
|
909 |
- } |
|
910 |
- cancelFunc() |
|
911 |
- operationCancelled = true |
|
912 |
- // Don't return, because we need to continue draining |
|
913 |
- // progressChan until it's closed to avoid a deadlock. |
|
914 |
- } |
|
915 |
- } |
|
916 |
-} |
|
917 |
- |
|
918 |
-func isBrokenPipe(e error) bool { |
|
919 |
- if netErr, ok := e.(*net.OpError); ok { |
|
920 |
- e = netErr.Err |
|
921 |
- if sysErr, ok := netErr.Err.(*os.SyscallError); ok { |
|
922 |
- e = sysErr.Err |
|
923 |
- } |
|
924 |
- } |
|
925 |
- return e == syscall.EPIPE |
|
926 |
-} |
|
927 |
- |
|
928 | 898 |
// GraphDriverName returns the name of the graph driver used by the layer.Store |
929 | 899 |
func (daemon *Daemon) GraphDriverName() string { |
930 | 900 |
return daemon.layerStore.DriverName() |
... | ... |
@@ -956,7 +933,7 @@ func tempDir(rootDir string, rootUID, rootGID int) (string, error) { |
956 | 956 |
|
957 | 957 |
func (daemon *Daemon) setupInitLayer(initPath string) error { |
958 | 958 |
rootUID, rootGID := daemon.GetRemappedUIDGID() |
959 |
- return setupInitLayer(initPath, rootUID, rootGID) |
|
959 |
+ return initlayer.Setup(initPath, rootUID, rootGID) |
|
960 | 960 |
} |
961 | 961 |
|
962 | 962 |
func setDefaultMtu(config *Config) { |
... | ... |
@@ -1270,12 +1247,8 @@ func (daemon *Daemon) SetCluster(cluster Cluster) { |
1270 | 1270 |
daemon.cluster = cluster |
1271 | 1271 |
} |
1272 | 1272 |
|
1273 |
-func (daemon *Daemon) pluginInit(cfg *Config, remote libcontainerd.Remote) error { |
|
1274 |
- return plugin.Init(cfg.Root, daemon.PluginStore, remote, daemon.RegistryService, cfg.LiveRestoreEnabled, daemon.LogPluginEvent) |
|
1275 |
-} |
|
1276 |
- |
|
1277 | 1273 |
func (daemon *Daemon) pluginShutdown() { |
1278 |
- manager := plugin.GetManager() |
|
1274 |
+ manager := daemon.pluginManager |
|
1279 | 1275 |
// Check for a valid manager object. In error conditions, daemon init can fail |
1280 | 1276 |
// and shutdown called, before plugin manager is initialized. |
1281 | 1277 |
if manager != nil { |
... | ... |
@@ -1283,6 +1256,11 @@ func (daemon *Daemon) pluginShutdown() { |
1283 | 1283 |
} |
1284 | 1284 |
} |
1285 | 1285 |
|
1286 |
+// PluginManager returns current pluginManager associated with the daemon |
|
1287 |
+func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method |
|
1288 |
+ return daemon.pluginManager |
|
1289 |
+} |
|
1290 |
+ |
|
1286 | 1291 |
// CreateDaemonRoot creates the root for the daemon |
1287 | 1292 |
func CreateDaemonRoot(config *Config) error { |
1288 | 1293 |
// get the canonical path to the Docker root directory |
... | ... |
@@ -96,16 +96,6 @@ func (daemon *Daemon) getLayerInit() func(string) error { |
96 | 96 |
return nil |
97 | 97 |
} |
98 | 98 |
|
99 |
-// setupInitLayer populates a directory with mountpoints suitable |
|
100 |
-// for bind-mounting dockerinit into the container. The mountpoint is simply an |
|
101 |
-// empty file at /.dockerinit |
|
102 |
-// |
|
103 |
-// This extra layer is used by all containers as the top-most ro layer. It protects |
|
104 |
-// the container from unwanted side-effects on the rw layer. |
|
105 |
-func setupInitLayer(initLayer string, rootUID, rootGID int) error { |
|
106 |
- return nil |
|
107 |
-} |
|
108 |
- |
|
109 | 99 |
func checkKernel() error { |
110 | 100 |
// solaris can rely upon checkSystem() below, we don't skew kernel versions |
111 | 101 |
return nil |
... | ... |
@@ -858,63 +858,6 @@ func (daemon *Daemon) getLayerInit() func(string) error { |
858 | 858 |
return daemon.setupInitLayer |
859 | 859 |
} |
860 | 860 |
|
861 |
-// setupInitLayer populates a directory with mountpoints suitable |
|
862 |
-// for bind-mounting things into the container. |
|
863 |
-// |
|
864 |
-// This extra layer is used by all containers as the top-most ro layer. It protects |
|
865 |
-// the container from unwanted side-effects on the rw layer. |
|
866 |
-func setupInitLayer(initLayer string, rootUID, rootGID int) error { |
|
867 |
- for pth, typ := range map[string]string{ |
|
868 |
- "/dev/pts": "dir", |
|
869 |
- "/dev/shm": "dir", |
|
870 |
- "/proc": "dir", |
|
871 |
- "/sys": "dir", |
|
872 |
- "/.dockerenv": "file", |
|
873 |
- "/etc/resolv.conf": "file", |
|
874 |
- "/etc/hosts": "file", |
|
875 |
- "/etc/hostname": "file", |
|
876 |
- "/dev/console": "file", |
|
877 |
- "/etc/mtab": "/proc/mounts", |
|
878 |
- } { |
|
879 |
- parts := strings.Split(pth, "/") |
|
880 |
- prev := "/" |
|
881 |
- for _, p := range parts[1:] { |
|
882 |
- prev = filepath.Join(prev, p) |
|
883 |
- syscall.Unlink(filepath.Join(initLayer, prev)) |
|
884 |
- } |
|
885 |
- |
|
886 |
- if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { |
|
887 |
- if os.IsNotExist(err) { |
|
888 |
- if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { |
|
889 |
- return err |
|
890 |
- } |
|
891 |
- switch typ { |
|
892 |
- case "dir": |
|
893 |
- if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { |
|
894 |
- return err |
|
895 |
- } |
|
896 |
- case "file": |
|
897 |
- f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) |
|
898 |
- if err != nil { |
|
899 |
- return err |
|
900 |
- } |
|
901 |
- f.Chown(rootUID, rootGID) |
|
902 |
- f.Close() |
|
903 |
- default: |
|
904 |
- if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { |
|
905 |
- return err |
|
906 |
- } |
|
907 |
- } |
|
908 |
- } else { |
|
909 |
- return err |
|
910 |
- } |
|
911 |
- } |
|
912 |
- } |
|
913 |
- |
|
914 |
- // Layer is ready to use, if it wasn't before. |
|
915 |
- return nil |
|
916 |
-} |
|
917 |
- |
|
918 | 861 |
// Parse the remapped root (user namespace) option, which can be one of: |
919 | 862 |
// username - valid username from /etc/passwd |
920 | 863 |
// username:groupname - valid username; valid groupname from /etc/group |
... | ... |
@@ -61,10 +61,6 @@ func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.Thro |
61 | 61 |
return nil, nil |
62 | 62 |
} |
63 | 63 |
|
64 |
-func setupInitLayer(initLayer string, rootUID, rootGID int) error { |
|
65 |
- return nil |
|
66 |
-} |
|
67 |
- |
|
68 | 64 |
func (daemon *Daemon) getLayerInit() func(string) error { |
69 | 65 |
return nil |
70 | 66 |
} |
... | ... |
@@ -9,6 +9,7 @@ import ( |
9 | 9 |
"github.com/docker/docker/api/types" |
10 | 10 |
"github.com/docker/docker/builder" |
11 | 11 |
"github.com/docker/docker/distribution" |
12 |
+ progressutils "github.com/docker/docker/distribution/utils" |
|
12 | 13 |
"github.com/docker/docker/pkg/progress" |
13 | 14 |
"github.com/docker/docker/reference" |
14 | 15 |
"github.com/docker/docker/registry" |
... | ... |
@@ -84,7 +85,7 @@ func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference. |
84 | 84 |
ctx, cancelFunc := context.WithCancel(ctx) |
85 | 85 |
|
86 | 86 |
go func() { |
87 |
- writeDistributionProgress(cancelFunc, outStream, progressChan) |
|
87 |
+ progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) |
|
88 | 88 |
close(writesDone) |
89 | 89 |
}() |
90 | 90 |
|
... | ... |
@@ -6,6 +6,7 @@ import ( |
6 | 6 |
"github.com/docker/distribution/manifest/schema2" |
7 | 7 |
"github.com/docker/docker/api/types" |
8 | 8 |
"github.com/docker/docker/distribution" |
9 |
+ progressutils "github.com/docker/docker/distribution/utils" |
|
9 | 10 |
"github.com/docker/docker/pkg/progress" |
10 | 11 |
"github.com/docker/docker/reference" |
11 | 12 |
"golang.org/x/net/context" |
... | ... |
@@ -34,7 +35,7 @@ func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHead |
34 | 34 |
ctx, cancelFunc := context.WithCancel(ctx) |
35 | 35 |
|
36 | 36 |
go func() { |
37 |
- writeDistributionProgress(cancelFunc, outStream, progressChan) |
|
37 |
+ progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) |
|
38 | 38 |
close(writesDone) |
39 | 39 |
}() |
40 | 40 |
|
41 | 41 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,13 @@ |
0 |
+// +build solaris,cgo |
|
1 |
+ |
|
2 |
+package initlayer |
|
3 |
+ |
|
4 |
+// Setup populates a directory with mountpoints suitable |
|
5 |
+// for bind-mounting dockerinit into the container. The mountpoint is simply an |
|
6 |
+// empty file at /.dockerinit |
|
7 |
+// |
|
8 |
+// This extra layer is used by all containers as the top-most ro layer. It protects |
|
9 |
+// the container from unwanted side-effects on the rw layer. |
|
10 |
+func Setup(initLayer string, rootUID, rootGID int) error { |
|
11 |
+ return nil |
|
12 |
+} |
0 | 13 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,69 @@ |
0 |
+// +build linux freebsd |
|
1 |
+ |
|
2 |
+package initlayer |
|
3 |
+ |
|
4 |
+import ( |
|
5 |
+ "os" |
|
6 |
+ "path/filepath" |
|
7 |
+ "strings" |
|
8 |
+ "syscall" |
|
9 |
+ |
|
10 |
+ "github.com/docker/docker/pkg/idtools" |
|
11 |
+) |
|
12 |
+ |
|
13 |
+// Setup populates a directory with mountpoints suitable |
|
14 |
+// for bind-mounting things into the container. |
|
15 |
+// |
|
16 |
+// This extra layer is used by all containers as the top-most ro layer. It protects |
|
17 |
+// the container from unwanted side-effects on the rw layer. |
|
18 |
+func Setup(initLayer string, rootUID, rootGID int) error { |
|
19 |
+ for pth, typ := range map[string]string{ |
|
20 |
+ "/dev/pts": "dir", |
|
21 |
+ "/dev/shm": "dir", |
|
22 |
+ "/proc": "dir", |
|
23 |
+ "/sys": "dir", |
|
24 |
+ "/.dockerenv": "file", |
|
25 |
+ "/etc/resolv.conf": "file", |
|
26 |
+ "/etc/hosts": "file", |
|
27 |
+ "/etc/hostname": "file", |
|
28 |
+ "/dev/console": "file", |
|
29 |
+ "/etc/mtab": "/proc/mounts", |
|
30 |
+ } { |
|
31 |
+ parts := strings.Split(pth, "/") |
|
32 |
+ prev := "/" |
|
33 |
+ for _, p := range parts[1:] { |
|
34 |
+ prev = filepath.Join(prev, p) |
|
35 |
+ syscall.Unlink(filepath.Join(initLayer, prev)) |
|
36 |
+ } |
|
37 |
+ |
|
38 |
+ if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { |
|
39 |
+ if os.IsNotExist(err) { |
|
40 |
+ if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { |
|
41 |
+ return err |
|
42 |
+ } |
|
43 |
+ switch typ { |
|
44 |
+ case "dir": |
|
45 |
+ if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { |
|
46 |
+ return err |
|
47 |
+ } |
|
48 |
+ case "file": |
|
49 |
+ f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) |
|
50 |
+ if err != nil { |
|
51 |
+ return err |
|
52 |
+ } |
|
53 |
+ f.Chown(rootUID, rootGID) |
|
54 |
+ f.Close() |
|
55 |
+ default: |
|
56 |
+ if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { |
|
57 |
+ return err |
|
58 |
+ } |
|
59 |
+ } |
|
60 |
+ } else { |
|
61 |
+ return err |
|
62 |
+ } |
|
63 |
+ } |
|
64 |
+ } |
|
65 |
+ |
|
66 |
+ // Layer is ready to use, if it wasn't before. |
|
67 |
+ return nil |
|
68 |
+} |
0 | 69 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,13 @@ |
0 |
+// +build windows |
|
1 |
+ |
|
2 |
+package initlayer |
|
3 |
+ |
|
4 |
+// Setup populates a directory with mountpoints suitable |
|
5 |
+// for bind-mounting dockerinit into the container. The mountpoint is simply an |
|
6 |
+// empty file at /.dockerinit |
|
7 |
+// |
|
8 |
+// This extra layer is used by all containers as the top-most ro layer. It protects |
|
9 |
+// the container from unwanted side-effects on the rw layer. |
|
10 |
+func Setup(initLayer string, rootUID, rootGID int) error { |
|
11 |
+ return nil |
|
12 |
+} |
... | ... |
@@ -3,6 +3,7 @@ package metadata |
3 | 3 |
import ( |
4 | 4 |
"github.com/docker/docker/image/v1" |
5 | 5 |
"github.com/docker/docker/layer" |
6 |
+ "github.com/pkg/errors" |
|
6 | 7 |
) |
7 | 8 |
|
8 | 9 |
// V1IDService maps v1 IDs to layers on disk. |
... | ... |
@@ -24,6 +25,9 @@ func (idserv *V1IDService) namespace() string { |
24 | 24 |
|
25 | 25 |
// Get finds a layer by its V1 ID. |
26 | 26 |
func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { |
27 |
+ if idserv.store == nil { |
|
28 |
+ return "", errors.New("no v1IDService storage") |
|
29 |
+ } |
|
27 | 30 |
if err := v1.ValidateID(v1ID); err != nil { |
28 | 31 |
return layer.DiffID(""), err |
29 | 32 |
} |
... | ... |
@@ -37,6 +41,9 @@ func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { |
37 | 37 |
|
38 | 38 |
// Set associates an image with a V1 ID. |
39 | 39 |
func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { |
40 |
+ if idserv.store == nil { |
|
41 |
+ return nil |
|
42 |
+ } |
|
40 | 43 |
if err := v1.ValidateID(v1ID); err != nil { |
41 | 44 |
return err |
42 | 45 |
} |
... | ... |
@@ -5,6 +5,7 @@ import ( |
5 | 5 |
"crypto/sha256" |
6 | 6 |
"encoding/hex" |
7 | 7 |
"encoding/json" |
8 |
+ "errors" |
|
8 | 9 |
|
9 | 10 |
"github.com/docker/distribution/digest" |
10 | 11 |
"github.com/docker/docker/api/types" |
... | ... |
@@ -125,6 +126,9 @@ func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { |
125 | 125 |
|
126 | 126 |
// GetMetadata finds the metadata associated with a layer DiffID. |
127 | 127 |
func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { |
128 |
+ if serv.store == nil { |
|
129 |
+ return nil, errors.New("no metadata storage") |
|
130 |
+ } |
|
128 | 131 |
jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) |
129 | 132 |
if err != nil { |
130 | 133 |
return nil, err |
... | ... |
@@ -140,6 +144,9 @@ func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, e |
140 | 140 |
|
141 | 141 |
// GetDiffID finds a layer DiffID from a digest. |
142 | 142 |
func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { |
143 |
+ if serv.store == nil { |
|
144 |
+ return layer.DiffID(""), errors.New("no metadata storage") |
|
145 |
+ } |
|
143 | 146 |
diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) |
144 | 147 |
if err != nil { |
145 | 148 |
return layer.DiffID(""), err |
... | ... |
@@ -151,6 +158,12 @@ func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, erro |
151 | 151 |
// Add associates metadata with a layer DiffID. If too many metadata entries are |
152 | 152 |
// present, the oldest one is dropped. |
153 | 153 |
func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { |
154 |
+ if serv.store == nil { |
|
155 |
+ // Support a service which has no backend storage, in this case |
|
156 |
+ // an add becomes a no-op. |
|
157 |
+ // TODO: implement in memory storage |
|
158 |
+ return nil |
|
159 |
+ } |
|
154 | 160 |
oldMetadata, err := serv.GetMetadata(diffID) |
155 | 161 |
if err != nil { |
156 | 162 |
oldMetadata = nil |
... | ... |
@@ -192,6 +205,12 @@ func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, me |
192 | 192 |
|
193 | 193 |
// Remove unassociates a metadata entry from a layer DiffID. |
194 | 194 |
func (serv *v2MetadataService) Remove(metadata V2Metadata) error { |
195 |
+ if serv.store == nil { |
|
196 |
+ // Support a service which has no backend storage, in this case |
|
197 |
+ // an remove becomes a no-op. |
|
198 |
+ // TODO: implement in memory storage |
|
199 |
+ return nil |
|
200 |
+ } |
|
195 | 201 |
diffID, err := serv.GetDiffID(metadata.Digest) |
196 | 202 |
if err != nil { |
197 | 203 |
return err |
... | ... |
@@ -102,11 +102,7 @@ func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, end |
102 | 102 |
scope := auth.RepositoryScope{ |
103 | 103 |
Repository: repoName, |
104 | 104 |
Actions: actions, |
105 |
- } |
|
106 |
- |
|
107 |
- // Keep image repositories blank for scope compatibility |
|
108 |
- if repoInfo.Class != "image" { |
|
109 |
- scope.Class = repoInfo.Class |
|
105 |
+ Class: repoInfo.Class, |
|
110 | 106 |
} |
111 | 107 |
|
112 | 108 |
creds := registry.NewStaticCredentialStore(authConfig) |
113 | 109 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,44 @@ |
0 |
+package utils |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "io" |
|
4 |
+ "net" |
|
5 |
+ "os" |
|
6 |
+ "syscall" |
|
7 |
+ |
|
8 |
+ "github.com/Sirupsen/logrus" |
|
9 |
+ "github.com/docker/docker/pkg/progress" |
|
10 |
+ "github.com/docker/docker/pkg/streamformatter" |
|
11 |
+) |
|
12 |
+ |
|
13 |
+// WriteDistributionProgress is a helper for writing progress from chan to JSON |
|
14 |
+// stream with an optional cancel function. |
|
15 |
+func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { |
|
16 |
+ progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) |
|
17 |
+ operationCancelled := false |
|
18 |
+ |
|
19 |
+ for prog := range progressChan { |
|
20 |
+ if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { |
|
21 |
+ // don't log broken pipe errors as this is the normal case when a client aborts |
|
22 |
+ if isBrokenPipe(err) { |
|
23 |
+ logrus.Info("Pull session cancelled") |
|
24 |
+ } else { |
|
25 |
+ logrus.Errorf("error writing progress to client: %v", err) |
|
26 |
+ } |
|
27 |
+ cancelFunc() |
|
28 |
+ operationCancelled = true |
|
29 |
+ // Don't return, because we need to continue draining |
|
30 |
+ // progressChan until it's closed to avoid a deadlock. |
|
31 |
+ } |
|
32 |
+ } |
|
33 |
+} |
|
34 |
+ |
|
35 |
+func isBrokenPipe(e error) bool { |
|
36 |
+ if netErr, ok := e.(*net.OpError); ok { |
|
37 |
+ e = netErr.Err |
|
38 |
+ if sysErr, ok := netErr.Err.(*os.SyscallError); ok { |
|
39 |
+ e = sysErr.Err |
|
40 |
+ } |
|
41 |
+ } |
|
42 |
+ return e == syscall.EPIPE |
|
43 |
+} |
... | ... |
@@ -109,93 +109,6 @@ commands and options, see the |
109 | 109 |
|
110 | 110 |
## Developing a plugin |
111 | 111 |
|
112 |
-Currently, there are no CLI commands available to help you develop a plugin. |
|
113 |
-This is expected to change in a future release. The manual process for creating |
|
114 |
-plugins is described in this section. |
|
115 |
- |
|
116 |
-### Plugin location and files |
|
117 |
- |
|
118 |
-Plugins are stored in `/var/lib/docker/plugins`. The `plugins.json` file lists |
|
119 |
-each plugin's configuration, and each plugin is stored in a directory with a |
|
120 |
-unique identifier. |
|
121 |
- |
|
122 |
-```bash |
|
123 |
-# ls -la /var/lib/docker/plugins |
|
124 |
-total 20 |
|
125 |
-drwx------ 4 root root 4096 Aug 8 18:03 . |
|
126 |
-drwx--x--x 12 root root 4096 Aug 8 17:53 .. |
|
127 |
-drwxr-xr-x 3 root root 4096 Aug 8 17:56 cd851ce43a403 |
|
128 |
--rw------- 1 root root 2107 Aug 8 18:03 plugins.json |
|
129 |
-``` |
|
130 |
- |
|
131 |
-### Format of plugins.json |
|
132 |
- |
|
133 |
-The `plugins.json` is an inventory of all installed plugins. This example shows |
|
134 |
-a `plugins.json` with a single plugin installed. |
|
135 |
- |
|
136 |
-```json |
|
137 |
-# cat plugins.json |
|
138 |
-{ |
|
139 |
- "cd851ce43a403": { |
|
140 |
- "plugin": { |
|
141 |
- "Config": { |
|
142 |
- "Args": { |
|
143 |
- "Value": null, |
|
144 |
- "Settable": null, |
|
145 |
- "Description": "", |
|
146 |
- "Name": "" |
|
147 |
- }, |
|
148 |
- "Env": null, |
|
149 |
- "Devices": null, |
|
150 |
- "Mounts": null, |
|
151 |
- "Capabilities": [ |
|
152 |
- "CAP_SYS_ADMIN" |
|
153 |
- ], |
|
154 |
- "Description": "sshFS plugin for Docker", |
|
155 |
- "Documentation": "https://docs.docker.com/engine/extend/plugins/", |
|
156 |
- "Interface": { |
|
157 |
- "Socket": "sshfs.sock", |
|
158 |
- "Types": [ |
|
159 |
- "docker.volumedriver/1.0" |
|
160 |
- ] |
|
161 |
- }, |
|
162 |
- "Entrypoint": [ |
|
163 |
- "/go/bin/docker-volume-sshfs" |
|
164 |
- ], |
|
165 |
- "Workdir": "", |
|
166 |
- "User": {}, |
|
167 |
- "Network": { |
|
168 |
- "Type": "host" |
|
169 |
- } |
|
170 |
- }, |
|
171 |
- "Config": { |
|
172 |
- "Devices": null, |
|
173 |
- "Args": null, |
|
174 |
- "Env": [], |
|
175 |
- "Mounts": [] |
|
176 |
- }, |
|
177 |
- "Active": true, |
|
178 |
- "Tag": "latest", |
|
179 |
- "Name": "vieux/sshfs", |
|
180 |
- "Id": "cd851ce43a403" |
|
181 |
- } |
|
182 |
- } |
|
183 |
-} |
|
184 |
-``` |
|
185 |
- |
|
186 |
-### Contents of a plugin directory |
|
187 |
- |
|
188 |
-Each directory within `/var/lib/docker/plugins/` contains a `rootfs` directory |
|
189 |
-and two JSON files. |
|
190 |
- |
|
191 |
-```bash |
|
192 |
-# ls -la /var/lib/docker/plugins/cd851ce43a403 |
|
193 |
-total 12 |
|
194 |
-drwx------ 19 root root 4096 Aug 8 17:56 rootfs |
|
195 |
--rw-r--r-- 1 root root 50 Aug 8 17:56 plugin-settings.json |
|
196 |
--rw------- 1 root root 347 Aug 8 17:56 config.json |
|
197 |
-``` |
|
198 |
- |
|
199 | 112 |
#### The rootfs directory |
200 | 113 |
The `rootfs` directory represents the root filesystem of the plugin. In this |
201 | 114 |
example, it was created from a Dockerfile: |
... | ... |
@@ -206,20 +119,17 @@ plugin's filesystem for docker to communicate with the plugin. |
206 | 206 |
```bash |
207 | 207 |
$ git clone https://github.com/vieux/docker-volume-sshfs |
208 | 208 |
$ cd docker-volume-sshfs |
209 |
-$ docker build -t rootfs . |
|
210 |
-$ id=$(docker create rootfs true) # id was cd851ce43a403 when the image was created |
|
211 |
-$ sudo mkdir -p /var/lib/docker/plugins/$id/rootfs |
|
212 |
-$ sudo docker export "$id" | sudo tar -x -C /var/lib/docker/plugins/$id/rootfs |
|
213 |
-$ sudo chgrp -R docker /var/lib/docker/plugins/ |
|
209 |
+$ docker build -t rootfsimage . |
|
210 |
+$ id=$(docker create rootfsimage true) # id was cd851ce43a403 when the image was created |
|
211 |
+$ sudo mkdir -p myplugin/rootfs |
|
212 |
+$ sudo docker export "$id" | sudo tar -x -C myplugin/rootfs |
|
214 | 213 |
$ docker rm -vf "$id" |
215 |
-$ docker rmi rootfs |
|
214 |
+$ docker rmi rootfsimage |
|
216 | 215 |
``` |
217 | 216 |
|
218 |
-#### The config.json and plugin-settings.json files |
|
217 |
+#### The config.json file |
|
219 | 218 |
|
220 |
-The `config.json` file describes the plugin. The `plugin-settings.json` file |
|
221 |
-contains runtime parameters and is only required if your plugin has runtime |
|
222 |
-parameters. [See the Plugins Config reference](config.md). |
|
219 |
+The `config.json` file describes the plugin. See the [plugins config reference](config.md). |
|
223 | 220 |
|
224 | 221 |
Consider the following `config.json` file. |
225 | 222 |
|
... | ... |
@@ -242,56 +152,15 @@ Consider the following `config.json` file. |
242 | 242 |
This plugin is a volume driver. It requires a `host` network and the |
243 | 243 |
`CAP_SYS_ADMIN` capability. It depends upon the `/go/bin/docker-volume-sshfs` |
244 | 244 |
entrypoint and uses the `/run/docker/plugins/sshfs.sock` socket to communicate |
245 |
-with Docker Engine. |
|
246 |
- |
|
247 |
- |
|
248 |
-Consider the following `plugin-settings.json` file. |
|
249 |
- |
|
250 |
-```json |
|
251 |
-{ |
|
252 |
- "Devices": null, |
|
253 |
- "Args": null, |
|
254 |
- "Env": [], |
|
255 |
- "Mounts": [] |
|
256 |
-} |
|
257 |
-``` |
|
258 |
- |
|
259 |
-This plugin has no runtime parameters. |
|
260 |
- |
|
261 |
-Each of these JSON files is included as part of `plugins.json`, as you can see |
|
262 |
-by looking back at the example above. After a plugin is installed, `config.json` |
|
263 |
-is read-only, but `plugin-settings.json` is read-write, and includes all runtime |
|
264 |
-configuration options for the plugin. |
|
245 |
+with Docker Engine. This plugin has no runtime parameters. |
|
265 | 246 |
|
266 | 247 |
### Creating the plugin |
267 | 248 |
|
268 |
-Follow these steps to create a plugin: |
|
269 |
- |
|
270 |
-1. Choose a name for the plugin. Plugin name uses the same format as images, |
|
271 |
- for example: `<repo_name>/<name>`. |
|
272 |
- |
|
273 |
-2. Create a `rootfs` and export it to `/var/lib/docker/plugins/$id/rootfs` |
|
274 |
- using `docker export`. See [The rootfs directory](#the-rootfs-directory) for |
|
275 |
- an example of creating a `rootfs`. |
|
276 |
- |
|
277 |
-3. Create a `config.json` file in `/var/lib/docker/plugins/$id/`. |
|
278 |
- |
|
279 |
-4. Create a `plugin-settings.json` file if needed. |
|
280 |
- |
|
281 |
-5. Create or add a section to `/var/lib/docker/plugins/plugins.json`. Use |
|
282 |
- `<user>/<name>` as “Name” and `$id` as “Id”. |
|
283 |
- |
|
284 |
-6. Restart the Docker Engine service. |
|
285 |
- |
|
286 |
-7. Run `docker plugin ls`. |
|
287 |
- * If your plugin is enabled, you can push it to the |
|
288 |
- registry. |
|
289 |
- * If the plugin is not listed or is disabled, something went wrong. |
|
290 |
- Check the daemon logs for errors. |
|
291 |
- |
|
292 |
-8. If you are not already logged in, use `docker login` to authenticate against |
|
293 |
- the registry so that you can push to it. |
|
294 |
- |
|
295 |
-9. Run `docker plugin push <repo_name>/<name>` to push the plugin. |
|
249 |
+A new plugin can be created by running |
|
250 |
+`docker plugin create <plugin-name> ./path/to/plugin/data` where the plugin |
|
251 |
+data contains a plugin configuration file `config.json` and a root filesystem |
|
252 |
+in subdirectory `rootfs`. |
|
296 | 253 |
|
297 |
-The plugin can now be used by any user with access to your registry. |
|
254 |
+After that the plugin `<plugin-name>` will show up in `docker plugin ls`. |
|
255 |
+Plugins can be pushed to remote registries with |
|
256 |
+`docker plugin push <plugin-name>`. |
|
298 | 257 |
\ No newline at end of file |
... | ... |
@@ -16,9 +16,9 @@ keywords: "plugin, create" |
16 | 16 |
# plugin create |
17 | 17 |
|
18 | 18 |
```markdown |
19 |
-Usage: docker plugin create [OPTIONS] PLUGIN[:tag] PATH-TO-ROOTFS(rootfs + config.json) |
|
19 |
+Usage: docker plugin create [OPTIONS] PLUGIN PLUGIN-DATA-DIR |
|
20 | 20 |
|
21 |
-Create a plugin from a rootfs and configuration |
|
21 |
+Create a plugin from a rootfs and configuration. Plugin data directory must contain config.json and rootfs directory. |
|
22 | 22 |
|
23 | 23 |
Options: |
24 | 24 |
--compress Compress the context using gzip |
... | ... |
@@ -36,8 +36,7 @@ $ docker plugin inspect tiborvass/no-remove:latest |
36 | 36 |
```JSON |
37 | 37 |
{ |
38 | 38 |
"Id": "8c74c978c434745c3ade82f1bc0acf38d04990eaf494fa507c16d9f1daa99c21", |
39 |
- "Name": "tiborvass/no-remove", |
|
40 |
- "Tag": "latest", |
|
39 |
+ "Name": "tiborvass/no-remove:latest", |
|
41 | 40 |
"Enabled": true, |
42 | 41 |
"Config": { |
43 | 42 |
"Mounts": [ |
... | ... |
@@ -21,6 +21,7 @@ Usage: docker plugin install [OPTIONS] PLUGIN [KEY=VALUE...] |
21 | 21 |
Install a plugin |
22 | 22 |
|
23 | 23 |
Options: |
24 |
+ --alias string Local name for plugin |
|
24 | 25 |
--disable Do not enable the plugin on install |
25 | 26 |
--grant-all-permissions Grant all permissions necessary to run the plugin |
26 | 27 |
--help Print usage |
... | ... |
@@ -12,10 +12,10 @@ import ( |
12 | 12 |
) |
13 | 13 |
|
14 | 14 |
var ( |
15 |
- authzPluginName = "riyaz/authz-no-volume-plugin" |
|
15 |
+ authzPluginName = "tonistiigi/authz-no-volume-plugin" |
|
16 | 16 |
authzPluginTag = "latest" |
17 | 17 |
authzPluginNameWithTag = authzPluginName + ":" + authzPluginTag |
18 |
- authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest" |
|
18 |
+ authzPluginBadManifestName = "tonistiigi/authz-plugin-bad-manifest" |
|
19 | 19 |
nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin" |
20 | 20 |
) |
21 | 21 |
|
... | ... |
@@ -425,20 +425,20 @@ func (s *DockerSuite) TestInspectPlugin(c *check.C) { |
425 | 425 |
|
426 | 426 |
out, _, err := dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) |
427 | 427 |
c.Assert(err, checker.IsNil) |
428 |
- c.Assert(strings.TrimSpace(out), checker.Equals, pName) |
|
428 |
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) |
|
429 | 429 |
|
430 | 430 |
out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) |
431 | 431 |
c.Assert(err, checker.IsNil) |
432 |
- c.Assert(strings.TrimSpace(out), checker.Equals, pName) |
|
432 |
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) |
|
433 | 433 |
|
434 | 434 |
// Even without tag the inspect still work |
435 |
- out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pName) |
|
435 |
+ out, _, err = dockerCmdWithError("inspect", "--type", "plugin", "--format", "{{.Name}}", pNameWithTag) |
|
436 | 436 |
c.Assert(err, checker.IsNil) |
437 |
- c.Assert(strings.TrimSpace(out), checker.Equals, pName) |
|
437 |
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) |
|
438 | 438 |
|
439 |
- out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pName) |
|
439 |
+ out, _, err = dockerCmdWithError("inspect", "--format", "{{.Name}}", pNameWithTag) |
|
440 | 440 |
c.Assert(err, checker.IsNil) |
441 |
- c.Assert(strings.TrimSpace(out), checker.Equals, pName) |
|
441 |
+ c.Assert(strings.TrimSpace(out), checker.Equals, pNameWithTag) |
|
442 | 442 |
|
443 | 443 |
_, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) |
444 | 444 |
c.Assert(err, checker.IsNil) |
... | ... |
@@ -777,7 +777,7 @@ func (s *DockerNetworkSuite) TestDockerPluginV2NetworkDriver(c *check.C) { |
777 | 777 |
testRequires(c, DaemonIsLinux, IsAmd64, Network) |
778 | 778 |
|
779 | 779 |
var ( |
780 |
- npName = "tiborvass/test-docker-netplugin" |
|
780 |
+ npName = "tonistiigi/test-docker-netplugin" |
|
781 | 781 |
npTag = "latest" |
782 | 782 |
npNameWithTag = npName + ":" + npTag |
783 | 783 |
) |
... | ... |
@@ -1,6 +1,8 @@ |
1 | 1 |
package main |
2 | 2 |
|
3 | 3 |
import ( |
4 |
+ "fmt" |
|
5 |
+ |
|
4 | 6 |
"github.com/docker/docker/pkg/integration/checker" |
5 | 7 |
"github.com/go-check/check" |
6 | 8 |
|
... | ... |
@@ -12,7 +14,7 @@ import ( |
12 | 12 |
|
13 | 13 |
var ( |
14 | 14 |
pluginProcessName = "sample-volume-plugin" |
15 |
- pName = "tiborvass/sample-volume-plugin" |
|
15 |
+ pName = "tonistiigi/sample-volume-plugin" |
|
16 | 16 |
pTag = "latest" |
17 | 17 |
pNameWithTag = pName + ":" + pTag |
18 | 18 |
) |
... | ... |
@@ -139,11 +141,18 @@ func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { |
139 | 139 |
c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") |
140 | 140 |
} |
141 | 141 |
|
142 |
-func (s *DockerSuite) TestPluginInstallImage(c *check.C) { |
|
143 |
- testRequires(c, DaemonIsLinux, IsAmd64, Network) |
|
144 |
- out, _, err := dockerCmdWithError("plugin", "install", "redis") |
|
142 |
+func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { |
|
143 |
+ testRequires(c, DaemonIsLinux, IsAmd64) |
|
144 |
+ |
|
145 |
+ repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) |
|
146 |
+ // tag the image to upload it to the private registry |
|
147 |
+ dockerCmd(c, "tag", "busybox", repoName) |
|
148 |
+ // push the image to the registry |
|
149 |
+ dockerCmd(c, "push", repoName) |
|
150 |
+ |
|
151 |
+ out, _, err := dockerCmdWithError("plugin", "install", repoName) |
|
145 | 152 |
c.Assert(err, checker.NotNil) |
146 |
- c.Assert(out, checker.Contains, "content is not a plugin") |
|
153 |
+ c.Assert(out, checker.Contains, "target is image") |
|
147 | 154 |
} |
148 | 155 |
|
149 | 156 |
func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { |
... | ... |
@@ -179,6 +188,9 @@ func (s *DockerSuite) TestPluginCreate(c *check.C) { |
179 | 179 |
err = ioutil.WriteFile(filepath.Join(temp, "config.json"), []byte(data), 0644) |
180 | 180 |
c.Assert(err, checker.IsNil) |
181 | 181 |
|
182 |
+ err = os.MkdirAll(filepath.Join(temp, "rootfs"), 0700) |
|
183 |
+ c.Assert(err, checker.IsNil) |
|
184 |
+ |
|
182 | 185 |
out, _, err := dockerCmdWithError("plugin", "create", name, temp) |
183 | 186 |
c.Assert(err, checker.IsNil) |
184 | 187 |
c.Assert(out, checker.Contains, name) |
... | ... |
@@ -31,7 +31,7 @@ import ( |
31 | 31 |
icmd "github.com/docker/docker/pkg/integration/cmd" |
32 | 32 |
"github.com/docker/docker/pkg/ioutils" |
33 | 33 |
"github.com/docker/docker/pkg/stringutils" |
34 |
- "github.com/docker/go-units" |
|
34 |
+ units "github.com/docker/go-units" |
|
35 | 35 |
"github.com/go-check/check" |
36 | 36 |
) |
37 | 37 |
|
... | ... |
@@ -250,11 +250,7 @@ func deleteAllPlugins(c *check.C) { |
250 | 250 |
var errs []string |
251 | 251 |
for _, p := range plugins { |
252 | 252 |
pluginName := p.Name |
253 |
- tag := p.Tag |
|
254 |
- if tag == "" { |
|
255 |
- tag = "latest" |
|
256 |
- } |
|
257 |
- status, b, err := sockRequest("DELETE", "/plugins/"+pluginName+":"+tag+"?force=1", nil) |
|
253 |
+ status, b, err := sockRequest("DELETE", "/plugins/"+pluginName+"?force=1", nil) |
|
258 | 254 |
if err != nil { |
259 | 255 |
errs = append(errs, err.Error()) |
260 | 256 |
continue |
... | ... |
@@ -44,6 +44,17 @@ func ChanOutput(progressChan chan<- Progress) Output { |
44 | 44 |
return chanOutput(progressChan) |
45 | 45 |
} |
46 | 46 |
|
47 |
+type discardOutput struct{} |
|
48 |
+ |
|
49 |
+func (discardOutput) WriteProgress(Progress) error { |
|
50 |
+ return nil |
|
51 |
+} |
|
52 |
+ |
|
53 |
+// DiscardOutput returns an Output that discards progress |
|
54 |
+func DiscardOutput() Output { |
|
55 |
+ return discardOutput{} |
|
56 |
+} |
|
57 |
+ |
|
47 | 58 |
// Update is a convenience function to write a progress update to the channel. |
48 | 59 |
func Update(out Output, id, action string) { |
49 | 60 |
out.WriteProgress(Progress{ID: id, Action: action}) |
... | ... |
@@ -3,37 +3,39 @@ |
3 | 3 |
package plugin |
4 | 4 |
|
5 | 5 |
import ( |
6 |
- "bytes" |
|
6 |
+ "archive/tar" |
|
7 |
+ "compress/gzip" |
|
7 | 8 |
"encoding/json" |
8 | 9 |
"fmt" |
9 | 10 |
"io" |
10 | 11 |
"io/ioutil" |
11 | 12 |
"net/http" |
12 | 13 |
"os" |
14 |
+ "path" |
|
13 | 15 |
"path/filepath" |
14 |
- "reflect" |
|
15 |
- "regexp" |
|
16 |
+ "strings" |
|
16 | 17 |
|
17 | 18 |
"github.com/Sirupsen/logrus" |
19 |
+ "github.com/docker/distribution/digest" |
|
20 |
+ "github.com/docker/distribution/manifest/schema2" |
|
18 | 21 |
"github.com/docker/docker/api/types" |
19 |
- "github.com/docker/docker/pkg/archive" |
|
22 |
+ "github.com/docker/docker/distribution" |
|
23 |
+ progressutils "github.com/docker/docker/distribution/utils" |
|
24 |
+ "github.com/docker/docker/distribution/xfer" |
|
25 |
+ "github.com/docker/docker/image" |
|
26 |
+ "github.com/docker/docker/layer" |
|
20 | 27 |
"github.com/docker/docker/pkg/chrootarchive" |
21 |
- "github.com/docker/docker/pkg/stringid" |
|
22 |
- "github.com/docker/docker/plugin/distribution" |
|
28 |
+ "github.com/docker/docker/pkg/pools" |
|
29 |
+ "github.com/docker/docker/pkg/progress" |
|
23 | 30 |
"github.com/docker/docker/plugin/v2" |
24 | 31 |
"github.com/docker/docker/reference" |
25 | 32 |
"github.com/pkg/errors" |
26 | 33 |
"golang.org/x/net/context" |
27 | 34 |
) |
28 | 35 |
|
29 |
-var ( |
|
30 |
- validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) |
|
31 |
- validPartialID = regexp.MustCompile(`^([a-f0-9]{1,64})$`) |
|
32 |
-) |
|
33 |
- |
|
34 | 36 |
// Disable deactivates a plugin. This means resources (volumes, networks) cant use them. |
35 |
-func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error { |
|
36 |
- p, err := pm.pluginStore.GetByName(name) |
|
37 |
+func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error { |
|
38 |
+ p, err := pm.config.Store.GetV2Plugin(refOrID) |
|
37 | 39 |
if err != nil { |
38 | 40 |
return err |
39 | 41 |
} |
... | ... |
@@ -48,13 +50,13 @@ func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error |
48 | 48 |
if err := pm.disable(p, c); err != nil { |
49 | 49 |
return err |
50 | 50 |
} |
51 |
- pm.pluginEventLogger(p.GetID(), name, "disable") |
|
51 |
+ pm.config.LogPluginEvent(p.GetID(), refOrID, "disable") |
|
52 | 52 |
return nil |
53 | 53 |
} |
54 | 54 |
|
55 | 55 |
// Enable activates a plugin, which implies that they are ready to be used by containers. |
56 |
-func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { |
|
57 |
- p, err := pm.pluginStore.GetByName(name) |
|
56 |
+func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error { |
|
57 |
+ p, err := pm.config.Store.GetV2Plugin(refOrID) |
|
58 | 58 |
if err != nil { |
59 | 59 |
return err |
60 | 60 |
} |
... | ... |
@@ -63,71 +65,74 @@ func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { |
63 | 63 |
if err := pm.enable(p, c, false); err != nil { |
64 | 64 |
return err |
65 | 65 |
} |
66 |
- pm.pluginEventLogger(p.GetID(), name, "enable") |
|
66 |
+ pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") |
|
67 | 67 |
return nil |
68 | 68 |
} |
69 | 69 |
|
70 | 70 |
// Inspect examines a plugin config |
71 |
-func (pm *Manager) Inspect(refOrID string) (tp types.Plugin, err error) { |
|
72 |
- // Match on full ID |
|
73 |
- if validFullID.MatchString(refOrID) { |
|
74 |
- p, err := pm.pluginStore.GetByID(refOrID) |
|
75 |
- if err == nil { |
|
76 |
- return p.PluginObj, nil |
|
77 |
- } |
|
71 |
+func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { |
|
72 |
+ p, err := pm.config.Store.GetV2Plugin(refOrID) |
|
73 |
+ if err != nil { |
|
74 |
+ return nil, err |
|
78 | 75 |
} |
79 | 76 |
|
80 |
- // Match on full name |
|
81 |
- if pluginName, err := getPluginName(refOrID); err == nil { |
|
82 |
- if p, err := pm.pluginStore.GetByName(pluginName); err == nil { |
|
83 |
- return p.PluginObj, nil |
|
84 |
- } |
|
85 |
- } |
|
77 |
+ return &p.PluginObj, nil |
|
78 |
+} |
|
86 | 79 |
|
87 |
- // Match on partial ID |
|
88 |
- if validPartialID.MatchString(refOrID) { |
|
89 |
- p, err := pm.pluginStore.Search(refOrID) |
|
90 |
- if err == nil { |
|
91 |
- return p.PluginObj, nil |
|
92 |
- } |
|
93 |
- return tp, err |
|
80 |
+func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error { |
|
81 |
+ if outStream != nil { |
|
82 |
+ // Include a buffer so that slow client connections don't affect |
|
83 |
+ // transfer performance. |
|
84 |
+ progressChan := make(chan progress.Progress, 100) |
|
85 |
+ |
|
86 |
+ writesDone := make(chan struct{}) |
|
87 |
+ |
|
88 |
+ defer func() { |
|
89 |
+ close(progressChan) |
|
90 |
+ <-writesDone |
|
91 |
+ }() |
|
92 |
+ |
|
93 |
+ var cancelFunc context.CancelFunc |
|
94 |
+ ctx, cancelFunc = context.WithCancel(ctx) |
|
95 |
+ |
|
96 |
+ go func() { |
|
97 |
+ progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) |
|
98 |
+ close(writesDone) |
|
99 |
+ }() |
|
100 |
+ |
|
101 |
+ config.ProgressOutput = progress.ChanOutput(progressChan) |
|
102 |
+ } else { |
|
103 |
+ config.ProgressOutput = progress.DiscardOutput() |
|
94 | 104 |
} |
105 |
+ return distribution.Pull(ctx, ref, config) |
|
106 |
+} |
|
95 | 107 |
|
96 |
- return tp, fmt.Errorf("no such plugin name or ID associated with %q", refOrID) |
|
108 |
+type tempConfigStore struct { |
|
109 |
+ config []byte |
|
110 |
+ configDigest digest.Digest |
|
97 | 111 |
} |
98 | 112 |
|
99 |
-func (pm *Manager) pull(name string, metaHeader http.Header, authConfig *types.AuthConfig) (reference.Named, distribution.PullData, error) { |
|
100 |
- ref, err := distribution.GetRef(name) |
|
101 |
- if err != nil { |
|
102 |
- logrus.Debugf("error in distribution.GetRef: %v", err) |
|
103 |
- return nil, nil, err |
|
104 |
- } |
|
105 |
- name = ref.String() |
|
113 |
+func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { |
|
114 |
+ dgst := digest.FromBytes(c) |
|
106 | 115 |
|
107 |
- if p, _ := pm.pluginStore.GetByName(name); p != nil { |
|
108 |
- logrus.Debug("plugin already exists") |
|
109 |
- return nil, nil, fmt.Errorf("%s exists", name) |
|
110 |
- } |
|
116 |
+ s.config = c |
|
117 |
+ s.configDigest = dgst |
|
111 | 118 |
|
112 |
- pd, err := distribution.Pull(ref, pm.registryService, metaHeader, authConfig) |
|
113 |
- if err != nil { |
|
114 |
- logrus.Debugf("error in distribution.Pull(): %v", err) |
|
115 |
- return nil, nil, err |
|
116 |
- } |
|
117 |
- return ref, pd, nil |
|
119 |
+ return dgst, nil |
|
118 | 120 |
} |
119 | 121 |
|
120 |
-func computePrivileges(pd distribution.PullData) (types.PluginPrivileges, error) { |
|
121 |
- config, err := pd.Config() |
|
122 |
- if err != nil { |
|
123 |
- return nil, err |
|
122 |
+func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { |
|
123 |
+ if d != s.configDigest { |
|
124 |
+ return nil, digest.ErrDigestNotFound |
|
124 | 125 |
} |
126 |
+ return s.config, nil |
|
127 |
+} |
|
125 | 128 |
|
126 |
- var c types.PluginConfig |
|
127 |
- if err := json.Unmarshal(config, &c); err != nil { |
|
128 |
- return nil, err |
|
129 |
- } |
|
129 |
+func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { |
|
130 |
+ return configToRootFS(c) |
|
131 |
+} |
|
130 | 132 |
|
133 |
+func computePrivileges(c types.PluginConfig) (types.PluginPrivileges, error) { |
|
131 | 134 |
var privileges types.PluginPrivileges |
132 | 135 |
if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { |
133 | 136 |
privileges = append(privileges, types.PluginPrivilege{ |
... | ... |
@@ -173,67 +178,89 @@ func computePrivileges(pd distribution.PullData) (types.PluginPrivileges, error) |
173 | 173 |
} |
174 | 174 |
|
175 | 175 |
// Privileges pulls a plugin config and computes the privileges required to install it. |
176 |
-func (pm *Manager) Privileges(name string, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { |
|
177 |
- _, pd, err := pm.pull(name, metaHeader, authConfig) |
|
178 |
- if err != nil { |
|
176 |
+func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { |
|
177 |
+ // create image store instance |
|
178 |
+ cs := &tempConfigStore{} |
|
179 |
+ |
|
180 |
+ // DownloadManager not defined because only pulling configuration. |
|
181 |
+ pluginPullConfig := &distribution.ImagePullConfig{ |
|
182 |
+ Config: distribution.Config{ |
|
183 |
+ MetaHeaders: metaHeader, |
|
184 |
+ AuthConfig: authConfig, |
|
185 |
+ RegistryService: pm.config.RegistryService, |
|
186 |
+ ImageEventLogger: func(string, string, string) {}, |
|
187 |
+ ImageStore: cs, |
|
188 |
+ }, |
|
189 |
+ Schema2Types: distribution.PluginTypes, |
|
190 |
+ } |
|
191 |
+ |
|
192 |
+ if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil { |
|
179 | 193 |
return nil, err |
180 | 194 |
} |
181 |
- return computePrivileges(pd) |
|
195 |
+ |
|
196 |
+ if cs.config == nil { |
|
197 |
+ return nil, errors.New("no configuration pulled") |
|
198 |
+ } |
|
199 |
+ var config types.PluginConfig |
|
200 |
+ if err := json.Unmarshal(cs.config, &config); err != nil { |
|
201 |
+ return nil, err |
|
202 |
+ } |
|
203 |
+ |
|
204 |
+ return computePrivileges(config) |
|
182 | 205 |
} |
183 | 206 |
|
184 | 207 |
// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. |
185 |
-func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges) (err error) { |
|
186 |
- ref, pd, err := pm.pull(name, metaHeader, authConfig) |
|
208 |
+func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { |
|
209 |
+ pm.muGC.RLock() |
|
210 |
+ defer pm.muGC.RUnlock() |
|
211 |
+ |
|
212 |
+ // revalidate because Pull is public |
|
213 |
+ nameref, err := reference.ParseNamed(name) |
|
187 | 214 |
if err != nil { |
188 |
- return err |
|
215 |
+ return errors.Wrapf(err, "failed to parse %q", name) |
|
189 | 216 |
} |
217 |
+ name = reference.WithDefaultTag(nameref).String() |
|
190 | 218 |
|
191 |
- requiredPrivileges, err := computePrivileges(pd) |
|
192 |
- if err != nil { |
|
219 |
+ if err := pm.config.Store.validateName(name); err != nil { |
|
193 | 220 |
return err |
194 | 221 |
} |
195 | 222 |
|
196 |
- if !reflect.DeepEqual(privileges, requiredPrivileges) { |
|
197 |
- return errors.New("incorrect privileges") |
|
198 |
- } |
|
223 |
+ tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") |
|
224 |
+ defer os.RemoveAll(tmpRootFSDir) |
|
199 | 225 |
|
200 |
- pluginID := stringid.GenerateNonCryptoID() |
|
201 |
- pluginDir := filepath.Join(pm.libRoot, pluginID) |
|
202 |
- if err := os.MkdirAll(pluginDir, 0755); err != nil { |
|
203 |
- logrus.Debugf("error in MkdirAll: %v", err) |
|
204 |
- return err |
|
226 |
+ dm := &downloadManager{ |
|
227 |
+ tmpDir: tmpRootFSDir, |
|
228 |
+ blobStore: pm.blobStore, |
|
205 | 229 |
} |
206 | 230 |
|
207 |
- defer func() { |
|
208 |
- if err != nil { |
|
209 |
- if delErr := os.RemoveAll(pluginDir); delErr != nil { |
|
210 |
- logrus.Warnf("unable to remove %q from failed plugin pull: %v", pluginDir, delErr) |
|
211 |
- } |
|
212 |
- } |
|
213 |
- }() |
|
231 |
+ pluginPullConfig := &distribution.ImagePullConfig{ |
|
232 |
+ Config: distribution.Config{ |
|
233 |
+ MetaHeaders: metaHeader, |
|
234 |
+ AuthConfig: authConfig, |
|
235 |
+ RegistryService: pm.config.RegistryService, |
|
236 |
+ ImageEventLogger: pm.config.LogPluginEvent, |
|
237 |
+ ImageStore: dm, |
|
238 |
+ }, |
|
239 |
+ DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead |
|
240 |
+ Schema2Types: distribution.PluginTypes, |
|
241 |
+ } |
|
214 | 242 |
|
215 |
- err = distribution.WritePullData(pd, filepath.Join(pm.libRoot, pluginID), true) |
|
243 |
+ err = pm.pull(ctx, ref, pluginPullConfig, outStream) |
|
216 | 244 |
if err != nil { |
217 |
- logrus.Debugf("error in distribution.WritePullData(): %v", err) |
|
245 |
+ go pm.GC() |
|
218 | 246 |
return err |
219 | 247 |
} |
220 | 248 |
|
221 |
- tag := distribution.GetTag(ref) |
|
222 |
- p := v2.NewPlugin(ref.Name(), pluginID, pm.runRoot, pm.libRoot, tag) |
|
223 |
- err = p.InitPlugin() |
|
224 |
- if err != nil { |
|
249 |
+ if _, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil { |
|
225 | 250 |
return err |
226 | 251 |
} |
227 |
- pm.pluginStore.Add(p) |
|
228 |
- |
|
229 |
- pm.pluginEventLogger(pluginID, ref.String(), "pull") |
|
230 | 252 |
|
231 | 253 |
return nil |
232 | 254 |
} |
233 | 255 |
|
234 | 256 |
// List displays the list of plugins and associated metadata. |
235 | 257 |
func (pm *Manager) List() ([]types.Plugin, error) { |
236 |
- plugins := pm.pluginStore.GetAll() |
|
258 |
+ plugins := pm.config.Store.GetAll() |
|
237 | 259 |
out := make([]types.Plugin, 0, len(plugins)) |
238 | 260 |
for _, p := range plugins { |
239 | 261 |
out = append(out, p.PluginObj) |
... | ... |
@@ -242,38 +269,211 @@ func (pm *Manager) List() ([]types.Plugin, error) { |
242 | 242 |
} |
243 | 243 |
|
244 | 244 |
// Push pushes a plugin to the store. |
245 |
-func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error { |
|
246 |
- p, err := pm.pluginStore.GetByName(name) |
|
245 |
+func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error { |
|
246 |
+ p, err := pm.config.Store.GetV2Plugin(name) |
|
247 | 247 |
if err != nil { |
248 | 248 |
return err |
249 | 249 |
} |
250 |
- dest := filepath.Join(pm.libRoot, p.GetID()) |
|
251 |
- config, err := ioutil.ReadFile(filepath.Join(dest, "config.json")) |
|
250 |
+ |
|
251 |
+ ref, err := reference.ParseNamed(p.Name()) |
|
252 | 252 |
if err != nil { |
253 |
- return err |
|
253 |
+ return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name()) |
|
254 | 254 |
} |
255 | 255 |
|
256 |
- var dummy types.Plugin |
|
257 |
- err = json.Unmarshal(config, &dummy) |
|
258 |
- if err != nil { |
|
259 |
- return err |
|
256 |
+ var po progress.Output |
|
257 |
+ if outStream != nil { |
|
258 |
+ // Include a buffer so that slow client connections don't affect |
|
259 |
+ // transfer performance. |
|
260 |
+ progressChan := make(chan progress.Progress, 100) |
|
261 |
+ |
|
262 |
+ writesDone := make(chan struct{}) |
|
263 |
+ |
|
264 |
+ defer func() { |
|
265 |
+ close(progressChan) |
|
266 |
+ <-writesDone |
|
267 |
+ }() |
|
268 |
+ |
|
269 |
+ var cancelFunc context.CancelFunc |
|
270 |
+ ctx, cancelFunc = context.WithCancel(ctx) |
|
271 |
+ |
|
272 |
+ go func() { |
|
273 |
+ progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) |
|
274 |
+ close(writesDone) |
|
275 |
+ }() |
|
276 |
+ |
|
277 |
+ po = progress.ChanOutput(progressChan) |
|
278 |
+ } else { |
|
279 |
+ po = progress.DiscardOutput() |
|
280 |
+ } |
|
281 |
+ |
|
282 |
+ // TODO: replace these with manager |
|
283 |
+ is := &pluginConfigStore{ |
|
284 |
+ pm: pm, |
|
285 |
+ plugin: p, |
|
286 |
+ } |
|
287 |
+ ls := &pluginLayerProvider{ |
|
288 |
+ pm: pm, |
|
289 |
+ plugin: p, |
|
290 |
+ } |
|
291 |
+ rs := &pluginReference{ |
|
292 |
+ name: ref, |
|
293 |
+ pluginID: p.Config, |
|
260 | 294 |
} |
261 | 295 |
|
262 |
- rootfs, err := archive.Tar(p.Rootfs, archive.Gzip) |
|
296 |
+ uploadManager := xfer.NewLayerUploadManager(3) |
|
297 |
+ |
|
298 |
+ imagePushConfig := &distribution.ImagePushConfig{ |
|
299 |
+ Config: distribution.Config{ |
|
300 |
+ MetaHeaders: metaHeader, |
|
301 |
+ AuthConfig: authConfig, |
|
302 |
+ ProgressOutput: po, |
|
303 |
+ RegistryService: pm.config.RegistryService, |
|
304 |
+ ReferenceStore: rs, |
|
305 |
+ ImageEventLogger: pm.config.LogPluginEvent, |
|
306 |
+ ImageStore: is, |
|
307 |
+ RequireSchema2: true, |
|
308 |
+ }, |
|
309 |
+ ConfigMediaType: schema2.MediaTypePluginConfig, |
|
310 |
+ LayerStore: ls, |
|
311 |
+ UploadManager: uploadManager, |
|
312 |
+ } |
|
313 |
+ |
|
314 |
+ return distribution.Push(ctx, ref, imagePushConfig) |
|
315 |
+} |
|
316 |
+ |
|
317 |
+type pluginReference struct { |
|
318 |
+ name reference.Named |
|
319 |
+ pluginID digest.Digest |
|
320 |
+} |
|
321 |
+ |
|
322 |
+func (r *pluginReference) References(id digest.Digest) []reference.Named { |
|
323 |
+ if r.pluginID != id { |
|
324 |
+ return nil |
|
325 |
+ } |
|
326 |
+ return []reference.Named{r.name} |
|
327 |
+} |
|
328 |
+ |
|
329 |
+func (r *pluginReference) ReferencesByName(ref reference.Named) []reference.Association { |
|
330 |
+ return []reference.Association{ |
|
331 |
+ { |
|
332 |
+ Ref: r.name, |
|
333 |
+ ID: r.pluginID, |
|
334 |
+ }, |
|
335 |
+ } |
|
336 |
+} |
|
337 |
+ |
|
338 |
+func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { |
|
339 |
+ if r.name.String() != ref.String() { |
|
340 |
+ return digest.Digest(""), reference.ErrDoesNotExist |
|
341 |
+ } |
|
342 |
+ return r.pluginID, nil |
|
343 |
+} |
|
344 |
+ |
|
345 |
+func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error { |
|
346 |
+ // Read only, ignore |
|
347 |
+ return nil |
|
348 |
+} |
|
349 |
+func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { |
|
350 |
+ // Read only, ignore |
|
351 |
+ return nil |
|
352 |
+} |
|
353 |
+func (r *pluginReference) Delete(ref reference.Named) (bool, error) { |
|
354 |
+ // Read only, ignore |
|
355 |
+ return false, nil |
|
356 |
+} |
|
357 |
+ |
|
358 |
+type pluginConfigStore struct { |
|
359 |
+ pm *Manager |
|
360 |
+ plugin *v2.Plugin |
|
361 |
+} |
|
362 |
+ |
|
363 |
+func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) { |
|
364 |
+ return digest.Digest(""), errors.New("cannot store config on push") |
|
365 |
+} |
|
366 |
+ |
|
367 |
+func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { |
|
368 |
+ if s.plugin.Config != d { |
|
369 |
+ return nil, errors.New("plugin not found") |
|
370 |
+ } |
|
371 |
+ rwc, err := s.pm.blobStore.Get(d) |
|
263 | 372 |
if err != nil { |
264 |
- return err |
|
373 |
+ return nil, err |
|
374 |
+ } |
|
375 |
+ defer rwc.Close() |
|
376 |
+ return ioutil.ReadAll(rwc) |
|
377 |
+} |
|
378 |
+ |
|
379 |
+func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { |
|
380 |
+ return configToRootFS(c) |
|
381 |
+} |
|
382 |
+ |
|
383 |
+type pluginLayerProvider struct { |
|
384 |
+ pm *Manager |
|
385 |
+ plugin *v2.Plugin |
|
386 |
+} |
|
387 |
+ |
|
388 |
+func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) { |
|
389 |
+ rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs) |
|
390 |
+ var i int |
|
391 |
+ for i = 1; i <= len(rootFS.DiffIDs); i++ { |
|
392 |
+ if layer.CreateChainID(rootFS.DiffIDs[:i]) == id { |
|
393 |
+ break |
|
394 |
+ } |
|
395 |
+ } |
|
396 |
+ if i > len(rootFS.DiffIDs) { |
|
397 |
+ return nil, errors.New("layer not found") |
|
398 |
+ } |
|
399 |
+ return &pluginLayer{ |
|
400 |
+ pm: p.pm, |
|
401 |
+ diffIDs: rootFS.DiffIDs[:i], |
|
402 |
+ blobs: p.plugin.Blobsums[:i], |
|
403 |
+ }, nil |
|
404 |
+} |
|
405 |
+ |
|
406 |
+type pluginLayer struct { |
|
407 |
+ pm *Manager |
|
408 |
+ diffIDs []layer.DiffID |
|
409 |
+ blobs []digest.Digest |
|
410 |
+} |
|
411 |
+ |
|
412 |
+func (l *pluginLayer) ChainID() layer.ChainID { |
|
413 |
+ return layer.CreateChainID(l.diffIDs) |
|
414 |
+} |
|
415 |
+ |
|
416 |
+func (l *pluginLayer) DiffID() layer.DiffID { |
|
417 |
+ return l.diffIDs[len(l.diffIDs)-1] |
|
418 |
+} |
|
419 |
+ |
|
420 |
+func (l *pluginLayer) Parent() distribution.PushLayer { |
|
421 |
+ if len(l.diffIDs) == 1 { |
|
422 |
+ return nil |
|
423 |
+ } |
|
424 |
+ return &pluginLayer{ |
|
425 |
+ pm: l.pm, |
|
426 |
+ diffIDs: l.diffIDs[:len(l.diffIDs)-1], |
|
427 |
+ blobs: l.blobs[:len(l.diffIDs)-1], |
|
265 | 428 |
} |
266 |
- defer rootfs.Close() |
|
429 |
+} |
|
267 | 430 |
|
268 |
- _, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, ioutil.NopCloser(bytes.NewReader(config)), rootfs) |
|
269 |
- // XXX: Ignore returning digest for now. |
|
270 |
- // Since digest needs to be written to the ProgressWriter. |
|
271 |
- return err |
|
431 |
+func (l *pluginLayer) Open() (io.ReadCloser, error) { |
|
432 |
+ return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1]) |
|
433 |
+} |
|
434 |
+ |
|
435 |
+func (l *pluginLayer) Size() (int64, error) { |
|
436 |
+ return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1]) |
|
437 |
+} |
|
438 |
+ |
|
439 |
+func (l *pluginLayer) MediaType() string { |
|
440 |
+ return schema2.MediaTypeLayer |
|
441 |
+} |
|
442 |
+ |
|
443 |
+func (l *pluginLayer) Release() { |
|
444 |
+ // Nothing needs to be release, no references held |
|
272 | 445 |
} |
273 | 446 |
|
274 | 447 |
// Remove deletes plugin's root directory. |
275 |
-func (pm *Manager) Remove(name string, config *types.PluginRmConfig) (err error) { |
|
276 |
- p, err := pm.pluginStore.GetByName(name) |
|
448 |
+func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { |
|
449 |
+ p, err := pm.config.Store.GetV2Plugin(name) |
|
277 | 450 |
pm.mu.RLock() |
278 | 451 |
c := pm.cMap[p] |
279 | 452 |
pm.mu.RUnlock() |
... | ... |
@@ -297,95 +497,194 @@ func (pm *Manager) Remove(name string, config *types.PluginRmConfig) (err error) |
297 | 297 |
} |
298 | 298 |
} |
299 | 299 |
|
300 |
- id := p.GetID() |
|
301 |
- pluginDir := filepath.Join(pm.libRoot, id) |
|
302 |
- |
|
303 | 300 |
defer func() { |
304 |
- if err == nil || config.ForceRemove { |
|
305 |
- pm.pluginStore.Remove(p) |
|
306 |
- pm.pluginEventLogger(id, name, "remove") |
|
307 |
- } |
|
301 |
+ go pm.GC() |
|
308 | 302 |
}() |
309 | 303 |
|
310 |
- if err = os.RemoveAll(pluginDir); err != nil { |
|
311 |
- return errors.Wrap(err, "failed to remove plugin directory") |
|
304 |
+ id := p.GetID() |
|
305 |
+ pm.config.Store.Remove(p) |
|
306 |
+ pluginDir := filepath.Join(pm.config.Root, id) |
|
307 |
+ if err := os.RemoveAll(pluginDir); err != nil { |
|
308 |
+ logrus.Warnf("unable to remove %q from plugin remove: %v", pluginDir, err) |
|
312 | 309 |
} |
310 |
+ pm.config.LogPluginEvent(id, name, "remove") |
|
313 | 311 |
return nil |
314 | 312 |
} |
315 | 313 |
|
316 | 314 |
// Set sets plugin args |
317 | 315 |
func (pm *Manager) Set(name string, args []string) error { |
318 |
- p, err := pm.pluginStore.GetByName(name) |
|
316 |
+ p, err := pm.config.Store.GetV2Plugin(name) |
|
319 | 317 |
if err != nil { |
320 | 318 |
return err |
321 | 319 |
} |
322 |
- return p.Set(args) |
|
320 |
+ if err := p.Set(args); err != nil { |
|
321 |
+ return err |
|
322 |
+ } |
|
323 |
+ return pm.save(p) |
|
323 | 324 |
} |
324 | 325 |
|
325 | 326 |
// CreateFromContext creates a plugin from the given pluginDir which contains |
326 | 327 |
// both the rootfs and the config.json and a repoName with optional tag. |
327 |
-func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.Reader, options *types.PluginCreateOptions) error { |
|
328 |
- repoName := options.RepoName |
|
329 |
- ref, err := distribution.GetRef(repoName) |
|
328 |
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) { |
|
329 |
+ pm.muGC.RLock() |
|
330 |
+ defer pm.muGC.RUnlock() |
|
331 |
+ |
|
332 |
+ ref, err := reference.ParseNamed(options.RepoName) |
|
330 | 333 |
if err != nil { |
331 |
- return err |
|
334 |
+ return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) |
|
335 |
+ } |
|
336 |
+ if _, ok := ref.(reference.Canonical); ok { |
|
337 |
+ return errors.Errorf("canonical references are not permitted") |
|
332 | 338 |
} |
339 |
+ name := reference.WithDefaultTag(ref).String() |
|
333 | 340 |
|
334 |
- name := ref.Name() |
|
335 |
- tag := distribution.GetTag(ref) |
|
336 |
- pluginID := stringid.GenerateNonCryptoID() |
|
341 |
+ if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() |
|
342 |
+ return err |
|
343 |
+ } |
|
337 | 344 |
|
338 |
- p := v2.NewPlugin(name, pluginID, pm.runRoot, pm.libRoot, tag) |
|
345 |
+ tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") |
|
346 |
+ defer os.RemoveAll(tmpRootFSDir) |
|
347 |
+ if err != nil { |
|
348 |
+ return errors.Wrap(err, "failed to create temp directory") |
|
349 |
+ } |
|
350 |
+ var configJSON []byte |
|
351 |
+ rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) |
|
339 | 352 |
|
340 |
- if v, _ := pm.pluginStore.GetByName(p.Name()); v != nil { |
|
341 |
- return fmt.Errorf("plugin %q already exists", p.Name()) |
|
353 |
+ rootFSBlob, err := pm.blobStore.New() |
|
354 |
+ if err != nil { |
|
355 |
+ return err |
|
342 | 356 |
} |
357 |
+ defer rootFSBlob.Close() |
|
358 |
+ gzw := gzip.NewWriter(rootFSBlob) |
|
359 |
+ layerDigester := digest.Canonical.New() |
|
360 |
+ rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) |
|
343 | 361 |
|
344 |
- pluginDir := filepath.Join(pm.libRoot, pluginID) |
|
345 |
- if err := os.MkdirAll(pluginDir, 0755); err != nil { |
|
362 |
+ if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { |
|
363 |
+ return err |
|
364 |
+ } |
|
365 |
+ if err := rootFS.Close(); err != nil { |
|
346 | 366 |
return err |
347 | 367 |
} |
348 | 368 |
|
349 |
- // In case an error happens, remove the created directory. |
|
350 |
- if err := pm.createFromContext(ctx, tarCtx, pluginDir, repoName, p); err != nil { |
|
351 |
- if err := os.RemoveAll(pluginDir); err != nil { |
|
352 |
- logrus.Warnf("unable to remove %q from failed plugin creation: %v", pluginDir, err) |
|
353 |
- } |
|
369 |
+ if configJSON == nil { |
|
370 |
+ return errors.New("config not found") |
|
371 |
+ } |
|
372 |
+ |
|
373 |
+ if err := gzw.Close(); err != nil { |
|
374 |
+ return errors.Wrap(err, "error closing gzip writer") |
|
375 |
+ } |
|
376 |
+ |
|
377 |
+ var config types.PluginConfig |
|
378 |
+ if err := json.Unmarshal(configJSON, &config); err != nil { |
|
379 |
+ return errors.Wrap(err, "failed to parse config") |
|
380 |
+ } |
|
381 |
+ |
|
382 |
+ if err := pm.validateConfig(config); err != nil { |
|
354 | 383 |
return err |
355 | 384 |
} |
356 | 385 |
|
357 |
- return nil |
|
358 |
-} |
|
386 |
+ pm.mu.Lock() |
|
387 |
+ defer pm.mu.Unlock() |
|
359 | 388 |
|
360 |
-func (pm *Manager) createFromContext(ctx context.Context, tarCtx io.Reader, pluginDir, repoName string, p *v2.Plugin) error { |
|
361 |
- if err := chrootarchive.Untar(tarCtx, pluginDir, nil); err != nil { |
|
389 |
+ rootFSBlobsum, err := rootFSBlob.Commit() |
|
390 |
+ if err != nil { |
|
362 | 391 |
return err |
363 | 392 |
} |
393 |
+ defer func() { |
|
394 |
+ if err != nil { |
|
395 |
+ go pm.GC() |
|
396 |
+ } |
|
397 |
+ }() |
|
398 |
+ |
|
399 |
+ config.Rootfs = &types.PluginConfigRootfs{ |
|
400 |
+ Type: "layers", |
|
401 |
+ DiffIds: []string{layerDigester.Digest().String()}, |
|
402 |
+ } |
|
364 | 403 |
|
365 |
- if err := p.InitPlugin(); err != nil { |
|
404 |
+ configBlob, err := pm.blobStore.New() |
|
405 |
+ if err != nil { |
|
406 |
+ return err |
|
407 |
+ } |
|
408 |
+ defer configBlob.Close() |
|
409 |
+ if err := json.NewEncoder(configBlob).Encode(config); err != nil { |
|
410 |
+ return errors.Wrap(err, "error encoding json config") |
|
411 |
+ } |
|
412 |
+ configBlobsum, err := configBlob.Commit() |
|
413 |
+ if err != nil { |
|
366 | 414 |
return err |
367 | 415 |
} |
368 | 416 |
|
369 |
- if err := pm.pluginStore.Add(p); err != nil { |
|
417 |
+ p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil) |
|
418 |
+ if err != nil { |
|
370 | 419 |
return err |
371 | 420 |
} |
372 | 421 |
|
373 |
- pm.pluginEventLogger(p.GetID(), repoName, "create") |
|
422 |
+ pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") |
|
374 | 423 |
|
375 | 424 |
return nil |
376 | 425 |
} |
377 | 426 |
|
378 |
-func getPluginName(name string) (string, error) { |
|
379 |
- named, err := reference.ParseNamed(name) // FIXME: validate |
|
380 |
- if err != nil { |
|
381 |
- return "", err |
|
382 |
- } |
|
383 |
- if reference.IsNameOnly(named) { |
|
384 |
- named = reference.WithDefaultTag(named) |
|
385 |
- } |
|
386 |
- ref, ok := named.(reference.NamedTagged) |
|
387 |
- if !ok { |
|
388 |
- return "", fmt.Errorf("invalid name: %s", named.String()) |
|
389 |
- } |
|
390 |
- return ref.String(), nil |
|
427 |
+func (pm *Manager) validateConfig(config types.PluginConfig) error { |
|
428 |
+ return nil // TODO: |
|
429 |
+} |
|
430 |
+ |
|
431 |
+func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { |
|
432 |
+ pr, pw := io.Pipe() |
|
433 |
+ go func() { |
|
434 |
+ tarReader := tar.NewReader(in) |
|
435 |
+ tarWriter := tar.NewWriter(pw) |
|
436 |
+ defer in.Close() |
|
437 |
+ |
|
438 |
+ hasRootFS := false |
|
439 |
+ |
|
440 |
+ for { |
|
441 |
+ hdr, err := tarReader.Next() |
|
442 |
+ if err == io.EOF { |
|
443 |
+ if !hasRootFS { |
|
444 |
+ pw.CloseWithError(errors.Wrap(err, "no rootfs found")) |
|
445 |
+ return |
|
446 |
+ } |
|
447 |
+ // Signals end of archive. |
|
448 |
+ tarWriter.Close() |
|
449 |
+ pw.Close() |
|
450 |
+ return |
|
451 |
+ } |
|
452 |
+ if err != nil { |
|
453 |
+ pw.CloseWithError(errors.Wrap(err, "failed to read from tar")) |
|
454 |
+ return |
|
455 |
+ } |
|
456 |
+ |
|
457 |
+ content := io.Reader(tarReader) |
|
458 |
+ name := path.Clean(hdr.Name) |
|
459 |
+ if path.IsAbs(name) { |
|
460 |
+ name = name[1:] |
|
461 |
+ } |
|
462 |
+ if name == configFileName { |
|
463 |
+ dt, err := ioutil.ReadAll(content) |
|
464 |
+ if err != nil { |
|
465 |
+ pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName)) |
|
466 |
+ return |
|
467 |
+ } |
|
468 |
+ *config = dt |
|
469 |
+ } |
|
470 |
+ if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName { |
|
471 |
+ hdr.Name = path.Clean(path.Join(parts[1:]...)) |
|
472 |
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") { |
|
473 |
+ hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:] |
|
474 |
+ } |
|
475 |
+ if err := tarWriter.WriteHeader(hdr); err != nil { |
|
476 |
+ pw.CloseWithError(errors.Wrap(err, "error writing tar header")) |
|
477 |
+ return |
|
478 |
+ } |
|
479 |
+ if _, err := pools.Copy(tarWriter, content); err != nil { |
|
480 |
+ pw.CloseWithError(errors.Wrap(err, "error copying tar data")) |
|
481 |
+ return |
|
482 |
+ } |
|
483 |
+ hasRootFS = true |
|
484 |
+ } else { |
|
485 |
+ io.Copy(ioutil.Discard, content) |
|
486 |
+ } |
|
487 |
+ } |
|
488 |
+ }() |
|
489 |
+ return pr |
|
391 | 490 |
} |
... | ... |
@@ -8,6 +8,7 @@ import ( |
8 | 8 |
"net/http" |
9 | 9 |
|
10 | 10 |
"github.com/docker/docker/api/types" |
11 |
+ "github.com/docker/docker/reference" |
|
11 | 12 |
"golang.org/x/net/context" |
12 | 13 |
) |
13 | 14 |
|
... | ... |
@@ -24,17 +25,17 @@ func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { |
24 | 24 |
} |
25 | 25 |
|
26 | 26 |
// Inspect examines a plugin config |
27 |
-func (pm *Manager) Inspect(refOrID string) (tp types.Plugin, err error) { |
|
28 |
- return tp, errNotSupported |
|
27 |
+func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { |
|
28 |
+ return nil, errNotSupported |
|
29 | 29 |
} |
30 | 30 |
|
31 | 31 |
// Privileges pulls a plugin config and computes the privileges required to install it. |
32 |
-func (pm *Manager) Privileges(name string, metaHeaders http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { |
|
32 |
+func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { |
|
33 | 33 |
return nil, errNotSupported |
34 | 34 |
} |
35 | 35 |
|
36 | 36 |
// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. |
37 |
-func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges) error { |
|
37 |
+func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer) error { |
|
38 | 38 |
return errNotSupported |
39 | 39 |
} |
40 | 40 |
|
... | ... |
@@ -44,7 +45,7 @@ func (pm *Manager) List() ([]types.Plugin, error) { |
44 | 44 |
} |
45 | 45 |
|
46 | 46 |
// Push pushes a plugin to the store. |
47 |
-func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error { |
|
47 |
+func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error { |
|
48 | 48 |
return errNotSupported |
49 | 49 |
} |
50 | 50 |
|
... | ... |
@@ -60,6 +61,6 @@ func (pm *Manager) Set(name string, args []string) error { |
60 | 60 |
|
61 | 61 |
// CreateFromContext creates a plugin from the given pluginDir which contains |
62 | 62 |
// both the rootfs and the config.json and a repoName with optional tag. |
63 |
-func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.Reader, options *types.PluginCreateOptions) error { |
|
63 |
+func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error { |
|
64 | 64 |
return errNotSupported |
65 | 65 |
} |
66 | 66 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,181 @@ |
0 |
+package plugin |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "io" |
|
4 |
+ "io/ioutil" |
|
5 |
+ "os" |
|
6 |
+ "path/filepath" |
|
7 |
+ |
|
8 |
+ "github.com/Sirupsen/logrus" |
|
9 |
+ "github.com/docker/distribution/digest" |
|
10 |
+ "github.com/docker/docker/distribution/xfer" |
|
11 |
+ "github.com/docker/docker/image" |
|
12 |
+ "github.com/docker/docker/layer" |
|
13 |
+ "github.com/docker/docker/pkg/archive" |
|
14 |
+ "github.com/docker/docker/pkg/progress" |
|
15 |
+ "github.com/pkg/errors" |
|
16 |
+ "golang.org/x/net/context" |
|
17 |
+) |
|
18 |
+ |
|
19 |
+type blobstore interface { |
|
20 |
+ New() (WriteCommitCloser, error) |
|
21 |
+ Get(dgst digest.Digest) (io.ReadCloser, error) |
|
22 |
+ Size(dgst digest.Digest) (int64, error) |
|
23 |
+} |
|
24 |
+ |
|
25 |
+type basicBlobStore struct { |
|
26 |
+ path string |
|
27 |
+} |
|
28 |
+ |
|
29 |
+func newBasicBlobStore(p string) (*basicBlobStore, error) { |
|
30 |
+ tmpdir := filepath.Join(p, "tmp") |
|
31 |
+ if err := os.MkdirAll(tmpdir, 0700); err != nil { |
|
32 |
+ return nil, errors.Wrapf(err, "failed to mkdir %v", p) |
|
33 |
+ } |
|
34 |
+ return &basicBlobStore{path: p}, nil |
|
35 |
+} |
|
36 |
+ |
|
37 |
+func (b *basicBlobStore) New() (WriteCommitCloser, error) { |
|
38 |
+ f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion") |
|
39 |
+ if err != nil { |
|
40 |
+ return nil, errors.Wrap(err, "failed to create temp file") |
|
41 |
+ } |
|
42 |
+ return newInsertion(f), nil |
|
43 |
+} |
|
44 |
+ |
|
45 |
+func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) { |
|
46 |
+ return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) |
|
47 |
+} |
|
48 |
+ |
|
49 |
+func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) { |
|
50 |
+ stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) |
|
51 |
+ if err != nil { |
|
52 |
+ return 0, err |
|
53 |
+ } |
|
54 |
+ return stat.Size(), nil |
|
55 |
+} |
|
56 |
+ |
|
57 |
+func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) { |
|
58 |
+ for _, alg := range []string{string(digest.Canonical)} { |
|
59 |
+ items, err := ioutil.ReadDir(filepath.Join(b.path, alg)) |
|
60 |
+ if err != nil { |
|
61 |
+ continue |
|
62 |
+ } |
|
63 |
+ for _, fi := range items { |
|
64 |
+ if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists { |
|
65 |
+ p := filepath.Join(b.path, alg, fi.Name()) |
|
66 |
+ err := os.RemoveAll(p) |
|
67 |
+ logrus.Debugf("cleaned up blob %v: %v", p, err) |
|
68 |
+ } |
|
69 |
+ } |
|
70 |
+ } |
|
71 |
+ |
|
72 |
+} |
|
73 |
+ |
|
74 |
+// WriteCommitCloser defines object that can be committed to blobstore. |
|
75 |
+type WriteCommitCloser interface { |
|
76 |
+ io.WriteCloser |
|
77 |
+ Commit() (digest.Digest, error) |
|
78 |
+} |
|
79 |
+ |
|
80 |
+type insertion struct { |
|
81 |
+ io.Writer |
|
82 |
+ f *os.File |
|
83 |
+ digester digest.Digester |
|
84 |
+ closed bool |
|
85 |
+} |
|
86 |
+ |
|
87 |
+func newInsertion(tempFile *os.File) *insertion { |
|
88 |
+ digester := digest.Canonical.New() |
|
89 |
+ return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} |
|
90 |
+} |
|
91 |
+ |
|
92 |
+func (i *insertion) Commit() (digest.Digest, error) { |
|
93 |
+ p := i.f.Name() |
|
94 |
+ d := filepath.Join(filepath.Join(p, "../../")) |
|
95 |
+ i.f.Sync() |
|
96 |
+ defer os.RemoveAll(p) |
|
97 |
+ if err := i.f.Close(); err != nil { |
|
98 |
+ return "", err |
|
99 |
+ } |
|
100 |
+ i.closed = true |
|
101 |
+ dgst := i.digester.Digest() |
|
102 |
+ if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil { |
|
103 |
+ return "", errors.Wrapf(err, "failed to mkdir %v", d) |
|
104 |
+ } |
|
105 |
+ if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil { |
|
106 |
+ return "", errors.Wrapf(err, "failed to rename %v", p) |
|
107 |
+ } |
|
108 |
+ return dgst, nil |
|
109 |
+} |
|
110 |
+ |
|
111 |
+func (i *insertion) Close() error { |
|
112 |
+ if i.closed { |
|
113 |
+ return nil |
|
114 |
+ } |
|
115 |
+ defer os.RemoveAll(i.f.Name()) |
|
116 |
+ return i.f.Close() |
|
117 |
+} |
|
118 |
+ |
|
119 |
+type downloadManager struct { |
|
120 |
+ blobStore blobstore |
|
121 |
+ tmpDir string |
|
122 |
+ blobs []digest.Digest |
|
123 |
+ configDigest digest.Digest |
|
124 |
+} |
|
125 |
+ |
|
126 |
+func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { |
|
127 |
+ for _, l := range layers { |
|
128 |
+ b, err := dm.blobStore.New() |
|
129 |
+ if err != nil { |
|
130 |
+ return initialRootFS, nil, err |
|
131 |
+ } |
|
132 |
+ defer b.Close() |
|
133 |
+ rc, _, err := l.Download(ctx, progressOutput) |
|
134 |
+ if err != nil { |
|
135 |
+ return initialRootFS, nil, errors.Wrap(err, "failed to download") |
|
136 |
+ } |
|
137 |
+ defer rc.Close() |
|
138 |
+ r := io.TeeReader(rc, b) |
|
139 |
+ inflatedLayerData, err := archive.DecompressStream(r) |
|
140 |
+ if err != nil { |
|
141 |
+ return initialRootFS, nil, err |
|
142 |
+ } |
|
143 |
+ digester := digest.Canonical.New() |
|
144 |
+ if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { |
|
145 |
+ return initialRootFS, nil, err |
|
146 |
+ } |
|
147 |
+ initialRootFS.Append(layer.DiffID(digester.Digest())) |
|
148 |
+ d, err := b.Commit() |
|
149 |
+ if err != nil { |
|
150 |
+ return initialRootFS, nil, err |
|
151 |
+ } |
|
152 |
+ dm.blobs = append(dm.blobs, d) |
|
153 |
+ } |
|
154 |
+ return initialRootFS, nil, nil |
|
155 |
+} |
|
156 |
+ |
|
157 |
+func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { |
|
158 |
+ b, err := dm.blobStore.New() |
|
159 |
+ if err != nil { |
|
160 |
+ return "", err |
|
161 |
+ } |
|
162 |
+ defer b.Close() |
|
163 |
+ n, err := b.Write(dt) |
|
164 |
+ if err != nil { |
|
165 |
+ return "", err |
|
166 |
+ } |
|
167 |
+ if n != len(dt) { |
|
168 |
+ return "", io.ErrShortWrite |
|
169 |
+ } |
|
170 |
+ d, err := b.Commit() |
|
171 |
+ dm.configDigest = d |
|
172 |
+ return d, err |
|
173 |
+} |
|
174 |
+ |
|
175 |
+func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { |
|
176 |
+ return nil, digest.ErrDigestNotFound |
|
177 |
+} |
|
178 |
+func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { |
|
179 |
+ return configToRootFS(c) |
|
180 |
+} |
0 | 181 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,26 @@ |
0 |
+package plugin |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "sync" |
|
4 |
+ |
|
5 |
+ "github.com/docker/docker/pkg/plugins" |
|
6 |
+ "github.com/docker/docker/plugin/v2" |
|
7 |
+) |
|
8 |
+ |
|
9 |
+// Store manages the plugin inventory in memory and on-disk |
|
10 |
+type Store struct { |
|
11 |
+ sync.RWMutex |
|
12 |
+ plugins map[string]*v2.Plugin |
|
13 |
+ /* handlers are necessary for transition path of legacy plugins |
|
14 |
+ * to the new model. Legacy plugins use Handle() for registering an |
|
15 |
+ * activation callback.*/ |
|
16 |
+ handlers map[string][]func(string, *plugins.Client) |
|
17 |
+} |
|
18 |
+ |
|
19 |
+// NewStore creates a Store. |
|
20 |
+func NewStore(libRoot string) *Store { |
|
21 |
+ return &Store{ |
|
22 |
+ plugins: make(map[string]*v2.Plugin), |
|
23 |
+ handlers: make(map[string][]func(string, *plugins.Client)), |
|
24 |
+ } |
|
25 |
+} |
0 | 26 |
deleted file mode 100644 |
... | ... |
@@ -1,222 +0,0 @@ |
1 |
-package distribution |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "encoding/json" |
|
5 |
- "fmt" |
|
6 |
- "io" |
|
7 |
- "io/ioutil" |
|
8 |
- "net/http" |
|
9 |
- "os" |
|
10 |
- "path/filepath" |
|
11 |
- |
|
12 |
- "github.com/Sirupsen/logrus" |
|
13 |
- "github.com/docker/distribution" |
|
14 |
- "github.com/docker/distribution/manifest/schema2" |
|
15 |
- "github.com/docker/docker/api/types" |
|
16 |
- dockerdist "github.com/docker/docker/distribution" |
|
17 |
- archive "github.com/docker/docker/pkg/chrootarchive" |
|
18 |
- "github.com/docker/docker/reference" |
|
19 |
- "github.com/docker/docker/registry" |
|
20 |
- "golang.org/x/net/context" |
|
21 |
-) |
|
22 |
- |
|
23 |
-// PullData is the plugin config and the rootfs |
|
24 |
-type PullData interface { |
|
25 |
- Config() ([]byte, error) |
|
26 |
- Layer() (io.ReadCloser, error) |
|
27 |
-} |
|
28 |
- |
|
29 |
-type pullData struct { |
|
30 |
- repository distribution.Repository |
|
31 |
- manifest schema2.Manifest |
|
32 |
- index int |
|
33 |
-} |
|
34 |
- |
|
35 |
-func (pd *pullData) Config() ([]byte, error) { |
|
36 |
- blobs := pd.repository.Blobs(context.Background()) |
|
37 |
- config, err := blobs.Get(context.Background(), pd.manifest.Config.Digest) |
|
38 |
- if err != nil { |
|
39 |
- return nil, err |
|
40 |
- } |
|
41 |
- // validate |
|
42 |
- var p types.Plugin |
|
43 |
- if err := json.Unmarshal(config, &p); err != nil { |
|
44 |
- return nil, err |
|
45 |
- } |
|
46 |
- return config, nil |
|
47 |
-} |
|
48 |
- |
|
49 |
-func (pd *pullData) Layer() (io.ReadCloser, error) { |
|
50 |
- if pd.index >= len(pd.manifest.Layers) { |
|
51 |
- return nil, io.EOF |
|
52 |
- } |
|
53 |
- |
|
54 |
- blobs := pd.repository.Blobs(context.Background()) |
|
55 |
- rsc, err := blobs.Open(context.Background(), pd.manifest.Layers[pd.index].Digest) |
|
56 |
- if err != nil { |
|
57 |
- return nil, err |
|
58 |
- } |
|
59 |
- pd.index++ |
|
60 |
- return rsc, nil |
|
61 |
-} |
|
62 |
- |
|
63 |
-// GetRef returns the distribution reference for a given name. |
|
64 |
-func GetRef(name string) (reference.Named, error) { |
|
65 |
- ref, err := reference.ParseNamed(name) |
|
66 |
- if err != nil { |
|
67 |
- return nil, err |
|
68 |
- } |
|
69 |
- return ref, nil |
|
70 |
-} |
|
71 |
- |
|
72 |
-// GetTag returns the tag associated with the given reference name. |
|
73 |
-func GetTag(ref reference.Named) string { |
|
74 |
- tag := DefaultTag |
|
75 |
- if ref, ok := ref.(reference.NamedTagged); ok { |
|
76 |
- tag = ref.Tag() |
|
77 |
- } |
|
78 |
- return tag |
|
79 |
-} |
|
80 |
- |
|
81 |
-// Pull downloads the plugin from Store |
|
82 |
-func Pull(ref reference.Named, rs registry.Service, metaheader http.Header, authConfig *types.AuthConfig) (PullData, error) { |
|
83 |
- repoInfo, err := rs.ResolveRepository(ref) |
|
84 |
- if err != nil { |
|
85 |
- logrus.Debugf("pull.go: error in ResolveRepository: %v", err) |
|
86 |
- return nil, err |
|
87 |
- } |
|
88 |
- repoInfo.Class = "plugin" |
|
89 |
- |
|
90 |
- if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil { |
|
91 |
- logrus.Debugf("pull.go: error in ValidateRepoName: %v", err) |
|
92 |
- return nil, err |
|
93 |
- } |
|
94 |
- |
|
95 |
- endpoints, err := rs.LookupPullEndpoints(repoInfo.Hostname()) |
|
96 |
- if err != nil { |
|
97 |
- logrus.Debugf("pull.go: error in LookupPullEndpoints: %v", err) |
|
98 |
- return nil, err |
|
99 |
- } |
|
100 |
- |
|
101 |
- var confirmedV2 bool |
|
102 |
- var repository distribution.Repository |
|
103 |
- |
|
104 |
- for _, endpoint := range endpoints { |
|
105 |
- if confirmedV2 && endpoint.Version == registry.APIVersion1 { |
|
106 |
- logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) |
|
107 |
- continue |
|
108 |
- } |
|
109 |
- |
|
110 |
- // TODO: reuse contexts |
|
111 |
- repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaheader, authConfig, "pull") |
|
112 |
- if err != nil { |
|
113 |
- logrus.Debugf("pull.go: error in NewV2Repository: %v", err) |
|
114 |
- return nil, err |
|
115 |
- } |
|
116 |
- if !confirmedV2 { |
|
117 |
- logrus.Debug("pull.go: !confirmedV2") |
|
118 |
- return nil, ErrUnsupportedRegistry |
|
119 |
- } |
|
120 |
- logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) |
|
121 |
- break |
|
122 |
- } |
|
123 |
- |
|
124 |
- tag := DefaultTag |
|
125 |
- if ref, ok := ref.(reference.NamedTagged); ok { |
|
126 |
- tag = ref.Tag() |
|
127 |
- } |
|
128 |
- |
|
129 |
- // tags := repository.Tags(context.Background()) |
|
130 |
- // desc, err := tags.Get(context.Background(), tag) |
|
131 |
- // if err != nil { |
|
132 |
- // return nil, err |
|
133 |
- // } |
|
134 |
- // |
|
135 |
- msv, err := repository.Manifests(context.Background()) |
|
136 |
- if err != nil { |
|
137 |
- logrus.Debugf("pull.go: error in repository.Manifests: %v", err) |
|
138 |
- return nil, err |
|
139 |
- } |
|
140 |
- manifest, err := msv.Get(context.Background(), "", distribution.WithTag(tag)) |
|
141 |
- if err != nil { |
|
142 |
- logrus.Debugf("pull.go: error in msv.Get(): %v", err) |
|
143 |
- return nil, dockerdist.TranslatePullError(err, repoInfo) |
|
144 |
- } |
|
145 |
- |
|
146 |
- _, pl, err := manifest.Payload() |
|
147 |
- if err != nil { |
|
148 |
- logrus.Debugf("pull.go: error in manifest.Payload(): %v", err) |
|
149 |
- return nil, err |
|
150 |
- } |
|
151 |
- var m schema2.Manifest |
|
152 |
- if err := json.Unmarshal(pl, &m); err != nil { |
|
153 |
- logrus.Debugf("pull.go: error in json.Unmarshal(): %v", err) |
|
154 |
- return nil, err |
|
155 |
- } |
|
156 |
- if m.Config.MediaType != schema2.MediaTypePluginConfig { |
|
157 |
- return nil, ErrUnsupportedMediaType |
|
158 |
- } |
|
159 |
- |
|
160 |
- pd := &pullData{ |
|
161 |
- repository: repository, |
|
162 |
- manifest: m, |
|
163 |
- } |
|
164 |
- |
|
165 |
- logrus.Debugf("manifest: %s", pl) |
|
166 |
- return pd, nil |
|
167 |
-} |
|
168 |
- |
|
169 |
-// WritePullData extracts manifest and rootfs to the disk. |
|
170 |
-func WritePullData(pd PullData, dest string, extract bool) error { |
|
171 |
- config, err := pd.Config() |
|
172 |
- if err != nil { |
|
173 |
- return err |
|
174 |
- } |
|
175 |
- var p types.Plugin |
|
176 |
- if err := json.Unmarshal(config, &p); err != nil { |
|
177 |
- return err |
|
178 |
- } |
|
179 |
- logrus.Debugf("plugin: %#v", p) |
|
180 |
- |
|
181 |
- if err := os.MkdirAll(dest, 0700); err != nil { |
|
182 |
- return err |
|
183 |
- } |
|
184 |
- |
|
185 |
- if extract { |
|
186 |
- if err := ioutil.WriteFile(filepath.Join(dest, "config.json"), config, 0600); err != nil { |
|
187 |
- return err |
|
188 |
- } |
|
189 |
- |
|
190 |
- if err := os.MkdirAll(filepath.Join(dest, "rootfs"), 0700); err != nil { |
|
191 |
- return err |
|
192 |
- } |
|
193 |
- } |
|
194 |
- |
|
195 |
- for i := 0; ; i++ { |
|
196 |
- l, err := pd.Layer() |
|
197 |
- if err == io.EOF { |
|
198 |
- break |
|
199 |
- } |
|
200 |
- if err != nil { |
|
201 |
- return err |
|
202 |
- } |
|
203 |
- |
|
204 |
- if !extract { |
|
205 |
- f, err := os.Create(filepath.Join(dest, fmt.Sprintf("layer%d.tar", i))) |
|
206 |
- if err != nil { |
|
207 |
- l.Close() |
|
208 |
- return err |
|
209 |
- } |
|
210 |
- io.Copy(f, l) |
|
211 |
- l.Close() |
|
212 |
- f.Close() |
|
213 |
- continue |
|
214 |
- } |
|
215 |
- |
|
216 |
- if _, err := archive.ApplyLayer(filepath.Join(dest, "rootfs"), l); err != nil { |
|
217 |
- return err |
|
218 |
- } |
|
219 |
- |
|
220 |
- } |
|
221 |
- return nil |
|
222 |
-} |
223 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,134 +0,0 @@ |
1 |
-package distribution |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "crypto/sha256" |
|
5 |
- "io" |
|
6 |
- "net/http" |
|
7 |
- |
|
8 |
- "github.com/Sirupsen/logrus" |
|
9 |
- "github.com/docker/distribution" |
|
10 |
- "github.com/docker/distribution/digest" |
|
11 |
- "github.com/docker/distribution/manifest/schema2" |
|
12 |
- "github.com/docker/docker/api/types" |
|
13 |
- dockerdist "github.com/docker/docker/distribution" |
|
14 |
- "github.com/docker/docker/reference" |
|
15 |
- "github.com/docker/docker/registry" |
|
16 |
- "golang.org/x/net/context" |
|
17 |
-) |
|
18 |
- |
|
19 |
-// Push pushes a plugin to a registry. |
|
20 |
-func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) { |
|
21 |
- ref, err := reference.ParseNamed(name) |
|
22 |
- if err != nil { |
|
23 |
- return "", err |
|
24 |
- } |
|
25 |
- |
|
26 |
- repoInfo, err := rs.ResolveRepository(ref) |
|
27 |
- if err != nil { |
|
28 |
- return "", err |
|
29 |
- } |
|
30 |
- repoInfo.Class = "plugin" |
|
31 |
- |
|
32 |
- if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil { |
|
33 |
- return "", err |
|
34 |
- } |
|
35 |
- |
|
36 |
- endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname()) |
|
37 |
- if err != nil { |
|
38 |
- return "", err |
|
39 |
- } |
|
40 |
- |
|
41 |
- var confirmedV2 bool |
|
42 |
- var repository distribution.Repository |
|
43 |
- for _, endpoint := range endpoints { |
|
44 |
- if confirmedV2 && endpoint.Version == registry.APIVersion1 { |
|
45 |
- logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) |
|
46 |
- continue |
|
47 |
- } |
|
48 |
- repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull") |
|
49 |
- if err != nil { |
|
50 |
- return "", err |
|
51 |
- } |
|
52 |
- if !confirmedV2 { |
|
53 |
- return "", ErrUnsupportedRegistry |
|
54 |
- } |
|
55 |
- logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) |
|
56 |
- // This means that we found an endpoint. and we are ready to push |
|
57 |
- break |
|
58 |
- } |
|
59 |
- |
|
60 |
- // Returns a reference to the repository's blob service. |
|
61 |
- blobs := repository.Blobs(context.Background()) |
|
62 |
- |
|
63 |
- // Descriptor = {mediaType, size, digest} |
|
64 |
- var descs []distribution.Descriptor |
|
65 |
- |
|
66 |
- for i, f := range []io.ReadCloser{config, layers} { |
|
67 |
- bw, err := blobs.Create(context.Background()) |
|
68 |
- if err != nil { |
|
69 |
- logrus.Debugf("Error in blobs.Create: %v", err) |
|
70 |
- return "", err |
|
71 |
- } |
|
72 |
- h := sha256.New() |
|
73 |
- r := io.TeeReader(f, h) |
|
74 |
- _, err = io.Copy(bw, r) |
|
75 |
- if err != nil { |
|
76 |
- f.Close() |
|
77 |
- logrus.Debugf("Error in io.Copy: %v", err) |
|
78 |
- return "", err |
|
79 |
- } |
|
80 |
- f.Close() |
|
81 |
- mt := schema2.MediaTypeLayer |
|
82 |
- if i == 0 { |
|
83 |
- mt = schema2.MediaTypePluginConfig |
|
84 |
- } |
|
85 |
- // Commit completes the write process to the BlobService. |
|
86 |
- // The descriptor arg to Commit is called the "provisional" descriptor and |
|
87 |
- // used for validation. |
|
88 |
- // The returned descriptor should be the one used. Its called the "Canonical" |
|
89 |
- // descriptor. |
|
90 |
- desc, err := bw.Commit(context.Background(), distribution.Descriptor{ |
|
91 |
- MediaType: mt, |
|
92 |
- // XXX: What about the Size? |
|
93 |
- Digest: digest.NewDigest("sha256", h), |
|
94 |
- }) |
|
95 |
- if err != nil { |
|
96 |
- logrus.Debugf("Error in bw.Commit: %v", err) |
|
97 |
- return "", err |
|
98 |
- } |
|
99 |
- // The canonical descriptor is set the mediatype again, just in case. |
|
100 |
- // Don't touch the digest or the size here. |
|
101 |
- desc.MediaType = mt |
|
102 |
- logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest) |
|
103 |
- descs = append(descs, desc) |
|
104 |
- } |
|
105 |
- |
|
106 |
- // XXX: schema2.Versioned needs a MediaType as well. |
|
107 |
- // "application/vnd.docker.distribution.manifest.v2+json" |
|
108 |
- m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]}) |
|
109 |
- if err != nil { |
|
110 |
- logrus.Debugf("error in schema2.FromStruct: %v", err) |
|
111 |
- return "", err |
|
112 |
- } |
|
113 |
- |
|
114 |
- msv, err := repository.Manifests(context.Background()) |
|
115 |
- if err != nil { |
|
116 |
- logrus.Debugf("error in repository.Manifests: %v", err) |
|
117 |
- return "", err |
|
118 |
- } |
|
119 |
- |
|
120 |
- _, pl, err := m.Payload() |
|
121 |
- if err != nil { |
|
122 |
- logrus.Debugf("error in m.Payload: %v", err) |
|
123 |
- return "", err |
|
124 |
- } |
|
125 |
- |
|
126 |
- logrus.Debugf("Pushed manifest: %s", pl) |
|
127 |
- |
|
128 |
- tag := DefaultTag |
|
129 |
- if tagged, ok := ref.(reference.NamedTagged); ok { |
|
130 |
- tag = tagged.Tag() |
|
131 |
- } |
|
132 |
- |
|
133 |
- return msv.Put(context.Background(), m, distribution.WithTag(tag)) |
|
134 |
-} |
135 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,12 +0,0 @@ |
1 |
-package distribution |
|
2 |
- |
|
3 |
-import "errors" |
|
4 |
- |
|
5 |
-// ErrUnsupportedRegistry indicates that the registry does not support v2 protocol |
|
6 |
-var ErrUnsupportedRegistry = errors.New("only V2 repositories are supported for plugin distribution") |
|
7 |
- |
|
8 |
-// ErrUnsupportedMediaType indicates we are pulling content that's not a plugin |
|
9 |
-var ErrUnsupportedMediaType = errors.New("content is not a plugin") |
|
10 |
- |
|
11 |
-// DefaultTag is the default tag for plugins |
|
12 |
-const DefaultTag = "latest" |
... | ... |
@@ -3,25 +3,34 @@ package plugin |
3 | 3 |
import ( |
4 | 4 |
"encoding/json" |
5 | 5 |
"io" |
6 |
+ "io/ioutil" |
|
6 | 7 |
"os" |
7 | 8 |
"path/filepath" |
9 |
+ "reflect" |
|
10 |
+ "regexp" |
|
8 | 11 |
"strings" |
9 | 12 |
"sync" |
10 | 13 |
|
11 | 14 |
"github.com/Sirupsen/logrus" |
15 |
+ "github.com/docker/distribution/digest" |
|
16 |
+ "github.com/docker/docker/api/types" |
|
17 |
+ "github.com/docker/docker/image" |
|
18 |
+ "github.com/docker/docker/layer" |
|
12 | 19 |
"github.com/docker/docker/libcontainerd" |
20 |
+ "github.com/docker/docker/pkg/ioutils" |
|
13 | 21 |
"github.com/docker/docker/pkg/mount" |
14 |
- "github.com/docker/docker/plugin/store" |
|
15 | 22 |
"github.com/docker/docker/plugin/v2" |
23 |
+ "github.com/docker/docker/reference" |
|
16 | 24 |
"github.com/docker/docker/registry" |
25 |
+ "github.com/pkg/errors" |
|
17 | 26 |
) |
18 | 27 |
|
19 |
-var ( |
|
20 |
- manager *Manager |
|
21 |
-) |
|
28 |
+const configFileName = "config.json" |
|
29 |
+const rootFSFileName = "rootfs" |
|
30 |
+ |
|
31 |
+var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) |
|
22 | 32 |
|
23 | 33 |
func (pm *Manager) restorePlugin(p *v2.Plugin) error { |
24 |
- p.Restore(pm.runRoot) |
|
25 | 34 |
if p.IsEnabled() { |
26 | 35 |
return pm.restore(p) |
27 | 36 |
} |
... | ... |
@@ -30,17 +39,25 @@ func (pm *Manager) restorePlugin(p *v2.Plugin) error { |
30 | 30 |
|
31 | 31 |
type eventLogger func(id, name, action string) |
32 | 32 |
|
33 |
+// ManagerConfig defines configuration needed to start new manager. |
|
34 |
+type ManagerConfig struct { |
|
35 |
+ Store *Store // remove |
|
36 |
+ Executor libcontainerd.Remote |
|
37 |
+ RegistryService registry.Service |
|
38 |
+ LiveRestoreEnabled bool // TODO: remove |
|
39 |
+ LogPluginEvent eventLogger |
|
40 |
+ Root string |
|
41 |
+ ExecRoot string |
|
42 |
+} |
|
43 |
+ |
|
33 | 44 |
// Manager controls the plugin subsystem. |
34 | 45 |
type Manager struct { |
35 |
- libRoot string |
|
36 |
- runRoot string |
|
37 |
- pluginStore *store.Store |
|
38 |
- containerdClient libcontainerd.Client |
|
39 |
- registryService registry.Service |
|
40 |
- liveRestore bool |
|
41 |
- pluginEventLogger eventLogger |
|
42 |
- mu sync.RWMutex // protects cMap |
|
43 |
- cMap map[*v2.Plugin]*controller |
|
46 |
+ config ManagerConfig |
|
47 |
+ mu sync.RWMutex // protects cMap |
|
48 |
+ muGC sync.RWMutex // protects blobstore deletions |
|
49 |
+ cMap map[*v2.Plugin]*controller |
|
50 |
+ containerdClient libcontainerd.Client |
|
51 |
+ blobStore *basicBlobStore |
|
44 | 52 |
} |
45 | 53 |
|
46 | 54 |
// controller represents the manager's control on a plugin. |
... | ... |
@@ -50,36 +67,56 @@ type controller struct { |
50 | 50 |
timeoutInSecs int |
51 | 51 |
} |
52 | 52 |
|
53 |
-// GetManager returns the singleton plugin Manager |
|
54 |
-func GetManager() *Manager { |
|
55 |
- return manager |
|
53 |
+// pluginRegistryService ensures that all resolved repositories |
|
54 |
+// are of the plugin class. |
|
55 |
+type pluginRegistryService struct { |
|
56 |
+ registry.Service |
|
56 | 57 |
} |
57 | 58 |
|
58 |
-// Init (was NewManager) instantiates the singleton Manager. |
|
59 |
-// TODO: revert this to NewManager once we get rid of all the singletons. |
|
60 |
-func Init(root string, ps *store.Store, remote libcontainerd.Remote, rs registry.Service, liveRestore bool, evL eventLogger) (err error) { |
|
61 |
- if manager != nil { |
|
62 |
- return nil |
|
59 |
+func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { |
|
60 |
+ repoInfo, err = s.Service.ResolveRepository(name) |
|
61 |
+ if repoInfo != nil { |
|
62 |
+ repoInfo.Class = "plugin" |
|
63 | 63 |
} |
64 |
+ return |
|
65 |
+} |
|
64 | 66 |
|
65 |
- root = filepath.Join(root, "plugins") |
|
66 |
- manager = &Manager{ |
|
67 |
- libRoot: root, |
|
68 |
- runRoot: "/run/docker/plugins", |
|
69 |
- pluginStore: ps, |
|
70 |
- registryService: rs, |
|
71 |
- liveRestore: liveRestore, |
|
72 |
- pluginEventLogger: evL, |
|
67 |
+// NewManager returns a new plugin manager. |
|
68 |
+func NewManager(config ManagerConfig) (*Manager, error) { |
|
69 |
+ if config.RegistryService != nil { |
|
70 |
+ config.RegistryService = pluginRegistryService{config.RegistryService} |
|
73 | 71 |
} |
74 |
- if err := os.MkdirAll(manager.runRoot, 0700); err != nil { |
|
75 |
- return err |
|
72 |
+ manager := &Manager{ |
|
73 |
+ config: config, |
|
76 | 74 |
} |
77 |
- manager.containerdClient, err = remote.Client(manager) |
|
75 |
+ if err := os.MkdirAll(manager.config.Root, 0700); err != nil { |
|
76 |
+ return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root) |
|
77 |
+ } |
|
78 |
+ if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil { |
|
79 |
+ return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot) |
|
80 |
+ } |
|
81 |
+ if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil { |
|
82 |
+ return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir()) |
|
83 |
+ } |
|
84 |
+ var err error |
|
85 |
+ manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct |
|
78 | 86 |
if err != nil { |
79 |
- return err |
|
87 |
+ return nil, errors.Wrap(err, "failed to create containerd client") |
|
80 | 88 |
} |
89 |
+ manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs")) |
|
90 |
+ if err != nil { |
|
91 |
+ return nil, err |
|
92 |
+ } |
|
93 |
+ |
|
81 | 94 |
manager.cMap = make(map[*v2.Plugin]*controller) |
82 |
- return manager.reload() |
|
95 |
+ if err := manager.reload(); err != nil { |
|
96 |
+ return nil, errors.Wrap(err, "failed to restore plugins") |
|
97 |
+ } |
|
98 |
+ return manager, nil |
|
99 |
+} |
|
100 |
+ |
|
101 |
+func (pm *Manager) tmpDir() string { |
|
102 |
+ return filepath.Join(pm.config.Root, "tmp") |
|
83 | 103 |
} |
84 | 104 |
|
85 | 105 |
// StateChanged updates plugin internals using libcontainerd events. |
... | ... |
@@ -88,7 +125,7 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { |
88 | 88 |
|
89 | 89 |
switch e.State { |
90 | 90 |
case libcontainerd.StateExit: |
91 |
- p, err := pm.pluginStore.GetByID(id) |
|
91 |
+ p, err := pm.config.Store.GetV2Plugin(id) |
|
92 | 92 |
if err != nil { |
93 | 93 |
return err |
94 | 94 |
} |
... | ... |
@@ -102,7 +139,7 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { |
102 | 102 |
restart := c.restart |
103 | 103 |
pm.mu.RUnlock() |
104 | 104 |
|
105 |
- p.RemoveFromDisk() |
|
105 |
+ os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)) |
|
106 | 106 |
|
107 | 107 |
if p.PropagatedMount != "" { |
108 | 108 |
if err := mount.Unmount(p.PropagatedMount); err != nil { |
... | ... |
@@ -118,37 +155,38 @@ func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { |
118 | 118 |
return nil |
119 | 119 |
} |
120 | 120 |
|
121 |
-// reload is used on daemon restarts to load the manager's state |
|
122 |
-func (pm *Manager) reload() error { |
|
123 |
- dt, err := os.Open(filepath.Join(pm.libRoot, "plugins.json")) |
|
121 |
+func (pm *Manager) reload() error { // todo: restore |
|
122 |
+ dir, err := ioutil.ReadDir(pm.config.Root) |
|
124 | 123 |
if err != nil { |
125 |
- if os.IsNotExist(err) { |
|
126 |
- return nil |
|
127 |
- } |
|
128 |
- return err |
|
124 |
+ return errors.Wrapf(err, "failed to read %v", pm.config.Root) |
|
129 | 125 |
} |
130 |
- defer dt.Close() |
|
131 |
- |
|
132 | 126 |
plugins := make(map[string]*v2.Plugin) |
133 |
- if err := json.NewDecoder(dt).Decode(&plugins); err != nil { |
|
134 |
- return err |
|
127 |
+ for _, v := range dir { |
|
128 |
+ if validFullID.MatchString(v.Name()) { |
|
129 |
+ p, err := pm.loadPlugin(v.Name()) |
|
130 |
+ if err != nil { |
|
131 |
+ return err |
|
132 |
+ } |
|
133 |
+ plugins[p.GetID()] = p |
|
134 |
+ } |
|
135 | 135 |
} |
136 |
- pm.pluginStore.SetAll(plugins) |
|
137 | 136 |
|
138 |
- var group sync.WaitGroup |
|
139 |
- group.Add(len(plugins)) |
|
137 |
+ pm.config.Store.SetAll(plugins) |
|
138 |
+ |
|
139 |
+ var wg sync.WaitGroup |
|
140 |
+ wg.Add(len(plugins)) |
|
140 | 141 |
for _, p := range plugins { |
141 |
- c := &controller{} |
|
142 |
+ c := &controller{} // todo: remove this |
|
142 | 143 |
pm.cMap[p] = c |
143 | 144 |
go func(p *v2.Plugin) { |
144 |
- defer group.Done() |
|
145 |
+ defer wg.Done() |
|
145 | 146 |
if err := pm.restorePlugin(p); err != nil { |
146 | 147 |
logrus.Errorf("failed to restore plugin '%s': %s", p.Name(), err) |
147 | 148 |
return |
148 | 149 |
} |
149 | 150 |
|
150 | 151 |
if p.Rootfs != "" { |
151 |
- p.Rootfs = filepath.Join(pm.libRoot, p.PluginObj.ID, "rootfs") |
|
152 |
+ p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") |
|
152 | 153 |
} |
153 | 154 |
|
154 | 155 |
// We should only enable rootfs propagation for certain plugin types that need it. |
... | ... |
@@ -165,8 +203,8 @@ func (pm *Manager) reload() error { |
165 | 165 |
} |
166 | 166 |
} |
167 | 167 |
|
168 |
- pm.pluginStore.Update(p) |
|
169 |
- requiresManualRestore := !pm.liveRestore && p.IsEnabled() |
|
168 |
+ pm.save(p) |
|
169 |
+ requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled() |
|
170 | 170 |
|
171 | 171 |
if requiresManualRestore { |
172 | 172 |
// if liveRestore is not enabled, the plugin will be stopped now so we should enable it |
... | ... |
@@ -176,10 +214,50 @@ func (pm *Manager) reload() error { |
176 | 176 |
} |
177 | 177 |
}(p) |
178 | 178 |
} |
179 |
- group.Wait() |
|
179 |
+ wg.Wait() |
|
180 | 180 |
return nil |
181 | 181 |
} |
182 | 182 |
|
183 |
+func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { |
|
184 |
+ p := filepath.Join(pm.config.Root, id, configFileName) |
|
185 |
+ dt, err := ioutil.ReadFile(p) |
|
186 |
+ if err != nil { |
|
187 |
+ return nil, errors.Wrapf(err, "error reading %v", p) |
|
188 |
+ } |
|
189 |
+ var plugin v2.Plugin |
|
190 |
+ if err := json.Unmarshal(dt, &plugin); err != nil { |
|
191 |
+ return nil, errors.Wrapf(err, "error decoding %v", p) |
|
192 |
+ } |
|
193 |
+ return &plugin, nil |
|
194 |
+} |
|
195 |
+ |
|
196 |
+func (pm *Manager) save(p *v2.Plugin) error { |
|
197 |
+ pluginJSON, err := json.Marshal(p) |
|
198 |
+ if err != nil { |
|
199 |
+ return errors.Wrap(err, "failed to marshal plugin json") |
|
200 |
+ } |
|
201 |
+ if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil { |
|
202 |
+ return err |
|
203 |
+ } |
|
204 |
+ return nil |
|
205 |
+} |
|
206 |
+ |
|
207 |
+// GC cleans up unrefrenced blobs. This is recommended to run in a goroutine |
|
208 |
+func (pm *Manager) GC() { |
|
209 |
+ pm.muGC.Lock() |
|
210 |
+ defer pm.muGC.Unlock() |
|
211 |
+ |
|
212 |
+ whitelist := make(map[digest.Digest]struct{}) |
|
213 |
+ for _, p := range pm.config.Store.GetAll() { |
|
214 |
+ whitelist[p.Config] = struct{}{} |
|
215 |
+ for _, b := range p.Blobsums { |
|
216 |
+ whitelist[b] = struct{}{} |
|
217 |
+ } |
|
218 |
+ } |
|
219 |
+ |
|
220 |
+ pm.blobStore.gc(whitelist) |
|
221 |
+} |
|
222 |
+ |
|
183 | 223 |
type logHook struct{ id string } |
184 | 224 |
|
185 | 225 |
func (logHook) Levels() []logrus.Level { |
... | ... |
@@ -209,3 +287,32 @@ func attachToLog(id string) func(libcontainerd.IOPipe) error { |
209 | 209 |
return nil |
210 | 210 |
} |
211 | 211 |
} |
212 |
+ |
|
213 |
+func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error { |
|
214 |
+ // todo: make a better function that doesn't check order |
|
215 |
+ if !reflect.DeepEqual(privileges, requiredPrivileges) { |
|
216 |
+ return errors.New("incorrect privileges") |
|
217 |
+ } |
|
218 |
+ return nil |
|
219 |
+} |
|
220 |
+ |
|
221 |
+func configToRootFS(c []byte) (*image.RootFS, error) { |
|
222 |
+ var pluginConfig types.PluginConfig |
|
223 |
+ if err := json.Unmarshal(c, &pluginConfig); err != nil { |
|
224 |
+ return nil, err |
|
225 |
+ } |
|
226 |
+ |
|
227 |
+ return rootFSFromPlugin(pluginConfig.Rootfs), nil |
|
228 |
+} |
|
229 |
+ |
|
230 |
+func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { |
|
231 |
+ rootFS := image.RootFS{ |
|
232 |
+ Type: pluginfs.Type, |
|
233 |
+ DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)), |
|
234 |
+ } |
|
235 |
+ for i := range pluginfs.DiffIds { |
|
236 |
+ rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i]) |
|
237 |
+ } |
|
238 |
+ |
|
239 |
+ return &rootFS |
|
240 |
+} |
... | ... |
@@ -3,26 +3,32 @@ |
3 | 3 |
package plugin |
4 | 4 |
|
5 | 5 |
import ( |
6 |
+ "encoding/json" |
|
6 | 7 |
"fmt" |
8 |
+ "os" |
|
7 | 9 |
"path/filepath" |
8 | 10 |
"syscall" |
9 | 11 |
"time" |
10 | 12 |
|
11 | 13 |
"github.com/Sirupsen/logrus" |
14 |
+ "github.com/docker/distribution/digest" |
|
15 |
+ "github.com/docker/docker/api/types" |
|
16 |
+ "github.com/docker/docker/daemon/initlayer" |
|
12 | 17 |
"github.com/docker/docker/libcontainerd" |
13 |
- "github.com/docker/docker/oci" |
|
14 | 18 |
"github.com/docker/docker/pkg/mount" |
15 | 19 |
"github.com/docker/docker/pkg/plugins" |
20 |
+ "github.com/docker/docker/pkg/stringid" |
|
16 | 21 |
"github.com/docker/docker/plugin/v2" |
17 | 22 |
specs "github.com/opencontainers/runtime-spec/specs-go" |
23 |
+ "github.com/pkg/errors" |
|
18 | 24 |
) |
19 | 25 |
|
20 | 26 |
func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { |
21 |
- p.Rootfs = filepath.Join(pm.libRoot, p.PluginObj.ID, "rootfs") |
|
27 |
+ p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") |
|
22 | 28 |
if p.IsEnabled() && !force { |
23 | 29 |
return fmt.Errorf("plugin %s is already enabled", p.Name()) |
24 | 30 |
} |
25 |
- spec, err := p.InitSpec(oci.DefaultSpec()) |
|
31 |
+ spec, err := p.InitSpec(pm.config.ExecRoot) |
|
26 | 32 |
if err != nil { |
27 | 33 |
return err |
28 | 34 |
} |
... | ... |
@@ -40,6 +46,10 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { |
40 | 40 |
} |
41 | 41 |
} |
42 | 42 |
|
43 |
+ if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), 0, 0); err != nil { |
|
44 |
+ return err |
|
45 |
+ } |
|
46 |
+ |
|
43 | 47 |
if err := pm.containerdClient.Create(p.GetID(), "", "", specs.Spec(*spec), attachToLog(p.GetID())); err != nil { |
44 | 48 |
if p.PropagatedMount != "" { |
45 | 49 |
if err := mount.Unmount(p.PropagatedMount); err != nil { |
... | ... |
@@ -53,7 +63,7 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { |
53 | 53 |
} |
54 | 54 |
|
55 | 55 |
func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { |
56 |
- client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(p.GetRuntimeSourcePath(), p.GetSocket()), nil, c.timeoutInSecs) |
|
56 |
+ client, err := plugins.NewClientWithTimeout("unix://"+filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()), nil, c.timeoutInSecs) |
|
57 | 57 |
if err != nil { |
58 | 58 |
c.restart = false |
59 | 59 |
shutdownPlugin(p, c, pm.containerdClient) |
... | ... |
@@ -61,9 +71,10 @@ func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { |
61 | 61 |
} |
62 | 62 |
|
63 | 63 |
p.SetPClient(client) |
64 |
- pm.pluginStore.SetState(p, true) |
|
65 |
- pm.pluginStore.CallHandler(p) |
|
66 |
- return nil |
|
64 |
+ pm.config.Store.SetState(p, true) |
|
65 |
+ pm.config.Store.CallHandler(p) |
|
66 |
+ |
|
67 |
+ return pm.save(p) |
|
67 | 68 |
} |
68 | 69 |
|
69 | 70 |
func (pm *Manager) restore(p *v2.Plugin) error { |
... | ... |
@@ -71,7 +82,7 @@ func (pm *Manager) restore(p *v2.Plugin) error { |
71 | 71 |
return err |
72 | 72 |
} |
73 | 73 |
|
74 |
- if pm.liveRestore { |
|
74 |
+ if pm.config.LiveRestoreEnabled { |
|
75 | 75 |
c := &controller{} |
76 | 76 |
if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 { |
77 | 77 |
// plugin is not running, so follow normal startup procedure |
... | ... |
@@ -115,19 +126,19 @@ func (pm *Manager) disable(p *v2.Plugin, c *controller) error { |
115 | 115 |
|
116 | 116 |
c.restart = false |
117 | 117 |
shutdownPlugin(p, c, pm.containerdClient) |
118 |
- pm.pluginStore.SetState(p, false) |
|
119 |
- return nil |
|
118 |
+ pm.config.Store.SetState(p, false) |
|
119 |
+ return pm.save(p) |
|
120 | 120 |
} |
121 | 121 |
|
122 | 122 |
// Shutdown stops all plugins and called during daemon shutdown. |
123 | 123 |
func (pm *Manager) Shutdown() { |
124 |
- plugins := pm.pluginStore.GetAll() |
|
124 |
+ plugins := pm.config.Store.GetAll() |
|
125 | 125 |
for _, p := range plugins { |
126 | 126 |
pm.mu.RLock() |
127 | 127 |
c := pm.cMap[p] |
128 | 128 |
pm.mu.RUnlock() |
129 | 129 |
|
130 |
- if pm.liveRestore && p.IsEnabled() { |
|
130 |
+ if pm.config.LiveRestoreEnabled && p.IsEnabled() { |
|
131 | 131 |
logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") |
132 | 132 |
continue |
133 | 133 |
} |
... | ... |
@@ -137,3 +148,69 @@ func (pm *Manager) Shutdown() { |
137 | 137 |
} |
138 | 138 |
} |
139 | 139 |
} |
140 |
+ |
|
141 |
+// createPlugin creates a new plugin. take lock before calling. |
|
142 |
+func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges) (p *v2.Plugin, err error) { |
|
143 |
+ if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. remove store |
|
144 |
+ return nil, err |
|
145 |
+ } |
|
146 |
+ |
|
147 |
+ configRC, err := pm.blobStore.Get(configDigest) |
|
148 |
+ if err != nil { |
|
149 |
+ return nil, err |
|
150 |
+ } |
|
151 |
+ defer configRC.Close() |
|
152 |
+ |
|
153 |
+ var config types.PluginConfig |
|
154 |
+ dec := json.NewDecoder(configRC) |
|
155 |
+ if err := dec.Decode(&config); err != nil { |
|
156 |
+ return nil, errors.Wrapf(err, "failed to parse config") |
|
157 |
+ } |
|
158 |
+ if dec.More() { |
|
159 |
+ return nil, errors.New("invalid config json") |
|
160 |
+ } |
|
161 |
+ |
|
162 |
+ requiredPrivileges, err := computePrivileges(config) |
|
163 |
+ if err != nil { |
|
164 |
+ return nil, err |
|
165 |
+ } |
|
166 |
+ if privileges != nil { |
|
167 |
+ if err := validatePrivileges(requiredPrivileges, *privileges); err != nil { |
|
168 |
+ return nil, err |
|
169 |
+ } |
|
170 |
+ } |
|
171 |
+ |
|
172 |
+ p = &v2.Plugin{ |
|
173 |
+ PluginObj: types.Plugin{ |
|
174 |
+ Name: name, |
|
175 |
+ ID: stringid.GenerateRandomID(), |
|
176 |
+ Config: config, |
|
177 |
+ }, |
|
178 |
+ Config: configDigest, |
|
179 |
+ Blobsums: blobsums, |
|
180 |
+ } |
|
181 |
+ p.InitEmptySettings() |
|
182 |
+ |
|
183 |
+ pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) |
|
184 |
+ if err := os.MkdirAll(pdir, 0700); err != nil { |
|
185 |
+ return nil, errors.Wrapf(err, "failed to mkdir %v", pdir) |
|
186 |
+ } |
|
187 |
+ |
|
188 |
+ defer func() { |
|
189 |
+ if err != nil { |
|
190 |
+ os.RemoveAll(pdir) |
|
191 |
+ } |
|
192 |
+ }() |
|
193 |
+ |
|
194 |
+ if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil { |
|
195 |
+ return nil, errors.Wrap(err, "failed to rename rootfs") |
|
196 |
+ } |
|
197 |
+ |
|
198 |
+ if err := pm.save(p); err != nil { |
|
199 |
+ return nil, err |
|
200 |
+ } |
|
201 |
+ |
|
202 |
+ pm.config.Store.Add(p) // todo: remove |
|
203 |
+ |
|
204 |
+ return p, nil |
|
205 |
+} |
140 | 206 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,263 @@ |
0 |
+package plugin |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ "strings" |
|
5 |
+ |
|
6 |
+ "github.com/Sirupsen/logrus" |
|
7 |
+ "github.com/docker/docker/pkg/plugingetter" |
|
8 |
+ "github.com/docker/docker/pkg/plugins" |
|
9 |
+ "github.com/docker/docker/plugin/v2" |
|
10 |
+ "github.com/docker/docker/reference" |
|
11 |
+ "github.com/pkg/errors" |
|
12 |
+) |
|
13 |
+ |
|
14 |
+/* allowV1PluginsFallback determines daemon's support for V1 plugins. |
|
15 |
+ * When the time comes to remove support for V1 plugins, flipping |
|
16 |
+ * this bool is all that will be needed. |
|
17 |
+ */ |
|
18 |
+const allowV1PluginsFallback bool = true |
|
19 |
+ |
|
20 |
+/* defaultAPIVersion is the version of the plugin API for volume, network, |
|
21 |
+ IPAM and authz. This is a very stable API. When we update this API, then |
|
22 |
+ pluginType should include a version. e.g. "networkdriver/2.0". |
|
23 |
+*/ |
|
24 |
+const defaultAPIVersion string = "1.0" |
|
25 |
+ |
|
26 |
+// ErrNotFound indicates that a plugin was not found locally. |
|
27 |
+type ErrNotFound string |
|
28 |
+ |
|
29 |
+func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } |
|
30 |
+ |
|
31 |
+// ErrAmbiguous indicates that a plugin was not found locally. |
|
32 |
+type ErrAmbiguous string |
|
33 |
+ |
|
34 |
+func (name ErrAmbiguous) Error() string { |
|
35 |
+ return fmt.Sprintf("multiple plugins found for %q", string(name)) |
|
36 |
+} |
|
37 |
+ |
|
38 |
+// GetV2Plugin retreives a plugin by name, id or partial ID. |
|
39 |
+func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { |
|
40 |
+ ps.RLock() |
|
41 |
+ defer ps.RUnlock() |
|
42 |
+ |
|
43 |
+ id, err := ps.resolvePluginID(refOrID) |
|
44 |
+ if err != nil { |
|
45 |
+ return nil, err |
|
46 |
+ } |
|
47 |
+ |
|
48 |
+ p, idOk := ps.plugins[id] |
|
49 |
+ if !idOk { |
|
50 |
+ return nil, errors.WithStack(ErrNotFound(id)) |
|
51 |
+ } |
|
52 |
+ |
|
53 |
+ return p, nil |
|
54 |
+} |
|
55 |
+ |
|
56 |
+// validateName returns error if name is already reserved. always call with lock and full name |
|
57 |
+func (ps *Store) validateName(name string) error { |
|
58 |
+ for _, p := range ps.plugins { |
|
59 |
+ if p.Name() == name { |
|
60 |
+ return errors.Errorf("%v already exists", name) |
|
61 |
+ } |
|
62 |
+ } |
|
63 |
+ return nil |
|
64 |
+} |
|
65 |
+ |
|
66 |
+// GetAll retreives all plugins. |
|
67 |
+func (ps *Store) GetAll() map[string]*v2.Plugin { |
|
68 |
+ ps.RLock() |
|
69 |
+ defer ps.RUnlock() |
|
70 |
+ return ps.plugins |
|
71 |
+} |
|
72 |
+ |
|
73 |
+// SetAll initialized plugins during daemon restore. |
|
74 |
+func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { |
|
75 |
+ ps.Lock() |
|
76 |
+ defer ps.Unlock() |
|
77 |
+ ps.plugins = plugins |
|
78 |
+} |
|
79 |
+ |
|
80 |
+func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { |
|
81 |
+ ps.RLock() |
|
82 |
+ defer ps.RUnlock() |
|
83 |
+ |
|
84 |
+ result := make([]plugingetter.CompatPlugin, 0, 1) |
|
85 |
+ for _, p := range ps.plugins { |
|
86 |
+ if p.IsEnabled() { |
|
87 |
+ if _, err := p.FilterByCap(capability); err == nil { |
|
88 |
+ result = append(result, p) |
|
89 |
+ } |
|
90 |
+ } |
|
91 |
+ } |
|
92 |
+ return result |
|
93 |
+} |
|
94 |
+ |
|
95 |
+// SetState sets the active state of the plugin and updates plugindb. |
|
96 |
+func (ps *Store) SetState(p *v2.Plugin, state bool) { |
|
97 |
+ ps.Lock() |
|
98 |
+ defer ps.Unlock() |
|
99 |
+ |
|
100 |
+ p.PluginObj.Enabled = state |
|
101 |
+} |
|
102 |
+ |
|
103 |
+// Add adds a plugin to memory and plugindb. |
|
104 |
+// An error will be returned if there is a collision. |
|
105 |
+func (ps *Store) Add(p *v2.Plugin) error { |
|
106 |
+ ps.Lock() |
|
107 |
+ defer ps.Unlock() |
|
108 |
+ |
|
109 |
+ if v, exist := ps.plugins[p.GetID()]; exist { |
|
110 |
+ return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) |
|
111 |
+ } |
|
112 |
+ ps.plugins[p.GetID()] = p |
|
113 |
+ return nil |
|
114 |
+} |
|
115 |
+ |
|
116 |
+// Remove removes a plugin from memory and plugindb. |
|
117 |
+func (ps *Store) Remove(p *v2.Plugin) { |
|
118 |
+ ps.Lock() |
|
119 |
+ delete(ps.plugins, p.GetID()) |
|
120 |
+ ps.Unlock() |
|
121 |
+} |
|
122 |
+ |
|
123 |
+// Get returns an enabled plugin matching the given name and capability. |
|
124 |
+func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { |
|
125 |
+ var ( |
|
126 |
+ p *v2.Plugin |
|
127 |
+ err error |
|
128 |
+ ) |
|
129 |
+ |
|
130 |
+ // Lookup using new model. |
|
131 |
+ if ps != nil { |
|
132 |
+ p, err = ps.GetV2Plugin(name) |
|
133 |
+ if err == nil { |
|
134 |
+ p.AddRefCount(mode) |
|
135 |
+ if p.IsEnabled() { |
|
136 |
+ return p.FilterByCap(capability) |
|
137 |
+ } |
|
138 |
+ // Plugin was found but it is disabled, so we should not fall back to legacy plugins |
|
139 |
+ // but we should error out right away |
|
140 |
+ return nil, ErrNotFound(name) |
|
141 |
+ } |
|
142 |
+ if _, ok := errors.Cause(err).(ErrNotFound); !ok { |
|
143 |
+ return nil, err |
|
144 |
+ } |
|
145 |
+ } |
|
146 |
+ |
|
147 |
+ // Lookup using legacy model. |
|
148 |
+ if allowV1PluginsFallback { |
|
149 |
+ p, err := plugins.Get(name, capability) |
|
150 |
+ if err != nil { |
|
151 |
+ return nil, fmt.Errorf("legacy plugin: %v", err) |
|
152 |
+ } |
|
153 |
+ return p, nil |
|
154 |
+ } |
|
155 |
+ |
|
156 |
+ return nil, err |
|
157 |
+} |
|
158 |
+ |
|
159 |
+// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. |
|
160 |
+func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { |
|
161 |
+ return ps.getAllByCap(capability) |
|
162 |
+} |
|
163 |
+ |
|
164 |
+// GetAllByCap returns a list of enabled plugins matching the given capability. |
|
165 |
+func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { |
|
166 |
+ result := make([]plugingetter.CompatPlugin, 0, 1) |
|
167 |
+ |
|
168 |
+ /* Daemon start always calls plugin.Init thereby initializing a store. |
|
169 |
+ * So store on experimental builds can never be nil, even while |
|
170 |
+ * handling legacy plugins. However, there are legacy plugin unit |
|
171 |
+ * tests where the volume subsystem directly talks with the plugin, |
|
172 |
+ * bypassing the daemon. For such tests, this check is necessary. |
|
173 |
+ */ |
|
174 |
+ if ps != nil { |
|
175 |
+ ps.RLock() |
|
176 |
+ result = ps.getAllByCap(capability) |
|
177 |
+ ps.RUnlock() |
|
178 |
+ } |
|
179 |
+ |
|
180 |
+ // Lookup with legacy model |
|
181 |
+ if allowV1PluginsFallback { |
|
182 |
+ pl, err := plugins.GetAll(capability) |
|
183 |
+ if err != nil { |
|
184 |
+ return nil, fmt.Errorf("legacy plugin: %v", err) |
|
185 |
+ } |
|
186 |
+ for _, p := range pl { |
|
187 |
+ result = append(result, p) |
|
188 |
+ } |
|
189 |
+ } |
|
190 |
+ return result, nil |
|
191 |
+} |
|
192 |
+ |
|
193 |
+// Handle sets a callback for a given capability. It is only used by network |
|
194 |
+// and ipam drivers during plugin registration. The callback registers the |
|
195 |
+// driver with the subsystem (network, ipam). |
|
196 |
+func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { |
|
197 |
+ pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) |
|
198 |
+ |
|
199 |
+ // Register callback with new plugin model. |
|
200 |
+ ps.Lock() |
|
201 |
+ handlers, ok := ps.handlers[pluginType] |
|
202 |
+ if !ok { |
|
203 |
+ handlers = []func(string, *plugins.Client){} |
|
204 |
+ } |
|
205 |
+ handlers = append(handlers, callback) |
|
206 |
+ ps.handlers[pluginType] = handlers |
|
207 |
+ ps.Unlock() |
|
208 |
+ |
|
209 |
+ // Register callback with legacy plugin model. |
|
210 |
+ if allowV1PluginsFallback { |
|
211 |
+ plugins.Handle(capability, callback) |
|
212 |
+ } |
|
213 |
+} |
|
214 |
+ |
|
215 |
+// CallHandler calls the registered callback. It is invoked during plugin enable. |
|
216 |
+func (ps *Store) CallHandler(p *v2.Plugin) { |
|
217 |
+ for _, typ := range p.GetTypes() { |
|
218 |
+ for _, handler := range ps.handlers[typ.String()] { |
|
219 |
+ handler(p.Name(), p.Client()) |
|
220 |
+ } |
|
221 |
+ } |
|
222 |
+} |
|
223 |
+ |
|
224 |
+func (ps *Store) resolvePluginID(idOrName string) (string, error) { |
|
225 |
+ ps.RLock() // todo: fix |
|
226 |
+ defer ps.RUnlock() |
|
227 |
+ |
|
228 |
+ if validFullID.MatchString(idOrName) { |
|
229 |
+ return idOrName, nil |
|
230 |
+ } |
|
231 |
+ |
|
232 |
+ ref, err := reference.ParseNamed(idOrName) |
|
233 |
+ if err != nil { |
|
234 |
+ return "", errors.Wrapf(err, "failed to parse %v", idOrName) |
|
235 |
+ } |
|
236 |
+ if _, ok := ref.(reference.Canonical); ok { |
|
237 |
+ logrus.Warnf("canonical references cannot be resolved: %v", ref.String()) |
|
238 |
+ return "", errors.WithStack(ErrNotFound(idOrName)) |
|
239 |
+ } |
|
240 |
+ |
|
241 |
+ fullRef := reference.WithDefaultTag(ref) |
|
242 |
+ |
|
243 |
+ for _, p := range ps.plugins { |
|
244 |
+ if p.PluginObj.Name == fullRef.String() { |
|
245 |
+ return p.PluginObj.ID, nil |
|
246 |
+ } |
|
247 |
+ } |
|
248 |
+ |
|
249 |
+ var found *v2.Plugin |
|
250 |
+ for id, p := range ps.plugins { // this can be optimized |
|
251 |
+ if strings.HasPrefix(id, idOrName) { |
|
252 |
+ if found != nil { |
|
253 |
+ return "", errors.WithStack(ErrAmbiguous(idOrName)) |
|
254 |
+ } |
|
255 |
+ found = p |
|
256 |
+ } |
|
257 |
+ } |
|
258 |
+ if found == nil { |
|
259 |
+ return "", errors.WithStack(ErrNotFound(idOrName)) |
|
260 |
+ } |
|
261 |
+ return found.PluginObj.ID, nil |
|
262 |
+} |
0 | 263 |
deleted file mode 100644 |
... | ... |
@@ -1,31 +0,0 @@ |
1 |
-package store |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "path/filepath" |
|
5 |
- "sync" |
|
6 |
- |
|
7 |
- "github.com/docker/docker/pkg/plugins" |
|
8 |
- "github.com/docker/docker/plugin/v2" |
|
9 |
-) |
|
10 |
- |
|
11 |
-// Store manages the plugin inventory in memory and on-disk |
|
12 |
-type Store struct { |
|
13 |
- sync.RWMutex |
|
14 |
- plugins map[string]*v2.Plugin |
|
15 |
- /* handlers are necessary for transition path of legacy plugins |
|
16 |
- * to the new model. Legacy plugins use Handle() for registering an |
|
17 |
- * activation callback.*/ |
|
18 |
- handlers map[string][]func(string, *plugins.Client) |
|
19 |
- nameToID map[string]string |
|
20 |
- plugindb string |
|
21 |
-} |
|
22 |
- |
|
23 |
-// NewStore creates a Store. |
|
24 |
-func NewStore(libRoot string) *Store { |
|
25 |
- return &Store{ |
|
26 |
- plugins: make(map[string]*v2.Plugin), |
|
27 |
- handlers: make(map[string][]func(string, *plugins.Client)), |
|
28 |
- nameToID: make(map[string]string), |
|
29 |
- plugindb: filepath.Join(libRoot, "plugins", "plugins.json"), |
|
30 |
- } |
|
31 |
-} |
32 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,294 +0,0 @@ |
1 |
-package store |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "encoding/json" |
|
5 |
- "fmt" |
|
6 |
- "strings" |
|
7 |
- |
|
8 |
- "github.com/Sirupsen/logrus" |
|
9 |
- "github.com/docker/docker/pkg/ioutils" |
|
10 |
- "github.com/docker/docker/pkg/plugingetter" |
|
11 |
- "github.com/docker/docker/pkg/plugins" |
|
12 |
- "github.com/docker/docker/plugin/v2" |
|
13 |
- "github.com/docker/docker/reference" |
|
14 |
-) |
|
15 |
- |
|
16 |
-/* allowV1PluginsFallback determines daemon's support for V1 plugins. |
|
17 |
- * When the time comes to remove support for V1 plugins, flipping |
|
18 |
- * this bool is all that will be needed. |
|
19 |
- */ |
|
20 |
-const allowV1PluginsFallback bool = true |
|
21 |
- |
|
22 |
-/* defaultAPIVersion is the version of the plugin API for volume, network, |
|
23 |
- IPAM and authz. This is a very stable API. When we update this API, then |
|
24 |
- pluginType should include a version. e.g. "networkdriver/2.0". |
|
25 |
-*/ |
|
26 |
-const defaultAPIVersion string = "1.0" |
|
27 |
- |
|
28 |
-// ErrNotFound indicates that a plugin was not found locally. |
|
29 |
-type ErrNotFound string |
|
30 |
- |
|
31 |
-func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } |
|
32 |
- |
|
33 |
-// ErrAmbiguous indicates that a plugin was not found locally. |
|
34 |
-type ErrAmbiguous string |
|
35 |
- |
|
36 |
-func (name ErrAmbiguous) Error() string { |
|
37 |
- return fmt.Sprintf("multiple plugins found for %q", string(name)) |
|
38 |
-} |
|
39 |
- |
|
40 |
-// GetByName retreives a plugin by name. |
|
41 |
-func (ps *Store) GetByName(name string) (*v2.Plugin, error) { |
|
42 |
- ps.RLock() |
|
43 |
- defer ps.RUnlock() |
|
44 |
- |
|
45 |
- id, nameOk := ps.nameToID[name] |
|
46 |
- if !nameOk { |
|
47 |
- return nil, ErrNotFound(name) |
|
48 |
- } |
|
49 |
- |
|
50 |
- p, idOk := ps.plugins[id] |
|
51 |
- if !idOk { |
|
52 |
- return nil, ErrNotFound(id) |
|
53 |
- } |
|
54 |
- return p, nil |
|
55 |
-} |
|
56 |
- |
|
57 |
-// GetByID retreives a plugin by ID. |
|
58 |
-func (ps *Store) GetByID(id string) (*v2.Plugin, error) { |
|
59 |
- ps.RLock() |
|
60 |
- defer ps.RUnlock() |
|
61 |
- |
|
62 |
- p, idOk := ps.plugins[id] |
|
63 |
- if !idOk { |
|
64 |
- return nil, ErrNotFound(id) |
|
65 |
- } |
|
66 |
- return p, nil |
|
67 |
-} |
|
68 |
- |
|
69 |
-// GetAll retreives all plugins. |
|
70 |
-func (ps *Store) GetAll() map[string]*v2.Plugin { |
|
71 |
- ps.RLock() |
|
72 |
- defer ps.RUnlock() |
|
73 |
- return ps.plugins |
|
74 |
-} |
|
75 |
- |
|
76 |
-// SetAll initialized plugins during daemon restore. |
|
77 |
-func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { |
|
78 |
- ps.Lock() |
|
79 |
- defer ps.Unlock() |
|
80 |
- ps.plugins = plugins |
|
81 |
-} |
|
82 |
- |
|
83 |
-func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { |
|
84 |
- ps.RLock() |
|
85 |
- defer ps.RUnlock() |
|
86 |
- |
|
87 |
- result := make([]plugingetter.CompatPlugin, 0, 1) |
|
88 |
- for _, p := range ps.plugins { |
|
89 |
- if p.IsEnabled() { |
|
90 |
- if _, err := p.FilterByCap(capability); err == nil { |
|
91 |
- result = append(result, p) |
|
92 |
- } |
|
93 |
- } |
|
94 |
- } |
|
95 |
- return result |
|
96 |
-} |
|
97 |
- |
|
98 |
-// SetState sets the active state of the plugin and updates plugindb. |
|
99 |
-func (ps *Store) SetState(p *v2.Plugin, state bool) { |
|
100 |
- ps.Lock() |
|
101 |
- defer ps.Unlock() |
|
102 |
- |
|
103 |
- p.PluginObj.Enabled = state |
|
104 |
- ps.updatePluginDB() |
|
105 |
-} |
|
106 |
- |
|
107 |
-// Add adds a plugin to memory and plugindb. |
|
108 |
-// An error will be returned if there is a collision. |
|
109 |
-func (ps *Store) Add(p *v2.Plugin) error { |
|
110 |
- ps.Lock() |
|
111 |
- defer ps.Unlock() |
|
112 |
- |
|
113 |
- if v, exist := ps.plugins[p.GetID()]; exist { |
|
114 |
- return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) |
|
115 |
- } |
|
116 |
- // Since both Pull() and CreateFromContext() calls GetByName() before any plugin |
|
117 |
- // to search for collision (to fail fast), it is unlikely the following check |
|
118 |
- // will return an error. |
|
119 |
- // However, in case two CreateFromContext() are called at the same time, |
|
120 |
- // there is still a remote possibility that a collision might happen. |
|
121 |
- // For that reason we still perform the collision check below as it is protected |
|
122 |
- // by ps.Lock() and ps.Unlock() above. |
|
123 |
- if _, exist := ps.nameToID[p.Name()]; exist { |
|
124 |
- return fmt.Errorf("plugin %q already exists", p.Name()) |
|
125 |
- } |
|
126 |
- ps.plugins[p.GetID()] = p |
|
127 |
- ps.nameToID[p.Name()] = p.GetID() |
|
128 |
- ps.updatePluginDB() |
|
129 |
- return nil |
|
130 |
-} |
|
131 |
- |
|
132 |
-// Update updates a plugin to memory and plugindb. |
|
133 |
-func (ps *Store) Update(p *v2.Plugin) { |
|
134 |
- ps.Lock() |
|
135 |
- defer ps.Unlock() |
|
136 |
- |
|
137 |
- ps.plugins[p.GetID()] = p |
|
138 |
- ps.nameToID[p.Name()] = p.GetID() |
|
139 |
- ps.updatePluginDB() |
|
140 |
-} |
|
141 |
- |
|
142 |
-// Remove removes a plugin from memory and plugindb. |
|
143 |
-func (ps *Store) Remove(p *v2.Plugin) { |
|
144 |
- ps.Lock() |
|
145 |
- delete(ps.plugins, p.GetID()) |
|
146 |
- delete(ps.nameToID, p.Name()) |
|
147 |
- ps.updatePluginDB() |
|
148 |
- ps.Unlock() |
|
149 |
-} |
|
150 |
- |
|
151 |
-// Callers are expected to hold the store lock. |
|
152 |
-func (ps *Store) updatePluginDB() error { |
|
153 |
- jsonData, err := json.Marshal(ps.plugins) |
|
154 |
- if err != nil { |
|
155 |
- logrus.Debugf("Error in json.Marshal: %v", err) |
|
156 |
- return err |
|
157 |
- } |
|
158 |
- ioutils.AtomicWriteFile(ps.plugindb, jsonData, 0600) |
|
159 |
- return nil |
|
160 |
-} |
|
161 |
- |
|
162 |
-// Get returns an enabled plugin matching the given name and capability. |
|
163 |
-func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { |
|
164 |
- var ( |
|
165 |
- p *v2.Plugin |
|
166 |
- err error |
|
167 |
- ) |
|
168 |
- |
|
169 |
- // Lookup using new model. |
|
170 |
- if ps != nil { |
|
171 |
- fullName := name |
|
172 |
- if named, err := reference.ParseNamed(fullName); err == nil { // FIXME: validate |
|
173 |
- if reference.IsNameOnly(named) { |
|
174 |
- named = reference.WithDefaultTag(named) |
|
175 |
- } |
|
176 |
- ref, ok := named.(reference.NamedTagged) |
|
177 |
- if !ok { |
|
178 |
- return nil, fmt.Errorf("invalid name: %s", named.String()) |
|
179 |
- } |
|
180 |
- fullName = ref.String() |
|
181 |
- } |
|
182 |
- p, err = ps.GetByName(fullName) |
|
183 |
- if err == nil { |
|
184 |
- p.AddRefCount(mode) |
|
185 |
- if p.IsEnabled() { |
|
186 |
- return p.FilterByCap(capability) |
|
187 |
- } |
|
188 |
- // Plugin was found but it is disabled, so we should not fall back to legacy plugins |
|
189 |
- // but we should error out right away |
|
190 |
- return nil, ErrNotFound(fullName) |
|
191 |
- } |
|
192 |
- if _, ok := err.(ErrNotFound); !ok { |
|
193 |
- return nil, err |
|
194 |
- } |
|
195 |
- } |
|
196 |
- |
|
197 |
- // Lookup using legacy model. |
|
198 |
- if allowV1PluginsFallback { |
|
199 |
- p, err := plugins.Get(name, capability) |
|
200 |
- if err != nil { |
|
201 |
- return nil, fmt.Errorf("legacy plugin: %v", err) |
|
202 |
- } |
|
203 |
- return p, nil |
|
204 |
- } |
|
205 |
- |
|
206 |
- return nil, err |
|
207 |
-} |
|
208 |
- |
|
209 |
-// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. |
|
210 |
-func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { |
|
211 |
- return ps.getAllByCap(capability) |
|
212 |
-} |
|
213 |
- |
|
214 |
-// GetAllByCap returns a list of enabled plugins matching the given capability. |
|
215 |
-func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { |
|
216 |
- result := make([]plugingetter.CompatPlugin, 0, 1) |
|
217 |
- |
|
218 |
- /* Daemon start always calls plugin.Init thereby initializing a store. |
|
219 |
- * So store on experimental builds can never be nil, even while |
|
220 |
- * handling legacy plugins. However, there are legacy plugin unit |
|
221 |
- * tests where the volume subsystem directly talks with the plugin, |
|
222 |
- * bypassing the daemon. For such tests, this check is necessary. |
|
223 |
- */ |
|
224 |
- if ps != nil { |
|
225 |
- ps.RLock() |
|
226 |
- result = ps.getAllByCap(capability) |
|
227 |
- ps.RUnlock() |
|
228 |
- } |
|
229 |
- |
|
230 |
- // Lookup with legacy model |
|
231 |
- if allowV1PluginsFallback { |
|
232 |
- pl, err := plugins.GetAll(capability) |
|
233 |
- if err != nil { |
|
234 |
- return nil, fmt.Errorf("legacy plugin: %v", err) |
|
235 |
- } |
|
236 |
- for _, p := range pl { |
|
237 |
- result = append(result, p) |
|
238 |
- } |
|
239 |
- } |
|
240 |
- return result, nil |
|
241 |
-} |
|
242 |
- |
|
243 |
-// Handle sets a callback for a given capability. It is only used by network |
|
244 |
-// and ipam drivers during plugin registration. The callback registers the |
|
245 |
-// driver with the subsystem (network, ipam). |
|
246 |
-func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { |
|
247 |
- pluginType := fmt.Sprintf("docker.%s/%s", strings.ToLower(capability), defaultAPIVersion) |
|
248 |
- |
|
249 |
- // Register callback with new plugin model. |
|
250 |
- ps.Lock() |
|
251 |
- handlers, ok := ps.handlers[pluginType] |
|
252 |
- if !ok { |
|
253 |
- handlers = []func(string, *plugins.Client){} |
|
254 |
- } |
|
255 |
- handlers = append(handlers, callback) |
|
256 |
- ps.handlers[pluginType] = handlers |
|
257 |
- ps.Unlock() |
|
258 |
- |
|
259 |
- // Register callback with legacy plugin model. |
|
260 |
- if allowV1PluginsFallback { |
|
261 |
- plugins.Handle(capability, callback) |
|
262 |
- } |
|
263 |
-} |
|
264 |
- |
|
265 |
-// CallHandler calls the registered callback. It is invoked during plugin enable. |
|
266 |
-func (ps *Store) CallHandler(p *v2.Plugin) { |
|
267 |
- for _, typ := range p.GetTypes() { |
|
268 |
- for _, handler := range ps.handlers[typ.String()] { |
|
269 |
- handler(p.Name(), p.Client()) |
|
270 |
- } |
|
271 |
- } |
|
272 |
-} |
|
273 |
- |
|
274 |
-// Search retreives a plugin by ID Prefix |
|
275 |
-// If no plugin is found, then ErrNotFound is returned |
|
276 |
-// If multiple plugins are found, then ErrAmbiguous is returned |
|
277 |
-func (ps *Store) Search(partialID string) (*v2.Plugin, error) { |
|
278 |
- ps.RLock() |
|
279 |
- defer ps.RUnlock() |
|
280 |
- |
|
281 |
- var found *v2.Plugin |
|
282 |
- for id, p := range ps.plugins { |
|
283 |
- if strings.HasPrefix(id, partialID) { |
|
284 |
- if found != nil { |
|
285 |
- return nil, ErrAmbiguous(partialID) |
|
286 |
- } |
|
287 |
- found = p |
|
288 |
- } |
|
289 |
- } |
|
290 |
- if found == nil { |
|
291 |
- return nil, ErrNotFound(partialID) |
|
292 |
- } |
|
293 |
- return found, nil |
|
294 |
-} |
295 | 1 |
deleted file mode 100644 |
... | ... |
@@ -1,34 +0,0 @@ |
1 |
-package store |
|
2 |
- |
|
3 |
-import ( |
|
4 |
- "testing" |
|
5 |
- |
|
6 |
- "github.com/docker/docker/api/types" |
|
7 |
- "github.com/docker/docker/plugin/v2" |
|
8 |
-) |
|
9 |
- |
|
10 |
-func TestFilterByCapNeg(t *testing.T) { |
|
11 |
- p := v2.NewPlugin("test", "1234567890", "/run/docker", "/var/lib/docker/plugins", "latest") |
|
12 |
- |
|
13 |
- iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} |
|
14 |
- i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} |
|
15 |
- p.PluginObj.Config.Interface = i |
|
16 |
- |
|
17 |
- _, err := p.FilterByCap("foobar") |
|
18 |
- if err == nil { |
|
19 |
- t.Fatalf("expected inadequate error, got %v", err) |
|
20 |
- } |
|
21 |
-} |
|
22 |
- |
|
23 |
-func TestFilterByCapPos(t *testing.T) { |
|
24 |
- p := v2.NewPlugin("test", "1234567890", "/run/docker", "/var/lib/docker/plugins", "latest") |
|
25 |
- |
|
26 |
- iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} |
|
27 |
- i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} |
|
28 |
- p.PluginObj.Config.Interface = i |
|
29 |
- |
|
30 |
- _, err := p.FilterByCap("volumedriver") |
|
31 |
- if err != nil { |
|
32 |
- t.Fatalf("expected no error, got %v", err) |
|
33 |
- } |
|
34 |
-} |
35 | 1 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,33 @@ |
0 |
+package plugin |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "testing" |
|
4 |
+ |
|
5 |
+ "github.com/docker/docker/api/types" |
|
6 |
+ "github.com/docker/docker/plugin/v2" |
|
7 |
+) |
|
8 |
+ |
|
9 |
+func TestFilterByCapNeg(t *testing.T) { |
|
10 |
+ p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} |
|
11 |
+ iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} |
|
12 |
+ i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} |
|
13 |
+ p.PluginObj.Config.Interface = i |
|
14 |
+ |
|
15 |
+ _, err := p.FilterByCap("foobar") |
|
16 |
+ if err == nil { |
|
17 |
+ t.Fatalf("expected inadequate error, got %v", err) |
|
18 |
+ } |
|
19 |
+} |
|
20 |
+ |
|
21 |
+func TestFilterByCapPos(t *testing.T) { |
|
22 |
+ p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}} |
|
23 |
+ |
|
24 |
+ iType := types.PluginInterfaceType{"volumedriver", "docker", "1.0"} |
|
25 |
+ i := types.PluginConfigInterface{"plugins.sock", []types.PluginInterfaceType{iType}} |
|
26 |
+ p.PluginObj.Config.Interface = i |
|
27 |
+ |
|
28 |
+ _, err := p.FilterByCap("volumedriver") |
|
29 |
+ if err != nil { |
|
30 |
+ t.Fatalf("expected no error, got %v", err) |
|
31 |
+ } |
|
32 |
+} |
... | ... |
@@ -1,32 +1,27 @@ |
1 | 1 |
package v2 |
2 | 2 |
|
3 | 3 |
import ( |
4 |
- "encoding/json" |
|
5 |
- "errors" |
|
6 | 4 |
"fmt" |
7 |
- "os" |
|
8 |
- "path/filepath" |
|
9 | 5 |
"strings" |
10 | 6 |
"sync" |
11 | 7 |
|
8 |
+ "github.com/docker/distribution/digest" |
|
12 | 9 |
"github.com/docker/docker/api/types" |
13 |
- "github.com/docker/docker/oci" |
|
14 | 10 |
"github.com/docker/docker/pkg/plugingetter" |
15 | 11 |
"github.com/docker/docker/pkg/plugins" |
16 |
- "github.com/docker/docker/pkg/system" |
|
17 |
- specs "github.com/opencontainers/runtime-spec/specs-go" |
|
18 | 12 |
) |
19 | 13 |
|
20 | 14 |
// Plugin represents an individual plugin. |
21 | 15 |
type Plugin struct { |
22 |
- mu sync.RWMutex |
|
23 |
- PluginObj types.Plugin `json:"plugin"` |
|
24 |
- pClient *plugins.Client |
|
25 |
- runtimeSourcePath string |
|
26 |
- refCount int |
|
27 |
- LibRoot string // TODO: make private |
|
28 |
- PropagatedMount string // TODO: make private |
|
29 |
- Rootfs string // TODO: make private |
|
16 |
+ mu sync.RWMutex |
|
17 |
+ PluginObj types.Plugin `json:"plugin"` // todo: embed struct |
|
18 |
+ pClient *plugins.Client |
|
19 |
+ refCount int |
|
20 |
+ PropagatedMount string // TODO: make private |
|
21 |
+ Rootfs string // TODO: make private |
|
22 |
+ |
|
23 |
+ Config digest.Digest |
|
24 |
+ Blobsums []digest.Digest |
|
30 | 25 |
} |
31 | 26 |
|
32 | 27 |
const defaultPluginRuntimeDestination = "/run/docker/plugins" |
... | ... |
@@ -40,33 +35,6 @@ func (e ErrInadequateCapability) Error() string { |
40 | 40 |
return fmt.Sprintf("plugin does not provide %q capability", e.cap) |
41 | 41 |
} |
42 | 42 |
|
43 |
-func newPluginObj(name, id, tag string) types.Plugin { |
|
44 |
- return types.Plugin{Name: name, ID: id, Tag: tag} |
|
45 |
-} |
|
46 |
- |
|
47 |
-// NewPlugin creates a plugin. |
|
48 |
-func NewPlugin(name, id, runRoot, libRoot, tag string) *Plugin { |
|
49 |
- return &Plugin{ |
|
50 |
- PluginObj: newPluginObj(name, id, tag), |
|
51 |
- runtimeSourcePath: filepath.Join(runRoot, id), |
|
52 |
- LibRoot: libRoot, |
|
53 |
- } |
|
54 |
-} |
|
55 |
- |
|
56 |
-// Restore restores the plugin |
|
57 |
-func (p *Plugin) Restore(runRoot string) { |
|
58 |
- p.runtimeSourcePath = filepath.Join(runRoot, p.GetID()) |
|
59 |
-} |
|
60 |
- |
|
61 |
-// GetRuntimeSourcePath gets the Source (host) path of the plugin socket |
|
62 |
-// This path gets bind mounted into the plugin. |
|
63 |
-func (p *Plugin) GetRuntimeSourcePath() string { |
|
64 |
- p.mu.RLock() |
|
65 |
- defer p.mu.RUnlock() |
|
66 |
- |
|
67 |
- return p.runtimeSourcePath |
|
68 |
-} |
|
69 |
- |
|
70 | 43 |
// BasePath returns the path to which all paths returned by the plugin are relative to. |
71 | 44 |
// For Plugin objects this returns the host path of the plugin container's rootfs. |
72 | 45 |
func (p *Plugin) BasePath() string { |
... | ... |
@@ -96,12 +64,7 @@ func (p *Plugin) IsV1() bool { |
96 | 96 |
|
97 | 97 |
// Name returns the plugin name. |
98 | 98 |
func (p *Plugin) Name() string { |
99 |
- name := p.PluginObj.Name |
|
100 |
- if len(p.PluginObj.Tag) > 0 { |
|
101 |
- // TODO: this feels hacky, maybe we should be storing the distribution reference rather than splitting these |
|
102 |
- name += ":" + p.PluginObj.Tag |
|
103 |
- } |
|
104 |
- return name |
|
99 |
+ return p.PluginObj.Name |
|
105 | 100 |
} |
106 | 101 |
|
107 | 102 |
// FilterByCap query the plugin for a given capability. |
... | ... |
@@ -115,23 +78,8 @@ func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { |
115 | 115 |
return nil, ErrInadequateCapability{capability} |
116 | 116 |
} |
117 | 117 |
|
118 |
-// RemoveFromDisk deletes the plugin's runtime files from disk. |
|
119 |
-func (p *Plugin) RemoveFromDisk() error { |
|
120 |
- return os.RemoveAll(p.runtimeSourcePath) |
|
121 |
-} |
|
122 |
- |
|
123 |
-// InitPlugin populates the plugin object from the plugin config file. |
|
124 |
-func (p *Plugin) InitPlugin() error { |
|
125 |
- dt, err := os.Open(filepath.Join(p.LibRoot, p.PluginObj.ID, "config.json")) |
|
126 |
- if err != nil { |
|
127 |
- return err |
|
128 |
- } |
|
129 |
- err = json.NewDecoder(dt).Decode(&p.PluginObj.Config) |
|
130 |
- dt.Close() |
|
131 |
- if err != nil { |
|
132 |
- return err |
|
133 |
- } |
|
134 |
- |
|
118 |
+// InitEmptySettings initializes empty settings for a plugin. |
|
119 |
+func (p *Plugin) InitEmptySettings() { |
|
135 | 120 |
p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) |
136 | 121 |
copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) |
137 | 122 |
p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) |
... | ... |
@@ -144,18 +92,6 @@ func (p *Plugin) InitPlugin() error { |
144 | 144 |
} |
145 | 145 |
p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) |
146 | 146 |
copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) |
147 |
- |
|
148 |
- return p.writeSettings() |
|
149 |
-} |
|
150 |
- |
|
151 |
-func (p *Plugin) writeSettings() error { |
|
152 |
- f, err := os.Create(filepath.Join(p.LibRoot, p.PluginObj.ID, "plugin-settings.json")) |
|
153 |
- if err != nil { |
|
154 |
- return err |
|
155 |
- } |
|
156 |
- err = json.NewEncoder(f).Encode(&p.PluginObj.Settings) |
|
157 |
- f.Close() |
|
158 |
- return err |
|
159 | 147 |
} |
160 | 148 |
|
161 | 149 |
// Set is used to pass arguments to the plugin. |
... | ... |
@@ -243,8 +179,7 @@ next: |
243 | 243 |
return fmt.Errorf("setting %q not found in the plugin configuration", s.name) |
244 | 244 |
} |
245 | 245 |
|
246 |
- // update the settings on disk |
|
247 |
- return p.writeSettings() |
|
246 |
+ return nil |
|
248 | 247 |
} |
249 | 248 |
|
250 | 249 |
// IsEnabled returns the active state of the plugin. |
... | ... |
@@ -307,107 +242,3 @@ func (p *Plugin) Acquire() { |
307 | 307 |
func (p *Plugin) Release() { |
308 | 308 |
p.AddRefCount(plugingetter.RELEASE) |
309 | 309 |
} |
310 |
- |
|
311 |
-// InitSpec creates an OCI spec from the plugin's config. |
|
312 |
-func (p *Plugin) InitSpec(s specs.Spec) (*specs.Spec, error) { |
|
313 |
- s.Root = specs.Root{ |
|
314 |
- Path: p.Rootfs, |
|
315 |
- Readonly: false, // TODO: all plugins should be readonly? settable in config? |
|
316 |
- } |
|
317 |
- |
|
318 |
- userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) |
|
319 |
- for _, m := range p.PluginObj.Settings.Mounts { |
|
320 |
- userMounts[m.Destination] = struct{}{} |
|
321 |
- } |
|
322 |
- |
|
323 |
- if err := os.MkdirAll(p.runtimeSourcePath, 0755); err != nil { |
|
324 |
- return nil, err |
|
325 |
- } |
|
326 |
- |
|
327 |
- mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ |
|
328 |
- Source: &p.runtimeSourcePath, |
|
329 |
- Destination: defaultPluginRuntimeDestination, |
|
330 |
- Type: "bind", |
|
331 |
- Options: []string{"rbind", "rshared"}, |
|
332 |
- }) |
|
333 |
- |
|
334 |
- if p.PluginObj.Config.Network.Type != "" { |
|
335 |
- // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) |
|
336 |
- if p.PluginObj.Config.Network.Type == "host" { |
|
337 |
- oci.RemoveNamespace(&s, specs.NamespaceType("network")) |
|
338 |
- } |
|
339 |
- etcHosts := "/etc/hosts" |
|
340 |
- resolvConf := "/etc/resolv.conf" |
|
341 |
- mounts = append(mounts, |
|
342 |
- types.PluginMount{ |
|
343 |
- Source: &etcHosts, |
|
344 |
- Destination: etcHosts, |
|
345 |
- Type: "bind", |
|
346 |
- Options: []string{"rbind", "ro"}, |
|
347 |
- }, |
|
348 |
- types.PluginMount{ |
|
349 |
- Source: &resolvConf, |
|
350 |
- Destination: resolvConf, |
|
351 |
- Type: "bind", |
|
352 |
- Options: []string{"rbind", "ro"}, |
|
353 |
- }) |
|
354 |
- } |
|
355 |
- |
|
356 |
- for _, mnt := range mounts { |
|
357 |
- m := specs.Mount{ |
|
358 |
- Destination: mnt.Destination, |
|
359 |
- Type: mnt.Type, |
|
360 |
- Options: mnt.Options, |
|
361 |
- } |
|
362 |
- if mnt.Source == nil { |
|
363 |
- return nil, errors.New("mount source is not specified") |
|
364 |
- } |
|
365 |
- m.Source = *mnt.Source |
|
366 |
- s.Mounts = append(s.Mounts, m) |
|
367 |
- } |
|
368 |
- |
|
369 |
- for i, m := range s.Mounts { |
|
370 |
- if strings.HasPrefix(m.Destination, "/dev/") { |
|
371 |
- if _, ok := userMounts[m.Destination]; ok { |
|
372 |
- s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) |
|
373 |
- } |
|
374 |
- } |
|
375 |
- } |
|
376 |
- |
|
377 |
- if p.PluginObj.Config.PropagatedMount != "" { |
|
378 |
- p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) |
|
379 |
- s.Linux.RootfsPropagation = "rshared" |
|
380 |
- } |
|
381 |
- |
|
382 |
- if p.PluginObj.Config.Linux.DeviceCreation { |
|
383 |
- rwm := "rwm" |
|
384 |
- s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} |
|
385 |
- } |
|
386 |
- for _, dev := range p.PluginObj.Settings.Devices { |
|
387 |
- path := *dev.Path |
|
388 |
- d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") |
|
389 |
- if err != nil { |
|
390 |
- return nil, err |
|
391 |
- } |
|
392 |
- s.Linux.Devices = append(s.Linux.Devices, d...) |
|
393 |
- s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) |
|
394 |
- } |
|
395 |
- |
|
396 |
- envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) |
|
397 |
- envs[0] = "PATH=" + system.DefaultPathEnv |
|
398 |
- envs = append(envs, p.PluginObj.Settings.Env...) |
|
399 |
- |
|
400 |
- args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) |
|
401 |
- cwd := p.PluginObj.Config.Workdir |
|
402 |
- if len(cwd) == 0 { |
|
403 |
- cwd = "/" |
|
404 |
- } |
|
405 |
- s.Process.Terminal = false |
|
406 |
- s.Process.Args = args |
|
407 |
- s.Process.Cwd = cwd |
|
408 |
- s.Process.Env = envs |
|
409 |
- |
|
410 |
- s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) |
|
411 |
- |
|
412 |
- return &s, nil |
|
413 |
-} |
414 | 310 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,121 @@ |
0 |
+// +build linux |
|
1 |
+ |
|
2 |
+package v2 |
|
3 |
+ |
|
4 |
+import ( |
|
5 |
+ "errors" |
|
6 |
+ "os" |
|
7 |
+ "path/filepath" |
|
8 |
+ "strings" |
|
9 |
+ |
|
10 |
+ "github.com/docker/docker/api/types" |
|
11 |
+ "github.com/docker/docker/oci" |
|
12 |
+ "github.com/docker/docker/pkg/system" |
|
13 |
+ specs "github.com/opencontainers/runtime-spec/specs-go" |
|
14 |
+) |
|
15 |
+ |
|
16 |
+// InitSpec creates an OCI spec from the plugin's config. |
|
17 |
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { |
|
18 |
+ s := oci.DefaultSpec() |
|
19 |
+ s.Root = specs.Root{ |
|
20 |
+ Path: p.Rootfs, |
|
21 |
+ Readonly: false, // TODO: all plugins should be readonly? settable in config? |
|
22 |
+ } |
|
23 |
+ |
|
24 |
+ userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) |
|
25 |
+ for _, m := range p.PluginObj.Settings.Mounts { |
|
26 |
+ userMounts[m.Destination] = struct{}{} |
|
27 |
+ } |
|
28 |
+ |
|
29 |
+ execRoot = filepath.Join(execRoot, p.PluginObj.ID) |
|
30 |
+ if err := os.MkdirAll(execRoot, 0700); err != nil { |
|
31 |
+ return nil, err |
|
32 |
+ } |
|
33 |
+ |
|
34 |
+ mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ |
|
35 |
+ Source: &execRoot, |
|
36 |
+ Destination: defaultPluginRuntimeDestination, |
|
37 |
+ Type: "bind", |
|
38 |
+ Options: []string{"rbind", "rshared"}, |
|
39 |
+ }) |
|
40 |
+ |
|
41 |
+ if p.PluginObj.Config.Network.Type != "" { |
|
42 |
+ // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) |
|
43 |
+ if p.PluginObj.Config.Network.Type == "host" { |
|
44 |
+ oci.RemoveNamespace(&s, specs.NamespaceType("network")) |
|
45 |
+ } |
|
46 |
+ etcHosts := "/etc/hosts" |
|
47 |
+ resolvConf := "/etc/resolv.conf" |
|
48 |
+ mounts = append(mounts, |
|
49 |
+ types.PluginMount{ |
|
50 |
+ Source: &etcHosts, |
|
51 |
+ Destination: etcHosts, |
|
52 |
+ Type: "bind", |
|
53 |
+ Options: []string{"rbind", "ro"}, |
|
54 |
+ }, |
|
55 |
+ types.PluginMount{ |
|
56 |
+ Source: &resolvConf, |
|
57 |
+ Destination: resolvConf, |
|
58 |
+ Type: "bind", |
|
59 |
+ Options: []string{"rbind", "ro"}, |
|
60 |
+ }) |
|
61 |
+ } |
|
62 |
+ |
|
63 |
+ for _, mnt := range mounts { |
|
64 |
+ m := specs.Mount{ |
|
65 |
+ Destination: mnt.Destination, |
|
66 |
+ Type: mnt.Type, |
|
67 |
+ Options: mnt.Options, |
|
68 |
+ } |
|
69 |
+ if mnt.Source == nil { |
|
70 |
+ return nil, errors.New("mount source is not specified") |
|
71 |
+ } |
|
72 |
+ m.Source = *mnt.Source |
|
73 |
+ s.Mounts = append(s.Mounts, m) |
|
74 |
+ } |
|
75 |
+ |
|
76 |
+ for i, m := range s.Mounts { |
|
77 |
+ if strings.HasPrefix(m.Destination, "/dev/") { |
|
78 |
+ if _, ok := userMounts[m.Destination]; ok { |
|
79 |
+ s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) |
|
80 |
+ } |
|
81 |
+ } |
|
82 |
+ } |
|
83 |
+ |
|
84 |
+ if p.PluginObj.Config.PropagatedMount != "" { |
|
85 |
+ p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) |
|
86 |
+ s.Linux.RootfsPropagation = "rshared" |
|
87 |
+ } |
|
88 |
+ |
|
89 |
+ if p.PluginObj.Config.Linux.DeviceCreation { |
|
90 |
+ rwm := "rwm" |
|
91 |
+ s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}} |
|
92 |
+ } |
|
93 |
+ for _, dev := range p.PluginObj.Settings.Devices { |
|
94 |
+ path := *dev.Path |
|
95 |
+ d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") |
|
96 |
+ if err != nil { |
|
97 |
+ return nil, err |
|
98 |
+ } |
|
99 |
+ s.Linux.Devices = append(s.Linux.Devices, d...) |
|
100 |
+ s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) |
|
101 |
+ } |
|
102 |
+ |
|
103 |
+ envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) |
|
104 |
+ envs[0] = "PATH=" + system.DefaultPathEnv |
|
105 |
+ envs = append(envs, p.PluginObj.Settings.Env...) |
|
106 |
+ |
|
107 |
+ args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) |
|
108 |
+ cwd := p.PluginObj.Config.WorkDir |
|
109 |
+ if len(cwd) == 0 { |
|
110 |
+ cwd = "/" |
|
111 |
+ } |
|
112 |
+ s.Process.Terminal = false |
|
113 |
+ s.Process.Args = args |
|
114 |
+ s.Process.Cwd = cwd |
|
115 |
+ s.Process.Env = envs |
|
116 |
+ |
|
117 |
+ s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...) |
|
118 |
+ |
|
119 |
+ return &s, nil |
|
120 |
+} |
0 | 121 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,14 @@ |
0 |
+// +build !linux |
|
1 |
+ |
|
2 |
+package v2 |
|
3 |
+ |
|
4 |
+import ( |
|
5 |
+ "errors" |
|
6 |
+ |
|
7 |
+ specs "github.com/opencontainers/runtime-spec/specs-go" |
|
8 |
+) |
|
9 |
+ |
|
10 |
+// InitSpec creates an OCI spec from the plugin's config. |
|
11 |
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { |
|
12 |
+ return nil, errors.New("not supported") |
|
13 |
+} |
... | ... |
@@ -111,23 +111,25 @@ func lookup(name string, mode int) (volume.Driver, error) { |
111 | 111 |
if ok { |
112 | 112 |
return ext, nil |
113 | 113 |
} |
114 |
+ if drivers.plugingetter != nil { |
|
115 |
+ p, err := drivers.plugingetter.Get(name, extName, mode) |
|
116 |
+ if err != nil { |
|
117 |
+ return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) |
|
118 |
+ } |
|
114 | 119 |
|
115 |
- p, err := drivers.plugingetter.Get(name, extName, mode) |
|
116 |
- if err != nil { |
|
117 |
- return nil, fmt.Errorf("Error looking up volume plugin %s: %v", name, err) |
|
118 |
- } |
|
119 |
- |
|
120 |
- d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client()) |
|
121 |
- if err := validateDriver(d); err != nil { |
|
122 |
- return nil, err |
|
123 |
- } |
|
120 |
+ d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client()) |
|
121 |
+ if err := validateDriver(d); err != nil { |
|
122 |
+ return nil, err |
|
123 |
+ } |
|
124 | 124 |
|
125 |
- if p.IsV1() { |
|
126 |
- drivers.Lock() |
|
127 |
- drivers.extensions[name] = d |
|
128 |
- drivers.Unlock() |
|
125 |
+ if p.IsV1() { |
|
126 |
+ drivers.Lock() |
|
127 |
+ drivers.extensions[name] = d |
|
128 |
+ drivers.Unlock() |
|
129 |
+ } |
|
130 |
+ return d, nil |
|
129 | 131 |
} |
130 |
- return d, nil |
|
132 |
+ return nil, fmt.Errorf("Error looking up volume plugin %s", name) |
|
131 | 133 |
} |
132 | 134 |
|
133 | 135 |
func validateDriver(vd volume.Driver) error { |
... | ... |
@@ -179,9 +181,13 @@ func GetDriverList() []string { |
179 | 179 |
|
180 | 180 |
// GetAllDrivers lists all the registered drivers |
181 | 181 |
func GetAllDrivers() ([]volume.Driver, error) { |
182 |
- plugins, err := drivers.plugingetter.GetAllByCap(extName) |
|
183 |
- if err != nil { |
|
184 |
- return nil, fmt.Errorf("error listing plugins: %v", err) |
|
182 |
+ var plugins []getter.CompatPlugin |
|
183 |
+ if drivers.plugingetter != nil { |
|
184 |
+ var err error |
|
185 |
+ plugins, err = drivers.plugingetter.GetAllByCap(extName) |
|
186 |
+ if err != nil { |
|
187 |
+ return nil, fmt.Errorf("error listing plugins: %v", err) |
|
188 |
+ } |
|
185 | 189 |
} |
186 | 190 |
var ds []volume.Driver |
187 | 191 |
|
... | ... |
@@ -3,14 +3,10 @@ package volumedrivers |
3 | 3 |
import ( |
4 | 4 |
"testing" |
5 | 5 |
|
6 |
- pluginstore "github.com/docker/docker/plugin/store" |
|
7 | 6 |
volumetestutils "github.com/docker/docker/volume/testutils" |
8 | 7 |
) |
9 | 8 |
|
10 | 9 |
func TestGetDriver(t *testing.T) { |
11 |
- pluginStore := pluginstore.NewStore("/var/lib/docker") |
|
12 |
- RegisterPluginGetter(pluginStore) |
|
13 |
- |
|
14 | 10 |
_, err := GetDriver("missing") |
15 | 11 |
if err == nil { |
16 | 12 |
t.Fatal("Expected error, was nil") |
... | ... |
@@ -7,15 +7,11 @@ import ( |
7 | 7 |
"strings" |
8 | 8 |
"testing" |
9 | 9 |
|
10 |
- pluginstore "github.com/docker/docker/plugin/store" |
|
11 | 10 |
"github.com/docker/docker/volume/drivers" |
12 | 11 |
volumetestutils "github.com/docker/docker/volume/testutils" |
13 | 12 |
) |
14 | 13 |
|
15 | 14 |
func TestCreate(t *testing.T) { |
16 |
- pluginStore := pluginstore.NewStore("/var/lib/docker") |
|
17 |
- volumedrivers.RegisterPluginGetter(pluginStore) |
|
18 |
- |
|
19 | 15 |
volumedrivers.Register(volumetestutils.NewFakeDriver("fake"), "fake") |
20 | 16 |
defer volumedrivers.Unregister("fake") |
21 | 17 |
dir, err := ioutil.TempDir("", "test-create") |