Browse code

Flexible dockerfile builder that uses client calls

This commit adds a library for executing Dockerfile builds without a
direct dependency on the Docker daemon, and is able to build an entire
Dockerfile into a squashed layer directly. It uses the upload and exec
calls for COPY, ADD, and RUN, and reuses the same logic as the existing
builder for transforming environment variables and arguments.

Clayton Coleman authored on 2016/03/07 14:17:26
Showing 39 changed files
... ...
@@ -8163,6 +8163,88 @@ _oc_volumes()
8163 8163
     must_have_one_noun=()
8164 8164
 }
8165 8165
 
8166
+_oc_ex_dockerbuild()
8167
+{
8168
+    last_command="oc_ex_dockerbuild"
8169
+    commands=()
8170
+
8171
+    flags=()
8172
+    two_word_flags=()
8173
+    flags_with_completion=()
8174
+    flags_completion=()
8175
+
8176
+    flags+=("--dockerfile=")
8177
+    flags_with_completion+=("--dockerfile")
8178
+    flags_completion+=("_filedir")
8179
+    flags+=("--api-version=")
8180
+    flags+=("--certificate-authority=")
8181
+    flags_with_completion+=("--certificate-authority")
8182
+    flags_completion+=("_filedir")
8183
+    flags+=("--client-certificate=")
8184
+    flags_with_completion+=("--client-certificate")
8185
+    flags_completion+=("_filedir")
8186
+    flags+=("--client-key=")
8187
+    flags_with_completion+=("--client-key")
8188
+    flags_completion+=("_filedir")
8189
+    flags+=("--cluster=")
8190
+    flags+=("--config=")
8191
+    flags_with_completion+=("--config")
8192
+    flags_completion+=("_filedir")
8193
+    flags+=("--context=")
8194
+    flags+=("--google-json-key=")
8195
+    flags+=("--insecure-skip-tls-verify")
8196
+    flags+=("--log-flush-frequency=")
8197
+    flags+=("--match-server-version")
8198
+    flags+=("--namespace=")
8199
+    two_word_flags+=("-n")
8200
+    flags+=("--server=")
8201
+    flags+=("--token=")
8202
+    flags+=("--user=")
8203
+
8204
+    must_have_one_flag=()
8205
+    must_have_one_noun=()
8206
+}
8207
+
8208
+_oc_ex()
8209
+{
8210
+    last_command="oc_ex"
8211
+    commands=()
8212
+    commands+=("dockerbuild")
8213
+
8214
+    flags=()
8215
+    two_word_flags=()
8216
+    flags_with_completion=()
8217
+    flags_completion=()
8218
+
8219
+    flags+=("--api-version=")
8220
+    flags+=("--certificate-authority=")
8221
+    flags_with_completion+=("--certificate-authority")
8222
+    flags_completion+=("_filedir")
8223
+    flags+=("--client-certificate=")
8224
+    flags_with_completion+=("--client-certificate")
8225
+    flags_completion+=("_filedir")
8226
+    flags+=("--client-key=")
8227
+    flags_with_completion+=("--client-key")
8228
+    flags_completion+=("_filedir")
8229
+    flags+=("--cluster=")
8230
+    flags+=("--config=")
8231
+    flags_with_completion+=("--config")
8232
+    flags_completion+=("_filedir")
8233
+    flags+=("--context=")
8234
+    flags+=("--google-json-key=")
8235
+    flags+=("--insecure-skip-tls-verify")
8236
+    flags+=("--log-flush-frequency=")
8237
+    flags+=("--match-server-version")
8238
+    flags+=("--namespace=")
8239
+    two_word_flags+=("-n")
8240
+    flags+=("--server=")
8241
+    flags+=("--token=")
8242
+    flags+=("--user=")
8243
+
8244
+    must_have_one_flag=()
8245
+    must_have_one_noun=()
8246
+}
8247
+
8166 8248
 _oc_options()
8167 8249
 {
8168 8250
     last_command="oc_options"
... ...
@@ -8256,6 +8338,7 @@ _oc()
8256 8256
     commands+=("whoami")
8257 8257
     commands+=("env")
8258 8258
     commands+=("volumes")
8259
+    commands+=("ex")
8259 8260
     commands+=("options")
8260 8261
 
8261 8262
     flags=()
... ...
@@ -11748,6 +11748,88 @@ _openshift_cli_volumes()
11748 11748
     must_have_one_noun=()
11749 11749
 }
11750 11750
 
11751
+_openshift_cli_ex_dockerbuild()
11752
+{
11753
+    last_command="openshift_cli_ex_dockerbuild"
11754
+    commands=()
11755
+
11756
+    flags=()
11757
+    two_word_flags=()
11758
+    flags_with_completion=()
11759
+    flags_completion=()
11760
+
11761
+    flags+=("--dockerfile=")
11762
+    flags_with_completion+=("--dockerfile")
11763
+    flags_completion+=("_filedir")
11764
+    flags+=("--api-version=")
11765
+    flags+=("--certificate-authority=")
11766
+    flags_with_completion+=("--certificate-authority")
11767
+    flags_completion+=("_filedir")
11768
+    flags+=("--client-certificate=")
11769
+    flags_with_completion+=("--client-certificate")
11770
+    flags_completion+=("_filedir")
11771
+    flags+=("--client-key=")
11772
+    flags_with_completion+=("--client-key")
11773
+    flags_completion+=("_filedir")
11774
+    flags+=("--cluster=")
11775
+    flags+=("--config=")
11776
+    flags_with_completion+=("--config")
11777
+    flags_completion+=("_filedir")
11778
+    flags+=("--context=")
11779
+    flags+=("--google-json-key=")
11780
+    flags+=("--insecure-skip-tls-verify")
11781
+    flags+=("--log-flush-frequency=")
11782
+    flags+=("--match-server-version")
11783
+    flags+=("--namespace=")
11784
+    two_word_flags+=("-n")
11785
+    flags+=("--server=")
11786
+    flags+=("--token=")
11787
+    flags+=("--user=")
11788
+
11789
+    must_have_one_flag=()
11790
+    must_have_one_noun=()
11791
+}
11792
+
11793
+_openshift_cli_ex()
11794
+{
11795
+    last_command="openshift_cli_ex"
11796
+    commands=()
11797
+    commands+=("dockerbuild")
11798
+
11799
+    flags=()
11800
+    two_word_flags=()
11801
+    flags_with_completion=()
11802
+    flags_completion=()
11803
+
11804
+    flags+=("--api-version=")
11805
+    flags+=("--certificate-authority=")
11806
+    flags_with_completion+=("--certificate-authority")
11807
+    flags_completion+=("_filedir")
11808
+    flags+=("--client-certificate=")
11809
+    flags_with_completion+=("--client-certificate")
11810
+    flags_completion+=("_filedir")
11811
+    flags+=("--client-key=")
11812
+    flags_with_completion+=("--client-key")
11813
+    flags_completion+=("_filedir")
11814
+    flags+=("--cluster=")
11815
+    flags+=("--config=")
11816
+    flags_with_completion+=("--config")
11817
+    flags_completion+=("_filedir")
11818
+    flags+=("--context=")
11819
+    flags+=("--google-json-key=")
11820
+    flags+=("--insecure-skip-tls-verify")
11821
+    flags+=("--log-flush-frequency=")
11822
+    flags+=("--match-server-version")
11823
+    flags+=("--namespace=")
11824
+    two_word_flags+=("-n")
11825
+    flags+=("--server=")
11826
+    flags+=("--token=")
11827
+    flags+=("--user=")
11828
+
11829
+    must_have_one_flag=()
11830
+    must_have_one_noun=()
11831
+}
11832
+
11751 11833
 _openshift_cli_options()
11752 11834
 {
11753 11835
     last_command="openshift_cli_options"
... ...
@@ -11841,6 +11923,7 @@ _openshift_cli()
11841 11841
     commands+=("whoami")
11842 11842
     commands+=("env")
11843 11843
     commands+=("volumes")
11844
+    commands+=("ex")
11844 11845
     commands+=("options")
11845 11846
 
11846 11847
     flags=()
... ...
@@ -1061,6 +1061,19 @@ DEPRECATED: set env
1061 1061
 ====
1062 1062
 
1063 1063
 
1064
+== oc ex dockerbuild
1065
+Perform a direct Docker build
1066
+
1067
+====
1068
+
1069
+[options="nowrap"]
1070
+----
1071
+  # Build the current directory into a single layer and tag
1072
+  $ oc ex dockerbuild . myimage:latest
1073
+----
1074
+====
1075
+
1076
+
1064 1077
 == oc exec
1065 1078
 Execute a command in a container.
1066 1079
 
... ...
@@ -15,6 +15,7 @@ import (
15 15
 
16 16
 	"github.com/openshift/origin/pkg/cmd/admin"
17 17
 	"github.com/openshift/origin/pkg/cmd/cli/cmd"
18
+	"github.com/openshift/origin/pkg/cmd/cli/cmd/dockerbuild"
18 19
 	"github.com/openshift/origin/pkg/cmd/cli/cmd/rsync"
19 20
 	"github.com/openshift/origin/pkg/cmd/cli/cmd/set"
20 21
 	"github.com/openshift/origin/pkg/cmd/cli/policy"
... ...
@@ -177,6 +178,16 @@ func NewCommandCLI(name, fullName string, in io.Reader, out, errout io.Writer) *
177 177
 	templates.ActsAsRootCommand(cmds, filters, groups...).
178 178
 		ExposeFlags(loginCmd, "certificate-authority", "insecure-skip-tls-verify", "token")
179 179
 
180
+	// experimental commands are those that are bundled with the binary but not displayed to end users
181
+	// directly
182
+	experimental := &cobra.Command{
183
+		Use: "ex", // Because this command exposes no description, it will not be shown in help
184
+	}
185
+	experimental.AddCommand(
186
+		dockerbuild.NewCmdDockerbuild(fullName, f, out, errout),
187
+	)
188
+	cmds.AddCommand(experimental)
189
+
180 190
 	if name == fullName {
181 191
 		cmds.AddCommand(version.NewVersionCommand(fullName, false))
182 192
 	}
183 193
new file mode 100644
... ...
@@ -0,0 +1,115 @@
0
+package dockerbuild
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+	"os"
6
+	"path/filepath"
7
+	"strings"
8
+
9
+	docker "github.com/fsouza/go-dockerclient"
10
+	"github.com/spf13/cobra"
11
+
12
+	"k8s.io/kubernetes/pkg/credentialprovider"
13
+	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
14
+
15
+	cmdutil "github.com/openshift/origin/pkg/cmd/util"
16
+	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
17
+	"github.com/openshift/origin/pkg/util/docker/dockerfile/builder"
18
+)
19
+
20
+const (
21
+	dockerbuildLong = `
22
+Build a Dockerfile into a single layer
23
+
24
+Builds the provided directory with a Dockerfile into a single layered image.
25
+Requires that you have a working connection to a Docker engine.`
26
+
27
+	dockerbuildExample = `  # Build the current directory into a single layer and tag
28
+  $ %[1]s dockerbuild . myimage:latest`
29
+)
30
+
31
// DockerbuildOptions holds the resolved inputs for the dockerbuild
// command: output streams, the Docker client, the build context
// directory, the tag to apply, the Dockerfile location, the registry
// credential keyring, and any KEY=VALUE build arguments.
type DockerbuildOptions struct {
	Out io.Writer
	Err io.Writer

	// Client talks to the Docker engine; populated in Complete from
	// the DOCKER_* environment variables.
	Client *docker.Client

	Directory      string
	Tag            string
	DockerfilePath string
	Keyring        credentialprovider.DockerKeyring
	Arguments      cmdutil.Environment
}
43
+
44
// NewCmdDockerbuild creates the `dockerbuild` cobra command, which
// performs a direct Docker build of DIRECTORY tagged as TAG, wiring
// the provided output/error streams into the command's options.
func NewCmdDockerbuild(fullName string, f *clientcmd.Factory, out, errOut io.Writer) *cobra.Command {
	options := &DockerbuildOptions{
		Out: out,
		Err: errOut,
	}
	cmd := &cobra.Command{
		Use:     "dockerbuild DIRECTORY TAG [--dockerfile=PATH]",
		Short:   "Perform a direct Docker build",
		Long:    dockerbuildLong,
		Example: fmt.Sprintf(dockerbuildExample, fullName),
		Run: func(cmd *cobra.Command, args []string) {
			kcmdutil.CheckErr(options.Complete(f, cmd, args))
			kcmdutil.CheckErr(options.Validate())
			if err := options.Run(); err != nil {
				// TODO: move this to kcmdutil
				// ErrExit signals a non-zero exit without printing an
				// additional error message.
				if err == cmdutil.ErrExit {
					os.Exit(1)
				}
				kcmdutil.CheckErr(err)
			}
		},
	}

	cmd.Flags().StringVar(&options.DockerfilePath, "dockerfile", options.DockerfilePath, "An optional path to a Dockerfile to use.")
	cmd.MarkFlagFilename("dockerfile")

	return cmd
}
72
+
73
// Complete parses the positional arguments (DIRECTORY TAG, optionally
// followed by KEY=VALUE build arguments), defaults the Dockerfile path
// to DIRECTORY/Dockerfile, and initializes the Docker client and
// credential keyring.
func (o *DockerbuildOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, args []string) error {
	paths, envArgs, ok := cmdutil.SplitEnvironmentFromResources(args)
	if !ok {
		return kcmdutil.UsageError(cmd, "context directory must be specified before environment changes: %s", strings.Join(args, " "))
	}
	if len(paths) != 2 {
		return kcmdutil.UsageError(cmd, "the directory to build and tag must be specified")
	}
	// NOTE(review): the secondary return values of ParseEnvironmentArguments
	// (likely duplicates/errors) are discarded here — confirm intentional.
	o.Arguments, _, _ = cmdutil.ParseEnvironmentArguments(envArgs)
	o.Directory = paths[0]
	o.Tag = paths[1]
	if len(o.DockerfilePath) == 0 {
		o.DockerfilePath = filepath.Join(o.Directory, "Dockerfile")
	}
	// Client configuration comes from the DOCKER_* environment variables.
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return err
	}
	o.Client = client

	o.Keyring = credentialprovider.NewDockerKeyring()

	return nil
}
97
+
98
// Validate is currently a no-op; all argument checking happens in Complete.
func (o *DockerbuildOptions) Validate() error {
	return nil
}
101
+
102
// Run opens the Dockerfile and executes the build through the client
// executor, streaming build output to the configured writers, using the
// keyring for registry authentication, and tagging the result with o.Tag.
func (o *DockerbuildOptions) Run() error {
	f, err := os.Open(o.DockerfilePath)
	if err != nil {
		return err
	}
	defer f.Close()
	e := builder.NewClientExecutor(o.Client)
	e.Out, e.ErrOut = o.Out, o.Err
	e.Directory = o.Directory
	e.Tag = o.Tag
	// Lookup resolves registry credentials for pulls performed during the build.
	e.AuthFn = o.Keyring.Lookup
	return e.Build(f, o.Arguments)
}
... ...
@@ -3,9 +3,11 @@ package set
3 3
 import (
4 4
 	"fmt"
5 5
 	"io"
6
+	"net"
6 7
 	"net/url"
7 8
 	"os"
8 9
 	"strconv"
10
+	"strings"
9 11
 
10 12
 	"github.com/spf13/cobra"
11 13
 	kapi "k8s.io/kubernetes/pkg/api"
... ...
@@ -13,12 +15,10 @@ import (
13 13
 	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
14 14
 	"k8s.io/kubernetes/pkg/kubectl/resource"
15 15
 	"k8s.io/kubernetes/pkg/runtime"
16
+	"k8s.io/kubernetes/pkg/util/intstr"
16 17
 
17 18
 	cmdutil "github.com/openshift/origin/pkg/cmd/util"
18 19
 	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
19
-	"k8s.io/kubernetes/pkg/util/intstr"
20
-	"net"
21
-	"strings"
22 20
 )
23 21
 
24 22
 const (
25 23
new file mode 100644
... ...
@@ -0,0 +1,192 @@
0
+
1
+                                 Apache License
2
+                           Version 2.0, January 2004
3
+                        https://www.apache.org/licenses/
4
+
5
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+   1. Definitions.
8
+
9
+      "License" shall mean the terms and conditions for use, reproduction,
10
+      and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+      "Licensor" shall mean the copyright owner or entity authorized by
13
+      the copyright owner that is granting the License.
14
+
15
+      "Legal Entity" shall mean the union of the acting entity and all
16
+      other entities that control, are controlled by, or are under common
17
+      control with that entity. For the purposes of this definition,
18
+      "control" means (i) the power, direct or indirect, to cause the
19
+      direction or management of such entity, whether by contract or
20
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+      outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+      "You" (or "Your") shall mean an individual or Legal Entity
24
+      exercising permissions granted by this License.
25
+
26
+      "Source" form shall mean the preferred form for making modifications,
27
+      including but not limited to software source code, documentation
28
+      source, and configuration files.
29
+
30
+      "Object" form shall mean any form resulting from mechanical
31
+      transformation or translation of a Source form, including but
32
+      not limited to compiled object code, generated documentation,
33
+      and conversions to other media types.
34
+
35
+      "Work" shall mean the work of authorship, whether in Source or
36
+      Object form, made available under the License, as indicated by a
37
+      copyright notice that is included in or attached to the work
38
+      (an example is provided in the Appendix below).
39
+
40
+      "Derivative Works" shall mean any work, whether in Source or Object
41
+      form, that is based on (or derived from) the Work and for which the
42
+      editorial revisions, annotations, elaborations, or other modifications
43
+      represent, as a whole, an original work of authorship. For the purposes
44
+      of this License, Derivative Works shall not include works that remain
45
+      separable from, or merely link (or bind by name) to the interfaces of,
46
+      the Work and Derivative Works thereof.
47
+
48
+      "Contribution" shall mean any work of authorship, including
49
+      the original version of the Work and any modifications or additions
50
+      to that Work or Derivative Works thereof, that is intentionally
51
+      submitted to Licensor for inclusion in the Work by the copyright owner
52
+      or by an individual or Legal Entity authorized to submit on behalf of
53
+      the copyright owner. For the purposes of this definition, "submitted"
54
+      means any form of electronic, verbal, or written communication sent
55
+      to the Licensor or its representatives, including but not limited to
56
+      communication on electronic mailing lists, source code control systems,
57
+      and issue tracking systems that are managed by, or on behalf of, the
58
+      Licensor for the purpose of discussing and improving the Work, but
59
+      excluding communication that is conspicuously marked or otherwise
60
+      designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+      "Contributor" shall mean Licensor and any individual or Legal Entity
63
+      on behalf of whom a Contribution has been received by Licensor and
64
+      subsequently incorporated within the Work.
65
+
66
+   2. Grant of Copyright License. Subject to the terms and conditions of
67
+      this License, each Contributor hereby grants to You a perpetual,
68
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+      copyright license to reproduce, prepare Derivative Works of,
70
+      publicly display, publicly perform, sublicense, and distribute the
71
+      Work and such Derivative Works in Source or Object form.
72
+
73
+   3. Grant of Patent License. Subject to the terms and conditions of
74
+      this License, each Contributor hereby grants to You a perpetual,
75
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+      (except as stated in this section) patent license to make, have made,
77
+      use, offer to sell, sell, import, and otherwise transfer the Work,
78
+      where such license applies only to those patent claims licensable
79
+      by such Contributor that are necessarily infringed by their
80
+      Contribution(s) alone or by combination of their Contribution(s)
81
+      with the Work to which such Contribution(s) was submitted. If You
82
+      institute patent litigation against any entity (including a
83
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+      or a Contribution incorporated within the Work constitutes direct
85
+      or contributory patent infringement, then any patent licenses
86
+      granted to You under this License for that Work shall terminate
87
+      as of the date such litigation is filed.
88
+
89
+   4. Redistribution. You may reproduce and distribute copies of the
90
+      Work or Derivative Works thereof in any medium, with or without
91
+      modifications, and in Source or Object form, provided that You
92
+      meet the following conditions:
93
+
94
+      (a) You must give any other recipients of the Work or
95
+          Derivative Works a copy of this License; and
96
+
97
+      (b) You must cause any modified files to carry prominent notices
98
+          stating that You changed the files; and
99
+
100
+      (c) You must retain, in the Source form of any Derivative Works
101
+          that You distribute, all copyright, patent, trademark, and
102
+          attribution notices from the Source form of the Work,
103
+          excluding those notices that do not pertain to any part of
104
+          the Derivative Works; and
105
+
106
+      (d) If the Work includes a "NOTICE" text file as part of its
107
+          distribution, then any Derivative Works that You distribute must
108
+          include a readable copy of the attribution notices contained
109
+          within such NOTICE file, excluding those notices that do not
110
+          pertain to any part of the Derivative Works, in at least one
111
+          of the following places: within a NOTICE text file distributed
112
+          as part of the Derivative Works; within the Source form or
113
+          documentation, if provided along with the Derivative Works; or,
114
+          within a display generated by the Derivative Works, if and
115
+          wherever such third-party notices normally appear. The contents
116
+          of the NOTICE file are for informational purposes only and
117
+          do not modify the License. You may add Your own attribution
118
+          notices within Derivative Works that You distribute, alongside
119
+          or as an addendum to the NOTICE text from the Work, provided
120
+          that such additional attribution notices cannot be construed
121
+          as modifying the License.
122
+
123
+      You may add Your own copyright statement to Your modifications and
124
+      may provide additional or different license terms and conditions
125
+      for use, reproduction, or distribution of Your modifications, or
126
+      for any such Derivative Works as a whole, provided Your use,
127
+      reproduction, and distribution of the Work otherwise complies with
128
+      the conditions stated in this License.
129
+
130
+   5. Submission of Contributions. Unless You explicitly state otherwise,
131
+      any Contribution intentionally submitted for inclusion in the Work
132
+      by You to the Licensor shall be under the terms and conditions of
133
+      this License, without any additional terms or conditions.
134
+      Notwithstanding the above, nothing herein shall supersede or modify
135
+      the terms of any separate license agreement you may have executed
136
+      with Licensor regarding such Contributions.
137
+
138
+   6. Trademarks. This License does not grant permission to use the trade
139
+      names, trademarks, service marks, or product names of the Licensor,
140
+      except as required for reasonable and customary use in describing the
141
+      origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+   7. Disclaimer of Warranty. Unless required by applicable law or
144
+      agreed to in writing, Licensor provides the Work (and each
145
+      Contributor provides its Contributions) on an "AS IS" BASIS,
146
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+      implied, including, without limitation, any warranties or conditions
148
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+      PARTICULAR PURPOSE. You are solely responsible for determining the
150
+      appropriateness of using or redistributing the Work and assume any
151
+      risks associated with Your exercise of permissions under this License.
152
+
153
+   8. Limitation of Liability. In no event and under no legal theory,
154
+      whether in tort (including negligence), contract, or otherwise,
155
+      unless required by applicable law (such as deliberate and grossly
156
+      negligent acts) or agreed to in writing, shall any Contributor be
157
+      liable to You for damages, including any direct, indirect, special,
158
+      incidental, or consequential damages of any character arising as a
159
+      result of this License or out of the use or inability to use the
160
+      Work (including but not limited to damages for loss of goodwill,
161
+      work stoppage, computer failure or malfunction, or any and all
162
+      other commercial damages or losses), even if such Contributor
163
+      has been advised of the possibility of such damages.
164
+
165
+   9. Accepting Warranty or Additional Liability. While redistributing
166
+      the Work or Derivative Works thereof, You may choose to offer,
167
+      and charge a fee for, acceptance of support, warranty, indemnity,
168
+      or other liability obligations and/or rights consistent with this
169
+      License. However, in accepting such obligations, You may act only
170
+      on Your own behalf and on Your sole responsibility, not on behalf
171
+      of any other Contributor, and only if You agree to indemnify,
172
+      defend, and hold each Contributor harmless for any liability
173
+      incurred by, or claims asserted against, such Contributor by reason
174
+      of your accepting any such warranty or additional liability.
175
+
176
+   END OF TERMS AND CONDITIONS
177
+
178
+   Copyright 2013-2016 Docker, Inc.
179
+   Copyright 2016 The OpenShift Authors
180
+
181
+   Licensed under the Apache License, Version 2.0 (the "License");
182
+   you may not use this file except in compliance with the License.
183
+   You may obtain a copy of the License at
184
+
185
+       https://www.apache.org/licenses/LICENSE-2.0
186
+
187
+   Unless required by applicable law or agreed to in writing, software
188
+   distributed under the License is distributed on an "AS IS" BASIS,
189
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
190
+   See the License for the specific language governing permissions and
191
+   limitations under the License.
0 192
new file mode 100644
... ...
@@ -0,0 +1,292 @@
0
+package builder
1
+
2
+import (
3
+	"bytes"
4
+	"fmt"
5
+	"io/ioutil"
6
+	"log"
7
+	"os"
8
+	"path/filepath"
9
+	"runtime"
10
+	"strings"
11
+
12
+	docker "github.com/fsouza/go-dockerclient"
13
+
14
+	"github.com/docker/docker/builder/command"
15
+	"github.com/docker/docker/builder/parser"
16
+)
17
+
18
+// Copy defines a copy operation required on the container.
19
+type Copy struct {
20
+	Src      string
21
+	Dest     []string
22
+	Download bool
23
+}
24
+
25
+// Run defines a run operation required in the container.
26
+type Run struct {
27
+	Shell bool
28
+	Args  []string
29
+}
30
+
31
+type Executor interface {
32
+	Copy(copies ...Copy) error
33
+	Run(run Run, config docker.Config) error
34
+}
35
+
36
+type logExecutor struct{}
37
+
38
+func (logExecutor) Copy(copies ...Copy) error {
39
+	for _, c := range copies {
40
+		log.Printf("COPY %s -> %v (download:%t)", c.Src, c.Dest, c.Download)
41
+	}
42
+	return nil
43
+}
44
+
45
+func (logExecutor) Run(run Run, config docker.Config) error {
46
+	log.Printf("RUN %v %t (%v)", run.Args, run.Shell, config.Env)
47
+	return nil
48
+}
49
+
50
+type noopExecutor struct{}
51
+
52
+func (noopExecutor) Copy(copies ...Copy) error {
53
+	return nil
54
+}
55
+
56
+func (noopExecutor) Run(run Run, config docker.Config) error {
57
+	return nil
58
+}
59
+
60
+var (
61
+	LogExecutor  = logExecutor{}
62
+	NoopExecutor = noopExecutor{}
63
+)
64
+
65
+type Builder struct {
66
+	RunConfig docker.Config
67
+
68
+	Env    []string
69
+	Args   map[string]string
70
+	CmdSet bool
71
+
72
+	AllowedArgs map[string]bool
73
+
74
+	PendingRuns   []Run
75
+	PendingCopies []Copy
76
+
77
+	Executor Executor
78
+}
79
+
80
+func NewBuilder() *Builder {
81
+	args := make(map[string]bool)
82
+	for k, v := range builtinAllowedBuildArgs {
83
+		args[k] = v
84
+	}
85
+	return &Builder{
86
+		Args:        make(map[string]string),
87
+		AllowedArgs: args,
88
+	}
89
+}
90
+
91
+// Step creates a new step from the current state.
92
+func (b *Builder) Step() *Step {
93
+	dst := make([]string, len(b.Env)+len(b.RunConfig.Env))
94
+	copy(dst, b.Env)
95
+	dst = append(dst, b.RunConfig.Env...)
96
+	return &Step{Env: dst}
97
+}
98
+
99
+// Run executes a step, transforming the current builder and
100
+// invoking any Copy or Run operations.
101
+func (b *Builder) Run(step *Step, exec Executor) error {
102
+	fn, ok := evaluateTable[step.Command]
103
+	if !ok {
104
+		return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(step.Command))
105
+	}
106
+	if err := fn(b, step.Args, step.Attrs, step.Original); err != nil {
107
+		return err
108
+	}
109
+
110
+	copies := b.PendingCopies
111
+	b.PendingCopies = nil
112
+	runs := b.PendingRuns
113
+	b.PendingRuns = nil
114
+
115
+	if err := exec.Copy(copies...); err != nil {
116
+		return err
117
+	}
118
+	for _, run := range runs {
119
+		config := b.Config()
120
+		if err := exec.Run(run, *config); err != nil {
121
+			return nil
122
+		}
123
+	}
124
+
125
+	return nil
126
+}
127
+
128
+// RequiresStart returns true if a running container environment is necessary
129
+// to invoke the provided commands
130
+func (b *Builder) RequiresStart(node *parser.Node) bool {
131
+	for _, child := range node.Children {
132
+		if child.Value == command.Run {
133
+			return true
134
+		}
135
+	}
136
+	return false
137
+}
138
+
139
// Config returns a snapshot of the current RunConfig intended for
// use with a container commit. OnBuild and Entrypoint are normalized
// to empty (non-nil) slices, and Image is cleared so the committed
// config does not carry the parent image reference.
func (b *Builder) Config() *docker.Config {
	// Copy by value so mutations below do not affect b.RunConfig.
	config := b.RunConfig
	if config.OnBuild == nil {
		config.OnBuild = []string{}
	}
	if config.Entrypoint == nil {
		config.Entrypoint = []string{}
	}
	config.Image = ""
	return &config
}
152
+
153
+// Arguments returns the currently active arguments.
154
+func (b *Builder) Arguments() []string {
155
+	var envs []string
156
+	for key, val := range b.Args {
157
+		if _, ok := b.AllowedArgs[key]; ok {
158
+			envs = append(envs, fmt.Sprintf("%s=%s", key, val))
159
+		}
160
+	}
161
+	return envs
162
+}
163
+
164
+// ErrNoFROM is returned if the Dockerfile did not contain a FROM
165
+// statement.
166
+var ErrNoFROM = fmt.Errorf("no FROM statement found")
167
+
168
+// From returns the image this dockerfile depends on, or an error
169
+// if no FROM is found or if multiple FROM are specified. If a
170
+// single from is found the passed node is updated with only
171
+// the remaining statements.  The builder's RunConfig.Image field
172
+// is set to the first From found, or left unchanged if already
173
+// set.
174
+func (b *Builder) From(node *parser.Node) (string, error) {
175
+	children := SplitChildren(node, command.From)
176
+	switch {
177
+	case len(children) == 0:
178
+		return "", ErrNoFROM
179
+	case len(children) > 1:
180
+		return "", fmt.Errorf("multiple FROM statements are not supported")
181
+	default:
182
+		step := b.Step()
183
+		if err := step.Resolve(children[0]); err != nil {
184
+			return "", err
185
+		}
186
+		if err := b.Run(step, NoopExecutor); err != nil {
187
+			return "", err
188
+		}
189
+		return b.RunConfig.Image, nil
190
+	}
191
+}
192
+
193
+// FromImage updates the builder to use the provided image (resetting RunConfig
194
+// and recording the image environment), and updates the node with any ONBUILD
195
+// statements extracted from the parent image.
196
+func (b *Builder) FromImage(image *docker.Image, node *parser.Node) error {
197
+	SplitChildren(node, command.From)
198
+
199
+	b.RunConfig = *image.Config
200
+	b.Env = b.RunConfig.Env
201
+	b.RunConfig.Env = nil
202
+
203
+	// Check to see if we have a default PATH, note that windows won't
204
+	// have one as its set by HCS
205
+	if runtime.GOOS != "windows" && !hasEnvName(b.Env, "PATH") {
206
+		b.RunConfig.Env = append(b.RunConfig.Env, "PATH="+defaultPathEnv)
207
+	}
208
+
209
+	// Join the image onbuild statements into node
210
+	if image.Config == nil || len(image.Config.OnBuild) == 0 {
211
+		return nil
212
+	}
213
+	extra, err := parser.Parse(bytes.NewBufferString(strings.Join(image.Config.OnBuild, "\n")))
214
+	if err != nil {
215
+		return err
216
+	}
217
+	for _, child := range extra.Children {
218
+		switch strings.ToUpper(child.Value) {
219
+		case "ONBUILD":
220
+			return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
221
+		case "MAINTAINER", "FROM":
222
+			return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", child.Value)
223
+		}
224
+	}
225
+	node.Children = append(extra.Children, node.Children...)
226
+	// Since we've processed the OnBuild statements, clear them from the runconfig state.
227
+	b.RunConfig.OnBuild = nil
228
+	return nil
229
+}
230
+
231
+// SplitChildren removes any children with the provided value from node
232
+// and returns them as an array. node.Children is updated.
233
+func SplitChildren(node *parser.Node, value string) []*parser.Node {
234
+	var split []*parser.Node
235
+	var children []*parser.Node
236
+	for _, child := range node.Children {
237
+		if child.Value == value {
238
+			split = append(split, child)
239
+		} else {
240
+			children = append(children, child)
241
+		}
242
+	}
243
+	node.Children = children
244
+	return split
245
+}
246
+
247
+// StepFunc is invoked with the result of a resolved step.
248
+type StepFunc func(*Builder, []string, map[string]bool, string) error
249
+
250
+var evaluateTable = map[string]StepFunc{
251
+	command.Env:        env,
252
+	command.Label:      label,
253
+	command.Maintainer: maintainer,
254
+	command.Add:        add,
255
+	command.Copy:       dispatchCopy, // copy() is a go builtin
256
+	command.From:       from,
257
+	command.Onbuild:    onbuild,
258
+	command.Workdir:    workdir,
259
+	command.Run:        run,
260
+	command.Cmd:        cmd,
261
+	command.Entrypoint: entrypoint,
262
+	command.Expose:     expose,
263
+	command.Volume:     volume,
264
+	command.User:       user,
265
+	// TODO: use the public constants for these when we update dockerfile/
266
+	commandStopSignal: stopSignal,
267
+	commandArg:        arg,
268
+}
269
+
270
+// builtinAllowedBuildArgs is list of built-in allowed build args
271
+var builtinAllowedBuildArgs = map[string]bool{
272
+	"HTTP_PROXY":  true,
273
+	"http_proxy":  true,
274
+	"HTTPS_PROXY": true,
275
+	"https_proxy": true,
276
+	"FTP_PROXY":   true,
277
+	"ftp_proxy":   true,
278
+	"NO_PROXY":    true,
279
+	"no_proxy":    true,
280
+}
281
+
282
+// ParseDockerIgnore returns a list of the excludes in the .dockerignore file.
283
+// extracted from fsouza/go-dockerclient.
284
+func ParseDockerignore(root string) ([]string, error) {
285
+	var excludes []string
286
+	ignore, err := ioutil.ReadFile(filepath.Join(root, ".dockerignore"))
287
+	if err != nil && !os.IsNotExist(err) {
288
+		return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
289
+	}
290
+	return strings.Split(string(ignore), "\n"), nil
291
+}
0 292
new file mode 100644
... ...
@@ -0,0 +1,207 @@
0
+package builder
1
+
2
+import (
3
+	"bytes"
4
+	"io/ioutil"
5
+	"os"
6
+	"testing"
7
+
8
+	"fmt"
9
+	"github.com/docker/docker/builder/parser"
10
+	docker "github.com/fsouza/go-dockerclient"
11
+	"reflect"
12
+)
13
+
14
+func TestRun(t *testing.T) {
15
+	f, err := os.Open("../../../../../images/dockerregistry/Dockerfile")
16
+	if err != nil {
17
+		t.Fatal(err)
18
+	}
19
+	node, err := parser.Parse(f)
20
+	if err != nil {
21
+		t.Fatal(err)
22
+	}
23
+	b := NewBuilder()
24
+	from, err := b.From(node)
25
+	if err != nil {
26
+		t.Fatal(err)
27
+	}
28
+	if from != "openshift/origin-base" {
29
+		t.Fatalf("unexpected from: %s", from)
30
+	}
31
+	for _, child := range node.Children {
32
+		step := b.Step()
33
+		if err := step.Resolve(child); err != nil {
34
+			t.Fatal(err)
35
+		}
36
+		if err := b.Run(step, LogExecutor); err != nil {
37
+			t.Fatal(err)
38
+		}
39
+	}
40
+	t.Logf("config: %#v", b.Config())
41
+	t.Logf(node.Dump())
42
+}
43
+
44
// testExecutor is an Executor double that records every Copy and Run the
// builder dispatches so tests can assert on the exact operation sequence.
type testExecutor struct {
	Copies  []Copy          // all copy operations, in dispatch order
	Runs    []Run           // all run operations, in dispatch order
	Configs []docker.Config // config snapshot captured alongside each Run
	Err     error           // when set, returned from every call to simulate failure
}

// Copy records the requested copy operations and returns the configured Err.
func (e *testExecutor) Copy(copies ...Copy) error {
	e.Copies = append(e.Copies, copies...)
	return e.Err
}

// Run records the run and the config it was issued with, returning Err.
func (e *testExecutor) Run(run Run, config docker.Config) error {
	e.Runs = append(e.Runs, run)
	e.Configs = append(e.Configs, config)
	return e.Err
}
60
+
61
// TestBuilder is a table-driven test: each fixture Dockerfile is resolved and
// run against a recording testExecutor, and the captured copies, runs, and
// final image config are compared against the expected values.
func TestBuilder(t *testing.T) {
	testCases := []struct {
		Dockerfile string                // fixture to parse and run
		From       string                // expected base image from FROM
		Copies     []Copy                // expected COPY/ADD operations, in order
		Runs       []Run                 // expected RUN operations, in order
		Config     docker.Config         // expected final builder config
		ErrFn      func(err error) bool  // when set, the error expected from resolve/run
	}{
		{
			Dockerfile: "fixtures/dir/Dockerfile",
			From:       "busybox",
			Copies: []Copy{
				{Src: ".", Dest: []string{"/"}, Download: false},
				{Src: ".", Dest: []string{"/dir"}},
				{Src: "subdir/", Dest: []string{"/test/"}, Download: false},
			},
			Config: docker.Config{
				Image: "busybox",
			},
		},
		{
			Dockerfile: "fixtures/ignore/Dockerfile",
			From:       "busybox",
			Copies: []Copy{
				{Src: ".", Dest: []string{"/"}},
			},
			Config: docker.Config{
				Image: "busybox",
			},
		},
		{
			// exercises ENV parsing: whitespace runs, escaped newlines, quotes
			Dockerfile: "fixtures/Dockerfile.env",
			From:       "busybox",
			Config: docker.Config{
				Env:   []string{"name=value", "name2=value2a            value2b", "name1=value1", "name3=value3a\\n\"value3b\"", "name4=value4a\\\\nvalue4b"},
				Image: "busybox",
			},
		},
		{
			// exercises shell vs exec RUN forms, EXPOSE, VOLUME, USER, ONBUILD
			Dockerfile: "fixtures/Dockerfile.edgecases",
			From:       "busybox",
			Copies: []Copy{
				{Src: ".", Dest: []string{"/"}, Download: true},
				{Src: ".", Dest: []string{"/test/copy"}},
			},
			Runs: []Run{
				{Shell: false, Args: []string{"ls", "-la"}},
				{Shell: false, Args: []string{"echo", "'1234'"}},
				{Shell: true, Args: []string{"echo \"1234\""}},
				{Shell: true, Args: []string{"echo 1234"}},
				{Shell: true, Args: []string{"echo '1234' &&     echo \"456\" &&     echo 789"}},
				{Shell: true, Args: []string{"sh -c 'echo root:testpass         > /tmp/passwd'"}},
				{Shell: true, Args: []string{"mkdir -p /test /test2 /test3/test"}},
			},
			Config: docker.Config{
				User:         "docker:root",
				ExposedPorts: map[docker.Port]struct{}{"6000/tcp": {}, "3000/tcp": {}, "9000/tcp": {}, "5000/tcp": {}},
				Env:          []string{"SCUBA=1 DUBA 3"},
				Cmd:          []string{"/bin/sh", "-c", "echo 'test' | wc -"},
				Image:        "busybox",
				Volumes:      map[string]struct{}{"/test2": {}, "/test3": {}, "/test": {}},
				WorkingDir:   "/test",
				OnBuild:      []string{"RUN [\"echo\", \"test\"]", "RUN echo test", "COPY . /"},
			},
		},
		{
			Dockerfile: "fixtures/Dockerfile.exposedefault",
			From:       "busybox",
			Config: docker.Config{
				ExposedPorts: map[docker.Port]struct{}{"3469/tcp": {}},
				Image:        "busybox",
			},
		},
		{
			// exercises ADD with remote URLs and varied destination forms
			Dockerfile: "fixtures/Dockerfile.add",
			From:       "busybox",
			Copies: []Copy{
				{Src: "https://github.com/openshift/origin/raw/master/README.md", Dest: []string{"/README.md"}, Download: true},
				{Src: "https://github.com/openshift/origin/raw/master/LICENSE", Dest: []string{"/"}, Download: true},
				{Src: "https://github.com/openshift/origin/raw/master/LICENSE", Dest: []string{"/A"}, Download: true},
				{Src: "https://github.com/openshift/origin/raw/master/LICENSE", Dest: []string{"/a"}, Download: true},
				{Src: "https://github.com/openshift/origin/raw/master/LICENSE", Dest: []string{"/b/a"}, Download: true},
				{Src: "https://github.com/openshift/origin/raw/master/LICENSE", Dest: []string{"/b/"}, Download: true},
				{Src: "https://github.com/openshift/ruby-hello-world/archive/master.zip", Dest: []string{"/tmp/"}, Download: true},
			},
			Runs: []Run{
				{Shell: true, Args: []string{"mkdir ./b"}},
			},
			Config: docker.Config{
				Image: "busybox",
				User:  "root",
			},
		},
	}
	for i, test := range testCases {
		data, err := ioutil.ReadFile(test.Dockerfile)
		if err != nil {
			t.Errorf("%d: %v", i, err)
			continue
		}
		node, err := parser.Parse(bytes.NewBuffer(data))
		if err != nil {
			t.Errorf("%d: %v", i, err)
			continue
		}
		b := NewBuilder()
		from, err := b.From(node)
		if err != nil {
			t.Errorf("%d: %v", i, err)
			continue
		}
		if from != test.From {
			t.Errorf("%d: unexpected FROM: %s", i, from)
		}
		e := &testExecutor{}
		// resolve and run each step, stopping at the first failure
		var lastErr error
		for j, child := range node.Children {
			step := b.Step()
			if err := step.Resolve(child); err != nil {
				lastErr = fmt.Errorf("%d: %d: %s: resolve: %v", i, j, step.Original, err)
				break
			}
			if err := b.Run(step, e); err != nil {
				lastErr = fmt.Errorf("%d: %d: %s: run: %v", i, j, step.Original, err)
				break
			}
		}
		if lastErr != nil {
			// an error is only acceptable if the test case expected one
			if test.ErrFn == nil || !test.ErrFn(lastErr) {
				t.Errorf("%d: unexpected error: %v", i, lastErr)
			}
			continue
		}
		if !reflect.DeepEqual(test.Copies, e.Copies) {
			t.Errorf("%d: unexpected copies: %#v", i, e.Copies)
		}
		if !reflect.DeepEqual(test.Runs, e.Runs) {
			t.Errorf("%d: unexpected runs: %#v", i, e.Runs)
		}
		lastConfig := b.RunConfig
		if !reflect.DeepEqual(test.Config, lastConfig) {
			t.Errorf("%d: unexpected config: %#v", i, lastConfig)
		}
	}
}
0 207
new file mode 100644
... ...
@@ -0,0 +1,397 @@
0
+package builder
1
+
2
+import (
3
+	"archive/tar"
4
+	"bytes"
5
+	"crypto/rand"
6
+	"encoding/base64"
7
+	"fmt"
8
+	"io"
9
+	"os"
10
+	"path"
11
+	"runtime"
12
+	"strings"
13
+
14
+	"github.com/docker/docker/builder/parser"
15
+	docker "github.com/fsouza/go-dockerclient"
16
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
17
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
18
+	"github.com/golang/glog"
19
+)
20
+
21
// ClientExecutor can run Docker builds from a Docker client.
type ClientExecutor struct {
	// Client is a client to a Docker daemon.
	Client *docker.Client
	// Directory is the context directory to build from, will use
	// the current working directory if not set.
	Directory string
	// Excludes are a list of file patterns that should be excluded
	// from the context. Will be set to the contents of the
	// .dockerignore file if nil.
	Excludes []string
	// Tag is an optional value to tag the resulting built image.
	Tag string

	// Out and ErrOut receive the standard output and standard error
	// streams of commands executed during the build.
	Out, ErrOut io.Writer

	// Container is optional and can be set to a container to use as
	// the execution environment for a build.
	Container *docker.Container
	// Image is optional and may be set to control which image is used
	// as a base for this build. Otherwise the FROM value from the
	// Dockerfile is read (will be pulled if not locally present).
	Image *docker.Image

	// AuthFn will handle authenticating any docker pulls if Image
	// is set to nil.
	AuthFn func(name string) ([]docker.AuthConfiguration, bool)
	// HostConfig is used to start the container (if necessary).
	HostConfig *docker.HostConfig
}
51
+
52
// NewClientExecutor creates a client executor that runs builds against the
// provided Docker client. Optional fields (Directory, Excludes, Tag, ...)
// may be set directly on the returned struct before calling Build.
func NewClientExecutor(client *docker.Client) *ClientExecutor {
	return &ClientExecutor{Client: client}
}
56
+
57
// Build is a helper method to perform a Docker build against the
// provided Docker client. It will load the image if not specified,
// create a container if one does not already exist, and start a
// container if the Dockerfile contains RUN commands. It will cleanup
// any containers it creates directly, and set the e.Image.ID field
// to the generated image.
func (e *ClientExecutor) Build(r io.Reader, args map[string]string) error {
	b := NewBuilder()
	b.Args = args

	// default the exclusion list from .dockerignore, always excluding the
	// ignore file itself from the uploaded context
	if e.Excludes == nil {
		excludes, err := ParseDockerignore(e.Directory)
		if err != nil {
			return err
		}
		e.Excludes = append(excludes, ".dockerignore")
	}

	// TODO: check the Docker daemon version (1.20 is required for Upload)

	node, err := parser.Parse(r)
	if err != nil {
		return err
	}

	// identify the base image
	from, err := b.From(node)
	if err != nil {
		return err
	}
	// load the image
	if e.Image == nil {
		if from == NoBaseImageSpecifier {
			if runtime.GOOS == "windows" {
				return fmt.Errorf("building from scratch images is not supported")
			}
			// synthesize an empty stand-in for "scratch" and remove it
			// again once the build finishes
			from, err = e.CreateScratchImage()
			if err != nil {
				return err
			}
			defer e.CleanupImage(from)
		}
		glog.V(4).Infof("Retrieving image %q", from)
		e.Image, err = e.LoadImage(from)
		if err != nil {
			return err
		}
	}

	// update the builder with any information from the image, including ONBUILD
	// statements
	if err := b.FromImage(e.Image, node); err != nil {
		return err
	}

	b.RunConfig.Image = from
	glog.V(4).Infof("step: FROM %s", from)

	// create a container to execute in, if necessary
	mustStart := b.RequiresStart(node)
	if e.Container == nil {
		opts := docker.CreateContainerOptions{
			Config: &docker.Config{
				Image: from,
			},
		}
		if mustStart {
			// TODO: windows support
			// keep the container alive long enough for every RUN step to
			// be executed via exec
			opts.Config.Cmd = []string{"sleep 86400"}
			opts.Config.Entrypoint = []string{"/bin/sh", "-c"}
		}
		if len(opts.Config.Cmd) == 0 {
			// the image may define no default command; the NOP entrypoint
			// lets the container be created but never does any work
			opts.Config.Entrypoint = []string{"/bin/sh", "-c", "# NOP"}
		}
		container, err := e.Client.CreateContainer(opts)
		if err != nil {
			return err
		}
		e.Container = container

		// if we create the container, take responsibilty for cleaning up
		defer e.Cleanup()
	}

	// TODO: lazy start
	if mustStart && !e.Container.State.Running {
		if err := e.Client.StartContainer(e.Container.ID, e.HostConfig); err != nil {
			return err
		}
		// TODO: is this racy? may have to loop wait in the actual run step
	}

	// dispatch each Dockerfile instruction through this executor
	for _, child := range node.Children {
		step := b.Step()
		if err := step.Resolve(child); err != nil {
			return err
		}
		glog.V(4).Infof("step: %s", step.Original)
		if err := b.Run(step, e); err != nil {
			return err
		}
	}

	// commit the accumulated container state as a single squashed layer,
	// tagging the result when e.Tag is set
	config := b.Config()
	var repository, tag string
	if len(e.Tag) > 0 {
		repository, tag = docker.ParseRepositoryTag(e.Tag)
		glog.V(4).Infof("Committing built container %s as image %q: %#v", e.Container.ID, e.Tag, config)
	} else {
		glog.V(4).Infof("Committing built container %s: %#v", e.Container.ID, config)
	}

	image, err := e.Client.CommitContainer(docker.CommitContainerOptions{
		Container:  e.Container.ID,
		Run:        config,
		Repository: repository,
		Tag:        tag,
	})
	if err != nil {
		return err
	}
	e.Image = image
	glog.V(4).Infof("Committed %s to %s", e.Container.ID, e.Image.ID)
	return nil
}
182
+
183
+// Cleanup will remove the container that created the build.
184
+func (e *ClientExecutor) Cleanup() error {
185
+	if e.Container == nil {
186
+		return nil
187
+	}
188
+	err := e.Client.RemoveContainer(docker.RemoveContainerOptions{
189
+		ID:            e.Container.ID,
190
+		RemoveVolumes: true,
191
+		Force:         true,
192
+	})
193
+	if _, ok := err.(*docker.NoSuchContainer); err != nil && !ok {
194
+		return err
195
+	}
196
+	e.Container = nil
197
+	return nil
198
+}
199
+
200
+// CreateScratchImage creates a new, zero byte layer that is identical to "scratch"
201
+// except that the resulting image will have two layers.
202
+func (e *ClientExecutor) CreateScratchImage() (string, error) {
203
+	random := make([]byte, 16)
204
+	if _, err := io.ReadFull(rand.Reader, random); err != nil {
205
+		return "", err
206
+	}
207
+	name := fmt.Sprintf("scratch-%s", base64.URLEncoding.EncodeToString(random))
208
+
209
+	buf := &bytes.Buffer{}
210
+	w := tar.NewWriter(buf)
211
+	w.Close()
212
+
213
+	return name, e.Client.ImportImage(docker.ImportImageOptions{
214
+		Repository:  name,
215
+		Source:      "-",
216
+		InputStream: buf,
217
+	})
218
+}
219
+
220
// CleanupImage attempts to remove the provided image; used to dispose of the
// temporary image produced by CreateScratchImage once a build completes.
func (e *ClientExecutor) CleanupImage(name string) error {
	return e.Client.RemoveImage(name)
}
224
+
225
+// LoadImage checks the client for an image matching from. If not found,
226
+// attempts to pull the image and then tries to inspect again.
227
+func (e *ClientExecutor) LoadImage(from string) (*docker.Image, error) {
228
+	image, err := e.Client.InspectImage(from)
229
+	if err == nil {
230
+		return image, nil
231
+	}
232
+	if err != docker.ErrNoSuchImage {
233
+		return nil, err
234
+	}
235
+
236
+	var registry string
237
+	repository, tag := docker.ParseRepositoryTag(from)
238
+	if parts := strings.SplitN(repository, "/", 2); len(parts) > 1 {
239
+		registry, repository = parts[0], parts[1]
240
+	}
241
+
242
+	// TODO: we may want to abstract looping over multiple credentials
243
+	auth, _ := e.AuthFn(repository)
244
+	if len(auth) == 0 {
245
+		auth = append(auth, docker.AuthConfiguration{})
246
+	}
247
+
248
+	var lastErr error
249
+	for _, config := range auth {
250
+		// TODO: handle IDs?
251
+		err = e.Client.PullImage(docker.PullImageOptions{
252
+			Registry:   registry,
253
+			Repository: repository,
254
+			Tag:        tag,
255
+		}, config)
256
+		if err == nil {
257
+			break
258
+		}
259
+		lastErr = err
260
+		continue
261
+	}
262
+	if lastErr != nil {
263
+		return nil, lastErr
264
+	}
265
+
266
+	return e.Client.InspectImage(from)
267
+}
268
+
269
+// Run executes a single Run command against the current container using exec().
270
+// Since exec does not allow ENV or WORKINGDIR to be set, we force the execution of
271
+// the user command into a shell and perform those operations before. Since RUN
272
+// requires /bin/sh, we can use both 'cd' and 'export'.
273
+func (e *ClientExecutor) Run(run Run, config docker.Config) error {
274
+	args := make([]string, len(run.Args))
275
+	copy(args, run.Args)
276
+
277
+	if runtime.GOOS == "windows" {
278
+		if len(config.WorkingDir) > 0 {
279
+			args[0] = fmt.Sprintf("cd %s && %s", bashQuote(config.WorkingDir), args[0])
280
+		}
281
+		// TODO: implement windows ENV
282
+		args = append([]string{"cmd", "/S", "/C"}, args...)
283
+	} else {
284
+		if len(config.WorkingDir) > 0 {
285
+			args[0] = fmt.Sprintf("cd %s && %s", bashQuote(config.WorkingDir), args[0])
286
+		}
287
+		if len(config.Env) > 0 {
288
+			args[0] = exportEnv(config.Env) + args[0]
289
+		}
290
+		args = append([]string{"/bin/sh", "-c"}, args...)
291
+	}
292
+
293
+	config.Cmd = args
294
+
295
+	exec, err := e.Client.CreateExec(docker.CreateExecOptions{
296
+		Cmd:          config.Cmd,
297
+		Container:    e.Container.ID,
298
+		AttachStdout: true,
299
+		AttachStderr: true,
300
+		User:         config.User,
301
+	})
302
+	if err != nil {
303
+		return err
304
+	}
305
+	err = e.Client.StartExec(exec.ID, docker.StartExecOptions{
306
+		OutputStream: e.Out,
307
+		ErrorStream:  e.ErrOut,
308
+	})
309
+	return err
310
+}
311
+
312
// Copy implements the executor copy step by archiving each source and
// uploading the archive into the build container. The Download flag is
// passed both as the download and decompression permission, so only
// downloadable (ADD-style) copies may also be decompressed.
func (e *ClientExecutor) Copy(copies ...Copy) error {
	container := e.Container
	for _, c := range copies {
		// TODO: reuse source
		for _, dst := range c.Dest {
			glog.V(4).Infof("Archiving %s %t", c.Src, c.Download)
			r, closer, err := e.Archive(c.Src, dst, c.Download, c.Download)
			if err != nil {
				return err
			}
			glog.V(5).Infof("Uploading to %s at %s", container.ID, dst)
			// upload at "/": Archive has already rebased every entry onto
			// its final destination path inside the tar stream
			err = e.Client.UploadToContainer(container.ID, docker.UploadToContainerOptions{
				InputStream: r,
				Path:        "/",
			})
			// always release the archive stream; the shadowed err here is
			// deliberate so the upload error below is not clobbered
			if err := closer.Close(); err != nil {
				glog.Errorf("Error while closing stream container copy stream %s: %v", container.ID, err)
			}
			if err != nil {
				return err
			}
		}
	}
	return nil
}
337
+
338
// closers aggregates cleanup functions into a single io.Closer.
type closers []func() error

// Close invokes every cleanup function in order. All functions run even if
// some fail; the error from the final failing function is returned.
func (c closers) Close() error {
	var result error
	for i := range c {
		if closeErr := c[i](); closeErr != nil {
			result = closeErr
		}
	}
	return result
}
349
+
350
+func (e *ClientExecutor) Archive(src, dst string, allowDecompression, allowDownload bool) (io.Reader, io.Closer, error) {
351
+	var closer closers
352
+	var base string
353
+	var infos []CopyInfo
354
+	var err error
355
+	if isURL(src) {
356
+		if !allowDownload {
357
+			return nil, nil, fmt.Errorf("source can't be a URL")
358
+		}
359
+		infos, base, err = DownloadURL(src, dst)
360
+		if len(base) > 0 {
361
+			closer = append(closer, func() error { return os.RemoveAll(base) })
362
+		}
363
+	} else {
364
+		base = e.Directory
365
+		infos, err = CalcCopyInfo(src, base, allowDecompression, true)
366
+	}
367
+	if err != nil {
368
+		closer.Close()
369
+		return nil, nil, err
370
+	}
371
+
372
+	dst = trimLeadingPath(dst)
373
+	patterns, patDirs, _, _ := fileutils.CleanPatterns(e.Excludes)
374
+	options := &archive.TarOptions{RebaseNames: make(map[string]string)}
375
+	for _, info := range infos {
376
+		if ok, _ := fileutils.OptimizedMatches(info.Path, patterns, patDirs); ok {
377
+			continue
378
+		}
379
+		options.IncludeFiles = append(options.IncludeFiles, info.Path)
380
+		if info.FromDir || strings.HasSuffix(dst, "/") || strings.HasSuffix(dst, "/.") || dst == "." {
381
+			if strings.HasSuffix(info.Path, "/") {
382
+				options.RebaseNames[info.Path] = dst
383
+			} else {
384
+				options.RebaseNames[info.Path] = path.Join(dst, path.Base(info.Path))
385
+			}
386
+		} else {
387
+			options.RebaseNames[info.Path] = dst
388
+		}
389
+	}
390
+	options.ExcludePatterns = e.Excludes
391
+
392
+	glog.V(4).Infof("Tar of directory %s %#v", base, options)
393
+	rc, err := archive.TarWithOptions(base, options)
394
+	closer = append(closer, rc.Close)
395
+	return rc, closer, err
396
+}
0 397
new file mode 100644
... ...
@@ -0,0 +1,591 @@
0
+// +build conformance
1
+
2
+package builder
3
+
4
+import (
5
+	"archive/tar"
6
+	"bytes"
7
+	"flag"
8
+	"fmt"
9
+	"io"
10
+	"io/ioutil"
11
+	"os"
12
+	"os/exec"
13
+	"path/filepath"
14
+	"reflect"
15
+	"strings"
16
+	"testing"
17
+	"time"
18
+
19
+	"github.com/docker/docker/builder/command"
20
+	"github.com/docker/docker/builder/parser"
21
+	docker "github.com/fsouza/go-dockerclient"
22
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
23
+	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
24
+
25
+	"k8s.io/kubernetes/pkg/conversion"
26
+	"k8s.io/kubernetes/pkg/util"
27
+)
28
+
29
// compareLayers enables the deep mode of the conformance tests: each
// Dockerfile step is built and compared individually instead of only the
// final squashed image.
var compareLayers = flag.Bool("compare-layers", false, "If true, compare each generated layer for equivalence")

// conformanceTest describes one Dockerfile scenario to compare between the
// direct builder and a real docker build.
type conformanceTest struct {
	// Dockerfile is the Dockerfile path within the source; "Dockerfile" if empty.
	Dockerfile string
	// Git is an optional repository URL cloned to provide the build context.
	Git string
	// ContextDir is the build context directory within the source.
	ContextDir string
	// Ignore lists extra per-test checks for filesystem differences that
	// may be disregarded during comparison.
	Ignore []ignoreFunc
}
37
+
38
// TestConformance* compares the result of running the direct build against a
// sequential docker build. A dockerfile and git repo is loaded, then each step
// in the file is run sequentially, committing after each step. The generated
// image.Config and the resulting filesystems are compared. The next step reuses
// the previously generated layer and performs an incremental diff. This ensures
// that each step is functionally equivalent.
//
// Deviations:
// * Builds run at different times
//   * Modification timestamps are ignored on files
//   * Some processes (gem install) result in files created in the image that
//     have different content because of that (timestamps in files). We treat
//     a file that is identical except for size within 10 bytes and neither old
//     or new is zero bytes to be identical.
// * Docker container commit with ENV FOO=BAR and a Docker build with line
//   ENV FOO=BAR will generate an image with FOO=BAR in different positions
//   (commit places the variable first, build: last). We try to align the
//   generated environment variable to ensure they are equal.
// * The parent image ID is ignored.
// * Red Hat versions of Docker inject /var/run/secrets differently in build
//   than at runtime.
//
// TODO: .dockerignore
// TODO: check context dir
// TODO: ONBUILD
// TODO: ensure that the final built image has the right UIDs
//
func TestConformanceInternal(t *testing.T) {
	// fixture-based scenarios shipped with this repository
	testCases := []conformanceTest{
		{
			ContextDir: "fixtures/dir",
		},
		{
			ContextDir: "fixtures/ignore",
		},
		{
			Dockerfile: "fixtures/Dockerfile.env",
		},
		{
			Dockerfile: "fixtures/Dockerfile.edgecases",
		},
		{
			Dockerfile: "fixtures/Dockerfile.exposedefault",
		},
		{
			Dockerfile: "fixtures/Dockerfile.add",
		},
	}

	// requires a reachable Docker daemon configured via the environment
	c, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatal(err)
	}

	for i, test := range testCases {
		conformanceTester(t, c, test, i, *compareLayers)
	}
}
96
+
97
// TestConformanceExternal applies external repo testing that may be more expensive or
// change more frequently.
func TestConformanceExternal(t *testing.T) {
	testCases := []conformanceTest{
		{
			// Tests user ownership change under COPY
			Git: "https://github.com/openshift/ruby-hello-world.git",
		},
		{
			// Tests Non-default location dockerfile
			Dockerfile: "Dockerfile.build",
			Git:        "https://github.com/docker-library/hello-world.git",
		},
		{
			// Tests COPY and other complex interactions of ENV
			ContextDir: "9.3",
			Dockerfile: "9.3/Dockerfile",
			Git:        "https://github.com/docker-library/postgres.git",
			Ignore: []ignoreFunc{
				// certificate bundles are regenerated at build time and may
				// legitimately differ between the two builds
				func(a, b *tar.Header) bool {
					switch {
					case (a != nil) == (b != nil):
						return false
					case a != nil:
						return strings.HasPrefix(a.Name, "etc/ssl/certs/")
					case b != nil:
						return strings.HasPrefix(b.Name, "etc/ssl/certs/")
					default:
						return false
					}
				},
			},
		},
	}

	// requires a reachable Docker daemon configured via the environment
	c, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatal(err)
	}

	for i, test := range testCases {
		conformanceTester(t, c, test, i, *compareLayers)
	}
}
141
+
142
// conformanceTester runs one conformance scenario against the provided Docker
// daemon. When deep is true, every Dockerfile step is built by both docker
// build and the direct builder and compared layer by layer; otherwise only a
// single docker build is run and compared against the direct squashed image.
func conformanceTester(t *testing.T, c *docker.Client, test conformanceTest, i int, deep bool) {
	dockerfile := test.Dockerfile
	if len(dockerfile) == 0 {
		dockerfile = "Dockerfile"
	}
	tmpDir, err := ioutil.TempDir("", "dockerbuild-conformance-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	dir := tmpDir
	contextDir := filepath.Join(dir, test.ContextDir)
	dockerfilePath := filepath.Join(dir, dockerfile)

	// clone repo or copy the Dockerfile
	var input string
	switch {
	case len(test.Git) > 0:
		// remote scenario: the repo becomes the build context
		input = test.Git
		cmd := exec.Command("git", "clone", test.Git, dir)
		out, err := cmd.CombinedOutput()
		if err != nil {
			t.Errorf("unable to clone %q: %v\n%s", test.Git, err, out)
			return
		}

	case len(test.Dockerfile) > 0:
		// standalone Dockerfile fixture: copy it into the temp context
		input = dockerfile
		dockerfilePath = filepath.Join(dir, "Dockerfile")
		if _, err := fileutils.CopyFile(filepath.Join("", dockerfile), dockerfilePath); err != nil {
			t.Fatal(err)
		}
		dockerfile = "Dockerfile"

	default:
		// context-dir fixture: build directly from the repository tree
		input = filepath.Join(test.ContextDir, dockerfile)
		dockerfilePath = input
		contextDir = test.ContextDir
		dir = test.ContextDir
	}

	// read the dockerfile
	data, err := ioutil.ReadFile(dockerfilePath)
	if err != nil {
		t.Errorf("%d: unable to read Dockerfile %q: %v", i, input, err)
		return
	}
	node, err := parser.Parse(bytes.NewBuffer(data))
	if err != nil {
		t.Errorf("%d: can't parse Dockerfile %q: %v", i, input, err)
		return
	}
	from, err := NewBuilder().From(node)
	if err != nil {
		t.Errorf("%d: can't get base FROM %q: %v", i, input, err)
		return
	}
	nameFormat := "conformance-dockerbuild-%d-%s-%d"

	var toDelete []string
	steps := node.Children
	lastImage := from

	// treat files that differ only slightly in size as identical — typically
	// an embedded timestamp changed between the two builds
	ignoreSmallFileChange := func(a, b *tar.Header) bool {
		if a == nil || b == nil {
			return false
		}
		diff := a.Size - b.Size
		if differOnlyByFileSize(a, b, 10) {
			t.Logf("WARNING: %s differs only in size by %d bytes, probably a timestamp value change", a.Name, diff)
			return true
		}
		return false
	}

	if deep {
		// execute each step on both Docker build and the direct builder, comparing as we
		// go
		fail := false
		for j := range steps {
			// a synthetic single-step Dockerfile based on the previous layer
			testFile := dockerfileWithFrom(lastImage, steps[j:j+1])

			nameDirect := fmt.Sprintf(nameFormat, i, "direct", j)
			nameDocker := fmt.Sprintf(nameFormat, i, "docker", j)

			// run docker build
			if err := ioutil.WriteFile(dockerfilePath, []byte(testFile), 0600); err != nil {
				t.Errorf("%d: unable to update Dockerfile %q: %v", i, dockerfilePath, err)
				break
			}
			in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}})
			if err != nil {
				t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
				break
			}
			out := &bytes.Buffer{}
			if err := c.BuildImage(docker.BuildImageOptions{
				Name:                nameDocker,
				Dockerfile:          dockerfile,
				RmTmpContainer:      true,
				ForceRmTmpContainer: true,
				InputStream:         in,
				OutputStream:        out,
			}); err != nil {
				in.Close()
				t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
				break
			}
			toDelete = append(toDelete, nameDocker)

			// run direct build
			e := NewClientExecutor(c)
			out = &bytes.Buffer{}
			e.Out, e.ErrOut = out, out
			e.Directory = contextDir
			e.Tag = nameDirect
			if err := e.Build(bytes.NewBufferString(testFile), nil); err != nil {
				t.Errorf("%d: failed to build step %d in dockerfile %q: %s\n%s", i, j, dockerfilePath, steps[j].Original, out)
				break
			}
			toDelete = append(toDelete, nameDirect)

			// only compare filesystem on layers that change the filesystem
			mutation := steps[j].Value == command.Add || steps[j].Value == command.Copy || steps[j].Value == command.Run
			// metadata must be strictly equal
			if !equivalentImages(
				t, c, nameDocker, nameDirect, mutation,
				metadataEqual,
				append(ignoreFuncs{ignoreSmallFileChange, ignoreRunSecretsMode}, test.Ignore...)...,
			) {
				t.Errorf("%d: layered Docker build was not equivalent to direct layer image metadata %s", i, input)
				fail = true
			}

			// the next step builds on top of the docker-built layer
			lastImage = nameDocker
		}

		if fail {
			t.Fatalf("%d: Conformance test failed for %s", i, input)
		}

	} else {
		// shallow mode: a single docker build of the whole Dockerfile
		exclude, _ := ParseDockerignore(dir)
		exclude = append(exclude, ".dockerignore")
		in, err := archive.TarWithOptions(dir, &archive.TarOptions{IncludeFiles: []string{"."}, ExcludePatterns: exclude})
		if err != nil {
			t.Errorf("%d: unable to generate build context %q: %v", i, dockerfilePath, err)
			return
		}
		out := &bytes.Buffer{}
		nameDocker := fmt.Sprintf(nameFormat, i, "docker", 0)
		if err := c.BuildImage(docker.BuildImageOptions{
			Name:                nameDocker,
			Dockerfile:          dockerfile,
			RmTmpContainer:      true,
			ForceRmTmpContainer: true,
			InputStream:         in,
			OutputStream:        out,
		}); err != nil {
			in.Close()
			t.Errorf("%d: unable to build Docker image %q: %v\n%s", i, test.Git, err, out)
			return
		}
		lastImage = nameDocker
		toDelete = append(toDelete, nameDocker)
	}

	// if we ran more than one step, compare the squashed output with the docker build output
	if len(steps) > 1 || !deep {
		nameDirect := fmt.Sprintf(nameFormat, i, "direct", len(steps)-1)
		e := NewClientExecutor(c)
		out := &bytes.Buffer{}
		e.Out, e.ErrOut = out, out
		e.Directory = contextDir
		e.Tag = nameDirect
		if err := e.Build(bytes.NewBuffer(data), nil); err != nil {
			t.Errorf("%d: failed to build complete image in %q: %v\n%s", i, input, err, out)
		} else {
			if !equivalentImages(
				t, c, lastImage, nameDirect, true,
				// metadata should be loosely equivalent, but because we squash and because of limitations
				// in docker commit, there are some differences
				metadataLayerEquivalent,
				append(ignoreFuncs{
					ignoreSmallFileChange,
					// the direct dockerfile contains all steps, the layered image is synthetic from our previous
					// test and so only contains the last layer
					ignoreDockerfileSize(dockerfile),
					// on Red Hat distros of Docker, the /var/run/secrets directory is injected with different
					// permissions depending on whether you RUN or BUILD
					ignoreRunSecretsMode,
				}, test.Ignore...)...,
			) {
				t.Errorf("%d: full Docker build was not equivalent to squashed image metadata %s", i, input)
			}
		}
	}

	// best-effort cleanup of every image this scenario produced
	for _, s := range toDelete {
		c.RemoveImageExtended(s, docker.RemoveImageOptions{Force: true})
	}
}
345
+
346
// ignoreFunc returns true if the difference between the two can be ignored
type ignoreFunc func(a, b *tar.Header) bool

// ignoreFuncs composes several ignoreFunc checks.
type ignoreFuncs []ignoreFunc

// Ignore reports whether any composed check considers the difference between
// the two headers ignorable. Checks after the first match are not invoked.
func (fns ignoreFuncs) Ignore(a, b *tar.Header) bool {
	ignorable := false
	for _, check := range fns {
		if check(a, b) {
			ignorable = true
			break
		}
	}
	return ignorable
}
359
+
360
// metadataFunc returns true if the metadata is equivalent
type metadataFunc func(a, b *docker.Config) bool

// metadataEqual checks that the metadata of two images is directly equivalent.
// NOTE: it mutates both arguments, clearing the Image and Env fields as each
// comparison completes.
func metadataEqual(a, b *docker.Config) bool {
	// compare output metadata
	// parent image IDs always differ between the two builds, so exclude them
	a.Image, b.Image = "", ""
	// docker commit and docker build position ENV entries differently, so
	// compare the environment as a map where ordering does not matter
	e1, e2 := envMap(a.Env), envMap(b.Env)
	if !conversion.EqualitiesOrDie().DeepEqual(e1, e2) {
		return false
	}
	a.Env, b.Env = nil, nil
	// everything else must match exactly
	if !conversion.EqualitiesOrDie().DeepEqual(a, b) {
		return false
	}
	return true
}
377
+
378
+// metadataLayerEquivalent returns true if the last layer of a is equivalent to b, assuming
379
+// that b is squashed over multiple layers, and a is not. b, for instance, will have an empty
380
+// slice entrypoint, while a would have a nil entrypoint.
381
+func metadataLayerEquivalent(a, b *docker.Config) bool {
382
+	if a.Entrypoint == nil && len(b.Entrypoint) == 0 {
383
+		// we are forced to set Entrypoint [] to reset the entrypoint
384
+		b.Entrypoint = nil
385
+	}
386
+	if len(a.OnBuild) == 1 && len(b.OnBuild) > 0 && a.OnBuild[0] == b.OnBuild[len(b.OnBuild)-1] {
387
+		// a layered file will only contain the last OnBuild statement
388
+		b.OnBuild = a.OnBuild
389
+	}
390
+	return metadataEqual(a, b)
391
+}
392
+
393
+// equivalentImages executes the provided checks against two docker images, returning true
394
+// if the images are equivalent, and recording a test suite error in any other condition.
395
+func equivalentImages(t *testing.T, c *docker.Client, a, b string, testFilesystem bool, metadataFn metadataFunc, ignoreFns ...ignoreFunc) bool {
396
+	imageA, err := c.InspectImage(a)
397
+	if err != nil {
398
+		t.Errorf("can't get image %q: %v", a, err)
399
+		return false
400
+	}
401
+	imageB, err := c.InspectImage(b)
402
+	if err != nil {
403
+		t.Errorf("can't get image %q: %v", b, err)
404
+		return false
405
+	}
406
+
407
+	if !metadataFn(imageA.Config, imageB.Config) {
408
+		t.Errorf("generated image metadata did not match: %s", util.ObjectDiff(imageA.Config, imageB.Config))
409
+		return false
410
+	}
411
+
412
+	// for mutation commands, check the layer diff
413
+	if testFilesystem {
414
+		differs, onlyA, onlyB, err := compareImageFS(c, a, b)
415
+		if err != nil {
416
+			t.Errorf("can't calculate FS differences %q: %v", a, err)
417
+			return false
418
+		}
419
+		for k, v := range differs {
420
+			if ignoreFuncs(ignoreFns).Ignore(v[0], v[1]) {
421
+				delete(differs, k)
422
+				continue
423
+			}
424
+			t.Errorf("%s %s differs: %s", a, k, util.ObjectDiff(v[0], v[1]))
425
+		}
426
+		for k, v := range onlyA {
427
+			if ignoreFuncs(ignoreFns).Ignore(v, nil) {
428
+				delete(onlyA, k)
429
+				continue
430
+			}
431
+		}
432
+		for k, v := range onlyB {
433
+			if ignoreFuncs(ignoreFns).Ignore(nil, v) {
434
+				delete(onlyB, k)
435
+				continue
436
+			}
437
+		}
438
+		if len(onlyA)+len(onlyB)+len(differs) > 0 {
439
+			t.Errorf("a=%v b=%v diff=%v", onlyA, onlyB, differs)
440
+			return false
441
+		}
442
+	}
443
+	return true
444
+}
445
+
446
+// dockerfileWithFrom returns the contents of a new docker file with a different
447
+// FROM as the first line.
448
+func dockerfileWithFrom(from string, steps []*parser.Node) string {
449
+	lines := []string{}
450
+	lines = append(lines, fmt.Sprintf("FROM %s", from))
451
+	for _, step := range steps {
452
+		lines = append(lines, step.Original)
453
+	}
454
+	return strings.Join(lines, "\n")
455
+}
456
+
457
+// envMap returns a map from a list of environment variables.
458
+func envMap(env []string) map[string]string {
459
+	out := make(map[string]string)
460
+	for _, envVar := range env {
461
+		parts := strings.SplitN(envVar, "=", 2)
462
+		if len(parts) != 2 {
463
+			out[envVar] = ""
464
+			continue
465
+		}
466
+		out[parts[0]] = parts[1]
467
+	}
468
+	return out
469
+}
470
+
471
+// differOnlyByFileSize returns true iff the headers differ only by size, but
472
+// that difference is less than within bytes.
473
+func differOnlyByFileSize(a, b *tar.Header, within int64) bool {
474
+	if a == nil || b == nil {
475
+		return false
476
+	}
477
+	if a.Size == b.Size {
478
+		return false
479
+	}
480
+
481
+	diff := a.Size - b.Size
482
+	if diff < 0 {
483
+		diff = diff * -1
484
+	}
485
+	if diff < within && a.Size != 0 && b.Size != 0 {
486
+		a.Size = b.Size
487
+		if reflect.DeepEqual(a, b) {
488
+			return true
489
+		}
490
+	}
491
+	return false
492
+}
493
+
494
+// ignore the Dockerfile being different, an artifact of this test
495
+func ignoreDockerfileSize(dockerfile string) ignoreFunc {
496
+	return func(a, b *tar.Header) bool {
497
+		if a == nil || b == nil {
498
+			return false
499
+		}
500
+		if !strings.HasSuffix(a.Name, dockerfile) {
501
+			return false
502
+		}
503
+		if a.Size != b.Size {
504
+			a.Size = b.Size
505
+			return reflect.DeepEqual(a, b)
506
+		}
507
+		return false
508
+	}
509
+}
510
+
511
+// ignore secrets permissions on Red Hat docker (the directory is mounted in)
512
+func ignoreRunSecretsMode(a, b *tar.Header) bool {
513
+	if a == nil || b == nil {
514
+		return false
515
+	}
516
+	if a.Name != "run/secrets/" {
517
+		return false
518
+	}
519
+	if a.Mode == 16877 && b.Mode == 16832 {
520
+		a.Mode = b.Mode
521
+		return reflect.DeepEqual(a, b)
522
+	}
523
+	return false
524
+}
525
+
526
+// compareImageFS exports the file systems of two images and returns a map
527
+// of files that differ in any way (modification time excluded), only exist in
528
+// image A, or only exist in image B.
529
+func compareImageFS(c *docker.Client, a, b string) (differ map[string][]*tar.Header, onlyA, onlyB map[string]*tar.Header, err error) {
530
+	fsA, err := imageFSMetadata(c, a)
531
+	if err != nil {
532
+		return nil, nil, nil, err
533
+	}
534
+	fsB, err := imageFSMetadata(c, b)
535
+	if err != nil {
536
+		return nil, nil, nil, err
537
+	}
538
+	differ = make(map[string][]*tar.Header)
539
+	onlyA = make(map[string]*tar.Header)
540
+	onlyB = fsB
541
+	for k, v1 := range fsA {
542
+		v2, ok := fsB[k]
543
+		if !ok {
544
+			onlyA[k] = v1
545
+			continue
546
+		}
547
+		delete(onlyB, k)
548
+		// we ignore modification time differences
549
+		v1.ModTime = time.Time{}
550
+		v2.ModTime = time.Time{}
551
+		if !reflect.DeepEqual(v1, v2) {
552
+			differ[k] = []*tar.Header{v1, v2}
553
+		}
554
+	}
555
+	return differ, onlyA, onlyB, nil
556
+}
557
+
558
+// imageFSMetadata creates a container and reads the filesystem metadata out of the archive.
559
+func imageFSMetadata(c *docker.Client, name string) (map[string]*tar.Header, error) {
560
+	container, err := c.CreateContainer(docker.CreateContainerOptions{Name: name + "-export", Config: &docker.Config{Image: name}})
561
+	if err != nil {
562
+		return nil, err
563
+	}
564
+	defer c.RemoveContainer(docker.RemoveContainerOptions{ID: container.ID, RemoveVolumes: true, Force: true})
565
+
566
+	ch := make(chan struct{})
567
+	result := make(map[string]*tar.Header)
568
+	r, w := io.Pipe()
569
+	go func() {
570
+		defer close(ch)
571
+		out := tar.NewReader(r)
572
+		for {
573
+			h, err := out.Next()
574
+			if err != nil {
575
+				if err == io.EOF {
576
+					w.Close()
577
+				} else {
578
+					w.CloseWithError(err)
579
+				}
580
+				break
581
+			}
582
+			result[h.Name] = h
583
+		}
584
+	}()
585
+	if err := c.ExportContainer(docker.ExportContainerOptions{ID: container.ID, OutputStream: w}); err != nil {
586
+		return nil, err
587
+	}
588
+	<-ch
589
+	return result, nil
590
+}
0 591
new file mode 100644
... ...
@@ -0,0 +1,13 @@
0
+package builder
1
+
2
+const (
3
+	// in docker/system
4
+	NoBaseImageSpecifier = "scratch"
5
+
6
+	// not yet part of our import
7
+	commandArg        = "arg"
8
+	commandStopSignal = "stopsignal"
9
+
10
+	// in docker/system
11
+	defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
12
+)
0 13
new file mode 100644
... ...
@@ -0,0 +1,143 @@
0
+package builder
1
+
2
+import (
3
+	"fmt"
4
+	"io"
5
+	"io/ioutil"
6
+	"net/http"
7
+	"net/url"
8
+	"os"
9
+	"path"
10
+	"path/filepath"
11
+	"strings"
12
+)
13
+
14
+type CopyInfo struct {
15
+	os.FileInfo
16
+	Path       string
17
+	Decompress bool
18
+	FromDir    bool
19
+}
20
+
21
+// CalcCopyInfo identifies the source files selected by a Dockerfile ADD or COPY instruction.
22
+func CalcCopyInfo(origPath, rootPath string, allowLocalDecompression, allowWildcards bool) ([]CopyInfo, error) {
23
+	origPath = trimLeadingPath(origPath)
24
+	// Deal with wildcards
25
+	if allowWildcards && containsWildcards(origPath) {
26
+		var copyInfos []CopyInfo
27
+		if err := filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {
28
+			if err != nil {
29
+				return err
30
+			}
31
+			if info.Name() == "" {
32
+				// Why are we doing this check?
33
+				return nil
34
+			}
35
+			if match, _ := filepath.Match(origPath, path); !match {
36
+				return nil
37
+			}
38
+
39
+			// Note we set allowWildcards to false in case the name has
40
+			// a * in it
41
+			subInfos, err := CalcCopyInfo(path, rootPath, allowLocalDecompression, false)
42
+			if err != nil {
43
+				return err
44
+			}
45
+			copyInfos = append(copyInfos, subInfos...)
46
+			return nil
47
+		}); err != nil {
48
+			return nil, err
49
+		}
50
+		return copyInfos, nil
51
+	}
52
+
53
+	// flatten the root directory so we can rebase it
54
+	if origPath == "." {
55
+		var copyInfos []CopyInfo
56
+		infos, err := ioutil.ReadDir(rootPath)
57
+		if err != nil {
58
+			return nil, err
59
+		}
60
+		for _, info := range infos {
61
+			copyInfos = append(copyInfos, CopyInfo{FileInfo: info, Path: info.Name(), Decompress: allowLocalDecompression, FromDir: true})
62
+		}
63
+		return copyInfos, nil
64
+	}
65
+
66
+	// Must be a dir or a file
67
+	fi, err := os.Stat(filepath.Join(rootPath, origPath))
68
+	if err != nil {
69
+		return nil, err
70
+	}
71
+
72
+	return []CopyInfo{{FileInfo: fi, Path: origPath, Decompress: allowLocalDecompression}}, nil
73
+}
74
+
75
+func DownloadURL(src, dst string) ([]CopyInfo, string, error) {
76
+	// get filename from URL
77
+	u, err := url.Parse(src)
78
+	if err != nil {
79
+		return nil, "", err
80
+	}
81
+	base := path.Base(u.Path)
82
+	if base == "." {
83
+		return nil, "", fmt.Errorf("cannot determine filename from url: %s", u)
84
+	}
85
+
86
+	resp, err := http.Get(src)
87
+	if err != nil {
88
+		return nil, "", err
89
+	}
90
+	defer resp.Body.Close()
91
+	if resp.StatusCode >= 400 {
92
+		return nil, "", fmt.Errorf("server returned a status code >= 400: %s", resp.Status)
93
+	}
94
+
95
+	tmpDir, err := ioutil.TempDir("", "dockerbuildurl-")
96
+	if err != nil {
97
+		return nil, "", err
98
+	}
99
+	tmpFileName := filepath.Join(tmpDir, base)
100
+	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
101
+	if err != nil {
102
+		os.RemoveAll(tmpDir)
103
+		return nil, "", err
104
+	}
105
+	if _, err := io.Copy(tmpFile, resp.Body); err != nil {
106
+		os.RemoveAll(tmpDir)
107
+		return nil, "", err
108
+	}
109
+	if err := tmpFile.Close(); err != nil {
110
+		os.RemoveAll(tmpDir)
111
+		return nil, "", err
112
+	}
113
+	info, err := os.Stat(tmpFileName)
114
+	if err != nil {
115
+		os.RemoveAll(tmpDir)
116
+		return nil, "", err
117
+	}
118
+	return []CopyInfo{{FileInfo: info, Path: base}}, tmpDir, nil
119
+}
120
+
121
+func trimLeadingPath(origPath string) string {
122
+	// Work in daemon-specific OS filepath semantics
123
+	origPath = filepath.FromSlash(origPath)
124
+	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
125
+		origPath = origPath[1:]
126
+	}
127
+	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
128
+	return origPath
129
+}
130
+
131
+// containsWildcards checks whether the provided name has a wildcard.
132
+func containsWildcards(name string) bool {
133
+	for i := 0; i < len(name); i++ {
134
+		ch := name[i]
135
+		if ch == '\\' {
136
+			i++
137
+		} else if ch == '*' || ch == '?' || ch == '[' {
138
+			return true
139
+		}
140
+	}
141
+	return false
142
+}
0 143
new file mode 100644
... ...
@@ -0,0 +1,441 @@
0
+package builder
1
+
2
+// This file contains the dispatchers for each command. Note that
3
+// `nullDispatch` is not actually a command, but support for commands we parse
4
+// but do nothing with.
5
+//
6
+// See evaluator.go for a higher level discussion of the whole evaluator
7
+// package.
8
+
9
+import (
10
+	"fmt"
11
+	"os"
12
+	"path/filepath"
13
+	"regexp"
14
+	"runtime"
15
+	"strings"
16
+
17
+	docker "github.com/fsouza/go-dockerclient"
18
+
19
+	"github.com/openshift/origin/pkg/util/docker/dockerfile/builder/signal"
20
+	"github.com/openshift/origin/pkg/util/docker/dockerfile/builder/strslice"
21
+)
22
+
23
+// dispatch with no layer / parsing. This is effectively not a command.
24
+func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error {
25
+	return nil
26
+}
27
+
28
+// ENV foo bar
29
+//
30
+// Sets the environment variable foo to bar, also makes interpolation
31
+// in the dockerfile available from the next statement on via ${foo}.
32
+//
33
+func env(b *Builder, args []string, attributes map[string]bool, original string) error {
34
+	if len(args) == 0 {
35
+		return errAtLeastOneArgument("ENV")
36
+	}
37
+
38
+	if len(args)%2 != 0 {
39
+		// should never get here, but just in case
40
+		return errTooManyArguments("ENV")
41
+	}
42
+
43
+	// TODO/FIXME/NOT USED
44
+	// Just here to show how to use the builder flags stuff within the
45
+	// context of a builder command. Will remove once we actually add
46
+	// a builder command to something!
47
+	/*
48
+		flBool1 := b.flags.AddBool("bool1", false)
49
+		flStr1 := b.flags.AddString("str1", "HI")
50
+
51
+		if err := b.flags.Parse(); err != nil {
52
+			return err
53
+		}
54
+
55
+		fmt.Printf("Bool1:%v\n", flBool1)
56
+		fmt.Printf("Str1:%v\n", flStr1)
57
+	*/
58
+
59
+	for j := 0; j < len(args); j++ {
60
+		// name  ==> args[j]
61
+		// value ==> args[j+1]
62
+		newVar := args[j] + "=" + args[j+1] + ""
63
+		gotOne := false
64
+		for i, envVar := range b.RunConfig.Env {
65
+			envParts := strings.SplitN(envVar, "=", 2)
66
+			if envParts[0] == args[j] {
67
+				b.RunConfig.Env[i] = newVar
68
+				gotOne = true
69
+				break
70
+			}
71
+		}
72
+		if !gotOne {
73
+			b.RunConfig.Env = append(b.RunConfig.Env, newVar)
74
+		}
75
+		j++
76
+	}
77
+
78
+	return nil
79
+}
80
+
81
+// MAINTAINER some text <maybe@an.email.address>
82
+//
83
+// Sets the maintainer metadata.
84
+func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
85
+	if len(args) != 1 {
86
+		return errExactlyOneArgument("MAINTAINER")
87
+	}
88
+
89
+	return nil
90
+}
91
+
92
+// LABEL some json data describing the image
93
+//
94
+// Sets the Label variable foo to bar,
95
+//
96
+func label(b *Builder, args []string, attributes map[string]bool, original string) error {
97
+	if len(args) == 0 {
98
+		return errAtLeastOneArgument("LABEL")
99
+	}
100
+	if len(args)%2 != 0 {
101
+		// should never get here, but just in case
102
+		return errTooManyArguments("LABEL")
103
+	}
104
+
105
+	if b.RunConfig.Labels == nil {
106
+		b.RunConfig.Labels = map[string]string{}
107
+	}
108
+
109
+	for j := 0; j < len(args); j++ {
110
+		// name  ==> args[j]
111
+		// value ==> args[j+1]
112
+		b.RunConfig.Labels[args[j]] = args[j+1]
113
+		j++
114
+	}
115
+	return nil
116
+}
117
+
118
+// ADD foo /path
119
+//
120
+// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
121
+// exist here. If you do not wish to have this automatic handling, use COPY.
122
+//
123
+func add(b *Builder, args []string, attributes map[string]bool, original string) error {
124
+	if len(args) < 2 {
125
+		return errAtLeastOneArgument("ADD")
126
+	}
127
+	for i := 1; i < len(args); i++ {
128
+		args[i] = makeAbsolute(args[i], b.RunConfig.WorkingDir)
129
+	}
130
+	b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0], Dest: args[1:], Download: true})
131
+	return nil
132
+}
133
+
134
+// COPY foo /path
135
+//
136
+// Same as 'ADD' but without the tar and remote url handling.
137
+//
138
+func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error {
139
+	if len(args) < 2 {
140
+		return errAtLeastOneArgument("COPY")
141
+	}
142
+	for i := 1; i < len(args); i++ {
143
+		args[i] = makeAbsolute(args[i], b.RunConfig.WorkingDir)
144
+	}
145
+	b.PendingCopies = append(b.PendingCopies, Copy{Src: args[0], Dest: args[1:], Download: false})
146
+	return nil
147
+}
148
+
149
+// FROM imagename
150
+//
151
+// This sets the image the dockerfile will build on top of.
152
+//
153
+func from(b *Builder, args []string, attributes map[string]bool, original string) error {
154
+	if len(args) != 1 {
155
+		return errExactlyOneArgument("FROM")
156
+	}
157
+
158
+	name := args[0]
159
+	// Windows cannot support a container with no base image.
160
+	if name == NoBaseImageSpecifier {
161
+		if runtime.GOOS == "windows" {
162
+			return fmt.Errorf("Windows does not support FROM scratch")
163
+		}
164
+	}
165
+	b.RunConfig.Image = name
166
+	// TODO: handle onbuild
167
+	return nil
168
+}
169
+
170
+// ONBUILD RUN echo yo
171
+//
172
+// ONBUILD triggers run when the image is used in a FROM statement.
173
+//
174
+// ONBUILD handling has a lot of special-case functionality, the heading in
175
+// evaluator.go and comments around dispatch() in the same file explain the
176
+// special cases. search for 'OnBuild' in internals.go for additional special
177
+// cases.
178
+//
179
+func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error {
180
+	if len(args) == 0 {
181
+		return errAtLeastOneArgument("ONBUILD")
182
+	}
183
+
184
+	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
185
+	switch triggerInstruction {
186
+	case "ONBUILD":
187
+		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
188
+	case "MAINTAINER", "FROM":
189
+		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
190
+	}
191
+
192
+	original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")
193
+
194
+	b.RunConfig.OnBuild = append(b.RunConfig.OnBuild, original)
195
+	return nil
196
+}
197
+
198
+// WORKDIR /tmp
199
+//
200
+// Set the working directory for future RUN/CMD/etc statements.
201
+//
202
+func workdir(b *Builder, args []string, attributes map[string]bool, original string) error {
203
+	if len(args) != 1 {
204
+		return errExactlyOneArgument("WORKDIR")
205
+	}
206
+
207
+	// This is from the Dockerfile and will not necessarily be in platform
208
+	// specific semantics, hence ensure it is converted.
209
+	workdir := filepath.FromSlash(args[0])
210
+
211
+	if !filepath.IsAbs(workdir) {
212
+		current := filepath.FromSlash(b.RunConfig.WorkingDir)
213
+		workdir = filepath.Join(string(os.PathSeparator), current, workdir)
214
+	}
215
+
216
+	b.RunConfig.WorkingDir = workdir
217
+	return nil
218
+}
219
+
220
+// RUN some command yo
221
+//
222
+// run a command and commit the image. Args are automatically prepended with
223
+// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
224
+// only one argument. The difference in processing:
225
+//
226
+// RUN echo hi          # sh -c echo hi       (Linux)
227
+// RUN echo hi          # cmd /S /C echo hi   (Windows)
228
+// RUN [ "echo", "hi" ] # echo hi
229
+//
230
+func run(b *Builder, args []string, attributes map[string]bool, original string) error {
231
+	if b.RunConfig.Image == "" {
232
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
233
+	}
234
+
235
+	args = handleJSONArgs(args, attributes)
236
+
237
+	run := Run{Args: args}
238
+
239
+	if !attributes["json"] {
240
+		run.Shell = true
241
+	}
242
+	b.PendingRuns = append(b.PendingRuns, run)
243
+	return nil
244
+}
245
+
246
+// CMD foo
247
+//
248
+// Set the default command to run in the container (which may be empty).
249
+// Argument handling is the same as RUN.
250
+//
251
+func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
252
+	cmdSlice := handleJSONArgs(args, attributes)
253
+
254
+	if !attributes["json"] {
255
+		if runtime.GOOS != "windows" {
256
+			cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
257
+		} else {
258
+			cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...)
259
+		}
260
+	}
261
+
262
+	b.RunConfig.Cmd = strslice.StrSlice(cmdSlice)
263
+	if len(args) != 0 {
264
+		b.CmdSet = true
265
+	}
266
+	return nil
267
+}
268
+
269
+// ENTRYPOINT /usr/sbin/nginx
270
+//
271
+// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to
272
+// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
273
+//
274
+// Handles command processing similar to CMD and RUN, only b.RunConfig.Entrypoint
275
+// is initialized at NewBuilder time instead of through argument parsing.
276
+//
277
+func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
278
+	parsed := handleJSONArgs(args, attributes)
279
+
280
+	switch {
281
+	case attributes["json"]:
282
+		// ENTRYPOINT ["echo", "hi"]
283
+		b.RunConfig.Entrypoint = strslice.StrSlice(parsed)
284
+	case len(parsed) == 0:
285
+		// ENTRYPOINT []
286
+		b.RunConfig.Entrypoint = nil
287
+	default:
288
+		// ENTRYPOINT echo hi
289
+		if runtime.GOOS != "windows" {
290
+			b.RunConfig.Entrypoint = strslice.StrSlice{"/bin/sh", "-c", parsed[0]}
291
+		} else {
292
+			b.RunConfig.Entrypoint = strslice.StrSlice{"cmd", "/S", "/C", parsed[0]}
293
+		}
294
+	}
295
+
296
+	// when setting the entrypoint if a CMD was not explicitly set then
297
+	// set the command to nil
298
+	if !b.CmdSet {
299
+		b.RunConfig.Cmd = nil
300
+	}
301
+	return nil
302
+}
303
+
304
+// EXPOSE 6667/tcp 7000/tcp
305
+//
306
+// Expose ports for links and port mappings. This all ends up in
307
+// b.RunConfig.ExposedPorts for runconfig.
308
+//
309
+func expose(b *Builder, args []string, attributes map[string]bool, original string) error {
310
+	if len(args) == 0 {
311
+		return errAtLeastOneArgument("EXPOSE")
312
+	}
313
+
314
+	if b.RunConfig.ExposedPorts == nil {
315
+		b.RunConfig.ExposedPorts = make(map[docker.Port]struct{})
316
+	}
317
+
318
+	existing := map[string]struct{}{}
319
+	for k := range b.RunConfig.ExposedPorts {
320
+		existing[k.Port()] = struct{}{}
321
+	}
322
+
323
+	for _, port := range args {
324
+		dp := docker.Port(port)
325
+		if _, exists := existing[dp.Port()]; !exists {
326
+			b.RunConfig.ExposedPorts[docker.Port(fmt.Sprintf("%s/%s", dp.Port(), dp.Proto()))] = struct{}{}
327
+		}
328
+	}
329
+	return nil
330
+}
331
+
332
+// USER foo
333
+//
334
+// Set the user to 'foo' for future commands and when running the
335
+// ENTRYPOINT/CMD at container run time.
336
+//
337
+func user(b *Builder, args []string, attributes map[string]bool, original string) error {
338
+	if len(args) != 1 {
339
+		return errExactlyOneArgument("USER")
340
+	}
341
+
342
+	b.RunConfig.User = args[0]
343
+	return nil
344
+}
345
+
346
+// VOLUME /foo
347
+//
348
+// Expose the volume /foo for use. Will also accept the JSON array form.
349
+//
350
+func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
351
+	if len(args) == 0 {
352
+		return errAtLeastOneArgument("VOLUME")
353
+	}
354
+
355
+	if b.RunConfig.Volumes == nil {
356
+		b.RunConfig.Volumes = map[string]struct{}{}
357
+	}
358
+	for _, v := range args {
359
+		v = strings.TrimSpace(v)
360
+		if v == "" {
361
+			return fmt.Errorf("Volume specified can not be an empty string")
362
+		}
363
+		b.RunConfig.Volumes[v] = struct{}{}
364
+	}
365
+	return nil
366
+}
367
+
368
+// STOPSIGNAL signal
369
+//
370
+// Set the signal that will be used to kill the container.
371
+func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error {
372
+	if len(args) != 1 {
373
+		return fmt.Errorf("STOPSIGNAL requires exactly one argument")
374
+	}
375
+
376
+	sig := args[0]
377
+	_, err := signal.ParseSignal(sig)
378
+	if err != nil {
379
+		return err
380
+	}
381
+
382
+	b.RunConfig.StopSignal = sig
383
+	return nil
384
+}
385
+
386
+// ARG name[=value]
387
+//
388
+// Adds the variable foo to the trusted list of variables that can be passed
389
+// to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
390
+// Dockerfile author may optionally set a default value of this variable.
391
+func arg(b *Builder, args []string, attributes map[string]bool, original string) error {
392
+	if len(args) != 1 {
393
+		return fmt.Errorf("ARG requires exactly one argument definition")
394
+	}
395
+
396
+	var (
397
+		name       string
398
+		value      string
399
+		hasDefault bool
400
+	)
401
+
402
+	arg := args[0]
403
+	// 'arg' can just be a name or name-value pair. Note that this is different
404
+	// from 'env' that handles the split of name and value at the parser level.
405
+	// The reason for doing it differently for 'arg' is that we support just
406
+	// defining an arg and not assigning it a value (while 'env' always expects a
407
+	// name-value pair). If possible, it would be good to harmonize the two.
408
+	if strings.Contains(arg, "=") {
409
+		parts := strings.SplitN(arg, "=", 2)
410
+		name = parts[0]
411
+		value = parts[1]
412
+		hasDefault = true
413
+	} else {
414
+		name = arg
415
+		hasDefault = false
416
+	}
417
+	// add the arg to allowed list of build-time args from this step on.
418
+	b.AllowedArgs[name] = true
419
+
420
+	// If there is a default value associated with this arg then add it to the
421
+	// b.buildArgs if one is not already passed to the builder. The args passed
422
+	// to builder override the default value of 'arg'.
423
+	if _, ok := b.Args[name]; !ok && hasDefault {
424
+		b.Args[name] = value
425
+	}
426
+
427
+	return nil
428
+}
429
+
430
+func errAtLeastOneArgument(command string) error {
431
+	return fmt.Errorf("%s requires at least one argument", command)
432
+}
433
+
434
+func errExactlyOneArgument(command string) error {
435
+	return fmt.Errorf("%s requires exactly one argument", command)
436
+}
437
+
438
+func errTooManyArguments(command string) error {
439
+	return fmt.Errorf("Bad input to %s, too many arguments", command)
440
+}
0 441
new file mode 100644
... ...
@@ -0,0 +1,6 @@
0
+// Package builder uses code from github.com/docker/docker/builder/* to implement
1
+// a Docker builder that does not create individual layers, but instead creates a
2
+// single layer.
3
+//
4
+// TODO: full windows support
5
+package builder
0 6
new file mode 100644
... ...
@@ -0,0 +1,150 @@
0
+package builder
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+
6
+	"github.com/docker/docker/builder/command"
7
+	"github.com/docker/docker/builder/parser"
8
+)
9
+
10
+// Environment variable interpolation will happen on these statements only.
11
+var replaceEnvAllowed = map[string]bool{
12
+	command.Env:       true,
13
+	command.Label:     true,
14
+	command.Add:       true,
15
+	command.Copy:      true,
16
+	command.Workdir:   true,
17
+	command.Expose:    true,
18
+	command.Volume:    true,
19
+	command.User:      true,
20
+	commandStopSignal: true,
21
+	commandArg:        true,
22
+}
23
+
24
+// Certain commands are allowed to have their args split into more
25
+// words after env var replacements. Meaning:
26
+//   ENV foo="123 456"
27
+//   EXPOSE $foo
28
+// should result in the same thing as:
29
+//   EXPOSE 123 456
30
+// and not treat "123 456" as a single word.
31
+// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing.
32
+// Quotes will cause it to still be treated as single word.
33
+var allowWordExpansion = map[string]bool{
34
+	command.Expose: true,
35
+}
36
+
37
+// Step represents the input Env and the output command after all
38
+// post processing of the command arguments is done.
39
+type Step struct {
40
+	Env []string
41
+
42
+	Command  string
43
+	Args     []string
44
+	Flags    []string
45
+	Attrs    map[string]bool
46
+	Message  string
47
+	Original string
48
+}
49
+
50
+// Resolve transforms a parsed Dockerfile line into a command to execute,
51
+// resolving any arguments.
52
+//
53
+// Almost all nodes will have this structure:
54
+// Child[Node, Node, Node] where Child is from parser.Node.Children and each
55
+// node comes from parser.Node.Next. This forms a "line" with a statement and
56
+// arguments and we process them in this normalized form by hitting
57
+// evaluateTable with the leaf nodes of the command and the Builder object.
58
+//
59
+// ONBUILD is a special case; in this case the parser will emit:
60
+// Child[Node, Child[Node, Node...]] where the first node is the literal
61
+// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
62
+// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
63
+// deal with that, at least until it becomes more of a general concern with new
64
+// features.
65
+func (b *Step) Resolve(ast *parser.Node) error {
66
+	cmd := ast.Value
67
+	upperCasedCmd := strings.ToUpper(cmd)
68
+
69
+	// To ensure the user is given a decent error message if the platform
70
+	// on which the daemon is running does not support a builder command.
71
+	if err := platformSupports(strings.ToLower(cmd)); err != nil {
72
+		return err
73
+	}
74
+
75
+	attrs := ast.Attributes
76
+	original := ast.Original
77
+	flags := ast.Flags
78
+	strList := []string{}
79
+	msg := upperCasedCmd
80
+
81
+	if len(ast.Flags) > 0 {
82
+		msg += " " + strings.Join(ast.Flags, " ")
83
+	}
84
+
85
+	if cmd == "onbuild" {
86
+		if ast.Next == nil {
87
+			return fmt.Errorf("ONBUILD requires at least one argument")
88
+		}
89
+		ast = ast.Next.Children[0]
90
+		strList = append(strList, ast.Value)
91
+		msg += " " + ast.Value
92
+
93
+		if len(ast.Flags) > 0 {
94
+			msg += " " + strings.Join(ast.Flags, " ")
95
+		}
96
+
97
+	}
98
+
99
+	// count the number of nodes that we are going to traverse first
100
+	// so we can pre-create the argument and message array. This speeds up the
101
+	// allocation of those list a lot when they have a lot of arguments
102
+	cursor := ast
103
+	var n int
104
+	for cursor.Next != nil {
105
+		cursor = cursor.Next
106
+		n++
107
+	}
108
+	msgList := make([]string, n)
109
+
110
+	var i int
111
+	envs := b.Env
112
+	for ast.Next != nil {
113
+		ast = ast.Next
114
+		var str string
115
+		str = ast.Value
116
+		if replaceEnvAllowed[cmd] {
117
+			var err error
118
+			var words []string
119
+
120
+			if allowWordExpansion[cmd] {
121
+				words, err = ProcessWords(str, envs)
122
+				if err != nil {
123
+					return err
124
+				}
125
+				strList = append(strList, words...)
126
+			} else {
127
+				str, err = ProcessWord(str, envs)
128
+				if err != nil {
129
+					return err
130
+				}
131
+				strList = append(strList, str)
132
+			}
133
+		} else {
134
+			strList = append(strList, str)
135
+		}
136
+		msgList[i] = ast.Value
137
+		i++
138
+	}
139
+
140
+	msg += " " + strings.Join(msgList, " ")
141
+
142
+	b.Message = msg
143
+	b.Command = cmd
144
+	b.Args = strList
145
+	b.Original = original
146
+	b.Attrs = attrs
147
+	b.Flags = flags
148
+	return nil
149
+}
0 150
new file mode 100644
... ...
@@ -0,0 +1,11 @@
0
+FROM busybox
1
+ADD https://github.com/openshift/origin/raw/master/README.md README.md
2
+USER 1001
3
+ADD https://github.com/openshift/origin/raw/master/LICENSE .
4
+ADD https://github.com/openshift/origin/raw/master/LICENSE A
5
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./a
6
+USER root
7
+RUN mkdir ./b
8
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./b/a
9
+ADD https://github.com/openshift/origin/raw/master/LICENSE ./b/.
10
+ADD https://github.com/openshift/ruby-hello-world/archive/master.zip /tmp/
0 11
new file mode 100644
... ...
@@ -0,0 +1,48 @@
0
+FROM busybox
1
+
2
+MAINTAINER docker <docker@docker.io>
3
+
4
+ONBUILD RUN ["echo", "test"]
5
+ONBUILD RUN echo test
6
+ONBUILD COPY . /
7
+
8
+
9
+# RUN Commands \
10
+# linebreak in comment \
11
+RUN ["ls", "-la"]
12
+RUN ["echo", "'1234'"]
13
+RUN echo "1234"
14
+RUN echo 1234
15
+RUN echo '1234' && \
16
+    echo "456" && \
17
+    echo 789
18
+RUN    sh -c 'echo root:testpass \
19
+        > /tmp/passwd'
20
+RUN mkdir -p /test /test2 /test3/test
21
+
22
+# ENV \
23
+ENV SCUBA 1 DUBA 3
24
+ENV SCUBA "1 DUBA 3"
25
+
26
+# CMD \
27
+CMD ["echo", "test"]
28
+CMD echo test
29
+CMD echo "test"
30
+CMD echo 'test'
31
+CMD echo 'test' | wc -
32
+
33
+#EXPOSE\
34
+EXPOSE 3000
35
+EXPOSE 9000 5000 6000
36
+
37
+USER docker
38
+USER docker:root
39
+
40
+VOLUME ["/test"]
41
+VOLUME ["/test", "/test2"]
42
+VOLUME /test3
43
+
44
+WORKDIR /test
45
+
46
+ADD . /
47
+COPY . copy
0 48
\ No newline at end of file
1 49
new file mode 100644
... ...
@@ -0,0 +1,23 @@
0
+FROM busybox
1
+ENV name value
2
+ENV name=value
3
+ENV name=value name2=value2
4
+ENV name="value value1"
5
+ENV name=value\ value2
6
+ENV name="value'quote space'value2"
7
+ENV name='value"double quote"value2'
8
+ENV name=value\ value2 name2=value2\ value3
9
+ENV name="a\"b"
10
+ENV name="a\'b"
11
+ENV name='a\'b'
12
+ENV name='a\'b''
13
+ENV name='a\"b'
14
+ENV name="''"
15
+# don't put anything after the next line - it must be the last line of the
16
+# Dockerfile and it must end with \
17
+ENV name=value \
18
+    name1=value1 \
19
+    name2="value2a \
20
+           value2b" \
21
+    name3="value3a\n\"value3b\"" \
22
+	name4="value4a\\nvalue4b" \
0 23
\ No newline at end of file
1 24
new file mode 100644
... ...
@@ -0,0 +1,2 @@
0
+FROM busybox
1
+EXPOSE 3469
0 2
\ No newline at end of file
1 3
new file mode 100644
... ...
@@ -0,0 +1,4 @@
0
+FROM busybox
1
+COPY . /
2
+COPY . dir
3
+COPY subdir/ test/
0 4
\ No newline at end of file
1 5
new file mode 100644
2 6
new file mode 100644
3 7
new file mode 100644
... ...
@@ -0,0 +1,2 @@
0
+file
1
+file2
0 2
\ No newline at end of file
1 3
new file mode 100644
... ...
@@ -0,0 +1,2 @@
0
+FROM busybox
1
+COPY . /
0 2
new file mode 100644
1 3
new file mode 100644
2 4
new file mode 100644
3 5
new file mode 100644
... ...
@@ -0,0 +1,95 @@
0
+package builder
1
+
2
+import (
3
+	"fmt"
4
+	"os"
5
+	"path/filepath"
6
+	"runtime"
7
+	"strings"
8
+)
9
+
10
// isURL reports whether s looks like a remote HTTP or HTTPS URL.
func isURL(s string) bool {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(s, scheme) {
			return true
		}
	}
	return false
}
14
+
15
+// exportEnv creates an export statement for a shell that contains all of the
16
+// provided environment.
17
+func exportEnv(env []string) string {
18
+	if len(env) == 0 {
19
+		return ""
20
+	}
21
+	out := "export"
22
+	for _, e := range env {
23
+		out += " " + bashQuote(e)
24
+	}
25
+	return out + "; "
26
+}
27
+
28
// bashQuote wraps env in double quotes, backslash-escaping the three
// characters ($, \, ") that stay special inside a double-quoted bash
// string.
// TODO: verify that these are all we have to escape.
func bashQuote(env string) string {
	quoted := `"`
	for _, r := range env {
		if r == '$' || r == '\\' || r == '"' {
			quoted += `\`
		}
		quoted += string(r)
	}
	return quoted + `"`
}
43
+
44
// hasEnvName reports whether the provided environment contains an entry
// for the variable name, i.e. an element of the form "name=...".
func hasEnvName(env []string, name string) bool {
	prefix := name + "="
	for _, kv := range env {
		if strings.HasPrefix(kv, prefix) {
			return true
		}
	}
	return false
}
53
+
54
// platformSupports is a short-term helper that produces a friendly error
// when a Dockerfile instruction is not available on the current platform.
// Only Windows restricts any instructions; on every other OS all commands
// are accepted.
func platformSupports(command string) error {
	if runtime.GOOS != "windows" {
		return nil
	}
	switch command {
	case "expose", "user", "stopsignal", "arg":
		return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
	default:
		return nil
	}
}
66
+
67
// handleJSONArgs normalizes instruction arguments: when the parser marked
// them as a JSON array (attributes["json"]) they are returned verbatim as
// an exec-form argv; otherwise they are collapsed into a single
// shell-form string. An empty argument list yields an empty slice.
func handleJSONArgs(args []string, attributes map[string]bool) []string {
	switch {
	case len(args) == 0:
		return []string{}
	case attributes != nil && attributes["json"]:
		// Exec form: keep the argv exactly as written.
		return args
	default:
		// literal string command, not an exec array
		return []string{strings.Join(args, " ")}
	}
}
79
+
80
// makeAbsolute anchors dest under workingDir when dest is relative,
// mirroring how WORKDIR affects COPY/ADD destinations; absolute paths are
// returned untouched. A trailing separator (or trailing "/.") on the
// input is preserved as a trailing separator on the result, since a
// trailing slash marks the destination as a directory.
func makeAbsolute(dest, workingDir string) string {
	if filepath.IsAbs(dest) {
		return dest
	}
	sep := string(os.PathSeparator)
	endsWithSlash := strings.HasSuffix(dest, sep) || strings.HasSuffix(dest, sep+".")
	abs := filepath.Join(sep, filepath.FromSlash(workingDir), dest)
	if endsWithSlash {
		abs += sep
	}
	return abs
}
0 95
new file mode 100644
... ...
@@ -0,0 +1,314 @@
0
+package builder
1
+
2
+// This will take a single word and an array of env variables and
3
+// process all quotes (" and ') as well as $xxx and ${xxx} env variable
4
+// tokens.  Tries to mimic bash shell process.
5
+// It doesn't support all flavors of ${xx:...} formats but new ones can
6
+// be added by adding code to the "special ${} format processing" section
7
+
8
+import (
9
+	"fmt"
10
+	"strings"
11
+	"text/scanner"
12
+	"unicode"
13
+)
14
+
15
// shellWord carries the state for expanding a single word: the original
// text, a rune scanner positioned within that text, and the environment
// ("KEY=VALUE" entries) consulted for $variable substitution.
type shellWord struct {
	word    string          // original, unmodified input word (used in error messages)
	scanner scanner.Scanner // rune-level cursor over word
	envs    []string        // environment consulted by getEnv
	pos     int             // NOTE(review): written at init but never read in this file — confirm before removing
}
21
+
22
+// ProcessWord will use the 'env' list of environment variables,
23
+// and replace any env var references in 'word'.
24
+func ProcessWord(word string, env []string) (string, error) {
25
+	sw := &shellWord{
26
+		word: word,
27
+		envs: env,
28
+		pos:  0,
29
+	}
30
+	sw.scanner.Init(strings.NewReader(word))
31
+	word, _, err := sw.process()
32
+	return word, err
33
+}
34
+
35
+// ProcessWords will use the 'env' list of environment variables,
36
+// and replace any env var references in 'word' then it will also
37
+// return a slice of strings which represents the 'word'
38
+// split up based on spaces - taking into account quotes.  Note that
39
+// this splitting is done **after** the env var substitutions are done.
40
+// Note, each one is trimmed to remove leading and trailing spaces (unless
41
+// they are quoted", but ProcessWord retains spaces between words.
42
+func ProcessWords(word string, env []string) ([]string, error) {
43
+	sw := &shellWord{
44
+		word: word,
45
+		envs: env,
46
+		pos:  0,
47
+	}
48
+	sw.scanner.Init(strings.NewReader(word))
49
+	_, words, err := sw.process()
50
+	return words, err
51
+}
52
+
53
// process scans the entire word; EOF is the only terminator.
func (sw *shellWord) process() (string, []string, error) {
	return sw.processStopOn(scanner.EOF)
}
56
+
57
// wordsStruct accumulates the space-separated words of a shell word as
// characters stream in, tracking whether a word is currently in progress.
type wordsStruct struct {
	word   string   // characters of the word being built
	words  []string // completed words, in order
	inWord bool     // true once the current word has at least one character
}

// addChar appends ch to the current word; an unprotected space character
// terminates the word in progress (if any) instead of being stored.
func (w *wordsStruct) addChar(ch rune) {
	if !unicode.IsSpace(ch) {
		w.addRawChar(ch)
		return
	}
	if w.inWord && len(w.word) != 0 {
		w.words = append(w.words, w.word)
		w.word = ""
		w.inWord = false
	}
}

// addRawChar appends ch unconditionally, even whitespace — used for
// quoted or escaped characters that must not split words.
func (w *wordsStruct) addRawChar(ch rune) {
	w.inWord = true
	w.word += string(ch)
}

// addString feeds each rune of str through addChar, so unprotected spaces
// inside str split words.
func (w *wordsStruct) addString(str string) {
	var runes scanner.Scanner
	runes.Init(strings.NewReader(str))
	for runes.Peek() != scanner.EOF {
		w.addChar(runes.Next())
	}
}

// addRawString appends str verbatim to the current word.
func (w *wordsStruct) addRawString(str string) {
	w.inWord = true
	w.word += str
}

// getWords flushes any word still in progress and returns the
// accumulated list.
func (w *wordsStruct) getWords() []string {
	if len(w.word) != 0 {
		w.words = append(w.words, w.word)

		// Just in case we're called again by mistake
		w.word = ""
		w.inWord = false
	}
	return w.words
}
103
+
104
// processStopOn processes the word from the scanner's current position
// and stops at end of input, or just after the first occurrence of
// stopChar (which is consumed but not emitted — e.g. the '}' closing a
// ${..} substitution). It returns the substituted text plus that text
// split into words: $-expansion output is re-split on spaces, while
// quoted text stays inside its word.
func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
	var result string
	var words wordsStruct

	// Dispatch table for characters that begin a special sequence.
	var charFuncMapping = map[rune]func() (string, error){
		'\'': sw.processSingleQuote,
		'"':  sw.processDoubleQuote,
		'$':  sw.processDollar,
	}

	for sw.scanner.Peek() != scanner.EOF {
		ch := sw.scanner.Peek()

		if stopChar != scanner.EOF && ch == stopChar {
			// Consume the terminator and stop.
			sw.scanner.Next()
			break
		}
		if fn, ok := charFuncMapping[ch]; ok {
			// Call special processing func for certain chars
			tmp, err := fn()
			if err != nil {
				return "", []string{}, err
			}
			result += tmp

			if ch == rune('$') {
				// Unquoted $-expansion results undergo word splitting;
				// quoted segments are appended to the current word as-is.
				words.addString(tmp)
			} else {
				words.addRawString(tmp)
			}
		} else {
			// Not special, just add it to the result
			ch = sw.scanner.Next()

			if ch == '\\' {
				// '\' escapes, except end of line

				ch = sw.scanner.Next()

				if ch == scanner.EOF {
					// A trailing backslash is dropped entirely.
					break
				}

				// An escaped character never acts as a word separator.
				words.addRawChar(ch)
			} else {
				words.addChar(ch)
			}

			result += string(ch)
		}
	}

	return result, words.getWords(), nil
}
160
+
161
+func (sw *shellWord) processSingleQuote() (string, error) {
162
+	// All chars between single quotes are taken as-is
163
+	// Note, you can't escape '
164
+	var result string
165
+
166
+	sw.scanner.Next()
167
+
168
+	for {
169
+		ch := sw.scanner.Next()
170
+		if ch == '\'' || ch == scanner.EOF {
171
+			break
172
+		}
173
+		result += string(ch)
174
+	}
175
+
176
+	return result, nil
177
+}
178
+
179
// processDoubleQuote consumes a double-quoted segment starting at the
// opening quote. Characters (including ') are taken literally except $,
// which triggers substitution, and the escapes \" and \$; every other
// backslash is kept as-is. An unterminated quote ends at EOF.
func (sw *shellWord) processDoubleQuote() (string, error) {
	// All chars up to the next " are taken as-is, even ', except any $ chars
	// But you can escape " with a \
	var result string

	// Skip the opening double quote.
	sw.scanner.Next()

	for sw.scanner.Peek() != scanner.EOF {
		ch := sw.scanner.Peek()
		if ch == '"' {
			// Closing quote: consume it and finish.
			sw.scanner.Next()
			break
		}
		if ch == '$' {
			// Variable substitution still applies inside double quotes.
			tmp, err := sw.processDollar()
			if err != nil {
				return "", err
			}
			result += tmp
		} else {
			ch = sw.scanner.Next()
			if ch == '\\' {
				chNext := sw.scanner.Peek()

				if chNext == scanner.EOF {
					// Ignore \ at end of word
					continue
				}

				if chNext == '"' || chNext == '$' {
					// \" and \$ can be escaped, all other \'s are left as-is
					ch = sw.scanner.Next()
				}
			}
			result += string(ch)
		}
	}

	return result, nil
}
219
+
220
// processDollar handles a substitution starting at a '$'. Supported forms:
//
//	$name          – simple variable reference
//	${name}        – braced variable reference
//	${name:+word}  – word if name's value is non-empty, else ""
//	${name:-word}  – name's value if non-empty, else word
//
// A '$' not followed by a name character is returned literally. Any other
// modifier after ':' is an error, as is a brace form with neither '}' nor
// ':' after the name.
func (sw *shellWord) processDollar() (string, error) {
	sw.scanner.Next()
	ch := sw.scanner.Peek()
	if ch == '{' {
		sw.scanner.Next()
		name := sw.processName()
		ch = sw.scanner.Peek()
		if ch == '}' {
			// Normal ${xx} case
			sw.scanner.Next()
			return sw.getEnv(name), nil
		}
		if ch == ':' {
			// Special ${xx:...} format processing
			// Yes it allows for recursive $'s in the ... spot

			sw.scanner.Next() // skip over :
			modifier := sw.scanner.Next()

			// The "word" runs up to (and consumes) the closing '}'.
			word, _, err := sw.processStopOn('}')
			if err != nil {
				return "", err
			}

			// Grab the current value of the variable in question so we
			// can use it to determine what to do based on the modifier
			newValue := sw.getEnv(name)

			switch modifier {
			case '+':
				if newValue != "" {
					newValue = word
				}
				return newValue, nil

			case '-':
				if newValue == "" {
					newValue = word
				}
				return newValue, nil

			default:
				return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word)
			}
		}
		return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word)
	}
	// $xxx case
	name := sw.processName()
	if name == "" {
		// Lone '$' with no name: keep it literal.
		return "$", nil
	}
	return sw.getEnv(name), nil
}
274
+
275
+func (sw *shellWord) processName() string {
276
+	// Read in a name (alphanumeric or _)
277
+	// If it starts with a numeric then just return $#
278
+	var name string
279
+
280
+	for sw.scanner.Peek() != scanner.EOF {
281
+		ch := sw.scanner.Peek()
282
+		if len(name) == 0 && unicode.IsDigit(ch) {
283
+			ch = sw.scanner.Next()
284
+			return string(ch)
285
+		}
286
+		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
287
+			break
288
+		}
289
+		ch = sw.scanner.Next()
290
+		name += string(ch)
291
+	}
292
+
293
+	return name
294
+}
295
+
296
+func (sw *shellWord) getEnv(name string) string {
297
+	for _, env := range sw.envs {
298
+		i := strings.Index(env, "=")
299
+		if i < 0 {
300
+			if name == env {
301
+				// Should probably never get here, but just in case treat
302
+				// it like "var" and "var=" are the same
303
+				return ""
304
+			}
305
+			continue
306
+		}
307
+		if name != env[:i] {
308
+			continue
309
+		}
310
+		return env[i+1:]
311
+	}
312
+	return ""
313
+}
0 314
new file mode 100644
... ...
@@ -0,0 +1 @@
0
+This package provides helper functions for dealing with signals across various operating systems
0 1
\ No newline at end of file
1 2
new file mode 100644
... ...
@@ -0,0 +1,37 @@
0
+// Package signal provides helper functions for dealing with signals across
1
+// various operating systems.
2
+package signal
3
+
4
+import (
5
+	"fmt"
6
+	"strconv"
7
+	"strings"
8
+	"syscall"
9
+)
10
+
11
+// ParseSignal translates a string to a valid syscall signal.
12
+// It returns an error if the signal map doesn't include the given signal.
13
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
14
+	s, err := strconv.Atoi(rawSignal)
15
+	if err == nil {
16
+		if s == 0 {
17
+			return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
18
+		}
19
+		return syscall.Signal(s), nil
20
+	}
21
+	signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
22
+	if !ok {
23
+		return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
24
+	}
25
+	return signal, nil
26
+}
27
+
28
+// ValidSignalForPlatform returns true if a signal is valid on the platform
29
+func ValidSignalForPlatform(sig syscall.Signal) bool {
30
+	for _, v := range SignalMap {
31
+		if v == sig {
32
+			return true
33
+		}
34
+	}
35
+	return false
36
+}
0 37
new file mode 100644
... ...
@@ -0,0 +1,41 @@
0
+package signal
1
+
2
+import (
3
+	"syscall"
4
+)
5
+
6
+// SignalMap is a map of Darwin signals.
7
+var SignalMap = map[string]syscall.Signal{
8
+	"ABRT":   syscall.SIGABRT,
9
+	"ALRM":   syscall.SIGALRM,
10
+	"BUG":    syscall.SIGBUS,
11
+	"CHLD":   syscall.SIGCHLD,
12
+	"CONT":   syscall.SIGCONT,
13
+	"EMT":    syscall.SIGEMT,
14
+	"FPE":    syscall.SIGFPE,
15
+	"HUP":    syscall.SIGHUP,
16
+	"ILL":    syscall.SIGILL,
17
+	"INFO":   syscall.SIGINFO,
18
+	"INT":    syscall.SIGINT,
19
+	"IO":     syscall.SIGIO,
20
+	"IOT":    syscall.SIGIOT,
21
+	"KILL":   syscall.SIGKILL,
22
+	"PIPE":   syscall.SIGPIPE,
23
+	"PROF":   syscall.SIGPROF,
24
+	"QUIT":   syscall.SIGQUIT,
25
+	"SEGV":   syscall.SIGSEGV,
26
+	"STOP":   syscall.SIGSTOP,
27
+	"SYS":    syscall.SIGSYS,
28
+	"TERM":   syscall.SIGTERM,
29
+	"TRAP":   syscall.SIGTRAP,
30
+	"TSTP":   syscall.SIGTSTP,
31
+	"TTIN":   syscall.SIGTTIN,
32
+	"TTOU":   syscall.SIGTTOU,
33
+	"URG":    syscall.SIGURG,
34
+	"USR1":   syscall.SIGUSR1,
35
+	"USR2":   syscall.SIGUSR2,
36
+	"VTALRM": syscall.SIGVTALRM,
37
+	"WINCH":  syscall.SIGWINCH,
38
+	"XCPU":   syscall.SIGXCPU,
39
+	"XFSZ":   syscall.SIGXFSZ,
40
+}
0 41
new file mode 100644
... ...
@@ -0,0 +1,43 @@
0
+package signal
1
+
2
+import (
3
+	"syscall"
4
+)
5
+
6
+// SignalMap is a map of FreeBSD signals.
7
+var SignalMap = map[string]syscall.Signal{
8
+	"ABRT":   syscall.SIGABRT,
9
+	"ALRM":   syscall.SIGALRM,
10
+	"BUF":    syscall.SIGBUS,
11
+	"CHLD":   syscall.SIGCHLD,
12
+	"CONT":   syscall.SIGCONT,
13
+	"EMT":    syscall.SIGEMT,
14
+	"FPE":    syscall.SIGFPE,
15
+	"HUP":    syscall.SIGHUP,
16
+	"ILL":    syscall.SIGILL,
17
+	"INFO":   syscall.SIGINFO,
18
+	"INT":    syscall.SIGINT,
19
+	"IO":     syscall.SIGIO,
20
+	"IOT":    syscall.SIGIOT,
21
+	"KILL":   syscall.SIGKILL,
22
+	"LWP":    syscall.SIGLWP,
23
+	"PIPE":   syscall.SIGPIPE,
24
+	"PROF":   syscall.SIGPROF,
25
+	"QUIT":   syscall.SIGQUIT,
26
+	"SEGV":   syscall.SIGSEGV,
27
+	"STOP":   syscall.SIGSTOP,
28
+	"SYS":    syscall.SIGSYS,
29
+	"TERM":   syscall.SIGTERM,
30
+	"THR":    syscall.SIGTHR,
31
+	"TRAP":   syscall.SIGTRAP,
32
+	"TSTP":   syscall.SIGTSTP,
33
+	"TTIN":   syscall.SIGTTIN,
34
+	"TTOU":   syscall.SIGTTOU,
35
+	"URG":    syscall.SIGURG,
36
+	"USR1":   syscall.SIGUSR1,
37
+	"USR2":   syscall.SIGUSR2,
38
+	"VTALRM": syscall.SIGVTALRM,
39
+	"WINCH":  syscall.SIGWINCH,
40
+	"XCPU":   syscall.SIGXCPU,
41
+	"XFSZ":   syscall.SIGXFSZ,
42
+}
0 43
new file mode 100644
... ...
@@ -0,0 +1,80 @@
0
+package signal
1
+
2
+import (
3
+	"syscall"
4
+)
5
+
6
const (
	// sigrtmin/sigrtmax bound the real-time signal range used for the
	// RTMIN+n / RTMAX-n entries below. NOTE(review): 34 matches glibc,
	// which reserves the first two kernel RT signals for its threading
	// implementation; musl reports SIGRTMIN as 35 — confirm target libc.
	sigrtmin = 34
	sigrtmax = 64
)
10
+
11
+// SignalMap is a map of Linux signals.
12
+var SignalMap = map[string]syscall.Signal{
13
+	"ABRT":     syscall.SIGABRT,
14
+	"ALRM":     syscall.SIGALRM,
15
+	"BUS":      syscall.SIGBUS,
16
+	"CHLD":     syscall.SIGCHLD,
17
+	"CLD":      syscall.SIGCLD,
18
+	"CONT":     syscall.SIGCONT,
19
+	"FPE":      syscall.SIGFPE,
20
+	"HUP":      syscall.SIGHUP,
21
+	"ILL":      syscall.SIGILL,
22
+	"INT":      syscall.SIGINT,
23
+	"IO":       syscall.SIGIO,
24
+	"IOT":      syscall.SIGIOT,
25
+	"KILL":     syscall.SIGKILL,
26
+	"PIPE":     syscall.SIGPIPE,
27
+	"POLL":     syscall.SIGPOLL,
28
+	"PROF":     syscall.SIGPROF,
29
+	"PWR":      syscall.SIGPWR,
30
+	"QUIT":     syscall.SIGQUIT,
31
+	"SEGV":     syscall.SIGSEGV,
32
+	"STKFLT":   syscall.SIGSTKFLT,
33
+	"STOP":     syscall.SIGSTOP,
34
+	"SYS":      syscall.SIGSYS,
35
+	"TERM":     syscall.SIGTERM,
36
+	"TRAP":     syscall.SIGTRAP,
37
+	"TSTP":     syscall.SIGTSTP,
38
+	"TTIN":     syscall.SIGTTIN,
39
+	"TTOU":     syscall.SIGTTOU,
40
+	"UNUSED":   syscall.SIGUNUSED,
41
+	"URG":      syscall.SIGURG,
42
+	"USR1":     syscall.SIGUSR1,
43
+	"USR2":     syscall.SIGUSR2,
44
+	"VTALRM":   syscall.SIGVTALRM,
45
+	"WINCH":    syscall.SIGWINCH,
46
+	"XCPU":     syscall.SIGXCPU,
47
+	"XFSZ":     syscall.SIGXFSZ,
48
+	"RTMIN":    sigrtmin,
49
+	"RTMIN+1":  sigrtmin + 1,
50
+	"RTMIN+2":  sigrtmin + 2,
51
+	"RTMIN+3":  sigrtmin + 3,
52
+	"RTMIN+4":  sigrtmin + 4,
53
+	"RTMIN+5":  sigrtmin + 5,
54
+	"RTMIN+6":  sigrtmin + 6,
55
+	"RTMIN+7":  sigrtmin + 7,
56
+	"RTMIN+8":  sigrtmin + 8,
57
+	"RTMIN+9":  sigrtmin + 9,
58
+	"RTMIN+10": sigrtmin + 10,
59
+	"RTMIN+11": sigrtmin + 11,
60
+	"RTMIN+12": sigrtmin + 12,
61
+	"RTMIN+13": sigrtmin + 13,
62
+	"RTMIN+14": sigrtmin + 14,
63
+	"RTMIN+15": sigrtmin + 15,
64
+	"RTMAX-14": sigrtmax - 14,
65
+	"RTMAX-13": sigrtmax - 13,
66
+	"RTMAX-12": sigrtmax - 12,
67
+	"RTMAX-11": sigrtmax - 11,
68
+	"RTMAX-10": sigrtmax - 10,
69
+	"RTMAX-9":  sigrtmax - 9,
70
+	"RTMAX-8":  sigrtmax - 8,
71
+	"RTMAX-7":  sigrtmax - 7,
72
+	"RTMAX-6":  sigrtmax - 6,
73
+	"RTMAX-5":  sigrtmax - 5,
74
+	"RTMAX-4":  sigrtmax - 4,
75
+	"RTMAX-3":  sigrtmax - 3,
76
+	"RTMAX-2":  sigrtmax - 2,
77
+	"RTMAX-1":  sigrtmax - 1,
78
+	"RTMAX":    sigrtmax,
79
+}
0 80
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+// +build !windows
1
+
2
+package signal
3
+
4
+import (
5
+	"syscall"
6
+)
7
+
8
// Signals used in api/client (no windows equivalent, use
// invalid signals so they don't get handled)

const (
	// SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted.
	SIGCHLD = syscall.SIGCHLD
	// SIGWINCH is a signal sent to a process when its controlling terminal changes its size
	SIGWINCH = syscall.SIGWINCH
	// DefaultStopSignal is the syscall signal used to stop a container in unix systems.
	// Note it is a string signal *name*, not a syscall.Signal value —
	// presumably resolved via ParseSignal by callers; confirm.
	DefaultStopSignal = "SIGTERM"
)
0 19
new file mode 100644
... ...
@@ -0,0 +1,10 @@
0
+// +build !linux,!darwin,!freebsd,!windows
1
+
2
+package signal
3
+
4
+import (
5
+	"syscall"
6
+)
7
+
8
+// SignalMap is an empty map of signals for unsupported platform.
9
+var SignalMap = map[string]syscall.Signal{}
0 10
new file mode 100644
... ...
@@ -0,0 +1,27 @@
0
+// +build windows
1
+
2
+package signal
3
+
4
+import (
5
+	"syscall"
6
+)
7
+
8
+// Signals used in api/client (no windows equivalent, use
9
+// invalid signals so they don't get handled)
10
+const (
11
+	SIGCHLD  = syscall.Signal(0xff)
12
+	SIGWINCH = syscall.Signal(0xff)
13
+	// DefaultStopSignal is the syscall signal used to stop a container in windows systems.
14
+	DefaultStopSignal = "15"
15
+)
16
+
17
+// SignalMap is a map of "supported" signals. As per the comment in GOLang's
18
+// ztypes_windows.go: "More invented values for signals". Windows doesn't
19
+// really support signals in any way, shape or form that Unix does.
20
+//
21
+// We have these so that docker kill can be used to gracefully (TERM) and
22
+// forcibly (KILL) terminate a container on Windows.
23
+var SignalMap = map[string]syscall.Signal{
24
+	"KILL": syscall.SIGKILL,
25
+	"TERM": syscall.SIGTERM,
26
+}
0 27
new file mode 100644
... ...
@@ -0,0 +1,30 @@
0
+package strslice
1
+
2
+import "encoding/json"
3
+
4
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string

// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 || string(b) == "null" {
		// With no input — or an explicit JSON null, which by the
		// encoding/json convention an UnmarshalJSON treats as a no-op —
		// we preserve the existing value by returning nil and leaving
		// the target alone. This allows defining default values for the
		// type. (Previously "null" silently reset the slice to empty.)
		return nil
	}

	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		// Not an array: fall back to decoding a single string into a
		// one-element slice.
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}

	*e = p
	return nil
}