Docker-DCO-1.1-Signed-off-by: Erik Hollensbe <github@hollensbe.org> (github: erikh)
deleted file mode 100644
@@ -1,31 +0,0 @@
-package main
-
-import (
-	"os"
-
-	"github.com/erikh/buildfile/evaluator"
-)
-
-func main() {
-	if len(os.Args) < 2 {
-		os.Stderr.WriteString("Please supply filename(s) to evaluate")
-		os.Exit(1)
-	}
-
-	for _, fn := range os.Args[1:] {
-		f, err := os.Open(fn)
-		if err != nil {
-			panic(err)
-		}
-
-		opts := &evaluator.BuildOpts{}
-
-		bf, err := opts.NewBuildFile(f)
-		if err != nil {
-			panic(err)
-		}
-		if err := bf.Run(); err != nil {
-			panic(err)
-		}
-	}
-}
@@ -2,10 +2,20 @@ package evaluator
 
 import (
 	"fmt"
+	"path/filepath"
 	"strings"
+
+	"github.com/docker/docker/nat"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
 )
 
-func env(b *buildFile, args ...string) error {
+// dispatch with no layer / parsing.
+func nullDispatch(b *buildFile, args []string) error {
+	return nil
+}
+
+func env(b *buildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("ENV accepts two arguments")
 	}
@@ -14,12 +24,12 @@ func env(b *buildFile, args ...string) error {
 	// handling. This routine gets much shorter with the denormalization here.
 	key := args[0]
 	b.env[key] = args[1]
-	b.config.Env = append(b.config.Env, strings.Join("=", key, b.env[key]))
+	b.config.Env = append(b.config.Env, strings.Join([]string{key, b.env[key]}, "="))
 
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", value))
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s=%s", key, b.env[key]))
 }
 
-func maintainer(b *buildFile, args ...string) error {
+func maintainer(b *buildFile, args []string) error {
 	if len(args) != 1 {
 		return fmt.Errorf("MAINTAINER requires only one argument")
 	}
@@ -28,7 +38,7 @@ func maintainer(b *buildFile, args ...string) error {
 	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
 }
 
-func add(b *buildFile, args ...string) error {
+func add(b *buildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("ADD requires two arguments")
 	}
@@ -36,10 +46,199 @@ func add(b *buildFile, args ...string) error {
 	return b.runContextCommand(args, true, true, "ADD")
 }
 
-func dispatchCopy(b *buildFile, args ...string) error {
+func dispatchCopy(b *buildFile, args []string) error {
 	if len(args) != 2 {
 		return fmt.Errorf("COPY requires two arguments")
 	}
 
 	return b.runContextCommand(args, false, false, "COPY")
 }
+
+func from(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("FROM requires one argument")
+	}
+
+	name := args[0]
+
+	image, err := b.options.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		if b.options.Daemon.Graph().IsNotExist(err) {
+			image, err = b.pullImage(name)
+		}
+
+		// note that the top level err will still be !nil here if IsNotExist is
+		// not the error. This approach just simplifies the logic a bit.
+		if err != nil {
+			return err
+		}
+	}
+
+	return b.processImageFrom(image)
+}
+
+func onbuild(b *buildFile, args []string) error {
+	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
+	switch triggerInstruction {
+	case "ONBUILD":
+		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+	case "MAINTAINER", "FROM":
+		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
+	}
+
+	trigger := strings.Join(args, " ")
+
+	b.config.OnBuild = append(b.config.OnBuild, trigger)
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
+}
+
+func workdir(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("WORKDIR requires exactly one argument")
+	}
+
+	workdir := args[0]
+
+	if workdir[0] == '/' {
+		b.config.WorkingDir = workdir
+	} else {
+		if b.config.WorkingDir == "" {
+			b.config.WorkingDir = "/"
+		}
+		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
+	}
+
+	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
+}
+
+func run(b *buildFile, args []string) error {
+	if len(args) == 1 { // literal string command, not an exec array
+		args = append([]string{"/bin/sh", "-c"}, args[0])
+	}
+
+	if b.image == "" {
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+
+	config, _, _, err := runconfig.Parse(append([]string{b.image}, args...), nil)
+	if err != nil {
+		return err
+	}
+
+	cmd := b.config.Cmd
+	// set Cmd manually, this is special case only for Dockerfiles
+	b.config.Cmd = config.Cmd
+	runconfig.Merge(b.config, config)
+
+	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
+
+	utils.Debugf("Command to be executed: %v", b.config.Cmd)
+
+	hit, err := b.probeCache()
+	if err != nil {
+		return err
+	}
+	if hit {
+		return nil
+	}
+
+	c, err := b.create()
+	if err != nil {
+		return err
+	}
+	// Ensure that we keep the container mounted until the commit
+	// to avoid unmounting and then mounting directly again
+	c.Mount()
+	defer c.Unmount()
+
+	err = b.run(c)
+	if err != nil {
+		return err
+	}
+	if err := b.commit(c.ID, cmd, "run"); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func cmd(b *buildFile, args []string) error {
+	if len(args) < 2 {
+		args = append([]string{"/bin/sh", "-c"}, args...)
+	}
+
+	b.config.Cmd = args
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
+		return err
+	}
+
+	b.cmdSet = true
+	return nil
+}
+
+func entrypoint(b *buildFile, args []string) error {
+	b.config.Entrypoint = args
+
+	// if there is no cmd in current Dockerfile - cleanup cmd
+	if !b.cmdSet {
+		b.config.Cmd = nil
+	}
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func expose(b *buildFile, args []string) error {
+	portsTab := args
+
+	if b.config.ExposedPorts == nil {
+		b.config.ExposedPorts = make(nat.PortSet)
+	}
+
+	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
+	if err != nil {
+		return err
+	}
+
+	for port := range ports {
+		if _, exists := b.config.ExposedPorts[port]; !exists {
+			b.config.ExposedPorts[port] = struct{}{}
+		}
+	}
+	b.config.PortSpecs = nil
+
+	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
+}
+
+func user(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("USER requires exactly one argument")
+	}
+
+	b.config.User = args[0]
+	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
+}
+
+func volume(b *buildFile, args []string) error {
+	if len(args) != 1 {
+		return fmt.Errorf("Volume cannot be empty")
+	}
+
+	volume := args
+
+	if b.config.Volumes == nil {
+		b.config.Volumes = map[string]struct{}{}
+	}
+	for _, v := range volume {
+		b.config.Volumes[v] = struct{}{}
+	}
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func insert(b *buildFile, args []string) error {
+	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
+}
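Note: every dispatcher above shares the single signature func(*buildFile, []string) error and is looked up by lower-cased instruction name in evaluateTable (wired up in the evaluator changes below). A minimal, self-contained sketch of that table-driven dispatch pattern follows; the builder type, table, and dispatch helper here are illustrative stand-ins, not the actual docker/docker types.

package main

import (
	"fmt"
	"strings"
)

// builder is a stand-in for the real buildFile state.
type builder struct {
	env map[string]string
}

// table maps lower-cased instruction names to handlers, mirroring evaluateTable.
var table map[string]func(*builder, []string) error

func init() {
	table = map[string]func(*builder, []string) error{
		"env": func(b *builder, args []string) error {
			if len(args) != 2 {
				return fmt.Errorf("ENV accepts two arguments")
			}
			b.env[args[0]] = args[1]
			return nil
		},
		// analogue of nullDispatch: recognized, but does nothing.
		"docker-version": func(b *builder, args []string) error { return nil },
	}
}

// dispatch splits a line into instruction + arguments and calls the handler.
func dispatch(b *builder, line string) error {
	fields := strings.Fields(line)
	if f, ok := table[strings.ToLower(fields[0])]; ok {
		return f(b, fields[1:])
	}
	// unknown instructions are skipped, as in the real dispatcher
	return nil
}

func main() {
	b := &builder{env: map[string]string{}}
	if err := dispatch(b, "ENV GOPATH /go"); err != nil {
		panic(err)
	}
	fmt.Println(b.env["GOPATH"]) // prints: /go
}

As in the real dispatcher, each handler validates its own argument count, which keeps the step loop in the evaluator small.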
@@ -1,38 +1,54 @@
 package evaluator
 
 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"io"
-	"regexp"
+	"io/ioutil"
+	"os"
+	"path"
 	"strings"
 
-	"github.com/erikh/buildfile/parser"
-
+	"github.com/docker/docker/builder/parser"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/engine"
 	"github.com/docker/docker/nat"
+	"github.com/docker/docker/pkg/tarsum"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
 )
 
 var (
-	evaluateTable = map[string]func(*buildFile, ...string) error{
-		"env":        env,
-		"maintainer": maintainer,
-		"add":        add,
-		"copy":       dispatchCopy, // copy() is a go builtin
-		//"onbuild":        parseMaybeJSON,
-		//"workdir":        parseString,
-		//"docker-version": parseString,
-		//"run":            parseMaybeJSON,
-		//"cmd":            parseMaybeJSON,
-		//"entrypoint":     parseMaybeJSON,
-		//"expose":         parseMaybeJSON,
-		//"volume":         parseMaybeJSON,
-	}
+	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
 )
 
+var evaluateTable map[string]func(*buildFile, []string) error
+
+func init() {
+	evaluateTable = map[string]func(*buildFile, []string) error{
+		"env":            env,
+		"maintainer":     maintainer,
+		"add":            add,
+		"copy":           dispatchCopy, // copy() is a go builtin
+		"from":           from,
+		"onbuild":        onbuild,
+		"workdir":        workdir,
+		"docker-version": nullDispatch, // we don't care about docker-version
+		"run":            run,
+		"cmd":            cmd,
+		"entrypoint":     entrypoint,
+		"expose":         expose,
+		"volume":         volume,
+		"user":           user,
+		"insert":         insert,
+	}
+}
+
+type envMap map[string]string
+type uniqueMap map[string]struct{}
+
 type buildFile struct {
 	dockerfile *parser.Node
 	env        envMap
@@ -40,48 +56,86 @@ type buildFile struct {
 	config     *runconfig.Config
 	options    *BuildOpts
 	maintainer string
+
+	// cmdSet indicates if CMD was set in current Dockerfile
+	cmdSet bool
+
+	context       *tarsum.TarSum
+	contextPath   string
+	tmpContainers uniqueMap
+	tmpImages     uniqueMap
 }
 
 type BuildOpts struct {
-	Daemon *daemon.Daemon
-	Engine *engine.Engine
-	OutStream io.Writer
-	ErrStream io.Writer
-	Verbose bool
-	UtilizeCache bool
-	Remove bool
-	ForceRm bool
+	Daemon         *daemon.Daemon
+	Engine         *engine.Engine
+	OutStream      io.Writer
+	ErrStream      io.Writer
+	Verbose        bool
+	UtilizeCache   bool
+	Remove         bool
+	ForceRemove    bool
+	AuthConfig     *registry.AuthConfig
+	AuthConfigFile *registry.ConfigFile
+
+	// Deprecated, original writer used for ImagePull. To be removed.
 	OutOld          io.Writer
 	StreamFormatter *utils.StreamFormatter
-	Auth            *registry.AuthConfig
-	AuthConfigFile  *registry.ConfigFile
 }
 
-func NewBuildFile(file io.ReadWriteCloser, opts *BuildOpts) (*buildFile, error) {
-	defer file.Close()
-	ast, err := parser.Parse(file)
-	if err != nil {
-		return nil, err
-	}
-
+func NewBuilder(opts *BuildOpts) (*buildFile, error) {
 	return &buildFile{
-		dockerfile: ast,
-		env:        envMap{},
-		config:     initRunConfig(),
-		options:    opts,
+		dockerfile:    nil,
+		env:           envMap{},
+		config:        initRunConfig(),
+		options:       opts,
+		tmpContainers: make(uniqueMap),
+		tmpImages:     make(uniqueMap),
 	}, nil
 }
 
-func (b *buildFile) Run() error {
-	node := b.dockerfile
+func (b *buildFile) Run(context io.Reader) (string, error) {
+	err := b.readContext(context)
+
+	if err != nil {
+		return "", err
+	}
+
+	filename := path.Join(b.contextPath, "Dockerfile")
+	if _, err := os.Stat(filename); os.IsNotExist(err) {
+		return "", fmt.Errorf("Cannot build a directory without a Dockerfile")
+	}
+	fileBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return "", err
+	}
+	if len(fileBytes) == 0 {
+		return "", ErrDockerfileEmpty
+	}
+	ast, err := parser.Parse(bytes.NewReader(fileBytes))
+	if err != nil {
+		return "", err
+	}
+
+	b.dockerfile = ast
 
-	for i, n := range node.Children {
+	for i, n := range b.dockerfile.Children {
 		if err := b.dispatch(i, n); err != nil {
-			return err
+			if b.options.ForceRemove {
+				b.clearTmp(b.tmpContainers)
+			}
+			return "", err
+		} else if b.options.Remove {
+			b.clearTmp(b.tmpContainers)
 		}
 	}
 
-	return nil
+	if b.image == "" {
+		return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
+	}
+
+	fmt.Fprintf(b.options.OutStream, "Successfully built %s\n", utils.TruncateID(b.image))
+	return b.image, nil
 }
 
 func initRunConfig() *runconfig.Config {
@@ -94,7 +148,7 @@ func initRunConfig() *runconfig.Config {
 
 		// FIXME(erikh) this should also be a type in runconfig
 		Volumes:    map[string]struct{}{},
-		Entrypoint: []string{},
+		Entrypoint: []string{"/bin/sh", "-c"},
 		OnBuild:    []string{},
 	}
 }
@@ -102,17 +156,24 @@ func initRunConfig() *runconfig.Config {
 func (b *buildFile) dispatch(stepN int, ast *parser.Node) error {
 	cmd := ast.Value
 	strs := []string{}
+
+	if cmd == "onbuild" {
+		fmt.Fprintf(b.options.OutStream, "%#v\n", ast.Next.Children[0].Value)
+		ast = ast.Next.Children[0]
+		strs = append(strs, ast.Value)
+	}
+
 	for ast.Next != nil {
 		ast = ast.Next
-		strs = append(strs, replaceEnv(b, stripQuotes(ast.Value)))
+		strs = append(strs, replaceEnv(b, ast.Value))
 	}
 
-	fmt.Fprintf(b.outStream, "Step %d : %s\n", i, cmd, expression)
+	fmt.Fprintf(b.options.OutStream, "Step %d : %s %s\n", stepN, strings.ToUpper(cmd), strings.Join(strs, " "))
 
 	// XXX yes, we skip any cmds that are not valid; the parser should have
 	// picked these out already.
 	if f, ok := evaluateTable[cmd]; ok {
-		return f(b, strs...)
+		return f(b, strs)
 	}
 
 	return nil
@@ -1,6 +1,33 @@
 package evaluator
 
-func (b *buildFile) addContext(context io.Reader) (string, error) {
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/archive"
+	"github.com/docker/docker/daemon"
+	imagepkg "github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/registry"
+	"github.com/docker/docker/runconfig"
+	"github.com/docker/docker/utils"
+)
+
+func (b *buildFile) readContext(context io.Reader) error {
 	tmpdirPath, err := ioutil.TempDir("", "docker-build")
 	if err != nil {
 		return err
@@ -17,7 +44,7 @@ func (b *buildFile) addContext(context io.Reader) (string, error) {
 	}
 
 	b.contextPath = tmpdirPath
-	return tmpdirPath
+	return nil
 }
 
 func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
@@ -38,15 +65,15 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 		return nil
 	}
 
-	container, warnings, err := b.daemon.Create(b.config, "")
+	container, warnings, err := b.options.Daemon.Create(b.config, "")
 	if err != nil {
 		return err
 	}
 	for _, warning := range warnings {
-		fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
+		fmt.Fprintf(b.options.OutStream, " ---> [Warning] %s\n", warning)
 	}
 	b.tmpContainers[container.ID] = struct{}{}
-	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
+	fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
 	id = container.ID
 
 	if err := container.Mount(); err != nil {
@@ -54,7 +81,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 		}
 		defer container.Unmount()
 	}
-	container := b.daemon.Get(id)
+	container := b.options.Daemon.Get(id)
 	if container == nil {
 		return fmt.Errorf("An error occured while creating the container")
 	}
@@ -63,7 +90,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 	autoConfig := *b.config
 	autoConfig.Cmd = autoCmd
 	// Commit the container
-	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
+	image, err := b.options.Daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
 	if err != nil {
 		return err
 	}
@@ -72,24 +99,17 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
 	return nil
 }
 
-func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
+func (b *buildFile) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	if b.context == nil {
 		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
 	}
-	tmp := strings.SplitN(args, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid %s format", cmdName)
-	}
 
-	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
-	if err != nil {
-		return err
+	if len(args) != 2 {
+		return fmt.Errorf("Invalid %s format", cmdName)
 	}
 
-	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
-	if err != nil {
-		return err
-	}
+	orig := args[0]
+	dest := args[1]
 
 	cmd := b.config.Cmd
 	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
@@ -178,7 +198,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	}
 
 	// Hash path and check the cache
-	if b.utilizeCache {
+	if b.options.UtilizeCache {
 		var (
 			hash string
 			sums = b.context.GetSums()
@@ -222,7 +242,7 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	}
 
 	// Create the container
-	container, _, err := b.daemon.Create(b.config, "")
+	container, _, err := b.options.Daemon.Create(b.config, "")
 	if err != nil {
 		return err
 	}
@@ -245,3 +265,295 @@ func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
 	}
 	return nil
 }
+
+func (b *buildFile) pullImage(name string) (*imagepkg.Image, error) {
+	remote, tag := parsers.ParseRepositoryTag(name)
+	pullRegistryAuth := b.options.AuthConfig
+	if len(b.options.AuthConfigFile.Configs) > 0 {
+		// The request came with a full auth config file, we prefer to use that
+		endpoint, _, err := registry.ResolveRepositoryName(remote)
+		if err != nil {
+			return nil, err
+		}
+		resolvedAuth := b.options.AuthConfigFile.ResolveAuthConfig(endpoint)
+		pullRegistryAuth = &resolvedAuth
+	}
+	job := b.options.Engine.Job("pull", remote, tag)
+	job.SetenvBool("json", b.options.StreamFormatter.Json())
+	job.SetenvBool("parallel", true)
+	job.SetenvJson("authConfig", pullRegistryAuth)
+	job.Stdout.Add(b.options.OutOld)
+	if err := job.Run(); err != nil {
+		return nil, err
+	}
+	image, err := b.options.Daemon.Repositories().LookupImage(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return image, nil
+}
+
+func (b *buildFile) processImageFrom(img *imagepkg.Image) error {
+	b.image = img.ID
+	b.config = &runconfig.Config{}
+	if img.Config != nil {
+		b.config = img.Config
+	}
+	if b.config.Env == nil || len(b.config.Env) == 0 {
+		b.config.Env = append(b.config.Env, "PATH="+daemon.DefaultPathEnv)
+	}
+	// Process ONBUILD triggers if they exist
+	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
+		fmt.Fprintf(b.options.ErrStream, "# Executing %d build triggers\n", nTriggers)
+	}
+
+	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
+	onBuildTriggers := b.config.OnBuild
+	b.config.OnBuild = []string{}
+
+	// FIXME rewrite this so that builder/parser is used; right now steps in
+	// onbuild are muted because we have no good way to represent the step
+	// number
+	for _, step := range onBuildTriggers {
+		splitStep := strings.Split(step, " ")
+		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
+		switch stepInstruction {
+		case "ONBUILD":
+			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
+		case "MAINTAINER", "FROM":
+			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
+		}
+
+		// FIXME we have to run the evaluator manually here. This does not belong
+		// in this function.
+
+		if f, ok := evaluateTable[strings.ToLower(stepInstruction)]; ok {
+			if err := f(b, splitStep[1:]); err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("%s doesn't appear to be a valid Dockerfile instruction", splitStep[0])
+		}
+	}
+
+	return nil
+}
+
+// probeCache checks to see if image-caching is enabled (`b.options.UtilizeCache`)
+// and if so attempts to look up the current `b.image` and `b.config` pair
+// in the current server `b.options.Daemon`. If an image is found, probeCache returns
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
+// is any error, it returns `(false, err)`.
+func (b *buildFile) probeCache() (bool, error) {
+	if b.options.UtilizeCache {
+		if cache, err := b.options.Daemon.ImageGetCached(b.image, b.config); err != nil {
+			return false, err
+		} else if cache != nil {
+			fmt.Fprintf(b.options.OutStream, " ---> Using cache\n")
+			utils.Debugf("[BUILDER] Use cached version")
+			b.image = cache.ID
+			return true, nil
+		} else {
+			utils.Debugf("[BUILDER] Cache miss")
+		}
+	}
+	return false, nil
+}
+
+func (b *buildFile) create() (*daemon.Container, error) {
+	if b.image == "" {
+		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
+	}
+	b.config.Image = b.image
+
+	// Create the container
+	c, _, err := b.options.Daemon.Create(b.config, "")
+	if err != nil {
+		return nil, err
+	}
+	b.tmpContainers[c.ID] = struct{}{}
+	fmt.Fprintf(b.options.OutStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
+
+	// override the entry point that may have been picked up from the base image
+	c.Path = b.config.Cmd[0]
+	c.Args = b.config.Cmd[1:]
+
+	return c, nil
+}
+
+func (b *buildFile) run(c *daemon.Container) error {
+	var errCh chan error
+	if b.options.Verbose {
+		errCh = utils.Go(func() error {
+			// FIXME: call the 'attach' job so that daemon.Attach can be made private
+			//
+			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
+			// but without hijacking for stdin. Also, with attach there can be race
+			// condition because of some output already was printed before it.
+			return <-b.options.Daemon.Attach(c, nil, nil, b.options.OutStream, b.options.ErrStream)
+		})
+	}
+
+	//start the container
+	if err := c.Start(); err != nil {
+		return err
+	}
+
+	if errCh != nil {
+		if err := <-errCh; err != nil {
+			return err
+		}
+	}
+
+	// Wait for it to finish
+	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
+		err := &utils.JSONError{
+			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
+			Code:    ret,
+		}
+		return err
+	}
+
+	return nil
+}
+
+func (b *buildFile) checkPathForAddition(orig string) error {
+	origPath := path.Join(b.contextPath, orig)
+	if p, err := filepath.EvalSymlinks(origPath); err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	} else {
+		origPath = p
+	}
+	if !strings.HasPrefix(origPath, b.contextPath) {
+		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
+	}
+	_, err := os.Stat(origPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	}
+	return nil
+}
+
+func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
+	var (
+		err        error
+		destExists = true
+		origPath   = path.Join(b.contextPath, orig)
+		destPath   = path.Join(container.RootfsPath(), dest)
+	)
+
+	if destPath != container.RootfsPath() {
+		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
+		if err != nil {
+			return err
+		}
+	}
+
+	// Preserve the trailing '/'
+	if strings.HasSuffix(dest, "/") || dest == "." {
+		destPath = destPath + "/"
+	}
+
+	destStat, err := os.Stat(destPath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+		destExists = false
+	}
+
+	fi, err := os.Stat(origPath)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("%s: no such file or directory", orig)
+		}
+		return err
+	}
+
+	if fi.IsDir() {
+		return copyAsDirectory(origPath, destPath, destExists)
+	}
+
+	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
+	if decompress {
+		// First try to unpack the source as an archive
+		// to support the untar feature we need to clean up the path a little bit
+		// because tar is very forgiving. First we need to strip off the archive's
+		// filename from the path but this is only added if it does not end in / .
+		tarDest := destPath
+		if strings.HasSuffix(tarDest, "/") {
+			tarDest = filepath.Dir(destPath)
+		}
+
+		// try to successfully untar the orig
+		if err := archive.UntarPath(origPath, tarDest); err == nil {
+			return nil
+		} else if err != io.EOF {
+			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
+		}
+	}
+
+	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
+		return err
+	}
+	if err := archive.CopyWithTar(origPath, destPath); err != nil {
+		return err
+	}
+
+	resPath := destPath
+	if destExists && destStat.IsDir() {
+		resPath = path.Join(destPath, path.Base(origPath))
+	}
+
+	return fixPermissions(resPath, 0, 0)
+}
+
+func copyAsDirectory(source, destination string, destinationExists bool) error {
+	if err := archive.CopyWithTar(source, destination); err != nil {
+		return err
+	}
+
+	if destinationExists {
+		files, err := ioutil.ReadDir(source)
+		if err != nil {
+			return err
+		}
+
+		for _, file := range files {
+			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	return fixPermissions(destination, 0, 0)
+}
+
+func fixPermissions(destination string, uid, gid int) error {
+	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
+		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+		return nil
+	})
+}
+
+func (b *buildFile) clearTmp(containers map[string]struct{}) {
+	for c := range containers {
+		tmp := b.options.Daemon.Get(c)
+		if err := b.options.Daemon.Destroy(tmp); err != nil {
+			fmt.Fprintf(b.options.OutStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
+		} else {
+			delete(containers, c)
+			fmt.Fprintf(b.options.OutStream, "Removing intermediate container %s\n", utils.TruncateID(c))
+		}
+	}
+}
@@ -6,17 +6,9 @@ import (
 )
 
 var (
-	TOKEN_ESCAPED_QUOTE     = regexp.MustCompile(`\\"`)
-	TOKEN_ESCAPED_ESCAPE    = regexp.MustCompile(`\\\\`)
 	TOKEN_ENV_INTERPOLATION = regexp.MustCompile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
 )
 
-func stripQuotes(str string) string {
-	str = str[1 : len(str)-1]
-	str = TOKEN_ESCAPED_QUOTE.ReplaceAllString(str, `"`)
-	return TOKEN_ESCAPED_ESCAPE.ReplaceAllString(str, `\`)
-}
-
 func replaceEnv(b *buildFile, str string) string {
 	for _, match := range TOKEN_ENV_INTERPOLATION.FindAllString(str, -1) {
 		match = match[strings.Index(match, "$"):]
@@ -27,13 +27,11 @@ func parseEnv(rest string) (*Node, error) {
 	node := blankNode()
 	rootnode := node
 	strs := TOKEN_WHITESPACE.Split(rest, 2)
-	node.Value = QuoteString(strs[0])
+	node.Value = strs[0]
 	node.Next = blankNode()
-	node.Next.Value = QuoteString(strs[1])
+	node.Next.Value = strs[1]
 
 	return rootnode, nil
-
-	return node, nil
 }
 
 // parses a whitespace-delimited set of arguments. The result is effectively a
@@ -41,18 +39,25 @@ func parseEnv(rest string) (*Node, error) {
 func parseStringsWhitespaceDelimited(rest string) (*Node, error) {
 	node := blankNode()
 	rootnode := node
+	prevnode := node
 	for _, str := range TOKEN_WHITESPACE.Split(rest, -1) { // use regexp
-		node.Value = QuoteString(str)
+		prevnode = node
+		node.Value = str
 		node.Next = blankNode()
 		node = node.Next
 	}
 
+	// XXX to get around regexp.Split *always* providing an empty string at the
+	// end due to how our loop is constructed, nil out the last node in the
+	// chain.
+	prevnode.Next = nil
+
 	return rootnode, nil
 }
 
 // parsestring just wraps the string in quotes and returns a working node.
 func parseString(rest string) (*Node, error) {
-	return &Node{QuoteString(rest), nil, nil}, nil
+	return &Node{rest, nil, nil}, nil
 }
 
 // parseJSON converts JSON arrays to an AST.
@@ -61,6 +66,7 @@ func parseJSON(rest string) (*Node, error) {
 		myJson   []interface{}
 		next     = blankNode()
 		orignext = next
+		prevnode = next
 	)
 
 	if err := json.Unmarshal([]byte(rest), &myJson); err != nil {
@@ -72,11 +78,14 @@ func parseJSON(rest string) (*Node, error) {
 		case float64:
 			str = strconv.FormatFloat(str.(float64), 'G', -1, 64)
 		}
-		next.Value = QuoteString(str.(string))
+		next.Value = str.(string)
 		next.Next = blankNode()
+		prevnode = next
 		next = next.Next
 	}
 
+	prevnode.Next = nil
+
 	return orignext, nil
 }
 
@@ -94,6 +103,6 @@ func parseMaybeJSON(rest string) (*Node, error) {
 	}
 
 	node := blankNode()
-	node.Value = QuoteString(rest)
+	node.Value = rest
 	return node, nil
 }
@@ -43,7 +43,7 @@ type Node struct {
 
 var (
 	dispatch                map[string]func(string) (*Node, error)
-	TOKEN_WHITESPACE        = regexp.MustCompile(`\s+`)
+	TOKEN_WHITESPACE        = regexp.MustCompile(`[\t\v\f\r ]+`)
 	TOKEN_LINE_CONTINUATION = regexp.MustCompile(`\\$`)
 	TOKEN_COMMENT           = regexp.MustCompile(`^#.*$`)
 )
@@ -70,6 +70,7 @@ func init() {
 		"entrypoint":     parseMaybeJSON,
 		"expose":         parseStringsWhitespaceDelimited,
 		"volume":         parseMaybeJSON,
+		"insert":         parseIgnore,
 	}
 }
 
@@ -1,5 +1,5 @@
 (from "brimstone/ubuntu:14.04")
-(cmd)
+(cmd "")
 (entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
 (expose "8500" "8600" "8400" "8301" "8302")
 (run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists")
| 5 | 5 |
(run "apt-get update && apt-get install -y unzip wget && apt-get clean && rm -rf /var/lib/apt/lists") |