Browse code

vendor: use dockerfile parser from buildkit

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>

Tonis Tiigi authored on 2018/06/03 01:46:53
Showing 128 changed files
... ...
@@ -14,9 +14,6 @@ import (
14 14
 	"github.com/docker/docker/api/types/backend"
15 15
 	"github.com/docker/docker/api/types/container"
16 16
 	"github.com/docker/docker/builder"
17
-	"github.com/docker/docker/builder/dockerfile/instructions"
18
-	"github.com/docker/docker/builder/dockerfile/parser"
19
-	"github.com/docker/docker/builder/dockerfile/shell"
20 17
 	"github.com/docker/docker/builder/fscache"
21 18
 	"github.com/docker/docker/builder/remotecontext"
22 19
 	"github.com/docker/docker/errdefs"
... ...
@@ -24,6 +21,9 @@ import (
24 24
 	"github.com/docker/docker/pkg/streamformatter"
25 25
 	"github.com/docker/docker/pkg/stringid"
26 26
 	"github.com/docker/docker/pkg/system"
27
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
28
+	"github.com/moby/buildkit/frontend/dockerfile/parser"
29
+	"github.com/moby/buildkit/frontend/dockerfile/shell"
27 30
 	"github.com/moby/buildkit/session"
28 31
 	"github.com/pkg/errors"
29 32
 	"github.com/sirupsen/logrus"
30 33
deleted file mode 100644
... ...
@@ -1,46 +0,0 @@
1
-// Package command contains the set of Dockerfile commands.
2
-package command // import "github.com/docker/docker/builder/dockerfile/command"
3
-
4
-// Define constants for the command strings
5
-const (
6
-	Add         = "add"
7
-	Arg         = "arg"
8
-	Cmd         = "cmd"
9
-	Copy        = "copy"
10
-	Entrypoint  = "entrypoint"
11
-	Env         = "env"
12
-	Expose      = "expose"
13
-	From        = "from"
14
-	Healthcheck = "healthcheck"
15
-	Label       = "label"
16
-	Maintainer  = "maintainer"
17
-	Onbuild     = "onbuild"
18
-	Run         = "run"
19
-	Shell       = "shell"
20
-	StopSignal  = "stopsignal"
21
-	User        = "user"
22
-	Volume      = "volume"
23
-	Workdir     = "workdir"
24
-)
25
-
26
-// Commands is list of all Dockerfile commands
27
-var Commands = map[string]struct{}{
28
-	Add:         {},
29
-	Arg:         {},
30
-	Cmd:         {},
31
-	Copy:        {},
32
-	Entrypoint:  {},
33
-	Env:         {},
34
-	Expose:      {},
35
-	From:        {},
36
-	Healthcheck: {},
37
-	Label:       {},
38
-	Maintainer:  {},
39
-	Onbuild:     {},
40
-	Run:         {},
41
-	Shell:       {},
42
-	StopSignal:  {},
43
-	User:        {},
44
-	Volume:      {},
45
-	Workdir:     {},
46
-}
... ...
@@ -18,15 +18,15 @@ import (
18 18
 	"github.com/docker/docker/api/types/container"
19 19
 	"github.com/docker/docker/api/types/strslice"
20 20
 	"github.com/docker/docker/builder"
21
-	"github.com/docker/docker/builder/dockerfile/instructions"
22
-	"github.com/docker/docker/builder/dockerfile/parser"
23
-	"github.com/docker/docker/builder/dockerfile/shell"
24 21
 	"github.com/docker/docker/errdefs"
25 22
 	"github.com/docker/docker/image"
26 23
 	"github.com/docker/docker/pkg/jsonmessage"
27 24
 	"github.com/docker/docker/pkg/signal"
28 25
 	"github.com/docker/docker/pkg/system"
29 26
 	"github.com/docker/go-connections/nat"
27
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
28
+	"github.com/moby/buildkit/frontend/dockerfile/parser"
29
+	"github.com/moby/buildkit/frontend/dockerfile/shell"
30 30
 	"github.com/pkg/errors"
31 31
 	"github.com/sirupsen/logrus"
32 32
 )
... ...
@@ -11,13 +11,13 @@ import (
11 11
 	"github.com/docker/docker/api/types/container"
12 12
 	"github.com/docker/docker/api/types/strslice"
13 13
 	"github.com/docker/docker/builder"
14
-	"github.com/docker/docker/builder/dockerfile/instructions"
15
-	"github.com/docker/docker/builder/dockerfile/shell"
16 14
 	"github.com/docker/docker/image"
17 15
 	"github.com/docker/docker/pkg/system"
18 16
 	"github.com/docker/go-connections/nat"
19 17
 	"github.com/gotestyourself/gotestyourself/assert"
20 18
 	is "github.com/gotestyourself/gotestyourself/assert/cmp"
19
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
20
+	"github.com/moby/buildkit/frontend/dockerfile/shell"
21 21
 )
22 22
 
23 23
 func newBuilderWithMockBackend() *Builder {
... ...
@@ -27,11 +27,11 @@ import (
27 27
 
28 28
 	"github.com/docker/docker/api/types/container"
29 29
 	"github.com/docker/docker/builder"
30
-	"github.com/docker/docker/builder/dockerfile/instructions"
31
-	"github.com/docker/docker/builder/dockerfile/shell"
32 30
 	"github.com/docker/docker/errdefs"
33 31
 	"github.com/docker/docker/pkg/system"
34 32
 	"github.com/docker/docker/runconfig/opts"
33
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
34
+	"github.com/moby/buildkit/frontend/dockerfile/shell"
35 35
 	"github.com/pkg/errors"
36 36
 )
37 37
 
... ...
@@ -4,13 +4,13 @@ import (
4 4
 	"os"
5 5
 	"testing"
6 6
 
7
-	"github.com/docker/docker/builder/dockerfile/instructions"
8 7
 	"github.com/docker/docker/builder/remotecontext"
9 8
 	"github.com/docker/docker/pkg/archive"
10 9
 	"github.com/docker/docker/pkg/reexec"
11 10
 	"github.com/gotestyourself/gotestyourself/assert"
12 11
 	is "github.com/gotestyourself/gotestyourself/assert/cmp"
13 12
 	"github.com/gotestyourself/gotestyourself/skip"
13
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
14 14
 )
15 15
 
16 16
 type dispatchTestCase struct {
17 17
deleted file mode 100644
... ...
@@ -1,183 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import (
4
-	"fmt"
5
-	"strings"
6
-)
7
-
8
-// FlagType is the type of the build flag
9
-type FlagType int
10
-
11
-const (
12
-	boolType FlagType = iota
13
-	stringType
14
-)
15
-
16
-// BFlags contains all flags information for the builder
17
-type BFlags struct {
18
-	Args  []string // actual flags/args from cmd line
19
-	flags map[string]*Flag
20
-	used  map[string]*Flag
21
-	Err   error
22
-}
23
-
24
-// Flag contains all information for a flag
25
-type Flag struct {
26
-	bf       *BFlags
27
-	name     string
28
-	flagType FlagType
29
-	Value    string
30
-}
31
-
32
-// NewBFlags returns the new BFlags struct
33
-func NewBFlags() *BFlags {
34
-	return &BFlags{
35
-		flags: make(map[string]*Flag),
36
-		used:  make(map[string]*Flag),
37
-	}
38
-}
39
-
40
-// NewBFlagsWithArgs returns the new BFlags struct with Args set to args
41
-func NewBFlagsWithArgs(args []string) *BFlags {
42
-	flags := NewBFlags()
43
-	flags.Args = args
44
-	return flags
45
-}
46
-
47
-// AddBool adds a bool flag to BFlags
48
-// Note, any error will be generated when Parse() is called (see Parse).
49
-func (bf *BFlags) AddBool(name string, def bool) *Flag {
50
-	flag := bf.addFlag(name, boolType)
51
-	if flag == nil {
52
-		return nil
53
-	}
54
-	if def {
55
-		flag.Value = "true"
56
-	} else {
57
-		flag.Value = "false"
58
-	}
59
-	return flag
60
-}
61
-
62
-// AddString adds a string flag to BFlags
63
-// Note, any error will be generated when Parse() is called (see Parse).
64
-func (bf *BFlags) AddString(name string, def string) *Flag {
65
-	flag := bf.addFlag(name, stringType)
66
-	if flag == nil {
67
-		return nil
68
-	}
69
-	flag.Value = def
70
-	return flag
71
-}
72
-
73
-// addFlag is a generic func used by the other AddXXX() func
74
-// to add a new flag to the BFlags struct.
75
-// Note, any error will be generated when Parse() is called (see Parse).
76
-func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag {
77
-	if _, ok := bf.flags[name]; ok {
78
-		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
79
-		return nil
80
-	}
81
-
82
-	newFlag := &Flag{
83
-		bf:       bf,
84
-		name:     name,
85
-		flagType: flagType,
86
-	}
87
-	bf.flags[name] = newFlag
88
-
89
-	return newFlag
90
-}
91
-
92
-// IsUsed checks if the flag is used
93
-func (fl *Flag) IsUsed() bool {
94
-	if _, ok := fl.bf.used[fl.name]; ok {
95
-		return true
96
-	}
97
-	return false
98
-}
99
-
100
-// IsTrue checks if a bool flag is true
101
-func (fl *Flag) IsTrue() bool {
102
-	if fl.flagType != boolType {
103
-		// Should never get here
104
-		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
105
-	}
106
-	return fl.Value == "true"
107
-}
108
-
109
-// Parse parses and checks if the BFlags is valid.
110
-// Any error noticed during the AddXXX() funcs will be generated/returned
111
-// here.  We do this because an error during AddXXX() is more like a
112
-// compile time error so it doesn't matter too much when we stop our
113
-// processing as long as we do stop it, so this allows the code
114
-// around AddXXX() to be just:
115
-//     defFlag := AddString("description", "")
116
-// w/o needing to add an if-statement around each one.
117
-func (bf *BFlags) Parse() error {
118
-	// If there was an error while defining the possible flags
119
-	// go ahead and bubble it back up here since we didn't do it
120
-	// earlier in the processing
121
-	if bf.Err != nil {
122
-		return fmt.Errorf("Error setting up flags: %s", bf.Err)
123
-	}
124
-
125
-	for _, arg := range bf.Args {
126
-		if !strings.HasPrefix(arg, "--") {
127
-			return fmt.Errorf("Arg should start with -- : %s", arg)
128
-		}
129
-
130
-		if arg == "--" {
131
-			return nil
132
-		}
133
-
134
-		arg = arg[2:]
135
-		value := ""
136
-
137
-		index := strings.Index(arg, "=")
138
-		if index >= 0 {
139
-			value = arg[index+1:]
140
-			arg = arg[:index]
141
-		}
142
-
143
-		flag, ok := bf.flags[arg]
144
-		if !ok {
145
-			return fmt.Errorf("Unknown flag: %s", arg)
146
-		}
147
-
148
-		if _, ok = bf.used[arg]; ok {
149
-			return fmt.Errorf("Duplicate flag specified: %s", arg)
150
-		}
151
-
152
-		bf.used[arg] = flag
153
-
154
-		switch flag.flagType {
155
-		case boolType:
156
-			// value == "" is only ok if no "=" was specified
157
-			if index >= 0 && value == "" {
158
-				return fmt.Errorf("Missing a value on flag: %s", arg)
159
-			}
160
-
161
-			lower := strings.ToLower(value)
162
-			if lower == "" {
163
-				flag.Value = "true"
164
-			} else if lower == "true" || lower == "false" {
165
-				flag.Value = lower
166
-			} else {
167
-				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
168
-			}
169
-
170
-		case stringType:
171
-			if index < 0 {
172
-				return fmt.Errorf("Missing a value on flag: %s", arg)
173
-			}
174
-			flag.Value = value
175
-
176
-		default:
177
-			panic("No idea what kind of flag we have! Should never get here!")
178
-		}
179
-
180
-	}
181
-
182
-	return nil
183
-}
184 1
deleted file mode 100644
... ...
@@ -1,187 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import (
4
-	"testing"
5
-)
6
-
7
-func TestBuilderFlags(t *testing.T) {
8
-	var expected string
9
-	var err error
10
-
11
-	// ---
12
-
13
-	bf := NewBFlags()
14
-	bf.Args = []string{}
15
-	if err := bf.Parse(); err != nil {
16
-		t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
17
-	}
18
-
19
-	// ---
20
-
21
-	bf = NewBFlags()
22
-	bf.Args = []string{"--"}
23
-	if err := bf.Parse(); err != nil {
24
-		t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
25
-	}
26
-
27
-	// ---
28
-
29
-	bf = NewBFlags()
30
-	flStr1 := bf.AddString("str1", "")
31
-	flBool1 := bf.AddBool("bool1", false)
32
-	bf.Args = []string{}
33
-	if err = bf.Parse(); err != nil {
34
-		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
35
-	}
36
-
37
-	if flStr1.IsUsed() {
38
-		t.Fatal("Test3 - str1 was not used!")
39
-	}
40
-	if flBool1.IsUsed() {
41
-		t.Fatal("Test3 - bool1 was not used!")
42
-	}
43
-
44
-	// ---
45
-
46
-	bf = NewBFlags()
47
-	flStr1 = bf.AddString("str1", "HI")
48
-	flBool1 = bf.AddBool("bool1", false)
49
-	bf.Args = []string{}
50
-
51
-	if err = bf.Parse(); err != nil {
52
-		t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
53
-	}
54
-
55
-	if flStr1.Value != "HI" {
56
-		t.Fatal("Str1 was supposed to default to: HI")
57
-	}
58
-	if flBool1.IsTrue() {
59
-		t.Fatal("Bool1 was supposed to default to: false")
60
-	}
61
-	if flStr1.IsUsed() {
62
-		t.Fatal("Str1 was not used!")
63
-	}
64
-	if flBool1.IsUsed() {
65
-		t.Fatal("Bool1 was not used!")
66
-	}
67
-
68
-	// ---
69
-
70
-	bf = NewBFlags()
71
-	flStr1 = bf.AddString("str1", "HI")
72
-	bf.Args = []string{"--str1"}
73
-
74
-	if err = bf.Parse(); err == nil {
75
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
76
-	}
77
-
78
-	// ---
79
-
80
-	bf = NewBFlags()
81
-	flStr1 = bf.AddString("str1", "HI")
82
-	bf.Args = []string{"--str1="}
83
-
84
-	if err = bf.Parse(); err != nil {
85
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
86
-	}
87
-
88
-	expected = ""
89
-	if flStr1.Value != expected {
90
-		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
91
-	}
92
-
93
-	// ---
94
-
95
-	bf = NewBFlags()
96
-	flStr1 = bf.AddString("str1", "HI")
97
-	bf.Args = []string{"--str1=BYE"}
98
-
99
-	if err = bf.Parse(); err != nil {
100
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
101
-	}
102
-
103
-	expected = "BYE"
104
-	if flStr1.Value != expected {
105
-		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
106
-	}
107
-
108
-	// ---
109
-
110
-	bf = NewBFlags()
111
-	flBool1 = bf.AddBool("bool1", false)
112
-	bf.Args = []string{"--bool1"}
113
-
114
-	if err = bf.Parse(); err != nil {
115
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
116
-	}
117
-
118
-	if !flBool1.IsTrue() {
119
-		t.Fatal("Test-b1 Bool1 was supposed to be true")
120
-	}
121
-
122
-	// ---
123
-
124
-	bf = NewBFlags()
125
-	flBool1 = bf.AddBool("bool1", false)
126
-	bf.Args = []string{"--bool1=true"}
127
-
128
-	if err = bf.Parse(); err != nil {
129
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
130
-	}
131
-
132
-	if !flBool1.IsTrue() {
133
-		t.Fatal("Test-b2 Bool1 was supposed to be true")
134
-	}
135
-
136
-	// ---
137
-
138
-	bf = NewBFlags()
139
-	flBool1 = bf.AddBool("bool1", false)
140
-	bf.Args = []string{"--bool1=false"}
141
-
142
-	if err = bf.Parse(); err != nil {
143
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
144
-	}
145
-
146
-	if flBool1.IsTrue() {
147
-		t.Fatal("Test-b3 Bool1 was supposed to be false")
148
-	}
149
-
150
-	// ---
151
-
152
-	bf = NewBFlags()
153
-	flBool1 = bf.AddBool("bool1", false)
154
-	bf.Args = []string{"--bool1=false1"}
155
-
156
-	if err = bf.Parse(); err == nil {
157
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
158
-	}
159
-
160
-	// ---
161
-
162
-	bf = NewBFlags()
163
-	flBool1 = bf.AddBool("bool1", false)
164
-	bf.Args = []string{"--bool2"}
165
-
166
-	if err = bf.Parse(); err == nil {
167
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
168
-	}
169
-
170
-	// ---
171
-
172
-	bf = NewBFlags()
173
-	flStr1 = bf.AddString("str1", "HI")
174
-	flBool1 = bf.AddBool("bool1", false)
175
-	bf.Args = []string{"--bool1", "--str1=BYE"}
176
-
177
-	if err = bf.Parse(); err != nil {
178
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
179
-	}
180
-
181
-	if flStr1.Value != "BYE" {
182
-		t.Fatalf("Test %s, str1 should be BYE", bf.Args)
183
-	}
184
-	if !flBool1.IsTrue() {
185
-		t.Fatalf("Test %s, bool1 should be true", bf.Args)
186
-	}
187
-}
188 1
deleted file mode 100644
... ...
@@ -1,418 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import (
4
-	"errors"
5
-	"strings"
6
-
7
-	"github.com/docker/docker/api/types/container"
8
-	"github.com/docker/docker/api/types/strslice"
9
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
10
-)
11
-
12
-// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering)
13
-type KeyValuePair struct {
14
-	Key   string
15
-	Value string
16
-}
17
-
18
-func (kvp *KeyValuePair) String() string {
19
-	return kvp.Key + "=" + kvp.Value
20
-}
21
-
22
-// Command is implemented by every command present in a dockerfile
23
-type Command interface {
24
-	Name() string
25
-}
26
-
27
-// KeyValuePairs is a slice of KeyValuePair
28
-type KeyValuePairs []KeyValuePair
29
-
30
-// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code)
31
-type withNameAndCode struct {
32
-	code string
33
-	name string
34
-}
35
-
36
-func (c *withNameAndCode) String() string {
37
-	return c.code
38
-}
39
-
40
-// Name of the command
41
-func (c *withNameAndCode) Name() string {
42
-	return c.name
43
-}
44
-
45
-func newWithNameAndCode(req parseRequest) withNameAndCode {
46
-	return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command}
47
-}
48
-
49
-// SingleWordExpander is a provider for variable expansion where 1 word => 1 output
50
-type SingleWordExpander func(word string) (string, error)
51
-
52
-// SupportsSingleWordExpansion interface marks a command as supporting variable expansion
53
-type SupportsSingleWordExpansion interface {
54
-	Expand(expander SingleWordExpander) error
55
-}
56
-
57
-// PlatformSpecific adds platform checks to a command
58
-type PlatformSpecific interface {
59
-	CheckPlatform(platform string) error
60
-}
61
-
62
-func expandKvp(kvp KeyValuePair, expander SingleWordExpander) (KeyValuePair, error) {
63
-	key, err := expander(kvp.Key)
64
-	if err != nil {
65
-		return KeyValuePair{}, err
66
-	}
67
-	value, err := expander(kvp.Value)
68
-	if err != nil {
69
-		return KeyValuePair{}, err
70
-	}
71
-	return KeyValuePair{Key: key, Value: value}, nil
72
-}
73
-func expandKvpsInPlace(kvps KeyValuePairs, expander SingleWordExpander) error {
74
-	for i, kvp := range kvps {
75
-		newKvp, err := expandKvp(kvp, expander)
76
-		if err != nil {
77
-			return err
78
-		}
79
-		kvps[i] = newKvp
80
-	}
81
-	return nil
82
-}
83
-
84
-func expandSliceInPlace(values []string, expander SingleWordExpander) error {
85
-	for i, v := range values {
86
-		newValue, err := expander(v)
87
-		if err != nil {
88
-			return err
89
-		}
90
-		values[i] = newValue
91
-	}
92
-	return nil
93
-}
94
-
95
-// EnvCommand : ENV key1 value1 [keyN valueN...]
96
-type EnvCommand struct {
97
-	withNameAndCode
98
-	Env KeyValuePairs // kvp slice instead of map to preserve ordering
99
-}
100
-
101
-// Expand variables
102
-func (c *EnvCommand) Expand(expander SingleWordExpander) error {
103
-	return expandKvpsInPlace(c.Env, expander)
104
-}
105
-
106
-// MaintainerCommand : MAINTAINER maintainer_name
107
-type MaintainerCommand struct {
108
-	withNameAndCode
109
-	Maintainer string
110
-}
111
-
112
-// NewLabelCommand creates a new 'LABEL' command
113
-func NewLabelCommand(k string, v string, NoExp bool) *LabelCommand {
114
-	kvp := KeyValuePair{Key: k, Value: v}
115
-	c := "LABEL "
116
-	c += kvp.String()
117
-	nc := withNameAndCode{code: c, name: "label"}
118
-	cmd := &LabelCommand{
119
-		withNameAndCode: nc,
120
-		Labels: KeyValuePairs{
121
-			kvp,
122
-		},
123
-		noExpand: NoExp,
124
-	}
125
-	return cmd
126
-}
127
-
128
-// LabelCommand : LABEL some json data describing the image
129
-//
130
-// Sets the Label variable foo to bar,
131
-//
132
-type LabelCommand struct {
133
-	withNameAndCode
134
-	Labels   KeyValuePairs // kvp slice instead of map to preserve ordering
135
-	noExpand bool
136
-}
137
-
138
-// Expand variables
139
-func (c *LabelCommand) Expand(expander SingleWordExpander) error {
140
-	if c.noExpand {
141
-		return nil
142
-	}
143
-	return expandKvpsInPlace(c.Labels, expander)
144
-}
145
-
146
-// SourcesAndDest represent a list of source files and a destination
147
-type SourcesAndDest []string
148
-
149
-// Sources list the source paths
150
-func (s SourcesAndDest) Sources() []string {
151
-	res := make([]string, len(s)-1)
152
-	copy(res, s[:len(s)-1])
153
-	return res
154
-}
155
-
156
-// Dest path of the operation
157
-func (s SourcesAndDest) Dest() string {
158
-	return s[len(s)-1]
159
-}
160
-
161
-// AddCommand : ADD foo /path
162
-//
163
-// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
164
-// exist here. If you do not wish to have this automatic handling, use COPY.
165
-//
166
-type AddCommand struct {
167
-	withNameAndCode
168
-	SourcesAndDest
169
-	Chown string
170
-}
171
-
172
-// Expand variables
173
-func (c *AddCommand) Expand(expander SingleWordExpander) error {
174
-	return expandSliceInPlace(c.SourcesAndDest, expander)
175
-}
176
-
177
-// CopyCommand : COPY foo /path
178
-//
179
-// Same as 'ADD' but without the tar and remote url handling.
180
-//
181
-type CopyCommand struct {
182
-	withNameAndCode
183
-	SourcesAndDest
184
-	From  string
185
-	Chown string
186
-}
187
-
188
-// Expand variables
189
-func (c *CopyCommand) Expand(expander SingleWordExpander) error {
190
-	return expandSliceInPlace(c.SourcesAndDest, expander)
191
-}
192
-
193
-// OnbuildCommand : ONBUILD <some other command>
194
-type OnbuildCommand struct {
195
-	withNameAndCode
196
-	Expression string
197
-}
198
-
199
-// WorkdirCommand : WORKDIR /tmp
200
-//
201
-// Set the working directory for future RUN/CMD/etc statements.
202
-//
203
-type WorkdirCommand struct {
204
-	withNameAndCode
205
-	Path string
206
-}
207
-
208
-// Expand variables
209
-func (c *WorkdirCommand) Expand(expander SingleWordExpander) error {
210
-	p, err := expander(c.Path)
211
-	if err != nil {
212
-		return err
213
-	}
214
-	c.Path = p
215
-	return nil
216
-}
217
-
218
-// ShellDependantCmdLine represents a cmdline optionally prepended with the shell
219
-type ShellDependantCmdLine struct {
220
-	CmdLine      strslice.StrSlice
221
-	PrependShell bool
222
-}
223
-
224
-// RunCommand : RUN some command yo
225
-//
226
-// run a command and commit the image. Args are automatically prepended with
227
-// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under
228
-// Windows, in the event there is only one argument The difference in processing:
229
-//
230
-// RUN echo hi          # sh -c echo hi       (Linux)
231
-// RUN echo hi          # cmd /S /C echo hi   (Windows)
232
-// RUN [ "echo", "hi" ] # echo hi
233
-//
234
-type RunCommand struct {
235
-	withNameAndCode
236
-	ShellDependantCmdLine
237
-}
238
-
239
-// CmdCommand : CMD foo
240
-//
241
-// Set the default command to run in the container (which may be empty).
242
-// Argument handling is the same as RUN.
243
-//
244
-type CmdCommand struct {
245
-	withNameAndCode
246
-	ShellDependantCmdLine
247
-}
248
-
249
-// HealthCheckCommand : HEALTHCHECK foo
250
-//
251
-// Set the default healthcheck command to run in the container (which may be empty).
252
-// Argument handling is the same as RUN.
253
-//
254
-type HealthCheckCommand struct {
255
-	withNameAndCode
256
-	Health *container.HealthConfig
257
-}
258
-
259
-// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx
260
-//
261
-// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments
262
-// to /usr/sbin/nginx. Uses the default shell if not in JSON format.
263
-//
264
-// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint
265
-// is initialized at newBuilder time instead of through argument parsing.
266
-//
267
-type EntrypointCommand struct {
268
-	withNameAndCode
269
-	ShellDependantCmdLine
270
-}
271
-
272
-// ExposeCommand : EXPOSE 6667/tcp 7000/tcp
273
-//
274
-// Expose ports for links and port mappings. This all ends up in
275
-// req.runConfig.ExposedPorts for runconfig.
276
-//
277
-type ExposeCommand struct {
278
-	withNameAndCode
279
-	Ports []string
280
-}
281
-
282
-// UserCommand : USER foo
283
-//
284
-// Set the user to 'foo' for future commands and when running the
285
-// ENTRYPOINT/CMD at container run time.
286
-//
287
-type UserCommand struct {
288
-	withNameAndCode
289
-	User string
290
-}
291
-
292
-// Expand variables
293
-func (c *UserCommand) Expand(expander SingleWordExpander) error {
294
-	p, err := expander(c.User)
295
-	if err != nil {
296
-		return err
297
-	}
298
-	c.User = p
299
-	return nil
300
-}
301
-
302
-// VolumeCommand : VOLUME /foo
303
-//
304
-// Expose the volume /foo for use. Will also accept the JSON array form.
305
-//
306
-type VolumeCommand struct {
307
-	withNameAndCode
308
-	Volumes []string
309
-}
310
-
311
-// Expand variables
312
-func (c *VolumeCommand) Expand(expander SingleWordExpander) error {
313
-	return expandSliceInPlace(c.Volumes, expander)
314
-}
315
-
316
-// StopSignalCommand : STOPSIGNAL signal
317
-//
318
-// Set the signal that will be used to kill the container.
319
-type StopSignalCommand struct {
320
-	withNameAndCode
321
-	Signal string
322
-}
323
-
324
-// Expand variables
325
-func (c *StopSignalCommand) Expand(expander SingleWordExpander) error {
326
-	p, err := expander(c.Signal)
327
-	if err != nil {
328
-		return err
329
-	}
330
-	c.Signal = p
331
-	return nil
332
-}
333
-
334
-// CheckPlatform checks that the command is supported in the target platform
335
-func (c *StopSignalCommand) CheckPlatform(platform string) error {
336
-	if platform == "windows" {
337
-		return errors.New("The daemon on this platform does not support the command stopsignal")
338
-	}
339
-	return nil
340
-}
341
-
342
-// ArgCommand : ARG name[=value]
343
-//
344
-// Adds the variable foo to the trusted list of variables that can be passed
345
-// to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
346
-// Dockerfile author may optionally set a default value of this variable.
347
-type ArgCommand struct {
348
-	withNameAndCode
349
-	Key   string
350
-	Value *string
351
-}
352
-
353
-// Expand variables
354
-func (c *ArgCommand) Expand(expander SingleWordExpander) error {
355
-	p, err := expander(c.Key)
356
-	if err != nil {
357
-		return err
358
-	}
359
-	c.Key = p
360
-	if c.Value != nil {
361
-		p, err = expander(*c.Value)
362
-		if err != nil {
363
-			return err
364
-		}
365
-		c.Value = &p
366
-	}
367
-	return nil
368
-}
369
-
370
-// ShellCommand : SHELL powershell -command
371
-//
372
-// Set the non-default shell to use.
373
-type ShellCommand struct {
374
-	withNameAndCode
375
-	Shell strslice.StrSlice
376
-}
377
-
378
-// Stage represents a single stage in a multi-stage build
379
-type Stage struct {
380
-	Name       string
381
-	Commands   []Command
382
-	BaseName   string
383
-	SourceCode string
384
-	Platform   specs.Platform
385
-}
386
-
387
-// AddCommand to the stage
388
-func (s *Stage) AddCommand(cmd Command) {
389
-	// todo: validate cmd type
390
-	s.Commands = append(s.Commands, cmd)
391
-}
392
-
393
-// IsCurrentStage check if the stage name is the current stage
394
-func IsCurrentStage(s []Stage, name string) bool {
395
-	if len(s) == 0 {
396
-		return false
397
-	}
398
-	return s[len(s)-1].Name == name
399
-}
400
-
401
-// CurrentStage return the last stage in a slice
402
-func CurrentStage(s []Stage) (*Stage, error) {
403
-	if len(s) == 0 {
404
-		return nil, errors.New("No build stage in current context")
405
-	}
406
-	return &s[len(s)-1], nil
407
-}
408
-
409
-// HasStage looks for the presence of a given stage name
410
-func HasStage(s []Stage, name string) (int, bool) {
411
-	for i, stage := range s {
412
-		// Stage name is case-insensitive by design
413
-		if strings.EqualFold(stage.Name, name) {
414
-			return i, true
415
-		}
416
-	}
417
-	return -1, false
418
-}
419 1
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-// +build !windows
2
-
3
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
4
-
5
-import "fmt"
6
-
7
-func errNotJSON(command, _ string) error {
8
-	return fmt.Errorf("%s requires the arguments to be in JSON form", command)
9
-}
10 1
deleted file mode 100644
... ...
@@ -1,27 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import (
4
-	"fmt"
5
-	"path/filepath"
6
-	"regexp"
7
-	"strings"
8
-)
9
-
10
-func errNotJSON(command, original string) error {
11
-	// For Windows users, give a hint if it looks like it might contain
12
-	// a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"],
13
-	// as JSON must be escaped. Unfortunate...
14
-	//
15
-	// Specifically looking for quote-driveletter-colon-backslash, there's no
16
-	// double backslash and a [] pair. No, this is not perfect, but it doesn't
17
-	// have to be. It's simply a hint to make life a little easier.
18
-	extra := ""
19
-	original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1)))
20
-	if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 &&
21
-		!strings.Contains(original, `\\`) &&
22
-		strings.Contains(original, "[") &&
23
-		strings.Contains(original, "]") {
24
-		extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original)
25
-	}
26
-	return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
27
-}
28 1
deleted file mode 100644
... ...
@@ -1,637 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import (
4
-	"fmt"
5
-	"regexp"
6
-	"sort"
7
-	"strconv"
8
-	"strings"
9
-	"time"
10
-
11
-	"github.com/docker/docker/api/types/container"
12
-	"github.com/docker/docker/api/types/strslice"
13
-	"github.com/docker/docker/builder/dockerfile/command"
14
-	"github.com/docker/docker/builder/dockerfile/parser"
15
-	"github.com/docker/docker/pkg/system"
16
-	"github.com/pkg/errors"
17
-)
18
-
19
-type parseRequest struct {
20
-	command    string
21
-	args       []string
22
-	attributes map[string]bool
23
-	flags      *BFlags
24
-	original   string
25
-}
26
-
27
-func nodeArgs(node *parser.Node) []string {
28
-	result := []string{}
29
-	for ; node.Next != nil; node = node.Next {
30
-		arg := node.Next
31
-		if len(arg.Children) == 0 {
32
-			result = append(result, arg.Value)
33
-		} else if len(arg.Children) == 1 {
34
-			//sub command
35
-			result = append(result, arg.Children[0].Value)
36
-			result = append(result, nodeArgs(arg.Children[0])...)
37
-		}
38
-	}
39
-	return result
40
-}
41
-
42
-func newParseRequestFromNode(node *parser.Node) parseRequest {
43
-	return parseRequest{
44
-		command:    node.Value,
45
-		args:       nodeArgs(node),
46
-		attributes: node.Attributes,
47
-		original:   node.Original,
48
-		flags:      NewBFlagsWithArgs(node.Flags),
49
-	}
50
-}
51
-
52
-// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement)
53
-func ParseInstruction(node *parser.Node) (interface{}, error) {
54
-	req := newParseRequestFromNode(node)
55
-	switch node.Value {
56
-	case command.Env:
57
-		return parseEnv(req)
58
-	case command.Maintainer:
59
-		return parseMaintainer(req)
60
-	case command.Label:
61
-		return parseLabel(req)
62
-	case command.Add:
63
-		return parseAdd(req)
64
-	case command.Copy:
65
-		return parseCopy(req)
66
-	case command.From:
67
-		return parseFrom(req)
68
-	case command.Onbuild:
69
-		return parseOnBuild(req)
70
-	case command.Workdir:
71
-		return parseWorkdir(req)
72
-	case command.Run:
73
-		return parseRun(req)
74
-	case command.Cmd:
75
-		return parseCmd(req)
76
-	case command.Healthcheck:
77
-		return parseHealthcheck(req)
78
-	case command.Entrypoint:
79
-		return parseEntrypoint(req)
80
-	case command.Expose:
81
-		return parseExpose(req)
82
-	case command.User:
83
-		return parseUser(req)
84
-	case command.Volume:
85
-		return parseVolume(req)
86
-	case command.StopSignal:
87
-		return parseStopSignal(req)
88
-	case command.Arg:
89
-		return parseArg(req)
90
-	case command.Shell:
91
-		return parseShell(req)
92
-	}
93
-
94
-	return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine}
95
-}
96
-
97
-// ParseCommand converts an AST to a typed Command
98
-func ParseCommand(node *parser.Node) (Command, error) {
99
-	s, err := ParseInstruction(node)
100
-	if err != nil {
101
-		return nil, err
102
-	}
103
-	if c, ok := s.(Command); ok {
104
-		return c, nil
105
-	}
106
-	return nil, errors.Errorf("%T is not a command type", s)
107
-}
108
-
109
-// UnknownInstruction represents an error occurring when a command is unresolvable
110
-type UnknownInstruction struct {
111
-	Line        int
112
-	Instruction string
113
-}
114
-
115
-func (e *UnknownInstruction) Error() string {
116
-	return fmt.Sprintf("unknown instruction: %s", strings.ToUpper(e.Instruction))
117
-}
118
-
119
-// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction
120
-func IsUnknownInstruction(err error) bool {
121
-	_, ok := err.(*UnknownInstruction)
122
-	if !ok {
123
-		var pe *parseError
124
-		if pe, ok = err.(*parseError); ok {
125
-			_, ok = pe.inner.(*UnknownInstruction)
126
-		}
127
-	}
128
-	return ok
129
-}
130
-
131
-type parseError struct {
132
-	inner error
133
-	node  *parser.Node
134
-}
135
-
136
-func (e *parseError) Error() string {
137
-	return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error())
138
-}
139
-
140
-// Parse a docker file into a collection of buildable stages
141
-func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) {
142
-	for _, n := range ast.Children {
143
-		cmd, err := ParseInstruction(n)
144
-		if err != nil {
145
-			return nil, nil, &parseError{inner: err, node: n}
146
-		}
147
-		if len(stages) == 0 {
148
-			// meta arg case
149
-			if a, isArg := cmd.(*ArgCommand); isArg {
150
-				metaArgs = append(metaArgs, *a)
151
-				continue
152
-			}
153
-		}
154
-		switch c := cmd.(type) {
155
-		case *Stage:
156
-			stages = append(stages, *c)
157
-		case Command:
158
-			stage, err := CurrentStage(stages)
159
-			if err != nil {
160
-				return nil, nil, err
161
-			}
162
-			stage.AddCommand(c)
163
-		default:
164
-			return nil, nil, errors.Errorf("%T is not a command type", cmd)
165
-		}
166
-
167
-	}
168
-	return stages, metaArgs, nil
169
-}
170
-
171
-func parseKvps(args []string, cmdName string) (KeyValuePairs, error) {
172
-	if len(args) == 0 {
173
-		return nil, errAtLeastOneArgument(cmdName)
174
-	}
175
-	if len(args)%2 != 0 {
176
-		// should never get here, but just in case
177
-		return nil, errTooManyArguments(cmdName)
178
-	}
179
-	var res KeyValuePairs
180
-	for j := 0; j < len(args); j += 2 {
181
-		if len(args[j]) == 0 {
182
-			return nil, errBlankCommandNames(cmdName)
183
-		}
184
-		name := args[j]
185
-		value := args[j+1]
186
-		res = append(res, KeyValuePair{Key: name, Value: value})
187
-	}
188
-	return res, nil
189
-}
190
-
191
-func parseEnv(req parseRequest) (*EnvCommand, error) {
192
-
193
-	if err := req.flags.Parse(); err != nil {
194
-		return nil, err
195
-	}
196
-	envs, err := parseKvps(req.args, "ENV")
197
-	if err != nil {
198
-		return nil, err
199
-	}
200
-	return &EnvCommand{
201
-		Env:             envs,
202
-		withNameAndCode: newWithNameAndCode(req),
203
-	}, nil
204
-}
205
-
206
-func parseMaintainer(req parseRequest) (*MaintainerCommand, error) {
207
-	if len(req.args) != 1 {
208
-		return nil, errExactlyOneArgument("MAINTAINER")
209
-	}
210
-
211
-	if err := req.flags.Parse(); err != nil {
212
-		return nil, err
213
-	}
214
-	return &MaintainerCommand{
215
-		Maintainer:      req.args[0],
216
-		withNameAndCode: newWithNameAndCode(req),
217
-	}, nil
218
-}
219
-
220
-func parseLabel(req parseRequest) (*LabelCommand, error) {
221
-
222
-	if err := req.flags.Parse(); err != nil {
223
-		return nil, err
224
-	}
225
-
226
-	labels, err := parseKvps(req.args, "LABEL")
227
-	if err != nil {
228
-		return nil, err
229
-	}
230
-
231
-	return &LabelCommand{
232
-		Labels:          labels,
233
-		withNameAndCode: newWithNameAndCode(req),
234
-	}, nil
235
-}
236
-
237
-func parseAdd(req parseRequest) (*AddCommand, error) {
238
-	if len(req.args) < 2 {
239
-		return nil, errNoDestinationArgument("ADD")
240
-	}
241
-	flChown := req.flags.AddString("chown", "")
242
-	if err := req.flags.Parse(); err != nil {
243
-		return nil, err
244
-	}
245
-	return &AddCommand{
246
-		SourcesAndDest:  SourcesAndDest(req.args),
247
-		withNameAndCode: newWithNameAndCode(req),
248
-		Chown:           flChown.Value,
249
-	}, nil
250
-}
251
-
252
-func parseCopy(req parseRequest) (*CopyCommand, error) {
253
-	if len(req.args) < 2 {
254
-		return nil, errNoDestinationArgument("COPY")
255
-	}
256
-	flChown := req.flags.AddString("chown", "")
257
-	flFrom := req.flags.AddString("from", "")
258
-	if err := req.flags.Parse(); err != nil {
259
-		return nil, err
260
-	}
261
-	return &CopyCommand{
262
-		SourcesAndDest:  SourcesAndDest(req.args),
263
-		From:            flFrom.Value,
264
-		withNameAndCode: newWithNameAndCode(req),
265
-		Chown:           flChown.Value,
266
-	}, nil
267
-}
268
-
269
-func parseFrom(req parseRequest) (*Stage, error) {
270
-	stageName, err := parseBuildStageName(req.args)
271
-	if err != nil {
272
-		return nil, err
273
-	}
274
-
275
-	flPlatform := req.flags.AddString("platform", "")
276
-	if err := req.flags.Parse(); err != nil {
277
-		return nil, err
278
-	}
279
-	code := strings.TrimSpace(req.original)
280
-	return &Stage{
281
-		BaseName:   req.args[0],
282
-		Name:       stageName,
283
-		SourceCode: code,
284
-		Commands:   []Command{},
285
-		Platform:   *system.ParsePlatform(flPlatform.Value),
286
-	}, nil
287
-
288
-}
289
-
290
-func parseBuildStageName(args []string) (string, error) {
291
-	stageName := ""
292
-	switch {
293
-	case len(args) == 3 && strings.EqualFold(args[1], "as"):
294
-		stageName = strings.ToLower(args[2])
295
-		if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok {
296
-			return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName)
297
-		}
298
-	case len(args) != 1:
299
-		return "", errors.New("FROM requires either one or three arguments")
300
-	}
301
-
302
-	return stageName, nil
303
-}
304
-
305
-func parseOnBuild(req parseRequest) (*OnbuildCommand, error) {
306
-	if len(req.args) == 0 {
307
-		return nil, errAtLeastOneArgument("ONBUILD")
308
-	}
309
-	if err := req.flags.Parse(); err != nil {
310
-		return nil, err
311
-	}
312
-
313
-	triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0]))
314
-	switch strings.ToUpper(triggerInstruction) {
315
-	case "ONBUILD":
316
-		return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
317
-	case "MAINTAINER", "FROM":
318
-		return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
319
-	}
320
-
321
-	original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "")
322
-	return &OnbuildCommand{
323
-		Expression:      original,
324
-		withNameAndCode: newWithNameAndCode(req),
325
-	}, nil
326
-
327
-}
328
-
329
-func parseWorkdir(req parseRequest) (*WorkdirCommand, error) {
330
-	if len(req.args) != 1 {
331
-		return nil, errExactlyOneArgument("WORKDIR")
332
-	}
333
-
334
-	err := req.flags.Parse()
335
-	if err != nil {
336
-		return nil, err
337
-	}
338
-	return &WorkdirCommand{
339
-		Path:            req.args[0],
340
-		withNameAndCode: newWithNameAndCode(req),
341
-	}, nil
342
-
343
-}
344
-
345
-func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine {
346
-	args := handleJSONArgs(req.args, req.attributes)
347
-	cmd := strslice.StrSlice(args)
348
-	if emptyAsNil && len(cmd) == 0 {
349
-		cmd = nil
350
-	}
351
-	return ShellDependantCmdLine{
352
-		CmdLine:      cmd,
353
-		PrependShell: !req.attributes["json"],
354
-	}
355
-}
356
-
357
-func parseRun(req parseRequest) (*RunCommand, error) {
358
-
359
-	if err := req.flags.Parse(); err != nil {
360
-		return nil, err
361
-	}
362
-	return &RunCommand{
363
-		ShellDependantCmdLine: parseShellDependentCommand(req, false),
364
-		withNameAndCode:       newWithNameAndCode(req),
365
-	}, nil
366
-
367
-}
368
-
369
-func parseCmd(req parseRequest) (*CmdCommand, error) {
370
-	if err := req.flags.Parse(); err != nil {
371
-		return nil, err
372
-	}
373
-	return &CmdCommand{
374
-		ShellDependantCmdLine: parseShellDependentCommand(req, false),
375
-		withNameAndCode:       newWithNameAndCode(req),
376
-	}, nil
377
-
378
-}
379
-
380
-func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) {
381
-	if err := req.flags.Parse(); err != nil {
382
-		return nil, err
383
-	}
384
-
385
-	cmd := &EntrypointCommand{
386
-		ShellDependantCmdLine: parseShellDependentCommand(req, true),
387
-		withNameAndCode:       newWithNameAndCode(req),
388
-	}
389
-
390
-	return cmd, nil
391
-}
392
-
393
-// parseOptInterval(flag) is the duration of flag.Value, or 0 if
394
-// empty. An error is reported if the value is given and less than minimum duration.
395
-func parseOptInterval(f *Flag) (time.Duration, error) {
396
-	s := f.Value
397
-	if s == "" {
398
-		return 0, nil
399
-	}
400
-	d, err := time.ParseDuration(s)
401
-	if err != nil {
402
-		return 0, err
403
-	}
404
-	if d < container.MinimumDuration {
405
-		return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration)
406
-	}
407
-	return d, nil
408
-}
409
-func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) {
410
-	if len(req.args) == 0 {
411
-		return nil, errAtLeastOneArgument("HEALTHCHECK")
412
-	}
413
-	cmd := &HealthCheckCommand{
414
-		withNameAndCode: newWithNameAndCode(req),
415
-	}
416
-
417
-	typ := strings.ToUpper(req.args[0])
418
-	args := req.args[1:]
419
-	if typ == "NONE" {
420
-		if len(args) != 0 {
421
-			return nil, errors.New("HEALTHCHECK NONE takes no arguments")
422
-		}
423
-		test := strslice.StrSlice{typ}
424
-		cmd.Health = &container.HealthConfig{
425
-			Test: test,
426
-		}
427
-	} else {
428
-
429
-		healthcheck := container.HealthConfig{}
430
-
431
-		flInterval := req.flags.AddString("interval", "")
432
-		flTimeout := req.flags.AddString("timeout", "")
433
-		flStartPeriod := req.flags.AddString("start-period", "")
434
-		flRetries := req.flags.AddString("retries", "")
435
-
436
-		if err := req.flags.Parse(); err != nil {
437
-			return nil, err
438
-		}
439
-
440
-		switch typ {
441
-		case "CMD":
442
-			cmdSlice := handleJSONArgs(args, req.attributes)
443
-			if len(cmdSlice) == 0 {
444
-				return nil, errors.New("Missing command after HEALTHCHECK CMD")
445
-			}
446
-
447
-			if !req.attributes["json"] {
448
-				typ = "CMD-SHELL"
449
-			}
450
-
451
-			healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
452
-		default:
453
-			return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
454
-		}
455
-
456
-		interval, err := parseOptInterval(flInterval)
457
-		if err != nil {
458
-			return nil, err
459
-		}
460
-		healthcheck.Interval = interval
461
-
462
-		timeout, err := parseOptInterval(flTimeout)
463
-		if err != nil {
464
-			return nil, err
465
-		}
466
-		healthcheck.Timeout = timeout
467
-
468
-		startPeriod, err := parseOptInterval(flStartPeriod)
469
-		if err != nil {
470
-			return nil, err
471
-		}
472
-		healthcheck.StartPeriod = startPeriod
473
-
474
-		if flRetries.Value != "" {
475
-			retries, err := strconv.ParseInt(flRetries.Value, 10, 32)
476
-			if err != nil {
477
-				return nil, err
478
-			}
479
-			if retries < 1 {
480
-				return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries)
481
-			}
482
-			healthcheck.Retries = int(retries)
483
-		} else {
484
-			healthcheck.Retries = 0
485
-		}
486
-
487
-		cmd.Health = &healthcheck
488
-	}
489
-	return cmd, nil
490
-}
491
-
492
-func parseExpose(req parseRequest) (*ExposeCommand, error) {
493
-	portsTab := req.args
494
-
495
-	if len(req.args) == 0 {
496
-		return nil, errAtLeastOneArgument("EXPOSE")
497
-	}
498
-
499
-	if err := req.flags.Parse(); err != nil {
500
-		return nil, err
501
-	}
502
-
503
-	sort.Strings(portsTab)
504
-	return &ExposeCommand{
505
-		Ports:           portsTab,
506
-		withNameAndCode: newWithNameAndCode(req),
507
-	}, nil
508
-}
509
-
510
-func parseUser(req parseRequest) (*UserCommand, error) {
511
-	if len(req.args) != 1 {
512
-		return nil, errExactlyOneArgument("USER")
513
-	}
514
-
515
-	if err := req.flags.Parse(); err != nil {
516
-		return nil, err
517
-	}
518
-	return &UserCommand{
519
-		User:            req.args[0],
520
-		withNameAndCode: newWithNameAndCode(req),
521
-	}, nil
522
-}
523
-
524
-func parseVolume(req parseRequest) (*VolumeCommand, error) {
525
-	if len(req.args) == 0 {
526
-		return nil, errAtLeastOneArgument("VOLUME")
527
-	}
528
-
529
-	if err := req.flags.Parse(); err != nil {
530
-		return nil, err
531
-	}
532
-
533
-	cmd := &VolumeCommand{
534
-		withNameAndCode: newWithNameAndCode(req),
535
-	}
536
-
537
-	for _, v := range req.args {
538
-		v = strings.TrimSpace(v)
539
-		if v == "" {
540
-			return nil, errors.New("VOLUME specified can not be an empty string")
541
-		}
542
-		cmd.Volumes = append(cmd.Volumes, v)
543
-	}
544
-	return cmd, nil
545
-
546
-}
547
-
548
-func parseStopSignal(req parseRequest) (*StopSignalCommand, error) {
549
-	if len(req.args) != 1 {
550
-		return nil, errExactlyOneArgument("STOPSIGNAL")
551
-	}
552
-	sig := req.args[0]
553
-
554
-	cmd := &StopSignalCommand{
555
-		Signal:          sig,
556
-		withNameAndCode: newWithNameAndCode(req),
557
-	}
558
-	return cmd, nil
559
-
560
-}
561
-
562
-func parseArg(req parseRequest) (*ArgCommand, error) {
563
-	if len(req.args) != 1 {
564
-		return nil, errExactlyOneArgument("ARG")
565
-	}
566
-
567
-	var (
568
-		name     string
569
-		newValue *string
570
-	)
571
-
572
-	arg := req.args[0]
573
-	// 'arg' can just be a name or name-value pair. Note that this is different
574
-	// from 'env' that handles the split of name and value at the parser level.
575
-	// The reason for doing it differently for 'arg' is that we support just
576
-	// defining an arg and not assign it a value (while 'env' always expects a
577
-	// name-value pair). If possible, it will be good to harmonize the two.
578
-	if strings.Contains(arg, "=") {
579
-		parts := strings.SplitN(arg, "=", 2)
580
-		if len(parts[0]) == 0 {
581
-			return nil, errBlankCommandNames("ARG")
582
-		}
583
-
584
-		name = parts[0]
585
-		newValue = &parts[1]
586
-	} else {
587
-		name = arg
588
-	}
589
-
590
-	return &ArgCommand{
591
-		Key:             name,
592
-		Value:           newValue,
593
-		withNameAndCode: newWithNameAndCode(req),
594
-	}, nil
595
-}
596
-
597
-func parseShell(req parseRequest) (*ShellCommand, error) {
598
-	if err := req.flags.Parse(); err != nil {
599
-		return nil, err
600
-	}
601
-	shellSlice := handleJSONArgs(req.args, req.attributes)
602
-	switch {
603
-	case len(shellSlice) == 0:
604
-		// SHELL []
605
-		return nil, errAtLeastOneArgument("SHELL")
606
-	case req.attributes["json"]:
607
-		// SHELL ["powershell", "-command"]
608
-
609
-		return &ShellCommand{
610
-			Shell:           strslice.StrSlice(shellSlice),
611
-			withNameAndCode: newWithNameAndCode(req),
612
-		}, nil
613
-	default:
614
-		// SHELL powershell -command - not JSON
615
-		return nil, errNotJSON("SHELL", req.original)
616
-	}
617
-}
618
-
619
-func errAtLeastOneArgument(command string) error {
620
-	return errors.Errorf("%s requires at least one argument", command)
621
-}
622
-
623
-func errExactlyOneArgument(command string) error {
624
-	return errors.Errorf("%s requires exactly one argument", command)
625
-}
626
-
627
-func errNoDestinationArgument(command string) error {
628
-	return errors.Errorf("%s requires at least two arguments, but only one was provided. Destination could not be determined.", command)
629
-}
630
-
631
-func errBlankCommandNames(command string) error {
632
-	return errors.Errorf("%s names can not be blank", command)
633
-}
634
-
635
-func errTooManyArguments(command string) error {
636
-	return errors.Errorf("Bad input to %s, too many arguments", command)
637
-}
638 1
deleted file mode 100644
... ...
@@ -1,198 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import (
4
-	"strings"
5
-	"testing"
6
-
7
-	"github.com/docker/docker/builder/dockerfile/command"
8
-	"github.com/docker/docker/builder/dockerfile/parser"
9
-	"github.com/gotestyourself/gotestyourself/assert"
10
-	is "github.com/gotestyourself/gotestyourself/assert/cmp"
11
-)
12
-
13
-func TestCommandsExactlyOneArgument(t *testing.T) {
14
-	commands := []string{
15
-		"MAINTAINER",
16
-		"WORKDIR",
17
-		"USER",
18
-		"STOPSIGNAL",
19
-	}
20
-
21
-	for _, cmd := range commands {
22
-		ast, err := parser.Parse(strings.NewReader(cmd))
23
-		assert.NilError(t, err)
24
-		_, err = ParseInstruction(ast.AST.Children[0])
25
-		assert.Check(t, is.Error(err, errExactlyOneArgument(cmd).Error()))
26
-	}
27
-}
28
-
29
-func TestCommandsAtLeastOneArgument(t *testing.T) {
30
-	commands := []string{
31
-		"ENV",
32
-		"LABEL",
33
-		"ONBUILD",
34
-		"HEALTHCHECK",
35
-		"EXPOSE",
36
-		"VOLUME",
37
-	}
38
-
39
-	for _, cmd := range commands {
40
-		ast, err := parser.Parse(strings.NewReader(cmd))
41
-		assert.NilError(t, err)
42
-		_, err = ParseInstruction(ast.AST.Children[0])
43
-		assert.Check(t, is.Error(err, errAtLeastOneArgument(cmd).Error()))
44
-	}
45
-}
46
-
47
-func TestCommandsNoDestinationArgument(t *testing.T) {
48
-	commands := []string{
49
-		"ADD",
50
-		"COPY",
51
-	}
52
-
53
-	for _, cmd := range commands {
54
-		ast, err := parser.Parse(strings.NewReader(cmd + " arg1"))
55
-		assert.NilError(t, err)
56
-		_, err = ParseInstruction(ast.AST.Children[0])
57
-		assert.Check(t, is.Error(err, errNoDestinationArgument(cmd).Error()))
58
-	}
59
-}
60
-
61
-func TestCommandsTooManyArguments(t *testing.T) {
62
-	commands := []string{
63
-		"ENV",
64
-		"LABEL",
65
-	}
66
-
67
-	for _, command := range commands {
68
-		node := &parser.Node{
69
-			Original: command + "arg1 arg2 arg3",
70
-			Value:    strings.ToLower(command),
71
-			Next: &parser.Node{
72
-				Value: "arg1",
73
-				Next: &parser.Node{
74
-					Value: "arg2",
75
-					Next: &parser.Node{
76
-						Value: "arg3",
77
-					},
78
-				},
79
-			},
80
-		}
81
-		_, err := ParseInstruction(node)
82
-		assert.Check(t, is.Error(err, errTooManyArguments(command).Error()))
83
-	}
84
-}
85
-
86
-func TestCommandsBlankNames(t *testing.T) {
87
-	commands := []string{
88
-		"ENV",
89
-		"LABEL",
90
-	}
91
-
92
-	for _, cmd := range commands {
93
-		node := &parser.Node{
94
-			Original: cmd + " =arg2",
95
-			Value:    strings.ToLower(cmd),
96
-			Next: &parser.Node{
97
-				Value: "",
98
-				Next: &parser.Node{
99
-					Value: "arg2",
100
-				},
101
-			},
102
-		}
103
-		_, err := ParseInstruction(node)
104
-		assert.Check(t, is.Error(err, errBlankCommandNames(cmd).Error()))
105
-	}
106
-}
107
-
108
-func TestHealthCheckCmd(t *testing.T) {
109
-	node := &parser.Node{
110
-		Value: command.Healthcheck,
111
-		Next: &parser.Node{
112
-			Value: "CMD",
113
-			Next: &parser.Node{
114
-				Value: "hello",
115
-				Next: &parser.Node{
116
-					Value: "world",
117
-				},
118
-			},
119
-		},
120
-	}
121
-	cmd, err := ParseInstruction(node)
122
-	assert.Check(t, err)
123
-	hc, ok := cmd.(*HealthCheckCommand)
124
-	assert.Check(t, ok)
125
-	expected := []string{"CMD-SHELL", "hello world"}
126
-	assert.Check(t, is.DeepEqual(expected, hc.Health.Test))
127
-}
128
-
129
-func TestParseOptInterval(t *testing.T) {
130
-	flInterval := &Flag{
131
-		name:     "interval",
132
-		flagType: stringType,
133
-		Value:    "50ns",
134
-	}
135
-	_, err := parseOptInterval(flInterval)
136
-	assert.Check(t, is.ErrorContains(err, "cannot be less than 1ms"))
137
-
138
-	flInterval.Value = "1ms"
139
-	_, err = parseOptInterval(flInterval)
140
-	assert.NilError(t, err)
141
-}
142
-
143
-func TestErrorCases(t *testing.T) {
144
-	cases := []struct {
145
-		name          string
146
-		dockerfile    string
147
-		expectedError string
148
-	}{
149
-		{
150
-			name: "copyEmptyWhitespace",
151
-			dockerfile: `COPY	
152
-		quux \
153
-      bar`,
154
-			expectedError: "COPY requires at least two arguments",
155
-		},
156
-		{
157
-			name:          "ONBUILD forbidden FROM",
158
-			dockerfile:    "ONBUILD FROM scratch",
159
-			expectedError: "FROM isn't allowed as an ONBUILD trigger",
160
-		},
161
-		{
162
-			name:          "ONBUILD forbidden MAINTAINER",
163
-			dockerfile:    "ONBUILD MAINTAINER docker.io",
164
-			expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger",
165
-		},
166
-		{
167
-			name:          "ARG two arguments",
168
-			dockerfile:    "ARG foo bar",
169
-			expectedError: "ARG requires exactly one argument",
170
-		},
171
-		{
172
-			name:          "MAINTAINER unknown flag",
173
-			dockerfile:    "MAINTAINER --boo joe@example.com",
174
-			expectedError: "Unknown flag: boo",
175
-		},
176
-		{
177
-			name:          "Chaining ONBUILD",
178
-			dockerfile:    `ONBUILD ONBUILD RUN touch foobar`,
179
-			expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
180
-		},
181
-		{
182
-			name:          "Invalid instruction",
183
-			dockerfile:    `foo bar`,
184
-			expectedError: "unknown instruction: FOO",
185
-		},
186
-	}
187
-	for _, c := range cases {
188
-		r := strings.NewReader(c.dockerfile)
189
-		ast, err := parser.Parse(r)
190
-
191
-		if err != nil {
192
-			t.Fatalf("Error when parsing Dockerfile: %s", err)
193
-		}
194
-		n := ast.AST.Children[0]
195
-		_, err = ParseInstruction(n)
196
-		assert.Check(t, is.ErrorContains(err, c.expectedError))
197
-	}
198
-}
199 1
deleted file mode 100644
... ...
@@ -1,19 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import "strings"
4
-
5
-// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile
6
-// for exec form it returns untouched args slice
7
-// for shell form it returns concatenated args as the first element of a slice
8
-func handleJSONArgs(args []string, attributes map[string]bool) []string {
9
-	if len(args) == 0 {
10
-		return []string{}
11
-	}
12
-
13
-	if attributes != nil && attributes["json"] {
14
-		return args
15
-	}
16
-
17
-	// literal string command, not an exec array
18
-	return []string{strings.Join(args, " ")}
19
-}
20 1
deleted file mode 100644
... ...
@@ -1,65 +0,0 @@
1
-package instructions // import "github.com/docker/docker/builder/dockerfile/instructions"
2
-
3
-import "testing"
4
-
5
-type testCase struct {
6
-	name       string
7
-	args       []string
8
-	attributes map[string]bool
9
-	expected   []string
10
-}
11
-
12
-func initTestCases() []testCase {
13
-	var testCases []testCase
14
-
15
-	testCases = append(testCases, testCase{
16
-		name:       "empty args",
17
-		args:       []string{},
18
-		attributes: make(map[string]bool),
19
-		expected:   []string{},
20
-	})
21
-
22
-	jsonAttributes := make(map[string]bool)
23
-	jsonAttributes["json"] = true
24
-
25
-	testCases = append(testCases, testCase{
26
-		name:       "json attribute with one element",
27
-		args:       []string{"foo"},
28
-		attributes: jsonAttributes,
29
-		expected:   []string{"foo"},
30
-	})
31
-
32
-	testCases = append(testCases, testCase{
33
-		name:       "json attribute with two elements",
34
-		args:       []string{"foo", "bar"},
35
-		attributes: jsonAttributes,
36
-		expected:   []string{"foo", "bar"},
37
-	})
38
-
39
-	testCases = append(testCases, testCase{
40
-		name:       "no attributes",
41
-		args:       []string{"foo", "bar"},
42
-		attributes: nil,
43
-		expected:   []string{"foo bar"},
44
-	})
45
-
46
-	return testCases
47
-}
48
-
49
-func TestHandleJSONArgs(t *testing.T) {
50
-	testCases := initTestCases()
51
-
52
-	for _, test := range testCases {
53
-		arguments := handleJSONArgs(test.args, test.attributes)
54
-
55
-		if len(arguments) != len(test.expected) {
56
-			t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments))
57
-		}
58
-
59
-		for i := range test.expected {
60
-			if arguments[i] != test.expected[i] {
61
-				t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i])
62
-			}
63
-		}
64
-	}
65
-}
66 1
deleted file mode 100644
... ...
@@ -1,32 +0,0 @@
1
-package main
2
-
3
-import (
4
-	"fmt"
5
-	"os"
6
-
7
-	"github.com/docker/docker/builder/dockerfile/parser"
8
-)
9
-
10
-func main() {
11
-	var f *os.File
12
-	var err error
13
-
14
-	if len(os.Args) < 2 {
15
-		fmt.Println("please supply filename(s)")
16
-		os.Exit(1)
17
-	}
18
-
19
-	for _, fn := range os.Args[1:] {
20
-		f, err = os.Open(fn)
21
-		if err != nil {
22
-			panic(err)
23
-		}
24
-		defer f.Close()
25
-
26
-		result, err := parser.Parse(f)
27
-		if err != nil {
28
-			panic(err)
29
-		}
30
-		fmt.Println(result.AST.Dump())
31
-	}
32
-}
33 1
deleted file mode 100644
... ...
@@ -1,59 +0,0 @@
1
-package parser // import "github.com/docker/docker/builder/dockerfile/parser"
2
-
3
-import (
4
-	"testing"
5
-)
6
-
7
-var invalidJSONArraysOfStrings = []string{
8
-	`["a",42,"b"]`,
9
-	`["a",123.456,"b"]`,
10
-	`["a",{},"b"]`,
11
-	`["a",{"c": "d"},"b"]`,
12
-	`["a",["c"],"b"]`,
13
-	`["a",true,"b"]`,
14
-	`["a",false,"b"]`,
15
-	`["a",null,"b"]`,
16
-}
17
-
18
-var validJSONArraysOfStrings = map[string][]string{
19
-	`[]`:           {},
20
-	`[""]`:         {""},
21
-	`["a"]`:        {"a"},
22
-	`["a","b"]`:    {"a", "b"},
23
-	`[ "a", "b" ]`: {"a", "b"},
24
-	`[	"a",	"b"	]`: {"a", "b"},
25
-	`	[	"a",	"b"	]	`: {"a", "b"},
26
-	`["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
27
-}
28
-
29
-func TestJSONArraysOfStrings(t *testing.T) {
30
-	for json, expected := range validJSONArraysOfStrings {
31
-		d := NewDefaultDirective()
32
-
33
-		if node, _, err := parseJSON(json, d); err != nil {
34
-			t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
35
-		} else {
36
-			i := 0
37
-			for node != nil {
38
-				if i >= len(expected) {
39
-					t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json)
40
-				}
41
-				if node.Value != expected[i] {
42
-					t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i)
43
-				}
44
-				node = node.Next
45
-				i++
46
-			}
47
-			if i != len(expected) {
48
-				t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json)
49
-			}
50
-		}
51
-	}
52
-	for _, json := range invalidJSONArraysOfStrings {
53
-		d := NewDefaultDirective()
54
-
55
-		if _, _, err := parseJSON(json, d); err != errDockerfileNotStringArray {
56
-			t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
57
-		}
58
-	}
59
-}
60 1
deleted file mode 100644
... ...
@@ -1,368 +0,0 @@
1
-package parser // import "github.com/docker/docker/builder/dockerfile/parser"
2
-
3
-// line parsers are dispatch calls that parse a single unit of text into a
4
-// Node object which contains the whole statement. Dockerfiles have varied
5
-// (but not usually unique, see ONBUILD for a unique example) parsing rules
6
-// per-command, and these unify the processing in a way that makes it
7
-// manageable.
8
-
9
-import (
10
-	"encoding/json"
11
-	"errors"
12
-	"fmt"
13
-	"strings"
14
-	"unicode"
15
-	"unicode/utf8"
16
-)
17
-
18
-var (
19
-	errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
20
-)
21
-
22
-const (
23
-	commandLabel = "LABEL"
24
-)
25
-
26
-// ignore the current argument. This will still leave a command parsed, but
27
-// will not incorporate the arguments into the ast.
28
-func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
29
-	return &Node{}, nil, nil
30
-}
31
-
32
-// used for onbuild. Could potentially be used for anything that represents a
33
-// statement with sub-statements.
34
-//
35
-// ONBUILD RUN foo bar -> (onbuild (run foo bar))
36
-//
37
-func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
38
-	if rest == "" {
39
-		return nil, nil, nil
40
-	}
41
-
42
-	child, err := newNodeFromLine(rest, d)
43
-	if err != nil {
44
-		return nil, nil, err
45
-	}
46
-
47
-	return &Node{Children: []*Node{child}}, nil, nil
48
-}
49
-
50
-// helper to parse words (i.e space delimited or quoted strings) in a statement.
51
-// The quotes are preserved as part of this function and they are stripped later
52
-// as part of processWords().
53
-func parseWords(rest string, d *Directive) []string {
54
-	const (
55
-		inSpaces = iota // looking for start of a word
56
-		inWord
57
-		inQuote
58
-	)
59
-
60
-	words := []string{}
61
-	phase := inSpaces
62
-	word := ""
63
-	quote := '\000'
64
-	blankOK := false
65
-	var ch rune
66
-	var chWidth int
67
-
68
-	for pos := 0; pos <= len(rest); pos += chWidth {
69
-		if pos != len(rest) {
70
-			ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
71
-		}
72
-
73
-		if phase == inSpaces { // Looking for start of word
74
-			if pos == len(rest) { // end of input
75
-				break
76
-			}
77
-			if unicode.IsSpace(ch) { // skip spaces
78
-				continue
79
-			}
80
-			phase = inWord // found it, fall through
81
-		}
82
-		if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
83
-			if blankOK || len(word) > 0 {
84
-				words = append(words, word)
85
-			}
86
-			break
87
-		}
88
-		if phase == inWord {
89
-			if unicode.IsSpace(ch) {
90
-				phase = inSpaces
91
-				if blankOK || len(word) > 0 {
92
-					words = append(words, word)
93
-				}
94
-				word = ""
95
-				blankOK = false
96
-				continue
97
-			}
98
-			if ch == '\'' || ch == '"' {
99
-				quote = ch
100
-				blankOK = true
101
-				phase = inQuote
102
-			}
103
-			if ch == d.escapeToken {
104
-				if pos+chWidth == len(rest) {
105
-					continue // just skip an escape token at end of line
106
-				}
107
-				// If we're not quoted and we see an escape token, then always just
108
-				// add the escape token plus the char to the word, even if the char
109
-				// is a quote.
110
-				word += string(ch)
111
-				pos += chWidth
112
-				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
113
-			}
114
-			word += string(ch)
115
-			continue
116
-		}
117
-		if phase == inQuote {
118
-			if ch == quote {
119
-				phase = inWord
120
-			}
121
-			// The escape token is special except for ' quotes - can't escape anything for '
122
-			if ch == d.escapeToken && quote != '\'' {
123
-				if pos+chWidth == len(rest) {
124
-					phase = inWord
125
-					continue // just skip the escape token at end
126
-				}
127
-				pos += chWidth
128
-				word += string(ch)
129
-				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
130
-			}
131
-			word += string(ch)
132
-		}
133
-	}
134
-
135
-	return words
136
-}
137
-
138
-// parse environment like statements. Note that this does *not* handle
139
-// variable interpolation, which will be handled in the evaluator.
140
-func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
141
-	// This is kind of tricky because we need to support the old
142
-	// variant:   KEY name value
143
-	// as well as the new one:    KEY name=value ...
144
-	// The trigger to know which one is being used will be whether we hit
145
-	// a space or = first.  space ==> old, "=" ==> new
146
-
147
-	words := parseWords(rest, d)
148
-	if len(words) == 0 {
149
-		return nil, nil
150
-	}
151
-
152
-	// Old format (KEY name value)
153
-	if !strings.Contains(words[0], "=") {
154
-		parts := tokenWhitespace.Split(rest, 2)
155
-		if len(parts) < 2 {
156
-			return nil, fmt.Errorf(key + " must have two arguments")
157
-		}
158
-		return newKeyValueNode(parts[0], parts[1]), nil
159
-	}
160
-
161
-	var rootNode *Node
162
-	var prevNode *Node
163
-	for _, word := range words {
164
-		if !strings.Contains(word, "=") {
165
-			return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
166
-		}
167
-
168
-		parts := strings.SplitN(word, "=", 2)
169
-		node := newKeyValueNode(parts[0], parts[1])
170
-		rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
171
-	}
172
-
173
-	return rootNode, nil
174
-}
175
-
176
-func newKeyValueNode(key, value string) *Node {
177
-	return &Node{
178
-		Value: key,
179
-		Next:  &Node{Value: value},
180
-	}
181
-}
182
-
183
-func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
184
-	if rootNode == nil {
185
-		rootNode = node
186
-	}
187
-	if prevNode != nil {
188
-		prevNode.Next = node
189
-	}
190
-
191
-	prevNode = node.Next
192
-	return rootNode, prevNode
193
-}
194
-
195
-func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
196
-	node, err := parseNameVal(rest, "ENV", d)
197
-	return node, nil, err
198
-}
199
-
200
-func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
201
-	node, err := parseNameVal(rest, commandLabel, d)
202
-	return node, nil, err
203
-}
204
-
205
-// parses a statement containing one or more keyword definition(s) and/or
206
-// value assignments, like `name1 name2= name3="" name4=value`.
207
-// Note that this is a stricter format than the old format of assignment,
208
-// allowed by parseNameVal(), in a way that this only allows assignment of the
209
-// form `keyword=[<value>]` like  `name2=`, `name3=""`, and `name4=value` above.
210
-// In addition, a keyword definition alone is of the form `keyword` like `name1`
211
-// above. And the assignments `name2=` and `name3=""` are equivalent and
212
-// assign an empty value to the respective keywords.
213
-func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
214
-	words := parseWords(rest, d)
215
-	if len(words) == 0 {
216
-		return nil, nil, nil
217
-	}
218
-
219
-	var (
220
-		rootnode *Node
221
-		prevNode *Node
222
-	)
223
-	for i, word := range words {
224
-		node := &Node{}
225
-		node.Value = word
226
-		if i == 0 {
227
-			rootnode = node
228
-		} else {
229
-			prevNode.Next = node
230
-		}
231
-		prevNode = node
232
-	}
233
-
234
-	return rootnode, nil, nil
235
-}
236
-
237
-// parses a whitespace-delimited set of arguments. The result is effectively a
238
-// linked list of string arguments.
239
-func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
240
-	if rest == "" {
241
-		return nil, nil, nil
242
-	}
243
-
244
-	node := &Node{}
245
-	rootnode := node
246
-	prevnode := node
247
-	for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
248
-		prevnode = node
249
-		node.Value = str
250
-		node.Next = &Node{}
251
-		node = node.Next
252
-	}
253
-
254
-	// XXX to get around regexp.Split *always* providing an empty string at the
255
-	// end due to how our loop is constructed, nil out the last node in the
256
-	// chain.
257
-	prevnode.Next = nil
258
-
259
-	return rootnode, nil, nil
260
-}
261
-
262
-// parseString just wraps the string in quotes and returns a working node.
263
-func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
264
-	if rest == "" {
265
-		return nil, nil, nil
266
-	}
267
-	n := &Node{}
268
-	n.Value = rest
269
-	return n, nil, nil
270
-}
271
-
272
-// parseJSON converts JSON arrays to an AST.
273
-func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
274
-	rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
275
-	if !strings.HasPrefix(rest, "[") {
276
-		return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
277
-	}
278
-
279
-	var myJSON []interface{}
280
-	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
281
-		return nil, nil, err
282
-	}
283
-
284
-	var top, prev *Node
285
-	for _, str := range myJSON {
286
-		s, ok := str.(string)
287
-		if !ok {
288
-			return nil, nil, errDockerfileNotStringArray
289
-		}
290
-
291
-		node := &Node{Value: s}
292
-		if prev == nil {
293
-			top = node
294
-		} else {
295
-			prev.Next = node
296
-		}
297
-		prev = node
298
-	}
299
-
300
-	return top, map[string]bool{"json": true}, nil
301
-}
302
-
303
-// parseMaybeJSON determines if the argument appears to be a JSON array. If
304
-// so, passes to parseJSON; if not, quotes the result and returns a single
305
-// node.
306
-func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
307
-	if rest == "" {
308
-		return nil, nil, nil
309
-	}
310
-
311
-	node, attrs, err := parseJSON(rest, d)
312
-
313
-	if err == nil {
314
-		return node, attrs, nil
315
-	}
316
-	if err == errDockerfileNotStringArray {
317
-		return nil, nil, err
318
-	}
319
-
320
-	node = &Node{}
321
-	node.Value = rest
322
-	return node, nil, nil
323
-}
324
-
325
-// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
326
-// so, passes to parseJSON; if not, attempts to parse it as a whitespace
327
-// delimited string.
328
-func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
329
-	node, attrs, err := parseJSON(rest, d)
330
-
331
-	if err == nil {
332
-		return node, attrs, nil
333
-	}
334
-	if err == errDockerfileNotStringArray {
335
-		return nil, nil, err
336
-	}
337
-
338
-	return parseStringsWhitespaceDelimited(rest, d)
339
-}
340
-
341
-// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
342
-func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
343
-	// Find end of first argument
344
-	var sep int
345
-	for ; sep < len(rest); sep++ {
346
-		if unicode.IsSpace(rune(rest[sep])) {
347
-			break
348
-		}
349
-	}
350
-	next := sep
351
-	for ; next < len(rest); next++ {
352
-		if !unicode.IsSpace(rune(rest[next])) {
353
-			break
354
-		}
355
-	}
356
-
357
-	if sep == 0 {
358
-		return nil, nil, nil
359
-	}
360
-
361
-	typ := rest[:sep]
362
-	cmd, attrs, err := parseMaybeJSON(rest[next:], d)
363
-	if err != nil {
364
-		return nil, nil, err
365
-	}
366
-
367
-	return &Node{Value: typ, Next: cmd}, attrs, err
368
-}
369 1
deleted file mode 100644
... ...
@@ -1,51 +0,0 @@
1
-package parser // import "github.com/docker/docker/builder/dockerfile/parser"
2
-
3
-import (
4
-	"testing"
5
-
6
-	"github.com/google/go-cmp/cmp"
7
-	"github.com/gotestyourself/gotestyourself/assert"
8
-	is "github.com/gotestyourself/gotestyourself/assert/cmp"
9
-)
10
-
11
-func TestParseNameValOldFormat(t *testing.T) {
12
-	directive := Directive{}
13
-	node, err := parseNameVal("foo bar", "LABEL", &directive)
14
-	assert.Check(t, err)
15
-
16
-	expected := &Node{
17
-		Value: "foo",
18
-		Next:  &Node{Value: "bar"},
19
-	}
20
-	assert.DeepEqual(t, expected, node, cmpNodeOpt)
21
-}
22
-
23
-var cmpNodeOpt = cmp.AllowUnexported(Node{})
24
-
25
-func TestParseNameValNewFormat(t *testing.T) {
26
-	directive := Directive{}
27
-	node, err := parseNameVal("foo=bar thing=star", "LABEL", &directive)
28
-	assert.Check(t, err)
29
-
30
-	expected := &Node{
31
-		Value: "foo",
32
-		Next: &Node{
33
-			Value: "bar",
34
-			Next: &Node{
35
-				Value: "thing",
36
-				Next: &Node{
37
-					Value: "star",
38
-				},
39
-			},
40
-		},
41
-	}
42
-	assert.DeepEqual(t, expected, node, cmpNodeOpt)
43
-}
44
-
45
-func TestParseNameValWithoutVal(t *testing.T) {
46
-	directive := Directive{}
47
-	// In Config.Env, a variable without `=` is removed from the environment. (#31634)
48
-	// However, in Dockerfile, we don't allow "unsetting" an environment variable. (#11922)
49
-	_, err := parseNameVal("foo", "ENV", &directive)
50
-	assert.Check(t, is.ErrorContains(err, ""), "ENV must have two arguments")
51
-}
52 1
deleted file mode 100644
... ...
@@ -1,327 +0,0 @@
1
-// Package parser implements a parser and parse tree dumper for Dockerfiles.
2
-package parser // import "github.com/docker/docker/builder/dockerfile/parser"
3
-
4
-import (
5
-	"bufio"
6
-	"bytes"
7
-	"fmt"
8
-	"io"
9
-	"regexp"
10
-	"strconv"
11
-	"strings"
12
-	"unicode"
13
-
14
-	"github.com/docker/docker/builder/dockerfile/command"
15
-	"github.com/pkg/errors"
16
-)
17
-
18
-// Node is a structure used to represent a parse tree.
19
-//
20
-// In the node there are three fields, Value, Next, and Children. Value is the
21
-// current token's string value. Next is always the next non-child token, and
22
-// children contains all the children. Here's an example:
23
-//
24
-// (value next (child child-next child-next-next) next-next)
25
-//
26
-// This data structure is frankly pretty lousy for handling complex languages,
27
-// but lucky for us the Dockerfile isn't very complicated. This structure
28
-// works a little more effectively than a "proper" parse tree for our needs.
29
-//
30
-type Node struct {
31
-	Value      string          // actual content
32
-	Next       *Node           // the next item in the current sexp
33
-	Children   []*Node         // the children of this sexp
34
-	Attributes map[string]bool // special attributes for this node
35
-	Original   string          // original line used before parsing
36
-	Flags      []string        // only top Node should have this set
37
-	StartLine  int             // the line in the original dockerfile where the node begins
38
-	endLine    int             // the line in the original dockerfile where the node ends
39
-}
40
-
41
-// Dump dumps the AST defined by `node` as a list of sexps.
42
-// Returns a string suitable for printing.
43
-func (node *Node) Dump() string {
44
-	str := ""
45
-	str += node.Value
46
-
47
-	if len(node.Flags) > 0 {
48
-		str += fmt.Sprintf(" %q", node.Flags)
49
-	}
50
-
51
-	for _, n := range node.Children {
52
-		str += "(" + n.Dump() + ")\n"
53
-	}
54
-
55
-	for n := node.Next; n != nil; n = n.Next {
56
-		if len(n.Children) > 0 {
57
-			str += " " + n.Dump()
58
-		} else {
59
-			str += " " + strconv.Quote(n.Value)
60
-		}
61
-	}
62
-
63
-	return strings.TrimSpace(str)
64
-}
65
-
66
-func (node *Node) lines(start, end int) {
67
-	node.StartLine = start
68
-	node.endLine = end
69
-}
70
-
71
-// AddChild adds a new child node, and updates line information
72
-func (node *Node) AddChild(child *Node, startLine, endLine int) {
73
-	child.lines(startLine, endLine)
74
-	if node.StartLine < 0 {
75
-		node.StartLine = startLine
76
-	}
77
-	node.endLine = endLine
78
-	node.Children = append(node.Children, child)
79
-}
80
-
81
-var (
82
-	dispatch           map[string]func(string, *Directive) (*Node, map[string]bool, error)
83
-	tokenWhitespace    = regexp.MustCompile(`[\t\v\f\r ]+`)
84
-	tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
85
-	tokenComment       = regexp.MustCompile(`^#.*$`)
86
-)
87
-
88
-// DefaultEscapeToken is the default escape token
89
-const DefaultEscapeToken = '\\'
90
-
91
-// Directive is the structure used during a build run to hold the state of
92
-// parsing directives.
93
-type Directive struct {
94
-	escapeToken           rune           // Current escape token
95
-	lineContinuationRegex *regexp.Regexp // Current line continuation regex
96
-	processingComplete    bool           // Whether we are done looking for directives
97
-	escapeSeen            bool           // Whether the escape directive has been seen
98
-}
99
-
100
-// setEscapeToken sets the default token for escaping characters in a Dockerfile.
101
-func (d *Directive) setEscapeToken(s string) error {
102
-	if s != "`" && s != "\\" {
103
-		return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
104
-	}
105
-	d.escapeToken = rune(s[0])
106
-	d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
107
-	return nil
108
-}
109
-
110
-// possibleParserDirective looks for parser directives, eg '# escapeToken=<char>'.
111
-// Parser directives must precede any builder instruction or other comments,
112
-// and cannot be repeated.
113
-func (d *Directive) possibleParserDirective(line string) error {
114
-	if d.processingComplete {
115
-		return nil
116
-	}
117
-
118
-	tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
119
-	if len(tecMatch) != 0 {
120
-		for i, n := range tokenEscapeCommand.SubexpNames() {
121
-			if n == "escapechar" {
122
-				if d.escapeSeen {
123
-					return errors.New("only one escape parser directive can be used")
124
-				}
125
-				d.escapeSeen = true
126
-				return d.setEscapeToken(tecMatch[i])
127
-			}
128
-		}
129
-	}
130
-
131
-	d.processingComplete = true
132
-	return nil
133
-}
134
-
135
-// NewDefaultDirective returns a new Directive with the default escapeToken token
136
-func NewDefaultDirective() *Directive {
137
-	directive := Directive{}
138
-	directive.setEscapeToken(string(DefaultEscapeToken))
139
-	return &directive
140
-}
141
-
142
-func init() {
143
-	// Dispatch Table. see line_parsers.go for the parse functions.
144
-	// The command is parsed and mapped to the line parser. The line parser
145
-	// receives the arguments but not the command, and returns an AST after
146
-	// reformulating the arguments according to the rules in the parser
147
-	// functions. Errors are propagated up by Parse() and the resulting AST can
148
-	// be incorporated directly into the existing AST as a next.
149
-	dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
150
-		command.Add:         parseMaybeJSONToList,
151
-		command.Arg:         parseNameOrNameVal,
152
-		command.Cmd:         parseMaybeJSON,
153
-		command.Copy:        parseMaybeJSONToList,
154
-		command.Entrypoint:  parseMaybeJSON,
155
-		command.Env:         parseEnv,
156
-		command.Expose:      parseStringsWhitespaceDelimited,
157
-		command.From:        parseStringsWhitespaceDelimited,
158
-		command.Healthcheck: parseHealthConfig,
159
-		command.Label:       parseLabel,
160
-		command.Maintainer:  parseString,
161
-		command.Onbuild:     parseSubCommand,
162
-		command.Run:         parseMaybeJSON,
163
-		command.Shell:       parseMaybeJSON,
164
-		command.StopSignal:  parseString,
165
-		command.User:        parseString,
166
-		command.Volume:      parseMaybeJSONToList,
167
-		command.Workdir:     parseString,
168
-	}
169
-}
170
-
171
-// newNodeFromLine splits the line into parts, and dispatches to a function
172
-// based on the command and command arguments. A Node is created from the
173
-// result of the dispatch.
174
-func newNodeFromLine(line string, directive *Directive) (*Node, error) {
175
-	cmd, flags, args, err := splitCommand(line)
176
-	if err != nil {
177
-		return nil, err
178
-	}
179
-
180
-	fn := dispatch[cmd]
181
-	// Ignore invalid Dockerfile instructions
182
-	if fn == nil {
183
-		fn = parseIgnore
184
-	}
185
-	next, attrs, err := fn(args, directive)
186
-	if err != nil {
187
-		return nil, err
188
-	}
189
-
190
-	return &Node{
191
-		Value:      cmd,
192
-		Original:   line,
193
-		Flags:      flags,
194
-		Next:       next,
195
-		Attributes: attrs,
196
-	}, nil
197
-}
198
-
199
-// Result is the result of parsing a Dockerfile
200
-type Result struct {
201
-	AST         *Node
202
-	EscapeToken rune
203
-	Warnings    []string
204
-}
205
-
206
-// PrintWarnings to the writer
207
-func (r *Result) PrintWarnings(out io.Writer) {
208
-	if len(r.Warnings) == 0 {
209
-		return
210
-	}
211
-	fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n")
212
-}
213
-
214
-// Parse reads lines from a Reader, parses the lines into an AST and returns
215
-// the AST and escape token
216
-func Parse(rwc io.Reader) (*Result, error) {
217
-	d := NewDefaultDirective()
218
-	currentLine := 0
219
-	root := &Node{StartLine: -1}
220
-	scanner := bufio.NewScanner(rwc)
221
-	warnings := []string{}
222
-
223
-	var err error
224
-	for scanner.Scan() {
225
-		bytesRead := scanner.Bytes()
226
-		if currentLine == 0 {
227
-			// First line, strip the byte-order-marker if present
228
-			bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
229
-		}
230
-		bytesRead, err = processLine(d, bytesRead, true)
231
-		if err != nil {
232
-			return nil, err
233
-		}
234
-		currentLine++
235
-
236
-		startLine := currentLine
237
-		line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
238
-		if isEndOfLine && line == "" {
239
-			continue
240
-		}
241
-
242
-		var hasEmptyContinuationLine bool
243
-		for !isEndOfLine && scanner.Scan() {
244
-			bytesRead, err := processLine(d, scanner.Bytes(), false)
245
-			if err != nil {
246
-				return nil, err
247
-			}
248
-			currentLine++
249
-
250
-			if isComment(scanner.Bytes()) {
251
-				// original line was a comment (processLine strips comments)
252
-				continue
253
-			}
254
-			if isEmptyContinuationLine(bytesRead) {
255
-				hasEmptyContinuationLine = true
256
-				continue
257
-			}
258
-
259
-			continuationLine := string(bytesRead)
260
-			continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
261
-			line += continuationLine
262
-		}
263
-
264
-		if hasEmptyContinuationLine {
265
-			warnings = append(warnings, "[WARNING]: Empty continuation line found in:\n    "+line)
266
-		}
267
-
268
-		child, err := newNodeFromLine(line, d)
269
-		if err != nil {
270
-			return nil, err
271
-		}
272
-		root.AddChild(child, startLine, currentLine)
273
-	}
274
-
275
-	if len(warnings) > 0 {
276
-		warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
277
-	}
278
-	return &Result{
279
-		AST:         root,
280
-		Warnings:    warnings,
281
-		EscapeToken: d.escapeToken,
282
-	}, handleScannerError(scanner.Err())
283
-}
284
-
285
-func trimComments(src []byte) []byte {
286
-	return tokenComment.ReplaceAll(src, []byte{})
287
-}
288
-
289
-func trimWhitespace(src []byte) []byte {
290
-	return bytes.TrimLeftFunc(src, unicode.IsSpace)
291
-}
292
-
293
-func isComment(line []byte) bool {
294
-	return tokenComment.Match(trimWhitespace(line))
295
-}
296
-
297
-func isEmptyContinuationLine(line []byte) bool {
298
-	return len(trimWhitespace(line)) == 0
299
-}
300
-
301
-var utf8bom = []byte{0xEF, 0xBB, 0xBF}
302
-
303
-func trimContinuationCharacter(line string, d *Directive) (string, bool) {
304
-	if d.lineContinuationRegex.MatchString(line) {
305
-		line = d.lineContinuationRegex.ReplaceAllString(line, "")
306
-		return line, false
307
-	}
308
-	return line, true
309
-}
310
-
311
-// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
312
-// to preserve whitespace on continuation lines. Why is that done?
313
-func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
314
-	if stripLeftWhitespace {
315
-		token = trimWhitespace(token)
316
-	}
317
-	return trimComments(token), d.possibleParserDirective(string(token))
318
-}
319
-
320
-func handleScannerError(err error) error {
321
-	switch err {
322
-	case bufio.ErrTooLong:
323
-		return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1)
324
-	default:
325
-		return err
326
-	}
327
-}
328 1
deleted file mode 100644
... ...
@@ -1,174 +0,0 @@
1
-package parser // import "github.com/docker/docker/builder/dockerfile/parser"
2
-
3
-import (
4
-	"bufio"
5
-	"bytes"
6
-	"fmt"
7
-	"io/ioutil"
8
-	"os"
9
-	"path/filepath"
10
-	"runtime"
11
-	"strings"
12
-	"testing"
13
-
14
-	"github.com/gotestyourself/gotestyourself/assert"
15
-	is "github.com/gotestyourself/gotestyourself/assert/cmp"
16
-)
17
-
18
-const testDir = "testfiles"
19
-const negativeTestDir = "testfiles-negative"
20
-const testFileLineInfo = "testfile-line/Dockerfile"
21
-
22
-func getDirs(t *testing.T, dir string) []string {
23
-	f, err := os.Open(dir)
24
-	assert.NilError(t, err)
25
-	defer f.Close()
26
-
27
-	dirs, err := f.Readdirnames(0)
28
-	assert.NilError(t, err)
29
-	return dirs
30
-}
31
-
32
-func TestParseErrorCases(t *testing.T) {
33
-	for _, dir := range getDirs(t, negativeTestDir) {
34
-		dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile")
35
-
36
-		df, err := os.Open(dockerfile)
37
-		assert.NilError(t, err, dockerfile)
38
-		defer df.Close()
39
-
40
-		_, err = Parse(df)
41
-		assert.Check(t, is.ErrorContains(err, ""), dockerfile)
42
-	}
43
-}
44
-
45
-func TestParseCases(t *testing.T) {
46
-	for _, dir := range getDirs(t, testDir) {
47
-		dockerfile := filepath.Join(testDir, dir, "Dockerfile")
48
-		resultfile := filepath.Join(testDir, dir, "result")
49
-
50
-		df, err := os.Open(dockerfile)
51
-		assert.NilError(t, err, dockerfile)
52
-		defer df.Close()
53
-
54
-		result, err := Parse(df)
55
-		assert.NilError(t, err, dockerfile)
56
-
57
-		content, err := ioutil.ReadFile(resultfile)
58
-		assert.NilError(t, err, resultfile)
59
-
60
-		if runtime.GOOS == "windows" {
61
-			// CRLF --> CR to match Unix behavior
62
-			content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1)
63
-		}
64
-		assert.Check(t, is.Equal(result.AST.Dump()+"\n", string(content)), "In "+dockerfile)
65
-	}
66
-}
67
-
68
-func TestParseWords(t *testing.T) {
69
-	tests := []map[string][]string{
70
-		{
71
-			"input":  {"foo"},
72
-			"expect": {"foo"},
73
-		},
74
-		{
75
-			"input":  {"foo bar"},
76
-			"expect": {"foo", "bar"},
77
-		},
78
-		{
79
-			"input":  {"foo\\ bar"},
80
-			"expect": {"foo\\ bar"},
81
-		},
82
-		{
83
-			"input":  {"foo=bar"},
84
-			"expect": {"foo=bar"},
85
-		},
86
-		{
87
-			"input":  {"foo bar 'abc xyz'"},
88
-			"expect": {"foo", "bar", "'abc xyz'"},
89
-		},
90
-		{
91
-			"input":  {`foo bar "abc xyz"`},
92
-			"expect": {"foo", "bar", `"abc xyz"`},
93
-		},
94
-		{
95
-			"input":  {"àöû"},
96
-			"expect": {"àöû"},
97
-		},
98
-		{
99
-			"input":  {`föo bàr "âbc xÿz"`},
100
-			"expect": {"föo", "bàr", `"âbc xÿz"`},
101
-		},
102
-	}
103
-
104
-	for _, test := range tests {
105
-		words := parseWords(test["input"][0], NewDefaultDirective())
106
-		assert.Check(t, is.DeepEqual(test["expect"], words))
107
-	}
108
-}
109
-
110
-func TestParseIncludesLineNumbers(t *testing.T) {
111
-	df, err := os.Open(testFileLineInfo)
112
-	assert.NilError(t, err)
113
-	defer df.Close()
114
-
115
-	result, err := Parse(df)
116
-	assert.NilError(t, err)
117
-
118
-	ast := result.AST
119
-	assert.Check(t, is.Equal(5, ast.StartLine))
120
-	assert.Check(t, is.Equal(31, ast.endLine))
121
-	assert.Check(t, is.Len(ast.Children, 3))
122
-	expected := [][]int{
123
-		{5, 5},
124
-		{11, 12},
125
-		{17, 31},
126
-	}
127
-	for i, child := range ast.Children {
128
-		msg := fmt.Sprintf("Child %d", i)
129
-		assert.Check(t, is.DeepEqual(expected[i], []int{child.StartLine, child.endLine}), msg)
130
-	}
131
-}
132
-
133
-func TestParseWarnsOnEmptyContinutationLine(t *testing.T) {
134
-	dockerfile := bytes.NewBufferString(`
135
-FROM alpine:3.6
136
-
137
-RUN something \
138
-
139
-    following \
140
-
141
-    more
142
-
143
-RUN another \
144
-
145
-    thing
146
-RUN non-indented \
147
-# this is a comment
148
-   after-comment
149
-
150
-RUN indented \
151
-    # this is an indented comment
152
-    comment
153
-	`)
154
-
155
-	result, err := Parse(dockerfile)
156
-	assert.NilError(t, err)
157
-	warnings := result.Warnings
158
-	assert.Check(t, is.Len(warnings, 3))
159
-	assert.Check(t, is.Contains(warnings[0], "Empty continuation line found in"))
160
-	assert.Check(t, is.Contains(warnings[0], "RUN something     following     more"))
161
-	assert.Check(t, is.Contains(warnings[1], "RUN another     thing"))
162
-	assert.Check(t, is.Contains(warnings[2], "will become errors in a future release"))
163
-}
164
-
165
-func TestParseReturnsScannerErrors(t *testing.T) {
166
-	label := strings.Repeat("a", bufio.MaxScanTokenSize)
167
-
168
-	dockerfile := strings.NewReader(fmt.Sprintf(`
169
-		FROM image
170
-		LABEL test=%s
171
-`, label))
172
-	_, err := Parse(dockerfile)
173
-	assert.Check(t, is.Error(err, "dockerfile line greater than max allowed size of 65535"))
174
-}
175 1
deleted file mode 100644
... ...
@@ -1,118 +0,0 @@
1
-package parser // import "github.com/docker/docker/builder/dockerfile/parser"
2
-
3
-import (
4
-	"strings"
5
-	"unicode"
6
-)
7
-
8
-// splitCommand takes a single line of text and parses out the cmd and args,
9
-// which are used for dispatching to more exact parsing functions.
10
-func splitCommand(line string) (string, []string, string, error) {
11
-	var args string
12
-	var flags []string
13
-
14
-	// Make sure we get the same results irrespective of leading/trailing spaces
15
-	cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
16
-	cmd := strings.ToLower(cmdline[0])
17
-
18
-	if len(cmdline) == 2 {
19
-		var err error
20
-		args, flags, err = extractBuilderFlags(cmdline[1])
21
-		if err != nil {
22
-			return "", nil, "", err
23
-		}
24
-	}
25
-
26
-	return cmd, flags, strings.TrimSpace(args), nil
27
-}
28
-
29
-func extractBuilderFlags(line string) (string, []string, error) {
30
-	// Parses the BuilderFlags and returns the remaining part of the line
31
-
32
-	const (
33
-		inSpaces = iota // looking for start of a word
34
-		inWord
35
-		inQuote
36
-	)
37
-
38
-	words := []string{}
39
-	phase := inSpaces
40
-	word := ""
41
-	quote := '\000'
42
-	blankOK := false
43
-	var ch rune
44
-
45
-	for pos := 0; pos <= len(line); pos++ {
46
-		if pos != len(line) {
47
-			ch = rune(line[pos])
48
-		}
49
-
50
-		if phase == inSpaces { // Looking for start of word
51
-			if pos == len(line) { // end of input
52
-				break
53
-			}
54
-			if unicode.IsSpace(ch) { // skip spaces
55
-				continue
56
-			}
57
-
58
-			// Only keep going if the next word starts with --
59
-			if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
60
-				return line[pos:], words, nil
61
-			}
62
-
63
-			phase = inWord // found something with "--", fall through
64
-		}
65
-		if (phase == inWord || phase == inQuote) && (pos == len(line)) {
66
-			if word != "--" && (blankOK || len(word) > 0) {
67
-				words = append(words, word)
68
-			}
69
-			break
70
-		}
71
-		if phase == inWord {
72
-			if unicode.IsSpace(ch) {
73
-				phase = inSpaces
74
-				if word == "--" {
75
-					return line[pos:], words, nil
76
-				}
77
-				if blankOK || len(word) > 0 {
78
-					words = append(words, word)
79
-				}
80
-				word = ""
81
-				blankOK = false
82
-				continue
83
-			}
84
-			if ch == '\'' || ch == '"' {
85
-				quote = ch
86
-				blankOK = true
87
-				phase = inQuote
88
-				continue
89
-			}
90
-			if ch == '\\' {
91
-				if pos+1 == len(line) {
92
-					continue // just skip \ at end
93
-				}
94
-				pos++
95
-				ch = rune(line[pos])
96
-			}
97
-			word += string(ch)
98
-			continue
99
-		}
100
-		if phase == inQuote {
101
-			if ch == quote {
102
-				phase = inWord
103
-				continue
104
-			}
105
-			if ch == '\\' {
106
-				if pos+1 == len(line) {
107
-					phase = inWord
108
-					continue // just skip \ at end
109
-				}
110
-				pos++
111
-				ch = rune(line[pos])
112
-			}
113
-			word += string(ch)
114
-		}
115
-	}
116
-
117
-	return "", words, nil
118
-}
119 1
deleted file mode 100644
... ...
@@ -1,35 +0,0 @@
1
-# ESCAPE=\
2
-
3
-
4
-
5
-FROM brimstone/ubuntu:14.04
6
-
7
-
8
-# TORUN -v /var/run/docker.sock:/var/run/docker.sock
9
-
10
-
11
-ENV GOPATH \
12
-/go
13
-
14
-
15
-
16
-# Install the packages we need, clean up after them and us
17
-RUN apt-get update \
18
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
19
-
20
-
21
-    && apt-get install -y --no-install-recommends git golang ca-certificates \
22
-    && apt-get clean \
23
-    && rm -rf /var/lib/apt/lists \
24
-
25
-	&& go get -v github.com/brimstone/consuldock \
26
-    && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
27
-
28
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
29
-	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
30
-	&& rm /tmp/dpkg.* \
31
-	&& rm -rf $GOPATH
32
-
33
-
34
-
35
-
36 1
deleted file mode 100644
... ...
@@ -1,3 +0,0 @@
1
-FROM busybox
2
-
3
-ENV PATH
4 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-CMD [ "echo", [ "nested json" ] ]
2 1
deleted file mode 100644
... ...
@@ -1,11 +0,0 @@
1
-FROM	ubuntu:14.04
2
-LABEL	maintainer	Seongyeol Lim <seongyeol37@gmail.com>
3
-
4
-COPY	.	/go/src/github.com/docker/docker
5
-ADD		.	/
6
-ADD		null /
7
-COPY	nullfile /tmp
8
-ADD		[ "vimrc", "/tmp" ]
9
-COPY	[ "bashrc", "/tmp" ]
10
-COPY	[ "test file", "/tmp" ]
11
-ADD		[ "test file", "/tmp/test file" ]
12 1
deleted file mode 100644
... ...
@@ -1,10 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(label "maintainer" "Seongyeol Lim <seongyeol37@gmail.com>")
3
-(copy "." "/go/src/github.com/docker/docker")
4
-(add "." "/")
5
-(add "null" "/")
6
-(copy "nullfile" "/tmp")
7
-(add "vimrc" "/tmp")
8
-(copy "bashrc" "/tmp")
9
-(copy "test file" "/tmp")
10
-(add "test file" "/tmp/test file")
11 1
deleted file mode 100644
... ...
@@ -1,26 +0,0 @@
1
-#escape=\
2
-FROM brimstone/ubuntu:14.04
3
-
4
-LABEL maintainer brimstone@the.narro.ws
5
-
6
-# TORUN -v /var/run/docker.sock:/var/run/docker.sock
7
-
8
-ENV GOPATH /go
9
-
10
-# Set our command
11
-ENTRYPOINT ["/usr/local/bin/consuldock"]
12
-
13
-# Install the packages we need, clean up after them and us
14
-RUN apt-get update \
15
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
16
-    && apt-get install -y --no-install-recommends git golang ca-certificates \
17
-    && apt-get clean \
18
-    && rm -rf /var/lib/apt/lists \
19
-
20
-	&& go get -v github.com/brimstone/consuldock \
21
-    && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \
22
-
23
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
24
-	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
25
-	&& rm /tmp/dpkg.* \
26
-	&& rm -rf $GOPATH
27 1
deleted file mode 100644
... ...
@@ -1,5 +0,0 @@
1
-(from "brimstone/ubuntu:14.04")
2
-(label "maintainer" "brimstone@the.narro.ws")
3
-(env "GOPATH" "/go")
4
-(entrypoint "/usr/local/bin/consuldock")
5
-(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends git golang ca-certificates     && apt-get clean     && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock     && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
6 1
deleted file mode 100644
... ...
@@ -1,52 +0,0 @@
1
-FROM brimstone/ubuntu:14.04
2
-
3
-CMD []
4
-
5
-ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"]
6
-
7
-EXPOSE 8500 8600 8400 8301 8302
8
-
9
-RUN apt-get update \
10
-    && apt-get install -y unzip wget \
11
-	&& apt-get clean \
12
-	&& rm -rf /var/lib/apt/lists
13
-
14
-RUN cd /tmp \
15
-    && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
16
-       -O web_ui.zip \
17
-    && unzip web_ui.zip \
18
-    && mv dist /webui \
19
-    && rm web_ui.zip
20
-
21
-RUN apt-get update \
22
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
23
-    && apt-get install -y --no-install-recommends unzip wget \
24
-    && apt-get clean \
25
-    && rm -rf /var/lib/apt/lists \
26
-
27
-    && cd /tmp \
28
-    && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \
29
-       -O web_ui.zip \
30
-    && unzip web_ui.zip \
31
-    && mv dist /webui \
32
-    && rm web_ui.zip \
33
-
34
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
35
-	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
36
-	&& rm /tmp/dpkg.*
37
-
38
-ENV GOPATH /go
39
-
40
-RUN apt-get update \
41
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
42
-    && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \
43
-    && apt-get clean \
44
-    && rm -rf /var/lib/apt/lists \
45
-
46
-	&& go get -v github.com/hashicorp/consul \
47
-	&& mv $GOPATH/bin/consul /usr/bin/consul \
48
-
49
-	&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
50
-	&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
51
-	&& rm /tmp/dpkg.* \
52
-	&& rm -rf $GOPATH
53 1
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-(from "brimstone/ubuntu:14.04")
2
-(cmd)
3
-(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui")
4
-(expose "8500" "8600" "8400" "8301" "8302")
5
-(run "apt-get update     && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists")
6
-(run "cd /tmp     && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip        -O web_ui.zip     && unzip web_ui.zip     && mv dist /webui     && rm web_ui.zip")
7
-(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends unzip wget     && apt-get clean     && rm -rf /var/lib/apt/lists     && cd /tmp     && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip        -O web_ui.zip     && unzip web_ui.zip     && mv dist /webui     && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*")
8
-(env "GOPATH" "/go")
9
-(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean     && apt-get install -y --no-install-recommends git golang ca-certificates build-essential     && apt-get clean     && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
10 1
deleted file mode 100644
... ...
@@ -1,3 +0,0 @@
1
-FROM alpine:3.5
2
-
3
-RUN something \
4 1
\ No newline at end of file
5 2
deleted file mode 100644
... ...
@@ -1,2 +0,0 @@
1
-(from "alpine:3.5")
2
-(run "something")
3 1
deleted file mode 100644
... ...
@@ -1,36 +0,0 @@
1
-FROM ubuntu:14.04
2
-
3
-RUN echo hello\
4
-  world\
5
-  goodnight  \
6
-  moon\
7
-  light\
8
-ning
9
-RUN echo hello  \
10
-  world
11
-RUN echo hello  \
12
-world
13
-RUN echo hello \
14
-goodbye\
15
-frog
16
-RUN echo hello  \  
17
-world
18
-RUN echo hi \
19
- \
20
- world \
21
-\
22
- good\
23
-\
24
-night
25
-RUN echo goodbye\
26
-frog
27
-RUN echo good\
28
-bye\
29
-frog
30
-
31
-RUN echo hello \
32
-# this is a comment
33
-
34
-# this is a comment with a blank line surrounding it
35
-
36
-this is some more useful stuff
37 1
deleted file mode 100644
... ...
@@ -1,10 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(run "echo hello  world  goodnight    moon  lightning")
3
-(run "echo hello    world")
4
-(run "echo hello  world")
5
-(run "echo hello goodbyefrog")
6
-(run "echo hello  world")
7
-(run "echo hi   world  goodnight")
8
-(run "echo goodbyefrog")
9
-(run "echo goodbyefrog")
10
-(run "echo hello this is some more useful stuff")
11 1
deleted file mode 100644
... ...
@@ -1,54 +0,0 @@
1
-FROM cpuguy83/ubuntu
2
-ENV NAGIOS_HOME /opt/nagios
3
-ENV NAGIOS_USER nagios
4
-ENV NAGIOS_GROUP nagios
5
-ENV NAGIOS_CMDUSER nagios
6
-ENV NAGIOS_CMDGROUP nagios
7
-ENV NAGIOSADMIN_USER nagiosadmin
8
-ENV NAGIOSADMIN_PASS nagios
9
-ENV APACHE_RUN_USER nagios
10
-ENV APACHE_RUN_GROUP nagios
11
-ENV NAGIOS_TIMEZONE UTC
12
-
13
-RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list
14
-RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx
15
-RUN ( egrep -i  "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
16
-RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )
17
-
18
-ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz
19
-RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios  && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
20
-ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/
21
-RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install
22
-
23
-RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars
24
-RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default
25
-
26
-RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo
27
-
28
-RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf
29
-
30
-RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
31
-RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
32
-RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
33
-RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf
34
-
35
-RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \
36
-  sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg
37
-RUN cp /etc/services /var/spool/postfix/etc/
38
-
39
-RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix
40
-ADD nagios.init /etc/sv/nagios/run
41
-ADD apache.init /etc/sv/apache/run
42
-ADD postfix.init /etc/sv/postfix/run
43
-ADD postfix.stop /etc/sv/postfix/finish
44
-
45
-ADD start.sh /usr/local/bin/start_nagios
46
-
47
-ENV APACHE_LOCK_DIR /var/run
48
-ENV APACHE_LOG_DIR /var/log/apache2
49
-
50
-EXPOSE 80
51
-
52
-VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"]
53
-
54
-CMD ["/usr/local/bin/start_nagios"]
55 1
deleted file mode 100644
... ...
@@ -1,40 +0,0 @@
1
-(from "cpuguy83/ubuntu")
2
-(env "NAGIOS_HOME" "/opt/nagios")
3
-(env "NAGIOS_USER" "nagios")
4
-(env "NAGIOS_GROUP" "nagios")
5
-(env "NAGIOS_CMDUSER" "nagios")
6
-(env "NAGIOS_CMDGROUP" "nagios")
7
-(env "NAGIOSADMIN_USER" "nagiosadmin")
8
-(env "NAGIOSADMIN_PASS" "nagios")
9
-(env "APACHE_RUN_USER" "nagios")
10
-(env "APACHE_RUN_GROUP" "nagios")
11
-(env "NAGIOS_TIMEZONE" "UTC")
12
-(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list")
13
-(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx")
14
-(run "( egrep -i  \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
15
-(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )")
16
-(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz")
17
-(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios  && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
18
-(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/")
19
-(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
20
-(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars")
21
-(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default")
22
-(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
23
-(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
24
-(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
25
-(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
26
-(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
27
-(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf")
28
-(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg &&   sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg")
29
-(run "cp /etc/services /var/spool/postfix/etc/")
30
-(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix")
31
-(add "nagios.init" "/etc/sv/nagios/run")
32
-(add "apache.init" "/etc/sv/apache/run")
33
-(add "postfix.init" "/etc/sv/postfix/run")
34
-(add "postfix.stop" "/etc/sv/postfix/finish")
35
-(add "start.sh" "/usr/local/bin/start_nagios")
36
-(env "APACHE_LOCK_DIR" "/var/run")
37
-(env "APACHE_LOG_DIR" "/var/log/apache2")
38
-(expose "80")
39
-(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
40
-(cmd "/usr/local/bin/start_nagios")
41 1
deleted file mode 100644
... ...
@@ -1,94 +0,0 @@
1
-# This file describes the standard way to build Docker, using docker
2
-#
3
-# Usage:
4
-#
5
-# # Assemble the full dev environment. This is slow the first time.
6
-# docker build -t docker .
7
-#
8
-# # Mount your source in an interactive container for quick testing:
9
-# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
10
-#
11
-# # Run the test suite:
12
-# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py
13
-#
14
-# Note: AppArmor used to mess with privileged mode, but this is no longer
15
-# the case. Therefore, you don't have to disable it anymore.
16
-#
17
-
18
-FROM	ubuntu:14.04
19
-LABEL	maintainer	Tianon Gravi <admwiggin@gmail.com> (@tianon)
20
-
21
-# Packaged dependencies
22
-RUN	apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
23
-	apt-utils \
24
-	aufs-tools \
25
-	automake \
26
-	btrfs-tools \
27
-	build-essential \
28
-	curl \
29
-	dpkg-sig \
30
-	git \
31
-	iptables \
32
-	libapparmor-dev \
33
-	libcap-dev \
34
-	mercurial \
35
-	pandoc \
36
-	parallel \
37
-	reprepro \
38
-	ruby1.9.1 \
39
-	ruby1.9.1-dev \
40
-	s3cmd=1.1.0* \
41
-	--no-install-recommends
42
-
43
-# Get lvm2 source for compiling statically
44
-RUN	git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
45
-# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
46
-# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly
47
-
48
-# Compile and install lvm2
49
-RUN	cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper
50
-# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
51
-
52
-# Install Go
53
-RUN	curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz
54
-ENV	PATH	/usr/local/go/bin:$PATH
55
-ENV	GOPATH	/go:/go/src/github.com/docker/docker/vendor
56
-RUN	cd /usr/local/go/src && ./make.bash --no-clean 2>&1
57
-
58
-# Compile Go for cross compilation
59
-ENV	DOCKER_CROSSPLATFORMS	\
60
-	linux/386 linux/arm \
61
-	darwin/amd64 darwin/386 \
62
-	freebsd/amd64 freebsd/386 freebsd/arm
63
-# (set an explicit GOARM of 5 for maximum compatibility)
64
-ENV	GOARM	5
65
-RUN	cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
66
-
67
-# Grab Go's cover tool for dead-simple code coverage testing
68
-RUN	go get golang.org/x/tools/cmd/cover
69
-
70
-# TODO replace FPM with some very minimal debhelper stuff
71
-RUN	gem install --no-rdoc --no-ri fpm --version 1.0.2
72
-
73
-# Get the "busybox" image source so we can build locally instead of pulling
74
-RUN	git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox
75
-
76
-# Setup s3cmd config
77
-RUN	/bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg
78
-
79
-# Set user.email so crosbymichael's in-container merge commits go smoothly
80
-RUN	git config --global user.email 'docker-dummy@example.com'
81
-
82
-# Add an unprivileged user to be used for tests which need it
83
-RUN groupadd -r docker
84
-RUN useradd --create-home --gid docker unprivilegeduser
85
-
86
-VOLUME	/var/lib/docker
87
-WORKDIR	/go/src/github.com/docker/docker
88
-ENV	DOCKER_BUILDTAGS	apparmor selinux
89
-
90
-# Wrap all commands in the "docker-in-docker" script to allow nested containers
91
-ENTRYPOINT	["hack/dind"]
92
-
93
-# Upload docker source
94
-COPY	.	/go/src/github.com/docker/docker
95 1
deleted file mode 100644
... ...
@@ -1,24 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(label "maintainer" "Tianon Gravi <admwiggin@gmail.com> (@tianon)")
3
-(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends")
4
-(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103")
5
-(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper")
6
-(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz")
7
-(env "PATH" "/usr/local/go/bin:$PATH")
8
-(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor")
9
-(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1")
10
-(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm")
11
-(env "GOARM" "5")
12
-(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
13
-(run "go get golang.org/x/tools/cmd/cover")
14
-(run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
15
-(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
16
-(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")
17
-(run "git config --global user.email 'docker-dummy@example.com'")
18
-(run "groupadd -r docker")
19
-(run "useradd --create-home --gid docker unprivilegeduser")
20
-(volume "/var/lib/docker")
21
-(workdir "/go/src/github.com/docker/docker")
22
-(env "DOCKER_BUILDTAGS" "apparmor selinux")
23
-(entrypoint "hack/dind")
24
-(copy "." "/go/src/github.com/docker/docker")
25 1
deleted file mode 100644
... ...
@@ -1,23 +0,0 @@
1
-FROM ubuntu
2
-ENV name value
3
-ENV name=value
4
-ENV name=value name2=value2
5
-ENV name="value value1"
6
-ENV name=value\ value2
7
-ENV name="value'quote space'value2"
8
-ENV name='value"double quote"value2'
9
-ENV name=value\ value2 name2=value2\ value3
10
-ENV name="a\"b"
11
-ENV name="a\'b"
12
-ENV name='a\'b'
13
-ENV name='a\'b''
14
-ENV name='a\"b'
15
-ENV name="''"
16
-# don't put anything after the next line - it must be the last line of the
17
-# Dockerfile and it must end with \
18
-ENV name=value \
19
-    name1=value1 \
20
-    name2="value2a \
21
-           value2b" \
22
-    name3="value3a\n\"value3b\"" \
23
-	name4="value4a\\nvalue4b" \
24 1
deleted file mode 100644
... ...
@@ -1,16 +0,0 @@
1
-(from "ubuntu")
2
-(env "name" "value")
3
-(env "name" "value")
4
-(env "name" "value" "name2" "value2")
5
-(env "name" "\"value value1\"")
6
-(env "name" "value\\ value2")
7
-(env "name" "\"value'quote space'value2\"")
8
-(env "name" "'value\"double quote\"value2'")
9
-(env "name" "value\\ value2" "name2" "value2\\ value3")
10
-(env "name" "\"a\\\"b\"")
11
-(env "name" "\"a\\'b\"")
12
-(env "name" "'a\\'b'")
13
-(env "name" "'a\\'b''")
14
-(env "name" "'a\\\"b'")
15
-(env "name" "\"''\"")
16
-(env "name" "value" "name1" "value1" "name2" "\"value2a            value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"")
17 1
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-# Comment here. Should not be looking for the following parser directive.
2
-# Hence the following line will be ignored, and the subsequent backslash
3
-# continuation will be the default.
4
-# escape = `
5
-
6
-FROM image
7
-LABEL maintainer foo@bar.com
8
-ENV GOPATH \
9
-\go
10 1
\ No newline at end of file
11 2
deleted file mode 100644
... ...
@@ -1,3 +0,0 @@
1
-(from "image")
2
-(label "maintainer" "foo@bar.com")
3
-(env "GOPATH" "\\go")
4 1
deleted file mode 100644
... ...
@@ -1,7 +0,0 @@
1
-# escape = ``
2
-# There is no white space line after the directives. This still succeeds, but goes
3
-# against best practices.
4
-FROM image
5
-LABEL maintainer foo@bar.com
6
-ENV GOPATH `
7
-\go
8 1
\ No newline at end of file
9 2
deleted file mode 100644
... ...
@@ -1,3 +0,0 @@
1
-(from "image")
2
-(label "maintainer" "foo@bar.com")
3
-(env "GOPATH" "\\go")
4 1
deleted file mode 100644
... ...
@@ -1,6 +0,0 @@
1
-#escape = `
2
-
3
-FROM image
4
-LABEL maintainer foo@bar.com
5
-ENV GOPATH `
6
-\go
7 1
\ No newline at end of file
8 2
deleted file mode 100644
... ...
@@ -1,3 +0,0 @@
1
-(from "image")
2
-(label "maintainer" "foo@bar.com")
3
-(env "GOPATH" "\\go")
4 1
deleted file mode 100644
... ...
@@ -1,14 +0,0 @@
1
-FROM ubuntu:14.04
2
-LABEL maintainer Erik \\Hollensbe <erik@hollensbe.org>\"
3
-
4
-RUN apt-get \update && \
5
-  apt-get \"install znc -y
6
-ADD \conf\\" /.znc
7
-
8
-RUN foo \
9
-
10
-bar \
11
-
12
-baz
13
-
14
-CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ]
15 1
deleted file mode 100644
... ...
@@ -1,6 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(label "maintainer" "Erik \\\\Hollensbe <erik@hollensbe.org>\\\"")
3
-(run "apt-get \\update &&   apt-get \\\"install znc -y")
4
-(add "\\conf\\\\\"" "/.znc")
5
-(run "foo bar baz")
6
-(cmd "/usr\\\"/bin/znc" "-f" "-r")
7 1
deleted file mode 100644
... ...
@@ -1,10 +0,0 @@
1
-FROM scratch
2
-COPY foo /tmp/
3
-COPY --user=me foo /tmp/
4
-COPY --doit=true foo /tmp/
5
-COPY --user=me --doit=true foo /tmp/
6
-COPY --doit=true -- foo /tmp/
7
-COPY -- foo /tmp/
8
-CMD --doit [ "a", "b" ]
9
-CMD --doit=true -- [ "a", "b" ]
10
-CMD --doit -- [ ]
11 1
deleted file mode 100644
... ...
@@ -1,10 +0,0 @@
1
-(from "scratch")
2
-(copy "foo" "/tmp/")
3
-(copy ["--user=me"] "foo" "/tmp/")
4
-(copy ["--doit=true"] "foo" "/tmp/")
5
-(copy ["--user=me" "--doit=true"] "foo" "/tmp/")
6
-(copy ["--doit=true"] "foo" "/tmp/")
7
-(copy "foo" "/tmp/")
8
-(cmd ["--doit"] "a" "b")
9
-(cmd ["--doit=true"] "a" "b")
10
-(cmd ["--doit"])
11 1
deleted file mode 100644
... ...
@@ -1,10 +0,0 @@
1
-FROM debian
2
-ADD check.sh main.sh /app/
3
-CMD /app/main.sh
4
-HEALTHCHECK
5
-HEALTHCHECK --interval=5s --timeout=3s --retries=3 \
6
-  CMD /app/check.sh --quiet
7
-HEALTHCHECK CMD
8
-HEALTHCHECK   CMD   a b
9
-HEALTHCHECK --timeout=3s CMD ["foo"]
10
-HEALTHCHECK CONNECT TCP 7000
11 1
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-(from "debian")
2
-(add "check.sh" "main.sh" "/app/")
3
-(cmd "/app/main.sh")
4
-(healthcheck)
5
-(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet")
6
-(healthcheck "CMD")
7
-(healthcheck "CMD" "a b")
8
-(healthcheck ["--timeout=3s"] "CMD" "foo")
9
-(healthcheck "CONNECT" "TCP 7000")
10 1
deleted file mode 100644
... ...
@@ -1,15 +0,0 @@
1
-FROM ubuntu:14.04
2
-
3
-RUN apt-get update && apt-get install wget -y
4
-RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb
5
-RUN dpkg -i influxdb_latest_amd64.deb
6
-RUN rm -r /opt/influxdb/shared
7
-
8
-VOLUME /opt/influxdb/shared
9
-
10
-CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml
11
-
12
-EXPOSE 8083
13
-EXPOSE 8086
14
-EXPOSE 8090
15
-EXPOSE 8099
16 1
deleted file mode 100644
... ...
@@ -1,11 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(run "apt-get update && apt-get install wget -y")
3
-(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb")
4
-(run "dpkg -i influxdb_latest_amd64.deb")
5
-(run "rm -r /opt/influxdb/shared")
6
-(volume "/opt/influxdb/shared")
7
-(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml")
8
-(expose "8083")
9
-(expose "8086")
10
-(expose "8090")
11
-(expose "8099")
12 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]"
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"")
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-CMD '["echo", "Well, JSON in a string is JSON too?"]'
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'")
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-CMD ['echo','single quotes are invalid JSON']
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-(cmd "['echo','single quotes are invalid JSON']")
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-CMD ["echo", "Please, close the brackets when you're done"
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-(cmd "[\"echo\", \"Please, close the brackets when you're done\"")
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-CMD ["echo", "look ma, no quote!]
2 1
deleted file mode 100644
... ...
@@ -1 +0,0 @@
1
-(cmd "[\"echo\", \"look ma, no quote!]")
2 1
deleted file mode 100644
... ...
@@ -1,8 +0,0 @@
1
-CMD []
2
-CMD [""]
3
-CMD ["a"]
4
-CMD ["a","b"]
5
-CMD [ "a", "b" ]
6
-CMD [	"a",	"b"	]
7
-CMD	[	"a",	"b"	]	
8
-CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]
9 1
deleted file mode 100644
... ...
@@ -1,8 +0,0 @@
1
-(cmd)
2
-(cmd "")
3
-(cmd "a")
4
-(cmd "a" "b")
5
-(cmd "a" "b")
6
-(cmd "a" "b")
7
-(cmd "a" "b")
8
-(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00")
9 1
deleted file mode 100644
... ...
@@ -1,7 +0,0 @@
1
-FROM ubuntu:14.04
2
-LABEL maintainer James Turnbull "james@example.com"
3
-ENV REFRESHED_AT 2014-06-01
4
-RUN apt-get update
5
-RUN apt-get -y install redis-server redis-tools
6
-EXPOSE 6379
7
-ENTRYPOINT [ "/usr/bin/redis-server" ]
8 1
deleted file mode 100644
... ...
@@ -1,7 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(label "maintainer" "James Turnbull \"james@example.com\"")
3
-(env "REFRESHED_AT" "2014-06-01")
4
-(run "apt-get update")
5
-(run "apt-get -y install redis-server redis-tools")
6
-(expose "6379")
7
-(entrypoint "/usr/bin/redis-server")
8 1
deleted file mode 100644
... ...
@@ -1,48 +0,0 @@
1
-FROM busybox:buildroot-2014.02
2
-
3
-LABEL maintainer docker <docker@docker.io>
4
-
5
-ONBUILD RUN ["echo", "test"]
6
-ONBUILD RUN echo test
7
-ONBUILD COPY . /
8
-
9
-
10
-# RUN Commands \
11
-# linebreak in comment \
12
-RUN ["ls", "-la"]
13
-RUN ["echo", "'1234'"]
14
-RUN echo "1234"
15
-RUN echo 1234
16
-RUN echo '1234' && \
17
-    echo "456" && \
18
-    echo 789
19
-RUN    sh -c 'echo root:testpass \
20
-        > /tmp/passwd'
21
-RUN mkdir -p /test /test2 /test3/test
22
-
23
-# ENV \
24
-ENV SCUBA 1 DUBA 3
25
-ENV SCUBA "1 DUBA 3"
26
-
27
-# CMD \
28
-CMD ["echo", "test"]
29
-CMD echo test
30
-CMD echo "test"
31
-CMD echo 'test'
32
-CMD echo 'test' | wc -
33
-
34
-#EXPOSE\
35
-EXPOSE 3000
36
-EXPOSE 9000 5000 6000
37
-
38
-USER docker
39
-USER docker:root
40
-
41
-VOLUME ["/test"]
42
-VOLUME ["/test", "/test2"]
43
-VOLUME /test3
44
-
45
-WORKDIR /test
46
-
47
-ADD . /
48
-COPY . copy
49 1
deleted file mode 100644
... ...
@@ -1,29 +0,0 @@
1
-(from "busybox:buildroot-2014.02")
2
-(label "maintainer" "docker <docker@docker.io>")
3
-(onbuild (run "echo" "test"))
4
-(onbuild (run "echo test"))
5
-(onbuild (copy "." "/"))
6
-(run "ls" "-la")
7
-(run "echo" "'1234'")
8
-(run "echo \"1234\"")
9
-(run "echo 1234")
10
-(run "echo '1234' &&     echo \"456\" &&     echo 789")
11
-(run "sh -c 'echo root:testpass         > /tmp/passwd'")
12
-(run "mkdir -p /test /test2 /test3/test")
13
-(env "SCUBA" "1 DUBA 3")
14
-(env "SCUBA" "\"1 DUBA 3\"")
15
-(cmd "echo" "test")
16
-(cmd "echo test")
17
-(cmd "echo \"test\"")
18
-(cmd "echo 'test'")
19
-(cmd "echo 'test' | wc -")
20
-(expose "3000")
21
-(expose "9000" "5000" "6000")
22
-(user "docker")
23
-(user "docker:root")
24
-(volume "/test")
25
-(volume "/test" "/test2")
26
-(volume "/test3")
27
-(workdir "/test")
28
-(add "." "/")
29
-(copy "." "copy")
30 1
deleted file mode 100644
... ...
@@ -1,16 +0,0 @@
1
-FROM ubuntu:14.04
2
-
3
-RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y
4
-ADD .muttrc /
5
-ADD .offlineimaprc /
6
-ADD .tmux.conf /
7
-ADD mutt /.mutt
8
-ADD vim /.vim
9
-ADD vimrc /.vimrc
10
-ADD crontab /etc/crontab
11
-RUN chmod 644 /etc/crontab
12
-RUN mkdir /Mail
13
-RUN mkdir /.offlineimap
14
-RUN echo "export TERM=screen-256color" >/.zshenv
15
-
16
-CMD setsid cron; tmux -2
17 1
deleted file mode 100644
... ...
@@ -1,14 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y")
3
-(add ".muttrc" "/")
4
-(add ".offlineimaprc" "/")
5
-(add ".tmux.conf" "/")
6
-(add "mutt" "/.mutt")
7
-(add "vim" "/.vim")
8
-(add "vimrc" "/.vimrc")
9
-(add "crontab" "/etc/crontab")
10
-(run "chmod 644 /etc/crontab")
11
-(run "mkdir /Mail")
12
-(run "mkdir /.offlineimap")
13
-(run "echo \"export TERM=screen-256color\" >/.zshenv")
14
-(cmd "setsid cron; tmux -2")
15 1
deleted file mode 100644
... ...
@@ -1,3 +0,0 @@
1
-FROM foo
2
-
3
-VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs
4 1
deleted file mode 100644
... ...
@@ -1,2 +0,0 @@
1
-(from "foo")
2
-(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs")
3 1
deleted file mode 100644
... ...
@@ -1,7 +0,0 @@
1
-FROM ubuntu:14.04
2
-
3
-RUN apt-get update && apt-get install libcap2-bin mumble-server -y
4
-
5
-ADD ./mumble-server.ini /etc/mumble-server.ini
6
-
7
-CMD /usr/sbin/murmurd
8 1
deleted file mode 100644
... ...
@@ -1,4 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(run "apt-get update && apt-get install libcap2-bin mumble-server -y")
3
-(add "./mumble-server.ini" "/etc/mumble-server.ini")
4
-(cmd "/usr/sbin/murmurd")
5 1
deleted file mode 100644
... ...
@@ -1,14 +0,0 @@
1
-FROM ubuntu:14.04
2
-LABEL maintainer Erik Hollensbe <erik@hollensbe.org>
3
-
4
-RUN apt-get update && apt-get install nginx-full -y
5
-RUN rm -rf /etc/nginx
6
-ADD etc /etc/nginx
7
-RUN chown -R root:root /etc/nginx
8
-RUN /usr/sbin/nginx -qt
9
-RUN mkdir /www
10
-
11
-CMD ["/usr/sbin/nginx"]
12
-
13
-VOLUME /www
14
-EXPOSE 80
15 1
deleted file mode 100644
... ...
@@ -1,11 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(label "maintainer" "Erik Hollensbe <erik@hollensbe.org>")
3
-(run "apt-get update && apt-get install nginx-full -y")
4
-(run "rm -rf /etc/nginx")
5
-(add "etc" "/etc/nginx")
6
-(run "chown -R root:root /etc/nginx")
7
-(run "/usr/sbin/nginx -qt")
8
-(run "mkdir /www")
9
-(cmd "/usr/sbin/nginx")
10
-(volume "/www")
11
-(expose "80")
12 1
deleted file mode 100644
... ...
@@ -1,23 +0,0 @@
1
-FROM ubuntu:12.04
2
-
3
-EXPOSE 27015
4
-EXPOSE 27005
5
-EXPOSE 26901
6
-EXPOSE 27020
7
-
8
-RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y
9
-RUN mkdir -p /steam
10
-RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam
11
-ADD ./script /steam/script
12
-RUN /steam/steamcmd.sh +runscript /steam/script
13
-RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf
14
-RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf
15
-ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg
16
-ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg
17
-ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg
18
-RUN rm -r /steam/tf2/tf/addons/sourcemod/configs
19
-ADD ./configs /steam/tf2/tf/addons/sourcemod/configs
20
-RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en
21
-RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en
22
-
23
-CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill
24 1
deleted file mode 100644
... ...
@@ -1,20 +0,0 @@
1
-(from "ubuntu:12.04")
2
-(expose "27015")
3
-(expose "27005")
4
-(expose "26901")
5
-(expose "27020")
6
-(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y")
7
-(run "mkdir -p /steam")
8
-(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam")
9
-(add "./script" "/steam/script")
10
-(run "/steam/steamcmd.sh +runscript /steam/script")
11
-(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf")
12
-(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf")
13
-(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg")
14
-(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg")
15
-(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg")
16
-(run "rm -r /steam/tf2/tf/addons/sourcemod/configs")
17
-(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs")
18
-(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en")
19
-(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en")
20
-(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill")
21 1
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-FROM ubuntu:14.04
2
-
3
-RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y
4
-
5
-ADD .weechat /.weechat
6
-ADD .tmux.conf /
7
-RUN echo "export TERM=screen-256color" >/.zshenv
8
-
9
-CMD zsh -c weechat
10 1
deleted file mode 100644
... ...
@@ -1,6 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y")
3
-(add ".weechat" "/.weechat")
4
-(add ".tmux.conf" "/")
5
-(run "echo \"export TERM=screen-256color\" >/.zshenv")
6
-(cmd "zsh -c weechat")
7 1
deleted file mode 100644
... ...
@@ -1,7 +0,0 @@
1
-FROM ubuntu:14.04
2
-LABEL maintainer Erik Hollensbe <erik@hollensbe.org>
3
-
4
-RUN apt-get update && apt-get install znc -y
5
-ADD conf /.znc
6
-
7
-CMD [ "/usr/bin/znc", "-f", "-r" ]
8 1
deleted file mode 100644
... ...
@@ -1,5 +0,0 @@
1
-(from "ubuntu:14.04")
2
-(label "maintainer" "Erik Hollensbe <erik@hollensbe.org>")
3
-(run "apt-get update && apt-get install znc -y")
4
-(add "conf" "/.znc")
5
-(cmd "/usr/bin/znc" "-f" "-r")
6 1
deleted file mode 100644
... ...
@@ -1,232 +0,0 @@
1
-A|hello                    |     hello
2
-A|he'll'o                  |     hello
3
-A|he'llo                   |     error
4
-A|he\'llo                  |     he'llo
5
-A|he\\'llo                 |     error
6
-A|abc\tdef                 |     abctdef
7
-A|"abc\tdef"               |     abc\tdef
8
-A|"abc\\tdef"              |     abc\tdef
9
-A|'abc\tdef'               |     abc\tdef
10
-A|hello\                   |     hello
11
-A|hello\\                  |     hello\
12
-A|"hello                   |     error
13
-A|"hello\"                 |     error
14
-A|"hel'lo"                 |     hel'lo
15
-A|'hello                   |     error
16
-A|'hello\'                 |     hello\
17
-A|'hello\there'            |     hello\there
18
-A|'hello\\there'           |     hello\\there
19
-A|"''"                     |     ''
20
-A|$.                       |     $.
21
-A|he$1x                    |     hex
22
-A|he$.x                    |     he$.x
23
-# Next one is different on Windows as $pwd==$PWD
24
-U|he$pwd.                  |     he.
25
-W|he$pwd.                  |     he/home.
26
-A|he$PWD                   |     he/home
27
-A|he\$PWD                  |     he$PWD
28
-A|he\\$PWD                 |     he\/home
29
-A|"he\$PWD"                |     he$PWD
30
-A|"he\\$PWD"               |     he\/home
31
-A|\${}                     |     ${}
32
-A|\${}aaa                  |     ${}aaa
33
-A|he\${}                   |     he${}
34
-A|he\${}xx                 |     he${}xx
35
-A|${}                      |     error
36
-A|${}aaa                   |     error
37
-A|he${}                    |     error
38
-A|he${}xx                  |     error
39
-A|he${hi}                  |     he
40
-A|he${hi}xx                |     hexx
41
-A|he${PWD}                 |     he/home
42
-A|he${.}                   |     error
43
-A|he${XXX:-000}xx          |     he000xx
44
-A|he${PWD:-000}xx          |     he/homexx
45
-A|he${XXX:-$PWD}xx         |     he/homexx
46
-A|he${XXX:-${PWD:-yyy}}xx  |     he/homexx
47
-A|he${XXX:-${YYY:-yyy}}xx  |     heyyyxx
48
-A|he${XXX:YYY}             |     error
49
-A|he${XXX:+${PWD}}xx       |     hexx
50
-A|he${PWD:+${XXX}}xx       |     hexx
51
-A|he${PWD:+${SHELL}}xx     |     hebashxx
52
-A|he${XXX:+000}xx          |     hexx
53
-A|he${PWD:+000}xx          |     he000xx
54
-A|'he${XX}'                |     he${XX}
55
-A|"he${PWD}"               |     he/home
56
-A|"he'$PWD'"               |     he'/home'
57
-A|"$PWD"                   |     /home
58
-A|'$PWD'                   |     $PWD
59
-A|'\$PWD'                  |     \$PWD
60
-A|'"hello"'                |     "hello"
61
-A|he\$PWD                  |     he$PWD
62
-A|"he\$PWD"                |     he$PWD
63
-A|'he\$PWD'                |     he\$PWD
64
-A|he${PWD                  |     error
65
-A|he${PWD:=000}xx          |     error
66
-A|he${PWD:+${PWD}:}xx      |     he/home:xx
67
-A|he${XXX:-\$PWD:}xx       |     he$PWD:xx
68
-A|he${XXX:-\${PWD}z}xx     |     he${PWDz}xx
69
-A|안녕하세요                 |     안녕하세요
70
-A|안'녕'하세요               |     안녕하세요
71
-A|안'녕하세요                |     error
72
-A|안녕\'하세요               |     안녕'하세요
73
-A|안\\'녕하세요              |     error
74
-A|안녕\t하세요               |     안녕t하세요
75
-A|"안녕\t하세요"             |     안녕\t하세요
76
-A|'안녕\t하세요              |     error
77
-A|안녕하세요\                |     안녕하세요
78
-A|안녕하세요\\               |     안녕하세요\
79
-A|"안녕하세요                |     error
80
-A|"안녕하세요\"              |     error
81
-A|"안녕'하세요"              |     안녕'하세요
82
-A|'안녕하세요                |     error
83
-A|'안녕하세요\'              |     안녕하세요\
84
-A|안녕$1x                    |     안녕x
85
-A|안녕$.x                    |     안녕$.x
86
-# Next one is different on Windows as $pwd==$PWD
87
-U|안녕$pwd.                  |     안녕.
88
-W|안녕$pwd.                  |     안녕/home.
89
-A|안녕$PWD                   |     안녕/home
90
-A|안녕\$PWD                  |     안녕$PWD
91
-A|안녕\\$PWD                 |     안녕\/home
92
-A|안녕\${}                   |     안녕${}
93
-A|안녕\${}xx                 |     안녕${}xx
94
-A|안녕${}                    |     error
95
-A|안녕${}xx                  |     error
96
-A|안녕${hi}                  |     안녕
97
-A|안녕${hi}xx                |     안녕xx
98
-A|안녕${PWD}                 |     안녕/home
99
-A|안녕${.}                   |     error
100
-A|안녕${XXX:-000}xx          |     안녕000xx
101
-A|안녕${PWD:-000}xx          |     안녕/homexx
102
-A|안녕${XXX:-$PWD}xx         |     안녕/homexx
103
-A|안녕${XXX:-${PWD:-yyy}}xx  |     안녕/homexx
104
-A|안녕${XXX:-${YYY:-yyy}}xx  |     안녕yyyxx
105
-A|안녕${XXX:YYY}             |     error
106
-A|안녕${XXX:+${PWD}}xx       |     안녕xx
107
-A|안녕${PWD:+${XXX}}xx       |     안녕xx
108
-A|안녕${PWD:+${SHELL}}xx     |     안녕bashxx
109
-A|안녕${XXX:+000}xx          |     안녕xx
110
-A|안녕${PWD:+000}xx          |     안녕000xx
111
-A|'안녕${XX}'                |     안녕${XX}
112
-A|"안녕${PWD}"               |     안녕/home
113
-A|"안녕'$PWD'"               |     안녕'/home'
114
-A|'"안녕"'                   |     "안녕"
115
-A|안녕\$PWD                  |     안녕$PWD
116
-A|"안녕\$PWD"                |     안녕$PWD
117
-A|'안녕\$PWD'                |     안녕\$PWD
118
-A|안녕${PWD                  |     error
119
-A|안녕${PWD:=000}xx          |     error
120
-A|안녕${PWD:+${PWD}:}xx      |     안녕/home:xx
121
-A|안녕${XXX:-\$PWD:}xx       |     안녕$PWD:xx
122
-A|안녕${XXX:-\${PWD}z}xx     |     안녕${PWDz}xx
123
-A|$KOREAN                    |     한국어
124
-A|안녕$KOREAN                |     안녕한국어
125
-A|${{aaa}                   |     error
126
-A|${aaa}}                   |     }
127
-A|${aaa                     |     error
128
-A|${{aaa:-bbb}              |     error
129
-A|${aaa:-bbb}}              |     bbb}
130
-A|${aaa:-bbb                |     error
131
-A|${aaa:-bbb}               |     bbb
132
-A|${aaa:-${bbb:-ccc}}       |     ccc
133
-A|${aaa:-bbb ${foo}         |     error
134
-A|${aaa:-bbb {foo}          |     bbb {foo
135
-A|${:}                      |     error
136
-A|${:-bbb}                  |     error
137
-A|${:+bbb}                  |     error
138
-
139
-# Positional parameters won't be set:
140
-# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_01
141
-A|$1                        |
142
-A|${1}                      |
143
-A|${1:+bbb}                 |
144
-A|${1:-bbb}                 |     bbb
145
-A|$2                        |
146
-A|${2}                      |
147
-A|${2:+bbb}                 |
148
-A|${2:-bbb}                 |     bbb
149
-A|$3                        |
150
-A|${3}                      |
151
-A|${3:+bbb}                 |
152
-A|${3:-bbb}                 |     bbb
153
-A|$4                        |
154
-A|${4}                      |
155
-A|${4:+bbb}                 |
156
-A|${4:-bbb}                 |     bbb
157
-A|$5                        |
158
-A|${5}                      |
159
-A|${5:+bbb}                 |
160
-A|${5:-bbb}                 |     bbb
161
-A|$6                        |
162
-A|${6}                      |
163
-A|${6:+bbb}                 |
164
-A|${6:-bbb}                 |     bbb
165
-A|$7                        |
166
-A|${7}                      |
167
-A|${7:+bbb}                 |
168
-A|${7:-bbb}                 |     bbb
169
-A|$8                        |
170
-A|${8}                      |
171
-A|${8:+bbb}                 |
172
-A|${8:-bbb}                 |     bbb
173
-A|$9                        |
174
-A|${9}                      |
175
-A|${9:+bbb}                 |
176
-A|${9:-bbb}                 |     bbb
177
-A|$999                      |
178
-A|${999}                    |
179
-A|${999:+bbb}               |
180
-A|${999:-bbb}               |     bbb
181
-A|$999aaa                   |     aaa
182
-A|${999}aaa                 |     aaa
183
-A|${999:+bbb}aaa            |     aaa
184
-A|${999:-bbb}aaa            |     bbbaaa
185
-A|$001                      |
186
-A|${001}                    |
187
-A|${001:+bbb}               |
188
-A|${001:-bbb}               |     bbb
189
-A|$001aaa                   |     aaa
190
-A|${001}aaa                 |     aaa
191
-A|${001:+bbb}aaa            |     aaa
192
-A|${001:-bbb}aaa            |     bbbaaa
193
-
194
-# Special parameters won't be set in the Dockerfile:
195
-# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
196
-A|$@                        |
197
-A|${@}                      |
198
-A|${@:+bbb}                 |
199
-A|${@:-bbb}                 |     bbb
200
-A|$@@@                      |     @@
201
-A|$@aaa                     |     aaa
202
-A|${@}aaa                   |     aaa
203
-A|${@:+bbb}aaa              |     aaa
204
-A|${@:-bbb}aaa              |     bbbaaa
205
-A|$*                        |
206
-A|${*}                      |
207
-A|${*:+bbb}                 |
208
-A|${*:-bbb}                 |     bbb
209
-A|$#                        |
210
-A|${#}                      |
211
-A|${#:+bbb}                 |
212
-A|${#:-bbb}                 |     bbb
213
-A|$?                        |
214
-A|${?}                      |
215
-A|${?:+bbb}                 |
216
-A|${?:-bbb}                 |     bbb
217
-A|$-                        |
218
-A|${-}                      |
219
-A|${-:+bbb}                 |
220
-A|${-:-bbb}                 |     bbb
221
-A|$$                        |
222
-A|${$}                      |
223
-A|${$:+bbb}                 |
224
-A|${$:-bbb}                 |     bbb
225
-A|$!                        |
226
-A|${!}                      |
227
-A|${!:+bbb}                 |
228
-A|${!:-bbb}                 |     bbb
229
-A|$0                        |
230
-A|${0}                      |
231
-A|${0:+bbb}                 |
232
-A|${0:-bbb}                 |     bbb
233 1
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-// +build !windows
2
-
3
-package shell // import "github.com/docker/docker/builder/dockerfile/shell"
4
-
5
-// EqualEnvKeys compare two strings and returns true if they are equal. On
6
-// Windows this comparison is case insensitive.
7
-func EqualEnvKeys(from, to string) bool {
8
-	return from == to
9
-}
10 1
deleted file mode 100644
... ...
@@ -1,9 +0,0 @@
1
-package shell // import "github.com/docker/docker/builder/dockerfile/shell"
2
-
3
-import "strings"
4
-
5
-// EqualEnvKeys compare two strings and returns true if they are equal. On
6
-// Windows this comparison is case insensitive.
7
-func EqualEnvKeys(from, to string) bool {
8
-	return strings.ToUpper(from) == strings.ToUpper(to)
9
-}
10 1
deleted file mode 100644
... ...
@@ -1,373 +0,0 @@
1
-package shell // import "github.com/docker/docker/builder/dockerfile/shell"
2
-
3
-import (
4
-	"bytes"
5
-	"strings"
6
-	"text/scanner"
7
-	"unicode"
8
-
9
-	"github.com/pkg/errors"
10
-)
11
-
12
-// Lex performs shell word splitting and variable expansion.
13
-//
14
-// Lex takes a string and an array of env variables and
15
-// process all quotes (" and ') as well as $xxx and ${xxx} env variable
16
-// tokens.  Tries to mimic bash shell process.
17
-// It doesn't support all flavors of ${xx:...} formats but new ones can
18
-// be added by adding code to the "special ${} format processing" section
19
-type Lex struct {
20
-	escapeToken rune
21
-}
22
-
23
-// NewLex creates a new Lex which uses escapeToken to escape quotes.
24
-func NewLex(escapeToken rune) *Lex {
25
-	return &Lex{escapeToken: escapeToken}
26
-}
27
-
28
-// ProcessWord will use the 'env' list of environment variables,
29
-// and replace any env var references in 'word'.
30
-func (s *Lex) ProcessWord(word string, env []string) (string, error) {
31
-	word, _, err := s.process(word, env)
32
-	return word, err
33
-}
34
-
35
-// ProcessWords will use the 'env' list of environment variables,
36
-// and replace any env var references in 'word' then it will also
37
-// return a slice of strings which represents the 'word'
38
-// split up based on spaces - taking into account quotes.  Note that
39
-// this splitting is done **after** the env var substitutions are done.
40
-// Note, each one is trimmed to remove leading and trailing spaces (unless
41
-// they are quoted", but ProcessWord retains spaces between words.
42
-func (s *Lex) ProcessWords(word string, env []string) ([]string, error) {
43
-	_, words, err := s.process(word, env)
44
-	return words, err
45
-}
46
-
47
-func (s *Lex) process(word string, env []string) (string, []string, error) {
48
-	sw := &shellWord{
49
-		envs:        env,
50
-		escapeToken: s.escapeToken,
51
-	}
52
-	sw.scanner.Init(strings.NewReader(word))
53
-	return sw.process(word)
54
-}
55
-
56
-type shellWord struct {
57
-	scanner     scanner.Scanner
58
-	envs        []string
59
-	escapeToken rune
60
-}
61
-
62
-func (sw *shellWord) process(source string) (string, []string, error) {
63
-	word, words, err := sw.processStopOn(scanner.EOF)
64
-	if err != nil {
65
-		err = errors.Wrapf(err, "failed to process %q", source)
66
-	}
67
-	return word, words, err
68
-}
69
-
70
-type wordsStruct struct {
71
-	word   string
72
-	words  []string
73
-	inWord bool
74
-}
75
-
76
-func (w *wordsStruct) addChar(ch rune) {
77
-	if unicode.IsSpace(ch) && w.inWord {
78
-		if len(w.word) != 0 {
79
-			w.words = append(w.words, w.word)
80
-			w.word = ""
81
-			w.inWord = false
82
-		}
83
-	} else if !unicode.IsSpace(ch) {
84
-		w.addRawChar(ch)
85
-	}
86
-}
87
-
88
-func (w *wordsStruct) addRawChar(ch rune) {
89
-	w.word += string(ch)
90
-	w.inWord = true
91
-}
92
-
93
-func (w *wordsStruct) addString(str string) {
94
-	var scan scanner.Scanner
95
-	scan.Init(strings.NewReader(str))
96
-	for scan.Peek() != scanner.EOF {
97
-		w.addChar(scan.Next())
98
-	}
99
-}
100
-
101
-func (w *wordsStruct) addRawString(str string) {
102
-	w.word += str
103
-	w.inWord = true
104
-}
105
-
106
-func (w *wordsStruct) getWords() []string {
107
-	if len(w.word) > 0 {
108
-		w.words = append(w.words, w.word)
109
-
110
-		// Just in case we're called again by mistake
111
-		w.word = ""
112
-		w.inWord = false
113
-	}
114
-	return w.words
115
-}
116
-
117
-// Process the word, starting at 'pos', and stop when we get to the
118
-// end of the word or the 'stopChar' character
119
-func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
120
-	var result bytes.Buffer
121
-	var words wordsStruct
122
-
123
-	var charFuncMapping = map[rune]func() (string, error){
124
-		'\'': sw.processSingleQuote,
125
-		'"':  sw.processDoubleQuote,
126
-		'$':  sw.processDollar,
127
-	}
128
-
129
-	for sw.scanner.Peek() != scanner.EOF {
130
-		ch := sw.scanner.Peek()
131
-
132
-		if stopChar != scanner.EOF && ch == stopChar {
133
-			sw.scanner.Next()
134
-			return result.String(), words.getWords(), nil
135
-		}
136
-		if fn, ok := charFuncMapping[ch]; ok {
137
-			// Call special processing func for certain chars
138
-			tmp, err := fn()
139
-			if err != nil {
140
-				return "", []string{}, err
141
-			}
142
-			result.WriteString(tmp)
143
-
144
-			if ch == rune('$') {
145
-				words.addString(tmp)
146
-			} else {
147
-				words.addRawString(tmp)
148
-			}
149
-		} else {
150
-			// Not special, just add it to the result
151
-			ch = sw.scanner.Next()
152
-
153
-			if ch == sw.escapeToken {
154
-				// '\' (default escape token, but ` allowed) escapes, except end of line
155
-				ch = sw.scanner.Next()
156
-
157
-				if ch == scanner.EOF {
158
-					break
159
-				}
160
-
161
-				words.addRawChar(ch)
162
-			} else {
163
-				words.addChar(ch)
164
-			}
165
-
166
-			result.WriteRune(ch)
167
-		}
168
-	}
169
-	if stopChar != scanner.EOF {
170
-		return "", []string{}, errors.Errorf("unexpected end of statement while looking for matching %s", string(stopChar))
171
-	}
172
-	return result.String(), words.getWords(), nil
173
-}
174
-
175
-func (sw *shellWord) processSingleQuote() (string, error) {
176
-	// All chars between single quotes are taken as-is
177
-	// Note, you can't escape '
178
-	//
179
-	// From the "sh" man page:
180
-	// Single Quotes
181
-	//   Enclosing characters in single quotes preserves the literal meaning of
182
-	//   all the characters (except single quotes, making it impossible to put
183
-	//   single-quotes in a single-quoted string).
184
-
185
-	var result bytes.Buffer
186
-
187
-	sw.scanner.Next()
188
-
189
-	for {
190
-		ch := sw.scanner.Next()
191
-		switch ch {
192
-		case scanner.EOF:
193
-			return "", errors.New("unexpected end of statement while looking for matching single-quote")
194
-		case '\'':
195
-			return result.String(), nil
196
-		}
197
-		result.WriteRune(ch)
198
-	}
199
-}
200
-
201
-func (sw *shellWord) processDoubleQuote() (string, error) {
202
-	// All chars up to the next " are taken as-is, even ', except any $ chars
203
-	// But you can escape " with a \ (or ` if escape token set accordingly)
204
-	//
205
-	// From the "sh" man page:
206
-	// Double Quotes
207
-	//  Enclosing characters within double quotes preserves the literal meaning
208
-	//  of all characters except dollarsign ($), backquote (`), and backslash
209
-	//  (\).  The backslash inside double quotes is historically weird, and
210
-	//  serves to quote only the following characters:
211
-	//    $ ` " \ <newline>.
212
-	//  Otherwise it remains literal.
213
-
214
-	var result bytes.Buffer
215
-
216
-	sw.scanner.Next()
217
-
218
-	for {
219
-		switch sw.scanner.Peek() {
220
-		case scanner.EOF:
221
-			return "", errors.New("unexpected end of statement while looking for matching double-quote")
222
-		case '"':
223
-			sw.scanner.Next()
224
-			return result.String(), nil
225
-		case '$':
226
-			value, err := sw.processDollar()
227
-			if err != nil {
228
-				return "", err
229
-			}
230
-			result.WriteString(value)
231
-		default:
232
-			ch := sw.scanner.Next()
233
-			if ch == sw.escapeToken {
234
-				switch sw.scanner.Peek() {
235
-				case scanner.EOF:
236
-					// Ignore \ at end of word
237
-					continue
238
-				case '"', '$', sw.escapeToken:
239
-					// These chars can be escaped, all other \'s are left as-is
240
-					// Note: for now don't do anything special with ` chars.
241
-					// Not sure what to do with them anyway since we're not going
242
-					// to execute the text in there (not now anyway).
243
-					ch = sw.scanner.Next()
244
-				}
245
-			}
246
-			result.WriteRune(ch)
247
-		}
248
-	}
249
-}
250
-
251
-func (sw *shellWord) processDollar() (string, error) {
252
-	sw.scanner.Next()
253
-
254
-	// $xxx case
255
-	if sw.scanner.Peek() != '{' {
256
-		name := sw.processName()
257
-		if name == "" {
258
-			return "$", nil
259
-		}
260
-		return sw.getEnv(name), nil
261
-	}
262
-
263
-	sw.scanner.Next()
264
-	switch sw.scanner.Peek() {
265
-	case scanner.EOF:
266
-		return "", errors.New("syntax error: missing '}'")
267
-	case '{', '}', ':':
268
-		// Invalid ${{xx}, ${:xx}, ${:}. ${} case
269
-		return "", errors.New("syntax error: bad substitution")
270
-	}
271
-	name := sw.processName()
272
-	ch := sw.scanner.Next()
273
-	switch ch {
274
-	case '}':
275
-		// Normal ${xx} case
276
-		return sw.getEnv(name), nil
277
-	case ':':
278
-		// Special ${xx:...} format processing
279
-		// Yes it allows for recursive $'s in the ... spot
280
-		modifier := sw.scanner.Next()
281
-
282
-		word, _, err := sw.processStopOn('}')
283
-		if err != nil {
284
-			if sw.scanner.Peek() == scanner.EOF {
285
-				return "", errors.New("syntax error: missing '}'")
286
-			}
287
-			return "", err
288
-		}
289
-
290
-		// Grab the current value of the variable in question so we
291
-		// can use to to determine what to do based on the modifier
292
-		newValue := sw.getEnv(name)
293
-
294
-		switch modifier {
295
-		case '+':
296
-			if newValue != "" {
297
-				newValue = word
298
-			}
299
-			return newValue, nil
300
-
301
-		case '-':
302
-			if newValue == "" {
303
-				newValue = word
304
-			}
305
-			return newValue, nil
306
-
307
-		default:
308
-			return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier)
309
-		}
310
-	}
311
-	return "", errors.Errorf("missing ':' in substitution")
312
-}
313
-
314
-func (sw *shellWord) processName() string {
315
-	// Read in a name (alphanumeric or _)
316
-	// If it starts with a numeric then just return $#
317
-	var name bytes.Buffer
318
-
319
-	for sw.scanner.Peek() != scanner.EOF {
320
-		ch := sw.scanner.Peek()
321
-		if name.Len() == 0 && unicode.IsDigit(ch) {
322
-			for sw.scanner.Peek() != scanner.EOF && unicode.IsDigit(sw.scanner.Peek()) {
323
-				// Keep reading until the first non-digit character, or EOF
324
-				ch = sw.scanner.Next()
325
-				name.WriteRune(ch)
326
-			}
327
-			return name.String()
328
-		}
329
-		if name.Len() == 0 && isSpecialParam(ch) {
330
-			ch = sw.scanner.Next()
331
-			return string(ch)
332
-		}
333
-		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
334
-			break
335
-		}
336
-		ch = sw.scanner.Next()
337
-		name.WriteRune(ch)
338
-	}
339
-
340
-	return name.String()
341
-}
342
-
343
-// isSpecialParam checks if the provided character is a special parameters,
344
-// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
345
-func isSpecialParam(char rune) bool {
346
-	switch char {
347
-	case '@', '*', '#', '?', '-', '$', '!', '0':
348
-		// Special parameters
349
-		// http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
350
-		return true
351
-	}
352
-	return false
353
-}
354
-
355
-func (sw *shellWord) getEnv(name string) string {
356
-	for _, env := range sw.envs {
357
-		i := strings.Index(env, "=")
358
-		if i < 0 {
359
-			if EqualEnvKeys(name, env) {
360
-				// Should probably never get here, but just in case treat
361
-				// it like "var" and "var=" are the same
362
-				return ""
363
-			}
364
-			continue
365
-		}
366
-		compareName := env[:i]
367
-		if !EqualEnvKeys(name, compareName) {
368
-			continue
369
-		}
370
-		return env[i+1:]
371
-	}
372
-	return ""
373
-}
374 1
deleted file mode 100644
... ...
@@ -1,150 +0,0 @@
1
-package shell // import "github.com/docker/docker/builder/dockerfile/shell"
2
-
3
-import (
4
-	"bufio"
5
-	"os"
6
-	"runtime"
7
-	"strings"
8
-	"testing"
9
-
10
-	"github.com/gotestyourself/gotestyourself/assert"
11
-	is "github.com/gotestyourself/gotestyourself/assert/cmp"
12
-)
13
-
14
-func TestShellParser4EnvVars(t *testing.T) {
15
-	fn := "envVarTest"
16
-	lineCount := 0
17
-
18
-	file, err := os.Open(fn)
19
-	assert.Check(t, err)
20
-	defer file.Close()
21
-
22
-	shlex := NewLex('\\')
23
-	scanner := bufio.NewScanner(file)
24
-	envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"}
25
-	for scanner.Scan() {
26
-		line := scanner.Text()
27
-		lineCount++
28
-
29
-		// Skip comments and blank lines
30
-		if strings.HasPrefix(line, "#") {
31
-			continue
32
-		}
33
-		line = strings.TrimSpace(line)
34
-		if line == "" {
35
-			continue
36
-		}
37
-
38
-		words := strings.Split(line, "|")
39
-		assert.Check(t, is.Len(words, 3))
40
-
41
-		platform := strings.TrimSpace(words[0])
42
-		source := strings.TrimSpace(words[1])
43
-		expected := strings.TrimSpace(words[2])
44
-
45
-		// Key W=Windows; A=All; U=Unix
46
-		if platform != "W" && platform != "A" && platform != "U" {
47
-			t.Fatalf("Invalid tag %s at line %d of %s. Must be W, A or U", platform, lineCount, fn)
48
-		}
49
-
50
-		if ((platform == "W" || platform == "A") && runtime.GOOS == "windows") ||
51
-			((platform == "U" || platform == "A") && runtime.GOOS != "windows") {
52
-			newWord, err := shlex.ProcessWord(source, envs)
53
-			if expected == "error" {
54
-				assert.Check(t, is.ErrorContains(err, ""), "input: %q, result: %q", source, newWord)
55
-			} else {
56
-				assert.Check(t, err, "at line %d of %s", lineCount, fn)
57
-				assert.Check(t, is.Equal(newWord, expected), "at line %d of %s", lineCount, fn)
58
-			}
59
-		}
60
-	}
61
-}
62
-
63
-func TestShellParser4Words(t *testing.T) {
64
-	fn := "wordsTest"
65
-
66
-	file, err := os.Open(fn)
67
-	if err != nil {
68
-		t.Fatalf("Can't open '%s': %s", err, fn)
69
-	}
70
-	defer file.Close()
71
-
72
-	var envs []string
73
-	shlex := NewLex('\\')
74
-	scanner := bufio.NewScanner(file)
75
-	lineNum := 0
76
-	for scanner.Scan() {
77
-		line := scanner.Text()
78
-		lineNum = lineNum + 1
79
-
80
-		if strings.HasPrefix(line, "#") {
81
-			continue
82
-		}
83
-
84
-		if strings.HasPrefix(line, "ENV ") {
85
-			line = strings.TrimLeft(line[3:], " ")
86
-			envs = append(envs, line)
87
-			continue
88
-		}
89
-
90
-		words := strings.Split(line, "|")
91
-		if len(words) != 2 {
92
-			t.Fatalf("Error in '%s'(line %d) - should be exactly one | in: %q", fn, lineNum, line)
93
-		}
94
-		test := strings.TrimSpace(words[0])
95
-		expected := strings.Split(strings.TrimLeft(words[1], " "), ",")
96
-
97
-		result, err := shlex.ProcessWords(test, envs)
98
-
99
-		if err != nil {
100
-			result = []string{"error"}
101
-		}
102
-
103
-		if len(result) != len(expected) {
104
-			t.Fatalf("Error on line %d. %q was suppose to result in %q, but got %q instead", lineNum, test, expected, result)
105
-		}
106
-		for i, w := range expected {
107
-			if w != result[i] {
108
-				t.Fatalf("Error on line %d. %q was suppose to result in %q, but got %q instead", lineNum, test, expected, result)
109
-			}
110
-		}
111
-	}
112
-}
113
-
114
-func TestGetEnv(t *testing.T) {
115
-	sw := &shellWord{envs: nil}
116
-
117
-	sw.envs = []string{}
118
-	if sw.getEnv("foo") != "" {
119
-		t.Fatal("2 - 'foo' should map to ''")
120
-	}
121
-
122
-	sw.envs = []string{"foo"}
123
-	if sw.getEnv("foo") != "" {
124
-		t.Fatal("3 - 'foo' should map to ''")
125
-	}
126
-
127
-	sw.envs = []string{"foo="}
128
-	if sw.getEnv("foo") != "" {
129
-		t.Fatal("4 - 'foo' should map to ''")
130
-	}
131
-
132
-	sw.envs = []string{"foo=bar"}
133
-	if sw.getEnv("foo") != "bar" {
134
-		t.Fatal("5 - 'foo' should map to 'bar'")
135
-	}
136
-
137
-	sw.envs = []string{"foo=bar", "car=hat"}
138
-	if sw.getEnv("foo") != "bar" {
139
-		t.Fatal("6 - 'foo' should map to 'bar'")
140
-	}
141
-	if sw.getEnv("car") != "hat" {
142
-		t.Fatal("7 - 'car' should map to 'hat'")
143
-	}
144
-
145
-	// Make sure we grab the first 'car' in the list
146
-	sw.envs = []string{"foo=bar", "car=hat", "car=bike"}
147
-	if sw.getEnv("car") != "hat" {
148
-		t.Fatal("8 - 'car' should map to 'hat'")
149
-	}
150
-}
151 1
deleted file mode 100644
... ...
@@ -1,30 +0,0 @@
1
-hello | hello
2
-hello${hi}bye | hellobye
3
-ENV hi=hi
4
-hello${hi}bye | hellohibye
5
-ENV space=abc  def
6
-hello${space}bye | helloabc,defbye
7
-hello"${space}"bye | helloabc  defbye
8
-hello "${space}"bye | hello,abc  defbye
9
-ENV leading=  ab c
10
-hello${leading}def | hello,ab,cdef
11
-hello"${leading}" def | hello  ab c,def
12
-hello"${leading}" | hello  ab c
13
-hello${leading} | hello,ab,c
14
-# next line MUST have 3 trailing spaces, don't erase them!
15
-ENV trailing=ab c   
16
-hello${trailing} | helloab,c
17
-hello${trailing}d | helloab,c,d
18
-hello"${trailing}"d | helloab c   d
19
-# next line MUST have 3 trailing spaces, don't erase them!
20
-hel"lo${trailing}" | helloab c   
21
-hello" there  " | hello there  
22
-hello there     | hello,there
23
-hello\ there | hello there
24
-hello" there | error
25
-hello\" there | hello",there
26
-hello"\\there" | hello\there
27
-hello"\there" | hello\there
28
-hello'\\there' | hello\\there
29
-hello'\there' | hello\there
30
-hello'$there' | hello$there
... ...
@@ -10,10 +10,10 @@ import (
10 10
 	"github.com/containerd/continuity/driver"
11 11
 	"github.com/docker/docker/api/types/backend"
12 12
 	"github.com/docker/docker/builder"
13
-	"github.com/docker/docker/builder/dockerfile/parser"
14 13
 	"github.com/docker/docker/builder/dockerignore"
15 14
 	"github.com/docker/docker/pkg/fileutils"
16 15
 	"github.com/docker/docker/pkg/urlutil"
16
+	"github.com/moby/buildkit/frontend/dockerfile/parser"
17 17
 	"github.com/pkg/errors"
18 18
 	"github.com/sirupsen/logrus"
19 19
 )
... ...
@@ -16,7 +16,7 @@ import (
16 16
 	"text/template"
17 17
 	"time"
18 18
 
19
-	"github.com/docker/docker/builder/dockerfile/command"
19
+	"github.com/moby/buildkit/frontend/dockerfile/command"
20 20
 	"github.com/docker/docker/integration-cli/checker"
21 21
 	"github.com/docker/docker/integration-cli/cli"
22 22
 	"github.com/docker/docker/integration-cli/cli/build"
... ...
@@ -85,7 +85,8 @@ func TestBuildWithSession(t *testing.T) {
85 85
 }
86 86
 
87 87
 func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost string, dir, dockerfile string) (outStr string) {
88
-	sess, err := session.NewSession("foo1", "foo")
88
+	ctx := context.Background()
89
+	sess, err := session.NewSession(ctx, "foo1", "foo")
89 90
 	assert.Check(t, err)
90 91
 
91 92
 	fsProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{
... ...
@@ -26,8 +26,8 @@ github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
26 26
 github.com/imdario/mergo 0.2.1
27 27
 golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
28 28
 
29
-github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
30
-github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
29
+github.com/moby/buildkit b14fd548fe80c0399b105aeec5dbd96ccd2f7720
30
+github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
31 31
 
32 32
 #get libnetwork packages
33 33
 
... ...
@@ -5,7 +5,6 @@
5 5
 
6 6
 ## BuildKit
7 7
 
8
-<!-- godoc is mainly for LLB stuff -->
9 8
 [![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb)
10 9
 [![Build Status](https://travis-ci.org/moby/buildkit.svg?branch=master)](https://travis-ci.org/moby/buildkit)
11 10
 [![Go Report Card](https://goreportcard.com/badge/github.com/moby/buildkit)](https://goreportcard.com/report/github.com/moby/buildkit)
... ...
@@ -23,26 +22,58 @@ Key features:
23 23
 - Distributable workers
24 24
 - Multiple output formats
25 25
 - Pluggable architecture
26
+- Execution without root privileges
26 27
 
27 28
 
28 29
 Read the proposal from https://github.com/moby/moby/issues/32925
29 30
 
30
-#### Quick start
31
+Introductory blog post https://blog.mobyproject.org/introducing-buildkit-17e056cc5317
31 32
 
32
-BuildKit daemon can be built in two different versions: one that uses [containerd](https://github.com/containerd/containerd) for execution and distribution, and a standalone version that doesn't have other dependencies apart from [runc](https://github.com/opencontainers/runc). We are open for adding more backends. `buildd` is a CLI utility for serving the gRPC API. 
33
+### Quick start
34
+
35
+Dependencies:
36
+- [runc](https://github.com/opencontainers/runc)
37
+- [containerd](https://github.com/containerd/containerd) (if you want to use containerd worker)
38
+
39
+
40
+The following command installs `buildkitd` and `buildctl` to `/usr/local/bin`:
33 41
 
34 42
 ```bash
35
-# buildd daemon (choose one)
36
-go build -o buildd-containerd -tags containerd ./cmd/buildd
37
-go build -o buildd-standalone -tags standalone ./cmd/buildd
43
+$ make && sudo make install
44
+```
45
+
46
+You can also use `make binaries-all` to prepare `buildkitd.containerd_only` and `buildkitd.oci_only`.
38 47
 
39
-# buildctl utility
40
-go build -o buildctl ./cmd/buildctl
48
+#### Starting the buildkitd daemon:
49
+
50
+```
51
+buildkitd --debug --root /var/lib/buildkit
41 52
 ```
42 53
 
43
-You can also use `make binaries` that prepares all binaries into the `bin/` directory.
54
+The buildkitd daemon suppports two worker backends: OCI (runc) and containerd.
55
+
56
+By default, the OCI (runc) worker is used.
57
+You can set `--oci-worker=false --containerd-worker=true` to use the containerd worker.
58
+
59
+We are open to adding more backends.
44 60
 
45
-`examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit and its dependencies using the `client` package. Running one of these script generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build.
61
+#### Exploring LLB
62
+
63
+BuildKit builds are based on a binary intermediate format called LLB that is used for defining the dependency graph for processes running part of your build. tl;dr: LLB is to Dockerfile what LLVM IR is to C.
64
+
65
+- Marshaled as Protobuf messages
66
+- Concurrently executable
67
+- Efficiently cacheable
68
+- Vendor-neutral (i.e. non-Dockerfile languages can be easily implemented)
69
+
70
+See [`solver/pb/ops.proto`](./solver/pb/ops.proto) for the format definition.
71
+
72
+Currently, following high-level languages has been implemented for LLB:
73
+
74
+- Dockerfile (See [Exploring Dockerfiles](#exploring-dockerfiles))
75
+- (open a PR to add your own language)
76
+
77
+For understanding the basics of LLB, `examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit itself and its dependencies using the `client` package. Running one of these scripts generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build.
46 78
 
47 79
 You can use `buildctl debug dump-llb` to see what data is in this definition. Add `--dot` to generate dot layout.
48 80
 
... ...
@@ -50,7 +81,7 @@ You can use `buildctl debug dump-llb` to see what data is in this definition. Ad
50 50
 go run examples/buildkit0/buildkit.go | buildctl debug dump-llb | jq .
51 51
 ```
52 52
 
53
-To start building use `buildctl build` command. The example script accepts `--target` flag to choose between `containerd` and `standalone` configurations. In standalone mode BuildKit binaries are built together with `runc`. In containerd mode, the `containerd` binary is built as well from the upstream repo.
53
+To start building use `buildctl build` command. The example script accepts `--with-containerd` flag to choose if containerd binaries and support should be included in the end result as well. 
54 54
 
55 55
 ```bash
56 56
 go run examples/buildkit0/buildkit.go | buildctl build
... ...
@@ -68,50 +99,138 @@ Different versions of the example scripts show different ways of describing the
68 68
 - `./examples/gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies
69 69
 
70 70
 
71
-#### Examples
71
+#### Exploring Dockerfiles
72
+
73
+Frontends are components that run inside BuildKit and convert any build definition to LLB. There is a special frontend called gateway (gateway.v0) that allows using any image as a frontend.
74
+
75
+During development, Dockerfile frontend (dockerfile.v0) is also part of the BuildKit repo. In the future, this will be moved out, and Dockerfiles can be built using an external image.
72 76
 
73
-##### Starting the buildd daemon:
77
+##### Building a Dockerfile with `buildctl`
74 78
 
75 79
 ```
76
-buildd-standalone --debug --root /var/lib/buildkit
80
+buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=.
81
+buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. --frontend-opt target=foo --frontend-opt build-arg:foo=bar
77 82
 ```
78 83
 
79
-##### Building a Dockerfile:
84
+`--local` exposes local source files from client to the builder. `context` and `dockerfile` are the names Dockerfile frontend looks for build context and Dockerfile location.
85
+
86
+##### build-using-dockerfile utility
87
+
88
+For people familiar with `docker build` command, there is an example wrapper utility in `./examples/build-using-dockerfile` that allows building Dockerfiles with BuildKit using a syntax similar to `docker build`.
80 89
 
81 90
 ```
82
-buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=.
91
+go build ./examples/build-using-dockerfile && sudo install build-using-dockerfile /usr/local/bin
92
+
93
+build-using-dockerfile -t myimage .
94
+build-using-dockerfile -t mybuildkit -f ./hack/dockerfiles/test.Dockerfile .
95
+
96
+# build-using-dockerfile will automatically load the resulting image to Docker
97
+docker inspect myimage
83 98
 ```
84 99
 
85
-`context` and `dockerfile` should point to local directories for build context and Dockerfile location.
100
+##### Building a Dockerfile using [external frontend](https://hub.docker.com/r/tonistiigi/dockerfile/tags/):
101
+
102
+During development, an external version of the Dockerfile frontend is pushed to https://hub.docker.com/r/tonistiigi/dockerfile that can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)).
103
+
104
+```
105
+buildctl build --frontend=gateway.v0 --frontend-opt=source=tonistiigi/dockerfile:v0 --local context=. --local dockerfile=.
106
+buildctl build --frontend gateway.v0 --frontend-opt=source=tonistiigi/dockerfile:v0 --frontend-opt=context=git://github.com/moby/moby --frontend-opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
107
+````
86 108
 
109
+### Exporters
110
+
111
+By default, the build result and intermediate cache will only remain internally in BuildKit. Exporter needs to be specified to retrieve the result.
87 112
 
88 113
 ##### Exporting resulting image to containerd
89 114
 
90
-Containerd version of buildd needs to be used
115
+The containerd worker needs to be used
91 116
 
92 117
 ```
93 118
 buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image
94 119
 ctr --namespace=buildkit images ls
95 120
 ```
96 121
 
122
+##### Push resulting image to registry
123
+
124
+```
125
+buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image --exporter-opt push=true
126
+```
127
+
128
+If credentials are required, `buildctl` will attempt to read Docker configuration file.
129
+
130
+
97 131
 ##### Exporting build result back to client
98 132
 
133
+The local client will copy the files directly to the client. This is useful if BuildKit is being used for building something else than container images.
134
+
99 135
 ```
100 136
 buildctl build ... --exporter=local --exporter-opt output=path/to/output-dir
101 137
 ```
102 138
 
139
+##### Exporting built image to Docker
140
+
141
+```
142
+# exported tarball is also compatible with OCI spec
143
+buildctl build ... --exporter=docker --exporter-opt name=myimage | docker load
144
+```
145
+
146
+##### Exporting [OCI Image Format](https://github.com/opencontainers/image-spec) tarball to client
147
+
148
+```
149
+buildctl build ... --exporter=oci --exporter-opt output=path/to/output.tar
150
+buildctl build ... --exporter=oci > output.tar
151
+```
152
+
153
+### Other
154
+
103 155
 #### View build cache
104 156
 
105 157
 ```
106 158
 buildctl du -v
107 159
 ```
108 160
 
109
-#### Supported runc version
161
+#### Show enabled workers
162
+
163
+```
164
+buildctl debug workers -v
165
+```
166
+
167
+### Running containerized buildkit
168
+
169
+BuildKit can also be used by running the `buildkitd` daemon inside a Docker container and accessing it remotely. The client tool `buildctl` is also available for Mac and Windows.
170
+
171
+To run daemon in a container:
172
+
173
+```
174
+docker run -d --privileged -p 1234:1234 tonistiigi/buildkit --addr tcp://0.0.0.0:1234
175
+export BUILDKIT_HOST=tcp://0.0.0.0:1234
176
+buildctl build --help
177
+```
178
+
179
+The `tonistiigi/buildkit` image can be built locally using the Dockerfile in `./hack/dockerfiles/test.Dockerfile`.
180
+
181
+### Opentracing support
182
+
183
+BuildKit supports opentracing for buildkitd gRPC API and buildctl commands. To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set `JAEGER_TRACE` environment variable to the collection address.
184
+
185
+
186
+```
187
+docker run -d -p6831:6831/udp -p16686:16686 jaegertracing/all-in-one:latest
188
+export JAEGER_TRACE=0.0.0.0:6831
189
+# restart buildkitd and buildctl so they know JAEGER_TRACE
190
+# any buildctl command should be traced to http://127.0.0.1:16686/
191
+```
192
+
110 193
 
111
-During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/d1e11f17ec7b325f89608dd46c128300b8727d50/RUNC.md) for more information.
194
+### Supported runc version
112 195
 
196
+During development, BuildKit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/v1.1.0/RUNC.md) for more information.
113 197
 
114
-#### Contributing
198
+### Running BuildKit without root privileges
199
+
200
+Please refer to [`docs/rootless.md`](docs/rootless.md).
201
+
202
+### Contributing
115 203
 
116 204
 Running tests:
117 205
 
... ...
@@ -119,6 +238,20 @@ Running tests:
119 119
 make test
120 120
 ```
121 121
 
122
+This runs all unit and integration tests in a containerized environment. Locally, every package can be tested separately with standard Go tools, but integration tests are skipped if local user doesn't have enough permissions or worker binaries are not installed.
123
+
124
+```
125
+# test a specific package only
126
+make test TESTPKGS=./client
127
+
128
+# run a specific test with all worker combinations
129
+make test TESTPKGS=./client TESTFLAGS="--run /TestCallDiskUsage -v" 
130
+
131
+# run all integration tests with a specific worker
132
+# supported workers are oci and containerd
133
+make test TESTPKGS=./client TESTFLAGS="--run //worker=containerd -v" 
134
+```
135
+
122 136
 Updating vendored dependencies:
123 137
 
124 138
 ```bash
125 139
new file mode 100644
... ...
@@ -0,0 +1,46 @@
0
+// Package command contains the set of Dockerfile commands.
1
+package command
2
+
3
+// Define constants for the command strings
4
+const (
5
+	Add         = "add"
6
+	Arg         = "arg"
7
+	Cmd         = "cmd"
8
+	Copy        = "copy"
9
+	Entrypoint  = "entrypoint"
10
+	Env         = "env"
11
+	Expose      = "expose"
12
+	From        = "from"
13
+	Healthcheck = "healthcheck"
14
+	Label       = "label"
15
+	Maintainer  = "maintainer"
16
+	Onbuild     = "onbuild"
17
+	Run         = "run"
18
+	Shell       = "shell"
19
+	StopSignal  = "stopsignal"
20
+	User        = "user"
21
+	Volume      = "volume"
22
+	Workdir     = "workdir"
23
+)
24
+
25
+// Commands is list of all Dockerfile commands
26
+var Commands = map[string]struct{}{
27
+	Add:         {},
28
+	Arg:         {},
29
+	Cmd:         {},
30
+	Copy:        {},
31
+	Entrypoint:  {},
32
+	Env:         {},
33
+	Expose:      {},
34
+	From:        {},
35
+	Healthcheck: {},
36
+	Label:       {},
37
+	Maintainer:  {},
38
+	Onbuild:     {},
39
+	Run:         {},
40
+	Shell:       {},
41
+	StopSignal:  {},
42
+	User:        {},
43
+	Volume:      {},
44
+	Workdir:     {},
45
+}
0 46
new file mode 100644
... ...
@@ -0,0 +1,183 @@
0
+package instructions
1
+
2
+import (
3
+	"fmt"
4
+	"strings"
5
+)
6
+
7
+// FlagType is the type of the build flag
8
+type FlagType int
9
+
10
+const (
11
+	boolType FlagType = iota
12
+	stringType
13
+)
14
+
15
+// BFlags contains all flags information for the builder
16
+type BFlags struct {
17
+	Args  []string // actual flags/args from cmd line
18
+	flags map[string]*Flag
19
+	used  map[string]*Flag
20
+	Err   error
21
+}
22
+
23
+// Flag contains all information for a flag
24
+type Flag struct {
25
+	bf       *BFlags
26
+	name     string
27
+	flagType FlagType
28
+	Value    string
29
+}
30
+
31
+// NewBFlags returns the new BFlags struct
32
+func NewBFlags() *BFlags {
33
+	return &BFlags{
34
+		flags: make(map[string]*Flag),
35
+		used:  make(map[string]*Flag),
36
+	}
37
+}
38
+
39
+// NewBFlagsWithArgs returns the new BFlags struct with Args set to args
40
+func NewBFlagsWithArgs(args []string) *BFlags {
41
+	flags := NewBFlags()
42
+	flags.Args = args
43
+	return flags
44
+}
45
+
46
+// AddBool adds a bool flag to BFlags
47
+// Note, any error will be generated when Parse() is called (see Parse).
48
+func (bf *BFlags) AddBool(name string, def bool) *Flag {
49
+	flag := bf.addFlag(name, boolType)
50
+	if flag == nil {
51
+		return nil
52
+	}
53
+	if def {
54
+		flag.Value = "true"
55
+	} else {
56
+		flag.Value = "false"
57
+	}
58
+	return flag
59
+}
60
+
61
+// AddString adds a string flag to BFlags
62
+// Note, any error will be generated when Parse() is called (see Parse).
63
+func (bf *BFlags) AddString(name string, def string) *Flag {
64
+	flag := bf.addFlag(name, stringType)
65
+	if flag == nil {
66
+		return nil
67
+	}
68
+	flag.Value = def
69
+	return flag
70
+}
71
+
72
+// addFlag is a generic func used by the other AddXXX() func
73
+// to add a new flag to the BFlags struct.
74
+// Note, any error will be generated when Parse() is called (see Parse).
75
+func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag {
76
+	if _, ok := bf.flags[name]; ok {
77
+		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
78
+		return nil
79
+	}
80
+
81
+	newFlag := &Flag{
82
+		bf:       bf,
83
+		name:     name,
84
+		flagType: flagType,
85
+	}
86
+	bf.flags[name] = newFlag
87
+
88
+	return newFlag
89
+}
90
+
91
+// IsUsed checks if the flag is used
92
+func (fl *Flag) IsUsed() bool {
93
+	if _, ok := fl.bf.used[fl.name]; ok {
94
+		return true
95
+	}
96
+	return false
97
+}
98
+
99
+// IsTrue checks if a bool flag is true
100
+func (fl *Flag) IsTrue() bool {
101
+	if fl.flagType != boolType {
102
+		// Should never get here
103
+		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
104
+	}
105
+	return fl.Value == "true"
106
+}
107
+
108
+// Parse parses and checks if the BFlags is valid.
109
+// Any error noticed during the AddXXX() funcs will be generated/returned
110
+// here.  We do this because an error during AddXXX() is more like a
111
+// compile time error so it doesn't matter too much when we stop our
112
+// processing as long as we do stop it, so this allows the code
113
+// around AddXXX() to be just:
114
+//     defFlag := AddString("description", "")
115
+// w/o needing to add an if-statement around each one.
116
+func (bf *BFlags) Parse() error {
117
+	// If there was an error while defining the possible flags
118
+	// go ahead and bubble it back up here since we didn't do it
119
+	// earlier in the processing
120
+	if bf.Err != nil {
121
+		return fmt.Errorf("Error setting up flags: %s", bf.Err)
122
+	}
123
+
124
+	for _, arg := range bf.Args {
125
+		if !strings.HasPrefix(arg, "--") {
126
+			return fmt.Errorf("Arg should start with -- : %s", arg)
127
+		}
128
+
129
+		if arg == "--" {
130
+			return nil
131
+		}
132
+
133
+		arg = arg[2:]
134
+		value := ""
135
+
136
+		index := strings.Index(arg, "=")
137
+		if index >= 0 {
138
+			value = arg[index+1:]
139
+			arg = arg[:index]
140
+		}
141
+
142
+		flag, ok := bf.flags[arg]
143
+		if !ok {
144
+			return fmt.Errorf("Unknown flag: %s", arg)
145
+		}
146
+
147
+		if _, ok = bf.used[arg]; ok {
148
+			return fmt.Errorf("Duplicate flag specified: %s", arg)
149
+		}
150
+
151
+		bf.used[arg] = flag
152
+
153
+		switch flag.flagType {
154
+		case boolType:
155
+			// value == "" is only ok if no "=" was specified
156
+			if index >= 0 && value == "" {
157
+				return fmt.Errorf("Missing a value on flag: %s", arg)
158
+			}
159
+
160
+			lower := strings.ToLower(value)
161
+			if lower == "" {
162
+				flag.Value = "true"
163
+			} else if lower == "true" || lower == "false" {
164
+				flag.Value = lower
165
+			} else {
166
+				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
167
+			}
168
+
169
+		case stringType:
170
+			if index < 0 {
171
+				return fmt.Errorf("Missing a value on flag: %s", arg)
172
+			}
173
+			flag.Value = value
174
+
175
+		default:
176
+			panic("No idea what kind of flag we have! Should never get here!")
177
+		}
178
+
179
+	}
180
+
181
+	return nil
182
+}
0 183
new file mode 100644
... ...
@@ -0,0 +1,418 @@
0
+package instructions
1
+
2
+import (
3
+	"errors"
4
+	"strings"
5
+
6
+	"github.com/docker/docker/api/types/container"
7
+	"github.com/docker/docker/api/types/strslice"
8
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
9
+)
10
+
11
+// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering)
12
+type KeyValuePair struct {
13
+	Key   string
14
+	Value string
15
+}
16
+
17
+func (kvp *KeyValuePair) String() string {
18
+	return kvp.Key + "=" + kvp.Value
19
+}
20
+
21
+// Command is implemented by every command present in a dockerfile
22
+type Command interface {
23
+	Name() string
24
+}
25
+
26
+// KeyValuePairs is a slice of KeyValuePair
27
+type KeyValuePairs []KeyValuePair
28
+
29
+// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code)
30
+type withNameAndCode struct {
31
+	code string
32
+	name string
33
+}
34
+
35
+func (c *withNameAndCode) String() string {
36
+	return c.code
37
+}
38
+
39
+// Name of the command
40
+func (c *withNameAndCode) Name() string {
41
+	return c.name
42
+}
43
+
44
+func newWithNameAndCode(req parseRequest) withNameAndCode {
45
+	return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command}
46
+}
47
+
48
+// SingleWordExpander is a provider for variable expansion where 1 word => 1 output
49
+type SingleWordExpander func(word string) (string, error)
50
+
51
+// SupportsSingleWordExpansion interface marks a command as supporting variable expansion
52
+type SupportsSingleWordExpansion interface {
53
+	Expand(expander SingleWordExpander) error
54
+}
55
+
56
+// PlatformSpecific adds platform checks to a command
57
+type PlatformSpecific interface {
58
+	CheckPlatform(platform string) error
59
+}
60
+
61
+func expandKvp(kvp KeyValuePair, expander SingleWordExpander) (KeyValuePair, error) {
62
+	key, err := expander(kvp.Key)
63
+	if err != nil {
64
+		return KeyValuePair{}, err
65
+	}
66
+	value, err := expander(kvp.Value)
67
+	if err != nil {
68
+		return KeyValuePair{}, err
69
+	}
70
+	return KeyValuePair{Key: key, Value: value}, nil
71
+}
72
+func expandKvpsInPlace(kvps KeyValuePairs, expander SingleWordExpander) error {
73
+	for i, kvp := range kvps {
74
+		newKvp, err := expandKvp(kvp, expander)
75
+		if err != nil {
76
+			return err
77
+		}
78
+		kvps[i] = newKvp
79
+	}
80
+	return nil
81
+}
82
+
83
+func expandSliceInPlace(values []string, expander SingleWordExpander) error {
84
+	for i, v := range values {
85
+		newValue, err := expander(v)
86
+		if err != nil {
87
+			return err
88
+		}
89
+		values[i] = newValue
90
+	}
91
+	return nil
92
+}
93
+
94
+// EnvCommand : ENV key1 value1 [keyN valueN...]
95
+type EnvCommand struct {
96
+	withNameAndCode
97
+	Env KeyValuePairs // kvp slice instead of map to preserve ordering
98
+}
99
+
100
+// Expand variables
101
+func (c *EnvCommand) Expand(expander SingleWordExpander) error {
102
+	return expandKvpsInPlace(c.Env, expander)
103
+}
104
+
105
+// MaintainerCommand : MAINTAINER maintainer_name
106
+type MaintainerCommand struct {
107
+	withNameAndCode
108
+	Maintainer string
109
+}
110
+
111
+// NewLabelCommand creates a new 'LABEL' command
112
+func NewLabelCommand(k string, v string, NoExp bool) *LabelCommand {
113
+	kvp := KeyValuePair{Key: k, Value: v}
114
+	c := "LABEL "
115
+	c += kvp.String()
116
+	nc := withNameAndCode{code: c, name: "label"}
117
+	cmd := &LabelCommand{
118
+		withNameAndCode: nc,
119
+		Labels: KeyValuePairs{
120
+			kvp,
121
+		},
122
+		noExpand: NoExp,
123
+	}
124
+	return cmd
125
+}
126
+
127
+// LabelCommand : LABEL some json data describing the image
128
+//
129
+// Sets the Label variable foo to bar,
130
+//
131
+type LabelCommand struct {
132
+	withNameAndCode
133
+	Labels   KeyValuePairs // kvp slice instead of map to preserve ordering
134
+	noExpand bool
135
+}
136
+
137
+// Expand variables
138
+func (c *LabelCommand) Expand(expander SingleWordExpander) error {
139
+	if c.noExpand {
140
+		return nil
141
+	}
142
+	return expandKvpsInPlace(c.Labels, expander)
143
+}
144
+
145
+// SourcesAndDest represent a list of source files and a destination
146
+type SourcesAndDest []string
147
+
148
+// Sources list the source paths
149
+func (s SourcesAndDest) Sources() []string {
150
+	res := make([]string, len(s)-1)
151
+	copy(res, s[:len(s)-1])
152
+	return res
153
+}
154
+
155
+// Dest path of the operation
156
+func (s SourcesAndDest) Dest() string {
157
+	return s[len(s)-1]
158
+}
159
+
160
+// AddCommand : ADD foo /path
161
+//
162
+// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
163
+// exist here. If you do not wish to have this automatic handling, use COPY.
164
+//
165
+type AddCommand struct {
166
+	withNameAndCode
167
+	SourcesAndDest
168
+	Chown string
169
+}
170
+
171
+// Expand variables
172
+func (c *AddCommand) Expand(expander SingleWordExpander) error {
173
+	return expandSliceInPlace(c.SourcesAndDest, expander)
174
+}
175
+
176
+// CopyCommand : COPY foo /path
177
+//
178
+// Same as 'ADD' but without the tar and remote url handling.
179
+//
180
+type CopyCommand struct {
181
+	withNameAndCode
182
+	SourcesAndDest
183
+	From  string
184
+	Chown string
185
+}
186
+
187
+// Expand variables
188
+func (c *CopyCommand) Expand(expander SingleWordExpander) error {
189
+	return expandSliceInPlace(c.SourcesAndDest, expander)
190
+}
191
+
192
+// OnbuildCommand : ONBUILD <some other command>
193
+type OnbuildCommand struct {
194
+	withNameAndCode
195
+	Expression string
196
+}
197
+
198
+// WorkdirCommand : WORKDIR /tmp
199
+//
200
+// Set the working directory for future RUN/CMD/etc statements.
201
+//
202
+type WorkdirCommand struct {
203
+	withNameAndCode
204
+	Path string
205
+}
206
+
207
+// Expand variables
208
+func (c *WorkdirCommand) Expand(expander SingleWordExpander) error {
209
+	p, err := expander(c.Path)
210
+	if err != nil {
211
+		return err
212
+	}
213
+	c.Path = p
214
+	return nil
215
+}
216
+
217
+// ShellDependantCmdLine represents a cmdline optionally prepended with the shell
218
+type ShellDependantCmdLine struct {
219
+	CmdLine      strslice.StrSlice
220
+	PrependShell bool
221
+}
222
+
223
+// RunCommand : RUN some command yo
224
+//
225
+// run a command and commit the image. Args are automatically prepended with
226
+// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under
227
+// Windows, in the event there is only one argument The difference in processing:
228
+//
229
+// RUN echo hi          # sh -c echo hi       (Linux)
230
+// RUN echo hi          # cmd /S /C echo hi   (Windows)
231
+// RUN [ "echo", "hi" ] # echo hi
232
+//
233
+type RunCommand struct {
234
+	withNameAndCode
235
+	ShellDependantCmdLine
236
+}
237
+
238
+// CmdCommand : CMD foo
239
+//
240
+// Set the default command to run in the container (which may be empty).
241
+// Argument handling is the same as RUN.
242
+//
243
+type CmdCommand struct {
244
+	withNameAndCode
245
+	ShellDependantCmdLine
246
+}
247
+
248
+// HealthCheckCommand : HEALTHCHECK foo
249
+//
250
+// Set the default healthcheck command to run in the container (which may be empty).
251
+// Argument handling is the same as RUN.
252
+//
253
+type HealthCheckCommand struct {
254
+	withNameAndCode
255
+	Health *container.HealthConfig
256
+}
257
+
258
+// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx
259
+//
260
+// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments
261
+// to /usr/sbin/nginx. Uses the default shell if not in JSON format.
262
+//
263
+// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint
264
+// is initialized at newBuilder time instead of through argument parsing.
265
+//
266
+type EntrypointCommand struct {
267
+	withNameAndCode
268
+	ShellDependantCmdLine
269
+}
270
+
271
+// ExposeCommand : EXPOSE 6667/tcp 7000/tcp
272
+//
273
+// Expose ports for links and port mappings. This all ends up in
274
+// req.runConfig.ExposedPorts for runconfig.
275
+//
276
+type ExposeCommand struct {
277
+	withNameAndCode
278
+	Ports []string
279
+}
280
+
281
+// UserCommand : USER foo
282
+//
283
+// Set the user to 'foo' for future commands and when running the
284
+// ENTRYPOINT/CMD at container run time.
285
+//
286
+type UserCommand struct {
287
+	withNameAndCode
288
+	User string
289
+}
290
+
291
+// Expand variables
292
+func (c *UserCommand) Expand(expander SingleWordExpander) error {
293
+	p, err := expander(c.User)
294
+	if err != nil {
295
+		return err
296
+	}
297
+	c.User = p
298
+	return nil
299
+}
300
+
301
+// VolumeCommand : VOLUME /foo
302
+//
303
+// Expose the volume /foo for use. Will also accept the JSON array form.
304
+//
305
+type VolumeCommand struct {
306
+	withNameAndCode
307
+	Volumes []string
308
+}
309
+
310
+// Expand variables
311
+func (c *VolumeCommand) Expand(expander SingleWordExpander) error {
312
+	return expandSliceInPlace(c.Volumes, expander)
313
+}
314
+
315
+// StopSignalCommand : STOPSIGNAL signal
316
+//
317
+// Set the signal that will be used to kill the container.
318
+type StopSignalCommand struct {
319
+	withNameAndCode
320
+	Signal string
321
+}
322
+
323
+// Expand variables
324
+func (c *StopSignalCommand) Expand(expander SingleWordExpander) error {
325
+	p, err := expander(c.Signal)
326
+	if err != nil {
327
+		return err
328
+	}
329
+	c.Signal = p
330
+	return nil
331
+}
332
+
333
+// CheckPlatform checks that the command is supported in the target platform
334
+func (c *StopSignalCommand) CheckPlatform(platform string) error {
335
+	if platform == "windows" {
336
+		return errors.New("The daemon on this platform does not support the command stopsignal")
337
+	}
338
+	return nil
339
+}
340
+
341
+// ArgCommand : ARG name[=value]
342
+//
343
+// Adds the variable foo to the trusted list of variables that can be passed
344
+// to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
345
+// Dockerfile author may optionally set a default value of this variable.
346
+type ArgCommand struct {
347
+	withNameAndCode
348
+	Key   string
349
+	Value *string
350
+}
351
+
352
+// Expand variables
353
+func (c *ArgCommand) Expand(expander SingleWordExpander) error {
354
+	p, err := expander(c.Key)
355
+	if err != nil {
356
+		return err
357
+	}
358
+	c.Key = p
359
+	if c.Value != nil {
360
+		p, err = expander(*c.Value)
361
+		if err != nil {
362
+			return err
363
+		}
364
+		c.Value = &p
365
+	}
366
+	return nil
367
+}
368
+
369
+// ShellCommand : SHELL powershell -command
370
+//
371
+// Set the non-default shell to use.
372
+type ShellCommand struct {
373
+	withNameAndCode
374
+	Shell strslice.StrSlice
375
+}
376
+
377
+// Stage represents a single stage in a multi-stage build
378
+type Stage struct {
379
+	Name       string
380
+	Commands   []Command
381
+	BaseName   string
382
+	SourceCode string
383
+	Platform   specs.Platform
384
+}
385
+
386
+// AddCommand to the stage
387
+func (s *Stage) AddCommand(cmd Command) {
388
+	// todo: validate cmd type
389
+	s.Commands = append(s.Commands, cmd)
390
+}
391
+
392
+// IsCurrentStage check if the stage name is the current stage
393
+func IsCurrentStage(s []Stage, name string) bool {
394
+	if len(s) == 0 {
395
+		return false
396
+	}
397
+	return s[len(s)-1].Name == name
398
+}
399
+
400
+// CurrentStage return the last stage in a slice
401
+func CurrentStage(s []Stage) (*Stage, error) {
402
+	if len(s) == 0 {
403
+		return nil, errors.New("No build stage in current context")
404
+	}
405
+	return &s[len(s)-1], nil
406
+}
407
+
408
+// HasStage looks for the presence of a given stage name
409
+func HasStage(s []Stage, name string) (int, bool) {
410
+	for i, stage := range s {
411
+		// Stage name is case-insensitive by design
412
+		if strings.EqualFold(stage.Name, name) {
413
+			return i, true
414
+		}
415
+	}
416
+	return -1, false
417
+}
0 418
new file mode 100644
... ...
@@ -0,0 +1,9 @@
0
+// +build !windows
1
+
2
+package instructions
3
+
4
+import "fmt"
5
+
6
// errNotJSON reports that an instruction which requires exec (JSON array)
// form was written in shell form. The second argument (the original line)
// is unused on non-Windows platforms.
func errNotJSON(command, _ string) error {
	const reason = " requires the arguments to be in JSON form"
	return fmt.Errorf("%s%s", command, reason)
}
0 9
new file mode 100644
... ...
@@ -0,0 +1,27 @@
0
+package instructions
1
+
2
+import (
3
+	"fmt"
4
+	"path/filepath"
5
+	"regexp"
6
+	"strings"
7
+)
8
+
9
// driveLetterHint matches a quote followed by a drive letter, colon and a
// single backslash, e.g. `"c:\`. Compiled once at package scope instead of
// on every invocation.
var driveLetterHint = regexp.MustCompile(`"[a-z]:\\.*`)

// errNotJSON reports that an instruction which requires exec (JSON array)
// form was written in shell form. For Windows users, give a hint if it
// looks like it might contain a path which hasn't been escaped such as
// ["c:\windows\system32\prog.exe", "-param"], as JSON must be escaped.
// Unfortunate...
//
// Specifically looking for quote-driveletter-colon-backslash, there's no
// double backslash and a [] pair. No, this is not perfect, but it doesn't
// have to be. It's simply a hint to make life a little easier.
func errNotJSON(command, original string) error {
	extra := ""
	// Strip the (lower-cased) command keyword from the line. The original
	// code lower-cased the result a second time, which was redundant: the
	// input to Replace is already lower-case.
	original = filepath.FromSlash(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))
	if driveLetterHint.MatchString(original) &&
		!strings.Contains(original, `\\`) &&
		strings.Contains(original, "[") &&
		strings.Contains(original, "]") {
		extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original)
	}
	return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
}
0 27
new file mode 100644
... ...
@@ -0,0 +1,637 @@
0
+package instructions
1
+
2
+import (
3
+	"fmt"
4
+	"regexp"
5
+	"sort"
6
+	"strconv"
7
+	"strings"
8
+	"time"
9
+
10
+	"github.com/docker/docker/api/types/container"
11
+	"github.com/docker/docker/api/types/strslice"
12
+	"github.com/docker/docker/pkg/system"
13
+	"github.com/moby/buildkit/frontend/dockerfile/command"
14
+	"github.com/moby/buildkit/frontend/dockerfile/parser"
15
+	"github.com/pkg/errors"
16
+)
17
+
18
// parseRequest is the pre-digested form of a single AST statement handed to
// the per-instruction parse functions.
type parseRequest struct {
	command    string          // instruction name as carried by the AST node
	args       []string        // positional arguments flattened from the node chain
	attributes map[string]bool // parser attributes, e.g. "json" for exec-form args
	flags      *BFlags         // --flag values supplied on the instruction
	original   string          // the raw original line before parsing
}
25
+
26
// nodeArgs flattens the linked Next chain of an AST node into a string
// slice. A node with exactly one child contributes the child's value plus
// the child's own flattened arguments (recursively).
// NOTE(review): nodes with more than one child are silently skipped —
// presumably such nodes cannot occur for argument chains; confirm.
func nodeArgs(node *parser.Node) []string {
	result := []string{}
	for ; node.Next != nil; node = node.Next {
		arg := node.Next
		if len(arg.Children) == 0 {
			result = append(result, arg.Value)
		} else if len(arg.Children) == 1 {
			//sub command
			result = append(result, arg.Children[0].Value)
			result = append(result, nodeArgs(arg.Children[0])...)
		}
	}
	return result
}
40
+
41
// newParseRequestFromNode converts an AST node into the parseRequest shape
// consumed by the per-instruction parsers.
func newParseRequestFromNode(node *parser.Node) parseRequest {
	return parseRequest{
		command:    node.Value,
		args:       nodeArgs(node),
		attributes: node.Attributes,
		original:   node.Original,
		flags:      NewBFlagsWithArgs(node.Flags),
	}
}
50
+
51
// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement)
//
// The switch below must cover exactly the constants declared in the
// command package; any node value not listed yields UnknownInstruction.
func ParseInstruction(node *parser.Node) (interface{}, error) {
	req := newParseRequestFromNode(node)
	switch node.Value {
	case command.Env:
		return parseEnv(req)
	case command.Maintainer:
		return parseMaintainer(req)
	case command.Label:
		return parseLabel(req)
	case command.Add:
		return parseAdd(req)
	case command.Copy:
		return parseCopy(req)
	case command.From:
		// FROM starts a new build stage rather than a plain command.
		return parseFrom(req)
	case command.Onbuild:
		return parseOnBuild(req)
	case command.Workdir:
		return parseWorkdir(req)
	case command.Run:
		return parseRun(req)
	case command.Cmd:
		return parseCmd(req)
	case command.Healthcheck:
		return parseHealthcheck(req)
	case command.Entrypoint:
		return parseEntrypoint(req)
	case command.Expose:
		return parseExpose(req)
	case command.User:
		return parseUser(req)
	case command.Volume:
		return parseVolume(req)
	case command.StopSignal:
		return parseStopSignal(req)
	case command.Arg:
		return parseArg(req)
	case command.Shell:
		return parseShell(req)
	}

	return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine}
}
95
+
96
+// ParseCommand converts an AST to a typed Command
97
+func ParseCommand(node *parser.Node) (Command, error) {
98
+	s, err := ParseInstruction(node)
99
+	if err != nil {
100
+		return nil, err
101
+	}
102
+	if c, ok := s.(Command); ok {
103
+		return c, nil
104
+	}
105
+	return nil, errors.Errorf("%T is not a command type", s)
106
+}
107
+
108
// UnknownInstruction represents an error occurring when a command is unresolvable
type UnknownInstruction struct {
	Line        int    // Dockerfile line where the unknown instruction starts
	Instruction string // the unrecognized instruction keyword
}

// Error implements the error interface; the keyword is upper-cased for display.
func (e *UnknownInstruction) Error() string {
	return fmt.Sprintf("unknown instruction: %s", strings.ToUpper(e.Instruction))
}
117
+
118
+// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction
119
+func IsUnknownInstruction(err error) bool {
120
+	_, ok := err.(*UnknownInstruction)
121
+	if !ok {
122
+		var pe *parseError
123
+		if pe, ok = err.(*parseError); ok {
124
+			_, ok = pe.inner.(*UnknownInstruction)
125
+		}
126
+	}
127
+	return ok
128
+}
129
+
130
// parseError wraps a failure with the AST node that triggered it so the
// message can report the Dockerfile line number.
type parseError struct {
	inner error        // the underlying parse failure
	node  *parser.Node // statement being parsed when the failure occurred
}

// Error implements the error interface, prefixing the inner message with
// the source line number.
func (e *parseError) Error() string {
	return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error())
}
138
+
139
// Parse a docker file into a collection of buildable stages.
// ARG instructions that appear before the first FROM are returned separately
// as metaArgs (build-wide arguments); any other instruction before the first
// stage is an error (CurrentStage fails below).
func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) {
	for _, n := range ast.Children {
		cmd, err := ParseInstruction(n)
		if err != nil {
			return nil, nil, &parseError{inner: err, node: n}
		}
		if len(stages) == 0 {
			// meta arg case: ARGs before the first FROM are collected, not
			// attached to any stage.
			if a, isArg := cmd.(*ArgCommand); isArg {
				metaArgs = append(metaArgs, *a)
				continue
			}
		}
		switch c := cmd.(type) {
		case *Stage:
			stages = append(stages, *c)
		case Command:
			// every ordinary command is attached to the most recent stage
			stage, err := CurrentStage(stages)
			if err != nil {
				return nil, nil, err
			}
			stage.AddCommand(c)
		default:
			return nil, nil, errors.Errorf("%T is not a command type", cmd)
		}

	}
	return stages, metaArgs, nil
}
169
+
170
+func parseKvps(args []string, cmdName string) (KeyValuePairs, error) {
171
+	if len(args) == 0 {
172
+		return nil, errAtLeastOneArgument(cmdName)
173
+	}
174
+	if len(args)%2 != 0 {
175
+		// should never get here, but just in case
176
+		return nil, errTooManyArguments(cmdName)
177
+	}
178
+	var res KeyValuePairs
179
+	for j := 0; j < len(args); j += 2 {
180
+		if len(args[j]) == 0 {
181
+			return nil, errBlankCommandNames(cmdName)
182
+		}
183
+		name := args[j]
184
+		value := args[j+1]
185
+		res = append(res, KeyValuePair{Key: name, Value: value})
186
+	}
187
+	return res, nil
188
+}
189
+
190
+func parseEnv(req parseRequest) (*EnvCommand, error) {
191
+
192
+	if err := req.flags.Parse(); err != nil {
193
+		return nil, err
194
+	}
195
+	envs, err := parseKvps(req.args, "ENV")
196
+	if err != nil {
197
+		return nil, err
198
+	}
199
+	return &EnvCommand{
200
+		Env:             envs,
201
+		withNameAndCode: newWithNameAndCode(req),
202
+	}, nil
203
+}
204
+
205
+func parseMaintainer(req parseRequest) (*MaintainerCommand, error) {
206
+	if len(req.args) != 1 {
207
+		return nil, errExactlyOneArgument("MAINTAINER")
208
+	}
209
+
210
+	if err := req.flags.Parse(); err != nil {
211
+		return nil, err
212
+	}
213
+	return &MaintainerCommand{
214
+		Maintainer:      req.args[0],
215
+		withNameAndCode: newWithNameAndCode(req),
216
+	}, nil
217
+}
218
+
219
+func parseLabel(req parseRequest) (*LabelCommand, error) {
220
+
221
+	if err := req.flags.Parse(); err != nil {
222
+		return nil, err
223
+	}
224
+
225
+	labels, err := parseKvps(req.args, "LABEL")
226
+	if err != nil {
227
+		return nil, err
228
+	}
229
+
230
+	return &LabelCommand{
231
+		Labels:          labels,
232
+		withNameAndCode: newWithNameAndCode(req),
233
+	}, nil
234
+}
235
+
236
+func parseAdd(req parseRequest) (*AddCommand, error) {
237
+	if len(req.args) < 2 {
238
+		return nil, errNoDestinationArgument("ADD")
239
+	}
240
+	flChown := req.flags.AddString("chown", "")
241
+	if err := req.flags.Parse(); err != nil {
242
+		return nil, err
243
+	}
244
+	return &AddCommand{
245
+		SourcesAndDest:  SourcesAndDest(req.args),
246
+		withNameAndCode: newWithNameAndCode(req),
247
+		Chown:           flChown.Value,
248
+	}, nil
249
+}
250
+
251
+func parseCopy(req parseRequest) (*CopyCommand, error) {
252
+	if len(req.args) < 2 {
253
+		return nil, errNoDestinationArgument("COPY")
254
+	}
255
+	flChown := req.flags.AddString("chown", "")
256
+	flFrom := req.flags.AddString("from", "")
257
+	if err := req.flags.Parse(); err != nil {
258
+		return nil, err
259
+	}
260
+	return &CopyCommand{
261
+		SourcesAndDest:  SourcesAndDest(req.args),
262
+		From:            flFrom.Value,
263
+		withNameAndCode: newWithNameAndCode(req),
264
+		Chown:           flChown.Value,
265
+	}, nil
266
+}
267
+
268
+func parseFrom(req parseRequest) (*Stage, error) {
269
+	stageName, err := parseBuildStageName(req.args)
270
+	if err != nil {
271
+		return nil, err
272
+	}
273
+
274
+	flPlatform := req.flags.AddString("platform", "")
275
+	if err := req.flags.Parse(); err != nil {
276
+		return nil, err
277
+	}
278
+	code := strings.TrimSpace(req.original)
279
+	return &Stage{
280
+		BaseName:   req.args[0],
281
+		Name:       stageName,
282
+		SourceCode: code,
283
+		Commands:   []Command{},
284
+		Platform:   *system.ParsePlatform(flPlatform.Value),
285
+	}, nil
286
+
287
+}
288
+
289
+func parseBuildStageName(args []string) (string, error) {
290
+	stageName := ""
291
+	switch {
292
+	case len(args) == 3 && strings.EqualFold(args[1], "as"):
293
+		stageName = strings.ToLower(args[2])
294
+		if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok {
295
+			return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName)
296
+		}
297
+	case len(args) != 1:
298
+		return "", errors.New("FROM requires either one or three arguments")
299
+	}
300
+
301
+	return stageName, nil
302
+}
303
+
304
+func parseOnBuild(req parseRequest) (*OnbuildCommand, error) {
305
+	if len(req.args) == 0 {
306
+		return nil, errAtLeastOneArgument("ONBUILD")
307
+	}
308
+	if err := req.flags.Parse(); err != nil {
309
+		return nil, err
310
+	}
311
+
312
+	triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0]))
313
+	switch strings.ToUpper(triggerInstruction) {
314
+	case "ONBUILD":
315
+		return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
316
+	case "MAINTAINER", "FROM":
317
+		return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
318
+	}
319
+
320
+	original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "")
321
+	return &OnbuildCommand{
322
+		Expression:      original,
323
+		withNameAndCode: newWithNameAndCode(req),
324
+	}, nil
325
+
326
+}
327
+
328
+func parseWorkdir(req parseRequest) (*WorkdirCommand, error) {
329
+	if len(req.args) != 1 {
330
+		return nil, errExactlyOneArgument("WORKDIR")
331
+	}
332
+
333
+	err := req.flags.Parse()
334
+	if err != nil {
335
+		return nil, err
336
+	}
337
+	return &WorkdirCommand{
338
+		Path:            req.args[0],
339
+		withNameAndCode: newWithNameAndCode(req),
340
+	}, nil
341
+
342
+}
343
+
344
+func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine {
345
+	args := handleJSONArgs(req.args, req.attributes)
346
+	cmd := strslice.StrSlice(args)
347
+	if emptyAsNil && len(cmd) == 0 {
348
+		cmd = nil
349
+	}
350
+	return ShellDependantCmdLine{
351
+		CmdLine:      cmd,
352
+		PrependShell: !req.attributes["json"],
353
+	}
354
+}
355
+
356
+func parseRun(req parseRequest) (*RunCommand, error) {
357
+
358
+	if err := req.flags.Parse(); err != nil {
359
+		return nil, err
360
+	}
361
+	return &RunCommand{
362
+		ShellDependantCmdLine: parseShellDependentCommand(req, false),
363
+		withNameAndCode:       newWithNameAndCode(req),
364
+	}, nil
365
+
366
+}
367
+
368
+func parseCmd(req parseRequest) (*CmdCommand, error) {
369
+	if err := req.flags.Parse(); err != nil {
370
+		return nil, err
371
+	}
372
+	return &CmdCommand{
373
+		ShellDependantCmdLine: parseShellDependentCommand(req, false),
374
+		withNameAndCode:       newWithNameAndCode(req),
375
+	}, nil
376
+
377
+}
378
+
379
+func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) {
380
+	if err := req.flags.Parse(); err != nil {
381
+		return nil, err
382
+	}
383
+
384
+	cmd := &EntrypointCommand{
385
+		ShellDependantCmdLine: parseShellDependentCommand(req, true),
386
+		withNameAndCode:       newWithNameAndCode(req),
387
+	}
388
+
389
+	return cmd, nil
390
+}
391
+
392
+// parseOptInterval(flag) is the duration of flag.Value, or 0 if
393
+// empty. An error is reported if the value is given and less than minimum duration.
394
+func parseOptInterval(f *Flag) (time.Duration, error) {
395
+	s := f.Value
396
+	if s == "" {
397
+		return 0, nil
398
+	}
399
+	d, err := time.ParseDuration(s)
400
+	if err != nil {
401
+		return 0, err
402
+	}
403
+	if d < container.MinimumDuration {
404
+		return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration)
405
+	}
406
+	return d, nil
407
+}
408
// parseHealthcheck builds a HealthCheckCommand from a HEALTHCHECK statement.
// Two forms are accepted:
//
//	HEALTHCHECK NONE                     — disables any inherited healthcheck
//	HEALTHCHECK [flags] CMD command ...  — configures a check command
//
// Flags (--interval, --timeout, --start-period, --retries) are only parsed
// in the CMD form; for NONE any extra argument is an error.
func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) {
	if len(req.args) == 0 {
		return nil, errAtLeastOneArgument("HEALTHCHECK")
	}
	cmd := &HealthCheckCommand{
		withNameAndCode: newWithNameAndCode(req),
	}

	typ := strings.ToUpper(req.args[0])
	args := req.args[1:]
	if typ == "NONE" {
		if len(args) != 0 {
			return nil, errors.New("HEALTHCHECK NONE takes no arguments")
		}
		// A Test of ["NONE"] is the canonical "disabled" marker.
		test := strslice.StrSlice{typ}
		cmd.Health = &container.HealthConfig{
			Test: test,
		}
	} else {

		healthcheck := container.HealthConfig{}

		flInterval := req.flags.AddString("interval", "")
		flTimeout := req.flags.AddString("timeout", "")
		flStartPeriod := req.flags.AddString("start-period", "")
		flRetries := req.flags.AddString("retries", "")

		if err := req.flags.Parse(); err != nil {
			return nil, err
		}

		switch typ {
		case "CMD":
			cmdSlice := handleJSONArgs(args, req.attributes)
			if len(cmdSlice) == 0 {
				return nil, errors.New("Missing command after HEALTHCHECK CMD")
			}

			// Shell form is recorded as CMD-SHELL so the runtime knows to
			// wrap the command in the configured shell.
			if !req.attributes["json"] {
				typ = "CMD-SHELL"
			}

			healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
		default:
			return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
		}

		interval, err := parseOptInterval(flInterval)
		if err != nil {
			return nil, err
		}
		healthcheck.Interval = interval

		timeout, err := parseOptInterval(flTimeout)
		if err != nil {
			return nil, err
		}
		healthcheck.Timeout = timeout

		startPeriod, err := parseOptInterval(flStartPeriod)
		if err != nil {
			return nil, err
		}
		healthcheck.StartPeriod = startPeriod

		if flRetries.Value != "" {
			retries, err := strconv.ParseInt(flRetries.Value, 10, 32)
			if err != nil {
				return nil, err
			}
			if retries < 1 {
				return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries)
			}
			healthcheck.Retries = int(retries)
		} else {
			// 0 means "use the daemon default" rather than "never retry".
			healthcheck.Retries = 0
		}

		cmd.Health = &healthcheck
	}
	return cmd, nil
}
490
+
491
+func parseExpose(req parseRequest) (*ExposeCommand, error) {
492
+	portsTab := req.args
493
+
494
+	if len(req.args) == 0 {
495
+		return nil, errAtLeastOneArgument("EXPOSE")
496
+	}
497
+
498
+	if err := req.flags.Parse(); err != nil {
499
+		return nil, err
500
+	}
501
+
502
+	sort.Strings(portsTab)
503
+	return &ExposeCommand{
504
+		Ports:           portsTab,
505
+		withNameAndCode: newWithNameAndCode(req),
506
+	}, nil
507
+}
508
+
509
+func parseUser(req parseRequest) (*UserCommand, error) {
510
+	if len(req.args) != 1 {
511
+		return nil, errExactlyOneArgument("USER")
512
+	}
513
+
514
+	if err := req.flags.Parse(); err != nil {
515
+		return nil, err
516
+	}
517
+	return &UserCommand{
518
+		User:            req.args[0],
519
+		withNameAndCode: newWithNameAndCode(req),
520
+	}, nil
521
+}
522
+
523
+func parseVolume(req parseRequest) (*VolumeCommand, error) {
524
+	if len(req.args) == 0 {
525
+		return nil, errAtLeastOneArgument("VOLUME")
526
+	}
527
+
528
+	if err := req.flags.Parse(); err != nil {
529
+		return nil, err
530
+	}
531
+
532
+	cmd := &VolumeCommand{
533
+		withNameAndCode: newWithNameAndCode(req),
534
+	}
535
+
536
+	for _, v := range req.args {
537
+		v = strings.TrimSpace(v)
538
+		if v == "" {
539
+			return nil, errors.New("VOLUME specified can not be an empty string")
540
+		}
541
+		cmd.Volumes = append(cmd.Volumes, v)
542
+	}
543
+	return cmd, nil
544
+
545
+}
546
+
547
+func parseStopSignal(req parseRequest) (*StopSignalCommand, error) {
548
+	if len(req.args) != 1 {
549
+		return nil, errExactlyOneArgument("STOPSIGNAL")
550
+	}
551
+	sig := req.args[0]
552
+
553
+	cmd := &StopSignalCommand{
554
+		Signal:          sig,
555
+		withNameAndCode: newWithNameAndCode(req),
556
+	}
557
+	return cmd, nil
558
+
559
+}
560
+
561
+func parseArg(req parseRequest) (*ArgCommand, error) {
562
+	if len(req.args) != 1 {
563
+		return nil, errExactlyOneArgument("ARG")
564
+	}
565
+
566
+	var (
567
+		name     string
568
+		newValue *string
569
+	)
570
+
571
+	arg := req.args[0]
572
+	// 'arg' can just be a name or name-value pair. Note that this is different
573
+	// from 'env' that handles the split of name and value at the parser level.
574
+	// The reason for doing it differently for 'arg' is that we support just
575
+	// defining an arg and not assign it a value (while 'env' always expects a
576
+	// name-value pair). If possible, it will be good to harmonize the two.
577
+	if strings.Contains(arg, "=") {
578
+		parts := strings.SplitN(arg, "=", 2)
579
+		if len(parts[0]) == 0 {
580
+			return nil, errBlankCommandNames("ARG")
581
+		}
582
+
583
+		name = parts[0]
584
+		newValue = &parts[1]
585
+	} else {
586
+		name = arg
587
+	}
588
+
589
+	return &ArgCommand{
590
+		Key:             name,
591
+		Value:           newValue,
592
+		withNameAndCode: newWithNameAndCode(req),
593
+	}, nil
594
+}
595
+
596
+func parseShell(req parseRequest) (*ShellCommand, error) {
597
+	if err := req.flags.Parse(); err != nil {
598
+		return nil, err
599
+	}
600
+	shellSlice := handleJSONArgs(req.args, req.attributes)
601
+	switch {
602
+	case len(shellSlice) == 0:
603
+		// SHELL []
604
+		return nil, errAtLeastOneArgument("SHELL")
605
+	case req.attributes["json"]:
606
+		// SHELL ["powershell", "-command"]
607
+
608
+		return &ShellCommand{
609
+			Shell:           strslice.StrSlice(shellSlice),
610
+			withNameAndCode: newWithNameAndCode(req),
611
+		}, nil
612
+	default:
613
+		// SHELL powershell -command - not JSON
614
+		return nil, errNotJSON("SHELL", req.original)
615
+	}
616
+}
617
+
618
// errAtLeastOneArgument reports an instruction invoked with no arguments.
func errAtLeastOneArgument(command string) error {
	return errors.Errorf("%s requires at least one argument", command)
}

// errExactlyOneArgument reports an instruction that takes a single argument
// but received zero or several.
func errExactlyOneArgument(command string) error {
	return errors.Errorf("%s requires exactly one argument", command)
}

// errNoDestinationArgument reports an ADD/COPY with fewer than two
// arguments, i.e. no destination.
func errNoDestinationArgument(command string) error {
	return errors.Errorf("%s requires at least two arguments, but only one was provided. Destination could not be determined.", command)
}

// errBlankCommandNames reports an empty key in a key/value instruction.
func errBlankCommandNames(command string) error {
	return errors.Errorf("%s names can not be blank", command)
}

// errTooManyArguments reports an odd-length key/value argument list.
func errTooManyArguments(command string) error {
	return errors.Errorf("Bad input to %s, too many arguments", command)
}
0 637
new file mode 100644
... ...
@@ -0,0 +1,19 @@
0
+package instructions
1
+
2
+import "strings"
3
+
4
// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile
// for exec form it returns untouched args slice
// for shell form it returns concatenated args as the first element of a slice
func handleJSONArgs(args []string, attributes map[string]bool) []string {
	switch {
	case len(args) == 0:
		return []string{}
	case attributes != nil && attributes["json"]:
		// exec form: already a parsed JSON array
		return args
	default:
		// shell form: a single command string
		return []string{strings.Join(args, " ")}
	}
}
0 19
new file mode 100644
... ...
@@ -0,0 +1,368 @@
0
+package parser
1
+
2
+// line parsers are dispatch calls that parse a single unit of text into a
3
+// Node object which contains the whole statement. Dockerfiles have varied
4
+// (but not usually unique, see ONBUILD for a unique example) parsing rules
5
+// per-command, and these unify the processing in a way that makes it
6
+// manageable.
7
+
8
+import (
9
+	"encoding/json"
10
+	"errors"
11
+	"fmt"
12
+	"strings"
13
+	"unicode"
14
+	"unicode/utf8"
15
+)
16
+
17
var (
	// errDockerfileNotStringArray is returned when a JSON-form instruction
	// contains elements that are not strings.
	errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only")
)

const (
	// commandLabel is the canonical (upper-case) LABEL command name, used
	// in error messages produced by parseNameVal.
	commandLabel = "LABEL"
)
24
+
25
// ignore the current argument. This will still leave a command parsed, but
// will not incorporate the arguments into the ast.
// Returns an empty node with no attributes and no error.
func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) {
	return &Node{}, nil, nil
}
30
+
31
+// used for onbuild. Could potentially be used for anything that represents a
32
+// statement with sub-statements.
33
+//
34
+// ONBUILD RUN foo bar -> (onbuild (run foo bar))
35
+//
36
+func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) {
37
+	if rest == "" {
38
+		return nil, nil, nil
39
+	}
40
+
41
+	child, err := newNodeFromLine(rest, d)
42
+	if err != nil {
43
+		return nil, nil, err
44
+	}
45
+
46
+	return &Node{Children: []*Node{child}}, nil, nil
47
+}
48
+
49
// helper to parse words (i.e space delimited or quoted strings) in a statement.
// The quotes are preserved as part of this function and they are stripped later
// as part of processWords().
//
// This is a three-state machine over the runes of rest:
//   - inSpaces: skipping whitespace between words
//   - inWord:   accumulating an unquoted word
//   - inQuote:  inside a ' or " quoted section of a word
// The escape token (Directive.escapeToken) is copied through verbatim along
// with the character it escapes; inside single quotes it has no special
// meaning.
func parseWords(rest string, d *Directive) []string {
	const (
		inSpaces = iota // looking for start of a word
		inWord
		inQuote
	)

	words := []string{}
	phase := inSpaces
	word := ""
	quote := '\000' // the active quote character while in inQuote
	blankOK := false // a quoted word may legitimately be empty ("")
	var ch rune
	var chWidth int

	// The loop deliberately runs one position past the end of rest so the
	// final word can be flushed; ch/chWidth are not refreshed on that pass.
	for pos := 0; pos <= len(rest); pos += chWidth {
		if pos != len(rest) {
			ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
		}

		if phase == inSpaces { // Looking for start of word
			if pos == len(rest) { // end of input
				break
			}
			if unicode.IsSpace(ch) { // skip spaces
				continue
			}
			phase = inWord // found it, fall through
		}
		if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
			// end of input: flush the word in progress
			if blankOK || len(word) > 0 {
				words = append(words, word)
			}
			break
		}
		if phase == inWord {
			if unicode.IsSpace(ch) {
				// word boundary: emit and reset
				phase = inSpaces
				if blankOK || len(word) > 0 {
					words = append(words, word)
				}
				word = ""
				blankOK = false
				continue
			}
			if ch == '\'' || ch == '"' {
				quote = ch
				blankOK = true
				phase = inQuote
			}
			if ch == d.escapeToken {
				if pos+chWidth == len(rest) {
					continue // just skip an escape token at end of line
				}
				// If we're not quoted and we see an escape token, then always just
				// add the escape token plus the char to the word, even if the char
				// is a quote.
				word += string(ch)
				pos += chWidth
				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
			}
			word += string(ch)
			continue
		}
		if phase == inQuote {
			if ch == quote {
				// closing quote: back to word mode (quote char is kept)
				phase = inWord
			}
			// The escape token is special except for ' quotes - can't escape anything for '
			if ch == d.escapeToken && quote != '\'' {
				if pos+chWidth == len(rest) {
					phase = inWord
					continue // just skip the escape token at end
				}
				pos += chWidth
				word += string(ch)
				ch, chWidth = utf8.DecodeRuneInString(rest[pos:])
			}
			word += string(ch)
		}
	}

	return words
}
136
+
137
+// parse environment like statements. Note that this does *not* handle
138
+// variable interpolation, which will be handled in the evaluator.
139
+func parseNameVal(rest string, key string, d *Directive) (*Node, error) {
140
+	// This is kind of tricky because we need to support the old
141
+	// variant:   KEY name value
142
+	// as well as the new one:    KEY name=value ...
143
+	// The trigger to know which one is being used will be whether we hit
144
+	// a space or = first.  space ==> old, "=" ==> new
145
+
146
+	words := parseWords(rest, d)
147
+	if len(words) == 0 {
148
+		return nil, nil
149
+	}
150
+
151
+	// Old format (KEY name value)
152
+	if !strings.Contains(words[0], "=") {
153
+		parts := tokenWhitespace.Split(rest, 2)
154
+		if len(parts) < 2 {
155
+			return nil, fmt.Errorf(key + " must have two arguments")
156
+		}
157
+		return newKeyValueNode(parts[0], parts[1]), nil
158
+	}
159
+
160
+	var rootNode *Node
161
+	var prevNode *Node
162
+	for _, word := range words {
163
+		if !strings.Contains(word, "=") {
164
+			return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
165
+		}
166
+
167
+		parts := strings.SplitN(word, "=", 2)
168
+		node := newKeyValueNode(parts[0], parts[1])
169
+		rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode)
170
+	}
171
+
172
+	return rootNode, nil
173
+}
174
+
175
+func newKeyValueNode(key, value string) *Node {
176
+	return &Node{
177
+		Value: key,
178
+		Next:  &Node{Value: value},
179
+	}
180
+}
181
+
182
// appendKeyValueNode links a key/value pair (node plus its value at
// node.Next) onto an existing chain, returning the possibly-updated root and
// the new tail.
func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) {
	if rootNode == nil {
		rootNode = node
	}
	if prevNode != nil {
		prevNode.Next = node
	}

	// Advance past the value node (node.Next) so the next pair is linked
	// after the value, not after the key.
	prevNode = node.Next
	return rootNode, prevNode
}
193
+
194
+func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) {
195
+	node, err := parseNameVal(rest, "ENV", d)
196
+	return node, nil, err
197
+}
198
+
199
+func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) {
200
+	node, err := parseNameVal(rest, commandLabel, d)
201
+	return node, nil, err
202
+}
203
+
204
+// parses a statement containing one or more keyword definition(s) and/or
205
+// value assignments, like `name1 name2= name3="" name4=value`.
206
+// Note that this is a stricter format than the old format of assignment,
207
+// allowed by parseNameVal(), in a way that this only allows assignment of the
208
+// form `keyword=[<value>]` like  `name2=`, `name3=""`, and `name4=value` above.
209
+// In addition, a keyword definition alone is of the form `keyword` like `name1`
210
+// above. And the assignments `name2=` and `name3=""` are equivalent and
211
+// assign an empty value to the respective keywords.
212
+func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) {
213
+	words := parseWords(rest, d)
214
+	if len(words) == 0 {
215
+		return nil, nil, nil
216
+	}
217
+
218
+	var (
219
+		rootnode *Node
220
+		prevNode *Node
221
+	)
222
+	for i, word := range words {
223
+		node := &Node{}
224
+		node.Value = word
225
+		if i == 0 {
226
+			rootnode = node
227
+		} else {
228
+			prevNode.Next = node
229
+		}
230
+		prevNode = node
231
+	}
232
+
233
+	return rootnode, nil, nil
234
+}
235
+
236
// parses a whitespace-delimited set of arguments. The result is effectively a
// linked list of string arguments.
func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) {
	if rest == "" {
		return nil, nil, nil
	}

	// Build the chain by always appending a fresh (empty) Next node and
	// filling in the current one; prevnode trails one behind node.
	node := &Node{}
	rootnode := node
	prevnode := node
	for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
		prevnode = node
		node.Value = str
		node.Next = &Node{}
		node = node.Next
	}

	// XXX to get around regexp.Split *always* providing an empty string at the
	// end due to how our loop is constructed, nil out the last node in the
	// chain.
	prevnode.Next = nil

	return rootnode, nil, nil
}
260
+
261
+// parseString just wraps the string in quotes and returns a working node.
262
+func parseString(rest string, d *Directive) (*Node, map[string]bool, error) {
263
+	if rest == "" {
264
+		return nil, nil, nil
265
+	}
266
+	n := &Node{}
267
+	n.Value = rest
268
+	return n, nil, nil
269
+}
270
+
271
+// parseJSON converts JSON arrays to an AST.
272
+func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
273
+	rest = strings.TrimLeftFunc(rest, unicode.IsSpace)
274
+	if !strings.HasPrefix(rest, "[") {
275
+		return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest)
276
+	}
277
+
278
+	var myJSON []interface{}
279
+	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
280
+		return nil, nil, err
281
+	}
282
+
283
+	var top, prev *Node
284
+	for _, str := range myJSON {
285
+		s, ok := str.(string)
286
+		if !ok {
287
+			return nil, nil, errDockerfileNotStringArray
288
+		}
289
+
290
+		node := &Node{Value: s}
291
+		if prev == nil {
292
+			top = node
293
+		} else {
294
+			prev.Next = node
295
+		}
296
+		prev = node
297
+	}
298
+
299
+	return top, map[string]bool{"json": true}, nil
300
+}
301
+
302
+// parseMaybeJSON determines if the argument appears to be a JSON array. If
303
+// so, passes to parseJSON; if not, quotes the result and returns a single
304
+// node.
305
+func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) {
306
+	if rest == "" {
307
+		return nil, nil, nil
308
+	}
309
+
310
+	node, attrs, err := parseJSON(rest, d)
311
+
312
+	if err == nil {
313
+		return node, attrs, nil
314
+	}
315
+	if err == errDockerfileNotStringArray {
316
+		return nil, nil, err
317
+	}
318
+
319
+	node = &Node{}
320
+	node.Value = rest
321
+	return node, nil, nil
322
+}
323
+
324
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If
325
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace
326
+// delimited string.
327
+func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) {
328
+	node, attrs, err := parseJSON(rest, d)
329
+
330
+	if err == nil {
331
+		return node, attrs, nil
332
+	}
333
+	if err == errDockerfileNotStringArray {
334
+		return nil, nil, err
335
+	}
336
+
337
+	return parseStringsWhitespaceDelimited(rest, d)
338
+}
339
+
340
// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument.
// The first whitespace-delimited token (e.g. CMD or NONE) becomes the head
// node; the remainder is parsed via parseMaybeJSON and linked after it.
func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) {
	// Find end of first argument
	var sep int
	for ; sep < len(rest); sep++ {
		if unicode.IsSpace(rune(rest[sep])) {
			break
		}
	}
	// next: index of the first non-space byte after the first argument
	next := sep
	for ; next < len(rest); next++ {
		if !unicode.IsSpace(rune(rest[next])) {
			break
		}
	}

	// empty input: nothing to parse
	if sep == 0 {
		return nil, nil, nil
	}

	typ := rest[:sep]
	cmd, attrs, err := parseMaybeJSON(rest[next:], d)
	if err != nil {
		return nil, nil, err
	}

	return &Node{Value: typ, Next: cmd}, attrs, err
}
0 368
new file mode 100644
... ...
@@ -0,0 +1,327 @@
0
+// Package parser implements a parser and parse tree dumper for Dockerfiles.
1
+package parser
2
+
3
+import (
4
+	"bufio"
5
+	"bytes"
6
+	"fmt"
7
+	"io"
8
+	"regexp"
9
+	"strconv"
10
+	"strings"
11
+	"unicode"
12
+
13
+	"github.com/moby/buildkit/frontend/dockerfile/command"
14
+	"github.com/pkg/errors"
15
+)
16
+
17
// Node is a structure used to represent a parse tree.
//
// In the node there are three fields, Value, Next, and Children. Value is the
// current token's string value. Next is always the next non-child token, and
// children contains all the children. Here's an example:
//
// (value next (child child-next child-next-next) next-next)
//
// This data structure is frankly pretty lousy for handling complex languages,
// but lucky for us the Dockerfile isn't very complicated. This structure
// works a little more effectively than a "proper" parse tree for our needs.
//
type Node struct {
	Value      string          // actual content
	Next       *Node           // the next item in the current sexp
	Children   []*Node         // the children of this sexp
	Attributes map[string]bool // special attributes for this node
	Original   string          // original line used before parsing
	Flags      []string        // only top Node should have this set
	StartLine  int             // the line in the original dockerfile where the node begins
	endLine    int             // the line in the original dockerfile where the node ends; unexported, maintained via lines()/AddChild
}
39
+
40
+// Dump dumps the AST defined by `node` as a list of sexps.
41
+// Returns a string suitable for printing.
42
+func (node *Node) Dump() string {
43
+	str := ""
44
+	str += node.Value
45
+
46
+	if len(node.Flags) > 0 {
47
+		str += fmt.Sprintf(" %q", node.Flags)
48
+	}
49
+
50
+	for _, n := range node.Children {
51
+		str += "(" + n.Dump() + ")\n"
52
+	}
53
+
54
+	for n := node.Next; n != nil; n = n.Next {
55
+		if len(n.Children) > 0 {
56
+			str += " " + n.Dump()
57
+		} else {
58
+			str += " " + strconv.Quote(n.Value)
59
+		}
60
+	}
61
+
62
+	return strings.TrimSpace(str)
63
+}
64
+
65
+func (node *Node) lines(start, end int) {
66
+	node.StartLine = start
67
+	node.endLine = end
68
+}
69
+
70
+// AddChild adds a new child node, and updates line information
71
+func (node *Node) AddChild(child *Node, startLine, endLine int) {
72
+	child.lines(startLine, endLine)
73
+	if node.StartLine < 0 {
74
+		node.StartLine = startLine
75
+	}
76
+	node.endLine = endLine
77
+	node.Children = append(node.Children, child)
78
+}
79
+
80
var (
	// dispatch maps a lowercased instruction name to its argument parser;
	// it is populated in init (see line_parsers.go for the parse functions).
	dispatch           map[string]func(string, *Directive) (*Node, map[string]bool, error)
	tokenWhitespace    = regexp.MustCompile(`[\t\v\f\r ]+`)                                  // a run of non-newline whitespace
	tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`) // the '# escape=<char>' parser directive
	tokenComment       = regexp.MustCompile(`^#.*$`)                                          // a whole-line comment
)

// DefaultEscapeToken is the default escape token
const DefaultEscapeToken = '\\'
89
+
90
// Directive is the structure used during a build run to hold the state of
// parsing directives. Directives may only appear before the first
// instruction or ordinary comment; see possibleParserDirective.
type Directive struct {
	escapeToken           rune           // Current escape token
	lineContinuationRegex *regexp.Regexp // Current line continuation regex (escape token at end of line)
	processingComplete    bool           // Whether we are done looking for directives
	escapeSeen            bool           // Whether the escape directive has been seen
}
98
+
99
+// setEscapeToken sets the default token for escaping characters in a Dockerfile.
100
+func (d *Directive) setEscapeToken(s string) error {
101
+	if s != "`" && s != "\\" {
102
+		return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s)
103
+	}
104
+	d.escapeToken = rune(s[0])
105
+	d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`)
106
+	return nil
107
+}
108
+
109
// possibleParserDirective looks for parser directives, eg '# escapeToken=<char>'.
// Parser directives must precede any builder instruction or other comments,
// and cannot be repeated.
func (d *Directive) possibleParserDirective(line string) error {
	// Once an ordinary line has been seen, directives are no longer
	// recognized.
	if d.processingComplete {
		return nil
	}

	tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
	if len(tecMatch) != 0 {
		for i, n := range tokenEscapeCommand.SubexpNames() {
			if n == "escapechar" {
				if d.escapeSeen {
					return errors.New("only one escape parser directive can be used")
				}
				d.escapeSeen = true
				// Returning here (without setting processingComplete) keeps
				// the directive window open for further directive lines.
				return d.setEscapeToken(tecMatch[i])
			}
		}
	}

	// First non-directive line: stop looking for directives from now on.
	d.processingComplete = true
	return nil
}
133
+
134
+// NewDefaultDirective returns a new Directive with the default escapeToken token
135
+func NewDefaultDirective() *Directive {
136
+	directive := Directive{}
137
+	directive.setEscapeToken(string(DefaultEscapeToken))
138
+	return &directive
139
+}
140
+
141
func init() {
	// Dispatch Table. see line_parsers.go for the parse functions.
	// The command is parsed and mapped to the line parser. The line parser
	// receives the arguments but not the command, and returns an AST after
	// reformulating the arguments according to the rules in the parser
	// functions. Errors are propagated up by Parse() and the resulting AST can
	// be incorporated directly into the existing AST as a next.
	// Keys are the lowercased instruction names defined in the command package.
	dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){
		command.Add:         parseMaybeJSONToList,
		command.Arg:         parseNameOrNameVal,
		command.Cmd:         parseMaybeJSON,
		command.Copy:        parseMaybeJSONToList,
		command.Entrypoint:  parseMaybeJSON,
		command.Env:         parseEnv,
		command.Expose:      parseStringsWhitespaceDelimited,
		command.From:        parseStringsWhitespaceDelimited,
		command.Healthcheck: parseHealthConfig,
		command.Label:       parseLabel,
		command.Maintainer:  parseString,
		command.Onbuild:     parseSubCommand,
		command.Run:         parseMaybeJSON,
		command.Shell:       parseMaybeJSON,
		command.StopSignal:  parseString,
		command.User:        parseString,
		command.Volume:      parseMaybeJSONToList,
		command.Workdir:     parseString,
	}
}
169
+
170
+// newNodeFromLine splits the line into parts, and dispatches to a function
171
+// based on the command and command arguments. A Node is created from the
172
+// result of the dispatch.
173
+func newNodeFromLine(line string, directive *Directive) (*Node, error) {
174
+	cmd, flags, args, err := splitCommand(line)
175
+	if err != nil {
176
+		return nil, err
177
+	}
178
+
179
+	fn := dispatch[cmd]
180
+	// Ignore invalid Dockerfile instructions
181
+	if fn == nil {
182
+		fn = parseIgnore
183
+	}
184
+	next, attrs, err := fn(args, directive)
185
+	if err != nil {
186
+		return nil, err
187
+	}
188
+
189
+	return &Node{
190
+		Value:      cmd,
191
+		Original:   line,
192
+		Flags:      flags,
193
+		Next:       next,
194
+		Attributes: attrs,
195
+	}, nil
196
+}
197
+
198
// Result is the result of parsing a Dockerfile
type Result struct {
	AST         *Node    // root of the parse tree; each instruction is one of its Children
	EscapeToken rune     // escape token in effect (DefaultEscapeToken unless overridden by a directive)
	Warnings    []string // non-fatal issues found while parsing (e.g. empty continuation lines)
}
204
+
205
+// PrintWarnings to the writer
206
+func (r *Result) PrintWarnings(out io.Writer) {
207
+	if len(r.Warnings) == 0 {
208
+		return
209
+	}
210
+	fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n")
211
+}
212
+
213
// Parse reads lines from a Reader, parses the lines into an AST and returns
// the AST and escape token
func Parse(rwc io.Reader) (*Result, error) {
	d := NewDefaultDirective()
	currentLine := 0
	root := &Node{StartLine: -1} // -1 marks "no children yet"; see AddChild
	scanner := bufio.NewScanner(rwc)
	warnings := []string{}

	var err error
	for scanner.Scan() {
		bytesRead := scanner.Bytes()
		if currentLine == 0 {
			// First line, strip the byte-order-marker if present
			bytesRead = bytes.TrimPrefix(bytesRead, utf8bom)
		}
		// Strip whitespace/comments and check for parser directives.
		bytesRead, err = processLine(d, bytesRead, true)
		if err != nil {
			return nil, err
		}
		currentLine++

		startLine := currentLine
		line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d)
		if isEndOfLine && line == "" {
			// Blank line (or a pure comment): no node to emit.
			continue
		}

		// Consume continuation lines while the statement ends with the
		// escape token, concatenating them into a single logical line.
		var hasEmptyContinuationLine bool
		for !isEndOfLine && scanner.Scan() {
			bytesRead, err := processLine(d, scanner.Bytes(), false)
			if err != nil {
				return nil, err
			}
			currentLine++

			if isComment(scanner.Bytes()) {
				// original line was a comment (processLine strips comments)
				continue
			}
			if isEmptyContinuationLine(bytesRead) {
				// Tolerated for now, but flagged below.
				hasEmptyContinuationLine = true
				continue
			}

			continuationLine := string(bytesRead)
			continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d)
			line += continuationLine
		}

		if hasEmptyContinuationLine {
			warnings = append(warnings, "[WARNING]: Empty continuation line found in:\n    "+line)
		}

		child, err := newNodeFromLine(line, d)
		if err != nil {
			return nil, err
		}
		root.AddChild(child, startLine, currentLine)
	}

	if len(warnings) > 0 {
		warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
	}
	return &Result{
		AST:         root,
		Warnings:    warnings,
		EscapeToken: d.escapeToken,
	}, handleScannerError(scanner.Err())
}
283
+
284
// trimComments removes a whole-line comment ('#' ...) from src.
func trimComments(src []byte) []byte {
	return tokenComment.ReplaceAll(src, []byte{})
}

// trimWhitespace removes leading whitespace from src.
func trimWhitespace(src []byte) []byte {
	return bytes.TrimLeftFunc(src, unicode.IsSpace)
}

// isComment reports whether line is a comment line, ignoring leading whitespace.
func isComment(line []byte) bool {
	return tokenComment.Match(trimWhitespace(line))
}

// isEmptyContinuationLine reports whether line contains only whitespace.
func isEmptyContinuationLine(line []byte) bool {
	return len(trimWhitespace(line)) == 0
}

// utf8bom is the UTF-8 byte-order marker, stripped from the first input line by Parse.
var utf8bom = []byte{0xEF, 0xBB, 0xBF}
301
+
302
+func trimContinuationCharacter(line string, d *Directive) (string, bool) {
303
+	if d.lineContinuationRegex.MatchString(line) {
304
+		line = d.lineContinuationRegex.ReplaceAllString(line, "")
305
+		return line, false
306
+	}
307
+	return line, true
308
+}
309
+
310
// processLine strips comments (and optionally leading whitespace) from token,
// and passes the (possibly trimmed) line to the parser-directive detector
// before the comment is removed.
//
// TODO: remove stripLeftWhitespace after deprecation period. It seems silly
// to preserve whitespace on continuation lines. Why is that done?
func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) {
	if stripLeftWhitespace {
		token = trimWhitespace(token)
	}
	return trimComments(token), d.possibleParserDirective(string(token))
}
318
+
319
+func handleScannerError(err error) error {
320
+	switch err {
321
+	case bufio.ErrTooLong:
322
+		return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1)
323
+	default:
324
+		return err
325
+	}
326
+}
0 327
new file mode 100644
... ...
@@ -0,0 +1,118 @@
0
+package parser
1
+
2
+import (
3
+	"strings"
4
+	"unicode"
5
+)
6
+
7
+// splitCommand takes a single line of text and parses out the cmd and args,
8
+// which are used for dispatching to more exact parsing functions.
9
+func splitCommand(line string) (string, []string, string, error) {
10
+	var args string
11
+	var flags []string
12
+
13
+	// Make sure we get the same results irrespective of leading/trailing spaces
14
+	cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2)
15
+	cmd := strings.ToLower(cmdline[0])
16
+
17
+	if len(cmdline) == 2 {
18
+		var err error
19
+		args, flags, err = extractBuilderFlags(cmdline[1])
20
+		if err != nil {
21
+			return "", nil, "", err
22
+		}
23
+	}
24
+
25
+	return cmd, flags, strings.TrimSpace(args), nil
26
+}
27
+
28
// extractBuilderFlags parses the leading "--flag" tokens (the BuilderFlags)
// from line and returns the remaining part of the line plus the flags found.
// Parsing stops at the first word that does not start with "--"; a bare "--"
// also terminates flag parsing. Flag values may be single- or double-quoted,
// and the escape character '\' quotes the following character.
func extractBuilderFlags(line string) (string, []string, error) {
	// Parses the BuilderFlags and returns the remaining part of the line

	// Small state machine: between words, inside a word, or inside a quoted
	// section of a word.
	const (
		inSpaces = iota // looking for start of a word
		inWord
		inQuote
	)

	words := []string{}
	phase := inSpaces
	word := ""
	quote := '\000'
	blankOK := false // set when quotes were seen, so an empty value is still a word
	var ch rune

	// The loop runs one position past the end of line so the final word is
	// flushed by the pos == len(line) checks below.
	for pos := 0; pos <= len(line); pos++ {
		if pos != len(line) {
			ch = rune(line[pos])
		}

		if phase == inSpaces { // Looking for start of word
			if pos == len(line) { // end of input
				break
			}
			if unicode.IsSpace(ch) { // skip spaces
				continue
			}

			// Only keep going if the next word starts with --
			if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
				return line[pos:], words, nil
			}

			phase = inWord // found something with "--", fall through
		}
		if (phase == inWord || phase == inQuote) && (pos == len(line)) {
			// End of input: flush the in-progress word unless it is a bare "--".
			if word != "--" && (blankOK || len(word) > 0) {
				words = append(words, word)
			}
			break
		}
		if phase == inWord {
			if unicode.IsSpace(ch) {
				phase = inSpaces
				if word == "--" {
					// A bare "--" ends flag parsing; the rest is the command.
					return line[pos:], words, nil
				}
				if blankOK || len(word) > 0 {
					words = append(words, word)
				}
				word = ""
				blankOK = false
				continue
			}
			if ch == '\'' || ch == '"' {
				quote = ch
				blankOK = true
				phase = inQuote
				continue
			}
			if ch == '\\' {
				if pos+1 == len(line) {
					continue // just skip \ at end
				}
				pos++
				ch = rune(line[pos])
			}
			word += string(ch)
			continue
		}
		if phase == inQuote {
			if ch == quote {
				phase = inWord
				continue
			}
			if ch == '\\' {
				if pos+1 == len(line) {
					phase = inWord
					continue // just skip \ at end
				}
				pos++
				ch = rune(line[pos])
			}
			word += string(ch)
		}
	}

	return "", words, nil
}
0 118
new file mode 100644
... ...
@@ -0,0 +1,9 @@
0
+// +build !windows
1
+
2
+package shell
3
+
4
// EqualEnvKeys compare two strings and returns true if they are equal.
// On non-Windows platforms environment variable names are case sensitive,
// so this is a plain equality check; the Windows build of this function is
// case insensitive.
func EqualEnvKeys(from, to string) bool {
	return from == to
}
0 9
new file mode 100644
... ...
@@ -0,0 +1,9 @@
0
+package shell
1
+
2
+import "strings"
3
+
4
// EqualEnvKeys compare two strings and returns true if they are equal. On
// Windows this comparison is case insensitive.
func EqualEnvKeys(from, to string) bool {
	// EqualFold performs the case-insensitive comparison without allocating
	// two uppercased copies the way ToUpper(from) == ToUpper(to) would.
	return strings.EqualFold(from, to)
}
0 9
new file mode 100644
... ...
@@ -0,0 +1,373 @@
0
+package shell
1
+
2
+import (
3
+	"bytes"
4
+	"strings"
5
+	"text/scanner"
6
+	"unicode"
7
+
8
+	"github.com/pkg/errors"
9
+)
10
+
11
// Lex performs shell word splitting and variable expansion.
//
// Lex takes a string and an array of env variables and
// process all quotes (" and ') as well as $xxx and ${xxx} env variable
// tokens.  Tries to mimic bash shell process.
// It doesn't support all flavors of ${xx:...} formats but new ones can
// be added by adding code to the "special ${} format processing" section
type Lex struct {
	escapeToken rune // token (\ by default, or `) that escapes quotes and specials
}

// NewLex creates a new Lex which uses escapeToken to escape quotes.
func NewLex(escapeToken rune) *Lex {
	return &Lex{escapeToken: escapeToken}
}

// ProcessWord will use the 'env' list of environment variables,
// and replace any env var references in 'word'.
func (s *Lex) ProcessWord(word string, env []string) (string, error) {
	word, _, err := s.process(word, env)
	return word, err
}

// ProcessWords will use the 'env' list of environment variables,
// and replace any env var references in 'word' then it will also
// return a slice of strings which represents the 'word'
// split up based on spaces - taking into account quotes.  Note that
// this splitting is done **after** the env var substitutions are done.
// Note, each one is trimmed to remove leading and trailing spaces (unless
// they are quoted", but ProcessWord retains spaces between words.
func (s *Lex) ProcessWords(word string, env []string) ([]string, error) {
	_, words, err := s.process(word, env)
	return words, err
}

// process runs the shared expansion pass and returns both the joined result
// and the word-split result.
func (s *Lex) process(word string, env []string) (string, []string, error) {
	sw := &shellWord{
		envs:        env,
		escapeToken: s.escapeToken,
	}
	sw.scanner.Init(strings.NewReader(word))
	return sw.process(word)
}

// shellWord holds the per-call scanning state for one expansion.
type shellWord struct {
	scanner     scanner.Scanner
	envs        []string // "KEY=VALUE" entries consulted by getEnv
	escapeToken rune
}

// process expands source and wraps any error with the offending input.
func (sw *shellWord) process(source string) (string, []string, error) {
	word, words, err := sw.processStopOn(scanner.EOF)
	if err != nil {
		err = errors.Wrapf(err, "failed to process %q", source)
	}
	return word, words, err
}
68
+
69
// wordsStruct accumulates characters into whitespace-separated words while
// tracking whether scanning is currently inside a word.
type wordsStruct struct {
	word   string
	words  []string
	inWord bool
}

// addChar appends ch, treating whitespace as a word separator.
func (w *wordsStruct) addChar(ch rune) {
	if unicode.IsSpace(ch) {
		// Whitespace terminates the current word, if any.
		if w.inWord && len(w.word) != 0 {
			w.words = append(w.words, w.word)
			w.word = ""
			w.inWord = false
		}
		return
	}
	w.addRawChar(ch)
}

// addRawChar appends ch verbatim, even when it is whitespace.
func (w *wordsStruct) addRawChar(ch rune) {
	w.word += string(ch)
	w.inWord = true
}

// addString feeds each rune of str through addChar, so embedded whitespace
// splits words.
func (w *wordsStruct) addString(str string) {
	var sc scanner.Scanner
	sc.Init(strings.NewReader(str))
	for sc.Peek() != scanner.EOF {
		w.addChar(sc.Next())
	}
}

// addRawString appends str verbatim to the current word.
func (w *wordsStruct) addRawString(str string) {
	w.word += str
	w.inWord = true
}

// getWords flushes any in-progress word and returns the accumulated list.
func (w *wordsStruct) getWords() []string {
	if len(w.word) > 0 {
		w.words = append(w.words, w.word)

		// Just in case we're called again by mistake
		w.word = ""
		w.inWord = false
	}
	return w.words
}
115
+
116
// Process the word, starting at 'pos', and stop when we get to the
// end of the word or the 'stopChar' character
func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
	var result bytes.Buffer
	var words wordsStruct

	// Characters that trigger special handling; everything else is copied.
	var charFuncMapping = map[rune]func() (string, error){
		'\'': sw.processSingleQuote,
		'"':  sw.processDoubleQuote,
		'$':  sw.processDollar,
	}

	for sw.scanner.Peek() != scanner.EOF {
		ch := sw.scanner.Peek()

		if stopChar != scanner.EOF && ch == stopChar {
			sw.scanner.Next()
			return result.String(), words.getWords(), nil
		}
		if fn, ok := charFuncMapping[ch]; ok {
			// Call special processing func for certain chars
			tmp, err := fn()
			if err != nil {
				return "", []string{}, err
			}
			result.WriteString(tmp)

			if ch == rune('$') {
				// An expanded variable may contain spaces, so its value
				// participates in word splitting.
				words.addString(tmp)
			} else {
				// Quoted text never splits into multiple words.
				words.addRawString(tmp)
			}
		} else {
			// Not special, just add it to the result
			ch = sw.scanner.Next()

			if ch == sw.escapeToken {
				// '\' (default escape token, but ` allowed) escapes, except end of line
				ch = sw.scanner.Next()

				if ch == scanner.EOF {
					break
				}

				// An escaped character is literal even if it is whitespace.
				words.addRawChar(ch)
			} else {
				words.addChar(ch)
			}

			result.WriteRune(ch)
		}
	}
	if stopChar != scanner.EOF {
		return "", []string{}, errors.Errorf("unexpected end of statement while looking for matching %s", string(stopChar))
	}
	return result.String(), words.getWords(), nil
}
173
+
174
+func (sw *shellWord) processSingleQuote() (string, error) {
175
+	// All chars between single quotes are taken as-is
176
+	// Note, you can't escape '
177
+	//
178
+	// From the "sh" man page:
179
+	// Single Quotes
180
+	//   Enclosing characters in single quotes preserves the literal meaning of
181
+	//   all the characters (except single quotes, making it impossible to put
182
+	//   single-quotes in a single-quoted string).
183
+
184
+	var result bytes.Buffer
185
+
186
+	sw.scanner.Next()
187
+
188
+	for {
189
+		ch := sw.scanner.Next()
190
+		switch ch {
191
+		case scanner.EOF:
192
+			return "", errors.New("unexpected end of statement while looking for matching single-quote")
193
+		case '\'':
194
+			return result.String(), nil
195
+		}
196
+		result.WriteRune(ch)
197
+	}
198
+}
199
+
200
func (sw *shellWord) processDoubleQuote() (string, error) {
	// All chars up to the next " are taken as-is, even ', except any $ chars
	// But you can escape " with a \ (or ` if escape token set accordingly)
	//
	// From the "sh" man page:
	// Double Quotes
	//  Enclosing characters within double quotes preserves the literal meaning
	//  of all characters except dollarsign ($), backquote (`), and backslash
	//  (\).  The backslash inside double quotes is historically weird, and
	//  serves to quote only the following characters:
	//    $ ` " \ <newline>.
	//  Otherwise it remains literal.

	var result bytes.Buffer

	sw.scanner.Next() // consume the opening double-quote

	for {
		switch sw.scanner.Peek() {
		case scanner.EOF:
			return "", errors.New("unexpected end of statement while looking for matching double-quote")
		case '"':
			sw.scanner.Next()
			return result.String(), nil
		case '$':
			// Variable references are expanded even inside double quotes.
			value, err := sw.processDollar()
			if err != nil {
				return "", err
			}
			result.WriteString(value)
		default:
			ch := sw.scanner.Next()
			if ch == sw.escapeToken {
				switch sw.scanner.Peek() {
				case scanner.EOF:
					// Ignore \ at end of word
					continue
				case '"', '$', sw.escapeToken:
					// These chars can be escaped, all other \'s are left as-is
					// Note: for now don't do anything special with ` chars.
					// Not sure what to do with them anyway since we're not going
					// to execute the text in there (not now anyway).
					ch = sw.scanner.Next()
				}
			}
			result.WriteRune(ch)
		}
	}
}
249
+
250
// processDollar expands a $name or ${name} reference, including the
// ${name:+word} and ${name:-word} modifier forms, using the environment
// carried by sw.
func (sw *shellWord) processDollar() (string, error) {
	sw.scanner.Next() // consume the '$'

	// $xxx case
	if sw.scanner.Peek() != '{' {
		name := sw.processName()
		if name == "" {
			// A lone '$' with no valid name is kept literally.
			return "$", nil
		}
		return sw.getEnv(name), nil
	}

	sw.scanner.Next() // consume the '{'
	switch sw.scanner.Peek() {
	case scanner.EOF:
		return "", errors.New("syntax error: missing '}'")
	case '{', '}', ':':
		// Invalid ${{xx}, ${:xx}, ${:}. ${} case
		return "", errors.New("syntax error: bad substitution")
	}
	name := sw.processName()
	ch := sw.scanner.Next()
	switch ch {
	case '}':
		// Normal ${xx} case
		return sw.getEnv(name), nil
	case ':':
		// Special ${xx:...} format processing
		// Yes it allows for recursive $'s in the ... spot
		modifier := sw.scanner.Next()

		word, _, err := sw.processStopOn('}')
		if err != nil {
			if sw.scanner.Peek() == scanner.EOF {
				return "", errors.New("syntax error: missing '}'")
			}
			return "", err
		}

		// Grab the current value of the variable in question so we
		// can use to to determine what to do based on the modifier
		newValue := sw.getEnv(name)

		switch modifier {
		case '+':
			// ${name:+word}: use word only when name is set and non-empty.
			if newValue != "" {
				newValue = word
			}
			return newValue, nil

		case '-':
			// ${name:-word}: use word as a default when name is empty/unset.
			if newValue == "" {
				newValue = word
			}
			return newValue, nil

		default:
			return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier)
		}
	}
	// Name was followed by neither '}' nor ':' (e.g. premature EOF).
	return "", errors.Errorf("missing ':' in substitution")
}
312
+
313
// processName reads a variable name from the scanner: either a run of
// digits, a single special-parameter character, or an alphanumeric/
// underscore identifier. It returns "" when no valid name follows.
func (sw *shellWord) processName() string {
	// Read in a name (alphanumeric or _)
	// If it starts with a numeric then just return $#
	var name bytes.Buffer

	for sw.scanner.Peek() != scanner.EOF {
		ch := sw.scanner.Peek()
		if name.Len() == 0 && unicode.IsDigit(ch) {
			for sw.scanner.Peek() != scanner.EOF && unicode.IsDigit(sw.scanner.Peek()) {
				// Keep reading until the first non-digit character, or EOF
				ch = sw.scanner.Next()
				name.WriteRune(ch)
			}
			return name.String()
		}
		if name.Len() == 0 && isSpecialParam(ch) {
			// Special parameters (e.g. $@, $?) are single-character names.
			ch = sw.scanner.Next()
			return string(ch)
		}
		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
			break
		}
		ch = sw.scanner.Next()
		name.WriteRune(ch)
	}

	return name.String()
}
341
+
342
// isSpecialParam checks if the provided character is a special parameters,
// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
func isSpecialParam(char rune) bool {
	// The POSIX special parameters: @ * # ? - $ ! 0
	return strings.ContainsRune("@*#?-$!0", char)
}
353
+
354
+func (sw *shellWord) getEnv(name string) string {
355
+	for _, env := range sw.envs {
356
+		i := strings.Index(env, "=")
357
+		if i < 0 {
358
+			if EqualEnvKeys(name, env) {
359
+				// Should probably never get here, but just in case treat
360
+				// it like "var" and "var=" are the same
361
+				return ""
362
+			}
363
+			continue
364
+		}
365
+		compareName := env[:i]
366
+		if !EqualEnvKeys(name, compareName) {
367
+			continue
368
+		}
369
+		return env[i+1:]
370
+	}
371
+	return ""
372
+}
0 373
new file mode 100644
... ...
@@ -0,0 +1,53 @@
0
+package identity
1
+
2
+import (
3
+	cryptorand "crypto/rand"
4
+	"fmt"
5
+	"io"
6
+	"math/big"
7
+)
8
+
9
var (
	// idReader is used for random id generation. This declaration allows us to
	// replace it for testing.
	idReader = cryptorand.Reader
)

// parameters for random identifier generation. We can tweak this when there is
// time for further analysis.
const (
	// randomIDEntropyBytes is 16 bytes (128 bits) plus one extra byte; see
	// the note on maxRandomIDLength below for why the extra byte is read.
	randomIDEntropyBytes = 17
	randomIDBase         = 36

	// To ensure that all identifiers are fixed length, we make sure they
	// get padded out or truncated to 25 characters.
	//
	// For academics,  f5lxx1zz5pnorynqglhzmsp33  == 2^128 - 1. This value
	// was calculated from floor(log(2^128-1, 36)) + 1.
	//
	// While 128 bits is the largest whole-byte size that fits into 25
	// base-36 characters, we generate an extra byte of entropy to fill
	// in the high bits, which would otherwise be 0. This gives us a more
	// even distribution of the first character.
	//
	// See http://mathworld.wolfram.com/NumberLength.html for more information.
	maxRandomIDLength = 25
)
35
+
36
+// NewID generates a new identifier for use where random identifiers with low
37
+// collision probability are required.
38
+//
39
+// With the parameters in this package, the generated identifier will provide
40
+// ~129 bits of entropy encoded with base36. Leading padding is added if the
41
+// string is less 25 bytes. We do not intend to maintain this interface, so
42
+// identifiers should be treated opaquely.
43
+func NewID() string {
44
+	var p [randomIDEntropyBytes]byte
45
+
46
+	if _, err := io.ReadFull(idReader, p[:]); err != nil {
47
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
48
+	}
49
+
50
+	p[0] |= 0x80 // set high bit to avoid the need for padding
51
+	return (&big.Int{}).SetBytes(p[:]).Text(randomIDBase)[1 : maxRandomIDLength+1]
52
+}
... ...
@@ -1,9 +1,12 @@
1 1
 package filesync
2 2
 
3 3
 import (
4
+	"bufio"
5
+	io "io"
4 6
 	"os"
5 7
 	"time"
6 8
 
9
+	"github.com/pkg/errors"
7 10
 	"github.com/sirupsen/logrus"
8 11
 	"github.com/tonistiigi/fsutil"
9 12
 	"google.golang.org/grpc"
... ...
@@ -17,6 +20,46 @@ func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, p
17 17
 	}, progress)
18 18
 }
19 19
 
20
// newStreamWriter wraps a gRPC client stream in a buffered io.WriteCloser
// whose writes are sent over the stream as BytesMessage chunks.
func newStreamWriter(stream grpc.ClientStream) io.WriteCloser {
	wc := &streamWriterCloser{ClientStream: stream}
	return &bufferedWriteCloser{Writer: bufio.NewWriter(wc), Closer: wc}
}

// bufferedWriteCloser couples a bufio.Writer with the underlying Closer so
// that Close flushes buffered data before closing.
type bufferedWriteCloser struct {
	*bufio.Writer
	io.Closer
}

func (bwc *bufferedWriteCloser) Close() error {
	// Flush first; otherwise any buffered bytes would be lost on close.
	if err := bwc.Writer.Flush(); err != nil {
		return err
	}
	return bwc.Closer.Close()
}

// streamWriterCloser adapts a grpc.ClientStream to io.WriteCloser by sending
// each Write as a single BytesMessage.
type streamWriterCloser struct {
	grpc.ClientStream
}

func (wc *streamWriterCloser) Write(dt []byte) (int, error) {
	if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil {
		return 0, err
	}
	return len(dt), nil
}

func (wc *streamWriterCloser) Close() error {
	if err := wc.ClientStream.CloseSend(); err != nil {
		return err
	}
	// block until receiver is done
	// io.EOF signals normal completion by the receiver; anything else
	// (including nil) is returned as-is.
	var bm BytesMessage
	if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF {
		return err
	}
	return nil
}
59
+
20 60
 func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error {
21 61
 	st := time.Now()
22 62
 	defer func() {
... ...
@@ -53,3 +96,18 @@ func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
53 53
 		}(),
54 54
 	})
55 55
 }
56
+
57
// writeTargetFile copies BytesMessage chunks received from the stream into
// wc until the sender signals EOF.
// NOTE(review): wc is not closed here; presumably the caller owns closing
// it — confirm against call sites.
func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error {
	for {
		bm := BytesMessage{}
		if err := ds.RecvMsg(&bm); err != nil {
			// errors.Cause unwraps pkg/errors wrapping before the EOF check.
			if errors.Cause(err) == io.EOF {
				// Normal end of stream.
				return nil
			}
			return err
		}
		if _, err := wc.Write(bm.Data); err != nil {
			return err
		}
	}
}
... ...
@@ -1,14 +1,15 @@
1 1
 package filesync
2 2
 
3 3
 import (
4
+	"context"
4 5
 	"fmt"
6
+	io "io"
5 7
 	"os"
6 8
 	"strings"
7 9
 
8 10
 	"github.com/moby/buildkit/session"
9 11
 	"github.com/pkg/errors"
10 12
 	"github.com/tonistiigi/fsutil"
11
-	"golang.org/x/net/context"
12 13
 	"google.golang.org/grpc"
13 14
 	"google.golang.org/grpc/metadata"
14 15
 )
... ...
@@ -16,6 +17,7 @@ import (
16 16
 const (
17 17
 	keyOverrideExcludes = "override-excludes"
18 18
 	keyIncludePatterns  = "include-patterns"
19
+	keyExcludePatterns  = "exclude-patterns"
19 20
 	keyDirName          = "dir-name"
20 21
 )
21 22
 
... ...
@@ -54,7 +56,7 @@ func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error {
54 54
 	return sp.handle("tarstream", stream)
55 55
 }
56 56
 
57
-func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error {
57
+func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retErr error) {
58 58
 	var pr *protocol
59 59
 	for _, p := range supportedProtocols {
60 60
 		if method == p.name && isProtoSupported(p.name) {
... ...
@@ -66,20 +68,21 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error
66 66
 		return errors.New("failed to negotiate protocol")
67 67
 	}
68 68
 
69
-	opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object
69
+	opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object
70 70
 
71
+	dirName := ""
71 72
 	name, ok := opts[keyDirName]
72
-	if !ok || len(name) != 1 {
73
-		return errors.New("no dir name in request")
73
+	if ok && len(name) > 0 {
74
+		dirName = name[0]
74 75
 	}
75 76
 
76
-	dir, ok := sp.dirs[name[0]]
77
+	dir, ok := sp.dirs[dirName]
77 78
 	if !ok {
78
-		return errors.Errorf("no access allowed to dir %q", name[0])
79
+		return errors.Errorf("no access allowed to dir %q", dirName)
79 80
 	}
80 81
 
81
-	var excludes []string
82
-	if len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true" {
82
+	excludes := opts[keyExcludePatterns]
83
+	if len(dir.Excludes) != 0 && (len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true") {
83 84
 		excludes = dir.Excludes
84 85
 	}
85 86
 	includes := opts[keyIncludePatterns]
... ...
@@ -138,7 +141,8 @@ var supportedProtocols = []protocol{
138 138
 type FSSendRequestOpt struct {
139 139
 	Name             string
140 140
 	IncludePatterns  []string
141
-	OverrideExcludes bool
141
+	ExcludePatterns  []string
142
+	OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory
142 143
 	DestDir          string
143 144
 	CacheUpdater     CacheUpdater
144 145
 	ProgressCb       func(int, bool)
... ...
@@ -173,6 +177,10 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
173 173
 		opts[keyIncludePatterns] = opt.IncludePatterns
174 174
 	}
175 175
 
176
+	if opt.ExcludePatterns != nil {
177
+		opts[keyExcludePatterns] = opt.ExcludePatterns
178
+	}
179
+
176 180
 	opts[keyDirName] = []string{opt.Name}
177 181
 
178 182
 	ctx, cancel := context.WithCancel(ctx)
... ...
@@ -182,7 +190,7 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
182 182
 
183 183
 	var stream grpc.ClientStream
184 184
 
185
-	ctx = metadata.NewContext(ctx, opts)
185
+	ctx = metadata.NewOutgoingContext(ctx, opts)
186 186
 
187 187
 	switch pr.name {
188 188
 	case "tarstream":
... ...
@@ -204,16 +212,25 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
204 204
 	return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb)
205 205
 }
206 206
 
207
-// NewFSSyncTarget allows writing into a directory
208
-func NewFSSyncTarget(outdir string) session.Attachable {
207
+// NewFSSyncTargetDir allows writing into a directory
208
+func NewFSSyncTargetDir(outdir string) session.Attachable {
209 209
 	p := &fsSyncTarget{
210 210
 		outdir: outdir,
211 211
 	}
212 212
 	return p
213 213
 }
214 214
 
215
+// NewFSSyncTarget allows writing into an io.WriteCloser
216
+func NewFSSyncTarget(w io.WriteCloser) session.Attachable {
217
+	p := &fsSyncTarget{
218
+		outfile: w,
219
+	}
220
+	return p
221
+}
222
+
215 223
 type fsSyncTarget struct {
216
-	outdir string
224
+	outdir  string
225
+	outfile io.WriteCloser
217 226
 }
218 227
 
219 228
 func (sp *fsSyncTarget) Register(server *grpc.Server) {
... ...
@@ -221,7 +238,14 @@ func (sp *fsSyncTarget) Register(server *grpc.Server) {
221 221
 }
222 222
 
223 223
 func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
224
-	return syncTargetDiffCopy(stream, sp.outdir)
224
+	if sp.outdir != "" {
225
+		return syncTargetDiffCopy(stream, sp.outdir)
226
+	}
227
+	if sp.outfile == nil {
228
+		return errors.New("empty outfile and outdir")
229
+	}
230
+	defer sp.outfile.Close()
231
+	return writeTargetFile(stream, sp.outfile)
225 232
 }
226 233
 
227 234
 func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progress func(int, bool)) error {
... ...
@@ -239,3 +263,19 @@ func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progres
239 239
 
240 240
 	return sendDiffCopy(cc, srcPath, nil, nil, progress, nil)
241 241
 }
242
+
243
+func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) {
244
+	method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
245
+	if !c.Supports(method) {
246
+		return nil, errors.Errorf("method %s not supported by the client", method)
247
+	}
248
+
249
+	client := NewFileSendClient(c.Conn())
250
+
251
+	cc, err := client.DiffCopy(ctx)
252
+	if err != nil {
253
+		return nil, err
254
+	}
255
+
256
+	return newStreamWriter(cc), nil
257
+}
... ...
@@ -1,6 +1,5 @@
1
-// Code generated by protoc-gen-gogo.
1
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
2 2
 // source: filesync.proto
3
-// DO NOT EDIT!
4 3
 
5 4
 /*
6 5
 Package filesync is a generated protocol buffer package.
... ...
@@ -22,10 +21,8 @@ import bytes "bytes"
22 22
 import strings "strings"
23 23
 import reflect "reflect"
24 24
 
25
-import (
26
-	context "golang.org/x/net/context"
27
-	grpc "google.golang.org/grpc"
28
-)
25
+import context "golang.org/x/net/context"
26
+import grpc "google.golang.org/grpc"
29 27
 
30 28
 import io "io"
31 29
 
... ...
@@ -61,10 +58,7 @@ func init() {
61 61
 }
62 62
 func (this *BytesMessage) Equal(that interface{}) bool {
63 63
 	if that == nil {
64
-		if this == nil {
65
-			return true
66
-		}
67
-		return false
64
+		return this == nil
68 65
 	}
69 66
 
70 67
 	that1, ok := that.(*BytesMessage)
... ...
@@ -77,10 +71,7 @@ func (this *BytesMessage) Equal(that interface{}) bool {
77 77
 		}
78 78
 	}
79 79
 	if that1 == nil {
80
-		if this == nil {
81
-			return true
82
-		}
83
-		return false
80
+		return this == nil
84 81
 	} else if this == nil {
85 82
 		return false
86 83
 	}
... ...
@@ -397,24 +388,6 @@ func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
397 397
 	return i, nil
398 398
 }
399 399
 
400
-func encodeFixed64Filesync(dAtA []byte, offset int, v uint64) int {
401
-	dAtA[offset] = uint8(v)
402
-	dAtA[offset+1] = uint8(v >> 8)
403
-	dAtA[offset+2] = uint8(v >> 16)
404
-	dAtA[offset+3] = uint8(v >> 24)
405
-	dAtA[offset+4] = uint8(v >> 32)
406
-	dAtA[offset+5] = uint8(v >> 40)
407
-	dAtA[offset+6] = uint8(v >> 48)
408
-	dAtA[offset+7] = uint8(v >> 56)
409
-	return offset + 8
410
-}
411
-func encodeFixed32Filesync(dAtA []byte, offset int, v uint32) int {
412
-	dAtA[offset] = uint8(v)
413
-	dAtA[offset+1] = uint8(v >> 8)
414
-	dAtA[offset+2] = uint8(v >> 16)
415
-	dAtA[offset+3] = uint8(v >> 24)
416
-	return offset + 4
417
-}
418 400
 func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int {
419 401
 	for v >= 1<<7 {
420 402
 		dAtA[offset] = uint8(v&0x7f | 0x80)
... ...
@@ -655,7 +628,7 @@ func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) }
655 655
 
656 656
 var fileDescriptorFilesync = []byte{
657 657
 	// 208 bytes of a gzipped FileDescriptorProto
658
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
658
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
659 659
 	0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa,
660 660
 	0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6,
661 661
 	0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a,
... ...
@@ -1,12 +1,15 @@
1 1
 package session
2 2
 
3 3
 import (
4
+	"context"
4 5
 	"net"
6
+	"sync/atomic"
5 7
 	"time"
6 8
 
9
+	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
10
+	opentracing "github.com/opentracing/opentracing-go"
7 11
 	"github.com/pkg/errors"
8 12
 	"github.com/sirupsen/logrus"
9
-	"golang.org/x/net/context"
10 13
 	"golang.org/x/net/http2"
11 14
 	"google.golang.org/grpc"
12 15
 	"google.golang.org/grpc/health/grpc_health_v1"
... ...
@@ -22,11 +25,28 @@ func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
22 22
 }
23 23
 
24 24
 func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) {
25
-	dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
25
+	var dialCount int64
26
+	dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
27
+		if c := atomic.AddInt64(&dialCount, 1); c > 1 {
28
+			return nil, errors.Errorf("only one connection allowed")
29
+		}
26 30
 		return conn, nil
27 31
 	})
28 32
 
29
-	cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure())
33
+	dialOpts := []grpc.DialOption{
34
+		dialer,
35
+		grpc.WithInsecure(),
36
+	}
37
+
38
+	if span := opentracing.SpanFromContext(ctx); span != nil {
39
+		tracer := span.Tracer()
40
+		dialOpts = append(dialOpts,
41
+			grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())),
42
+			grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())),
43
+		)
44
+	}
45
+
46
+	cc, err := grpc.DialContext(ctx, "", dialOpts...)
30 47
 	if err != nil {
31 48
 		return nil, nil, errors.Wrap(err, "failed to create grpc client")
32 49
 	}
... ...
@@ -41,7 +61,7 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
41 41
 	defer cancelConn()
42 42
 	defer cc.Close()
43 43
 
44
-	ticker := time.NewTicker(500 * time.Millisecond)
44
+	ticker := time.NewTicker(1 * time.Second)
45 45
 	defer ticker.Stop()
46 46
 	healthClient := grpc_health_v1.NewHealthClient(cc)
47 47
 
... ...
@@ -50,8 +70,7 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
50 50
 		case <-ctx.Done():
51 51
 			return
52 52
 		case <-ticker.C:
53
-			<-ticker.C
54
-			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
53
+			ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
55 54
 			_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
56 55
 			cancel()
57 56
 			if err != nil {
... ...
@@ -1,13 +1,13 @@
1 1
 package session
2 2
 
3 3
 import (
4
+	"context"
4 5
 	"net"
5 6
 	"net/http"
6 7
 	"strings"
7 8
 	"sync"
8 9
 
9 10
 	"github.com/pkg/errors"
10
-	"golang.org/x/net/context"
11 11
 	"google.golang.org/grpc"
12 12
 )
13 13
 
... ...
@@ -150,6 +150,12 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin
150 150
 
151 151
 // Get returns a session by ID
152 152
 func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) {
153
+	// session prefix is used to identify vertexes with different contexts so
154
+	// they would not collide, but for lookup we don't need the prefix
155
+	if p := strings.SplitN(id, ":", 2); len(p) == 2 && len(p[1]) > 0 {
156
+		id = p[1]
157
+	}
158
+
153 159
 	ctx, cancel := context.WithCancel(ctx)
154 160
 	defer cancel()
155 161
 
... ...
@@ -1,11 +1,14 @@
1 1
 package session
2 2
 
3 3
 import (
4
+	"context"
4 5
 	"net"
6
+	"strings"
5 7
 
6
-	"github.com/docker/docker/pkg/stringid"
8
+	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
9
+	"github.com/moby/buildkit/identity"
10
+	opentracing "github.com/opentracing/opentracing-go"
7 11
 	"github.com/pkg/errors"
8
-	"golang.org/x/net/context"
9 12
 	"google.golang.org/grpc"
10 13
 	"google.golang.org/grpc/health"
11 14
 	"google.golang.org/grpc/health/grpc_health_v1"
... ...
@@ -35,16 +38,27 @@ type Session struct {
35 35
 	cancelCtx  func()
36 36
 	done       chan struct{}
37 37
 	grpcServer *grpc.Server
38
+	conn       net.Conn
38 39
 }
39 40
 
40 41
 // NewSession returns a new long running session
41
-func NewSession(name, sharedKey string) (*Session, error) {
42
-	id := stringid.GenerateRandomID()
42
+func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) {
43
+	id := identity.NewID()
44
+
45
+	serverOpts := []grpc.ServerOption{}
46
+	if span := opentracing.SpanFromContext(ctx); span != nil {
47
+		tracer := span.Tracer()
48
+		serverOpts = []grpc.ServerOption{
49
+			grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())),
50
+			grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())),
51
+		}
52
+	}
53
+
43 54
 	s := &Session{
44 55
 		id:         id,
45 56
 		name:       name,
46 57
 		sharedKey:  sharedKey,
47
-		grpcServer: grpc.NewServer(),
58
+		grpcServer: grpc.NewServer(serverOpts...),
48 59
 	}
49 60
 
50 61
 	grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer())
... ...
@@ -85,6 +99,7 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error {
85 85
 	if err != nil {
86 86
 		return errors.Wrap(err, "failed to dial gRPC")
87 87
 	}
88
+	s.conn = conn
88 89
 	serve(ctx, s.grpcServer, conn)
89 90
 	return nil
90 91
 }
... ...
@@ -92,8 +107,10 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error {
92 92
 // Close closes the session
93 93
 func (s *Session) Close() error {
94 94
 	if s.cancelCtx != nil && s.done != nil {
95
+		if s.conn != nil {
96
+			s.conn.Close()
97
+		}
95 98
 		s.grpcServer.Stop()
96
-		s.cancelCtx()
97 99
 		<-s.done
98 100
 	}
99 101
 	return nil
... ...
@@ -116,3 +133,11 @@ func (s *Session) closed() bool {
116 116
 func MethodURL(s, m string) string {
117 117
 	return "/" + s + "/" + m
118 118
 }
119
+
120
+func traceFilter() otgrpc.Option {
121
+	return otgrpc.IncludingSpans(func(parentSpanCtx opentracing.SpanContext,
122
+		method string,
123
+		req, resp interface{}) bool {
124
+		return !strings.HasSuffix(method, "Health/Check")
125
+	})
126
+}
... ...
@@ -1,46 +1,68 @@
1 1
 github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
2
-github.com/pkg/errors c605e284fe17294bda444b34710735b29d1a9d90
2
+github.com/pkg/errors v0.8.0
3 3
 
4 4
 github.com/stretchr/testify v1.1.4
5 5
 github.com/davecgh/go-spew v1.1.0
6 6
 github.com/pmezard/go-difflib v1.0.0
7
-golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
7
+golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
8 8
 
9
-github.com/containerd/containerd d1e11f17ec7b325f89608dd46c128300b8727d50
10
-golang.org/x/sync f52d1811a62927559de87708c8913c1650ce4f26
9
+github.com/containerd/containerd 7f800e0a7bb1e2547baca4d5bbf317ceeb341c14
10
+github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
11
+golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
11 12
 github.com/sirupsen/logrus v1.0.0
12
-google.golang.org/grpc v1.3.0
13
-github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
14
-golang.org/x/net 1f9224279e98554b6a6432d4dd998a739f8b2b7c
15
-github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
16
-github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
17
-github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463
18
-github.com/opencontainers/image-spec v1.0.0
19
-github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d
20
-github.com/Microsoft/go-winio v0.4.1
21
-github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062
22
-github.com/opencontainers/runtime-spec 96de01bbb42c7af89bff100e10a9f0fb62e75bfb
23
-github.com/containerd/go-runc 2774a2ea124a5c2d0aba13b5c2dd8a5a9a48775d
24
-github.com/containerd/console 7fed77e673ca4abcd0cbd6d4d0e0e22137cbd778
25
-github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
13
+google.golang.org/grpc v1.10.1
14
+github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
15
+golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd
16
+github.com/gogo/protobuf v1.0.0
17
+github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
18
+github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9
19
+github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
20
+github.com/opencontainers/image-spec v1.0.1
21
+github.com/opencontainers/runc 0e561642f81e84ebd0b3afd6ec510c75a2ccb71b
22
+github.com/Microsoft/go-winio v0.4.5
23
+github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
24
+github.com/opencontainers/runtime-spec v1.0.1
25
+github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
26
+github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925
26 27
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
27 28
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
28 29
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
30
+github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
31
+github.com/Microsoft/hcsshim v0.6.10
29 32
 
30
-github.com/urfave/cli d70f47eeca3afd795160003bc6e28b001d60c67c
31
-github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
33
+github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
34
+github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
35
+github.com/docker/go-units v0.3.1
32 36
 github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716
33
-golang.org/x/time 8be79e1e0910c292df4e79c241bb7e8f7e725959
37
+golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
34 38
 
35
-github.com/BurntSushi/locker 392720b78f44e9d0249fcac6c43b111b47a370b8
36
-github.com/docker/docker 6f723db8c6f0c7f0b252674a9673a25b5978db04 https://github.com/tonistiigi/docker.git
39
+github.com/BurntSushi/locker a6e239ea1c69bff1cfdb20c4b73dadf52f784b6a
40
+github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281
37 41
 github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
38 42
 
39
-github.com/tonistiigi/fsutil 1dedf6e90084bd88c4c518a15e68a37ed1370203
40
-github.com/stevvooe/continuity 86cec1535a968310e7532819f699ff2830ed7463
41
-github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
43
+github.com/tonistiigi/fsutil dc68c74458923f357474a9178bd198aa3ed11a5f
42 44
 github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
43 45
 github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
44 46
 github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
45 47
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
46 48
 github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f
49
+
50
+github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d
51
+github.com/docker/cli 99576756eb3303b7af8102c502f21a912e3c1af6 https://github.com/tonistiigi/docker-cli.git
52
+github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
53
+github.com/docker/libnetwork 822e5b59d346b7ad0735df2c8e445e9787320e67
54
+
55
+
56
+github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
57
+github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
58
+github.com/uber/jaeger-client-go e02c85f9069ea625a96fc4e1afb5e9ac6c569a6d
59
+github.com/apache/thrift b2a4d4ae21c789b689dd162deb819665567f481c
60
+github.com/uber/jaeger-lib c48167d9cae5887393dd5e61efd06a4a48b7fbb3
61
+github.com/codahale/hdrhistogram f8ad88b59a584afeee9d334eff879b104439117b
62
+
63
+github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
64
+github.com/opencontainers/selinux 74a747aeaf2d66097b6908f572794f49f07dda2c
65
+
66
+# used by dockerfile tests
67
+github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029
68
+github.com/google/go-cmp v0.2.0
47 69
new file mode 100644
... ...
@@ -0,0 +1,20 @@
0
+// +build linux
1
+
2
+package fsutil
3
+
4
+import (
5
+	"github.com/pkg/errors"
6
+	"golang.org/x/sys/unix"
7
+)
8
+
9
+func chtimes(path string, un int64) error {
10
+	var utimes [2]unix.Timespec
11
+	utimes[0] = unix.NsecToTimespec(un)
12
+	utimes[1] = utimes[0]
13
+
14
+	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
15
+		return errors.Wrap(err, "failed call to UtimesNanoAt")
16
+	}
17
+
18
+	return nil
19
+}
0 20
new file mode 100644
... ...
@@ -0,0 +1,13 @@
0
+// +build !linux
1
+
2
+package fsutil
3
+
4
+import (
5
+	"os"
6
+	"time"
7
+)
8
+
9
+func chtimes(path string, un int64) error {
10
+	mtime := time.Unix(0, un)
11
+	return os.Chtimes(path, mtime, mtime)
12
+}
... ...
@@ -55,6 +55,7 @@ func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWr
55 55
 		eg:     eg,
56 56
 		ctx:    ctx,
57 57
 		cancel: cancel,
58
+		filter: opt.Filter,
58 59
 	}, nil
59 60
 }
60 61
 
61 62
deleted file mode 100644
... ...
@@ -1,7 +0,0 @@
1
-// +build darwin
2
-
3
-package fsutil
4
-
5
-func chtimes(path string, un int64) error {
6
-	return nil
7
-}
8 1
deleted file mode 100644
... ...
@@ -1,20 +0,0 @@
1
-// +build linux
2
-
3
-package fsutil
4
-
5
-import (
6
-	"github.com/pkg/errors"
7
-	"golang.org/x/sys/unix"
8
-)
9
-
10
-func chtimes(path string, un int64) error {
11
-	var utimes [2]unix.Timespec
12
-	utimes[0] = unix.NsecToTimespec(un)
13
-	utimes[1] = utimes[0]
14
-
15
-	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
16
-		return errors.Wrap(err, "failed call to UtimesNanoAt")
17
-	}
18
-
19
-	return nil
20
-}
... ...
@@ -3,9 +3,6 @@
3 3
 package fsutil
4 4
 
5 5
 import (
6
-	"os"
7
-	"time"
8
-
9 6
 	"github.com/pkg/errors"
10 7
 )
11 8
 
... ...
@@ -13,11 +10,6 @@ func rewriteMetadata(p string, stat *Stat) error {
13 13
 	return chtimes(p, stat.ModTime)
14 14
 }
15 15
 
16
-func chtimes(path string, un int64) error {
17
-	mtime := time.Unix(0, un)
18
-	return os.Chtimes(path, mtime, mtime)
19
-}
20
-
21 16
 // handleTarTypeBlockCharFifo is an OS-specific helper function used by
22 17
 // createTarFile to handle the following types of header: Block; Char; Fifo
23 18
 func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
... ...
@@ -55,25 +55,33 @@ type receiver struct {
55 55
 
56 56
 type dynamicWalker struct {
57 57
 	walkChan chan *currentPath
58
-	closed   bool
58
+	err      error
59
+	closeCh  chan struct{}
59 60
 }
60 61
 
61 62
 func newDynamicWalker() *dynamicWalker {
62 63
 	return &dynamicWalker{
63 64
 		walkChan: make(chan *currentPath, 128),
65
+		closeCh:  make(chan struct{}),
64 66
 	}
65 67
 }
66 68
 
67 69
 func (w *dynamicWalker) update(p *currentPath) error {
68
-	if w.closed {
69
-		return errors.New("walker is closed")
70
+	select {
71
+	case <-w.closeCh:
72
+		return errors.Wrap(w.err, "walker is closed")
73
+	default:
70 74
 	}
71 75
 	if p == nil {
72 76
 		close(w.walkChan)
73 77
 		return nil
74 78
 	}
75
-	w.walkChan <- p
76
-	return nil
79
+	select {
80
+	case w.walkChan <- p:
81
+		return nil
82
+	case <-w.closeCh:
83
+		return errors.Wrap(w.err, "walker is closed")
84
+	}
77 85
 }
78 86
 
79 87
 func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) error {
... ...
@@ -85,6 +93,8 @@ func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) err
85 85
 			}
86 86
 			pathC <- p
87 87
 		case <-ctx.Done():
88
+			w.err = ctx.Err()
89
+			close(w.closeCh)
88 90
 			return ctx.Err()
89 91
 		}
90 92
 	}
... ...
@@ -106,7 +116,12 @@ func (r *receiver) run(ctx context.Context) error {
106 106
 
107 107
 	w := newDynamicWalker()
108 108
 
109
-	g.Go(func() error {
109
+	g.Go(func() (retErr error) {
110
+		defer func() {
111
+			if retErr != nil {
112
+				r.conn.SendMsg(&Packet{Type: PACKET_ERR, Data: []byte(retErr.Error())})
113
+			}
114
+		}()
110 115
 		destWalker := emptyWalker
111 116
 		if !r.merge {
112 117
 			destWalker = GetWalkerFn(r.dest)
... ...
@@ -143,6 +158,8 @@ func (r *receiver) run(ctx context.Context) error {
143 143
 			}
144 144
 
145 145
 			switch p.Type {
146
+			case PACKET_ERR:
147
+				return errors.Errorf("error from sender: %s", p.Data)
146 148
 			case PACKET_STAT:
147 149
 				if p.Stat == nil {
148 150
 					if err := w.update(nil); err != nil {
... ...
@@ -183,7 +200,15 @@ func (r *receiver) run(ctx context.Context) error {
183 183
 					}
184 184
 				}
185 185
 			case PACKET_FIN:
186
-				return nil
186
+				for {
187
+					var p Packet
188
+					if err := r.conn.RecvMsg(&p); err != nil {
189
+						if err == io.EOF {
190
+							return nil
191
+						}
192
+						return err
193
+					}
194
+				}
187 195
 			}
188 196
 		}
189 197
 	})
... ...
@@ -208,10 +233,13 @@ func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteClose
208 208
 		return err
209 209
 	}
210 210
 	err := wwc.Wait(ctx)
211
+	if err != nil {
212
+		return err
213
+	}
211 214
 	r.muPipes.Lock()
212 215
 	delete(r.pipes, id)
213 216
 	r.muPipes.Unlock()
214
-	return err
217
+	return nil
215 218
 }
216 219
 
217 220
 type wrappedWriteCloser struct {
... ...
@@ -57,7 +57,11 @@ func (s *sender) run(ctx context.Context) error {
57 57
 	defer s.updateProgress(0, true)
58 58
 
59 59
 	g.Go(func() error {
60
-		return s.walk(ctx)
60
+		err := s.walk(ctx)
61
+		if err != nil {
62
+			s.conn.SendMsg(&Packet{Type: PACKET_ERR, Data: []byte(err.Error())})
63
+		}
64
+		return err
61 65
 	})
62 66
 
63 67
 	for i := 0; i < 4; i++ {
... ...
@@ -90,6 +94,8 @@ func (s *sender) run(ctx context.Context) error {
90 90
 				return err
91 91
 			}
92 92
 			switch p.Type {
93
+			case PACKET_ERR:
94
+				return errors.Errorf("error from receiver: %s", p.Data)
93 95
 			case PACKET_REQ:
94 96
 				if err := s.queue(p.ID); err != nil {
95 97
 					return err
... ...
@@ -126,6 +132,7 @@ func (s *sender) queue(id uint32) error {
126 126
 func (s *sender) sendFile(h *sendHandle) error {
127 127
 	f, err := os.Open(filepath.Join(s.root, h.path))
128 128
 	if err == nil {
129
+		defer f.Close()
129 130
 		buf := bufPool.Get().([]byte)
130 131
 		defer bufPool.Put(buf)
131 132
 		if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, buf); err != nil {
... ...
@@ -39,14 +39,22 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
39 39
 		}
40 40
 	}
41 41
 
42
+	var lastIncludedDir string
43
+	var includePatternPrefixes []string
44
+
42 45
 	seenFiles := make(map[uint64]string)
43
-	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
46
+	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) {
44 47
 		if err != nil {
45 48
 			if os.IsNotExist(err) {
46 49
 				return filepath.SkipDir
47 50
 			}
48 51
 			return err
49 52
 		}
53
+		defer func() {
54
+			if retErr != nil && os.IsNotExist(errors.Cause(retErr)) {
55
+				retErr = filepath.SkipDir
56
+			}
57
+		}()
50 58
 		origpath := path
51 59
 		path, err = filepath.Rel(root, path)
52 60
 		if err != nil {
... ...
@@ -59,18 +67,34 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err
59 59
 
60 60
 		if opt != nil {
61 61
 			if opt.IncludePatterns != nil {
62
+				if includePatternPrefixes == nil {
63
+					includePatternPrefixes = patternPrefixes(opt.IncludePatterns)
64
+				}
62 65
 				matched := false
63
-				for _, p := range opt.IncludePatterns {
64
-					if m, _ := filepath.Match(p, path); m {
66
+				if lastIncludedDir != "" {
67
+					if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) {
65 68
 						matched = true
66
-						break
67 69
 					}
68 70
 				}
69 71
 				if !matched {
70
-					if fi.IsDir() {
71
-						return filepath.SkipDir
72
+					for _, p := range opt.IncludePatterns {
73
+						if m, _ := filepath.Match(p, path); m {
74
+							matched = true
75
+							break
76
+						}
77
+					}
78
+					if matched && fi.IsDir() {
79
+						lastIncludedDir = path
80
+					}
81
+				}
82
+				if !matched {
83
+					if !fi.IsDir() {
84
+						return nil
85
+					} else {
86
+						if noPossiblePrefixMatch(path, includePatternPrefixes) {
87
+							return filepath.SkipDir
88
+						}
72 89
 					}
73
-					return nil
74 90
 				}
75 91
 			}
76 92
 			if pm != nil {
... ...
@@ -174,3 +198,30 @@ func (s *StatInfo) IsDir() bool {
174 174
 func (s *StatInfo) Sys() interface{} {
175 175
 	return s.Stat
176 176
 }
177
+
178
+func patternPrefixes(patterns []string) []string {
179
+	pfxs := make([]string, 0, len(patterns))
180
+	for _, ptrn := range patterns {
181
+		idx := strings.IndexFunc(ptrn, func(ch rune) bool {
182
+			return ch == '*' || ch == '?' || ch == '[' || ch == '\\'
183
+		})
184
+		if idx == -1 {
185
+			idx = len(ptrn)
186
+		}
187
+		pfxs = append(pfxs, ptrn[:idx])
188
+	}
189
+	return pfxs
190
+}
191
+
192
+func noPossiblePrefixMatch(p string, pfxs []string) bool {
193
+	for _, pfx := range pfxs {
194
+		chk := p
195
+		if len(pfx) < len(p) {
196
+			chk = p[:len(pfx)]
197
+		}
198
+		if strings.HasPrefix(pfx, chk) {
199
+			return false
200
+		}
201
+	}
202
+	return true
203
+}
... ...
@@ -29,6 +29,7 @@ const (
29 29
 	PACKET_REQ  Packet_PacketType = 1
30 30
 	PACKET_DATA Packet_PacketType = 2
31 31
 	PACKET_FIN  Packet_PacketType = 3
32
+	PACKET_ERR  Packet_PacketType = 4
32 33
 )
33 34
 
34 35
 var Packet_PacketType_name = map[int32]string{
... ...
@@ -36,12 +37,14 @@ var Packet_PacketType_name = map[int32]string{
36 36
 	1: "PACKET_REQ",
37 37
 	2: "PACKET_DATA",
38 38
 	3: "PACKET_FIN",
39
+	4: "PACKET_ERR",
39 40
 }
40 41
 var Packet_PacketType_value = map[string]int32{
41 42
 	"PACKET_STAT": 0,
42 43
 	"PACKET_REQ":  1,
43 44
 	"PACKET_DATA": 2,
44 45
 	"PACKET_FIN":  3,
46
+	"PACKET_ERR":  4,
45 47
 }
46 48
 
47 49
 func (Packet_PacketType) EnumDescriptor() ([]byte, []int) { return fileDescriptorWire, []int{0, 0} }
... ...
@@ -543,21 +546,22 @@ var (
543 543
 func init() { proto.RegisterFile("wire.proto", fileDescriptorWire) }
544 544
 
545 545
 var fileDescriptorWire = []byte{
546
-	// 253 bytes of a gzipped FileDescriptorProto
546
+	// 259 bytes of a gzipped FileDescriptorProto
547 547
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a,
548 548
 	0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0x2b, 0x2e, 0x2d, 0xc9, 0xcc, 0x91, 0xe2,
549
-	0x2a, 0x2e, 0x49, 0x2c, 0x81, 0x88, 0x29, 0x9d, 0x65, 0xe4, 0x62, 0x0b, 0x48, 0x4c, 0xce, 0x4e,
549
+	0x2a, 0x2e, 0x49, 0x2c, 0x81, 0x88, 0x29, 0xdd, 0x65, 0xe4, 0x62, 0x0b, 0x48, 0x4c, 0xce, 0x4e,
550 550
 	0x2d, 0x11, 0xd2, 0xe5, 0x62, 0x29, 0xa9, 0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x33,
551 551
 	0x92, 0xd4, 0x83, 0xa8, 0xd6, 0x83, 0xc8, 0x42, 0xa9, 0x90, 0xca, 0x82, 0xd4, 0x20, 0xb0, 0x32,
552 552
 	0x21, 0x05, 0x2e, 0x16, 0x90, 0x39, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0x3c, 0x30, 0xe5,
553 553
 	0xc1, 0x25, 0x89, 0x25, 0x41, 0x60, 0x19, 0x21, 0x3e, 0x2e, 0x26, 0x4f, 0x17, 0x09, 0x66, 0x05,
554 554
 	0x46, 0x0d, 0xde, 0x20, 0x26, 0x4f, 0x17, 0x21, 0x21, 0x2e, 0x96, 0x94, 0xc4, 0x92, 0x44, 0x09,
555
-	0x16, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, 0xc9, 0x8f, 0x8b, 0x0b, 0x61, 0xb2, 0x10, 0x3f,
555
+	0x16, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, 0x29, 0x8e, 0x8b, 0x0b, 0x61, 0xb2, 0x10, 0x3f,
556 556
 	0x17, 0x77, 0x80, 0xa3, 0xb3, 0xb7, 0x6b, 0x48, 0x7c, 0x70, 0x88, 0x63, 0x88, 0x00, 0x83, 0x10,
557 557
 	0x1f, 0x17, 0x17, 0x54, 0x20, 0xc8, 0x35, 0x50, 0x80, 0x11, 0x49, 0x81, 0x8b, 0x63, 0x88, 0xa3,
558
-	0x00, 0x13, 0x92, 0x02, 0x37, 0x4f, 0x3f, 0x01, 0x66, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e,
559
-	0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c,
560
-	0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72,
561
-	0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x0e, 0x04, 0x63, 0x40,
562
-	0x00, 0x00, 0x00, 0xff, 0xff, 0xda, 0x30, 0x43, 0x22, 0x26, 0x01, 0x00, 0x00,
558
+	0x00, 0x13, 0x92, 0x02, 0x37, 0x4f, 0x3f, 0x01, 0x66, 0x24, 0xbe, 0x6b, 0x50, 0x90, 0x00, 0x8b,
559
+	0x93, 0xce, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xd8, 0xf0,
560
+	0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c,
561
+	0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63,
562
+	0x48, 0x62, 0x03, 0x07, 0x8a, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8b, 0xce, 0x55, 0x3b, 0x36,
563
+	0x01, 0x00, 0x00,
563 564
 }
... ...
@@ -10,6 +10,7 @@ message Packet {
10 10
       PACKET_REQ = 1;
11 11
       PACKET_DATA = 2;
12 12
       PACKET_FIN = 3;
13
+      PACKET_ERR = 4;
13 14
     }
14 15
   PacketType type = 1;
15 16
   Stat stat = 2;