Signed-off-by: Tibor Vass <tibor@docker.com>
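
For context, the BFlags helper moved by this patch is driven roughly as in the
minimal sketch below. This is not part of the patch; the flag names are
hypothetical and only the API shown in the diff (NewBFlags, AddBool, AddString,
Args, Parse, IsTrue, Value) is assumed:

	package main

	import (
		"fmt"
		"log"

		"github.com/docker/docker/builder/dockerfile" // import path introduced by this patch
	)

	func main() {
		bf := dockerfile.NewBFlags()
		flForce := bf.AddBool("force", false) // hypothetical flag, for illustration only
		flName := bf.AddString("name", "")    // hypothetical flag, for illustration only
		bf.Args = []string{"--force", "--name=web"} // raw flags as split from an instruction line
		if err := bf.Parse(); err != nil {          // errors from AddBool/AddString also surface here
			log.Fatal(err)
		}
		fmt.Println(flForce.IsTrue(), flName.Value) // prints: true web
	}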
@@ -12,7 +12,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/dockerfile"
 	"github.com/docker/docker/cliconfig"
 	"github.com/docker/docker/graph"
 	"github.com/docker/docker/pkg/ioutils"
@@ -46,7 +46,7 @@ func (s *router) postCommit(ctx context.Context, w http.ResponseWriter, r *http.
 		return err
 	}
 
-	commitCfg := &builder.CommitConfig{
+	commitCfg := &dockerfile.CommitConfig{
 		Pause: pause,
 		Repo:  r.Form.Get("repo"),
 		Tag:   r.Form.Get("tag"),
@@ -56,7 +56,7 @@ func (s *router) postCommit(ctx context.Context, w http.ResponseWriter, r *http.
 		Config: c,
 	}
 
-	imgID, err := builder.Commit(cname, s.daemon, commitCfg)
+	imgID, err := dockerfile.Commit(cname, s.daemon, commitCfg)
 	if err != nil {
 		return err
 	}
@@ -125,7 +125,7 @@ func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r
 		// generated from the download to be available to the output
 		// stream processing below
 		var newConfig *runconfig.Config
-		newConfig, err = builder.BuildFromConfig(s.daemon, &runconfig.Config{}, r.Form["changes"])
+		newConfig, err = dockerfile.BuildFromConfig(s.daemon, &runconfig.Config{}, r.Form["changes"])
 		if err != nil {
 			return err
 		}
@@ -269,7 +269,7 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
 	var (
 		authConfigs        = map[string]cliconfig.AuthConfig{}
 		authConfigsEncoded = r.Header.Get("X-Registry-Config")
-		buildConfig        = builder.NewBuildConfig()
+		buildConfig        = dockerfile.NewBuildConfig()
 	)
 
 	if authConfigsEncoded != "" {
@@ -347,7 +347,7 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R
 		}()
 	}
 
-	if err := builder.Build(s.daemon, buildConfig); err != nil {
+	if err := dockerfile.Build(s.daemon, buildConfig); err != nil {
 		// Do not write the error in the http output if it's still empty.
 		// This prevents from writing a 200(OK) when there is an interal error.
 		if !output.Flushed() {
deleted file mode 100644 |
@@ -1,176 +0,0 @@
-package builder
-
-import (
-	"fmt"
-	"strings"
-)
-
-// FlagType is the type of the build flag
-type FlagType int
-
-const (
-	boolType FlagType = iota
-	stringType
-)
-
-// BFlags contains all flags information for the builder
-type BFlags struct {
-	Args  []string // actual flags/args from cmd line
-	flags map[string]*Flag
-	used  map[string]*Flag
-	Err   error
-}
-
-// Flag contains all information for a flag
-type Flag struct {
-	bf       *BFlags
-	name     string
-	flagType FlagType
-	Value    string
-}
-
-// NewBFlags return the new BFlags struct
-func NewBFlags() *BFlags {
-	return &BFlags{
-		flags: make(map[string]*Flag),
-		used:  make(map[string]*Flag),
-	}
-}
-
-// AddBool adds a bool flag to BFlags
-// Note, any error will be generated when Parse() is called (see Parse).
-func (bf *BFlags) AddBool(name string, def bool) *Flag {
-	flag := bf.addFlag(name, boolType)
-	if flag == nil {
-		return nil
-	}
-	if def {
-		flag.Value = "true"
-	} else {
-		flag.Value = "false"
-	}
-	return flag
-}
-
-// AddString adds a string flag to BFlags
-// Note, any error will be generated when Parse() is called (see Parse).
-func (bf *BFlags) AddString(name string, def string) *Flag {
-	flag := bf.addFlag(name, stringType)
-	if flag == nil {
-		return nil
-	}
-	flag.Value = def
-	return flag
-}
-
-// addFlag is a generic func used by the other AddXXX() func
-// to add a new flag to the BFlags struct.
-// Note, any error will be generated when Parse() is called (see Parse).
-func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag {
-	if _, ok := bf.flags[name]; ok {
-		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
-		return nil
-	}
-
-	newFlag := &Flag{
-		bf:       bf,
-		name:     name,
-		flagType: flagType,
-	}
-	bf.flags[name] = newFlag
-
-	return newFlag
-}
-
-// IsUsed checks if the flag is used
-func (fl *Flag) IsUsed() bool {
-	if _, ok := fl.bf.used[fl.name]; ok {
-		return true
-	}
-	return false
-}
-
-// IsTrue checks if a bool flag is true
-func (fl *Flag) IsTrue() bool {
-	if fl.flagType != boolType {
-		// Should never get here
-		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
-	}
-	return fl.Value == "true"
-}
-
-// Parse parses and checks if the BFlags is valid.
-// Any error noticed during the AddXXX() funcs will be generated/returned
-// here. We do this because an error during AddXXX() is more like a
-// compile time error so it doesn't matter too much when we stop our
-// processing as long as we do stop it, so this allows the code
-// around AddXXX() to be just:
-//	defFlag := AddString("desription", "")
-// w/o needing to add an if-statement around each one.
-func (bf *BFlags) Parse() error {
-	// If there was an error while defining the possible flags
-	// go ahead and bubble it back up here since we didn't do it
-	// earlier in the processing
-	if bf.Err != nil {
-		return fmt.Errorf("Error setting up flags: %s", bf.Err)
-	}
-
-	for _, arg := range bf.Args {
-		if !strings.HasPrefix(arg, "--") {
-			return fmt.Errorf("Arg should start with -- : %s", arg)
-		}
-
-		if arg == "--" {
-			return nil
-		}
-
-		arg = arg[2:]
-		value := ""
-
-		index := strings.Index(arg, "=")
-		if index >= 0 {
-			value = arg[index+1:]
-			arg = arg[:index]
-		}
-
-		flag, ok := bf.flags[arg]
-		if !ok {
-			return fmt.Errorf("Unknown flag: %s", arg)
-		}
-
-		if _, ok = bf.used[arg]; ok {
-			return fmt.Errorf("Duplicate flag specified: %s", arg)
-		}
-
-		bf.used[arg] = flag
-
-		switch flag.flagType {
-		case boolType:
-			// value == "" is only ok if no "=" was specified
-			if index >= 0 && value == "" {
-				return fmt.Errorf("Missing a value on flag: %s", arg)
-			}
-
-			lower := strings.ToLower(value)
-			if lower == "" {
-				flag.Value = "true"
-			} else if lower == "true" || lower == "false" {
-				flag.Value = lower
-			} else {
-				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
-			}
-
-		case stringType:
-			if index < 0 {
-				return fmt.Errorf("Missing a value on flag: %s", arg)
-			}
-			flag.Value = value
-
-		default:
-			panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!"))
-		}
-
-	}
-
-	return nil
-}
deleted file mode 100644 |
@@ -1,187 +0,0 @@
-package builder
-
-import (
-	"testing"
-)
-
-func TestBuilderFlags(t *testing.T) {
-	var expected string
-	var err error
-
-	// ---
-
-	bf := NewBFlags()
-	bf.Args = []string{}
-	if err := bf.Parse(); err != nil {
-		t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	bf.Args = []string{"--"}
-	if err := bf.Parse(); err != nil {
-		t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 := bf.AddString("str1", "")
-	flBool1 := bf.AddBool("bool1", false)
-	bf.Args = []string{}
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flStr1.IsUsed() == true {
-		t.Fatalf("Test3 - str1 was not used!")
-	}
-	if flBool1.IsUsed() == true {
-		t.Fatalf("Test3 - bool1 was not used!")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flStr1.Value != "HI" {
-		t.Fatalf("Str1 was supposed to default to: HI")
-	}
-	if flBool1.IsTrue() {
-		t.Fatalf("Bool1 was supposed to default to: false")
-	}
-	if flStr1.IsUsed() == true {
-		t.Fatalf("Str1 was not used!")
-	}
-	if flBool1.IsUsed() == true {
-		t.Fatalf("Bool1 was not used!")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	bf.Args = []string{"--str1"}
-
-	if err = bf.Parse(); err == nil {
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	bf.Args = []string{"--str1="}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	expected = ""
-	if flStr1.Value != expected {
-		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	bf.Args = []string{"--str1=BYE"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	expected = "BYE"
-	if flStr1.Value != expected {
-		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if !flBool1.IsTrue() {
-		t.Fatalf("Test-b1 Bool1 was supposed to be true")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1=true"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if !flBool1.IsTrue() {
-		t.Fatalf("Test-b2 Bool1 was supposed to be true")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1=false"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flBool1.IsTrue() {
-		t.Fatalf("Test-b3 Bool1 was supposed to be false")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1=false1"}
-
-	if err = bf.Parse(); err == nil {
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool2"}
-
-	if err = bf.Parse(); err == nil {
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1", "--str1=BYE"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flStr1.Value != "BYE" {
-		t.Fatalf("Teset %s, str1 should be BYE", bf.Args)
-	}
-	if !flBool1.IsTrue() {
-		t.Fatalf("Teset %s, bool1 should be true", bf.Args)
-	}
-}
deleted file mode 100644 |
@@ -1,42 +0,0 @@
-// Package command contains the set of Dockerfile commands.
-package command
-
-// Define constants for the command strings
-const (
-	Env        = "env"
-	Label      = "label"
-	Maintainer = "maintainer"
-	Add        = "add"
-	Copy       = "copy"
-	From       = "from"
-	Onbuild    = "onbuild"
-	Workdir    = "workdir"
-	Run        = "run"
-	Cmd        = "cmd"
-	Entrypoint = "entrypoint"
-	Expose     = "expose"
-	Volume     = "volume"
-	User       = "user"
-	StopSignal = "stopsignal"
-	Arg        = "arg"
-)
-
-// Commands is list of all Dockerfile commands
-var Commands = map[string]struct{}{
-	Env:        {},
-	Label:      {},
-	Maintainer: {},
-	Add:        {},
-	Copy:       {},
-	From:       {},
-	Onbuild:    {},
-	Workdir:    {},
-	Run:        {},
-	Cmd:        {},
-	Entrypoint: {},
-	Expose:     {},
-	Volume:     {},
-	User:       {},
-	StopSignal: {},
-	Arg:        {},
-}
deleted file mode 100644 |
@@ -1,650 +0,0 @@
-package builder
-
-// This file contains the dispatchers for each command. Note that
-// `nullDispatch` is not actually a command, but support for commands we parse
-// but do nothing with.
-//
-// See evaluator.go for a higher level discussion of the whole evaluator
-// package.
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"sort"
-	"strings"
-
-	"github.com/Sirupsen/logrus"
-	derr "github.com/docker/docker/errors"
-	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/docker/pkg/nat"
-	"github.com/docker/docker/pkg/signal"
-	"github.com/docker/docker/pkg/stringutils"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/docker/runconfig"
-)
-
-const (
-	// NoBaseImageSpecifier is the symbol used by the FROM
-	// command to specify that no base image is to be used.
-	NoBaseImageSpecifier string = "scratch"
-)
-
-// dispatch with no layer / parsing. This is effectively not a command.
-func nullDispatch(b *builder, args []string, attributes map[string]bool, original string) error {
-	return nil
-}
-
-// ENV foo bar
-//
-// Sets the environment variable foo to bar, also makes interpolation
-// in the dockerfile available from the next statement on via ${foo}.
-//
-func env(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) == 0 {
-		return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV")
-	}
-
-	if len(args)%2 != 0 {
-		// should never get here, but just in case
-		return derr.ErrorCodeTooManyArgs.WithArgs("ENV")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	// TODO/FIXME/NOT USED
-	// Just here to show how to use the builder flags stuff within the
-	// context of a builder command. Will remove once we actually add
-	// a builder command to something!
-	/*
-		flBool1 := b.BuilderFlags.AddBool("bool1", false)
-		flStr1 := b.BuilderFlags.AddString("str1", "HI")
-
-		if err := b.BuilderFlags.Parse(); err != nil {
-			return err
-		}
-
-		fmt.Printf("Bool1:%v\n", flBool1)
-		fmt.Printf("Str1:%v\n", flStr1)
-	*/
-
-	commitStr := "ENV"
-
-	for j := 0; j < len(args); j++ {
-		// name  ==> args[j]
-		// value ==> args[j+1]
-		newVar := args[j] + "=" + args[j+1] + ""
-		commitStr += " " + newVar
-
-		gotOne := false
-		for i, envVar := range b.Config.Env {
-			envParts := strings.SplitN(envVar, "=", 2)
-			if envParts[0] == args[j] {
-				b.Config.Env[i] = newVar
-				gotOne = true
-				break
-			}
-		}
-		if !gotOne {
-			b.Config.Env = append(b.Config.Env, newVar)
-		}
-		j++
-	}
-
-	return b.commit("", b.Config.Cmd, commitStr)
-}
-
-// MAINTAINER some text <maybe@an.email.address>
-//
-// Sets the maintainer metadata.
-func maintainer(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) != 1 {
-		return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	b.maintainer = args[0]
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
-}
-
-// LABEL some json data describing the image
-//
-// Sets the Label variable foo to bar,
-//
-func label(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) == 0 {
-		return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL")
-	}
-	if len(args)%2 != 0 {
-		// should never get here, but just in case
-		return derr.ErrorCodeTooManyArgs.WithArgs("LABEL")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	commitStr := "LABEL"
-
-	if b.Config.Labels == nil {
-		b.Config.Labels = map[string]string{}
-	}
-
-	for j := 0; j < len(args); j++ {
-		// name  ==> args[j]
-		// value ==> args[j+1]
-		newVar := args[j] + "=" + args[j+1] + ""
-		commitStr += " " + newVar
-
-		b.Config.Labels[args[j]] = args[j+1]
-		j++
-	}
-	return b.commit("", b.Config.Cmd, commitStr)
-}
-
-// ADD foo /path
-//
-// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
-// exist here. If you do not wish to have this automatic handling, use COPY.
-//
-func add(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) < 2 {
-		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	return b.runContextCommand(args, true, true, "ADD")
-}
-
-// COPY foo /path
-//
-// Same as 'ADD' but without the tar and remote url handling.
-//
-func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) < 2 {
-		return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	return b.runContextCommand(args, false, false, "COPY")
-}
-
-// FROM imagename
-//
-// This sets the image the dockerfile will build on top of.
-//
-func from(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) != 1 {
-		return derr.ErrorCodeExactlyOneArg.WithArgs("FROM")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	name := args[0]
-
-	// Windows cannot support a container with no base image.
-	if name == NoBaseImageSpecifier {
-		if runtime.GOOS == "windows" {
-			return fmt.Errorf("Windows does not support FROM scratch")
-		}
-		b.image = ""
-		b.noBaseImage = true
-		return nil
-	}
-
-	image, err := b.Daemon.Repositories().LookupImage(name)
-	if b.Pull {
-		image, err = b.pullImage(name)
-		if err != nil {
-			return err
-		}
-	}
-	if err != nil {
-		if b.Daemon.Graph().IsNotExist(err, name) {
-			image, err = b.pullImage(name)
-		}
-
-		// note that the top level err will still be !nil here if IsNotExist is
-		// not the error. This approach just simplifies the logic a bit.
-		if err != nil {
-			return err
-		}
-	}
-
-	return b.processImageFrom(image)
-}
-
-// ONBUILD RUN echo yo
-//
-// ONBUILD triggers run when the image is used in a FROM statement.
-//
-// ONBUILD handling has a lot of special-case functionality, the heading in
-// evaluator.go and comments around dispatch() in the same file explain the
-// special cases. search for 'OnBuild' in internals.go for additional special
-// cases.
-//
-func onbuild(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) == 0 {
-		return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0]))
-	switch triggerInstruction {
-	case "ONBUILD":
-		return derr.ErrorCodeChainOnBuild
-	case "MAINTAINER", "FROM":
-		return derr.ErrorCodeBadOnBuildCmd.WithArgs(triggerInstruction)
-	}
-
-	original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "")
-
-	b.Config.OnBuild = append(b.Config.OnBuild, original)
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
-}
-
-// WORKDIR /tmp
-//
-// Set the working directory for future RUN/CMD/etc statements.
-//
-func workdir(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) != 1 {
-		return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	// This is from the Dockerfile and will not necessarily be in platform
-	// specific semantics, hence ensure it is converted.
-	workdir := filepath.FromSlash(args[0])
-
-	if !system.IsAbs(workdir) {
-		current := filepath.FromSlash(b.Config.WorkingDir)
-		workdir = filepath.Join(string(os.PathSeparator), current, workdir)
-	}
-
-	b.Config.WorkingDir = workdir
-
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
-}
-
-// RUN some command yo
-//
-// run a command and commit the image. Args are automatically prepended with
-// 'sh -c' under linux or 'cmd /S /C' under Windows, in the event there is
-// only one argument. The difference in processing:
-//
-// RUN echo hi          # sh -c echo hi       (Linux)
-// RUN echo hi          # cmd /S /C echo hi   (Windows)
-// RUN [ "echo", "hi" ] # echo hi
-//
-func run(b *builder, args []string, attributes map[string]bool, original string) error {
-	if b.image == "" && !b.noBaseImage {
-		return derr.ErrorCodeMissingFrom
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	args = handleJSONArgs(args, attributes)
-
-	if !attributes["json"] {
-		if runtime.GOOS != "windows" {
-			args = append([]string{"/bin/sh", "-c"}, args...)
-		} else {
-			args = append([]string{"cmd", "/S", "/C"}, args...)
-		}
-	}
-
-	runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
-	runCmd.SetOutput(ioutil.Discard)
-	runCmd.Usage = nil
-
-	config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...))
-	if err != nil {
-		return err
-	}
-
-	// stash the cmd
-	cmd := b.Config.Cmd
-	runconfig.Merge(b.Config, config)
-	// stash the config environment
-	env := b.Config.Env
-
-	defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
-	defer func(env []string) { b.Config.Env = env }(env)
-
-	// derive the net build-time environment for this run. We let config
-	// environment override the build time environment.
-	// This means that we take the b.buildArgs list of env vars and remove
-	// any of those variables that are defined as part of the container. In other
-	// words, anything in b.Config.Env. What's left is the list of build-time env
-	// vars that we need to add to each RUN command - note the list could be empty.
-	//
-	// We don't persist the build time environment with container's config
-	// environment, but just sort and prepend it to the command string at time
-	// of commit.
-	// This helps with tracing back the image's actual environment at the time
-	// of RUN, without leaking it to the final image. It also aids cache
-	// lookup for same image built with same build time environment.
-	cmdBuildEnv := []string{}
-	configEnv := runconfig.ConvertKVStringsToMap(b.Config.Env)
-	for key, val := range b.buildArgs {
-		if !b.isBuildArgAllowed(key) {
-			// skip build-args that are not in allowed list, meaning they have
-			// not been defined by an "ARG" Dockerfile command yet.
-			// This is an error condition but only if there is no "ARG" in the entire
-			// Dockerfile, so we'll generate any necessary errors after we parsed
-			// the entire file (see 'leftoverArgs' processing in evaluator.go )
-			continue
-		}
-		if _, ok := configEnv[key]; !ok {
-			cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val))
-		}
-	}
-
-	// derive the command to use for probeCache() and to commit in this container.
-	// Note that we only do this if there are any build-time env vars.  Also, we
-	// use the special argument "|#" at the start of the args array. This will
-	// avoid conflicts with any RUN command since commands can not
-	// start with | (vertical bar). The "#" (number of build envs) is there to
-	// help ensure proper cache matches. We don't want a RUN command
-	// that starts with "foo=abc" to be considered part of a build-time env var.
-	saveCmd := config.Cmd
-	if len(cmdBuildEnv) > 0 {
-		sort.Strings(cmdBuildEnv)
-		tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
-		saveCmd = stringutils.NewStrSlice(append(tmpEnv, saveCmd.Slice()...)...)
-	}
-
-	b.Config.Cmd = saveCmd
-	hit, err := b.probeCache()
-	if err != nil {
-		return err
-	}
-	if hit {
-		return nil
-	}
-
-	// set Cmd manually, this is special case only for Dockerfiles
-	b.Config.Cmd = config.Cmd
-	// set build-time environment for 'run'.
-	b.Config.Env = append(b.Config.Env, cmdBuildEnv...)
-
-	logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
-
-	c, err := b.create()
-	if err != nil {
-		return err
-	}
-
-	// Ensure that we keep the container mounted until the commit
-	// to avoid unmounting and then mounting directly again
-	c.Mount()
-	defer c.Unmount()
-
-	err = b.run(c)
-	if err != nil {
-		return err
-	}
-
-	// revert to original config environment and set the command string to
-	// have the build-time env vars in it (if any) so that future cache look-ups
-	// properly match it.
-	b.Config.Env = env
-	b.Config.Cmd = saveCmd
-	if err := b.commit(c.ID, cmd, "run"); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// CMD foo
-//
-// Set the default command to run in the container (which may be empty).
-// Argument handling is the same as RUN.
-//
-func cmd(b *builder, args []string, attributes map[string]bool, original string) error {
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	cmdSlice := handleJSONArgs(args, attributes)
-
-	if !attributes["json"] {
-		if runtime.GOOS != "windows" {
-			cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
-		} else {
-			cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...)
-		}
-	}
-
-	b.Config.Cmd = stringutils.NewStrSlice(cmdSlice...)
-
-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
-		return err
-	}
-
-	if len(args) != 0 {
-		b.cmdSet = true
-	}
-
-	return nil
-}
-
-// ENTRYPOINT /usr/sbin/nginx
-//
-// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to
-// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx.
-//
-// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint
-// is initialized at NewBuilder time instead of through argument parsing.
-//
-func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error {
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	parsed := handleJSONArgs(args, attributes)
-
-	switch {
-	case attributes["json"]:
-		// ENTRYPOINT ["echo", "hi"]
-		b.Config.Entrypoint = stringutils.NewStrSlice(parsed...)
-	case len(parsed) == 0:
-		// ENTRYPOINT []
-		b.Config.Entrypoint = nil
-	default:
-		// ENTRYPOINT echo hi
-		if runtime.GOOS != "windows" {
-			b.Config.Entrypoint = stringutils.NewStrSlice("/bin/sh", "-c", parsed[0])
-		} else {
-			b.Config.Entrypoint = stringutils.NewStrSlice("cmd", "/S", "/C", parsed[0])
-		}
-	}
-
-	// when setting the entrypoint if a CMD was not explicitly set then
-	// set the command to nil
-	if !b.cmdSet {
-		b.Config.Cmd = nil
-	}
-
-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// EXPOSE 6667/tcp 7000/tcp
-//
-// Expose ports for links and port mappings. This all ends up in
-// b.Config.ExposedPorts for runconfig.
-//
-func expose(b *builder, args []string, attributes map[string]bool, original string) error {
-	portsTab := args
-
-	if len(args) == 0 {
-		return derr.ErrorCodeAtLeastOneArg.WithArgs("EXPOSE")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	if b.Config.ExposedPorts == nil {
-		b.Config.ExposedPorts = make(nat.PortSet)
-	}
-
-	ports, _, err := nat.ParsePortSpecs(portsTab)
-	if err != nil {
-		return err
-	}
-
-	// instead of using ports directly, we build a list of ports and sort it so
-	// the order is consistent. This prevents cache burst where map ordering
-	// changes between builds
-	portList := make([]string, len(ports))
-	var i int
-	for port := range ports {
-		if _, exists := b.Config.ExposedPorts[port]; !exists {
-			b.Config.ExposedPorts[port] = struct{}{}
-		}
-		portList[i] = string(port)
-		i++
-	}
-	sort.Strings(portList)
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
-}
-
-// USER foo
-//
-// Set the user to 'foo' for future commands and when running the
-// ENTRYPOINT/CMD at container run time.
-//
-func user(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) != 1 {
-		return derr.ErrorCodeExactlyOneArg.WithArgs("USER")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	b.Config.User = args[0]
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
-}
-
-// VOLUME /foo
-//
-// Expose the volume /foo for use. Will also accept the JSON array form.
-//
-func volume(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) == 0 {
-		return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME")
-	}
-
-	if err := b.BuilderFlags.Parse(); err != nil {
-		return err
-	}
-
-	if b.Config.Volumes == nil {
-		b.Config.Volumes = map[string]struct{}{}
-	}
-	for _, v := range args {
-		v = strings.TrimSpace(v)
-		if v == "" {
-			return derr.ErrorCodeVolumeEmpty
-		}
-		b.Config.Volumes[v] = struct{}{}
-	}
-	if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
-		return err
-	}
-	return nil
-}
-
-// STOPSIGNAL signal
-//
-// Set the signal that will be used to kill the container.
-func stopSignal(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) != 1 {
-		return fmt.Errorf("STOPSIGNAL requires exactly one argument")
-	}
-
-	sig := args[0]
-	_, err := signal.ParseSignal(sig)
-	if err != nil {
-		return err
-	}
-
-	b.Config.StopSignal = sig
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
-}
-
-// ARG name[=value]
-//
-// Adds the variable foo to the trusted list of variables that can be passed
-// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'.
-// Dockerfile author may optionally set a default value of this variable.
-func arg(b *builder, args []string, attributes map[string]bool, original string) error {
-	if len(args) != 1 {
-		return fmt.Errorf("ARG requires exactly one argument definition")
-	}
-
-	var (
-		name       string
-		value      string
-		hasDefault bool
-	)
-
-	arg := args[0]
-	// 'arg' can just be a name or name-value pair. Note that this is different
-	// from 'env' that handles the split of name and value at the parser level.
-	// The reason for doing it differently for 'arg' is that we support just
-	// defining an arg and not assign it a value (while 'env' always expects a
-	// name-value pair). If possible, it will be good to harmonize the two.
-	if strings.Contains(arg, "=") {
-		parts := strings.SplitN(arg, "=", 2)
-		name = parts[0]
-		value = parts[1]
-		hasDefault = true
-	} else {
-		name = arg
-		hasDefault = false
-	}
-	// add the arg to allowed list of build-time args from this step on.
-	b.allowedBuildArgs[name] = true
-
-	// If there is a default value associated with this arg then add it to the
-	// b.buildArgs if one is not already passed to the builder. The args passed
-	// to builder override the defaut value of 'arg'.
-	if _, ok := b.buildArgs[name]; !ok && hasDefault {
-		b.buildArgs[name] = value
-	}
-
-	return b.commit("", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
-}
new file mode 100644 |
@@ -0,0 +1,176 @@
+package dockerfile
+
+import (
+	"fmt"
+	"strings"
+)
+
+// FlagType is the type of the build flag
+type FlagType int
+
+const (
+	boolType FlagType = iota
+	stringType
+)
+
+// BFlags contains all flags information for the builder
+type BFlags struct {
+	Args  []string // actual flags/args from cmd line
+	flags map[string]*Flag
+	used  map[string]*Flag
+	Err   error
+}
+
+// Flag contains all information for a flag
+type Flag struct {
+	bf       *BFlags
+	name     string
+	flagType FlagType
+	Value    string
+}
+
+// NewBFlags return the new BFlags struct
+func NewBFlags() *BFlags {
+	return &BFlags{
+		flags: make(map[string]*Flag),
+		used:  make(map[string]*Flag),
+	}
+}
+
+// AddBool adds a bool flag to BFlags
+// Note, any error will be generated when Parse() is called (see Parse).
+func (bf *BFlags) AddBool(name string, def bool) *Flag {
+	flag := bf.addFlag(name, boolType)
+	if flag == nil {
+		return nil
+	}
+	if def {
+		flag.Value = "true"
+	} else {
+		flag.Value = "false"
+	}
+	return flag
+}
+
+// AddString adds a string flag to BFlags
+// Note, any error will be generated when Parse() is called (see Parse).
+func (bf *BFlags) AddString(name string, def string) *Flag {
+	flag := bf.addFlag(name, stringType)
+	if flag == nil {
+		return nil
+	}
+	flag.Value = def
+	return flag
+}
+
+// addFlag is a generic func used by the other AddXXX() func
+// to add a new flag to the BFlags struct.
+// Note, any error will be generated when Parse() is called (see Parse).
+func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag {
+	if _, ok := bf.flags[name]; ok {
+		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
+		return nil
+	}
+
+	newFlag := &Flag{
+		bf:       bf,
+		name:     name,
+		flagType: flagType,
+	}
+	bf.flags[name] = newFlag
+
+	return newFlag
+}
+
+// IsUsed checks if the flag is used
+func (fl *Flag) IsUsed() bool {
+	if _, ok := fl.bf.used[fl.name]; ok {
+		return true
+	}
+	return false
+}
+
+// IsTrue checks if a bool flag is true
+func (fl *Flag) IsTrue() bool {
+	if fl.flagType != boolType {
+		// Should never get here
+		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
+	}
+	return fl.Value == "true"
+}
+
+// Parse parses and checks if the BFlags is valid.
+// Any error noticed during the AddXXX() funcs will be generated/returned
+// here. We do this because an error during AddXXX() is more like a
+// compile time error so it doesn't matter too much when we stop our
+// processing as long as we do stop it, so this allows the code
+// around AddXXX() to be just:
+//	defFlag := AddString("desription", "")
+// w/o needing to add an if-statement around each one.
+func (bf *BFlags) Parse() error {
+	// If there was an error while defining the possible flags
+	// go ahead and bubble it back up here since we didn't do it
+	// earlier in the processing
+	if bf.Err != nil {
+		return fmt.Errorf("Error setting up flags: %s", bf.Err)
+	}
+
+	for _, arg := range bf.Args {
+		if !strings.HasPrefix(arg, "--") {
+			return fmt.Errorf("Arg should start with -- : %s", arg)
+		}
+
+		if arg == "--" {
+			return nil
+		}
+
+		arg = arg[2:]
+		value := ""
+
+		index := strings.Index(arg, "=")
+		if index >= 0 {
+			value = arg[index+1:]
+			arg = arg[:index]
+		}
+
+		flag, ok := bf.flags[arg]
+		if !ok {
+			return fmt.Errorf("Unknown flag: %s", arg)
+		}
+
+		if _, ok = bf.used[arg]; ok {
+			return fmt.Errorf("Duplicate flag specified: %s", arg)
+		}
+
+		bf.used[arg] = flag
+
+		switch flag.flagType {
+		case boolType:
+			// value == "" is only ok if no "=" was specified
+			if index >= 0 && value == "" {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+
+			lower := strings.ToLower(value)
+			if lower == "" {
+				flag.Value = "true"
+			} else if lower == "true" || lower == "false" {
+				flag.Value = lower
+			} else {
+				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
+			}
+
+		case stringType:
+			if index < 0 {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+			flag.Value = value
+
+		default:
+			panic(fmt.Errorf("No idea what kind of flag we have! Should never get here!"))
+		}
+
+	}
+
+	return nil
+}
new file mode 100644 |
@@ -0,0 +1,187 @@
+package dockerfile
+
+import (
+	"testing"
+)
+
+func TestBuilderFlags(t *testing.T) {
+	var expected string
+	var err error
+
+	// ---
+
+	bf := NewBFlags()
+	bf.Args = []string{}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	bf.Args = []string{"--"}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 := bf.AddString("str1", "")
+	flBool1 := bf.AddBool("bool1", false)
+	bf.Args = []string{}
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.IsUsed() == true {
+		t.Fatalf("Test3 - str1 was not used!")
+	}
+	if flBool1.IsUsed() == true {
+		t.Fatalf("Test3 - bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "HI" {
+		t.Fatalf("Str1 was supposed to default to: HI")
+	}
+	if flBool1.IsTrue() {
+		t.Fatalf("Bool1 was supposed to default to: false")
+	}
+	if flStr1.IsUsed() == true {
+		t.Fatalf("Str1 was not used!")
+	}
+	if flBool1.IsUsed() == true {
+		t.Fatalf("Bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1="}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = ""
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = "BYE"
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test-b1 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=true"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test-b2 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flBool1.IsTrue() {
+		t.Fatalf("Test-b3 Bool1 was supposed to be false")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool2"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1", "--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "BYE" {
+		t.Fatalf("Teset %s, str1 should be BYE", bf.Args)
+	}
+	if !flBool1.IsTrue() {
+		t.Fatalf("Teset %s, bool1 should be true", bf.Args)
+	}
+}
new file mode 100644 |
@@ -0,0 +1,42 @@
+// Package command contains the set of Dockerfile commands.
+package command
+
+// Define constants for the command strings
+const (
+	Env        = "env"
+	Label      = "label"
+	Maintainer = "maintainer"
+	Add        = "add"
+	Copy       = "copy"
+	From       = "from"
+	Onbuild    = "onbuild"
+	Workdir    = "workdir"
+	Run        = "run"
+	Cmd        = "cmd"
+	Entrypoint = "entrypoint"
+	Expose     = "expose"
+	Volume     = "volume"
+	User       = "user"
+	StopSignal = "stopsignal"
+	Arg        = "arg"
+)
+
+// Commands is list of all Dockerfile commands
+var Commands = map[string]struct{}{
+	Env:        {},
+	Label:      {},
+	Maintainer: {},
+	Add:        {},
+	Copy:       {},
+	From:       {},
+	Onbuild:    {},
+	Workdir:    {},
+	Run:        {},
+	Cmd:        {},
+	Entrypoint: {},
+	Expose:     {},
+	Volume:     {},
+	User:       {},
+	StopSignal: {},
+	Arg:        {},
+}
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,650 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+// This file contains the dispatchers for each command. Note that |
|
| 3 |
+// `nullDispatch` is not actually a command, but support for commands we parse |
|
| 4 |
+// but do nothing with. |
|
| 5 |
+// |
|
| 6 |
+// See evaluator.go for a higher level discussion of the whole evaluator |
|
| 7 |
+// package. |
|
| 8 |
+ |
|
| 9 |
+import ( |
|
| 10 |
+ "fmt" |
|
| 11 |
+ "io/ioutil" |
|
| 12 |
+ "os" |
|
| 13 |
+ "path/filepath" |
|
| 14 |
+ "regexp" |
|
| 15 |
+ "runtime" |
|
| 16 |
+ "sort" |
|
| 17 |
+ "strings" |
|
| 18 |
+ |
|
| 19 |
+ "github.com/Sirupsen/logrus" |
|
| 20 |
+ derr "github.com/docker/docker/errors" |
|
| 21 |
+ flag "github.com/docker/docker/pkg/mflag" |
|
| 22 |
+ "github.com/docker/docker/pkg/nat" |
|
| 23 |
+ "github.com/docker/docker/pkg/signal" |
|
| 24 |
+ "github.com/docker/docker/pkg/stringutils" |
|
| 25 |
+ "github.com/docker/docker/pkg/system" |
|
| 26 |
+ "github.com/docker/docker/runconfig" |
|
| 27 |
+) |
|
| 28 |
+ |
|
| 29 |
+const ( |
|
| 30 |
+ // NoBaseImageSpecifier is the symbol used by the FROM |
|
| 31 |
+ // command to specify that no base image is to be used. |
|
| 32 |
+ NoBaseImageSpecifier string = "scratch" |
|
| 33 |
+) |
|
| 34 |
+ |
|
| 35 |
+// dispatch with no layer / parsing. This is effectively not a command. |
|
| 36 |
+func nullDispatch(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 37 |
+ return nil |
|
| 38 |
+} |
|
| 39 |
+ |
|
| 40 |
+// ENV foo bar |
|
| 41 |
+// |
|
| 42 |
+// Sets the environment variable foo to bar, and makes interpolation |
|
| 43 |
+// in the Dockerfile available from the next statement on via ${foo}.
|
|
| 44 |
+// |
|
| 45 |
+func env(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 46 |
+ if len(args) == 0 {
|
|
| 47 |
+ return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV")
|
|
| 48 |
+ } |
|
| 49 |
+ |
|
| 50 |
+ if len(args)%2 != 0 {
|
|
| 51 |
+ // should never get here, but just in case |
|
| 52 |
+ return derr.ErrorCodeTooManyArgs.WithArgs("ENV")
|
|
| 53 |
+ } |
|
| 54 |
+ |
|
| 55 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 56 |
+ return err |
|
| 57 |
+ } |
|
| 58 |
+ |
|
| 59 |
+ // TODO/FIXME/NOT USED |
|
| 60 |
+ // Just here to show how to use the builder flags stuff within the |
|
| 61 |
+ // context of a builder command. Will remove once we actually add |
|
| 62 |
+ // a builder command to something! |
|
| 63 |
+ /* |
|
| 64 |
+ flBool1 := b.BuilderFlags.AddBool("bool1", false)
|
|
| 65 |
+ flStr1 := b.BuilderFlags.AddString("str1", "HI")
|
|
| 66 |
+ |
|
| 67 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 68 |
+ return err |
|
| 69 |
+ } |
|
| 70 |
+ |
|
| 71 |
+ fmt.Printf("Bool1:%v\n", flBool1)
|
|
| 72 |
+ fmt.Printf("Str1:%v\n", flStr1)
|
|
| 73 |
+ */ |
|
| 74 |
+ |
|
| 75 |
+ commitStr := "ENV" |
|
| 76 |
+ |
|
| 77 |
+ for j := 0; j < len(args); j++ {
|
|
| 78 |
+ // name ==> args[j] |
|
| 79 |
+ // value ==> args[j+1] |
|
| 80 |
+ newVar := args[j] + "=" + args[j+1] |
|
| 81 |
+ commitStr += " " + newVar |
|
| 82 |
+ |
|
| 83 |
+ gotOne := false |
|
| 84 |
+ for i, envVar := range b.Config.Env {
|
|
| 85 |
+ envParts := strings.SplitN(envVar, "=", 2) |
|
| 86 |
+ if envParts[0] == args[j] {
|
|
| 87 |
+ b.Config.Env[i] = newVar |
|
| 88 |
+ gotOne = true |
|
| 89 |
+ break |
|
| 90 |
+ } |
|
| 91 |
+ } |
|
| 92 |
+ if !gotOne {
|
|
| 93 |
+ b.Config.Env = append(b.Config.Env, newVar) |
|
| 94 |
+ } |
|
| 95 |
+ j++ |
|
| 96 |
+ } |
|
| 97 |
+ |
|
| 98 |
+ return b.commit("", b.Config.Cmd, commitStr)
|
|
| 99 |
+} |
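The pairing loop above leans on the parser delivering ENV tokens as a flat, even-length name/value slice (enforced by the len(args)%2 check). A standalone model of what `ENV PATH=/usr/bin LANG=C` turns into:

```go
package main

import "fmt"

func main() {
	// args[j] is a name, args[j+1] its value; j advances by two per pair,
	// mirroring the j++ in the loop body of env() plus the loop's own j++.
	args := []string{"PATH", "/usr/bin", "LANG", "C"}
	for j := 0; j < len(args); j += 2 {
		fmt.Printf("%s=%s\n", args[j], args[j+1]) // PATH=/usr/bin, then LANG=C
	}
}
```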
|
| 100 |
+ |
|
| 101 |
+// MAINTAINER some text <maybe@an.email.address> |
|
| 102 |
+// |
|
| 103 |
+// Sets the maintainer metadata. |
|
| 104 |
+func maintainer(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 105 |
+ if len(args) != 1 {
|
|
| 106 |
+ return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER")
|
|
| 107 |
+ } |
|
| 108 |
+ |
|
| 109 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 110 |
+ return err |
|
| 111 |
+ } |
|
| 112 |
+ |
|
| 113 |
+ b.maintainer = args[0] |
|
| 114 |
+ return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
|
|
| 115 |
+} |
|
| 116 |
+ |
|
| 117 |
+// LABEL some json data describing the image |
|
| 118 |
+// |
|
| 119 |
+// Sets the label foo to bar on the image. |
|
| 120 |
+// |
|
| 121 |
+func label(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 122 |
+ if len(args) == 0 {
|
|
| 123 |
+ return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL")
|
|
| 124 |
+ } |
|
| 125 |
+ if len(args)%2 != 0 {
|
|
| 126 |
+ // should never get here, but just in case |
|
| 127 |
+ return derr.ErrorCodeTooManyArgs.WithArgs("LABEL")
|
|
| 128 |
+ } |
|
| 129 |
+ |
|
| 130 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 131 |
+ return err |
|
| 132 |
+ } |
|
| 133 |
+ |
|
| 134 |
+ commitStr := "LABEL" |
|
| 135 |
+ |
|
| 136 |
+ if b.Config.Labels == nil {
|
|
| 137 |
+ b.Config.Labels = map[string]string{}
|
|
| 138 |
+ } |
|
| 139 |
+ |
|
| 140 |
+ for j := 0; j < len(args); j++ {
|
|
| 141 |
+ // name ==> args[j] |
|
| 142 |
+ // value ==> args[j+1] |
|
| 143 |
+ newVar := args[j] + "=" + args[j+1] |
|
| 144 |
+ commitStr += " " + newVar |
|
| 145 |
+ |
|
| 146 |
+ b.Config.Labels[args[j]] = args[j+1] |
|
| 147 |
+ j++ |
|
| 148 |
+ } |
|
| 149 |
+ return b.commit("", b.Config.Cmd, commitStr)
|
|
| 150 |
+} |
|
| 151 |
+ |
|
| 152 |
+// ADD foo /path |
|
| 153 |
+// |
|
| 154 |
+// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling |
|
| 155 |
+// exists here. If you do not wish to have this automatic handling, use COPY. |
|
| 156 |
+// |
|
| 157 |
+func add(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 158 |
+ if len(args) < 2 {
|
|
| 159 |
+ return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD")
|
|
| 160 |
+ } |
|
| 161 |
+ |
|
| 162 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 163 |
+ return err |
|
| 164 |
+ } |
|
| 165 |
+ |
|
| 166 |
+ return b.runContextCommand(args, true, true, "ADD") |
|
| 167 |
+} |
|
| 168 |
+ |
|
| 169 |
+// COPY foo /path |
|
| 170 |
+// |
|
| 171 |
+// Same as 'ADD' but without the tar and remote URL handling. |
|
| 172 |
+// |
|
| 173 |
+func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 174 |
+ if len(args) < 2 {
|
|
| 175 |
+ return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY")
|
|
| 176 |
+ } |
|
| 177 |
+ |
|
| 178 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 179 |
+ return err |
|
| 180 |
+ } |
|
| 181 |
+ |
|
| 182 |
+ return b.runContextCommand(args, false, false, "COPY") |
|
| 183 |
+} |
|
| 184 |
+ |
|
| 185 |
+// FROM imagename |
|
| 186 |
+// |
|
| 187 |
+// This sets the image the dockerfile will build on top of. |
|
| 188 |
+// |
|
| 189 |
+func from(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 190 |
+ if len(args) != 1 {
|
|
| 191 |
+ return derr.ErrorCodeExactlyOneArg.WithArgs("FROM")
|
|
| 192 |
+ } |
|
| 193 |
+ |
|
| 194 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 195 |
+ return err |
|
| 196 |
+ } |
|
| 197 |
+ |
|
| 198 |
+ name := args[0] |
|
| 199 |
+ |
|
| 200 |
+ // Windows cannot support a container with no base image. |
|
| 201 |
+ if name == NoBaseImageSpecifier {
|
|
| 202 |
+ if runtime.GOOS == "windows" {
|
|
| 203 |
+ return fmt.Errorf("Windows does not support FROM scratch")
|
|
| 204 |
+ } |
|
| 205 |
+ b.image = "" |
|
| 206 |
+ b.noBaseImage = true |
|
| 207 |
+ return nil |
|
| 208 |
+ } |
|
| 209 |
+ |
|
| 210 |
+ image, err := b.Daemon.Repositories().LookupImage(name) |
|
| 211 |
+ if b.Pull {
|
|
| 212 |
+ image, err = b.pullImage(name) |
|
| 213 |
+ if err != nil {
|
|
| 214 |
+ return err |
|
| 215 |
+ } |
|
| 216 |
+ } |
|
| 217 |
+ if err != nil {
|
|
| 218 |
+ if b.Daemon.Graph().IsNotExist(err, name) {
|
|
| 219 |
+ image, err = b.pullImage(name) |
|
| 220 |
+ } |
|
| 221 |
+ |
|
| 222 |
+ // note that the top level err will still be !nil here if IsNotExist is |
|
| 223 |
+ // not the error. This approach just simplifies the logic a bit. |
|
| 224 |
+ if err != nil {
|
|
| 225 |
+ return err |
|
| 226 |
+ } |
|
| 227 |
+ } |
|
| 228 |
+ |
|
| 229 |
+ return b.processImageFrom(image) |
|
| 230 |
+} |
|
| 231 |
+ |
|
| 232 |
+// ONBUILD RUN echo yo |
|
| 233 |
+// |
|
| 234 |
+// ONBUILD triggers run when the image is used in a FROM statement. |
|
| 235 |
+// |
|
| 236 |
+// ONBUILD handling has a lot of special-case functionality, the heading in |
|
| 237 |
+// evaluator.go and comments around dispatch() in the same file explain the |
|
| 238 |
+// special cases. Search for 'OnBuild' in internals.go for additional special |
|
| 239 |
+// cases. |
|
| 240 |
+// |
|
| 241 |
+func onbuild(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 242 |
+ if len(args) == 0 {
|
|
| 243 |
+ return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD")
|
|
| 244 |
+ } |
|
| 245 |
+ |
|
| 246 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 247 |
+ return err |
|
| 248 |
+ } |
|
| 249 |
+ |
|
| 250 |
+ triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) |
|
| 251 |
+ switch triggerInstruction {
|
|
| 252 |
+ case "ONBUILD": |
|
| 253 |
+ return derr.ErrorCodeChainOnBuild |
|
| 254 |
+ case "MAINTAINER", "FROM": |
|
| 255 |
+ return derr.ErrorCodeBadOnBuildCmd.WithArgs(triggerInstruction) |
|
| 256 |
+ } |
|
| 257 |
+ |
|
| 258 |
+ original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") |
|
| 259 |
+ |
|
| 260 |
+ b.Config.OnBuild = append(b.Config.OnBuild, original) |
|
| 261 |
+ return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
|
|
| 262 |
+} |
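The regexp above strips only the leading ONBUILD token (case-insensitively), so the stored trigger is the remainder of the original line verbatim. Checked standalone:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in onbuild() above.
	re := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`)
	fmt.Println(re.ReplaceAllString("  onbuild RUN echo hi", "")) // "RUN echo hi"
}
```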
|
| 263 |
+ |
|
| 264 |
+// WORKDIR /tmp |
|
| 265 |
+// |
|
| 266 |
+// Set the working directory for future RUN/CMD/etc statements. |
|
| 267 |
+// |
|
| 268 |
+func workdir(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 269 |
+ if len(args) != 1 {
|
|
| 270 |
+ return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
|
|
| 271 |
+ } |
|
| 272 |
+ |
|
| 273 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 274 |
+ return err |
|
| 275 |
+ } |
|
| 276 |
+ |
|
| 277 |
+ // This is from the Dockerfile and will not necessarily be in platform |
|
| 278 |
+ // specific semantics, hence ensure it is converted. |
|
| 279 |
+ workdir := filepath.FromSlash(args[0]) |
|
| 280 |
+ |
|
| 281 |
+ if !system.IsAbs(workdir) {
|
|
| 282 |
+ current := filepath.FromSlash(b.Config.WorkingDir) |
|
| 283 |
+ workdir = filepath.Join(string(os.PathSeparator), current, workdir) |
|
| 284 |
+ } |
|
| 285 |
+ |
|
| 286 |
+ b.Config.WorkingDir = workdir |
|
| 287 |
+ |
|
| 288 |
+ return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
|
|
| 289 |
+} |
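A quick illustration of the relative-path rule above, with hypothetical values: a non-absolute WORKDIR is joined onto the previous working directory instead of replacing it (Linux separators assumed):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	current := "/app" // stands in for the previous b.Config.WorkingDir
	workdir := "src"  // WORKDIR src
	fmt.Println(filepath.Join(string(os.PathSeparator), current, workdir)) // /app/src
}
```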
|
| 290 |
+ |
|
| 291 |
+// RUN some command yo |
|
| 292 |
+// |
|
| 293 |
+// run a command and commit the image. Args are automatically prepended with |
|
| 294 |
+// 'sh -c' under Linux or 'cmd /S /C' under Windows, in the event there is |
|
| 295 |
+// only one argument. The difference in processing: |
|
| 296 |
+// |
|
| 297 |
+// RUN echo hi # sh -c echo hi (Linux) |
|
| 298 |
+// RUN echo hi # cmd /S /C echo hi (Windows) |
|
| 299 |
+// RUN [ "echo", "hi" ] # echo hi |
|
| 300 |
+// |
|
| 301 |
+func run(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 302 |
+ if b.image == "" && !b.noBaseImage {
|
|
| 303 |
+ return derr.ErrorCodeMissingFrom |
|
| 304 |
+ } |
|
| 305 |
+ |
|
| 306 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 307 |
+ return err |
|
| 308 |
+ } |
|
| 309 |
+ |
|
| 310 |
+ args = handleJSONArgs(args, attributes) |
|
| 311 |
+ |
|
| 312 |
+ if !attributes["json"] {
|
|
| 313 |
+ if runtime.GOOS != "windows" {
|
|
| 314 |
+ args = append([]string{"/bin/sh", "-c"}, args...)
|
|
| 315 |
+ } else {
|
|
| 316 |
+ args = append([]string{"cmd", "/S", "/C"}, args...)
|
|
| 317 |
+ } |
|
| 318 |
+ } |
|
| 319 |
+ |
|
| 320 |
+ runCmd := flag.NewFlagSet("run", flag.ContinueOnError)
|
|
| 321 |
+ runCmd.SetOutput(ioutil.Discard) |
|
| 322 |
+ runCmd.Usage = nil |
|
| 323 |
+ |
|
| 324 |
+ config, _, _, err := runconfig.Parse(runCmd, append([]string{b.image}, args...))
|
|
| 325 |
+ if err != nil {
|
|
| 326 |
+ return err |
|
| 327 |
+ } |
|
| 328 |
+ |
|
| 329 |
+ // stash the cmd |
|
| 330 |
+ cmd := b.Config.Cmd |
|
| 331 |
+ runconfig.Merge(b.Config, config) |
|
| 332 |
+ // stash the config environment |
|
| 333 |
+ env := b.Config.Env |
|
| 334 |
+ |
|
| 335 |
+ defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 336 |
+ defer func(env []string) { b.Config.Env = env }(env)
|
|
| 337 |
+ |
|
| 338 |
+ // derive the net build-time environment for this run. We let config |
|
| 339 |
+ // environment override the build time environment. |
|
| 340 |
+ // This means that we take the b.buildArgs list of env vars and remove |
|
| 341 |
+ // any of those variables that are defined as part of the container. In other |
|
| 342 |
+ // words, anything in b.Config.Env. What's left is the list of build-time env |
|
| 343 |
+ // vars that we need to add to each RUN command - note the list could be empty. |
|
| 344 |
+ // |
|
| 345 |
+ // We don't persist the build time environment with container's config |
|
| 346 |
+ // environment, but just sort and prepend it to the command string at time |
|
| 347 |
+ // of commit. |
|
| 348 |
+ // This helps with tracing back the image's actual environment at the time |
|
| 349 |
+ // of RUN, without leaking it to the final image. It also aids cache |
|
| 350 |
+ // lookup for same image built with same build time environment. |
|
| 351 |
+ cmdBuildEnv := []string{}
|
|
| 352 |
+ configEnv := runconfig.ConvertKVStringsToMap(b.Config.Env) |
|
| 353 |
+ for key, val := range b.buildArgs {
|
|
| 354 |
+ if !b.isBuildArgAllowed(key) {
|
|
| 355 |
+ // skip build-args that are not in allowed list, meaning they have |
|
| 356 |
+ // not been defined by an "ARG" Dockerfile command yet. |
|
| 357 |
+ // This is an error condition but only if there is no "ARG" in the entire |
|
| 358 |
+ // Dockerfile, so we'll generate any necessary errors after we parsed |
|
| 359 |
+ // the entire file (see 'leftoverArgs' processing in evaluator.go ) |
|
| 360 |
+ continue |
|
| 361 |
+ } |
|
| 362 |
+ if _, ok := configEnv[key]; !ok {
|
|
| 363 |
+ cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val))
|
|
| 364 |
+ } |
|
| 365 |
+ } |
|
| 366 |
+ |
|
| 367 |
+ // derive the command to use for probeCache() and to commit in this container. |
|
| 368 |
+ // Note that we only do this if there are any build-time env vars. Also, we |
|
| 369 |
+ // use the special argument "|#" at the start of the args array. This will |
|
| 370 |
+ // avoid conflicts with any RUN command since commands cannot |
|
| 371 |
+ // start with | (vertical bar). The "#" (number of build envs) is there to |
|
| 372 |
+ // help ensure proper cache matches. We don't want a RUN command |
|
| 373 |
+ // that starts with "foo=abc" to be considered part of a build-time env var. |
|
| 374 |
+ saveCmd := config.Cmd |
|
| 375 |
+ if len(cmdBuildEnv) > 0 {
|
|
| 376 |
+ sort.Strings(cmdBuildEnv) |
|
| 377 |
+ tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
|
|
| 378 |
+ saveCmd = stringutils.NewStrSlice(append(tmpEnv, saveCmd.Slice()...)...) |
|
| 379 |
+ } |
|
| 380 |
+ |
|
| 381 |
+ b.Config.Cmd = saveCmd |
|
| 382 |
+ hit, err := b.probeCache() |
|
| 383 |
+ if err != nil {
|
|
| 384 |
+ return err |
|
| 385 |
+ } |
|
| 386 |
+ if hit {
|
|
| 387 |
+ return nil |
|
| 388 |
+ } |
|
| 389 |
+ |
|
| 390 |
+ // set Cmd manually, this is special case only for Dockerfiles |
|
| 391 |
+ b.Config.Cmd = config.Cmd |
|
| 392 |
+ // set build-time environment for 'run'. |
|
| 393 |
+ b.Config.Env = append(b.Config.Env, cmdBuildEnv...) |
|
| 394 |
+ |
|
| 395 |
+ logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
|
|
| 396 |
+ |
|
| 397 |
+ c, err := b.create() |
|
| 398 |
+ if err != nil {
|
|
| 399 |
+ return err |
|
| 400 |
+ } |
|
| 401 |
+ |
|
| 402 |
+ // Ensure that we keep the container mounted until the commit |
|
| 403 |
+ // to avoid unmounting and then mounting directly again |
|
| 404 |
+ c.Mount() |
|
| 405 |
+ defer c.Unmount() |
|
| 406 |
+ |
|
| 407 |
+ err = b.run(c) |
|
| 408 |
+ if err != nil {
|
|
| 409 |
+ return err |
|
| 410 |
+ } |
|
| 411 |
+ |
|
| 412 |
+ // revert to original config environment and set the command string to |
|
| 413 |
+ // have the build-time env vars in it (if any) so that future cache look-ups |
|
| 414 |
+ // properly match it. |
|
| 415 |
+ b.Config.Env = env |
|
| 416 |
+ b.Config.Cmd = saveCmd |
|
| 417 |
+ if err := b.commit(c.ID, cmd, "run"); err != nil {
|
|
| 418 |
+ return err |
|
| 419 |
+ } |
|
| 420 |
+ |
|
| 421 |
+ return nil |
|
| 422 |
+} |
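To make the cache-key construction above concrete: with two build-time variables in play, the command probed against the cache gains a `|2` prefix plus the sorted variables, so it can never collide with a real RUN command (those cannot start with a vertical bar). A standalone sketch with hypothetical values:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	cmdBuildEnv := []string{"FOO=1", "BAR=2"} // build args not shadowed by ENV
	sort.Strings(cmdBuildEnv)                 // stable order for cache matching
	saveCmd := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...)
	saveCmd = append(saveCmd, "/bin/sh", "-c", "make") // the real RUN command
	fmt.Println(strings.Join(saveCmd, " "))            // |2 BAR=2 FOO=1 /bin/sh -c make
}
```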
|
| 423 |
+ |
|
| 424 |
+// CMD foo |
|
| 425 |
+// |
|
| 426 |
+// Set the default command to run in the container (which may be empty). |
|
| 427 |
+// Argument handling is the same as RUN. |
|
| 428 |
+// |
|
| 429 |
+func cmd(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 430 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 431 |
+ return err |
|
| 432 |
+ } |
|
| 433 |
+ |
|
| 434 |
+ cmdSlice := handleJSONArgs(args, attributes) |
|
| 435 |
+ |
|
| 436 |
+ if !attributes["json"] {
|
|
| 437 |
+ if runtime.GOOS != "windows" {
|
|
| 438 |
+ cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...)
|
|
| 439 |
+ } else {
|
|
| 440 |
+ cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...)
|
|
| 441 |
+ } |
|
| 442 |
+ } |
|
| 443 |
+ |
|
| 444 |
+ b.Config.Cmd = stringutils.NewStrSlice(cmdSlice...) |
|
| 445 |
+ |
|
| 446 |
+ if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
|
|
| 447 |
+ return err |
|
| 448 |
+ } |
|
| 449 |
+ |
|
| 450 |
+ if len(args) != 0 {
|
|
| 451 |
+ b.cmdSet = true |
|
| 452 |
+ } |
|
| 453 |
+ |
|
| 454 |
+ return nil |
|
| 455 |
+} |
|
| 456 |
+ |
|
| 457 |
+// ENTRYPOINT /usr/sbin/nginx |
|
| 458 |
+// |
|
| 459 |
+// Set the entrypoint (which defaults to sh -c on Linux, or cmd /S /C on Windows) to |
|
| 460 |
+// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx. |
|
| 461 |
+// |
|
| 462 |
+// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint |
|
| 463 |
+// is initialized at NewBuilder time instead of through argument parsing. |
|
| 464 |
+// |
|
| 465 |
+func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 466 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 467 |
+ return err |
|
| 468 |
+ } |
|
| 469 |
+ |
|
| 470 |
+ parsed := handleJSONArgs(args, attributes) |
|
| 471 |
+ |
|
| 472 |
+ switch {
|
|
| 473 |
+ case attributes["json"]: |
|
| 474 |
+ // ENTRYPOINT ["echo", "hi"] |
|
| 475 |
+ b.Config.Entrypoint = stringutils.NewStrSlice(parsed...) |
|
| 476 |
+ case len(parsed) == 0: |
|
| 477 |
+ // ENTRYPOINT [] |
|
| 478 |
+ b.Config.Entrypoint = nil |
|
| 479 |
+ default: |
|
| 480 |
+ // ENTRYPOINT echo hi |
|
| 481 |
+ if runtime.GOOS != "windows" {
|
|
| 482 |
+ b.Config.Entrypoint = stringutils.NewStrSlice("/bin/sh", "-c", parsed[0])
|
|
| 483 |
+ } else {
|
|
| 484 |
+ b.Config.Entrypoint = stringutils.NewStrSlice("cmd", "/S", "/C", parsed[0])
|
|
| 485 |
+ } |
|
| 486 |
+ } |
|
| 487 |
+ |
|
| 488 |
+ // when setting the entrypoint if a CMD was not explicitly set then |
|
| 489 |
+ // set the command to nil |
|
| 490 |
+ if !b.cmdSet {
|
|
| 491 |
+ b.Config.Cmd = nil |
|
| 492 |
+ } |
|
| 493 |
+ |
|
| 494 |
+ if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
|
|
| 495 |
+ return err |
|
| 496 |
+ } |
|
| 497 |
+ |
|
| 498 |
+ return nil |
|
| 499 |
+} |
|
| 500 |
+ |
|
| 501 |
+// EXPOSE 6667/tcp 7000/tcp |
|
| 502 |
+// |
|
| 503 |
+// Expose ports for links and port mappings. This all ends up in |
|
| 504 |
+// b.Config.ExposedPorts for runconfig. |
|
| 505 |
+// |
|
| 506 |
+func expose(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 507 |
+ portsTab := args |
|
| 508 |
+ |
|
| 509 |
+ if len(args) == 0 {
|
|
| 510 |
+ return derr.ErrorCodeAtLeastOneArg.WithArgs("EXPOSE")
|
|
| 511 |
+ } |
|
| 512 |
+ |
|
| 513 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 514 |
+ return err |
|
| 515 |
+ } |
|
| 516 |
+ |
|
| 517 |
+ if b.Config.ExposedPorts == nil {
|
|
| 518 |
+ b.Config.ExposedPorts = make(nat.PortSet) |
|
| 519 |
+ } |
|
| 520 |
+ |
|
| 521 |
+ ports, _, err := nat.ParsePortSpecs(portsTab) |
|
| 522 |
+ if err != nil {
|
|
| 523 |
+ return err |
|
| 524 |
+ } |
|
| 525 |
+ |
|
| 526 |
+ // instead of using ports directly, we build a list of ports and sort it so |
|
| 527 |
+ // the order is consistent. This prevents cache busting when map ordering |
|
| 528 |
+ // changes between builds |
|
| 529 |
+ portList := make([]string, len(ports)) |
|
| 530 |
+ var i int |
|
| 531 |
+ for port := range ports {
|
|
| 532 |
+ if _, exists := b.Config.ExposedPorts[port]; !exists {
|
|
| 533 |
+ b.Config.ExposedPorts[port] = struct{}{}
|
|
| 534 |
+ } |
|
| 535 |
+ portList[i] = string(port) |
|
| 536 |
+ i++ |
|
| 537 |
+ } |
|
| 538 |
+ sort.Strings(portList) |
|
| 539 |
+ return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
|
|
| 540 |
+} |
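Since Go randomizes map iteration order, the sort above is what keeps the commit string, and with it the cache key, identical across builds. A reduced model:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Copy the exposed-port set into a slice, then sort for a stable string.
	ports := map[string]struct{}{"7000/tcp": {}, "6667/tcp": {}}
	portList := make([]string, 0, len(ports))
	for p := range ports {
		portList = append(portList, p)
	}
	sort.Strings(portList)
	fmt.Println("EXPOSE " + strings.Join(portList, " ")) // EXPOSE 6667/tcp 7000/tcp
}
```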
|
| 541 |
+ |
|
| 542 |
+// USER foo |
|
| 543 |
+// |
|
| 544 |
+// Set the user to 'foo' for future commands and when running the |
|
| 545 |
+// ENTRYPOINT/CMD at container run time. |
|
| 546 |
+// |
|
| 547 |
+func user(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 548 |
+ if len(args) != 1 {
|
|
| 549 |
+ return derr.ErrorCodeExactlyOneArg.WithArgs("USER")
|
|
| 550 |
+ } |
|
| 551 |
+ |
|
| 552 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 553 |
+ return err |
|
| 554 |
+ } |
|
| 555 |
+ |
|
| 556 |
+ b.Config.User = args[0] |
|
| 557 |
+ return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
|
|
| 558 |
+} |
|
| 559 |
+ |
|
| 560 |
+// VOLUME /foo |
|
| 561 |
+// |
|
| 562 |
+// Expose the volume /foo for use. Will also accept the JSON array form. |
|
| 563 |
+// |
|
| 564 |
+func volume(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 565 |
+ if len(args) == 0 {
|
|
| 566 |
+ return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME")
|
|
| 567 |
+ } |
|
| 568 |
+ |
|
| 569 |
+ if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 570 |
+ return err |
|
| 571 |
+ } |
|
| 572 |
+ |
|
| 573 |
+ if b.Config.Volumes == nil {
|
|
| 574 |
+ b.Config.Volumes = map[string]struct{}{}
|
|
| 575 |
+ } |
|
| 576 |
+ for _, v := range args {
|
|
| 577 |
+ v = strings.TrimSpace(v) |
|
| 578 |
+ if v == "" {
|
|
| 579 |
+ return derr.ErrorCodeVolumeEmpty |
|
| 580 |
+ } |
|
| 581 |
+ b.Config.Volumes[v] = struct{}{}
|
|
| 582 |
+ } |
|
| 583 |
+ if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
|
|
| 584 |
+ return err |
|
| 585 |
+ } |
|
| 586 |
+ return nil |
|
| 587 |
+} |
|
| 588 |
+ |
|
| 589 |
+// STOPSIGNAL signal |
|
| 590 |
+// |
|
| 591 |
+// Set the signal that will be used to kill the container. |
|
| 592 |
+func stopSignal(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 593 |
+ if len(args) != 1 {
|
|
| 594 |
+ return fmt.Errorf("STOPSIGNAL requires exactly one argument")
|
|
| 595 |
+ } |
|
| 596 |
+ |
|
| 597 |
+ sig := args[0] |
|
| 598 |
+ _, err := signal.ParseSignal(sig) |
|
| 599 |
+ if err != nil {
|
|
| 600 |
+ return err |
|
| 601 |
+ } |
|
| 602 |
+ |
|
| 603 |
+ b.Config.StopSignal = sig |
|
| 604 |
+ return b.commit("", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
|
|
| 605 |
+} |
|
| 606 |
+ |
|
| 607 |
+// ARG name[=value] |
|
| 608 |
+// |
|
| 609 |
+// Adds the variable foo to the trusted list of variables that can be passed |
|
| 610 |
+// to the builder using the --build-arg flag for expansion/substitution or passing to 'run'. |
|
| 611 |
+// The Dockerfile author may optionally set a default value for this variable. |
|
| 612 |
+func arg(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 613 |
+ if len(args) != 1 {
|
|
| 614 |
+ return fmt.Errorf("ARG requires exactly one argument definition")
|
|
| 615 |
+ } |
|
| 616 |
+ |
|
| 617 |
+ var ( |
|
| 618 |
+ name string |
|
| 619 |
+ value string |
|
| 620 |
+ hasDefault bool |
|
| 621 |
+ ) |
|
| 622 |
+ |
|
| 623 |
+ arg := args[0] |
|
| 624 |
+ // 'arg' can just be a name or name-value pair. Note that this is different |
|
| 625 |
+ // from 'env' that handles the split of name and value at the parser level. |
|
| 626 |
+ // The reason for doing it differently for 'arg' is that we support just |
|
| 627 |
+ // defining an arg without assigning it a value (while 'env' always expects a |
|
| 628 |
+ // name-value pair). If possible, it would be good to harmonize the two. |
|
| 629 |
+ if strings.Contains(arg, "=") {
|
|
| 630 |
+ parts := strings.SplitN(arg, "=", 2) |
|
| 631 |
+ name = parts[0] |
|
| 632 |
+ value = parts[1] |
|
| 633 |
+ hasDefault = true |
|
| 634 |
+ } else {
|
|
| 635 |
+ name = arg |
|
| 636 |
+ hasDefault = false |
|
| 637 |
+ } |
|
| 638 |
+ // add the arg to allowed list of build-time args from this step on. |
|
| 639 |
+ b.allowedBuildArgs[name] = true |
|
| 640 |
+ |
|
| 641 |
+ // If there is a default value associated with this arg then add it to the |
|
| 642 |
+ // b.buildArgs if one is not already passed to the builder. The args passed |
|
| 643 |
+ // to the builder override the default value of 'arg'. |
|
| 644 |
+ if _, ok := b.buildArgs[name]; !ok && hasDefault {
|
|
| 645 |
+ b.buildArgs[name] = value |
|
| 646 |
+ } |
|
| 647 |
+ |
|
| 648 |
+ return b.commit("", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
|
|
| 649 |
+} |
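The name/default split above in isolation (example values hypothetical): a bare name merely declares the arg, while name=value also records a default that a --build-arg can override:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, a := range []string{"HTTP_PROXY", "user=nobody"} {
		if strings.Contains(a, "=") {
			parts := strings.SplitN(a, "=", 2) // split on the first '=' only
			fmt.Printf("name=%s default=%q\n", parts[0], parts[1])
		} else {
			fmt.Printf("name=%s (declared, no default)\n", a)
		}
	}
}
```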
| 0 | 650 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,424 @@ |
| 0 |
+// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. |
|
| 1 |
+// |
|
| 2 |
+// It incorporates a dispatch table based on the parser.Node values (see the |
|
| 3 |
+// parser package for more information) that are yielded from the parser itself. |
|
| 4 |
+// Calling NewBuilder with the BuildOpts struct can be used to customize the |
|
| 5 |
+// experience for execution purposes only. Parsing is controlled in the parser |
|
| 6 |
+// package, and this division of responsibility should be respected. |
|
| 7 |
+// |
|
| 8 |
+// Please see the jump table targets for the actual invocations, most of which |
|
| 9 |
+// will call out to the functions in internals.go to deal with their tasks. |
|
| 10 |
+// |
|
| 11 |
+// ONBUILD is a special case, which is covered in the onbuild() func in |
|
| 12 |
+// dispatchers.go. |
|
| 13 |
+// |
|
| 14 |
+// The evaluator uses the concept of "steps", which are usually each processable |
|
| 15 |
+// line in the Dockerfile. Each step is numbered and certain actions are taken |
|
| 16 |
+// before and after each step, such as creating an image ID and removing temporary |
|
| 17 |
+// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which |
|
| 18 |
+// includes its own set of steps (usually only one of them). |
|
| 19 |
+package dockerfile |
|
| 20 |
+ |
|
| 21 |
+import ( |
|
| 22 |
+ "fmt" |
|
| 23 |
+ "io" |
|
| 24 |
+ "os" |
|
| 25 |
+ "path/filepath" |
|
| 26 |
+ "runtime" |
|
| 27 |
+ "strings" |
|
| 28 |
+ |
|
| 29 |
+ "github.com/Sirupsen/logrus" |
|
| 30 |
+ "github.com/docker/docker/api" |
|
| 31 |
+ "github.com/docker/docker/builder/dockerfile/command" |
|
| 32 |
+ "github.com/docker/docker/builder/dockerfile/parser" |
|
| 33 |
+ "github.com/docker/docker/cliconfig" |
|
| 34 |
+ "github.com/docker/docker/daemon" |
|
| 35 |
+ "github.com/docker/docker/pkg/fileutils" |
|
| 36 |
+ "github.com/docker/docker/pkg/streamformatter" |
|
| 37 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 38 |
+ "github.com/docker/docker/pkg/symlink" |
|
| 39 |
+ "github.com/docker/docker/pkg/tarsum" |
|
| 40 |
+ "github.com/docker/docker/pkg/ulimit" |
|
| 41 |
+ "github.com/docker/docker/runconfig" |
|
| 42 |
+ "github.com/docker/docker/utils" |
|
| 43 |
+) |
|
| 44 |
+ |
|
| 45 |
+// Environment variable interpolation will happen on these statements only. |
|
| 46 |
+var replaceEnvAllowed = map[string]struct{}{
|
|
| 47 |
+ command.Env: {},
|
|
| 48 |
+ command.Label: {},
|
|
| 49 |
+ command.Add: {},
|
|
| 50 |
+ command.Copy: {},
|
|
| 51 |
+ command.Workdir: {},
|
|
| 52 |
+ command.Expose: {},
|
|
| 53 |
+ command.Volume: {},
|
|
| 54 |
+ command.User: {},
|
|
| 55 |
+ command.StopSignal: {},
|
|
| 56 |
+ command.Arg: {},
|
|
| 57 |
+} |
|
| 58 |
+ |
|
| 59 |
+var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error |
|
| 60 |
+ |
|
| 61 |
+func init() {
|
|
| 62 |
+ evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{
|
|
| 63 |
+ command.Env: env, |
|
| 64 |
+ command.Label: label, |
|
| 65 |
+ command.Maintainer: maintainer, |
|
| 66 |
+ command.Add: add, |
|
| 67 |
+ command.Copy: dispatchCopy, // copy() is a go builtin |
|
| 68 |
+ command.From: from, |
|
| 69 |
+ command.Onbuild: onbuild, |
|
| 70 |
+ command.Workdir: workdir, |
|
| 71 |
+ command.Run: run, |
|
| 72 |
+ command.Cmd: cmd, |
|
| 73 |
+ command.Entrypoint: entrypoint, |
|
| 74 |
+ command.Expose: expose, |
|
| 75 |
+ command.Volume: volume, |
|
| 76 |
+ command.User: user, |
|
| 77 |
+ command.StopSignal: stopSignal, |
|
| 78 |
+ command.Arg: arg, |
|
| 79 |
+ } |
|
| 80 |
+} |
|
| 81 |
+ |
|
| 82 |
+// builder is an internal struct, used to maintain configuration of the Dockerfile's |
|
| 83 |
+// processing as it evaluates the parsing result. |
|
| 84 |
+type builder struct {
|
|
| 85 |
+ Daemon *daemon.Daemon |
|
| 86 |
+ |
|
| 87 |
+ // effectively stdio for the run. Because it is not stdio, I said |
|
| 88 |
+ // "Effectively". Do not use stdio anywhere in this package for any reason. |
|
| 89 |
+ OutStream io.Writer |
|
| 90 |
+ ErrStream io.Writer |
|
| 91 |
+ |
|
| 92 |
+ Verbose bool |
|
| 93 |
+ UtilizeCache bool |
|
| 94 |
+ cacheBusted bool |
|
| 95 |
+ |
|
| 96 |
+ // controls how images and containers are handled between steps. |
|
| 97 |
+ Remove bool |
|
| 98 |
+ ForceRemove bool |
|
| 99 |
+ Pull bool |
|
| 100 |
+ |
|
| 101 |
+ // set this to true if we want the builder to not commit between steps. |
|
| 102 |
+ // This is useful when we only want to use the evaluator table to generate |
|
| 103 |
+ // the final configs of the Dockerfile but don't want the layers |
|
| 104 |
+ disableCommit bool |
|
| 105 |
+ |
|
| 106 |
+ // Registry server auth configs used to pull images when handling `FROM`. |
|
| 107 |
+ AuthConfigs map[string]cliconfig.AuthConfig |
|
| 108 |
+ |
|
| 109 |
+ // Deprecated, original writer used for ImagePull. To be removed. |
|
| 110 |
+ OutOld io.Writer |
|
| 111 |
+ StreamFormatter *streamformatter.StreamFormatter |
|
| 112 |
+ |
|
| 113 |
+ Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. |
|
| 114 |
+ |
|
| 115 |
+ buildArgs map[string]string // build-time args received in build context for expansion/substitution and commands in 'run'. |
|
| 116 |
+ allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. |
|
| 117 |
+ |
|
| 118 |
+ // both of these are controlled by the Remove and ForceRemove options in BuildOpts |
|
| 119 |
+ TmpContainers map[string]struct{} // a map of containers used for removes
|
|
| 120 |
+ |
|
| 121 |
+ dockerfileName string // name of Dockerfile |
|
| 122 |
+ dockerfile *parser.Node // the syntax tree of the dockerfile |
|
| 123 |
+ image string // image name for commit processing |
|
| 124 |
+ maintainer string // maintainer name. could probably be removed. |
|
| 125 |
+ cmdSet bool // indicates whether CMD was set in the current Dockerfile |
|
| 126 |
+ BuilderFlags *BFlags // current cmd's BuilderFlags - temporary |
|
| 127 |
+ context tarsum.TarSum // the context is a tarball that is uploaded by the client |
|
| 128 |
+ contextPath string // the path of the temporary directory the local context is unpacked to (server side) |
|
| 129 |
+ noBaseImage bool // indicates that this build does not start from any base image, but is being built from an empty file system. |
|
| 130 |
+ |
|
| 131 |
+ // Set resource restrictions for build containers |
|
| 132 |
+ cpuSetCpus string |
|
| 133 |
+ cpuSetMems string |
|
| 134 |
+ cpuShares int64 |
|
| 135 |
+ cpuPeriod int64 |
|
| 136 |
+ cpuQuota int64 |
|
| 137 |
+ cgroupParent string |
|
| 138 |
+ memory int64 |
|
| 139 |
+ memorySwap int64 |
|
| 140 |
+ ulimits []*ulimit.Ulimit |
|
| 141 |
+ |
|
| 142 |
+ cancelled <-chan struct{} // When closed, job was cancelled.
|
|
| 143 |
+ |
|
| 144 |
+ activeImages []string |
|
| 145 |
+ id string // Used to hold reference images |
|
| 146 |
+} |
|
| 147 |
+ |
|
| 148 |
+// Run the builder with the context. This is the lynchpin of this package. This |
|
| 149 |
+// will (barring errors): |
|
| 150 |
+// |
|
| 151 |
+// * call readContext() which will set up the temporary directory and unpack |
|
| 152 |
+// the context into it. |
|
| 153 |
+// * read the dockerfile |
|
| 154 |
+// * parse the dockerfile |
|
| 155 |
+// * walk the parse tree and execute it by dispatching to handlers. If Remove |
|
| 156 |
+// or ForceRemove is set, additional cleanup around containers happens after |
|
| 157 |
+// processing. |
|
| 158 |
+// * Print a happy message and return the image ID. |
|
| 159 |
+// |
|
| 160 |
+func (b *builder) Run(context io.Reader) (string, error) {
|
|
| 161 |
+ if err := b.readContext(context); err != nil {
|
|
| 162 |
+ return "", err |
|
| 163 |
+ } |
|
| 164 |
+ |
|
| 165 |
+ defer func() {
|
|
| 166 |
+ if err := os.RemoveAll(b.contextPath); err != nil {
|
|
| 167 |
+ logrus.Debugf("[BUILDER] failed to remove temporary context: %s", err)
|
|
| 168 |
+ } |
|
| 169 |
+ }() |
|
| 170 |
+ |
|
| 171 |
+ if err := b.readDockerfile(); err != nil {
|
|
| 172 |
+ return "", err |
|
| 173 |
+ } |
|
| 174 |
+ |
|
| 175 |
+ // some initializations that would not have been supplied by the caller. |
|
| 176 |
+ b.Config = &runconfig.Config{}
|
|
| 177 |
+ |
|
| 178 |
+ b.TmpContainers = map[string]struct{}{}
|
|
| 179 |
+ |
|
| 180 |
+ for i, n := range b.dockerfile.Children {
|
|
| 181 |
+ select {
|
|
| 182 |
+ case <-b.cancelled: |
|
| 183 |
+ logrus.Debug("Builder: build cancelled!")
|
|
| 184 |
+ fmt.Fprintf(b.OutStream, "Build cancelled") |
|
| 185 |
+ return "", fmt.Errorf("Build cancelled")
|
|
| 186 |
+ default: |
|
| 187 |
+ // Not cancelled yet, keep going... |
|
| 188 |
+ } |
|
| 189 |
+ if err := b.dispatch(i, n); err != nil {
|
|
| 190 |
+ if b.ForceRemove {
|
|
| 191 |
+ b.clearTmp() |
|
| 192 |
+ } |
|
| 193 |
+ return "", err |
|
| 194 |
+ } |
|
| 195 |
+ fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image)) |
|
| 196 |
+ if b.Remove {
|
|
| 197 |
+ b.clearTmp() |
|
| 198 |
+ } |
|
| 199 |
+ } |
|
| 200 |
+ |
|
| 201 |
+ // check if there are any leftover build-args that were passed but not |
|
| 202 |
+ // consumed during build. Return an error, if there are any. |
|
| 203 |
+ leftoverArgs := []string{}
|
|
| 204 |
+ for arg := range b.buildArgs {
|
|
| 205 |
+ if !b.isBuildArgAllowed(arg) {
|
|
| 206 |
+ leftoverArgs = append(leftoverArgs, arg) |
|
| 207 |
+ } |
|
| 208 |
+ } |
|
| 209 |
+ if len(leftoverArgs) > 0 {
|
|
| 210 |
+ return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs)
|
|
| 211 |
+ } |
|
| 212 |
+ |
|
| 213 |
+ if b.image == "" {
|
|
| 214 |
+ return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
|
|
| 215 |
+ } |
|
| 216 |
+ |
|
| 217 |
+ fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image)) |
|
| 218 |
+ return b.image, nil |
|
| 219 |
+} |
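The per-step cancellation check in Run above is a standard non-blocking select on a close-only channel; extracted into a standalone form:

```go
package main

import "fmt"

// cancelledYet polls the channel without blocking, as Run does once per step.
func cancelledYet(cancelled <-chan struct{}) error {
	select {
	case <-cancelled:
		return fmt.Errorf("Build cancelled")
	default:
		return nil // not cancelled yet, keep going
	}
}

func main() {
	c := make(chan struct{})
	fmt.Println(cancelledYet(c)) // <nil>
	close(c)
	fmt.Println(cancelledYet(c)) // Build cancelled
}
```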
|
| 220 |
+ |
|
| 221 |
+// Reads a Dockerfile from the current context. It assumes that the |
|
| 222 |
+// 'filename' is a relative path from the root of the context |
|
| 223 |
+func (b *builder) readDockerfile() error {
|
|
| 224 |
+ // If no -f was specified then look for 'Dockerfile'. If we can't find |
|
| 225 |
+ // that then look for 'dockerfile'. If neither are found then default |
|
| 226 |
+ // back to 'Dockerfile' and use that in the error message. |
|
| 227 |
+ if b.dockerfileName == "" {
|
|
| 228 |
+ b.dockerfileName = api.DefaultDockerfileName |
|
| 229 |
+ tmpFN := filepath.Join(b.contextPath, api.DefaultDockerfileName) |
|
| 230 |
+ if _, err := os.Lstat(tmpFN); err != nil {
|
|
| 231 |
+ tmpFN = filepath.Join(b.contextPath, strings.ToLower(api.DefaultDockerfileName)) |
|
| 232 |
+ if _, err := os.Lstat(tmpFN); err == nil {
|
|
| 233 |
+ b.dockerfileName = strings.ToLower(api.DefaultDockerfileName) |
|
| 234 |
+ } |
|
| 235 |
+ } |
|
| 236 |
+ } |
|
| 237 |
+ |
|
| 238 |
+ origFile := b.dockerfileName |
|
| 239 |
+ |
|
| 240 |
+ filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath) |
|
| 241 |
+ if err != nil {
|
|
| 242 |
+ return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile)
|
|
| 243 |
+ } |
|
| 244 |
+ |
|
| 245 |
+ fi, err := os.Lstat(filename) |
|
| 246 |
+ if os.IsNotExist(err) {
|
|
| 247 |
+ return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile)
|
|
| 248 |
+ } |
|
| 249 |
+ if fi.Size() == 0 {
|
|
| 250 |
+ return fmt.Errorf("The Dockerfile (%s) cannot be empty", origFile)
|
|
| 251 |
+ } |
|
| 252 |
+ |
|
| 253 |
+ f, err := os.Open(filename) |
|
| 254 |
+ if err != nil {
|
|
| 255 |
+ return err |
|
| 256 |
+ } |
|
| 257 |
+ |
|
| 258 |
+ b.dockerfile, err = parser.Parse(f) |
|
| 259 |
+ f.Close() |
|
| 260 |
+ |
|
| 261 |
+ if err != nil {
|
|
| 262 |
+ return err |
|
| 263 |
+ } |
|
| 264 |
+ |
|
| 265 |
+ // After the Dockerfile has been parsed, we need to check the .dockerignore |
|
| 266 |
+ // file for either "Dockerfile" or ".dockerignore", and if either are |
|
| 267 |
+ // present then erase them from the build context. These files should never |
|
| 268 |
+ // have been sent from the client but we did send them to make sure that |
|
| 269 |
+ // we had the Dockerfile to actually parse, and then we also need the |
|
| 270 |
+ // .dockerignore file to know whether either file should be removed. |
|
| 271 |
+ // Note that this assumes the Dockerfile has been read into memory and |
|
| 272 |
+ // is now safe to be removed. |
|
| 273 |
+ |
|
| 274 |
+ excludes, _ := utils.ReadDockerIgnore(filepath.Join(b.contextPath, ".dockerignore")) |
|
| 275 |
+ if rm, _ := fileutils.Matches(".dockerignore", excludes); rm == true {
|
|
| 276 |
+ os.Remove(filepath.Join(b.contextPath, ".dockerignore")) |
|
| 277 |
+ b.context.(tarsum.BuilderContext).Remove(".dockerignore")
|
|
| 278 |
+ } |
|
| 279 |
+ if rm, _ := fileutils.Matches(b.dockerfileName, excludes); rm {
|
|
| 280 |
+ os.Remove(filepath.Join(b.contextPath, b.dockerfileName)) |
|
| 281 |
+ b.context.(tarsum.BuilderContext).Remove(b.dockerfileName) |
|
| 282 |
+ } |
|
| 283 |
+ |
|
| 284 |
+ return nil |
|
| 285 |
+} |
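The default-name fallback at the top of readDockerfile, extracted with a hypothetical ctxPath standing in for b.contextPath: 'Dockerfile' wins unless only the lowercase variant exists on disk:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	ctxPath := "/tmp/build-ctx" // hypothetical unpacked context directory
	name := "Dockerfile"        // api.DefaultDockerfileName
	if _, err := os.Lstat(filepath.Join(ctxPath, name)); err != nil {
		// Fall back to "dockerfile" only if that variant actually exists.
		if _, err := os.Lstat(filepath.Join(ctxPath, strings.ToLower(name))); err == nil {
			name = strings.ToLower(name)
		}
	}
	fmt.Println("using", name)
}
```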
|
| 286 |
+ |
|
| 287 |
+// determine if build arg is part of built-in args or user |
|
| 288 |
+// defined args in Dockerfile at any point in time. |
|
| 289 |
+func (b *builder) isBuildArgAllowed(arg string) bool {
|
|
| 290 |
+ if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
|
|
| 291 |
+ return true |
|
| 292 |
+ } |
|
| 293 |
+ if _, ok := b.allowedBuildArgs[arg]; ok {
|
|
| 294 |
+ return true |
|
| 295 |
+ } |
|
| 296 |
+ return false |
|
| 297 |
+} |
|
| 298 |
+ |
|
| 299 |
+// This method is the entrypoint to all statement handling routines. |
|
| 300 |
+// |
|
| 301 |
+// Almost all nodes will have this structure: |
|
| 302 |
+// Child[Node, Node, Node] where Child is from parser.Node.Children and each |
|
| 303 |
+// node comes from parser.Node.Next. This forms a "line" with a statement and |
|
| 304 |
+// arguments and we process them in this normalized form by hitting |
|
| 305 |
+// evaluateTable with the leaf nodes of the command and the Builder object. |
|
| 306 |
+// |
|
| 307 |
+// ONBUILD is a special case; in this case the parser will emit: |
|
| 308 |
+// Child[Node, Child[Node, Node...]] where the first node is the literal |
|
| 309 |
+// "onbuild" and the child entrypoint is the command of the ONBUILD statement, |
|
| 310 |
+// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to |
|
| 311 |
+// deal with that, at least until it becomes more of a general concern with new |
|
| 312 |
+// features. |
|
| 313 |
+func (b *builder) dispatch(stepN int, ast *parser.Node) error {
|
|
| 314 |
+ cmd := ast.Value |
|
| 315 |
+ |
|
| 316 |
+ // To ensure the user is given a decent error message if the platform |
|
| 317 |
+ // on which the daemon is running does not support a builder command. |
|
| 318 |
+ if err := platformSupports(strings.ToLower(cmd)); err != nil {
|
|
| 319 |
+ return err |
|
| 320 |
+ } |
|
| 321 |
+ |
|
| 322 |
+ attrs := ast.Attributes |
|
| 323 |
+ original := ast.Original |
|
| 324 |
+ flags := ast.Flags |
|
| 325 |
+ strs := []string{}
|
|
| 326 |
+ msg := fmt.Sprintf("Step %d : %s", stepN+1, strings.ToUpper(cmd))
|
|
| 327 |
+ |
|
| 328 |
+ if len(ast.Flags) > 0 {
|
|
| 329 |
+ msg += " " + strings.Join(ast.Flags, " ") |
|
| 330 |
+ } |
|
| 331 |
+ |
|
| 332 |
+ if cmd == "onbuild" {
|
|
| 333 |
+ if ast.Next == nil {
|
|
| 334 |
+ return fmt.Errorf("ONBUILD requires at least one argument")
|
|
| 335 |
+ } |
|
| 336 |
+ ast = ast.Next.Children[0] |
|
| 337 |
+ strs = append(strs, ast.Value) |
|
| 338 |
+ msg += " " + ast.Value |
|
| 339 |
+ |
|
| 340 |
+ if len(ast.Flags) > 0 {
|
|
| 341 |
+ msg += " " + strings.Join(ast.Flags, " ") |
|
| 342 |
+ } |
|
| 343 |
+ |
|
| 344 |
+ } |
|
| 345 |
+ |
|
| 346 |
+ // count the number of nodes that we are going to traverse first |
|
| 347 |
+ // so we can pre-create the argument and message arrays. This speeds up the |
|
| 348 |
+ // allocation of those lists a lot when they have a lot of arguments |
|
| 349 |
+ cursor := ast |
|
| 350 |
+ var n int |
|
| 351 |
+ for cursor.Next != nil {
|
|
| 352 |
+ cursor = cursor.Next |
|
| 353 |
+ n++ |
|
| 354 |
+ } |
|
| 355 |
+ l := len(strs) |
|
| 356 |
+ strList := make([]string, n+l) |
|
| 357 |
+ copy(strList, strs) |
|
| 358 |
+ msgList := make([]string, n) |
|
| 359 |
+ |
|
| 360 |
+ var i int |
|
| 361 |
+ // Append the build-time args to config-environment. |
|
| 362 |
+ // This allows builder config to override the variables, making the behavior similar to |
|
| 363 |
+ // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build |
|
| 364 |
+ // context. But `ENV foo $foo` will use the value from build context if one |
|
| 365 |
+ // hasn't already been defined by a previous ENV primitive. |
|
| 366 |
+ // Note, we get this behavior because we know that ProcessWord() will |
|
| 367 |
+ // stop on the first occurrence of a variable name and not notice |
|
| 368 |
+ // a subsequent one. So, putting the buildArgs list after the Config.Env |
|
| 369 |
+ // list, in 'envs', is safe. |
|
| 370 |
+ envs := b.Config.Env |
|
| 371 |
+ for key, val := range b.buildArgs {
|
|
| 372 |
+ if !b.isBuildArgAllowed(key) {
|
|
| 373 |
+ // skip build-args that are not in allowed list, meaning they have |
|
| 374 |
+ // not been defined by an "ARG" Dockerfile command yet. |
|
| 375 |
+ // This is an error condition but only if there is no "ARG" in the entire |
|
| 376 |
+ // Dockerfile, so we'll generate any necessary errors after we parsed |
|
| 377 |
+ // the entire file (see 'leftoverArgs' processing in evaluator.go ) |
|
| 378 |
+ continue |
|
| 379 |
+ } |
|
| 380 |
+ envs = append(envs, fmt.Sprintf("%s=%s", key, val))
|
|
| 381 |
+ } |
|
| 382 |
+ for ast.Next != nil {
|
|
| 383 |
+ ast = ast.Next |
|
| 384 |
+ var str string |
|
| 385 |
+ str = ast.Value |
|
| 386 |
+ if _, ok := replaceEnvAllowed[cmd]; ok {
|
|
| 387 |
+ var err error |
|
| 388 |
+ str, err = ProcessWord(ast.Value, envs) |
|
| 389 |
+ if err != nil {
|
|
| 390 |
+ return err |
|
| 391 |
+ } |
|
| 392 |
+ } |
|
| 393 |
+ strList[i+l] = str |
|
| 394 |
+ msgList[i] = ast.Value |
|
| 395 |
+ i++ |
|
| 396 |
+ } |
|
| 397 |
+ |
|
| 398 |
+ msg += " " + strings.Join(msgList, " ") |
|
| 399 |
+ fmt.Fprintln(b.OutStream, msg) |
|
| 400 |
+ |
|
| 401 |
+ // XXX yes, we skip any cmds that are not valid; the parser should have |
|
| 402 |
+ // picked these out already. |
|
| 403 |
+ if f, ok := evaluateTable[cmd]; ok {
|
|
| 404 |
+ b.BuilderFlags = NewBFlags() |
|
| 405 |
+ b.BuilderFlags.Args = flags |
|
| 406 |
+ return f(b, strList, attrs, original) |
|
| 407 |
+ } |
|
| 408 |
+ |
|
| 409 |
+ return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
|
|
| 410 |
+} |
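One subtlety in the env construction above deserves a concrete model: precedence falls out of ProcessWord stopping at the first match, so appending the build args after Config.Env makes ENV beat --build-arg for the same name. The helper below merely mimics that first-match property; it is not the package's ProcessWord:

```go
package main

import (
	"fmt"
	"strings"
)

// firstMatch imitates the first-occurrence-wins behavior relied on above.
func firstMatch(name string, envs []string) string {
	for _, e := range envs {
		if parts := strings.SplitN(e, "=", 2); len(parts) == 2 && parts[0] == name {
			return parts[1]
		}
	}
	return ""
}

func main() {
	envs := []string{"foo=from-ENV"}          // b.Config.Env entries come first
	envs = append(envs, "foo=from-build-arg") // allowed build args appended after
	fmt.Println(firstMatch("foo", envs))      // from-ENV
}
```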
|
| 411 |
+ |
|
| 412 |
+// platformSupports is a short-term function to give users a quality error |
|
| 413 |
+// message if a Dockerfile uses a command not supported on the platform. |
|
| 414 |
+func platformSupports(command string) error {
|
|
| 415 |
+ if runtime.GOOS != "windows" {
|
|
| 416 |
+ return nil |
|
| 417 |
+ } |
|
| 418 |
+ switch command {
|
|
| 419 |
+ case "expose", "volume", "user", "stopsignal", "arg": |
|
| 420 |
+ return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
|
|
| 421 |
+ } |
|
| 422 |
+ return nil |
|
| 423 |
+} |
| 0 | 424 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,811 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+// internals for handling commands. Covers many areas and a lot of |
|
| 3 |
+// non-contiguous functionality. Please read the comments. |
|
| 4 |
+ |
|
| 5 |
+import ( |
|
| 6 |
+ "crypto/sha256" |
|
| 7 |
+ "encoding/hex" |
|
| 8 |
+ "fmt" |
|
| 9 |
+ "io" |
|
| 10 |
+ "io/ioutil" |
|
| 11 |
+ "net/http" |
|
| 12 |
+ "net/url" |
|
| 13 |
+ "os" |
|
| 14 |
+ "path/filepath" |
|
| 15 |
+ "runtime" |
|
| 16 |
+ "sort" |
|
| 17 |
+ "strings" |
|
| 18 |
+ "time" |
|
| 19 |
+ |
|
| 20 |
+ "github.com/Sirupsen/logrus" |
|
| 21 |
+ "github.com/docker/docker/builder/dockerfile/parser" |
|
| 22 |
+ "github.com/docker/docker/cliconfig" |
|
| 23 |
+ "github.com/docker/docker/daemon" |
|
| 24 |
+ "github.com/docker/docker/graph" |
|
| 25 |
+ "github.com/docker/docker/image" |
|
| 26 |
+ "github.com/docker/docker/pkg/archive" |
|
| 27 |
+ "github.com/docker/docker/pkg/chrootarchive" |
|
| 28 |
+ "github.com/docker/docker/pkg/httputils" |
|
| 29 |
+ "github.com/docker/docker/pkg/ioutils" |
|
| 30 |
+ "github.com/docker/docker/pkg/jsonmessage" |
|
| 31 |
+ "github.com/docker/docker/pkg/parsers" |
|
| 32 |
+ "github.com/docker/docker/pkg/progressreader" |
|
| 33 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 34 |
+ "github.com/docker/docker/pkg/stringutils" |
|
| 35 |
+ "github.com/docker/docker/pkg/symlink" |
|
| 36 |
+ "github.com/docker/docker/pkg/system" |
|
| 37 |
+ "github.com/docker/docker/pkg/tarsum" |
|
| 38 |
+ "github.com/docker/docker/pkg/urlutil" |
|
| 39 |
+ "github.com/docker/docker/registry" |
|
| 40 |
+ "github.com/docker/docker/runconfig" |
|
| 41 |
+) |
|
| 42 |
+ |
|
| 43 |
+func (b *builder) readContext(context io.Reader) (err error) {
|
|
| 44 |
+ tmpdirPath, err := getTempDir("", "docker-build")
|
|
| 45 |
+ if err != nil {
|
|
| 46 |
+ return |
|
| 47 |
+ } |
|
| 48 |
+ |
|
| 49 |
+ // Make sure we clean up upon error. In the happy case the caller |
|
| 50 |
+ // is expected to manage the clean-up |
|
| 51 |
+ defer func() {
|
|
| 52 |
+ if err != nil {
|
|
| 53 |
+ if e := os.RemoveAll(tmpdirPath); e != nil {
|
|
| 54 |
+ logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
|
|
| 55 |
+ } |
|
| 56 |
+ } |
|
| 57 |
+ }() |
|
| 58 |
+ |
|
| 59 |
+ decompressedStream, err := archive.DecompressStream(context) |
|
| 60 |
+ if err != nil {
|
|
| 61 |
+ return |
|
| 62 |
+ } |
|
| 63 |
+ |
|
| 64 |
+ if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
|
|
| 65 |
+ return |
|
| 66 |
+ } |
|
| 67 |
+ |
|
| 68 |
+ if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
|
|
| 69 |
+ return |
|
| 70 |
+ } |
|
| 71 |
+ |
|
| 72 |
+ b.contextPath = tmpdirPath |
|
| 73 |
+ return |
|
| 74 |
+} |
|
| 75 |
+ |
|
| 76 |
+func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
|
|
| 77 |
+ if b.disableCommit {
|
|
| 78 |
+ return nil |
|
| 79 |
+ } |
|
| 80 |
+ if b.image == "" && !b.noBaseImage {
|
|
| 81 |
+ return fmt.Errorf("Please provide a source image with `from` prior to commit")
|
|
| 82 |
+ } |
|
| 83 |
+ b.Config.Image = b.image |
|
| 84 |
+ if id == "" {
|
|
| 85 |
+ cmd := b.Config.Cmd |
|
| 86 |
+ if runtime.GOOS != "windows" {
|
|
| 87 |
+ b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
|
|
| 88 |
+ } else {
|
|
| 89 |
+ b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", "REM (nop) "+comment)
|
|
| 90 |
+ } |
|
| 91 |
+ defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 92 |
+ |
|
| 93 |
+ hit, err := b.probeCache() |
|
| 94 |
+ if err != nil {
|
|
| 95 |
+ return err |
|
| 96 |
+ } |
|
| 97 |
+ if hit {
|
|
| 98 |
+ return nil |
|
| 99 |
+ } |
|
| 100 |
+ |
|
| 101 |
+ container, err := b.create() |
|
| 102 |
+ if err != nil {
|
|
| 103 |
+ return err |
|
| 104 |
+ } |
|
| 105 |
+ id = container.ID |
|
| 106 |
+ |
|
| 107 |
+ if err := container.Mount(); err != nil {
|
|
| 108 |
+ return err |
|
| 109 |
+ } |
|
| 110 |
+ defer container.Unmount() |
|
| 111 |
+ } |
|
| 112 |
+ container, err := b.Daemon.Get(id) |
|
| 113 |
+ if err != nil {
|
|
| 114 |
+ return err |
|
| 115 |
+ } |
|
| 116 |
+ |
|
| 117 |
+ // Note: Actually copy the struct |
|
| 118 |
+ autoConfig := *b.Config |
|
| 119 |
+ autoConfig.Cmd = autoCmd |
|
| 120 |
+ |
|
| 121 |
+ commitCfg := &daemon.ContainerCommitConfig{
|
|
| 122 |
+ Author: b.maintainer, |
|
| 123 |
+ Pause: true, |
|
| 124 |
+ Config: &autoConfig, |
|
| 125 |
+ } |
|
| 126 |
+ |
|
| 127 |
+ // Commit the container |
|
| 128 |
+ image, err := b.Daemon.Commit(container, commitCfg) |
|
| 129 |
+ if err != nil {
|
|
| 130 |
+ return err |
|
| 131 |
+ } |
|
| 132 |
+ b.Daemon.Graph().Retain(b.id, image.ID) |
|
| 133 |
+ b.activeImages = append(b.activeImages, image.ID) |
|
| 134 |
+ b.image = image.ID |
|
| 135 |
+ return nil |
|
| 136 |
+} |
|
| 137 |
+ |
|
| 138 |
+type copyInfo struct {
|
|
| 139 |
+ origPath string |
|
| 140 |
+ destPath string |
|
| 141 |
+ hash string |
|
| 142 |
+ decompress bool |
|
| 143 |
+ tmpDir string |
|
| 144 |
+} |
|
| 145 |
+ |
|
| 146 |
+func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
|
|
| 147 |
+ if b.context == nil {
|
|
| 148 |
+ return fmt.Errorf("No context given. Impossible to use %s", cmdName)
|
|
| 149 |
+ } |
|
| 150 |
+ |
|
| 151 |
+ if len(args) < 2 {
|
|
| 152 |
+ return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
|
|
| 153 |
+ } |
|
| 154 |
+ |
|
| 155 |
+ // Work in daemon-specific filepath semantics |
|
| 156 |
+ dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest |
|
| 157 |
+ |
|
| 158 |
+ copyInfos := []*copyInfo{}
|
|
| 159 |
+ |
|
| 160 |
+ b.Config.Image = b.image |
|
| 161 |
+ |
|
| 162 |
+ defer func() {
|
|
| 163 |
+ for _, ci := range copyInfos {
|
|
| 164 |
+ if ci.tmpDir != "" {
|
|
| 165 |
+ os.RemoveAll(ci.tmpDir) |
|
| 166 |
+ } |
|
| 167 |
+ } |
|
| 168 |
+ }() |
|
| 169 |
+ |
|
| 170 |
+ // Loop through each src file and calculate the info we need to |
|
| 171 |
+ // do the copy (e.g. hash value if cached). Don't actually do |
|
| 172 |
+ // the copy until we've looked at all src files |
|
| 173 |
+ for _, orig := range args[0 : len(args)-1] {
|
|
| 174 |
+ if err := calcCopyInfo( |
|
| 175 |
+ b, |
|
| 176 |
+ cmdName, |
|
| 177 |
+ ©Infos, |
|
| 178 |
+ orig, |
|
| 179 |
+ dest, |
|
| 180 |
+ allowRemote, |
|
| 181 |
+ allowDecompression, |
|
| 182 |
+ true, |
|
| 183 |
+ ); err != nil {
|
|
| 184 |
+ return err |
|
| 185 |
+ } |
|
| 186 |
+ } |
|
| 187 |
+ |
|
| 188 |
+ if len(copyInfos) == 0 {
|
|
| 189 |
+ return fmt.Errorf("No source files were specified")
|
|
| 190 |
+ } |
|
| 191 |
+ if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
|
|
| 192 |
+ return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
|
|
| 193 |
+ } |
|
| 194 |
+ |
|
| 195 |
+ // For backwards compat, if there's just one CI then use it as the |
|
| 196 |
+ // cache look-up string, otherwise hash 'em all into one |
|
| 197 |
+ var srcHash string |
|
| 198 |
+ var origPaths string |
|
| 199 |
+ |
|
| 200 |
+ if len(copyInfos) == 1 {
|
|
| 201 |
+ srcHash = copyInfos[0].hash |
|
| 202 |
+ origPaths = copyInfos[0].origPath |
|
| 203 |
+ } else {
|
|
| 204 |
+ var hashs []string |
|
| 205 |
+ var origs []string |
|
| 206 |
+ for _, ci := range copyInfos {
|
|
| 207 |
+ hashs = append(hashs, ci.hash) |
|
| 208 |
+ origs = append(origs, ci.origPath) |
|
| 209 |
+ } |
|
| 210 |
+ hasher := sha256.New() |
|
| 211 |
+ hasher.Write([]byte(strings.Join(hashs, ","))) |
|
| 212 |
+ srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) |
|
| 213 |
+ origPaths = strings.Join(origs, " ") |
|
| 214 |
+ } |
|
| 215 |
+ |
|
| 216 |
+ cmd := b.Config.Cmd |
|
| 217 |
+ if runtime.GOOS != "windows" {
|
|
| 218 |
+ b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 219 |
+ } else {
|
|
| 220 |
+ b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 221 |
+ } |
|
| 222 |
+ defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 223 |
+ |
|
| 224 |
+ hit, err := b.probeCache() |
|
| 225 |
+ if err != nil {
|
|
| 226 |
+ return err |
|
| 227 |
+ } |
|
| 228 |
+ |
|
| 229 |
+ if hit {
|
|
| 230 |
+ return nil |
|
| 231 |
+ } |
|
| 232 |
+ |
|
| 233 |
+ ccr, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
|
|
| 234 |
+ if err != nil {
|
|
| 235 |
+ return err |
|
| 236 |
+ } |
|
| 237 |
+ container, err := b.Daemon.Get(ccr.ID) |
|
| 238 |
+ if err != nil {
|
|
| 239 |
+ return err |
|
| 240 |
+ } |
|
| 241 |
+ |
|
| 242 |
+ b.TmpContainers[container.ID] = struct{}{}
|
|
| 243 |
+ |
|
| 244 |
+ if err := container.Mount(); err != nil {
|
|
| 245 |
+ return err |
|
| 246 |
+ } |
|
| 247 |
+ defer container.Unmount() |
|
| 248 |
+ |
|
| 249 |
+ for _, ci := range copyInfos {
|
|
| 250 |
+ if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
|
|
| 251 |
+ return err |
|
| 252 |
+ } |
|
| 253 |
+ } |
|
| 254 |
+ |
|
| 255 |
+ if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
|
|
| 256 |
+ return err |
|
| 257 |
+ } |
|
| 258 |
+ return nil |
|
| 259 |
+} |
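For the multi-source cache key above: each source contributes its tarsum, and the tarsums are joined and re-hashed so any number of sources collapses into a single lookup string. Standalone, with placeholder tarsum values:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// Placeholder tarsum outputs; the real ones come from pkg/tarsum.
	hashs := []string{"tarsum.v1+sha256:aaa", "tarsum.v1+sha256:bbb"}
	h := sha256.New()
	h.Write([]byte(strings.Join(hashs, ",")))
	fmt.Println("multi:" + hex.EncodeToString(h.Sum(nil)))
}
```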
|
| 260 |
+ |
|
| 261 |
+func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
|
|
| 262 |
+ |
|
| 263 |
+ // Work in daemon-specific OS filepath semantics. However, we save |
|
| 264 |
+ // the origPath passed in here, as it might also be a URL which |
|
| 265 |
+ // we need to check for in this function. |
|
| 266 |
+ passedInOrigPath := origPath |
|
| 267 |
+ origPath = filepath.FromSlash(origPath) |
|
| 268 |
+ destPath = filepath.FromSlash(destPath) |
|
| 269 |
+ |
|
| 270 |
+ if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
|
|
| 271 |
+ origPath = origPath[1:] |
|
| 272 |
+ } |
|
| 273 |
+ origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) |
|
| 274 |
+ |
|
| 275 |
+ // Twiddle the destPath when it's a relative path - meaning, make it |
|
| 276 |
+ // relative to the WORKINGDIR |
|
| 277 |
+ if !system.IsAbs(destPath) {
|
|
| 278 |
+ hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator)) |
|
| 279 |
+ destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath) |
|
| 280 |
+ |
|
| 281 |
+ // Make sure we preserve any trailing slash |
|
| 282 |
+ if hasSlash {
|
|
| 283 |
+ destPath += string(os.PathSeparator) |
|
| 284 |
+ } |
|
| 285 |
+ } |
|
| 286 |
+ |
|
| 287 |
+ // In the remote/URL case, download it and gen its hashcode |
|
| 288 |
+ if urlutil.IsURL(passedInOrigPath) {
|
|
| 289 |
+ |
|
| 290 |
+ // As it's a URL, we go back to processing on what was passed in |
|
| 291 |
+ // to this function |
|
| 292 |
+ origPath = passedInOrigPath |
|
| 293 |
+ |
|
| 294 |
+ if !allowRemote {
|
|
| 295 |
+ return fmt.Errorf("Source can't be a URL for %s", cmdName)
|
|
| 296 |
+ } |
|
| 297 |
+ |
|
| 298 |
+ ci := copyInfo{}
|
|
| 299 |
+ ci.origPath = origPath |
|
| 300 |
+ ci.hash = origPath // default to this but can change |
|
| 301 |
+ ci.destPath = destPath |
|
| 302 |
+ ci.decompress = false |
|
| 303 |
+ *cInfos = append(*cInfos, &ci) |
|
| 304 |
+ |
|
| 305 |
+ // Initiate the download |
|
| 306 |
+ resp, err := httputils.Download(ci.origPath) |
|
| 307 |
+ if err != nil {
|
|
| 308 |
+ return err |
|
| 309 |
+ } |
|
| 310 |
+ |
|
| 311 |
+ // Create a tmp dir |
|
| 312 |
+ tmpDirName, err := getTempDir(b.contextPath, "docker-remote") |
|
| 313 |
+ if err != nil {
|
|
| 314 |
+ return err |
|
| 315 |
+ } |
|
| 316 |
+ ci.tmpDir = tmpDirName |
|
| 317 |
+ |
|
| 318 |
+ // Create a tmp file within our tmp dir |
|
| 319 |
+ tmpFileName := filepath.Join(tmpDirName, "tmp") |
|
| 320 |
+ tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) |
|
| 321 |
+ if err != nil {
|
|
| 322 |
+ return err |
|
| 323 |
+ } |
|
| 324 |
+ |
|
| 325 |
+ // Download and dump result to tmp file |
|
| 326 |
+ if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
|
|
| 327 |
+ In: resp.Body, |
|
| 328 |
+ Out: b.OutOld, |
|
| 329 |
+ Formatter: b.StreamFormatter, |
|
| 330 |
+ Size: resp.ContentLength, |
|
| 331 |
+ NewLines: true, |
|
| 332 |
+ ID: "", |
|
| 333 |
+ Action: "Downloading", |
|
| 334 |
+ })); err != nil {
|
|
| 335 |
+ tmpFile.Close() |
|
| 336 |
+ return err |
|
| 337 |
+ } |
|
| 338 |
+ fmt.Fprintf(b.OutStream, "\n") |
|
| 339 |
+ tmpFile.Close() |
|
| 340 |
+ |
|
| 341 |
+ // Set the mtime to the Last-Modified header value if present |
|
| 342 |
+ // Otherwise just remove atime and mtime |
|
| 343 |
+ mTime := time.Time{}
|
|
| 344 |
+ |
|
| 345 |
+ lastMod := resp.Header.Get("Last-Modified")
|
|
| 346 |
+ if lastMod != "" {
|
|
| 347 |
+ // If we can't parse it then just let it default to 'zero' |
|
| 348 |
+ // otherwise use the parsed time value |
|
| 349 |
+ if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
|
| 350 |
+ mTime = parsedMTime |
|
| 351 |
+ } |
|
| 352 |
+ } |
|
| 353 |
+ |
|
| 354 |
+ if err := system.Chtimes(tmpFileName, time.Time{}, mTime); err != nil {
|
|
| 355 |
+ return err |
|
| 356 |
+ } |
|
| 357 |
+ |
|
| 358 |
+ ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) |
|
| 359 |
+ |
|
| 360 |
+ // If the destination is a directory, figure out the filename. |
|
| 361 |
+ if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
|
|
| 362 |
+ u, err := url.Parse(origPath) |
|
| 363 |
+ if err != nil {
|
|
| 364 |
+ return err |
|
| 365 |
+ } |
|
| 366 |
+ path := filepath.FromSlash(u.Path) // Ensure in platform semantics |
|
| 367 |
+ if strings.HasSuffix(path, string(os.PathSeparator)) {
|
|
| 368 |
+ path = path[:len(path)-1] |
|
| 369 |
+ } |
|
| 370 |
+ parts := strings.Split(path, string(os.PathSeparator)) |
|
| 371 |
+ filename := parts[len(parts)-1] |
|
| 372 |
+ if filename == "" {
|
|
| 373 |
+ return fmt.Errorf("cannot determine filename from url: %s", u)
|
|
| 374 |
+ } |
|
| 375 |
+ ci.destPath = ci.destPath + filename |
|
| 376 |
+ } |
|
| 377 |
+ |
|
| 378 |
+ // Calc the checksum, even if we're using the cache |
|
| 379 |
+ r, err := archive.Tar(tmpFileName, archive.Uncompressed) |
|
| 380 |
+ if err != nil {
|
|
| 381 |
+ return err |
|
| 382 |
+ } |
|
| 383 |
+ tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) |
|
| 384 |
+ if err != nil {
|
|
| 385 |
+ return err |
|
| 386 |
+ } |
|
| 387 |
+ if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
|
|
| 388 |
+ return err |
|
| 389 |
+ } |
|
| 390 |
+ ci.hash = tarSum.Sum(nil) |
|
| 391 |
+ r.Close() |
|
| 392 |
+ |
|
| 393 |
+ return nil |
|
| 394 |
+ } |
|
| 395 |
+ |
|
| 396 |
+ // Deal with wildcards |
|
| 397 |
+ if allowWildcards && containsWildcards(origPath) {
|
|
| 398 |
+ for _, fileInfo := range b.context.GetSums() {
|
|
| 399 |
+ if fileInfo.Name() == "" {
|
|
| 400 |
+ continue |
|
| 401 |
+ } |
|
| 402 |
+ match, _ := filepath.Match(origPath, fileInfo.Name()) |
|
| 403 |
+ if !match {
|
|
| 404 |
+ continue |
|
| 405 |
+ } |
|
| 406 |
+ |
|
| 407 |
+ // Note we set allowWildcards to false in case the name has |
|
| 408 |
+ // a * in it |
|
| 409 |
+ calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false) |
|
| 410 |
+ } |
|
| 411 |
+ return nil |
|
| 412 |
+ } |
|
| 413 |
+ |
|
| 414 |
+ // Must be a dir or a file |
|
| 415 |
+ |
|
| 416 |
+ if err := b.checkPathForAddition(origPath); err != nil {
|
|
| 417 |
+ return err |
|
| 418 |
+ } |
|
| 419 |
+ fi, _ := os.Stat(filepath.Join(b.contextPath, origPath)) |
|
| 420 |
+ |
|
| 421 |
+ ci := copyInfo{}
|
|
| 422 |
+ ci.origPath = origPath |
|
| 423 |
+ ci.hash = origPath |
|
| 424 |
+ ci.destPath = destPath |
|
| 425 |
+ ci.decompress = allowDecompression |
|
| 426 |
+ *cInfos = append(*cInfos, &ci) |
|
| 427 |
+ |
|
| 428 |
+ // Deal with the single file case |
|
| 429 |
+ if !fi.IsDir() {
|
|
| 430 |
+ // This will match the first file in the sums of the archive |
|
| 431 |
+ fis := b.context.GetSums().GetFile(ci.origPath) |
|
| 432 |
+ if fis != nil {
|
|
| 433 |
+ ci.hash = "file:" + fis.Sum() |
|
| 434 |
+ } |
|
| 435 |
+ return nil |
|
| 436 |
+ } |
|
| 437 |
+ |
|
| 438 |
+ // Must be a dir |
|
| 439 |
+ var subfiles []string |
|
| 440 |
+ absOrigPath := filepath.Join(b.contextPath, ci.origPath) |
|
| 441 |
+ |
|
| 442 |
+ // Add a trailing / to make sure we only pick up nested files under |
|
| 443 |
+ // the dir and not sibling files of the dir that just happen to |
|
| 444 |
+ // start with the same chars |
|
| 445 |
+ if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
|
|
| 446 |
+ absOrigPath += string(os.PathSeparator) |
|
| 447 |
+ } |
|
| 448 |
+ |
|
| 449 |
+ // Need path w/o slash too to find matching dir w/o trailing slash |
|
| 450 |
+ absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] |
|
| 451 |
+ |
|
| 452 |
+ for _, fileInfo := range b.context.GetSums() {
|
|
| 453 |
+ absFile := filepath.Join(b.contextPath, fileInfo.Name()) |
|
| 454 |
+ // Any file in the context that starts with the given path will be |
|
| 455 |
+ // picked up and its hashcode used. However, we'll exclude the |
|
| 456 |
+ // root dir itself. We do this for a couple of reasons: |
|
| 457 |
+ // 1 - ADD/COPY will not copy the dir itself, just its children |
|
| 458 |
+ // so there's no reason to include it in the hash calc |
|
| 459 |
+ // 2 - the metadata on the dir will change when any child file |
|
| 460 |
+ // changes. This will lead to a miss in the cache check if that |
|
| 461 |
+ // child file is in the .dockerignore list. |
|
| 462 |
+ if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
|
|
| 463 |
+ subfiles = append(subfiles, fileInfo.Sum()) |
|
| 464 |
+ } |
|
| 465 |
+ } |
|
| 466 |
+ sort.Strings(subfiles) |
|
| 467 |
+ hasher := sha256.New() |
|
| 468 |
+ hasher.Write([]byte(strings.Join(subfiles, ","))) |
|
| 469 |
+ ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) |
|
| 470 |
+ |
|
| 471 |
+ return nil |
|
| 472 |
+} |
|
| 473 |
+ |
|
| 474 |
+func containsWildcards(name string) bool {
|
|
| 475 |
+ for i := 0; i < len(name); i++ {
|
|
| 476 |
+ ch := name[i] |
|
| 477 |
+ if ch == '\\' {
|
|
| 478 |
+ i++ |
|
| 479 |
+ } else if ch == '*' || ch == '?' || ch == '[' {
|
|
| 480 |
+ return true |
|
| 481 |
+ } |
|
| 482 |
+ } |
|
| 483 |
+ return false |
|
| 484 |
+} |
|
| 485 |
+ |
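A note on containsWildcards above: the backslash branch advances the index, so an escaped metacharacter is not treated as a wildcard. A self-contained sketch of that behavior (the main function is illustrative only):

    package main

    import "fmt"

    // Same logic as the builder's containsWildcards: report whether
    // name holds an unescaped *, ? or [; a backslash hides the
    // character that follows it.
    func containsWildcards(name string) bool {
        for i := 0; i < len(name); i++ {
            ch := name[i]
            if ch == '\\' {
                i++ // skip the escaped character
            } else if ch == '*' || ch == '?' || ch == '[' {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(containsWildcards("*.go"))      // true
        fmt.Println(containsWildcards(`foo\*bar`))  // false: the * is escaped
        fmt.Println(containsWildcards("plain.txt")) // false
    }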
|
| 486 |
+func (b *builder) pullImage(name string) (*image.Image, error) {
|
|
| 487 |
+ remote, tag := parsers.ParseRepositoryTag(name) |
|
| 488 |
+ if tag == "" {
|
|
| 489 |
+ tag = "latest" |
|
| 490 |
+ } |
|
| 491 |
+ |
|
| 492 |
+ pullRegistryAuth := &cliconfig.AuthConfig{}
|
|
| 493 |
+ if len(b.AuthConfigs) > 0 {
|
|
| 494 |
+ // The request came with a full auth config file, so we prefer to use that |
|
| 495 |
+ repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote) |
|
| 496 |
+ if err != nil {
|
|
| 497 |
+ return nil, err |
|
| 498 |
+ } |
|
| 499 |
+ |
|
| 500 |
+ resolvedConfig := registry.ResolveAuthConfig( |
|
| 501 |
+ &cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
|
|
| 502 |
+ repoInfo.Index, |
|
| 503 |
+ ) |
|
| 504 |
+ pullRegistryAuth = &resolvedConfig |
|
| 505 |
+ } |
|
| 506 |
+ |
|
| 507 |
+ imagePullConfig := &graph.ImagePullConfig{
|
|
| 508 |
+ AuthConfig: pullRegistryAuth, |
|
| 509 |
+ OutStream: ioutils.NopWriteCloser(b.OutOld), |
|
| 510 |
+ } |
|
| 511 |
+ |
|
| 512 |
+ if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
|
|
| 513 |
+ return nil, err |
|
| 514 |
+ } |
|
| 515 |
+ |
|
| 516 |
+ image, err := b.Daemon.Repositories().LookupImage(name) |
|
| 517 |
+ if err != nil {
|
|
| 518 |
+ return nil, err |
|
| 519 |
+ } |
|
| 520 |
+ |
|
| 521 |
+ return image, nil |
|
| 522 |
+} |
|
| 523 |
+ |
|
| 524 |
+func (b *builder) processImageFrom(img *image.Image) error {
|
|
| 525 |
+ b.image = img.ID |
|
| 526 |
+ |
|
| 527 |
+ if img.Config != nil {
|
|
| 528 |
+ b.Config = img.Config |
|
| 529 |
+ } |
|
| 530 |
+ |
|
| 531 |
+ // The default path will be blank on Windows (set by HCS) |
|
| 532 |
+ if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
|
|
| 533 |
+ b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) |
|
| 534 |
+ } |
|
| 535 |
+ |
|
| 536 |
+ // Process ONBUILD triggers if they exist |
|
| 537 |
+ if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
|
|
| 538 |
+ word := "trigger" |
|
| 539 |
+ if nTriggers > 1 {
|
|
| 540 |
+ word = "triggers" |
|
| 541 |
+ } |
|
| 542 |
+ fmt.Fprintf(b.ErrStream, "# Executing %d build %s...\n", nTriggers, word) |
|
| 543 |
+ } |
|
| 544 |
+ |
|
| 545 |
+ // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. |
|
| 546 |
+ onBuildTriggers := b.Config.OnBuild |
|
| 547 |
+ b.Config.OnBuild = []string{}
|
|
| 548 |
+ |
|
| 549 |
+ // parse the ONBUILD triggers by invoking the parser |
|
| 550 |
+ for _, step := range onBuildTriggers {
|
|
| 551 |
+ ast, err := parser.Parse(strings.NewReader(step)) |
|
| 552 |
+ if err != nil {
|
|
| 553 |
+ return err |
|
| 554 |
+ } |
|
| 555 |
+ |
|
| 556 |
+ for i, n := range ast.Children {
|
|
| 557 |
+ switch strings.ToUpper(n.Value) {
|
|
| 558 |
+ case "ONBUILD": |
|
| 559 |
+ return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
|
|
| 560 |
+ case "MAINTAINER", "FROM": |
|
| 561 |
+ return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
|
|
| 562 |
+ } |
|
| 563 |
+ |
|
| 564 |
+ if err := b.dispatch(i, n); err != nil {
|
|
| 565 |
+ return err |
|
| 566 |
+ } |
|
| 567 |
+ } |
|
| 568 |
+ } |
|
| 569 |
+ |
|
| 570 |
+ return nil |
|
| 571 |
+} |
|
| 572 |
+ |
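The ONBUILD screening above can be exercised on its own. A hedged sketch using only the parser package imported elsewhere in this change (the trigger strings are illustrative):

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/docker/builder/dockerfile/parser"
    )

    // checkTrigger applies the same restrictions processImageFrom
    // enforces on each ONBUILD trigger line.
    func checkTrigger(step string) error {
        ast, err := parser.Parse(strings.NewReader(step))
        if err != nil {
            return err
        }
        for _, n := range ast.Children {
            switch strings.ToUpper(n.Value) {
            case "ONBUILD":
                return fmt.Errorf("chaining ONBUILD isn't allowed")
            case "MAINTAINER", "FROM":
                return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
            }
        }
        return nil
    }

    func main() {
        fmt.Println(checkTrigger("RUN echo hello")) // <nil>
        fmt.Println(checkTrigger("FROM busybox"))   // rejected
    }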
|
| 573 |
+// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`) |
|
| 574 |
+// and if so attempts to look up the current `b.image` and `b.Config` pair |
|
| 575 |
+// in the current server `b.Daemon`. If an image is found, probeCache returns |
|
| 576 |
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there |
|
| 577 |
+// is any error, it returns `(false, err)`. |
|
| 578 |
+func (b *builder) probeCache() (bool, error) {
|
|
| 579 |
+ if !b.UtilizeCache || b.cacheBusted {
|
|
| 580 |
+ return false, nil |
|
| 581 |
+ } |
|
| 582 |
+ |
|
| 583 |
+ cache, err := b.Daemon.ImageGetCached(b.image, b.Config) |
|
| 584 |
+ if err != nil {
|
|
| 585 |
+ return false, err |
|
| 586 |
+ } |
|
| 587 |
+ if cache == nil {
|
|
| 588 |
+ logrus.Debugf("[BUILDER] Cache miss")
|
|
| 589 |
+ b.cacheBusted = true |
|
| 590 |
+ return false, nil |
|
| 591 |
+ } |
|
| 592 |
+ |
|
| 593 |
+ fmt.Fprintf(b.OutStream, " ---> Using cache\n") |
|
| 594 |
+ logrus.Debugf("[BUILDER] Use cached version")
|
|
| 595 |
+ b.image = cache.ID |
|
| 596 |
+ b.Daemon.Graph().Retain(b.id, cache.ID) |
|
| 597 |
+ b.activeImages = append(b.activeImages, cache.ID) |
|
| 598 |
+ return true, nil |
|
| 599 |
+} |
|
| 600 |
+ |
|
| 601 |
+func (b *builder) create() (*daemon.Container, error) {
|
|
| 602 |
+ if b.image == "" && !b.noBaseImage {
|
|
| 603 |
+ return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
|
| 604 |
+ } |
|
| 605 |
+ b.Config.Image = b.image |
|
| 606 |
+ |
|
| 607 |
+ hostConfig := &runconfig.HostConfig{
|
|
| 608 |
+ CPUShares: b.cpuShares, |
|
| 609 |
+ CPUPeriod: b.cpuPeriod, |
|
| 610 |
+ CPUQuota: b.cpuQuota, |
|
| 611 |
+ CpusetCpus: b.cpuSetCpus, |
|
| 612 |
+ CpusetMems: b.cpuSetMems, |
|
| 613 |
+ CgroupParent: b.cgroupParent, |
|
| 614 |
+ Memory: b.memory, |
|
| 615 |
+ MemorySwap: b.memorySwap, |
|
| 616 |
+ Ulimits: b.ulimits, |
|
| 617 |
+ } |
|
| 618 |
+ |
|
| 619 |
+ config := *b.Config |
|
| 620 |
+ |
|
| 621 |
+ // Create the container |
|
| 622 |
+ ccr, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
|
|
| 623 |
+ if err != nil {
|
|
| 624 |
+ return nil, err |
|
| 625 |
+ } |
|
| 626 |
+ for _, warning := range ccr.Warnings {
|
|
| 627 |
+ fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) |
|
| 628 |
+ } |
|
| 629 |
+ c, err := b.Daemon.Get(ccr.ID) |
|
| 630 |
+ if err != nil {
|
|
| 631 |
+ return nil, err |
|
| 632 |
+ } |
|
| 633 |
+ |
|
| 634 |
+ b.TmpContainers[c.ID] = struct{}{}
|
|
| 635 |
+ fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID)) |
|
| 636 |
+ |
|
| 637 |
+ if config.Cmd.Len() > 0 {
|
|
| 638 |
+ // override the entry point that may have been picked up from the base image |
|
| 639 |
+ s := config.Cmd.Slice() |
|
| 640 |
+ c.Path = s[0] |
|
| 641 |
+ c.Args = s[1:] |
|
| 642 |
+ } else {
|
|
| 643 |
+ config.Cmd = stringutils.NewStrSlice() |
|
| 644 |
+ } |
|
| 645 |
+ |
|
| 646 |
+ return c, nil |
|
| 647 |
+} |
|
| 648 |
+ |
|
| 649 |
+func (b *builder) run(c *daemon.Container) error {
|
|
| 650 |
+ var errCh chan error |
|
| 651 |
+ if b.Verbose {
|
|
| 652 |
+ errCh = c.Attach(nil, b.OutStream, b.ErrStream) |
|
| 653 |
+ } |
|
| 654 |
+ |
|
| 655 |
+ // start the container |
|
| 656 |
+ if err := c.Start(); err != nil {
|
|
| 657 |
+ return err |
|
| 658 |
+ } |
|
| 659 |
+ |
|
| 660 |
+ finished := make(chan struct{})
|
|
| 661 |
+ defer close(finished) |
|
| 662 |
+ go func() {
|
|
| 663 |
+ select {
|
|
| 664 |
+ case <-b.cancelled: |
|
| 665 |
+ logrus.Debugln("Build cancelled, killing container:", c.ID)
|
|
| 666 |
+ c.Kill() |
|
| 667 |
+ case <-finished: |
|
| 668 |
+ } |
|
| 669 |
+ }() |
|
| 670 |
+ |
|
| 671 |
+ if b.Verbose {
|
|
| 672 |
+ // Block on reading output from container, stop on err or chan closed |
|
| 673 |
+ if err := <-errCh; err != nil {
|
|
| 674 |
+ return err |
|
| 675 |
+ } |
|
| 676 |
+ } |
|
| 677 |
+ |
|
| 678 |
+ // Wait for it to finish |
|
| 679 |
+ if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
|
|
| 680 |
+ return &jsonmessage.JSONError{
|
|
| 681 |
+ Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
|
|
| 682 |
+ Code: ret, |
|
| 683 |
+ } |
|
| 684 |
+ } |
|
| 685 |
+ |
|
| 686 |
+ return nil |
|
| 687 |
+} |
|
| 688 |
+ |
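The finished channel in run() is the piece that keeps the watcher goroutine from leaking: the deferred close fires on every return path, normal or not. A stripped-down sketch of the same idiom (names and the sleep are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    func doWork(cancelled <-chan struct{}) error {
        finished := make(chan struct{})
        defer close(finished) // runs on every return path

        go func() {
            select {
            case <-cancelled:
                fmt.Println("cancelled: tearing down")
            case <-finished:
                // normal completion; the watcher just exits
            }
        }()

        time.Sleep(50 * time.Millisecond) // stand-in for container.WaitStop
        return nil
    }

    func main() {
        cancel := make(chan struct{})
        _ = doWork(cancel)
    }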
|
| 689 |
+func (b *builder) checkPathForAddition(orig string) error {
|
|
| 690 |
+ origPath := filepath.Join(b.contextPath, orig) |
|
| 691 |
+ origPath, err := symlink.EvalSymlinks(origPath) |
|
| 692 |
+ if err != nil {
|
|
| 693 |
+ if os.IsNotExist(err) {
|
|
| 694 |
+ return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 695 |
+ } |
|
| 696 |
+ return err |
|
| 697 |
+ } |
|
| 698 |
+ contextPath, err := symlink.EvalSymlinks(b.contextPath) |
|
| 699 |
+ if err != nil {
|
|
| 700 |
+ return err |
|
| 701 |
+ } |
|
| 702 |
+ if !strings.HasPrefix(origPath, contextPath) {
|
|
| 703 |
+ return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
|
| 704 |
+ } |
|
| 705 |
+ if _, err := os.Stat(origPath); err != nil {
|
|
| 706 |
+ if os.IsNotExist(err) {
|
|
| 707 |
+ return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 708 |
+ } |
|
| 709 |
+ return err |
|
| 710 |
+ } |
|
| 711 |
+ return nil |
|
| 712 |
+} |
|
| 713 |
+ |
|
| 714 |
+func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
|
|
| 715 |
+ var ( |
|
| 716 |
+ err error |
|
| 717 |
+ destExists = true |
|
| 718 |
+ origPath = filepath.Join(b.contextPath, orig) |
|
| 719 |
+ destPath string |
|
| 720 |
+ ) |
|
| 721 |
+ |
|
| 722 |
+ // Work in daemon-local OS specific file paths |
|
| 723 |
+ dest = filepath.FromSlash(dest) |
|
| 724 |
+ |
|
| 725 |
+ destPath, err = container.GetResourcePath(dest) |
|
| 726 |
+ if err != nil {
|
|
| 727 |
+ return err |
|
| 728 |
+ } |
|
| 729 |
+ |
|
| 730 |
+ // Preserve the trailing slash |
|
| 731 |
+ if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
|
|
| 732 |
+ destPath = destPath + string(os.PathSeparator) |
|
| 733 |
+ } |
|
| 734 |
+ |
|
| 735 |
+ destStat, err := os.Stat(destPath) |
|
| 736 |
+ if err != nil {
|
|
| 737 |
+ if !os.IsNotExist(err) {
|
|
| 738 |
+ logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
|
|
| 739 |
+ return err |
|
| 740 |
+ } |
|
| 741 |
+ destExists = false |
|
| 742 |
+ } |
|
| 743 |
+ |
|
| 744 |
+ fi, err := os.Stat(origPath) |
|
| 745 |
+ if err != nil {
|
|
| 746 |
+ if os.IsNotExist(err) {
|
|
| 747 |
+ return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 748 |
+ } |
|
| 749 |
+ return err |
|
| 750 |
+ } |
|
| 751 |
+ |
|
| 752 |
+ if fi.IsDir() {
|
|
| 753 |
+ return copyAsDirectory(origPath, destPath, destExists) |
|
| 754 |
+ } |
|
| 755 |
+ |
|
| 756 |
+ // If we are adding a remote file (or we've been told not to decompress), do not try to untar it |
|
| 757 |
+ if decompress {
|
|
| 758 |
+ // First try to unpack the source as an archive |
|
| 759 |
+ // to support the untar feature we need to clean up the path a little bit |
|
| 760 |
+ // because tar is very forgiving. First we need to strip off the archive's |
|
| 761 |
+ // filename from the path but this is only added if it does not end in slash |
|
| 762 |
+ tarDest := destPath |
|
| 763 |
+ if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
|
|
| 764 |
+ tarDest = filepath.Dir(destPath) |
|
| 765 |
+ } |
|
| 766 |
+ |
|
| 767 |
+ // try to successfully untar the orig |
|
| 768 |
+ if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
|
|
| 769 |
+ return nil |
|
| 770 |
+ } else if err != io.EOF {
|
|
| 771 |
+ logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
|
|
| 772 |
+ } |
|
| 773 |
+ } |
|
| 774 |
+ |
|
| 775 |
+ if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
|
|
| 776 |
+ return err |
|
| 777 |
+ } |
|
| 778 |
+ if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
|
|
| 779 |
+ return err |
|
| 780 |
+ } |
|
| 781 |
+ |
|
| 782 |
+ resPath := destPath |
|
| 783 |
+ if destExists && destStat.IsDir() {
|
|
| 784 |
+ resPath = filepath.Join(destPath, filepath.Base(origPath)) |
|
| 785 |
+ } |
|
| 786 |
+ |
|
| 787 |
+ return fixPermissions(origPath, resPath, 0, 0, destExists) |
|
| 788 |
+} |
|
| 789 |
+ |
|
| 790 |
+func copyAsDirectory(source, destination string, destExisted bool) error {
|
|
| 791 |
+ if err := chrootarchive.CopyWithTar(source, destination); err != nil {
|
|
| 792 |
+ return err |
|
| 793 |
+ } |
|
| 794 |
+ return fixPermissions(source, destination, 0, 0, destExisted) |
|
| 795 |
+} |
|
| 796 |
+ |
|
| 797 |
+func (b *builder) clearTmp() {
|
|
| 798 |
+ for c := range b.TmpContainers {
|
|
| 799 |
+ rmConfig := &daemon.ContainerRmConfig{
|
|
| 800 |
+ ForceRemove: true, |
|
| 801 |
+ RemoveVolume: true, |
|
| 802 |
+ } |
|
| 803 |
+ if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
|
|
| 804 |
+ fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) |
|
| 805 |
+ return |
|
| 806 |
+ } |
|
| 807 |
+ delete(b.TmpContainers, c) |
|
| 808 |
+ fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c)) |
|
| 809 |
+ } |
|
| 810 |
+} |
| 0 | 811 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,45 @@ |
| 0 |
+// +build freebsd linux |
|
| 1 |
+ |
|
| 2 |
+package dockerfile |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "io/ioutil" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path/filepath" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+func getTempDir(dir, prefix string) (string, error) {
|
|
| 11 |
+ return ioutil.TempDir(dir, prefix) |
|
| 12 |
+} |
|
| 13 |
+ |
|
| 14 |
+func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
|
|
| 15 |
+ // If the destination didn't already exist, or the destination isn't a |
|
| 16 |
+ // directory, then we should Lchown the destination. Otherwise, we shouldn't |
|
| 17 |
+ // Lchown the destination. |
|
| 18 |
+ destStat, err := os.Stat(destination) |
|
| 19 |
+ if err != nil {
|
|
| 20 |
+ // This should *never* be reached, because the destination must've already |
|
| 21 |
+ // been created while untar-ing the context. |
|
| 22 |
+ return err |
|
| 23 |
+ } |
|
| 24 |
+ doChownDestination := !destExisted || !destStat.IsDir() |
|
| 25 |
+ |
|
| 26 |
+ // We Walk on the source rather than on the destination because we don't |
|
| 27 |
+ // want to change permissions on things we haven't created or modified. |
|
| 28 |
+ return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
|
|
| 29 |
+ // Do not alter the walk root if it existed before, as it doesn't fall under |
|
| 30 |
+ // the domain of "things we should chown". |
|
| 31 |
+ if !doChownDestination && (source == fullpath) {
|
|
| 32 |
+ return nil |
|
| 33 |
+ } |
|
| 34 |
+ |
|
| 35 |
+ // Path is prefixed by source: substitute with destination instead. |
|
| 36 |
+ cleaned, err := filepath.Rel(source, fullpath) |
|
| 37 |
+ if err != nil {
|
|
| 38 |
+ return err |
|
| 39 |
+ } |
|
| 40 |
+ |
|
| 41 |
+ fullpath = filepath.Join(destination, cleaned) |
|
| 42 |
+ return os.Lchown(fullpath, uid, gid) |
|
| 43 |
+ }) |
|
| 44 |
+} |
| 0 | 45 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,22 @@ |
| 0 |
+// +build windows |
|
| 1 |
+ |
|
| 2 |
+package dockerfile |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "io/ioutil" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/docker/docker/pkg/longpath" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+func getTempDir(dir, prefix string) (string, error) {
|
|
| 11 |
+ tempDir, err := ioutil.TempDir(dir, prefix) |
|
| 12 |
+ if err != nil {
|
|
| 13 |
+ return "", err |
|
| 14 |
+ } |
|
| 15 |
+ return longpath.AddPrefix(tempDir), nil |
|
| 16 |
+} |
|
| 17 |
+ |
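The longpath prefix matters because classic Windows APIs cap paths at MAX_PATH (260) characters; AddPrefix opts the temporary directory out of that limit. A hedged illustration (the concrete path is made up):

    // getTempDir(`C:\build\ctx`, "docker-remote") might return
    //   \\?\C:\build\ctx\docker-remote123456789
    // where the \\?\ prefix marks an extended-length path.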
|
| 18 |
+func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
|
|
| 19 |
+ // chown is not supported on Windows |
|
| 20 |
+ return nil |
|
| 21 |
+} |
| 0 | 22 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,376 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "errors" |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "io" |
|
| 7 |
+ "io/ioutil" |
|
| 8 |
+ "os" |
|
| 9 |
+ "runtime" |
|
| 10 |
+ "strings" |
|
| 11 |
+ "sync" |
|
| 12 |
+ |
|
| 13 |
+ "github.com/docker/docker/api" |
|
| 14 |
+ "github.com/docker/docker/builder/dockerfile/parser" |
|
| 15 |
+ "github.com/docker/docker/cliconfig" |
|
| 16 |
+ "github.com/docker/docker/daemon" |
|
| 17 |
+ "github.com/docker/docker/graph/tags" |
|
| 18 |
+ "github.com/docker/docker/pkg/archive" |
|
| 19 |
+ "github.com/docker/docker/pkg/httputils" |
|
| 20 |
+ "github.com/docker/docker/pkg/parsers" |
|
| 21 |
+ "github.com/docker/docker/pkg/progressreader" |
|
| 22 |
+ "github.com/docker/docker/pkg/streamformatter" |
|
| 23 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 24 |
+ "github.com/docker/docker/pkg/ulimit" |
|
| 25 |
+ "github.com/docker/docker/pkg/urlutil" |
|
| 26 |
+ "github.com/docker/docker/registry" |
|
| 27 |
+ "github.com/docker/docker/runconfig" |
|
| 28 |
+ "github.com/docker/docker/utils" |
|
| 29 |
+) |
|
| 30 |
+ |
|
| 31 |
+// When downloading remote contexts, limit the amount (in bytes) |
|
| 32 |
+// to be read from the response body in order to detect its Content-Type |
|
| 33 |
+const maxPreambleLength = 100 |
|
| 34 |
+ |
|
| 35 |
+// whitelist of commands allowed for a commit/import |
|
| 36 |
+var validCommitCommands = map[string]bool{
|
|
| 37 |
+ "cmd": true, |
|
| 38 |
+ "entrypoint": true, |
|
| 39 |
+ "env": true, |
|
| 40 |
+ "expose": true, |
|
| 41 |
+ "label": true, |
|
| 42 |
+ "onbuild": true, |
|
| 43 |
+ "user": true, |
|
| 44 |
+ "volume": true, |
|
| 45 |
+ "workdir": true, |
|
| 46 |
+} |
|
| 47 |
+ |
|
| 48 |
+// BuiltinAllowedBuildArgs is the list of built-in allowed build args |
|
| 49 |
+var BuiltinAllowedBuildArgs = map[string]bool{
|
|
| 50 |
+ "HTTP_PROXY": true, |
|
| 51 |
+ "http_proxy": true, |
|
| 52 |
+ "HTTPS_PROXY": true, |
|
| 53 |
+ "https_proxy": true, |
|
| 54 |
+ "FTP_PROXY": true, |
|
| 55 |
+ "ftp_proxy": true, |
|
| 56 |
+ "NO_PROXY": true, |
|
| 57 |
+ "no_proxy": true, |
|
| 58 |
+} |
|
| 59 |
+ |
|
| 60 |
+// Config contains all configs for a build job |
|
| 61 |
+type Config struct {
|
|
| 62 |
+ DockerfileName string |
|
| 63 |
+ RemoteURL string |
|
| 64 |
+ RepoName string |
|
| 65 |
+ SuppressOutput bool |
|
| 66 |
+ NoCache bool |
|
| 67 |
+ Remove bool |
|
| 68 |
+ ForceRemove bool |
|
| 69 |
+ Pull bool |
|
| 70 |
+ Memory int64 |
|
| 71 |
+ MemorySwap int64 |
|
| 72 |
+ CPUShares int64 |
|
| 73 |
+ CPUPeriod int64 |
|
| 74 |
+ CPUQuota int64 |
|
| 75 |
+ CPUSetCpus string |
|
| 76 |
+ CPUSetMems string |
|
| 77 |
+ CgroupParent string |
|
| 78 |
+ Ulimits []*ulimit.Ulimit |
|
| 79 |
+ AuthConfigs map[string]cliconfig.AuthConfig |
|
| 80 |
+ BuildArgs map[string]string |
|
| 81 |
+ |
|
| 82 |
+ Stdout io.Writer |
|
| 83 |
+ Context io.ReadCloser |
|
| 84 |
+ // When closed, the job has been cancelled. |
|
| 85 |
+ // Note: not all jobs implement cancellation. |
|
| 86 |
+ // See Cancel() and WaitCancelled() below. |
|
| 87 |
+ cancelled chan struct{}
|
|
| 88 |
+ cancelOnce sync.Once |
|
| 89 |
+} |
|
| 90 |
+ |
|
| 91 |
+// Cancel signals the build job to cancel |
|
| 92 |
+func (b *Config) Cancel() {
|
|
| 93 |
+ b.cancelOnce.Do(func() {
|
|
| 94 |
+ close(b.cancelled) |
|
| 95 |
+ }) |
|
| 96 |
+} |
|
| 97 |
+ |
|
| 98 |
+// WaitCancelled returns a channel which is closed when the job is
|
|
| 99 |
+// cancelled; receiving from a closed channel never blocks. |
|
| 100 |
+func (b *Config) WaitCancelled() <-chan struct{} {
|
|
| 101 |
+ return b.cancelled |
|
| 102 |
+} |
|
| 103 |
+ |
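Cancel and WaitCancelled pair up as a one-shot broadcast: closing the channel wakes every waiter at once, and the sync.Once in the struct makes repeated Cancel calls harmless. A hedged usage sketch, with NewBuildConfig from just below:

    cfg := NewBuildConfig()

    go func() {
        <-cfg.WaitCancelled() // unblocks once the channel is closed
        // stop whatever work is in flight
    }()

    cfg.Cancel() // first call closes the channel
    cfg.Cancel() // subsequent calls are no-ops thanks to sync.Once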
|
| 104 |
+// NewBuildConfig returns a new Config struct |
|
| 105 |
+func NewBuildConfig() *Config {
|
|
| 106 |
+ return &Config{
|
|
| 107 |
+ AuthConfigs: map[string]cliconfig.AuthConfig{},
|
|
| 108 |
+ cancelled: make(chan struct{}),
|
|
| 109 |
+ } |
|
| 110 |
+} |
|
| 111 |
+ |
|
| 112 |
+// Build is the main entry point of the package: it assembles the builder |
|
| 113 |
+// struct and calls builder.Run() to do the real build work. |
|
| 114 |
+func Build(d *daemon.Daemon, buildConfig *Config) error {
|
|
| 115 |
+ var ( |
|
| 116 |
+ repoName string |
|
| 117 |
+ tag string |
|
| 118 |
+ context io.ReadCloser |
|
| 119 |
+ ) |
|
| 120 |
+ sf := streamformatter.NewJSONStreamFormatter() |
|
| 121 |
+ |
|
| 122 |
+ repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName) |
|
| 123 |
+ if repoName != "" {
|
|
| 124 |
+ if err := registry.ValidateRepositoryName(repoName); err != nil {
|
|
| 125 |
+ return err |
|
| 126 |
+ } |
|
| 127 |
+ if len(tag) > 0 {
|
|
| 128 |
+ if err := tags.ValidateTagName(tag); err != nil {
|
|
| 129 |
+ return err |
|
| 130 |
+ } |
|
| 131 |
+ } |
|
| 132 |
+ } |
|
| 133 |
+ |
|
| 134 |
+ if buildConfig.RemoteURL == "" {
|
|
| 135 |
+ context = ioutil.NopCloser(buildConfig.Context) |
|
| 136 |
+ } else if urlutil.IsGitURL(buildConfig.RemoteURL) {
|
|
| 137 |
+ root, err := utils.GitClone(buildConfig.RemoteURL) |
|
| 138 |
+ if err != nil {
|
|
| 139 |
+ return err |
|
| 140 |
+ } |
|
| 141 |
+ defer os.RemoveAll(root) |
|
| 142 |
+ |
|
| 143 |
+ c, err := archive.Tar(root, archive.Uncompressed) |
|
| 144 |
+ if err != nil {
|
|
| 145 |
+ return err |
|
| 146 |
+ } |
|
| 147 |
+ context = c |
|
| 148 |
+ } else if urlutil.IsURL(buildConfig.RemoteURL) {
|
|
| 149 |
+ f, err := httputils.Download(buildConfig.RemoteURL) |
|
| 150 |
+ if err != nil {
|
|
| 151 |
+ return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
|
|
| 152 |
+ } |
|
| 153 |
+ defer f.Body.Close() |
|
| 154 |
+ ct := f.Header.Get("Content-Type")
|
|
| 155 |
+ clen := f.ContentLength |
|
| 156 |
+ contentType, bodyReader, err := inspectResponse(ct, f.Body, clen) |
|
| 157 |
+ |
|
| 158 |
+ defer bodyReader.Close() |
|
| 159 |
+ |
|
| 160 |
+ if err != nil {
|
|
| 161 |
+ return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
|
|
| 162 |
+ } |
|
| 163 |
+ if contentType == httputils.MimeTypes.TextPlain {
|
|
| 164 |
+ dockerFile, err := ioutil.ReadAll(bodyReader) |
|
| 165 |
+ if err != nil {
|
|
| 166 |
+ return err |
|
| 167 |
+ } |
|
| 168 |
+ |
|
| 169 |
+ // When we're downloading just a Dockerfile put it in |
|
| 170 |
+ // the default name - don't allow the client to move/specify it |
|
| 171 |
+ buildConfig.DockerfileName = api.DefaultDockerfileName |
|
| 172 |
+ |
|
| 173 |
+ c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile)) |
|
| 174 |
+ if err != nil {
|
|
| 175 |
+ return err |
|
| 176 |
+ } |
|
| 177 |
+ context = c |
|
| 178 |
+ } else {
|
|
| 179 |
+ // Pass through - this is a pre-packaged context, presumably |
|
| 180 |
+ // with a Dockerfile with the right name inside it. |
|
| 181 |
+ prCfg := progressreader.Config{
|
|
| 182 |
+ In: bodyReader, |
|
| 183 |
+ Out: buildConfig.Stdout, |
|
| 184 |
+ Formatter: sf, |
|
| 185 |
+ Size: clen, |
|
| 186 |
+ NewLines: true, |
|
| 187 |
+ ID: "Downloading context", |
|
| 188 |
+ Action: buildConfig.RemoteURL, |
|
| 189 |
+ } |
|
| 190 |
+ context = progressreader.New(prCfg) |
|
| 191 |
+ } |
|
| 192 |
+ } |
|
| 193 |
+ |
|
| 194 |
+ defer context.Close() |
|
| 195 |
+ |
|
| 196 |
+ builder := &builder{
|
|
| 197 |
+ Daemon: d, |
|
| 198 |
+ OutStream: &streamformatter.StdoutFormatter{
|
|
| 199 |
+ Writer: buildConfig.Stdout, |
|
| 200 |
+ StreamFormatter: sf, |
|
| 201 |
+ }, |
|
| 202 |
+ ErrStream: &streamformatter.StderrFormatter{
|
|
| 203 |
+ Writer: buildConfig.Stdout, |
|
| 204 |
+ StreamFormatter: sf, |
|
| 205 |
+ }, |
|
| 206 |
+ Verbose: !buildConfig.SuppressOutput, |
|
| 207 |
+ UtilizeCache: !buildConfig.NoCache, |
|
| 208 |
+ Remove: buildConfig.Remove, |
|
| 209 |
+ ForceRemove: buildConfig.ForceRemove, |
|
| 210 |
+ Pull: buildConfig.Pull, |
|
| 211 |
+ OutOld: buildConfig.Stdout, |
|
| 212 |
+ StreamFormatter: sf, |
|
| 213 |
+ AuthConfigs: buildConfig.AuthConfigs, |
|
| 214 |
+ dockerfileName: buildConfig.DockerfileName, |
|
| 215 |
+ cpuShares: buildConfig.CPUShares, |
|
| 216 |
+ cpuPeriod: buildConfig.CPUPeriod, |
|
| 217 |
+ cpuQuota: buildConfig.CPUQuota, |
|
| 218 |
+ cpuSetCpus: buildConfig.CPUSetCpus, |
|
| 219 |
+ cpuSetMems: buildConfig.CPUSetMems, |
|
| 220 |
+ cgroupParent: buildConfig.CgroupParent, |
|
| 221 |
+ memory: buildConfig.Memory, |
|
| 222 |
+ memorySwap: buildConfig.MemorySwap, |
|
| 223 |
+ ulimits: buildConfig.Ulimits, |
|
| 224 |
+ cancelled: buildConfig.WaitCancelled(), |
|
| 225 |
+ id: stringid.GenerateRandomID(), |
|
| 226 |
+ buildArgs: buildConfig.BuildArgs, |
|
| 227 |
+ allowedBuildArgs: make(map[string]bool), |
|
| 228 |
+ } |
|
| 229 |
+ |
|
| 230 |
+ defer func() {
|
|
| 231 |
+ builder.Daemon.Graph().Release(builder.id, builder.activeImages...) |
|
| 232 |
+ }() |
|
| 233 |
+ |
|
| 234 |
+ id, err := builder.Run(context) |
|
| 235 |
+ if err != nil {
|
|
| 236 |
+ return err |
|
| 237 |
+ } |
|
| 238 |
+ if repoName != "" {
|
|
| 239 |
+ return d.Repositories().Tag(repoName, tag, id, true) |
|
| 240 |
+ } |
|
| 241 |
+ return nil |
|
| 242 |
+} |
|
| 243 |
+ |
|
| 244 |
+// BuildFromConfig builds directly from parameter 'changes', which comes |
|
| 245 |
+// from Dockerfile entries. It will: |
|
| 246 |
+// |
|
| 247 |
+// - call parser.Parse() to get AST root from Dockerfile entries |
|
| 248 |
+// - do build by calling builder.dispatch() to call all entries' handling routines |
|
| 249 |
+func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
|
|
| 250 |
+ ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) |
|
| 251 |
+ if err != nil {
|
|
| 252 |
+ return nil, err |
|
| 253 |
+ } |
|
| 254 |
+ |
|
| 255 |
+ // ensure that the commands are valid |
|
| 256 |
+ for _, n := range ast.Children {
|
|
| 257 |
+ if !validCommitCommands[n.Value] {
|
|
| 258 |
+ return nil, fmt.Errorf("%s is not a valid change command", n.Value)
|
|
| 259 |
+ } |
|
| 260 |
+ } |
|
| 261 |
+ |
|
| 262 |
+ builder := &builder{
|
|
| 263 |
+ Daemon: d, |
|
| 264 |
+ Config: c, |
|
| 265 |
+ OutStream: ioutil.Discard, |
|
| 266 |
+ ErrStream: ioutil.Discard, |
|
| 267 |
+ disableCommit: true, |
|
| 268 |
+ } |
|
| 269 |
+ |
|
| 270 |
+ for i, n := range ast.Children {
|
|
| 271 |
+ if err := builder.dispatch(i, n); err != nil {
|
|
| 272 |
+ return nil, err |
|
| 273 |
+ } |
|
| 274 |
+ } |
|
| 275 |
+ |
|
| 276 |
+ return builder.Config, nil |
|
| 277 |
+} |
|
| 278 |
+ |
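As used by the commit and image-create handlers, BuildFromConfig applies a handful of instruction strings to a config. A hedged sketch of a call site (the daemon value d and the runconfig import are assumed to exist in the caller):

    changes := []string{
        `ENV FOO=bar`,
        `CMD ["/bin/sh"]`,
    }
    newConfig, err := BuildFromConfig(d, &runconfig.Config{}, changes)
    if err != nil {
        // an instruction outside validCommitCommands, such as RUN,
        // is rejected here, e.g. "run is not a valid change command"
    }
    _ = newConfig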
|
| 279 |
+// CommitConfig contains build configs for commit operation |
|
| 280 |
+type CommitConfig struct {
|
|
| 281 |
+ Pause bool |
|
| 282 |
+ Repo string |
|
| 283 |
+ Tag string |
|
| 284 |
+ Author string |
|
| 285 |
+ Comment string |
|
| 286 |
+ Changes []string |
|
| 287 |
+ Config *runconfig.Config |
|
| 288 |
+} |
|
| 289 |
+ |
|
| 290 |
+// Commit will create a new image from a container's changes |
|
| 291 |
+func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
|
|
| 292 |
+ container, err := d.Get(name) |
|
| 293 |
+ if err != nil {
|
|
| 294 |
+ return "", err |
|
| 295 |
+ } |
|
| 296 |
+ |
|
| 297 |
+ // It is not possible to commit a running container on Windows |
|
| 298 |
+ if runtime.GOOS == "windows" && container.IsRunning() {
|
|
| 299 |
+ return "", fmt.Errorf("Windows does not support commit of a running container")
|
|
| 300 |
+ } |
|
| 301 |
+ |
|
| 302 |
+ if c.Config == nil {
|
|
| 303 |
+ c.Config = &runconfig.Config{}
|
|
| 304 |
+ } |
|
| 305 |
+ |
|
| 306 |
+ newConfig, err := BuildFromConfig(d, c.Config, c.Changes) |
|
| 307 |
+ if err != nil {
|
|
| 308 |
+ return "", err |
|
| 309 |
+ } |
|
| 310 |
+ |
|
| 311 |
+ if err := runconfig.Merge(newConfig, container.Config); err != nil {
|
|
| 312 |
+ return "", err |
|
| 313 |
+ } |
|
| 314 |
+ |
|
| 315 |
+ commitCfg := &daemon.ContainerCommitConfig{
|
|
| 316 |
+ Pause: c.Pause, |
|
| 317 |
+ Repo: c.Repo, |
|
| 318 |
+ Tag: c.Tag, |
|
| 319 |
+ Author: c.Author, |
|
| 320 |
+ Comment: c.Comment, |
|
| 321 |
+ Config: newConfig, |
|
| 322 |
+ } |
|
| 323 |
+ |
|
| 324 |
+ img, err := d.Commit(container, commitCfg) |
|
| 325 |
+ if err != nil {
|
|
| 326 |
+ return "", err |
|
| 327 |
+ } |
|
| 328 |
+ |
|
| 329 |
+ return img.ID, nil |
|
| 330 |
+} |
|
| 331 |
+ |
|
| 332 |
+// inspectResponse looks into the http response data at r to determine whether its |
|
| 333 |
+// content-type is on the list of acceptable content types for remote build contexts. |
|
| 334 |
+// This function returns: |
|
| 335 |
+// - a string representation of the detected content-type |
|
| 336 |
+// - an io.Reader for the response body |
|
| 337 |
+// - an error value which will be non-nil either when something goes wrong while |
|
| 338 |
+// reading bytes from r or when the detected content-type is not acceptable. |
|
| 339 |
+func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
|
|
| 340 |
+ plen := clen |
|
| 341 |
+ if plen <= 0 || plen > maxPreambleLength {
|
|
| 342 |
+ plen = maxPreambleLength |
|
| 343 |
+ } |
|
| 344 |
+ |
|
| 345 |
+ preamble := make([]byte, plen, plen) |
|
| 346 |
+ rlen, err := r.Read(preamble) |
|
| 347 |
+ if rlen == 0 {
|
|
| 348 |
+ return ct, r, errors.New("Empty response")
|
|
| 349 |
+ } |
|
| 350 |
+ if err != nil && err != io.EOF {
|
|
| 351 |
+ return ct, r, err |
|
| 352 |
+ } |
|
| 353 |
+ |
|
| 354 |
+ preambleR := bytes.NewReader(preamble) |
|
| 355 |
+ bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) |
|
| 356 |
+ // Some web servers will use application/octet-stream as the default |
|
| 357 |
+ // content type for files without an extension (e.g. 'Dockerfile') |
|
| 358 |
+ // so if we receive this value we better check for text content |
|
| 359 |
+ contentType := ct |
|
| 360 |
+ if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
|
|
| 361 |
+ contentType, _, err = httputils.DetectContentType(preamble) |
|
| 362 |
+ if err != nil {
|
|
| 363 |
+ return contentType, bodyReader, err |
|
| 364 |
+ } |
|
| 365 |
+ } |
|
| 366 |
+ |
|
| 367 |
+ contentType = selectAcceptableMIME(contentType) |
|
| 368 |
+ var cterr error |
|
| 369 |
+ if len(contentType) == 0 {
|
|
| 370 |
+ cterr = fmt.Errorf("unsupported Content-Type %q", ct)
|
|
| 371 |
+ contentType = ct |
|
| 372 |
+ } |
|
| 373 |
+ |
|
| 374 |
+ return contentType, bodyReader, cterr |
|
| 375 |
+} |
| 0 | 376 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,113 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "io/ioutil" |
|
| 5 |
+ "testing" |
|
| 6 |
+) |
|
| 7 |
+ |
|
| 8 |
+var textPlainDockerfile = "FROM busybox" |
|
| 9 |
+var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic
|
|
| 10 |
+ |
|
| 11 |
+func TestInspectEmptyResponse(t *testing.T) {
|
|
| 12 |
+ ct := "application/octet-stream" |
|
| 13 |
+ br := ioutil.NopCloser(bytes.NewReader([]byte("")))
|
|
| 14 |
+ contentType, bReader, err := inspectResponse(ct, br, 0) |
|
| 15 |
+ if err == nil {
|
|
| 16 |
+ t.Fatalf("Should have generated an error for an empty response")
|
|
| 17 |
+ } |
|
| 18 |
+ if contentType != "application/octet-stream" {
|
|
| 19 |
+ t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 20 |
+ } |
|
| 21 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 22 |
+ if err != nil {
|
|
| 23 |
+ t.Fatal(err) |
|
| 24 |
+ } |
|
| 25 |
+ if len(body) != 0 {
|
|
| 26 |
+ t.Fatal("response body should remain empty")
|
|
| 27 |
+ } |
|
| 28 |
+} |
|
| 29 |
+ |
|
| 30 |
+func TestInspectResponseBinary(t *testing.T) {
|
|
| 31 |
+ ct := "application/octet-stream" |
|
| 32 |
+ br := ioutil.NopCloser(bytes.NewReader(binaryContext)) |
|
| 33 |
+ contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) |
|
| 34 |
+ if err != nil {
|
|
| 35 |
+ t.Fatal(err) |
|
| 36 |
+ } |
|
| 37 |
+ if contentType != "application/octet-stream" {
|
|
| 38 |
+ t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 39 |
+ } |
|
| 40 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 41 |
+ if err != nil {
|
|
| 42 |
+ t.Fatal(err) |
|
| 43 |
+ } |
|
| 44 |
+ if len(body) != len(binaryContext) {
|
|
| 45 |
+ t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body))
|
|
| 46 |
+ } |
|
| 47 |
+ for i := range body {
|
|
| 48 |
+ if body[i] != binaryContext[i] {
|
|
| 49 |
+ t.Fatalf("Corrupted response body at byte index %d", i)
|
|
| 50 |
+ } |
|
| 51 |
+ } |
|
| 52 |
+} |
|
| 53 |
+ |
|
| 54 |
+func TestResponseUnsupportedContentType(t *testing.T) {
|
|
| 55 |
+ content := []byte(textPlainDockerfile) |
|
| 56 |
+ ct := "application/json" |
|
| 57 |
+ br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 58 |
+ contentType, bReader, err := inspectResponse(ct, br, int64(len(textPlainDockerfile))) |
|
| 59 |
+ |
|
| 60 |
+ if err == nil {
|
|
| 61 |
+ t.Fatal("Should have returned an error on content-type 'application/json'")
|
|
| 62 |
+ } |
|
| 63 |
+ if contentType != ct {
|
|
| 64 |
+ t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType)
|
|
| 65 |
+ } |
|
| 66 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 67 |
+ if err != nil {
|
|
| 68 |
+ t.Fatal(err) |
|
| 69 |
+ } |
|
| 70 |
+ if string(body) != textPlainDockerfile {
|
|
| 71 |
+ t.Fatalf("Corrupted response body %s", body)
|
|
| 72 |
+ } |
|
| 73 |
+} |
|
| 74 |
+ |
|
| 75 |
+func TestInspectResponseTextSimple(t *testing.T) {
|
|
| 76 |
+ content := []byte(textPlainDockerfile) |
|
| 77 |
+ ct := "text/plain" |
|
| 78 |
+ br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 79 |
+ contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) |
|
| 80 |
+ if err != nil {
|
|
| 81 |
+ t.Fatal(err) |
|
| 82 |
+ } |
|
| 83 |
+ if contentType != "text/plain" {
|
|
| 84 |
+ t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 85 |
+ } |
|
| 86 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 87 |
+ if err != nil {
|
|
| 88 |
+ t.Fatal(err) |
|
| 89 |
+ } |
|
| 90 |
+ if string(body) != textPlainDockerfile {
|
|
| 91 |
+ t.Fatalf("Corrupted response body %s", body)
|
|
| 92 |
+ } |
|
| 93 |
+} |
|
| 94 |
+ |
|
| 95 |
+func TestInspectResponseEmptyContentType(t *testing.T) {
|
|
| 96 |
+ content := []byte(textPlainDockerfile) |
|
| 97 |
+ br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 98 |
+ contentType, bodyReader, err := inspectResponse("", br, int64(len(content)))
|
|
| 99 |
+ if err != nil {
|
|
| 100 |
+ t.Fatal(err) |
|
| 101 |
+ } |
|
| 102 |
+ if contentType != "text/plain" {
|
|
| 103 |
+ t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 104 |
+ } |
|
| 105 |
+ body, err := ioutil.ReadAll(bodyReader) |
|
| 106 |
+ if err != nil {
|
|
| 107 |
+ t.Fatal(err) |
|
| 108 |
+ } |
|
| 109 |
+ if string(body) != textPlainDockerfile {
|
|
| 110 |
+ t.Fatalf("Corrupted response body %s", body)
|
|
| 111 |
+ } |
|
| 112 |
+} |
| 0 | 113 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,32 @@ |
| 0 |
+package main |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "os" |
|
| 5 |
+ |
|
| 6 |
+ "github.com/docker/docker/builder/parser" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+func main() {
|
|
| 10 |
+ var f *os.File |
|
| 11 |
+ var err error |
|
| 12 |
+ |
|
| 13 |
+ if len(os.Args) < 2 {
|
|
| 14 |
+ fmt.Println("please supply filename(s)")
|
|
| 15 |
+ os.Exit(1) |
|
| 16 |
+ } |
|
| 17 |
+ |
|
| 18 |
+ for _, fn := range os.Args[1:] {
|
|
| 19 |
+ f, err = os.Open(fn) |
|
| 20 |
+ if err != nil {
|
|
| 21 |
+ panic(err) |
|
| 22 |
+ } |
|
| 23 |
+ |
|
| 24 |
+ ast, err := parser.Parse(f) |
|
| 25 |
+ if err != nil {
|
|
| 26 |
+ panic(err) |
|
| 27 |
+ } else {
|
|
| 28 |
+ fmt.Println(ast.Dump()) |
|
| 29 |
+ } |
|
| 30 |
+ } |
|
| 31 |
+} |
| 0 | 32 |
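The program above is a small debugging aid: point it at one or more Dockerfiles and it prints each parse tree via ast.Dump(). A hypothetical invocation (the output is whatever Dump() produces for the file):

    $ go run main.go ./Dockerfile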
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,55 @@ |
| 0 |
+package parser |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "testing" |
|
| 4 |
+) |
|
| 5 |
+ |
|
| 6 |
+var invalidJSONArraysOfStrings = []string{
|
|
| 7 |
+ `["a",42,"b"]`, |
|
| 8 |
+ `["a",123.456,"b"]`, |
|
| 9 |
+ `["a",{},"b"]`,
|
|
| 10 |
+ `["a",{"c": "d"},"b"]`,
|
|
| 11 |
+ `["a",["c"],"b"]`, |
|
| 12 |
+ `["a",true,"b"]`, |
|
| 13 |
+ `["a",false,"b"]`, |
|
| 14 |
+ `["a",null,"b"]`, |
|
| 15 |
+} |
|
| 16 |
+ |
|
| 17 |
+var validJSONArraysOfStrings = map[string][]string{
|
|
| 18 |
+ `[]`: {},
|
|
| 19 |
+ `[""]`: {""},
|
|
| 20 |
+ `["a"]`: {"a"},
|
|
| 21 |
+ `["a","b"]`: {"a", "b"},
|
|
| 22 |
+ `[ "a", "b" ]`: {"a", "b"},
|
|
| 23 |
+ `[	"a",	"b"	]`: {"a", "b"},
|
|
| 24 |
+ ` [ "a", "b" ] `: {"a", "b"},
|
|
| 25 |
+ `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
|
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+func TestJSONArraysOfStrings(t *testing.T) {
|
|
| 29 |
+ for json, expected := range validJSONArraysOfStrings {
|
|
| 30 |
+ if node, _, err := parseJSON(json); err != nil {
|
|
| 31 |
+ t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
|
|
| 32 |
+ } else {
|
|
| 33 |
+ i := 0 |
|
| 34 |
+ for node != nil {
|
|
| 35 |
+ if i >= len(expected) {
|
|
| 36 |
+ t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json)
|
|
| 37 |
+ } |
|
| 38 |
+ if node.Value != expected[i] {
|
|
| 39 |
+ t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i)
|
|
| 40 |
+ } |
|
| 41 |
+ node = node.Next |
|
| 42 |
+ i++ |
|
| 43 |
+ } |
|
| 44 |
+ if i != len(expected) {
|
|
| 45 |
+ t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i, json)
|
|
| 46 |
+ } |
|
| 47 |
+ } |
|
| 48 |
+ } |
|
| 49 |
+ for _, json := range invalidJSONArraysOfStrings {
|
|
| 50 |
+ if _, _, err := parseJSON(json); err != errDockerfileNotStringArray {
|
|
| 51 |
+ t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
|
|
| 52 |
+ } |
|
| 53 |
+ } |
|
| 54 |
+} |
| 0 | 55 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,331 @@ |
| 0 |
+package parser |
|
| 1 |
+ |
|
| 2 |
+// line parsers are dispatch calls that parse a single unit of text into a |
|
| 3 |
+// Node object which contains the whole statement. Dockerfiles have parsing |
|
| 4 |
+// rules that vary per command (though they are rarely unique; ONBUILD is |
|
| 5 |
+// the notable exception), and these parsers unify the processing in a way |
|
| 6 |
+// that keeps it manageable. |
|
| 7 |
+ |
|
| 8 |
+import ( |
|
| 9 |
+ "encoding/json" |
|
| 10 |
+ "errors" |
|
| 11 |
+ "fmt" |
|
| 12 |
+ "strings" |
|
| 13 |
+ "unicode" |
|
| 14 |
+) |
|
| 15 |
+ |
|
| 16 |
+var ( |
|
| 17 |
+ errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.")
|
|
| 18 |
+) |
|
| 19 |
+ |
|
| 20 |
+// ignore the current argument. This will still leave a command parsed, but |
|
| 21 |
+// will not incorporate the arguments into the ast. |
|
| 22 |
+func parseIgnore(rest string) (*Node, map[string]bool, error) {
|
|
| 23 |
+ return &Node{}, nil, nil
|
|
| 24 |
+} |
|
| 25 |
+ |
|
| 26 |
+// used for onbuild. Could potentially be used for anything that represents a |
|
| 27 |
+// statement with sub-statements. |
|
| 28 |
+// |
|
| 29 |
+// ONBUILD RUN foo bar -> (onbuild (run foo bar)) |
|
| 30 |
+// |
|
| 31 |
+func parseSubCommand(rest string) (*Node, map[string]bool, error) {
|
|
| 32 |
+ if rest == "" {
|
|
| 33 |
+ return nil, nil, nil |
|
| 34 |
+ } |
|
| 35 |
+ |
|
| 36 |
+ _, child, err := parseLine(rest) |
|
| 37 |
+ if err != nil {
|
|
| 38 |
+ return nil, nil, err |
|
| 39 |
+ } |
|
| 40 |
+ |
|
| 41 |
+ return &Node{Children: []*Node{child}}, nil, nil
|
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+// helper to parse words (i.e space delimited or quoted strings) in a statement. |
|
| 45 |
+// The quotes are preserved as part of this function and they are stripped later |
|
| 46 |
+// as part of processWords(). |
|
| 47 |
+func parseWords(rest string) []string {
|
|
| 48 |
+ const ( |
|
| 49 |
+ inSpaces = iota // looking for start of a word |
|
| 50 |
+ inWord |
|
| 51 |
+ inQuote |
|
| 52 |
+ ) |
|
| 53 |
+ |
|
| 54 |
+ words := []string{}
|
|
| 55 |
+ phase := inSpaces |
|
| 56 |
+ word := "" |
|
| 57 |
+ quote := '\000' |
|
| 58 |
+ blankOK := false |
|
| 59 |
+ var ch rune |
|
| 60 |
+ |
|
| 61 |
+ for pos := 0; pos <= len(rest); pos++ {
|
|
| 62 |
+ if pos != len(rest) {
|
|
| 63 |
+ ch = rune(rest[pos]) |
|
| 64 |
+ } |
|
| 65 |
+ |
|
| 66 |
+ if phase == inSpaces { // Looking for start of word
|
|
| 67 |
+ if pos == len(rest) { // end of input
|
|
| 68 |
+ break |
|
| 69 |
+ } |
|
| 70 |
+ if unicode.IsSpace(ch) { // skip spaces
|
|
| 71 |
+ continue |
|
| 72 |
+ } |
|
| 73 |
+ phase = inWord // found it, fall thru |
|
| 74 |
+ } |
|
| 75 |
+ if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
|
|
| 76 |
+ if blankOK || len(word) > 0 {
|
|
| 77 |
+ words = append(words, word) |
|
| 78 |
+ } |
|
| 79 |
+ break |
|
| 80 |
+ } |
|
| 81 |
+ if phase == inWord {
|
|
| 82 |
+ if unicode.IsSpace(ch) {
|
|
| 83 |
+ phase = inSpaces |
|
| 84 |
+ if blankOK || len(word) > 0 {
|
|
| 85 |
+ words = append(words, word) |
|
| 86 |
+ } |
|
| 87 |
+ word = "" |
|
| 88 |
+ blankOK = false |
|
| 89 |
+ continue |
|
| 90 |
+ } |
|
| 91 |
+ if ch == '\'' || ch == '"' {
|
|
| 92 |
+ quote = ch |
|
| 93 |
+ blankOK = true |
|
| 94 |
+ phase = inQuote |
|
| 95 |
+ } |
|
| 96 |
+ if ch == '\\' {
|
|
| 97 |
+ if pos+1 == len(rest) {
|
|
| 98 |
+ continue // just skip \ at end |
|
| 99 |
+ } |
|
| 100 |
+ // If we're not quoted and we see a \, then always just |
|
| 101 |
+ // add \ plus the char to the word, even if the char |
|
| 102 |
+ // is a quote. |
|
| 103 |
+ word += string(ch) |
|
| 104 |
+ pos++ |
|
| 105 |
+ ch = rune(rest[pos]) |
|
| 106 |
+ } |
|
| 107 |
+ word += string(ch) |
|
| 108 |
+ continue |
|
| 109 |
+ } |
|
| 110 |
+ if phase == inQuote {
|
|
| 111 |
+ if ch == quote {
|
|
| 112 |
+ phase = inWord |
|
| 113 |
+ } |
|
| 114 |
+ // \ is special except for ' quotes - can't escape anything for ' |
|
| 115 |
+ if ch == '\\' && quote != '\'' {
|
|
| 116 |
+ if pos+1 == len(rest) {
|
|
| 117 |
+ phase = inWord |
|
| 118 |
+ continue // just skip \ at end |
|
| 119 |
+ } |
|
| 120 |
+ pos++ |
|
| 121 |
+ nextCh := rune(rest[pos]) |
|
| 122 |
+ word += string(ch) |
|
| 123 |
+ ch = nextCh |
|
| 124 |
+ } |
|
| 125 |
+ word += string(ch) |
|
| 126 |
+ } |
|
| 127 |
+ } |
|
| 128 |
+ |
|
| 129 |
+ return words |
|
| 130 |
+} |
|
| 131 |
+ |
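To pin down the tokenizer's behavior, here is a hedged sketch of a table test that could sit next to parseWords in this package; the expected values follow from the state machine above (quotes are kept at this stage, and blankOK preserves empty quoted values):

    package parser

    import (
        "reflect"
        "testing"
    )

    func TestParseWordsSketch(t *testing.T) {
        cases := map[string][]string{
            "a  b":         {"a", "b"},
            `name="a b" c`: {`name="a b"`, "c"},
            `x=""`:         {`x=""`},
        }
        for input, expected := range cases {
            if got := parseWords(input); !reflect.DeepEqual(got, expected) {
                t.Fatalf("parseWords(%q) = %v, expected %v", input, got, expected)
            }
        }
    }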
|
| 132 |
+// parse environment like statements. Note that this does *not* handle |
|
| 133 |
+// variable interpolation, which will be handled in the evaluator. |
|
| 134 |
+func parseNameVal(rest string, key string) (*Node, map[string]bool, error) {
|
|
| 135 |
+ // This is kind of tricky because we need to support the old |
|
| 136 |
+ // variant: KEY name value |
|
| 137 |
+ // as well as the new one: KEY name=value ... |
|
| 138 |
+ // The trigger to know which one is being used will be whether we hit |
|
| 139 |
+ // a space or = first. space ==> old, "=" ==> new |
|
| 140 |
+ |
|
| 141 |
+ words := parseWords(rest) |
|
| 142 |
+ if len(words) == 0 {
|
|
| 143 |
+ return nil, nil, nil |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ var rootnode *Node |
|
| 147 |
+ |
|
| 148 |
+ // Old format (KEY name value) |
|
| 149 |
+ if !strings.Contains(words[0], "=") {
|
|
| 150 |
+ node := &Node{}
|
|
| 151 |
+ rootnode = node |
|
| 152 |
+ strs := tokenWhitespace.Split(rest, 2) |
|
| 153 |
+ |
|
| 154 |
+ if len(strs) < 2 {
|
|
| 155 |
+ return nil, nil, fmt.Errorf(key + " must have two arguments") |
|
| 156 |
+ } |
|
| 157 |
+ |
|
| 158 |
+ node.Value = strs[0] |
|
| 159 |
+ node.Next = &Node{}
|
|
| 160 |
+ node.Next.Value = strs[1] |
|
| 161 |
+ } else {
|
|
| 162 |
+ var prevNode *Node |
|
| 163 |
+ for i, word := range words {
|
|
| 164 |
+ if !strings.Contains(word, "=") {
|
|
| 165 |
+ return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
|
|
| 166 |
+ } |
|
| 167 |
+ parts := strings.SplitN(word, "=", 2) |
|
| 168 |
+ |
|
| 169 |
+ name := &Node{}
|
|
| 170 |
+ value := &Node{}
|
|
| 171 |
+ |
|
| 172 |
+ name.Next = value |
|
| 173 |
+ name.Value = parts[0] |
|
| 174 |
+ value.Value = parts[1] |
|
| 175 |
+ |
|
| 176 |
+ if i == 0 {
|
|
| 177 |
+ rootnode = name |
|
| 178 |
+ } else {
|
|
| 179 |
+ prevNode.Next = name |
|
| 180 |
+ } |
|
| 181 |
+ prevNode = value |
|
| 182 |
+ } |
|
| 183 |
+ } |
|
| 184 |
+ |
|
| 185 |
+ return rootnode, nil, nil |
|
| 186 |
+} |
|
| 187 |
+ |
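Concretely, both spellings come out as one alternating name/value chain. A hedged in-package helper that walks the result (illustrative only, since parseNameVal is unexported):

    package parser

    import "fmt"

    // dumpChain prints the node chain produced by parseNameVal.
    func dumpChain(rest, key string) {
        node, _, err := parseNameVal(rest, key)
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        for ; node != nil; node = node.Next {
            fmt.Printf("%q ", node.Value)
        }
        fmt.Println()
    }

    // dumpChain("myName John Doe", "ENV")       -> "myName" "John Doe"
    // dumpChain("myName=John myDog=Rex", "ENV") -> "myName" "John" "myDog" "Rex"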
|
| 188 |
+func parseEnv(rest string) (*Node, map[string]bool, error) {
|
|
| 189 |
+ return parseNameVal(rest, "ENV") |
|
| 190 |
+} |
|
| 191 |
+ |
|
| 192 |
+func parseLabel(rest string) (*Node, map[string]bool, error) {
|
|
| 193 |
+ return parseNameVal(rest, "LABEL") |
|
| 194 |
+} |
|
| 195 |
+ |
|
| 196 |
+// parses a statement containing one or more keyword definition(s) and/or |
|
| 197 |
+// value assignments, like `name1 name2= name3="" name4=value`. |
|
| 198 |
+// Note that this is a stricter format than the old format of assignment, |
|
| 199 |
+// allowed by parseNameVal(), in a way that this only allows assignment of the |
|
| 200 |
+// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above. |
|
| 201 |
+// In addition, a keyword definition alone is of the form `keyword` like `name1` |
|
| 202 |
+// above. And the assignments `name2=` and `name3=""` are equivalent and |
|
| 203 |
+// assign an empty value to the respective keywords. |
|
| 204 |
+func parseNameOrNameVal(rest string) (*Node, map[string]bool, error) {
|
|
| 205 |
+ words := parseWords(rest) |
|
| 206 |
+ if len(words) == 0 {
|
|
| 207 |
+ return nil, nil, nil |
|
| 208 |
+ } |
|
| 209 |
+ |
|
| 210 |
+ var ( |
|
| 211 |
+ rootnode *Node |
|
| 212 |
+ prevNode *Node |
|
| 213 |
+ ) |
|
| 214 |
+ for i, word := range words {
|
|
| 215 |
+ node := &Node{}
|
|
| 216 |
+ node.Value = word |
|
| 217 |
+ if i == 0 {
|
|
| 218 |
+ rootnode = node |
|
| 219 |
+ } else {
|
|
| 220 |
+ prevNode.Next = node |
|
| 221 |
+ } |
|
| 222 |
+ prevNode = node |
|
| 223 |
+ } |
|
| 224 |
+ |
|
| 225 |
+ return rootnode, nil, nil |
|
| 226 |
+} |
|
| 227 |
+ |
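For ARG this means a bare keyword and explicit assignments can mix freely; each word simply becomes one node, and splitting on = is left to the evaluator. Illustrative chains:

    // parseNameOrNameVal("name1")          -> "name1"
    // parseNameOrNameVal("name2= name3=v") -> "name2=" -> "name3=v"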
|
| 228 |
+// parses a whitespace-delimited set of arguments. The result is effectively a |
|
| 229 |
+// linked list of string arguments. |
|
| 230 |
+func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
|
|
| 231 |
+ if rest == "" {
|
|
| 232 |
+ return nil, nil, nil |
|
| 233 |
+ } |
|
| 234 |
+ |
|
| 235 |
+ node := &Node{}
|
|
| 236 |
+ rootnode := node |
|
| 237 |
+ prevnode := node |
|
| 238 |
+ for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
|
|
| 239 |
+ prevnode = node |
|
| 240 |
+ node.Value = str |
|
| 241 |
+ node.Next = &Node{}
|
|
| 242 |
+ node = node.Next |
|
| 243 |
+ } |
|
| 244 |
+ |
|
| 245 |
+ // XXX to get around regexp.Split *always* providing an empty string at the |
|
| 246 |
+ // end due to how our loop is constructed, nil out the last node in the |
|
| 247 |
+ // chain. |
|
| 248 |
+ prevnode.Next = nil |
|
| 249 |
+ |
|
| 250 |
+ return rootnode, nil, nil |
|
| 251 |
+} |
|
| 252 |
+ |
|
| 253 |
+// parsestring just wraps the string in quotes and returns a working node. |
|
| 254 |
+func parseString(rest string) (*Node, map[string]bool, error) {
|
|
| 255 |
+ if rest == "" {
|
|
| 256 |
+ return nil, nil, nil |
|
| 257 |
+ } |
|
| 258 |
+ n := &Node{}
|
|
| 259 |
+ n.Value = rest |
|
| 260 |
+ return n, nil, nil |
|
| 261 |
+} |
|
| 262 |
+ |
|
| 263 |
+// parseJSON converts JSON arrays to an AST. |
|
| 264 |
+func parseJSON(rest string) (*Node, map[string]bool, error) {
|
|
| 265 |
+ rest = strings.TrimLeftFunc(rest, unicode.IsSpace) |
|
| 266 |
+ if !strings.HasPrefix(rest, "[") {
|
|
| 267 |
+ return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) |
|
| 268 |
+ } |
|
| 269 |
+ |
|
| 270 |
+ var myJSON []interface{}
|
|
| 271 |
+ if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
|
|
| 272 |
+ return nil, nil, err |
|
| 273 |
+ } |
|
| 274 |
+ |
|
| 275 |
+ var top, prev *Node |
|
| 276 |
+ for _, str := range myJSON {
|
|
| 277 |
+ s, ok := str.(string) |
|
| 278 |
+ if !ok {
|
|
| 279 |
+ return nil, nil, errDockerfileNotStringArray |
|
| 280 |
+ } |
|
| 281 |
+ |
|
| 282 |
+ node := &Node{Value: s}
|
|
| 283 |
+ if prev == nil {
|
|
| 284 |
+ top = node |
|
| 285 |
+ } else {
|
|
| 286 |
+ prev.Next = node |
|
| 287 |
+ } |
|
| 288 |
+ prev = node |
|
| 289 |
+ } |
|
| 290 |
+ |
|
| 291 |
+ return top, map[string]bool{"json": true}, nil
|
|
| 292 |
+} |
|
| 293 |
+ |
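For an exec-form instruction this yields a plain linked list plus the json attribute; any non-string element short-circuits. Illustrative results:

    // parseJSON(`["/bin/sh", "-c", "echo hi"]`)
    //   -> nodes: "/bin/sh" -> "-c" -> "echo hi"
    //   -> attrs: map[string]bool{"json": true}
    //
    // parseJSON(`["a", 42]`) -> errDockerfileNotStringArray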
|
| 294 |
+// parseMaybeJSON determines if the argument appears to be a JSON array. If |
|
| 295 |
+// so, passes to parseJSON; if not, quotes the result and returns a single |
|
| 296 |
+// node. |
|
| 297 |
+func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
|
|
| 298 |
+ if rest == "" {
|
|
| 299 |
+ return nil, nil, nil |
|
| 300 |
+ } |
|
| 301 |
+ |
|
| 302 |
+ node, attrs, err := parseJSON(rest) |
|
| 303 |
+ |
|
| 304 |
+ if err == nil {
|
|
| 305 |
+ return node, attrs, nil |
|
| 306 |
+ } |
|
| 307 |
+ if err == errDockerfileNotStringArray {
|
|
| 308 |
+ return nil, nil, err |
|
| 309 |
+ } |
|
| 310 |
+ |
|
| 311 |
+ node = &Node{}
|
|
| 312 |
+ node.Value = rest |
|
| 313 |
+ return node, nil, nil |
|
| 314 |
+} |
|
| 315 |
+ |
|
| 316 |
+// parseMaybeJSONToList determines if the argument appears to be a JSON array. If |
|
| 317 |
+// so, passes to parseJSON; if not, attempts to parse it as a whitespace |
|
| 318 |
+// delimited string. |
|
| 319 |
+func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
|
|
| 320 |
+ node, attrs, err := parseJSON(rest) |
|
| 321 |
+ |
|
| 322 |
+ if err == nil {
|
|
| 323 |
+ return node, attrs, nil |
|
| 324 |
+ } |
|
| 325 |
+ if err == errDockerfileNotStringArray {
|
|
| 326 |
+ return nil, nil, err |
|
| 327 |
+ } |
|
| 328 |
+ |
|
| 329 |
+ return parseStringsWhitespaceDelimited(rest) |
|
| 330 |
+} |
| 0 | 331 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,145 @@ |
| 0 |
+// Package parser implements a parser and parse tree dumper for Dockerfiles. |
|
| 1 |
+package parser |
|
| 2 |
+ |
|
| 3 |
+import ( |
|
| 4 |
+ "bufio" |
|
| 5 |
+ "io" |
|
| 6 |
+ "regexp" |
|
| 7 |
+ "strings" |
|
| 8 |
+ "unicode" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/docker/docker/builder/dockerfile/command" |
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+// Node is a structure used to represent a parse tree. |
|
| 14 |
+// |
|
| 15 |
+// In the node there are three fields, Value, Next, and Children. Value is the |
|
| 16 |
+// current token's string value. Next is always the next non-child token, and |
|
| 17 |
+// Children contains all the children. Here's an example: |
|
| 18 |
+// |
|
| 19 |
+// (value next (child child-next child-next-next) next-next) |
|
| 20 |
+// |
|
| 21 |
+// This data structure is frankly pretty lousy for handling complex languages, |
|
| 22 |
+// but lucky for us the Dockerfile isn't very complicated. This structure |
|
| 23 |
+// works a little more effectively than a "proper" parse tree for our needs. |
|
| 24 |
+// |
|
| 25 |
+type Node struct {
|
|
| 26 |
+ Value string // actual content |
|
| 27 |
+ Next *Node // the next item in the current sexp |
|
| 28 |
+ Children []*Node // the children of this sexp |
|
| 29 |
+ Attributes map[string]bool // special attributes for this node |
|
| 30 |
+ Original string // original line used before parsing |
|
| 31 |
+ Flags []string // only top Node should have this set |
|
| 32 |
+} |
|
| 33 |
+ |
|
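As a sketch of how a consumer walks this shape (the helper below is illustrative, not part of the package): the root's Children are the Dockerfile's instructions, and each instruction's Next chain holds its arguments.

    package parser

    import "fmt"

    // printInstructions is a hypothetical helper that flattens the tree
    // back into one line per instruction.
    func printInstructions(root *Node) {
        for _, instr := range root.Children {
            fmt.Print(instr.Value) // e.g. "from", "run"
            for arg := instr.Next; arg != nil; arg = arg.Next {
                fmt.Printf(" %q", arg.Value)
            }
            fmt.Println()
        }
    }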
| 34 |
+var ( |
|
| 35 |
+ dispatch map[string]func(string) (*Node, map[string]bool, error) |
|
| 36 |
+ tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) |
|
| 37 |
+ tokenLineContinuation = regexp.MustCompile(`\\[ \t]*$`) |
|
| 38 |
+ tokenComment = regexp.MustCompile(`^#.*$`) |
|
| 39 |
+) |
|
| 40 |
+ |
|
| 41 |
+func init() {
|
|
| 42 |
+ // Dispatch Table. see line_parsers.go for the parse functions. |
|
| 43 |
+ // The command is parsed and mapped to the line parser. The line parser |
|
| 44 |
+ // receives the arguments but not the command, and returns an AST after |
|
| 45 |
+ // reformulating the arguments according to the rules in the parser |
|
| 46 |
+ // functions. Errors are propagated up by Parse() and the resulting AST can |
|
| 47 |
+ // be incorporated directly into the existing AST as a next. |
|
| 48 |
+ dispatch = map[string]func(string) (*Node, map[string]bool, error){
|
|
| 49 |
+ command.User: parseString, |
|
| 50 |
+ command.Onbuild: parseSubCommand, |
|
| 51 |
+ command.Workdir: parseString, |
|
| 52 |
+ command.Env: parseEnv, |
|
| 53 |
+ command.Label: parseLabel, |
|
| 54 |
+ command.Maintainer: parseString, |
|
| 55 |
+ command.From: parseString, |
|
| 56 |
+ command.Add: parseMaybeJSONToList, |
|
| 57 |
+ command.Copy: parseMaybeJSONToList, |
|
| 58 |
+ command.Run: parseMaybeJSON, |
|
| 59 |
+ command.Cmd: parseMaybeJSON, |
|
| 60 |
+ command.Entrypoint: parseMaybeJSON, |
|
| 61 |
+ command.Expose: parseStringsWhitespaceDelimited, |
|
| 62 |
+ command.Volume: parseMaybeJSONToList, |
|
| 63 |
+ command.StopSignal: parseString, |
|
| 64 |
+ command.Arg: parseNameOrNameVal, |
|
| 65 |
+ } |
|
| 66 |
+} |
|
| 67 |
+ |
|
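The lookup itself is a one-liner; a sketch mirroring fullDispatch (which appears later in this diff), where unknown instructions fall back to parseIgnore rather than erroring:

    package parser

    // resolveParser is a hypothetical helper showing the table lookup;
    // fullDispatch below does exactly this before invoking the parser.
    func resolveParser(cmd string) func(string) (*Node, map[string]bool, error) {
        if fn, ok := dispatch[cmd]; ok && fn != nil {
            return fn
        }
        return parseIgnore // unknown instructions are ignored, not rejected
    }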
| 68 |
+// parseLine parses a single line; it returns the remainder if the line ends in a continuation, otherwise the parsed node. |
|
| 69 |
+func parseLine(line string) (string, *Node, error) {
|
|
| 70 |
+ if line = stripComments(line); line == "" {
|
|
| 71 |
+ return "", nil, nil |
|
| 72 |
+ } |
|
| 73 |
+ |
|
| 74 |
+ if tokenLineContinuation.MatchString(line) {
|
|
| 75 |
+ line = tokenLineContinuation.ReplaceAllString(line, "") |
|
| 76 |
+ return line, nil, nil |
|
| 77 |
+ } |
|
| 78 |
+ |
|
| 79 |
+ cmd, flags, args, err := splitCommand(line) |
|
| 80 |
+ if err != nil {
|
|
| 81 |
+ return "", nil, err |
|
| 82 |
+ } |
|
| 83 |
+ |
|
| 84 |
+ node := &Node{}
|
|
| 85 |
+ node.Value = cmd |
|
| 86 |
+ |
|
| 87 |
+ sexp, attrs, err := fullDispatch(cmd, args) |
|
| 88 |
+ if err != nil {
|
|
| 89 |
+ return "", nil, err |
|
| 90 |
+ } |
|
| 91 |
+ |
|
| 92 |
+ node.Next = sexp |
|
| 93 |
+ node.Attributes = attrs |
|
| 94 |
+ node.Original = line |
|
| 95 |
+ node.Flags = flags |
|
| 96 |
+ |
|
| 97 |
+ return "", node, nil |
|
| 98 |
+} |
|
| 99 |
+ |
|
| 100 |
+// Parse is the main parse routine. |
|
| 101 |
+// It reads from an io.Reader and returns the root of the AST. |
|
| 102 |
+func Parse(rwc io.Reader) (*Node, error) {
|
|
| 103 |
+ root := &Node{}
|
|
| 104 |
+ scanner := bufio.NewScanner(rwc) |
|
| 105 |
+ |
|
| 106 |
+ for scanner.Scan() {
|
|
| 107 |
+ scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) |
|
| 108 |
+ line, child, err := parseLine(scannedLine) |
|
| 109 |
+ if err != nil {
|
|
| 110 |
+ return nil, err |
|
| 111 |
+ } |
|
| 112 |
+ |
|
| 113 |
+ if line != "" && child == nil {
|
|
| 114 |
+ for scanner.Scan() {
|
|
| 115 |
+ newline := scanner.Text() |
|
| 116 |
+ |
|
| 117 |
+ if stripComments(strings.TrimSpace(newline)) == "" {
|
|
| 118 |
+ continue |
|
| 119 |
+ } |
|
| 120 |
+ |
|
| 121 |
+ line, child, err = parseLine(line + newline) |
|
| 122 |
+ if err != nil {
|
|
| 123 |
+ return nil, err |
|
| 124 |
+ } |
|
| 125 |
+ |
|
| 126 |
+ if child != nil {
|
|
| 127 |
+ break |
|
| 128 |
+ } |
|
| 129 |
+ } |
|
| 130 |
+ if child == nil && line != "" {
|
|
| 131 |
+ line, child, err = parseLine(line) |
|
| 132 |
+ if err != nil {
|
|
| 133 |
+ return nil, err |
|
| 134 |
+ } |
|
| 135 |
+ } |
|
| 136 |
+ } |
|
| 137 |
+ |
|
| 138 |
+ if child != nil {
|
|
| 139 |
+ root.Children = append(root.Children, child) |
|
| 140 |
+ } |
|
| 141 |
+ } |
|
| 142 |
+ |
|
| 143 |
+ return root, nil |
|
| 144 |
+} |
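End to end, the exported surface is just Parse plus the Dump method; a hedged usage sketch, assuming the builder/dockerfile/parser import path this move introduces. Parse folds continuation lines before dispatch and collects one child per instruction.

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/docker/builder/dockerfile/parser"
    )

    func main() {
        df := "FROM ubuntu:14.04\nEXPOSE 80 443\n"
        ast, err := parser.Parse(strings.NewReader(df))
        if err != nil {
            panic(err)
        }
        fmt.Println(ast.Dump())
        // Prints:
        // (from "ubuntu:14.04")
        // (expose "80" "443")
    }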
| 0 | 145 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,112 @@ |
| 0 |
+package parser |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "io/ioutil" |
|
| 5 |
+ "os" |
|
| 6 |
+ "path/filepath" |
|
| 7 |
+ "testing" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+const testDir = "testfiles" |
|
| 11 |
+const negativeTestDir = "testfiles-negative" |
|
| 12 |
+ |
|
| 13 |
+func getDirs(t *testing.T, dir string) []string {
|
|
| 14 |
+ f, err := os.Open(dir) |
|
| 15 |
+ if err != nil {
|
|
| 16 |
+ t.Fatal(err) |
|
| 17 |
+ } |
|
| 18 |
+ |
|
| 19 |
+ defer f.Close() |
|
| 20 |
+ |
|
| 21 |
+ dirs, err := f.Readdirnames(0) |
|
| 22 |
+ if err != nil {
|
|
| 23 |
+ t.Fatal(err) |
|
| 24 |
+ } |
|
| 25 |
+ |
|
| 26 |
+ return dirs |
|
| 27 |
+} |
|
| 28 |
+ |
|
| 29 |
+func TestTestNegative(t *testing.T) {
|
|
| 30 |
+ for _, dir := range getDirs(t, negativeTestDir) {
|
|
| 31 |
+ dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") |
|
| 32 |
+ |
|
| 33 |
+ df, err := os.Open(dockerfile) |
|
| 34 |
+ if err != nil {
|
|
| 35 |
+ t.Fatalf("Dockerfile missing for %s: %v", dir, err)
|
|
| 36 |
+ } |
|
| 37 |
+ |
|
| 38 |
+ _, err = Parse(df) |
|
| 39 |
+ if err == nil {
|
|
| 40 |
+ t.Fatalf("No error parsing broken dockerfile for %s", dir)
|
|
| 41 |
+ } |
|
| 42 |
+ |
|
| 43 |
+ df.Close() |
|
| 44 |
+ } |
|
| 45 |
+} |
|
| 46 |
+ |
|
| 47 |
+func TestTestData(t *testing.T) {
|
|
| 48 |
+ for _, dir := range getDirs(t, testDir) {
|
|
| 49 |
+ dockerfile := filepath.Join(testDir, dir, "Dockerfile") |
|
| 50 |
+ resultfile := filepath.Join(testDir, dir, "result") |
|
| 51 |
+ |
|
| 52 |
+ df, err := os.Open(dockerfile) |
|
| 53 |
+ if err != nil {
|
|
| 54 |
+ t.Fatalf("Dockerfile missing for %s: %v", dir, err)
|
|
| 55 |
+ } |
|
| 56 |
+ defer df.Close() |
|
| 57 |
+ |
|
| 58 |
+ ast, err := Parse(df) |
|
| 59 |
+ if err != nil {
|
|
| 60 |
+ t.Fatalf("Error parsing %s's dockerfile: %v", dir, err)
|
|
| 61 |
+ } |
|
| 62 |
+ |
|
| 63 |
+ content, err := ioutil.ReadFile(resultfile) |
|
| 64 |
+ if err != nil {
|
|
| 65 |
+ t.Fatalf("Error reading %s's result file: %v", dir, err)
|
|
| 66 |
+ } |
|
| 67 |
+ |
|
| 68 |
+ if ast.Dump()+"\n" != string(content) {
|
|
| 69 |
+ fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) |
|
| 70 |
+ fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) |
|
| 71 |
+ t.Fatalf("%s: AST dump of dockerfile does not match result", dir)
|
|
| 72 |
+ } |
|
| 73 |
+ } |
|
| 74 |
+} |
|
| 75 |
+ |
|
| 76 |
+func TestParseWords(t *testing.T) {
|
|
| 77 |
+ tests := []map[string][]string{
|
|
| 78 |
+ {
|
|
| 79 |
+ "input": {"foo"},
|
|
| 80 |
+ "expect": {"foo"},
|
|
| 81 |
+ }, |
|
| 82 |
+ {
|
|
| 83 |
+ "input": {"foo bar"},
|
|
| 84 |
+ "expect": {"foo", "bar"},
|
|
| 85 |
+ }, |
|
| 86 |
+ {
|
|
| 87 |
+ "input": {"foo=bar"},
|
|
| 88 |
+ "expect": {"foo=bar"},
|
|
| 89 |
+ }, |
|
| 90 |
+ {
|
|
| 91 |
+ "input": {"foo bar 'abc xyz'"},
|
|
| 92 |
+ "expect": {"foo", "bar", "'abc xyz'"},
|
|
| 93 |
+ }, |
|
| 94 |
+ {
|
|
| 95 |
+ "input": {`foo bar "abc xyz"`},
|
|
| 96 |
+ "expect": {"foo", "bar", `"abc xyz"`},
|
|
| 97 |
+ }, |
|
| 98 |
+ } |
|
| 99 |
+ |
|
| 100 |
+ for _, test := range tests {
|
|
| 101 |
+ words := parseWords(test["input"][0]) |
|
| 102 |
+ if len(words) != len(test["expect"]) {
|
|
| 103 |
+ t.Fatalf("length check failed. input: %v, expect: %v, output: %v", test["input"][0], test["expect"], words)
|
|
| 104 |
+ } |
|
| 105 |
+ for i, word := range words {
|
|
| 106 |
+ if word != test["expect"][i] {
|
|
| 107 |
+ t.Fatalf("word check failed for word: %q. input: %v, expect: %v, output: %v", word, test["input"][0], test["expect"], words)
|
|
| 108 |
+ } |
|
| 109 |
+ } |
|
| 110 |
+ } |
|
| 111 |
+} |
| 0 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,11 @@ |
| 0 |
+FROM ubuntu:14.04 |
|
| 1 |
+MAINTAINER Seongyeol Lim <seongyeol37@gmail.com> |
|
| 2 |
+ |
|
| 3 |
+COPY . /go/src/github.com/docker/docker |
|
| 4 |
+ADD . / |
|
| 5 |
+ADD null / |
|
| 6 |
+COPY nullfile /tmp |
|
| 7 |
+ADD [ "vimrc", "/tmp" ] |
|
| 8 |
+COPY [ "bashrc", "/tmp" ] |
|
| 9 |
+COPY [ "test file", "/tmp" ] |
|
| 10 |
+ADD [ "test file", "/tmp/test file" ] |
| 0 | 11 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,10 @@ |
| 0 |
+(from "ubuntu:14.04") |
|
| 1 |
+(maintainer "Seongyeol Lim <seongyeol37@gmail.com>") |
|
| 2 |
+(copy "." "/go/src/github.com/docker/docker") |
|
| 3 |
+(add "." "/") |
|
| 4 |
+(add "null" "/") |
|
| 5 |
+(copy "nullfile" "/tmp") |
|
| 6 |
+(add "vimrc" "/tmp") |
|
| 7 |
+(copy "bashrc" "/tmp") |
|
| 8 |
+(copy "test file" "/tmp") |
|
| 9 |
+(add "test file" "/tmp/test file") |
| 0 | 10 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,25 @@ |
| 0 |
+FROM brimstone/ubuntu:14.04 |
|
| 1 |
+ |
|
| 2 |
+MAINTAINER brimstone@the.narro.ws |
|
| 3 |
+ |
|
| 4 |
+# TORUN -v /var/run/docker.sock:/var/run/docker.sock |
|
| 5 |
+ |
|
| 6 |
+ENV GOPATH /go |
|
| 7 |
+ |
|
| 8 |
+# Set our command |
|
| 9 |
+ENTRYPOINT ["/usr/local/bin/consuldock"] |
|
| 10 |
+ |
|
| 11 |
+# Install the packages we need, clean up after them and us |
|
| 12 |
+RUN apt-get update \ |
|
| 13 |
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
|
| 14 |
+ && apt-get install -y --no-install-recommends git golang ca-certificates \ |
|
| 15 |
+ && apt-get clean \ |
|
| 16 |
+ && rm -rf /var/lib/apt/lists \ |
|
| 17 |
+ |
|
| 18 |
+ && go get -v github.com/brimstone/consuldock \ |
|
| 19 |
+ && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ |
|
| 20 |
+ |
|
| 21 |
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
|
| 22 |
+ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
|
| 23 |
+ && rm /tmp/dpkg.* \ |
|
| 24 |
+ && rm -rf $GOPATH |
| 0 | 25 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,5 @@ |
| 0 |
+(from "brimstone/ubuntu:14.04") |
|
| 1 |
+(maintainer "brimstone@the.narro.ws") |
|
| 2 |
+(env "GOPATH" "/go") |
|
| 3 |
+(entrypoint "/usr/local/bin/consuldock") |
|
| 4 |
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
|
| 0 | 5 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,52 @@ |
| 0 |
+FROM brimstone/ubuntu:14.04 |
|
| 1 |
+ |
|
| 2 |
+CMD [] |
|
| 3 |
+ |
|
| 4 |
+ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] |
|
| 5 |
+ |
|
| 6 |
+EXPOSE 8500 8600 8400 8301 8302 |
|
| 7 |
+ |
|
| 8 |
+RUN apt-get update \ |
|
| 9 |
+ && apt-get install -y unzip wget \ |
|
| 10 |
+ && apt-get clean \ |
|
| 11 |
+ && rm -rf /var/lib/apt/lists |
|
| 12 |
+ |
|
| 13 |
+RUN cd /tmp \ |
|
| 14 |
+ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ |
|
| 15 |
+ -O web_ui.zip \ |
|
| 16 |
+ && unzip web_ui.zip \ |
|
| 17 |
+ && mv dist /webui \ |
|
| 18 |
+ && rm web_ui.zip |
|
| 19 |
+ |
|
| 20 |
+RUN apt-get update \ |
|
| 21 |
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
|
| 22 |
+ && apt-get install -y --no-install-recommends unzip wget \ |
|
| 23 |
+ && apt-get clean \ |
|
| 24 |
+ && rm -rf /var/lib/apt/lists \ |
|
| 25 |
+ |
|
| 26 |
+ && cd /tmp \ |
|
| 27 |
+ && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ |
|
| 28 |
+ -O web_ui.zip \ |
|
| 29 |
+ && unzip web_ui.zip \ |
|
| 30 |
+ && mv dist /webui \ |
|
| 31 |
+ && rm web_ui.zip \ |
|
| 32 |
+ |
|
| 33 |
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
|
| 34 |
+ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
|
| 35 |
+ && rm /tmp/dpkg.* |
|
| 36 |
+ |
|
| 37 |
+ENV GOPATH /go |
|
| 38 |
+ |
|
| 39 |
+RUN apt-get update \ |
|
| 40 |
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
|
| 41 |
+ && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ |
|
| 42 |
+ && apt-get clean \ |
|
| 43 |
+ && rm -rf /var/lib/apt/lists \ |
|
| 44 |
+ |
|
| 45 |
+ && go get -v github.com/hashicorp/consul \ |
|
| 46 |
+ && mv $GOPATH/bin/consul /usr/bin/consul \ |
|
| 47 |
+ |
|
| 48 |
+ && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
|
| 49 |
+ && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
|
| 50 |
+ && rm /tmp/dpkg.* \ |
|
| 51 |
+ && rm -rf $GOPATH |
| 0 | 52 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,9 @@ |
| 0 |
+(from "brimstone/ubuntu:14.04") |
|
| 1 |
+(cmd) |
|
| 2 |
+(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") |
|
| 3 |
+(expose "8500" "8600" "8400" "8301" "8302") |
|
| 4 |
+(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") |
|
| 5 |
+(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") |
|
| 6 |
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*")
|
|
| 7 |
+(env "GOPATH" "/go") |
|
| 8 |
+(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
|
| 0 | 9 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,36 @@ |
| 0 |
+FROM ubuntu:14.04 |
|
| 1 |
+ |
|
| 2 |
+RUN echo hello\ |
|
| 3 |
+ world\ |
|
| 4 |
+ goodnight \ |
|
| 5 |
+ moon\ |
|
| 6 |
+ light\ |
|
| 7 |
+ning |
|
| 8 |
+RUN echo hello \ |
|
| 9 |
+ world |
|
| 10 |
+RUN echo hello \ |
|
| 11 |
+world |
|
| 12 |
+RUN echo hello \ |
|
| 13 |
+goodbye\ |
|
| 14 |
+frog |
|
| 15 |
+RUN echo hello \ |
|
| 16 |
+world |
|
| 17 |
+RUN echo hi \ |
|
| 18 |
+ \ |
|
| 19 |
+ world \ |
|
| 20 |
+\ |
|
| 21 |
+ good\ |
|
| 22 |
+\ |
|
| 23 |
+night |
|
| 24 |
+RUN echo goodbye\ |
|
| 25 |
+frog |
|
| 26 |
+RUN echo good\ |
|
| 27 |
+bye\ |
|
| 28 |
+frog |
|
| 29 |
+ |
|
| 30 |
+RUN echo hello \ |
|
| 31 |
+# this is a comment |
|
| 32 |
+ |
|
| 33 |
+# this is a comment with a blank line surrounding it |
|
| 34 |
+ |
|
| 35 |
+this is some more useful stuff |
| 0 | 36 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,10 @@ |
| 0 |
+(from "ubuntu:14.04") |
|
| 1 |
+(run "echo hello world goodnight moon lightning") |
|
| 2 |
+(run "echo hello world") |
|
| 3 |
+(run "echo hello world") |
|
| 4 |
+(run "echo hello goodbyefrog") |
|
| 5 |
+(run "echo hello world") |
|
| 6 |
+(run "echo hi world goodnight") |
|
| 7 |
+(run "echo goodbyefrog") |
|
| 8 |
+(run "echo goodbyefrog") |
|
| 9 |
+(run "echo hello this is some more useful stuff") |
| 0 | 10 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,54 @@ |
| 0 |
+FROM cpuguy83/ubuntu |
|
| 1 |
+ENV NAGIOS_HOME /opt/nagios |
|
| 2 |
+ENV NAGIOS_USER nagios |
|
| 3 |
+ENV NAGIOS_GROUP nagios |
|
| 4 |
+ENV NAGIOS_CMDUSER nagios |
|
| 5 |
+ENV NAGIOS_CMDGROUP nagios |
|
| 6 |
+ENV NAGIOSADMIN_USER nagiosadmin |
|
| 7 |
+ENV NAGIOSADMIN_PASS nagios |
|
| 8 |
+ENV APACHE_RUN_USER nagios |
|
| 9 |
+ENV APACHE_RUN_GROUP nagios |
|
| 10 |
+ENV NAGIOS_TIMEZONE UTC |
|
| 11 |
+ |
|
| 12 |
+RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list |
|
| 13 |
+RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx |
|
| 14 |
+RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
|
|
| 15 |
+RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) |
|
| 16 |
+ |
|
| 17 |
+ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz |
|
| 18 |
+RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
|
|
| 19 |
+ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ |
|
| 20 |
+RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install
|
|
| 21 |
+ |
|
| 22 |
+RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars |
|
| 23 |
+RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default |
|
| 24 |
+ |
|
| 25 |
+RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo
|
|
| 26 |
+ |
|
| 27 |
+RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf
|
|
| 28 |
+ |
|
| 29 |
+RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
|
|
| 30 |
+RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
|
|
| 31 |
+RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
|
|
| 32 |
+RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf |
|
| 33 |
+ |
|
| 34 |
+RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ |
|
| 35 |
+ sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg |
|
| 36 |
+RUN cp /etc/services /var/spool/postfix/etc/ |
|
| 37 |
+ |
|
| 38 |
+RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix |
|
| 39 |
+ADD nagios.init /etc/sv/nagios/run |
|
| 40 |
+ADD apache.init /etc/sv/apache/run |
|
| 41 |
+ADD postfix.init /etc/sv/postfix/run |
|
| 42 |
+ADD postfix.stop /etc/sv/postfix/finish |
|
| 43 |
+ |
|
| 44 |
+ADD start.sh /usr/local/bin/start_nagios |
|
| 45 |
+ |
|
| 46 |
+ENV APACHE_LOCK_DIR /var/run |
|
| 47 |
+ENV APACHE_LOG_DIR /var/log/apache2 |
|
| 48 |
+ |
|
| 49 |
+EXPOSE 80 |
|
| 50 |
+ |
|
| 51 |
+VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] |
|
| 52 |
+ |
|
| 53 |
+CMD ["/usr/local/bin/start_nagios"] |
| 0 | 54 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,40 @@ |
| 0 |
+(from "cpuguy83/ubuntu") |
|
| 1 |
+(env "NAGIOS_HOME" "/opt/nagios") |
|
| 2 |
+(env "NAGIOS_USER" "nagios") |
|
| 3 |
+(env "NAGIOS_GROUP" "nagios") |
|
| 4 |
+(env "NAGIOS_CMDUSER" "nagios") |
|
| 5 |
+(env "NAGIOS_CMDGROUP" "nagios") |
|
| 6 |
+(env "NAGIOSADMIN_USER" "nagiosadmin") |
|
| 7 |
+(env "NAGIOSADMIN_PASS" "nagios") |
|
| 8 |
+(env "APACHE_RUN_USER" "nagios") |
|
| 9 |
+(env "APACHE_RUN_GROUP" "nagios") |
|
| 10 |
+(env "NAGIOS_TIMEZONE" "UTC") |
|
| 11 |
+(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") |
|
| 12 |
+(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") |
|
| 13 |
+(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
|
|
| 14 |
+(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") |
|
| 15 |
+(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") |
|
| 16 |
+(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
|
|
| 17 |
+(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") |
|
| 18 |
+(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
|
|
| 19 |
+(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") |
|
| 20 |
+(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") |
|
| 21 |
+(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
|
|
| 22 |
+(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
|
|
| 23 |
+(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
|
|
| 24 |
+(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
|
|
| 25 |
+(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
|
|
| 26 |
+(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") |
|
| 27 |
+(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") |
|
| 28 |
+(run "cp /etc/services /var/spool/postfix/etc/") |
|
| 29 |
+(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") |
|
| 30 |
+(add "nagios.init" "/etc/sv/nagios/run") |
|
| 31 |
+(add "apache.init" "/etc/sv/apache/run") |
|
| 32 |
+(add "postfix.init" "/etc/sv/postfix/run") |
|
| 33 |
+(add "postfix.stop" "/etc/sv/postfix/finish") |
|
| 34 |
+(add "start.sh" "/usr/local/bin/start_nagios") |
|
| 35 |
+(env "APACHE_LOCK_DIR" "/var/run") |
|
| 36 |
+(env "APACHE_LOG_DIR" "/var/log/apache2") |
|
| 37 |
+(expose "80") |
|
| 38 |
+(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") |
|
| 39 |
+(cmd "/usr/local/bin/start_nagios") |
| 0 | 40 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,104 @@ |
| 0 |
+# This file describes the standard way to build Docker, using docker |
|
| 1 |
+# |
|
| 2 |
+# Usage: |
|
| 3 |
+# |
|
| 4 |
+# # Assemble the full dev environment. This is slow the first time. |
|
| 5 |
+# docker build -t docker . |
|
| 6 |
+# |
|
| 7 |
+# # Mount your source in an interactive container for quick testing: |
|
| 8 |
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash |
|
| 9 |
+# |
|
| 10 |
+# # Run the test suite: |
|
| 11 |
+# docker run --privileged docker hack/make.sh test |
|
| 12 |
+# |
|
| 13 |
+# # Publish a release: |
|
| 14 |
+# docker run --privileged \ |
|
| 15 |
+# -e AWS_S3_BUCKET=baz \ |
|
| 16 |
+# -e AWS_ACCESS_KEY=foo \ |
|
| 17 |
+# -e AWS_SECRET_KEY=bar \ |
|
| 18 |
+# -e GPG_PASSPHRASE=gloubiboulga \ |
|
| 19 |
+# docker hack/release.sh |
|
| 20 |
+# |
|
| 21 |
+# Note: AppArmor used to mess with privileged mode, but this is no longer |
|
| 22 |
+# the case. Therefore, you don't have to disable it anymore. |
|
| 23 |
+# |
|
| 24 |
+ |
|
| 25 |
+FROM ubuntu:14.04 |
|
| 26 |
+MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon) |
|
| 27 |
+ |
|
| 28 |
+# Packaged dependencies |
|
| 29 |
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ |
|
| 30 |
+ apt-utils \ |
|
| 31 |
+ aufs-tools \ |
|
| 32 |
+ automake \ |
|
| 33 |
+ btrfs-tools \ |
|
| 34 |
+ build-essential \ |
|
| 35 |
+ curl \ |
|
| 36 |
+ dpkg-sig \ |
|
| 37 |
+ git \ |
|
| 38 |
+ iptables \ |
|
| 39 |
+ libapparmor-dev \ |
|
| 40 |
+ libcap-dev \ |
|
| 41 |
+ libsqlite3-dev \ |
|
| 42 |
+ lxc=1.0* \ |
|
| 43 |
+ mercurial \ |
|
| 44 |
+ pandoc \ |
|
| 45 |
+ parallel \ |
|
| 46 |
+ reprepro \ |
|
| 47 |
+ ruby1.9.1 \ |
|
| 48 |
+ ruby1.9.1-dev \ |
|
| 49 |
+ s3cmd=1.1.0* \ |
|
| 50 |
+ --no-install-recommends |
|
| 51 |
+ |
|
| 52 |
+# Get lvm2 source for compiling statically |
|
| 53 |
+RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 |
|
| 54 |
+# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags |
|
| 55 |
+# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly |
|
| 56 |
+ |
|
| 57 |
+# Compile and install lvm2 |
|
| 58 |
+RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper |
|
| 59 |
+# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL |
|
| 60 |
+ |
|
| 61 |
+# Install Go |
|
| 62 |
+RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz |
|
| 63 |
+ENV PATH /usr/local/go/bin:$PATH |
|
| 64 |
+ENV GOPATH /go:/go/src/github.com/docker/docker/vendor |
|
| 65 |
+RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 |
|
| 66 |
+ |
|
| 67 |
+# Compile Go for cross compilation |
|
| 68 |
+ENV DOCKER_CROSSPLATFORMS \ |
|
| 69 |
+ linux/386 linux/arm \ |
|
| 70 |
+ darwin/amd64 darwin/386 \ |
|
| 71 |
+ freebsd/amd64 freebsd/386 freebsd/arm |
|
| 72 |
+# (set an explicit GOARM of 5 for maximum compatibility) |
|
| 73 |
+ENV GOARM 5 |
|
| 74 |
+RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
|
|
| 75 |
+ |
|
| 76 |
+# Grab Go's cover tool for dead-simple code coverage testing |
|
| 77 |
+RUN go get golang.org/x/tools/cmd/cover |
|
| 78 |
+ |
|
| 79 |
+# TODO replace FPM with some very minimal debhelper stuff |
|
| 80 |
+RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 |
|
| 81 |
+ |
|
| 82 |
+# Get the "busybox" image source so we can build locally instead of pulling |
|
| 83 |
+RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox |
|
| 84 |
+ |
|
| 85 |
+# Setup s3cmd config |
|
| 86 |
+RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg |
|
| 87 |
+ |
|
| 88 |
+# Set user.email so crosbymichael's in-container merge commits go smoothly |
|
| 89 |
+RUN git config --global user.email 'docker-dummy@example.com' |
|
| 90 |
+ |
|
| 91 |
+# Add an unprivileged user to be used for tests which need it |
|
| 92 |
+RUN groupadd -r docker |
|
| 93 |
+RUN useradd --create-home --gid docker unprivilegeduser |
|
| 94 |
+ |
|
| 95 |
+VOLUME /var/lib/docker |
|
| 96 |
+WORKDIR /go/src/github.com/docker/docker |
|
| 97 |
+ENV DOCKER_BUILDTAGS apparmor selinux |
|
| 98 |
+ |
|
| 99 |
+# Wrap all commands in the "docker-in-docker" script to allow nested containers |
|
| 100 |
+ENTRYPOINT ["hack/dind"] |
|
| 101 |
+ |
|
| 102 |
+# Upload docker source |
|
| 103 |
+COPY . /go/src/github.com/docker/docker |
| 0 | 104 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,24 @@ |
| 0 |
+(from "ubuntu:14.04") |
|
| 1 |
+(maintainer "Tianon Gravi <admwiggin@gmail.com> (@tianon)") |
|
| 2 |
+(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tlxc=1.0* \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") |
|
| 3 |
+(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") |
|
| 4 |
+(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") |
|
| 5 |
+(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") |
|
| 6 |
+(env "PATH" "/usr/local/go/bin:$PATH") |
|
| 7 |
+(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") |
|
| 8 |
+(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") |
|
| 9 |
+(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") |
|
| 10 |
+(env "GOARM" "5") |
|
| 11 |
+(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
|
|
| 12 |
+(run "go get golang.org/x/tools/cmd/cover") |
|
| 13 |
+(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") |
|
| 14 |
+(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") |
|
| 15 |
+(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") |
|
| 16 |
+(run "git config --global user.email 'docker-dummy@example.com'") |
|
| 17 |
+(run "groupadd -r docker") |
|
| 18 |
+(run "useradd --create-home --gid docker unprivilegeduser") |
|
| 19 |
+(volume "/var/lib/docker") |
|
| 20 |
+(workdir "/go/src/github.com/docker/docker") |
|
| 21 |
+(env "DOCKER_BUILDTAGS" "apparmor selinux") |
|
| 22 |
+(entrypoint "hack/dind") |
|
| 23 |
+(copy "." "/go/src/github.com/docker/docker") |
| 0 | 24 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,23 @@ |
| 0 |
+FROM ubuntu |
|
| 1 |
+ENV name value |
|
| 2 |
+ENV name=value |
|
| 3 |
+ENV name=value name2=value2 |
|
| 4 |
+ENV name="value value1" |
|
| 5 |
+ENV name=value\ value2 |
|
| 6 |
+ENV name="value'quote space'value2" |
|
| 7 |
+ENV name='value"double quote"value2' |
|
| 8 |
+ENV name=value\ value2 name2=value2\ value3 |
|
| 9 |
+ENV name="a\"b" |
|
| 10 |
+ENV name="a\'b" |
|
| 11 |
+ENV name='a\'b' |
|
| 12 |
+ENV name='a\'b'' |
|
| 13 |
+ENV name='a\"b' |
|
| 14 |
+ENV name="''" |
|
| 15 |
+# don't put anything after the next line - it must be the last line of the |
|
| 16 |
+# Dockerfile and it must end with \ |
|
| 17 |
+ENV name=value \ |
|
| 18 |
+ name1=value1 \ |
|
| 19 |
+ name2="value2a \ |
|
| 20 |
+ value2b" \ |
|
| 21 |
+ name3="value3a\n\"value3b\"" \ |
|
| 22 |
+ name4="value4a\\nvalue4b" \ |
| 0 | 23 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,16 @@ |
| 0 |
+(from "ubuntu") |
|
| 1 |
+(env "name" "value") |
|
| 2 |
+(env "name" "value") |
|
| 3 |
+(env "name" "value" "name2" "value2") |
|
| 4 |
+(env "name" "\"value value1\"") |
|
| 5 |
+(env "name" "value\\ value2") |
|
| 6 |
+(env "name" "\"value'quote space'value2\"") |
|
| 7 |
+(env "name" "'value\"double quote\"value2'") |
|
| 8 |
+(env "name" "value\\ value2" "name2" "value2\\ value3") |
|
| 9 |
+(env "name" "\"a\\\"b\"") |
|
| 10 |
+(env "name" "\"a\\'b\"") |
|
| 11 |
+(env "name" "'a\\'b'") |
|
| 12 |
+(env "name" "'a\\'b''") |
|
| 13 |
+(env "name" "'a\\\"b'") |
|
| 14 |
+(env "name" "\"''\"") |
|
| 15 |
+(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") |
| 0 | 16 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,14 @@ |
| 0 |
+FROM ubuntu:14.04 |
|
| 1 |
+MAINTAINER Erik \\Hollensbe <erik@hollensbe.org>\" |
|
| 2 |
+ |
|
| 3 |
+RUN apt-get \update && \ |
|
| 4 |
+ apt-get \"install znc -y |
|
| 5 |
+ADD \conf\\" /.znc |
|
| 6 |
+ |
|
| 7 |
+RUN foo \ |
|
| 8 |
+ |
|
| 9 |
+bar \ |
|
| 10 |
+ |
|
| 11 |
+baz |
|
| 12 |
+ |
|
| 13 |
+CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] |
| 0 | 6 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,10 @@ |
| 0 |
+FROM scratch |
|
| 1 |
+COPY foo /tmp/ |
|
| 2 |
+COPY --user=me foo /tmp/ |
|
| 3 |
+COPY --doit=true foo /tmp/ |
|
| 4 |
+COPY --user=me --doit=true foo /tmp/ |
|
| 5 |
+COPY --doit=true -- foo /tmp/ |
|
| 6 |
+COPY -- foo /tmp/ |
|
| 7 |
+CMD --doit [ "a", "b" ] |
|
| 8 |
+CMD --doit=true -- [ "a", "b" ] |
|
| 9 |
+CMD --doit -- [ ] |
| 0 | 10 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,10 @@ |
| 0 |
+(from "scratch") |
|
| 1 |
+(copy "foo" "/tmp/") |
|
| 2 |
+(copy ["--user=me"] "foo" "/tmp/") |
|
| 3 |
+(copy ["--doit=true"] "foo" "/tmp/") |
|
| 4 |
+(copy ["--user=me" "--doit=true"] "foo" "/tmp/") |
|
| 5 |
+(copy ["--doit=true"] "foo" "/tmp/") |
|
| 6 |
+(copy "foo" "/tmp/") |
|
| 7 |
+(cmd ["--doit"] "a" "b") |
|
| 8 |
+(cmd ["--doit=true"] "a" "b") |
|
| 9 |
+(cmd ["--doit"]) |
| 0 | 10 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,15 @@ |
| 0 |
+FROM ubuntu:14.04 |
|
| 1 |
+ |
|
| 2 |
+RUN apt-get update && apt-get install wget -y |
|
| 3 |
+RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb |
|
| 4 |
+RUN dpkg -i influxdb_latest_amd64.deb |
|
| 5 |
+RUN rm -r /opt/influxdb/shared |
|
| 6 |
+ |
|
| 7 |
+VOLUME /opt/influxdb/shared |
|
| 8 |
+ |
|
| 9 |
+CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml |
|
| 10 |
+ |
|
| 11 |
+EXPOSE 8083 |
|
| 12 |
+EXPOSE 8086 |
|
| 13 |
+EXPOSE 8090 |
|
| 14 |
+EXPOSE 8099 |
| 0 | 15 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,11 @@ |
| 0 |
+(from "ubuntu:14.04") |
|
| 1 |
+(run "apt-get update && apt-get install wget -y") |
|
| 2 |
+(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") |
|
| 3 |
+(run "dpkg -i influxdb_latest_amd64.deb") |
|
| 4 |
+(run "rm -r /opt/influxdb/shared") |
|
| 5 |
+(volume "/opt/influxdb/shared") |
|
| 6 |
+(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") |
|
| 7 |
+(expose "8083") |
|
| 8 |
+(expose "8086") |
|
| 9 |
+(expose "8090") |
|
| 10 |
+(expose "8099") |
| 0 | 7 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,7 @@ |
| 0 |
+(from "ubuntu:14.04") |
|
| 1 |
+(maintainer "James Turnbull \"james@example.com\"") |
|
| 2 |
+(env "REFRESHED_AT" "2014-06-01") |
|
| 3 |
+(run "apt-get update") |
|
| 4 |
+(run "apt-get -y install redis-server redis-tools") |
|
| 5 |
+(expose "6379") |
|
| 6 |
+(entrypoint "/usr/bin/redis-server") |
| 0 | 7 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,48 @@ |
| 0 |
+FROM busybox:buildroot-2014.02 |
|
| 1 |
+ |
|
| 2 |
+MAINTAINER docker <docker@docker.io> |
|
| 3 |
+ |
|
| 4 |
+ONBUILD RUN ["echo", "test"] |
|
| 5 |
+ONBUILD RUN echo test |
|
| 6 |
+ONBUILD COPY . / |
|
| 7 |
+ |
|
| 8 |
+ |
|
| 9 |
+# RUN Commands \ |
|
| 10 |
+# linebreak in comment \ |
|
| 11 |
+RUN ["ls", "-la"] |
|
| 12 |
+RUN ["echo", "'1234'"] |
|
| 13 |
+RUN echo "1234" |
|
| 14 |
+RUN echo 1234 |
|
| 15 |
+RUN echo '1234' && \ |
|
| 16 |
+ echo "456" && \ |
|
| 17 |
+ echo 789 |
|
| 18 |
+RUN sh -c 'echo root:testpass \ |
|
| 19 |
+ > /tmp/passwd' |
|
| 20 |
+RUN mkdir -p /test /test2 /test3/test |
|
| 21 |
+ |
|
| 22 |
+# ENV \ |
|
| 23 |
+ENV SCUBA 1 DUBA 3 |
|
| 24 |
+ENV SCUBA "1 DUBA 3" |
|
| 25 |
+ |
|
| 26 |
+# CMD \ |
|
| 27 |
+CMD ["echo", "test"] |
|
| 28 |
+CMD echo test |
|
| 29 |
+CMD echo "test" |
|
| 30 |
+CMD echo 'test' |
|
| 31 |
+CMD echo 'test' | wc - |
|
| 32 |
+ |
|
| 33 |
+#EXPOSE\ |
|
| 34 |
+EXPOSE 3000 |
|
| 35 |
+EXPOSE 9000 5000 6000 |
|
| 36 |
+ |
|
| 37 |
+USER docker |
|
| 38 |
+USER docker:root |
|
| 39 |
+ |
|
| 40 |
+VOLUME ["/test"] |
|
| 41 |
+VOLUME ["/test", "/test2"] |
|
| 42 |
+VOLUME /test3 |
|
| 43 |
+ |
|
| 44 |
+WORKDIR /test |
|
| 45 |
+ |
|
| 46 |
+ADD . / |
|
| 47 |
+COPY . copy |
| 0 | 48 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,29 @@ |
| 0 |
+(from "busybox:buildroot-2014.02") |
|
| 1 |
+(maintainer "docker <docker@docker.io>") |
|
| 2 |
+(onbuild (run "echo" "test")) |
|
| 3 |
+(onbuild (run "echo test")) |
|
| 4 |
+(onbuild (copy "." "/")) |
|
| 5 |
+(run "ls" "-la") |
|
| 6 |
+(run "echo" "'1234'") |
|
| 7 |
+(run "echo \"1234\"") |
|
| 8 |
+(run "echo 1234") |
|
| 9 |
+(run "echo '1234' && echo \"456\" && echo 789") |
|
| 10 |
+(run "sh -c 'echo root:testpass > /tmp/passwd'") |
|
| 11 |
+(run "mkdir -p /test /test2 /test3/test") |
|
| 12 |
+(env "SCUBA" "1 DUBA 3") |
|
| 13 |
+(env "SCUBA" "\"1 DUBA 3\"") |
|
| 14 |
+(cmd "echo" "test") |
|
| 15 |
+(cmd "echo test") |
|
| 16 |
+(cmd "echo \"test\"") |
|
| 17 |
+(cmd "echo 'test'") |
|
| 18 |
+(cmd "echo 'test' | wc -") |
|
| 19 |
+(expose "3000") |
|
| 20 |
+(expose "9000" "5000" "6000") |
|
| 21 |
+(user "docker") |
|
| 22 |
+(user "docker:root") |
|
| 23 |
+(volume "/test") |
|
| 24 |
+(volume "/test" "/test2") |
|
| 25 |
+(volume "/test3") |
|
| 26 |
+(workdir "/test") |
|
| 27 |
+(add "." "/") |
|
| 28 |
+(copy "." "copy") |
| 0 | 29 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,16 @@ |
| 0 |
+FROM ubuntu:14.04 |
|
| 1 |
+ |
|
| 2 |
+RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y |
|
| 3 |
+ADD .muttrc / |
|
| 4 |
+ADD .offlineimaprc / |
|
| 5 |
+ADD .tmux.conf / |
|
| 6 |
+ADD mutt /.mutt |
|
| 7 |
+ADD vim /.vim |
|
| 8 |
+ADD vimrc /.vimrc |
|
| 9 |
+ADD crontab /etc/crontab |
|
| 10 |
+RUN chmod 644 /etc/crontab |
|
| 11 |
+RUN mkdir /Mail |
|
| 12 |
+RUN mkdir /.offlineimap |
|
| 13 |
+RUN echo "export TERM=screen-256color" >/.zshenv |
|
| 14 |
+ |
|
| 15 |
+CMD setsid cron; tmux -2 |
| 0 | 16 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,14 @@ |
| 0 |
+(from "ubuntu:14.04") |
|
| 1 |
+(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") |
|
| 2 |
+(add ".muttrc" "/") |
|
| 3 |
+(add ".offlineimaprc" "/") |
|
| 4 |
+(add ".tmux.conf" "/") |
|
| 5 |
+(add "mutt" "/.mutt") |
|
| 6 |
+(add "vim" "/.vim") |
|
| 7 |
+(add "vimrc" "/.vimrc") |
|
| 8 |
+(add "crontab" "/etc/crontab") |
|
| 9 |
+(run "chmod 644 /etc/crontab") |
|
| 10 |
+(run "mkdir /Mail") |
|
| 11 |
+(run "mkdir /.offlineimap") |
|
| 12 |
+(run "echo \"export TERM=screen-256color\" >/.zshenv") |
|
| 13 |
+(cmd "setsid cron; tmux -2") |
| 0 | 4 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,14 @@ |
| 0 |
+FROM ubuntu:14.04 |
|
| 1 |
+MAINTAINER Erik Hollensbe <erik@hollensbe.org> |
|
| 2 |
+ |
|
| 3 |
+RUN apt-get update && apt-get install nginx-full -y |
|
| 4 |
+RUN rm -rf /etc/nginx |
|
| 5 |
+ADD etc /etc/nginx |
|
| 6 |
+RUN chown -R root:root /etc/nginx |
|
| 7 |
+RUN /usr/sbin/nginx -qt |
|
| 8 |
+RUN mkdir /www |
|
| 9 |
+ |
|
| 10 |
+CMD ["/usr/sbin/nginx"] |
|
| 11 |
+ |
|
| 12 |
+VOLUME /www |
|
| 13 |
+EXPOSE 80 |
| 0 | 14 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,11 @@ |
| 0 |
+(from "ubuntu:14.04") |
|
| 1 |
+(maintainer "Erik Hollensbe <erik@hollensbe.org>") |
|
| 2 |
+(run "apt-get update && apt-get install nginx-full -y") |
|
| 3 |
+(run "rm -rf /etc/nginx") |
|
| 4 |
+(add "etc" "/etc/nginx") |
|
| 5 |
+(run "chown -R root:root /etc/nginx") |
|
| 6 |
+(run "/usr/sbin/nginx -qt") |
|
| 7 |
+(run "mkdir /www") |
|
| 8 |
+(cmd "/usr/sbin/nginx") |
|
| 9 |
+(volume "/www") |
|
| 10 |
+(expose "80") |
| 0 | 11 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,23 @@ |
| 0 |
+FROM ubuntu:12.04 |
|
| 1 |
+ |
|
| 2 |
+EXPOSE 27015 |
|
| 3 |
+EXPOSE 27005 |
|
| 4 |
+EXPOSE 26901 |
|
| 5 |
+EXPOSE 27020 |
|
| 6 |
+ |
|
| 7 |
+RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y |
|
| 8 |
+RUN mkdir -p /steam |
|
| 9 |
+RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam |
|
| 10 |
+ADD ./script /steam/script |
|
| 11 |
+RUN /steam/steamcmd.sh +runscript /steam/script |
|
| 12 |
+RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf |
|
| 13 |
+RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf |
|
| 14 |
+ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg |
|
| 15 |
+ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg |
|
| 16 |
+ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg |
|
| 17 |
+RUN rm -r /steam/tf2/tf/addons/sourcemod/configs |
|
| 18 |
+ADD ./configs /steam/tf2/tf/addons/sourcemod/configs |
|
| 19 |
+RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en |
|
| 20 |
+RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en |
|
| 21 |
+ |
|
| 22 |
+CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill |
| 0 | 23 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,20 @@ |
| 0 |
+(from "ubuntu:12.04") |
|
| 1 |
+(expose "27015") |
|
| 2 |
+(expose "27005") |
|
| 3 |
+(expose "26901") |
|
| 4 |
+(expose "27020") |
|
| 5 |
+(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") |
|
| 6 |
+(run "mkdir -p /steam") |
|
| 7 |
+(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") |
|
| 8 |
+(add "./script" "/steam/script") |
|
| 9 |
+(run "/steam/steamcmd.sh +runscript /steam/script") |
|
| 10 |
+(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") |
|
| 11 |
+(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") |
|
| 12 |
+(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") |
|
| 13 |
+(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") |
|
| 14 |
+(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") |
|
| 15 |
+(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") |
|
| 16 |
+(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") |
|
| 17 |
+(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") |
|
| 18 |
+(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") |
|
| 19 |
+(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") |
| 0 | 5 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,176 @@ |
| 0 |
+package parser |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "strconv" |
|
| 5 |
+ "strings" |
|
| 6 |
+ "unicode" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+// Dump dumps the AST defined by `node` as a list of sexps. |
|
| 10 |
+// Returns a string suitable for printing. |
|
| 11 |
+func (node *Node) Dump() string {
|
|
| 12 |
+ str := "" |
|
| 13 |
+ str += node.Value |
|
| 14 |
+ |
|
| 15 |
+ if len(node.Flags) > 0 {
|
|
| 16 |
+ str += fmt.Sprintf(" %q", node.Flags)
|
|
| 17 |
+ } |
|
| 18 |
+ |
|
| 19 |
+ for _, n := range node.Children {
|
|
| 20 |
+ str += "(" + n.Dump() + ")\n"
|
|
| 21 |
+ } |
|
| 22 |
+ |
|
| 23 |
+ if node.Next != nil {
|
|
| 24 |
+ for n := node.Next; n != nil; n = n.Next {
|
|
| 25 |
+ if len(n.Children) > 0 {
|
|
| 26 |
+ str += " " + n.Dump() |
|
| 27 |
+ } else {
|
|
| 28 |
+ str += " " + strconv.Quote(n.Value) |
|
| 29 |
+ } |
|
| 30 |
+ } |
|
| 31 |
+ } |
|
| 32 |
+ |
|
| 33 |
+ return strings.TrimSpace(str) |
|
| 34 |
+} |
|
| 35 |
+ |
|
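The "result" fixtures earlier in this diff are exactly this dump. A small in-package sketch of the quoting rules: leaf values go through strconv.Quote, and children are re-wrapped in parentheses.

    package parser

    import "fmt"

    func ExampleNode_Dump() {
        run := &Node{Value: "run", Next: &Node{Value: "echo hi"}}
        root := &Node{Children: []*Node{run}}
        fmt.Println(root.Dump())
        // Output: (run "echo hi")
    }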
| 36 |
+// fullDispatch performs the dispatch based on the two primary strings, cmd and args. Please |
|
| 37 |
+// look at the dispatch table in parser.go to see how these dispatchers work. |
|
| 38 |
+func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {
|
|
| 39 |
+ fn := dispatch[cmd] |
|
| 40 |
+ |
|
| 41 |
+ // Ignore invalid Dockerfile instructions |
|
| 42 |
+ if fn == nil {
|
|
| 43 |
+ fn = parseIgnore |
|
| 44 |
+ } |
|
| 45 |
+ |
|
| 46 |
+ sexp, attrs, err := fn(args) |
|
| 47 |
+ if err != nil {
|
|
| 48 |
+ return nil, nil, err |
|
| 49 |
+ } |
|
| 50 |
+ |
|
| 51 |
+ return sexp, attrs, nil |
|
| 52 |
+} |
|
| 53 |
+ |
|
| 54 |
+// splitCommand takes a single line of text and parses out the cmd and args, |
|
| 55 |
+// which are used for dispatching to more exact parsing functions. |
|
| 56 |
+func splitCommand(line string) (string, []string, string, error) {
|
|
| 57 |
+ var args string |
|
| 58 |
+ var flags []string |
|
| 59 |
+ |
|
| 60 |
+ // Make sure we get the same results irrespective of leading/trailing spaces |
|
| 61 |
+ cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) |
|
| 62 |
+ cmd := strings.ToLower(cmdline[0]) |
|
| 63 |
+ |
|
| 64 |
+ if len(cmdline) == 2 {
|
|
| 65 |
+ var err error |
|
| 66 |
+ args, flags, err = extractBuilderFlags(cmdline[1]) |
|
| 67 |
+ if err != nil {
|
|
| 68 |
+ return "", nil, "", err |
|
| 69 |
+ } |
|
| 70 |
+ } |
|
| 71 |
+ |
|
| 72 |
+ return cmd, flags, strings.TrimSpace(args), nil |
|
| 73 |
+} |
|
| 74 |
+ |
|
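A hypothetical in-package check of the split: the instruction is lowercased, flags are peeled off, and the argument tail is trimmed.

    package parser

    import "testing"

    func TestSplitCommandSketch(t *testing.T) {
        cmd, flags, args, err := splitCommand("COPY --user=me foo /tmp/")
        if err != nil {
            t.Fatal(err)
        }
        if cmd != "copy" || len(flags) != 1 || flags[0] != "--user=me" || args != "foo /tmp/" {
            t.Fatalf("got cmd=%q flags=%q args=%q", cmd, flags, args)
        }
    }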
| 75 |
+// covers comments and empty lines. Lines should be trimmed before passing to |
|
| 76 |
+// this function. |
|
| 77 |
+func stripComments(line string) string {
|
|
| 78 |
+ // string is already trimmed at this point |
|
| 79 |
+ if tokenComment.MatchString(line) {
|
|
| 80 |
+ return tokenComment.ReplaceAllString(line, "") |
|
| 81 |
+ } |
|
| 82 |
+ |
|
| 83 |
+ return line |
|
| 84 |
+} |
|
| 85 |
+ |
|
| 86 |
+func extractBuilderFlags(line string) (string, []string, error) {
|
|
| 87 |
+ // Parses the BuilderFlags and returns the remaining part of the line |
|
| 88 |
+ |
|
| 89 |
+ const ( |
|
| 90 |
+ inSpaces = iota // looking for start of a word |
|
| 91 |
+ inWord |
|
| 92 |
+ inQuote |
|
| 93 |
+ ) |
|
| 94 |
+ |
|
| 95 |
+ words := []string{}
|
|
| 96 |
+ phase := inSpaces |
|
| 97 |
+ word := "" |
|
| 98 |
+ quote := '\000' |
|
| 99 |
+ blankOK := false |
|
| 100 |
+ var ch rune |
|
| 101 |
+ |
|
| 102 |
+ for pos := 0; pos <= len(line); pos++ {
|
|
| 103 |
+ if pos != len(line) {
|
|
| 104 |
+ ch = rune(line[pos]) |
|
| 105 |
+ } |
|
| 106 |
+ |
|
| 107 |
+ if phase == inSpaces { // Looking for start of word
|
|
| 108 |
+ if pos == len(line) { // end of input
|
|
| 109 |
+ break |
|
| 110 |
+ } |
|
| 111 |
+ if unicode.IsSpace(ch) { // skip spaces
|
|
| 112 |
+ continue |
|
| 113 |
+ } |
|
| 114 |
+ |
|
| 115 |
+ // Only keep going if the next word starts with -- |
|
| 116 |
+ if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
|
|
| 117 |
+ return line[pos:], words, nil |
|
| 118 |
+ } |
|
| 119 |
+ |
|
| 120 |
+ phase = inWord // found something starting with "--"; fall through |
|
| 121 |
+ } |
|
| 122 |
+ if (phase == inWord || phase == inQuote) && (pos == len(line)) {
|
|
| 123 |
+ if word != "--" && (blankOK || len(word) > 0) {
|
|
| 124 |
+ words = append(words, word) |
|
| 125 |
+ } |
|
| 126 |
+ break |
|
| 127 |
+ } |
|
| 128 |
+ if phase == inWord {
|
|
| 129 |
+ if unicode.IsSpace(ch) {
|
|
| 130 |
+ phase = inSpaces |
|
| 131 |
+ if word == "--" {
|
|
| 132 |
+ return line[pos:], words, nil |
|
| 133 |
+ } |
|
| 134 |
+ if blankOK || len(word) > 0 {
|
|
| 135 |
+ words = append(words, word) |
|
| 136 |
+ } |
|
| 137 |
+ word = "" |
|
| 138 |
+ blankOK = false |
|
| 139 |
+ continue |
|
| 140 |
+ } |
|
| 141 |
+ if ch == '\'' || ch == '"' {
|
|
| 142 |
+ quote = ch |
|
| 143 |
+ blankOK = true |
|
| 144 |
+ phase = inQuote |
|
| 145 |
+ continue |
|
| 146 |
+ } |
|
| 147 |
+ if ch == '\\' {
|
|
| 148 |
+ if pos+1 == len(line) {
|
|
| 149 |
+ continue // just skip \ at end |
|
| 150 |
+ } |
|
| 151 |
+ pos++ |
|
| 152 |
+ ch = rune(line[pos]) |
|
| 153 |
+ } |
|
| 154 |
+ word += string(ch) |
|
| 155 |
+ continue |
|
| 156 |
+ } |
|
| 157 |
+ if phase == inQuote {
|
|
| 158 |
+ if ch == quote {
|
|
| 159 |
+ phase = inWord |
|
| 160 |
+ continue |
|
| 161 |
+ } |
|
| 162 |
+ if ch == '\\' {
|
|
| 163 |
+ if pos+1 == len(line) {
|
|
| 164 |
+ phase = inWord |
|
| 165 |
+ continue // just skip \ at end |
|
| 166 |
+ } |
|
| 167 |
+ pos++ |
|
| 168 |
+ ch = rune(line[pos]) |
|
| 169 |
+ } |
|
| 170 |
+ word += string(ch) |
|
| 171 |
+ } |
|
| 172 |
+ } |
|
| 173 |
+ |
|
| 174 |
+ return "", words, nil |
|
| 175 |
+} |
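One edge worth calling out from the state machine above: a bare "--" terminates flag parsing. A hypothetical in-package check (note the function can return the tail with its leading space; splitCommand trims it afterwards):

    package parser

    import (
        "strings"
        "testing"
    )

    func TestExtractBuilderFlagsSketch(t *testing.T) {
        rest, flags, err := extractBuilderFlags("--doit=true -- foo /tmp/")
        if err != nil {
            t.Fatal(err)
        }
        if len(flags) != 1 || flags[0] != "--doit=true" {
            t.Fatalf("got flags=%q", flags)
        }
        if strings.TrimSpace(rest) != "foo /tmp/" {
            t.Fatalf("got rest=%q", rest)
        }
    }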
| 0 | 176 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,243 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+// This will take a single word and an array of env variables and |
|
| 3 |
+// process all quotes (" and ') as well as $xxx and ${xxx} env variable
|
|
| 4 |
+// tokens. It tries to mimic the bash shell's word processing. |
|
| 5 |
+// It doesn't support all flavors of ${xx:...} formats but new ones can
|
|
| 6 |
+// be added by adding code to the "special ${} format processing" section
|
|
| 7 |
+ |
|
| 8 |
+import ( |
|
| 9 |
+ "fmt" |
|
| 10 |
+ "strings" |
|
| 11 |
+ "unicode" |
|
| 12 |
+) |
|
| 13 |
+ |
|
| 14 |
+type shellWord struct {
|
|
| 15 |
+ word string |
|
| 16 |
+ envs []string |
|
| 17 |
+ pos int |
|
| 18 |
+} |
|
| 19 |
+ |
|
| 20 |
+// ProcessWord will use the 'env' list of environment variables, |
|
| 21 |
+// and replace any env var references in 'word'. |
|
| 22 |
+func ProcessWord(word string, env []string) (string, error) {
|
|
| 23 |
+ sw := &shellWord{
|
|
| 24 |
+ word: word, |
|
| 25 |
+ envs: env, |
|
| 26 |
+ pos: 0, |
|
| 27 |
+ } |
|
| 28 |
+ return sw.process() |
|
| 29 |
+} |
|
| 30 |
+ |
|
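A hedged sketch of ProcessWord against a tiny environment (in-package, mirroring the fixture-driven TestShellParser that appears later in this diff):

    package dockerfile

    import "testing"

    func TestProcessWordSketch(t *testing.T) {
        env := []string{"FOO=bar"}
        cases := map[string]string{
            "$FOO":      "bar",
            "${FOO}baz": "barbaz",
            `"$FOO"`:    "bar",  // double quotes still expand
            `'$FOO'`:    "$FOO", // single quotes do not
        }
        for in, want := range cases {
            got, err := ProcessWord(in, env)
            if err != nil || got != want {
                t.Fatalf("%s => %q (err %v), want %q", in, got, err, want)
            }
        }
    }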
| 31 |
+func (sw *shellWord) process() (string, error) {
|
|
| 32 |
+ return sw.processStopOn('\000')
|
|
| 33 |
+} |
|
| 34 |
+ |
|
| 35 |
+// Process the word, starting at 'pos', and stop when we get to the |
|
| 36 |
+// end of the word or the 'stopChar' character |
|
| 37 |
+func (sw *shellWord) processStopOn(stopChar rune) (string, error) {
|
|
| 38 |
+ var result string |
|
| 39 |
+ var charFuncMapping = map[rune]func() (string, error){
|
|
| 40 |
+ '\'': sw.processSingleQuote, |
|
| 41 |
+ '"': sw.processDoubleQuote, |
|
| 42 |
+ '$': sw.processDollar, |
|
| 43 |
+ } |
|
| 44 |
+ |
|
| 45 |
+ for sw.pos < len(sw.word) {
|
|
| 46 |
+ ch := sw.peek() |
|
| 47 |
+ if stopChar != '\000' && ch == stopChar {
|
|
| 48 |
+ sw.next() |
|
| 49 |
+ break |
|
| 50 |
+ } |
|
| 51 |
+ if fn, ok := charFuncMapping[ch]; ok {
|
|
| 52 |
+ // Call special processing func for certain chars |
|
| 53 |
+ tmp, err := fn() |
|
| 54 |
+ if err != nil {
|
|
| 55 |
+ return "", err |
|
| 56 |
+ } |
|
| 57 |
+ result += tmp |
|
| 58 |
+ } else {
|
|
| 59 |
+ // Not special, just add it to the result |
|
| 60 |
+ ch = sw.next() |
|
| 61 |
+ if ch == '\\' {
|
|
| 62 |
+ // '\' escapes, except end of line |
|
| 63 |
+ ch = sw.next() |
|
| 64 |
+ if ch == '\000' {
|
|
| 65 |
+ continue |
|
| 66 |
+ } |
|
| 67 |
+ } |
|
| 68 |
+ result += string(ch) |
|
| 69 |
+ } |
|
| 70 |
+ } |
|
| 71 |
+ |
|
| 72 |
+ return result, nil |
|
| 73 |
+} |
|
| 74 |
+ |
|
| 75 |
+func (sw *shellWord) peek() rune {
|
|
| 76 |
+ if sw.pos == len(sw.word) {
|
|
| 77 |
+ return '\000' |
|
| 78 |
+ } |
|
| 79 |
+ return rune(sw.word[sw.pos]) |
|
| 80 |
+} |
|
| 81 |
+ |
|
| 82 |
+func (sw *shellWord) next() rune {
|
|
| 83 |
+ if sw.pos == len(sw.word) {
|
|
| 84 |
+ return '\000' |
|
| 85 |
+ } |
|
| 86 |
+ ch := rune(sw.word[sw.pos]) |
|
| 87 |
+ sw.pos++ |
|
| 88 |
+ return ch |
|
| 89 |
+} |
|
| 90 |
+ |
|
| 91 |
+func (sw *shellWord) processSingleQuote() (string, error) {
|
|
| 92 |
+ // All chars between single quotes are taken as-is |
|
| 93 |
+ // Note, you can't escape ' |
|
| 94 |
+ var result string |
|
| 95 |
+ |
|
| 96 |
+ sw.next() |
|
| 97 |
+ |
|
| 98 |
+ for {
|
|
| 99 |
+ ch := sw.next() |
|
| 100 |
+ if ch == '\000' || ch == '\'' {
|
|
| 101 |
+ break |
|
| 102 |
+ } |
|
| 103 |
+ result += string(ch) |
|
| 104 |
+ } |
|
| 105 |
+ return result, nil |
|
| 106 |
+} |
|
| 107 |
+ |
|
| 108 |
+func (sw *shellWord) processDoubleQuote() (string, error) {
|
|
| 109 |
+ // All chars up to the next " are taken as-is, even ', except any $ chars |
|
| 110 |
+ // But you can escape " with a \ |
|
| 111 |
+ var result string |
|
| 112 |
+ |
|
| 113 |
+ sw.next() |
|
| 114 |
+ |
|
| 115 |
+ for sw.pos < len(sw.word) {
|
|
| 116 |
+ ch := sw.peek() |
|
| 117 |
+ if ch == '"' {
|
|
| 118 |
+ sw.next() |
|
| 119 |
+ break |
|
| 120 |
+ } |
|
| 121 |
+ if ch == '$' {
|
|
| 122 |
+ tmp, err := sw.processDollar() |
|
| 123 |
+ if err != nil {
|
|
| 124 |
+ return "", err |
|
| 125 |
+ } |
|
| 126 |
+ result += tmp |
|
| 127 |
+ } else {
|
|
| 128 |
+ ch = sw.next() |
|
| 129 |
+ if ch == '\\' {
|
|
| 130 |
+ chNext := sw.peek() |
|
| 131 |
+ |
|
| 132 |
+ if chNext == '\000' {
|
|
| 133 |
+ // Ignore \ at end of word |
|
| 134 |
+ continue |
|
| 135 |
+ } |
|
| 136 |
+ |
|
| 137 |
+ if chNext == '"' || chNext == '$' {
|
|
| 138 |
+ // \" and \$ can be escaped, all other \'s are left as-is |
|
| 139 |
+ ch = sw.next() |
|
| 140 |
+ } |
|
| 141 |
+ } |
|
| 142 |
+ result += string(ch) |
|
| 143 |
+ } |
|
| 144 |
+ } |
|
| 145 |
+ |
|
| 146 |
+ return result, nil |
|
| 147 |
+} |
|
| 148 |
+ |
|
| 149 |
+func (sw *shellWord) processDollar() (string, error) {
|
|
| 150 |
+ sw.next() |
|
| 151 |
+ ch := sw.peek() |
|
| 152 |
+ if ch == '{' {
|
|
| 153 |
+ sw.next() |
|
| 154 |
+ name := sw.processName() |
|
| 155 |
+ ch = sw.peek() |
|
| 156 |
+ if ch == '}' {
|
|
| 157 |
+ // Normal ${xx} case
|
|
| 158 |
+ sw.next() |
|
| 159 |
+ return sw.getEnv(name), nil |
|
| 160 |
+ } |
|
| 161 |
+ if ch == ':' {
|
|
| 162 |
+ // Special ${xx:...} format processing
|
|
| 163 |
+ // Yes it allows for recursive $'s in the ... spot |
|
| 164 |
+ |
|
| 165 |
+ sw.next() // skip over : |
|
| 166 |
+ modifier := sw.next() |
|
| 167 |
+ |
|
| 168 |
+ word, err := sw.processStopOn('}')
|
|
| 169 |
+ if err != nil {
|
|
| 170 |
+ return "", err |
|
| 171 |
+ } |
|
| 172 |
+ |
|
| 173 |
+ // Grab the current value of the variable in question so we |
|
| 174 |
+ // can use it to determine what to do based on the modifier |
|
| 175 |
+ newValue := sw.getEnv(name) |
|
| 176 |
+ |
|
| 177 |
+ switch modifier {
|
|
| 178 |
+ case '+': |
|
| 179 |
+ if newValue != "" {
|
|
| 180 |
+ newValue = word |
|
| 181 |
+ } |
|
| 182 |
+ return newValue, nil |
|
| 183 |
+ |
|
| 184 |
+ case '-': |
|
| 185 |
+ if newValue == "" {
|
|
| 186 |
+ newValue = word |
|
| 187 |
+ } |
|
| 188 |
+ return newValue, nil |
|
| 189 |
+ |
|
| 190 |
+ default: |
|
| 191 |
+ return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word)
|
|
| 192 |
+ } |
|
| 193 |
+ } |
|
| 194 |
+ return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word)
|
|
| 195 |
+ } |
|
| 196 |
+ // $xxx case |
|
| 197 |
+ name := sw.processName() |
|
| 198 |
+ if name == "" {
|
|
| 199 |
+ return "$", nil |
|
| 200 |
+ } |
|
| 201 |
+ return sw.getEnv(name), nil |
|
| 202 |
+} |
|
| 203 |
+ |
|
| 204 |
+func (sw *shellWord) processName() string {
|
|
| 205 |
+ // Read in a name (alphanumeric or _) |
|
| 206 |
+ // If it starts with a digit then the name is just that one digit (e.g. $1) |
|
| 207 |
+ var name string |
|
| 208 |
+ |
|
| 209 |
+ for sw.pos < len(sw.word) {
|
|
| 210 |
+ ch := sw.peek() |
|
| 211 |
+ if len(name) == 0 && unicode.IsDigit(ch) {
|
|
| 212 |
+ ch = sw.next() |
|
| 213 |
+ return string(ch) |
|
| 214 |
+ } |
|
| 215 |
+ if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
|
|
| 216 |
+ break |
|
| 217 |
+ } |
|
| 218 |
+ ch = sw.next() |
|
| 219 |
+ name += string(ch) |
|
| 220 |
+ } |
|
| 221 |
+ |
|
| 222 |
+ return name |
|
| 223 |
+} |
|
| 224 |
+ |
|
| 225 |
+func (sw *shellWord) getEnv(name string) string {
|
|
| 226 |
+ for _, env := range sw.envs {
|
|
| 227 |
+ i := strings.Index(env, "=") |
|
| 228 |
+ if i < 0 {
|
|
| 229 |
+ if name == env {
|
|
| 230 |
+ // Should probably never get here, but just in case treat |
|
| 231 |
+ // it like "var" and "var=" are the same |
|
| 232 |
+ return "" |
|
| 233 |
+ } |
|
| 234 |
+ continue |
|
| 235 |
+ } |
|
| 236 |
+ if name != env[:i] {
|
|
| 237 |
+ continue |
|
| 238 |
+ } |
|
| 239 |
+ return env[i+1:] |
|
| 240 |
+ } |
|
| 241 |
+ return "" |
|
| 242 |
+} |
| 0 | 243 |
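
Reviewer note: a quick way to sanity-check the ':-' / ':+' modifier handling in processDollar above is through the exported ProcessWord entry point (the same function the test file below exercises). A minimal usage sketch, assuming the package is importable from its new builder/dockerfile location; illustrative only, not a stable public API:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/builder/dockerfile"
    )

    func main() {
    	env := []string{"PWD=/home"}

    	// ':-' case: XXX is unset, so the fallback word is substituted.
    	out, _ := dockerfile.ProcessWord("he${XXX:-000}xx", env)
    	fmt.Println(out) // he000xx

    	// ':+' case: PWD is set, so the alternate word replaces its value.
    	out, _ = dockerfile.ProcessWord("he${PWD:+yes}xx", env)
    	fmt.Println(out) // heyesxx
    }
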
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,93 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bufio" |
|
| 4 |
+ "os" |
|
| 5 |
+ "strings" |
|
| 6 |
+ "testing" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+func TestShellParser(t *testing.T) {
|
|
| 10 |
+ file, err := os.Open("words")
|
|
| 11 |
+ if err != nil {
|
|
| 12 |
+ t.Fatalf("Can't open 'words': %s", err)
|
|
| 13 |
+ } |
|
| 14 |
+ defer file.Close() |
|
| 15 |
+ |
|
| 16 |
+ scanner := bufio.NewScanner(file) |
|
| 17 |
+ envs := []string{"PWD=/home", "SHELL=bash"}
|
|
| 18 |
+ for scanner.Scan() {
|
|
| 19 |
+ line := scanner.Text() |
|
| 20 |
+ |
|
| 21 |
+ // Trim comments and blank lines |
|
| 22 |
+ i := strings.Index(line, "#") |
|
| 23 |
+ if i >= 0 {
|
|
| 24 |
+ line = line[:i] |
|
| 25 |
+ } |
|
| 26 |
+ line = strings.TrimSpace(line) |
|
| 27 |
+ |
|
| 28 |
+ if line == "" {
|
|
| 29 |
+ continue |
|
| 30 |
+ } |
|
| 31 |
+ |
|
| 32 |
+ words := strings.Split(line, "|") |
|
| 33 |
+ if len(words) != 2 {
|
|
| 34 |
+ t.Fatalf("Error in 'words' - should be 2 words:%q", words)
|
|
| 35 |
+ } |
|
| 36 |
+ |
|
| 37 |
+ words[0] = strings.TrimSpace(words[0]) |
|
| 38 |
+ words[1] = strings.TrimSpace(words[1]) |
|
| 39 |
+ |
|
| 40 |
+ newWord, err := ProcessWord(words[0], envs) |
|
| 41 |
+ |
|
| 42 |
+ if err != nil {
|
|
| 43 |
+ newWord = "error" |
|
| 44 |
+ } |
|
| 45 |
+ |
|
| 46 |
+ if newWord != words[1] {
|
|
| 47 |
+ t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1])
|
|
| 48 |
+ } |
|
| 49 |
+ } |
|
| 50 |
+} |
|
| 51 |
+ |
|
| 52 |
+func TestGetEnv(t *testing.T) {
|
|
| 53 |
+ sw := &shellWord{
|
|
| 54 |
+ word: "", |
|
| 55 |
+ envs: nil, |
|
| 56 |
+ pos: 0, |
|
| 57 |
+ } |
|
| 58 |
+ |
|
| 59 |
+ sw.envs = []string{}
|
|
| 60 |
+ if sw.getEnv("foo") != "" {
|
|
| 61 |
+ t.Fatalf("2 - 'foo' should map to ''")
|
|
| 62 |
+ } |
|
| 63 |
+ |
|
| 64 |
+ sw.envs = []string{"foo"}
|
|
| 65 |
+ if sw.getEnv("foo") != "" {
|
|
| 66 |
+ t.Fatalf("3 - 'foo' should map to ''")
|
|
| 67 |
+ } |
|
| 68 |
+ |
|
| 69 |
+ sw.envs = []string{"foo="}
|
|
| 70 |
+ if sw.getEnv("foo") != "" {
|
|
| 71 |
+ t.Fatalf("4 - 'foo' should map to ''")
|
|
| 72 |
+ } |
|
| 73 |
+ |
|
| 74 |
+ sw.envs = []string{"foo=bar"}
|
|
| 75 |
+ if sw.getEnv("foo") != "bar" {
|
|
| 76 |
+ t.Fatalf("5 - 'foo' should map to 'bar'")
|
|
| 77 |
+ } |
|
| 78 |
+ |
|
| 79 |
+ sw.envs = []string{"foo=bar", "car=hat"}
|
|
| 80 |
+ if sw.getEnv("foo") != "bar" {
|
|
| 81 |
+ t.Fatalf("6 - 'foo' should map to 'bar'")
|
|
| 82 |
+ } |
|
| 83 |
+ if sw.getEnv("car") != "hat" {
|
|
| 84 |
+ t.Fatalf("7 - 'car' should map to 'hat'")
|
|
| 85 |
+ } |
|
| 86 |
+ |
|
| 87 |
+ // Make sure we grab the first 'car' in the list |
|
| 88 |
+ sw.envs = []string{"foo=bar", "car=hat", "car=bike"}
|
|
| 89 |
+ if sw.getEnv("car") != "hat" {
|
|
| 90 |
+ t.Fatalf("8 - 'car' should map to 'hat'")
|
|
| 91 |
+ } |
|
| 92 |
+} |
| 0 | 93 |
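
The loop above is driven by the 'words' fixture that ships next to the test. If a case ever needs to live inline instead, the same table-driven shape works without the external file. A sketch only; the test name and cases below are hypothetical, not part of this diff:

    func TestShellParserInline(t *testing.T) {
    	envs := []string{"PWD=/home"}
    	cases := []struct{ in, want string }{
    		{"he$PWD", "he/home"},   // plain expansion
    		{"'he$PWD'", "he$PWD"},  // single quotes suppress expansion
    		{`"he$PWD"`, "he/home"}, // double quotes do not
    	}
    	for _, c := range cases {
    		got, err := ProcessWord(c.in, envs)
    		if err != nil {
    			t.Fatalf("ProcessWord(%q): %v", c.in, err)
    		}
    		if got != c.want {
    			t.Fatalf("ProcessWord(%q) = %q, want %q", c.in, got, c.want)
    		}
    	}
    }
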
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,27 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "regexp" |
|
| 4 |
+ "strings" |
|
| 5 |
+) |
|
| 6 |
+ |
|
| 7 |
+const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` |
|
| 8 |
+ |
|
| 9 |
+var mimeRe = regexp.MustCompile(acceptableRemoteMIME) |
|
| 10 |
+ |
|
| 11 |
+func selectAcceptableMIME(ct string) string {
|
|
| 12 |
+ return mimeRe.FindString(ct) |
|
| 13 |
+} |
|
| 14 |
+ |
|
| 15 |
+func handleJSONArgs(args []string, attributes map[string]bool) []string {
|
|
| 16 |
+ if len(args) == 0 {
|
|
| 17 |
+ return []string{}
|
|
| 18 |
+ } |
|
| 19 |
+ |
|
| 20 |
+ if attributes != nil && attributes["json"] {
|
|
| 21 |
+ return args |
|
| 22 |
+ } |
|
| 23 |
+ |
|
| 24 |
+ // literal string command, not an exec array |
|
| 25 |
+ return []string{strings.Join(args, " ")}
|
|
| 26 |
+} |
| 0 | 27 |
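
handleJSONArgs is the split point between the exec (JSON) form and the shell form of CMD/RUN/ENTRYPOINT: when the parser has set the "json" attribute the arguments pass through untouched, otherwise they collapse into the single string that later gets handed to the default shell. A small in-package sketch of both paths (the test name is hypothetical):

    func TestHandleJSONArgs(t *testing.T) {
    	// Exec form, CMD ["echo", "hi"]: the parser marks the node with
    	// the "json" attribute, so the args stay separate.
    	exec := handleJSONArgs([]string{"echo", "hi"}, map[string]bool{"json": true})
    	if len(exec) != 2 || exec[0] != "echo" {
    		t.Fatalf("exec form should keep separate args, got %v", exec)
    	}

    	// Shell form, CMD echo hi: no json attribute, so the args are
    	// joined into one command string.
    	shell := handleJSONArgs([]string{"echo", "hi"}, nil)
    	if len(shell) != 1 || shell[0] != "echo hi" {
    		t.Fatalf("shell form should join args, got %v", shell)
    	}
    }
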
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,41 @@ |
| 0 |
+package dockerfile |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "testing" |
|
| 5 |
+) |
|
| 6 |
+ |
|
| 7 |
+func TestSelectAcceptableMIME(t *testing.T) {
|
|
| 8 |
+ validMimeStrings := []string{
|
|
| 9 |
+ "application/x-bzip2", |
|
| 10 |
+ "application/bzip2", |
|
| 11 |
+ "application/gzip", |
|
| 12 |
+ "application/x-gzip", |
|
| 13 |
+ "application/x-xz", |
|
| 14 |
+ "application/xz", |
|
| 15 |
+ "application/tar", |
|
| 16 |
+ "application/x-tar", |
|
| 17 |
+ "application/octet-stream", |
|
| 18 |
+ "text/plain", |
|
| 19 |
+ } |
|
| 20 |
+ |
|
| 21 |
+ invalidMimeStrings := []string{
|
|
| 22 |
+ "", |
|
| 23 |
+ "application/octet", |
|
| 24 |
+ "application/json", |
|
| 25 |
+ } |
|
| 26 |
+ |
|
| 27 |
+ for _, m := range invalidMimeStrings {
|
|
| 28 |
+ if len(selectAcceptableMIME(m)) > 0 {
|
|
| 29 |
+ err := fmt.Errorf("Should not have accepted %q", m)
|
|
| 30 |
+ t.Fatal(err) |
|
| 31 |
+ } |
|
| 32 |
+ } |
|
| 33 |
+ |
|
| 34 |
+ for _, m := range validMimeStrings {
|
|
| 35 |
+ if str := selectAcceptableMIME(m); str == "" {
|
|
| 36 |
+ err := fmt.Errorf("Should have accepted %q", m)
|
|
| 37 |
+ t.Fatal(err) |
|
| 38 |
+ } |
|
| 39 |
+ } |
|
| 40 |
+} |
| 0 | 41 |
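
One behavior the table above does not spell out: selectAcceptableMIME uses FindString rather than a full-string match, so a Content-Type header that carries parameters still yields the bare media type. A hypothetical in-package test to pin that down:

    func TestMIMEWithParameters(t *testing.T) {
    	// FindString extracts the acceptable media type even when the
    	// header has parameters after the type itself.
    	if got := selectAcceptableMIME("application/x-gzip; charset=utf-8"); got != "application/x-gzip" {
    		t.Fatalf("expected application/x-gzip, got %q", got)
    	}
    }
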
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,58 @@ |
| 0 |
+hello | hello |
|
| 1 |
+he'll'o | hello |
|
| 2 |
+he'llo | hello |
|
| 3 |
+he\'llo | he'llo |
|
| 4 |
+he\\'llo | he\llo |
|
| 5 |
+abc\tdef | abctdef |
|
| 6 |
+"abc\tdef" | abc\tdef |
|
| 7 |
+'abc\tdef' | abc\tdef |
|
| 8 |
+hello\ | hello |
|
| 9 |
+hello\\ | hello\ |
|
| 10 |
+"hello | hello |
|
| 11 |
+"hello\" | hello" |
|
| 12 |
+"hel'lo" | hel'lo |
|
| 13 |
+'hello | hello |
|
| 14 |
+'hello\' | hello\ |
|
| 15 |
+"''" | '' |
|
| 16 |
+$. | $. |
|
| 17 |
+$1 | |
|
| 18 |
+he$1x | hex |
|
| 19 |
+he$.x | he$.x |
|
| 20 |
+he$pwd. | he. |
|
| 21 |
+he$PWD | he/home |
|
| 22 |
+he\$PWD | he$PWD |
|
| 23 |
+he\\$PWD | he\/home |
|
| 24 |
+he\${} | he${}
|
|
| 25 |
+he\${}xx | he${}xx
|
|
| 26 |
+he${} | he
|
|
| 27 |
+he${}xx | hexx
|
|
| 28 |
+he${hi} | he
|
|
| 29 |
+he${hi}xx | hexx
|
|
| 30 |
+he${PWD} | he/home
|
|
| 31 |
+he${.} | error
|
|
| 32 |
+he${XXX:-000}xx | he000xx
|
|
| 33 |
+he${PWD:-000}xx | he/homexx
|
|
| 34 |
+he${XXX:-$PWD}xx | he/homexx
|
|
| 35 |
+he${XXX:-${PWD:-yyy}}xx | he/homexx
|
|
| 36 |
+he${XXX:-${YYY:-yyy}}xx | heyyyxx
|
|
| 37 |
+he${XXX:YYY} | error
|
|
| 38 |
+he${XXX:+${PWD}}xx | hexx
|
|
| 39 |
+he${PWD:+${XXX}}xx | hexx
|
|
| 40 |
+he${PWD:+${SHELL}}xx | hebashxx
|
|
| 41 |
+he${XXX:+000}xx | hexx
|
|
| 42 |
+he${PWD:+000}xx | he000xx
|
|
| 43 |
+'he${XX}' | he${XX}
|
|
| 44 |
+"he${PWD}" | he/home
|
|
| 45 |
+"he'$PWD'" | he'/home' |
|
| 46 |
+"$PWD" | /home |
|
| 47 |
+'$PWD' | $PWD |
|
| 48 |
+'\$PWD' | \$PWD |
|
| 49 |
+'"hello"' | "hello" |
|
| 50 |
+he\$PWD | he$PWD |
|
| 51 |
+"he\$PWD" | he$PWD |
|
| 52 |
+'he\$PWD' | he\$PWD |
|
| 53 |
+he${PWD | error
|
|
| 54 |
+he${PWD:=000}xx | error
|
|
| 55 |
+he${PWD:+${PWD}:}xx | he/home:xx
|
|
| 56 |
+he${XXX:-\$PWD:}xx | he$PWD:xx
|
|
| 57 |
+he${XXX:-\${PWD}z}xx | he${PWDz}xx
|
| 0 | 58 |
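
For reviewers reading the fixture: each line is an "input | expected" pair, '#' starts a comment, and an expected value of "error" marks inputs that must fail. The nested rows are the subtlest; working one of them through as a hypothetical in-package test:

    func TestNestedSubstitution(t *testing.T) {
    	// ${XXX:-...}: XXX is unset, so the '-' fallback word is used. That
    	// word is itself ${PWD:-yyy}, and PWD=/home is set, so it wins.
    	got, err := ProcessWord("he${XXX:-${PWD:-yyy}}xx", []string{"PWD=/home", "SHELL=bash"})
    	if err != nil || got != "he/homexx" {
    		t.Fatalf("got %q, %v", got, err)
    	}
    }
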
deleted file mode 100644 |
| ... | ... |
@@ -1,424 +0,0 @@ |
| 1 |
-// Package builder is the evaluation step in the Dockerfile parse/evaluate pipeline. |
|
| 2 |
-// |
|
| 3 |
-// It incorporates a dispatch table based on the parser.Node values (see the |
|
| 4 |
-// parser package for more information) that are yielded from the parser itself. |
|
| 5 |
-// Calling NewBuilder with the BuildOpts struct can be used to customize the |
|
| 6 |
-// experience for execution purposes only. Parsing is controlled in the parser |
|
| 7 |
-// package, and this division of responsibility should be respected. |
|
| 8 |
-// |
|
| 9 |
-// Please see the jump table targets for the actual invocations, most of which |
|
| 10 |
-// will call out to the functions in internals.go to deal with their tasks. |
|
| 11 |
-// |
|
| 12 |
-// ONBUILD is a special case, which is covered in the onbuild() func in |
|
| 13 |
-// dispatchers.go. |
|
| 14 |
-// |
|
| 15 |
-// The evaluator uses the concept of "steps", which are usually each processable |
|
| 16 |
-// line in the Dockerfile. Each step is numbered and certain actions are taken |
|
| 17 |
-// before and after each step, such as creating an image ID and removing temporary |
|
| 18 |
-// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which |
|
| 19 |
-// includes its own set of steps (usually only one of them). |
|
| 20 |
-package builder |
|
| 21 |
- |
|
| 22 |
-import ( |
|
| 23 |
- "fmt" |
|
| 24 |
- "io" |
|
| 25 |
- "os" |
|
| 26 |
- "path/filepath" |
|
| 27 |
- "runtime" |
|
| 28 |
- "strings" |
|
| 29 |
- |
|
| 30 |
- "github.com/Sirupsen/logrus" |
|
| 31 |
- "github.com/docker/docker/api" |
|
| 32 |
- "github.com/docker/docker/builder/command" |
|
| 33 |
- "github.com/docker/docker/builder/parser" |
|
| 34 |
- "github.com/docker/docker/cliconfig" |
|
| 35 |
- "github.com/docker/docker/daemon" |
|
| 36 |
- "github.com/docker/docker/pkg/fileutils" |
|
| 37 |
- "github.com/docker/docker/pkg/streamformatter" |
|
| 38 |
- "github.com/docker/docker/pkg/stringid" |
|
| 39 |
- "github.com/docker/docker/pkg/symlink" |
|
| 40 |
- "github.com/docker/docker/pkg/tarsum" |
|
| 41 |
- "github.com/docker/docker/pkg/ulimit" |
|
| 42 |
- "github.com/docker/docker/runconfig" |
|
| 43 |
- "github.com/docker/docker/utils" |
|
| 44 |
-) |
|
| 45 |
- |
|
| 46 |
-// Environment variable interpolation will happen on these statements only. |
|
| 47 |
-var replaceEnvAllowed = map[string]struct{}{
|
|
| 48 |
- command.Env: {},
|
|
| 49 |
- command.Label: {},
|
|
| 50 |
- command.Add: {},
|
|
| 51 |
- command.Copy: {},
|
|
| 52 |
- command.Workdir: {},
|
|
| 53 |
- command.Expose: {},
|
|
| 54 |
- command.Volume: {},
|
|
| 55 |
- command.User: {},
|
|
| 56 |
- command.StopSignal: {},
|
|
| 57 |
- command.Arg: {},
|
|
| 58 |
-} |
|
| 59 |
- |
|
| 60 |
-var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error |
|
| 61 |
- |
|
| 62 |
-func init() {
|
|
| 63 |
- evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{
|
|
| 64 |
- command.Env: env, |
|
| 65 |
- command.Label: label, |
|
| 66 |
- command.Maintainer: maintainer, |
|
| 67 |
- command.Add: add, |
|
| 68 |
- command.Copy: dispatchCopy, // copy() is a go builtin |
|
| 69 |
- command.From: from, |
|
| 70 |
- command.Onbuild: onbuild, |
|
| 71 |
- command.Workdir: workdir, |
|
| 72 |
- command.Run: run, |
|
| 73 |
- command.Cmd: cmd, |
|
| 74 |
- command.Entrypoint: entrypoint, |
|
| 75 |
- command.Expose: expose, |
|
| 76 |
- command.Volume: volume, |
|
| 77 |
- command.User: user, |
|
| 78 |
- command.StopSignal: stopSignal, |
|
| 79 |
- command.Arg: arg, |
|
| 80 |
- } |
|
| 81 |
-} |
|
| 82 |
- |
|
| 83 |
-// builder is an internal struct, used to maintain configuration of the Dockerfile's |
|
| 84 |
-// processing as it evaluates the parsing result. |
|
| 85 |
-type builder struct {
|
|
| 86 |
- Daemon *daemon.Daemon |
|
| 87 |
- |
|
| 88 |
- // effectively stdio for the run. Because it is not stdio, I said |
|
| 89 |
- // "Effectively". Do not use stdio anywhere in this package for any reason. |
|
| 90 |
- OutStream io.Writer |
|
| 91 |
- ErrStream io.Writer |
|
| 92 |
- |
|
| 93 |
- Verbose bool |
|
| 94 |
- UtilizeCache bool |
|
| 95 |
- cacheBusted bool |
|
| 96 |
- |
|
| 97 |
- // controls how images and containers are handled between steps. |
|
| 98 |
- Remove bool |
|
| 99 |
- ForceRemove bool |
|
| 100 |
- Pull bool |
|
| 101 |
- |
|
| 102 |
- // set this to true if we want the builder to not commit between steps. |
|
| 103 |
- // This is useful when we only want to use the evaluator table to generate |
|
| 104 |
- // the final configs of the Dockerfile but dont want the layers |
|
| 105 |
- disableCommit bool |
|
| 106 |
- |
|
| 107 |
- // Registry server auth configs used to pull images when handling `FROM`. |
|
| 108 |
- AuthConfigs map[string]cliconfig.AuthConfig |
|
| 109 |
- |
|
| 110 |
- // Deprecated, original writer used for ImagePull. To be removed. |
|
| 111 |
- OutOld io.Writer |
|
| 112 |
- StreamFormatter *streamformatter.StreamFormatter |
|
| 113 |
- |
|
| 114 |
- Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. |
|
| 115 |
- |
|
| 116 |
- buildArgs map[string]string // build-time args received in build context for expansion/substitution and commands in 'run'. |
|
| 117 |
- allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. |
|
| 118 |
- |
|
| 119 |
- // both of these are controlled by the Remove and ForceRemove options in BuildOpts |
|
| 120 |
- TmpContainers map[string]struct{} // a map of containers used for removes
|
|
| 121 |
- |
|
| 122 |
- dockerfileName string // name of Dockerfile |
|
| 123 |
- dockerfile *parser.Node // the syntax tree of the dockerfile |
|
| 124 |
- image string // image name for commit processing |
|
| 125 |
- maintainer string // maintainer name. could probably be removed. |
|
| 126 |
- cmdSet bool // indicates if CMD was set in current Dockerfile |
|
| 127 |
- BuilderFlags *BFlags // current cmd's BuilderFlags - temporary |
|
| 128 |
- context tarsum.TarSum // the context is a tarball that is uploaded by the client |
|
| 129 |
- contextPath string // the path of the temporary directory the local context is unpacked to (server side) |
|
| 130 |
- noBaseImage bool // indicates that this build does not start from any base image, but is being built from an empty file system. |
|
| 131 |
- |
|
| 132 |
- // Set resource restrictions for build containers |
|
| 133 |
- cpuSetCpus string |
|
| 134 |
- cpuSetMems string |
|
| 135 |
- cpuShares int64 |
|
| 136 |
- cpuPeriod int64 |
|
| 137 |
- cpuQuota int64 |
|
| 138 |
- cgroupParent string |
|
| 139 |
- memory int64 |
|
| 140 |
- memorySwap int64 |
|
| 141 |
- ulimits []*ulimit.Ulimit |
|
| 142 |
- |
|
| 143 |
- cancelled <-chan struct{} // When closed, job was cancelled.
|
|
| 144 |
- |
|
| 145 |
- activeImages []string |
|
| 146 |
- id string // Used to hold reference images |
|
| 147 |
-} |
|
| 148 |
- |
|
| 149 |
-// Run the builder with the context. This is the lynchpin of this package. This |
|
| 150 |
-// will (barring errors): |
|
| 151 |
-// |
|
| 152 |
-// * call readContext() which will set up the temporary directory and unpack |
|
| 153 |
-// the context into it. |
|
| 154 |
-// * read the dockerfile |
|
| 155 |
-// * parse the dockerfile |
|
| 156 |
-// * walk the parse tree and execute it by dispatching to handlers. If Remove |
|
| 157 |
-// or ForceRemove is set, additional cleanup around containers happens after |
|
| 158 |
-// processing. |
|
| 159 |
-// * Print a happy message and return the image ID. |
|
| 160 |
-// |
|
| 161 |
-func (b *builder) Run(context io.Reader) (string, error) {
|
|
| 162 |
- if err := b.readContext(context); err != nil {
|
|
| 163 |
- return "", err |
|
| 164 |
- } |
|
| 165 |
- |
|
| 166 |
- defer func() {
|
|
| 167 |
- if err := os.RemoveAll(b.contextPath); err != nil {
|
|
| 168 |
- logrus.Debugf("[BUILDER] failed to remove temporary context: %s", err)
|
|
| 169 |
- } |
|
| 170 |
- }() |
|
| 171 |
- |
|
| 172 |
- if err := b.readDockerfile(); err != nil {
|
|
| 173 |
- return "", err |
|
| 174 |
- } |
|
| 175 |
- |
|
| 176 |
- // some initializations that would not have been supplied by the caller. |
|
| 177 |
- b.Config = &runconfig.Config{}
|
|
| 178 |
- |
|
| 179 |
- b.TmpContainers = map[string]struct{}{}
|
|
| 180 |
- |
|
| 181 |
- for i, n := range b.dockerfile.Children {
|
|
| 182 |
- select {
|
|
| 183 |
- case <-b.cancelled: |
|
| 184 |
- logrus.Debug("Builder: build cancelled!")
|
|
| 185 |
- fmt.Fprintf(b.OutStream, "Build cancelled") |
|
| 186 |
- return "", fmt.Errorf("Build cancelled")
|
|
| 187 |
- default: |
|
| 188 |
- // Not cancelled yet, keep going... |
|
| 189 |
- } |
|
| 190 |
- if err := b.dispatch(i, n); err != nil {
|
|
| 191 |
- if b.ForceRemove {
|
|
| 192 |
- b.clearTmp() |
|
| 193 |
- } |
|
| 194 |
- return "", err |
|
| 195 |
- } |
|
| 196 |
- fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image)) |
|
| 197 |
- if b.Remove {
|
|
| 198 |
- b.clearTmp() |
|
| 199 |
- } |
|
| 200 |
- } |
|
| 201 |
- |
|
| 202 |
- // check if there are any leftover build-args that were passed but not |
|
| 203 |
- // consumed during build. Return an error, if there are any. |
|
| 204 |
- leftoverArgs := []string{}
|
|
| 205 |
- for arg := range b.buildArgs {
|
|
| 206 |
- if !b.isBuildArgAllowed(arg) {
|
|
| 207 |
- leftoverArgs = append(leftoverArgs, arg) |
|
| 208 |
- } |
|
| 209 |
- } |
|
| 210 |
- if len(leftoverArgs) > 0 {
|
|
| 211 |
- return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs)
|
|
| 212 |
- } |
|
| 213 |
- |
|
| 214 |
- if b.image == "" {
|
|
| 215 |
- return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
|
|
| 216 |
- } |
|
| 217 |
- |
|
| 218 |
- fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image)) |
|
| 219 |
- return b.image, nil |
|
| 220 |
-} |
|
| 221 |
- |
|
| 222 |
-// Reads a Dockerfile from the current context. It assumes that the |
|
| 223 |
-// 'filename' is a relative path from the root of the context |
|
| 224 |
-func (b *builder) readDockerfile() error {
|
|
| 225 |
- // If no -f was specified then look for 'Dockerfile'. If we can't find |
|
| 226 |
- // that then look for 'dockerfile'. If neither are found then default |
|
| 227 |
- // back to 'Dockerfile' and use that in the error message. |
|
| 228 |
- if b.dockerfileName == "" {
|
|
| 229 |
- b.dockerfileName = api.DefaultDockerfileName |
|
| 230 |
- tmpFN := filepath.Join(b.contextPath, api.DefaultDockerfileName) |
|
| 231 |
- if _, err := os.Lstat(tmpFN); err != nil {
|
|
| 232 |
- tmpFN = filepath.Join(b.contextPath, strings.ToLower(api.DefaultDockerfileName)) |
|
| 233 |
- if _, err := os.Lstat(tmpFN); err == nil {
|
|
| 234 |
- b.dockerfileName = strings.ToLower(api.DefaultDockerfileName) |
|
| 235 |
- } |
|
| 236 |
- } |
|
| 237 |
- } |
|
| 238 |
- |
|
| 239 |
- origFile := b.dockerfileName |
|
| 240 |
- |
|
| 241 |
- filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath) |
|
| 242 |
- if err != nil {
|
|
| 243 |
- return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile)
|
|
| 244 |
- } |
|
| 245 |
- |
|
| 246 |
- fi, err := os.Lstat(filename) |
|
| 247 |
- if os.IsNotExist(err) {
|
|
| 248 |
- return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile)
|
|
| 249 |
- } |
|
| 250 |
- if fi.Size() == 0 {
|
|
| 251 |
- return fmt.Errorf("The Dockerfile (%s) cannot be empty", origFile)
|
|
| 252 |
- } |
|
| 253 |
- |
|
| 254 |
- f, err := os.Open(filename) |
|
| 255 |
- if err != nil {
|
|
| 256 |
- return err |
|
| 257 |
- } |
|
| 258 |
- |
|
| 259 |
- b.dockerfile, err = parser.Parse(f) |
|
| 260 |
- f.Close() |
|
| 261 |
- |
|
| 262 |
- if err != nil {
|
|
| 263 |
- return err |
|
| 264 |
- } |
|
| 265 |
- |
|
| 266 |
- // After the Dockerfile has been parsed, we need to check the .dockerignore |
|
| 267 |
- // file for either "Dockerfile" or ".dockerignore", and if either are |
|
| 268 |
- // present then erase them from the build context. These files should never |
|
| 269 |
- // have been sent from the client but we did send them to make sure that |
|
| 270 |
- // we had the Dockerfile to actually parse, and then we also need the |
|
| 271 |
- // .dockerignore file to know whether either file should be removed. |
|
| 272 |
- // Note that this assumes the Dockerfile has been read into memory and |
|
| 273 |
- // is now safe to be removed. |
|
| 274 |
- |
|
| 275 |
- excludes, _ := utils.ReadDockerIgnore(filepath.Join(b.contextPath, ".dockerignore")) |
|
| 276 |
- if rm, _ := fileutils.Matches(".dockerignore", excludes); rm == true {
|
|
| 277 |
- os.Remove(filepath.Join(b.contextPath, ".dockerignore")) |
|
| 278 |
- b.context.(tarsum.BuilderContext).Remove(".dockerignore")
|
|
| 279 |
- } |
|
| 280 |
- if rm, _ := fileutils.Matches(b.dockerfileName, excludes); rm == true {
|
|
| 281 |
- os.Remove(filepath.Join(b.contextPath, b.dockerfileName)) |
|
| 282 |
- b.context.(tarsum.BuilderContext).Remove(b.dockerfileName) |
|
| 283 |
- } |
|
| 284 |
- |
|
| 285 |
- return nil |
|
| 286 |
-} |
|
| 287 |
- |
|
| 288 |
-// determine if build arg is part of built-in args or user |
|
| 289 |
-// defined args in Dockerfile at any point in time. |
|
| 290 |
-func (b *builder) isBuildArgAllowed(arg string) bool {
|
|
| 291 |
- if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
|
|
| 292 |
- return true |
|
| 293 |
- } |
|
| 294 |
- if _, ok := b.allowedBuildArgs[arg]; ok {
|
|
| 295 |
- return true |
|
| 296 |
- } |
|
| 297 |
- return false |
|
| 298 |
-} |
|
| 299 |
- |
|
| 300 |
-// This method is the entrypoint to all statement handling routines. |
|
| 301 |
-// |
|
| 302 |
-// Almost all nodes will have this structure: |
|
| 303 |
-// Child[Node, Node, Node] where Child is from parser.Node.Children and each |
|
| 304 |
-// node comes from parser.Node.Next. This forms a "line" with a statement and |
|
| 305 |
-// arguments and we process them in this normalized form by hitting |
|
| 306 |
-// evaluateTable with the leaf nodes of the command and the Builder object. |
|
| 307 |
-// |
|
| 308 |
-// ONBUILD is a special case; in this case the parser will emit: |
|
| 309 |
-// Child[Node, Child[Node, Node...]] where the first node is the literal |
|
| 310 |
-// "onbuild" and the child entrypoint is the command of the ONBUILD statement, |
|
| 311 |
-// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to |
|
| 312 |
-// deal with that, at least until it becomes more of a general concern with new |
|
| 313 |
-// features. |
|
| 314 |
-func (b *builder) dispatch(stepN int, ast *parser.Node) error {
|
|
| 315 |
- cmd := ast.Value |
|
| 316 |
- |
|
| 317 |
- // To ensure the user is given a decent error message if the platform |
|
| 318 |
- // on which the daemon is running does not support a builder command. |
|
| 319 |
- if err := platformSupports(strings.ToLower(cmd)); err != nil {
|
|
| 320 |
- return err |
|
| 321 |
- } |
|
| 322 |
- |
|
| 323 |
- attrs := ast.Attributes |
|
| 324 |
- original := ast.Original |
|
| 325 |
- flags := ast.Flags |
|
| 326 |
- strs := []string{}
|
|
| 327 |
- msg := fmt.Sprintf("Step %d : %s", stepN+1, strings.ToUpper(cmd))
|
|
| 328 |
- |
|
| 329 |
- if len(ast.Flags) > 0 {
|
|
| 330 |
- msg += " " + strings.Join(ast.Flags, " ") |
|
| 331 |
- } |
|
| 332 |
- |
|
| 333 |
- if cmd == "onbuild" {
|
|
| 334 |
- if ast.Next == nil {
|
|
| 335 |
- return fmt.Errorf("ONBUILD requires at least one argument")
|
|
| 336 |
- } |
|
| 337 |
- ast = ast.Next.Children[0] |
|
| 338 |
- strs = append(strs, ast.Value) |
|
| 339 |
- msg += " " + ast.Value |
|
| 340 |
- |
|
| 341 |
- if len(ast.Flags) > 0 {
|
|
| 342 |
- msg += " " + strings.Join(ast.Flags, " ") |
|
| 343 |
- } |
|
| 344 |
- |
|
| 345 |
- } |
|
| 346 |
- |
|
| 347 |
- // count the number of nodes that we are going to traverse first |
|
| 348 |
- // so we can pre-create the argument and message array. This speeds up the |
|
| 349 |
- // allocation of those lists a lot when they have a lot of arguments |
|
| 350 |
- cursor := ast |
|
| 351 |
- var n int |
|
| 352 |
- for cursor.Next != nil {
|
|
| 353 |
- cursor = cursor.Next |
|
| 354 |
- n++ |
|
| 355 |
- } |
|
| 356 |
- l := len(strs) |
|
| 357 |
- strList := make([]string, n+l) |
|
| 358 |
- copy(strList, strs) |
|
| 359 |
- msgList := make([]string, n) |
|
| 360 |
- |
|
| 361 |
- var i int |
|
| 362 |
- // Append the build-time args to config-environment. |
|
| 363 |
- // This allows builder config to override the variables, making the behavior similar to |
|
| 364 |
- // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build |
|
| 365 |
- // context. But `ENV foo $foo` will use the value from build context if one |
|
| 366 |
- // hasn't already been defined by a previous ENV primitive. |
|
| 367 |
- // Note, we get this behavior because we know that ProcessWord() will |
|
| 368 |
- // stop on the first occurrence of a variable name and not notice |
|
| 369 |
- // a subsequent one. So, putting the buildArgs list after the Config.Env |
|
| 370 |
- // list, in 'envs', is safe. |
|
| 371 |
- envs := b.Config.Env |
|
| 372 |
- for key, val := range b.buildArgs {
|
|
| 373 |
- if !b.isBuildArgAllowed(key) {
|
|
| 374 |
- // skip build-args that are not in allowed list, meaning they have |
|
| 375 |
- // not been defined by an "ARG" Dockerfile command yet. |
|
| 376 |
- // This is an error condition but only if there is no "ARG" in the entire |
|
| 377 |
- // Dockerfile, so we'll generate any necessary errors after we parsed |
|
| 378 |
- // the entire file (see 'leftoverArgs' processing in evaluator.go ) |
|
| 379 |
- continue |
|
| 380 |
- } |
|
| 381 |
- envs = append(envs, fmt.Sprintf("%s=%s", key, val))
|
|
| 382 |
- } |
|
| 383 |
- for ast.Next != nil {
|
|
| 384 |
- ast = ast.Next |
|
| 385 |
- var str string |
|
| 386 |
- str = ast.Value |
|
| 387 |
- if _, ok := replaceEnvAllowed[cmd]; ok {
|
|
| 388 |
- var err error |
|
| 389 |
- str, err = ProcessWord(ast.Value, envs) |
|
| 390 |
- if err != nil {
|
|
| 391 |
- return err |
|
| 392 |
- } |
|
| 393 |
- } |
|
| 394 |
- strList[i+l] = str |
|
| 395 |
- msgList[i] = ast.Value |
|
| 396 |
- i++ |
|
| 397 |
- } |
|
| 398 |
- |
|
| 399 |
- msg += " " + strings.Join(msgList, " ") |
|
| 400 |
- fmt.Fprintln(b.OutStream, msg) |
|
| 401 |
- |
|
| 402 |
- // XXX yes, we skip any cmds that are not valid; the parser should have |
|
| 403 |
- // picked these out already. |
|
| 404 |
- if f, ok := evaluateTable[cmd]; ok {
|
|
| 405 |
- b.BuilderFlags = NewBFlags() |
|
| 406 |
- b.BuilderFlags.Args = flags |
|
| 407 |
- return f(b, strList, attrs, original) |
|
| 408 |
- } |
|
| 409 |
- |
|
| 410 |
- return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
|
|
| 411 |
-} |
|
| 412 |
- |
|
| 413 |
-// platformSupports is a short-term function to give users a quality error |
|
| 414 |
-// message if a Dockerfile uses a command not supported on the platform. |
|
| 415 |
-func platformSupports(command string) error {
|
|
| 416 |
- if runtime.GOOS != "windows" {
|
|
| 417 |
- return nil |
|
| 418 |
- } |
|
| 419 |
- switch command {
|
|
| 420 |
- case "expose", "volume", "user", "stopsignal", "arg": |
|
| 421 |
- return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
|
|
| 422 |
- } |
|
| 423 |
- return nil |
|
| 424 |
-} |
| 425 | 1 |
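
For anyone verifying that the move preserved behavior: the heart of the deleted evaluator is the evaluateTable lookup keyed on the lower-cased instruction name. A stripped-down, self-contained sketch of that dispatch shape; the names and the simplified handler signature here are mine, and the real handlers also receive the node attributes and the original line:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // builder stands in for the real struct from this package.
    type builder struct{ config []string }

    type handler func(b *builder, args []string) error

    // table mirrors the evaluateTable lookup.
    var table = map[string]handler{
    	"env": func(b *builder, args []string) error {
    		b.config = append(b.config, args...)
    		return nil
    	},
    }

    func dispatch(b *builder, cmd string, args []string) error {
    	f, ok := table[strings.ToLower(cmd)]
    	if !ok {
    		return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
    	}
    	return f(b, args)
    }

    func main() {
    	b := &builder{}
    	fmt.Println(dispatch(b, "ENV", []string{"FOO=bar"}), b.config)
    	fmt.Println(dispatch(b, "BOGUS", nil)) // Unknown instruction: BOGUS
    }
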
deleted file mode 100644 |
| ... | ... |
@@ -1,811 +0,0 @@ |
| 1 |
-package builder |
|
| 2 |
- |
|
| 3 |
-// internals for handling commands. Covers many areas and a lot of |
|
| 4 |
-// non-contiguous functionality. Please read the comments. |
|
| 5 |
- |
|
| 6 |
-import ( |
|
| 7 |
- "crypto/sha256" |
|
| 8 |
- "encoding/hex" |
|
| 9 |
- "fmt" |
|
| 10 |
- "io" |
|
| 11 |
- "io/ioutil" |
|
| 12 |
- "net/http" |
|
| 13 |
- "net/url" |
|
| 14 |
- "os" |
|
| 15 |
- "path/filepath" |
|
| 16 |
- "runtime" |
|
| 17 |
- "sort" |
|
| 18 |
- "strings" |
|
| 19 |
- "time" |
|
| 20 |
- |
|
| 21 |
- "github.com/Sirupsen/logrus" |
|
| 22 |
- "github.com/docker/docker/builder/parser" |
|
| 23 |
- "github.com/docker/docker/cliconfig" |
|
| 24 |
- "github.com/docker/docker/daemon" |
|
| 25 |
- "github.com/docker/docker/graph" |
|
| 26 |
- "github.com/docker/docker/image" |
|
| 27 |
- "github.com/docker/docker/pkg/archive" |
|
| 28 |
- "github.com/docker/docker/pkg/chrootarchive" |
|
| 29 |
- "github.com/docker/docker/pkg/httputils" |
|
| 30 |
- "github.com/docker/docker/pkg/ioutils" |
|
| 31 |
- "github.com/docker/docker/pkg/jsonmessage" |
|
| 32 |
- "github.com/docker/docker/pkg/parsers" |
|
| 33 |
- "github.com/docker/docker/pkg/progressreader" |
|
| 34 |
- "github.com/docker/docker/pkg/stringid" |
|
| 35 |
- "github.com/docker/docker/pkg/stringutils" |
|
| 36 |
- "github.com/docker/docker/pkg/symlink" |
|
| 37 |
- "github.com/docker/docker/pkg/system" |
|
| 38 |
- "github.com/docker/docker/pkg/tarsum" |
|
| 39 |
- "github.com/docker/docker/pkg/urlutil" |
|
| 40 |
- "github.com/docker/docker/registry" |
|
| 41 |
- "github.com/docker/docker/runconfig" |
|
| 42 |
-) |
|
| 43 |
- |
|
| 44 |
-func (b *builder) readContext(context io.Reader) (err error) {
|
|
| 45 |
- tmpdirPath, err := getTempDir("", "docker-build")
|
|
| 46 |
- if err != nil {
|
|
| 47 |
- return |
|
| 48 |
- } |
|
| 49 |
- |
|
| 50 |
- // Make sure we clean-up upon error. In the happy case the caller |
|
| 51 |
- // is expected to manage the clean-up |
|
| 52 |
- defer func() {
|
|
| 53 |
- if err != nil {
|
|
| 54 |
- if e := os.RemoveAll(tmpdirPath); e != nil {
|
|
| 55 |
- logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
|
|
| 56 |
- } |
|
| 57 |
- } |
|
| 58 |
- }() |
|
| 59 |
- |
|
| 60 |
- decompressedStream, err := archive.DecompressStream(context) |
|
| 61 |
- if err != nil {
|
|
| 62 |
- return |
|
| 63 |
- } |
|
| 64 |
- |
|
| 65 |
- if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
|
|
| 66 |
- return |
|
| 67 |
- } |
|
| 68 |
- |
|
| 69 |
- if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
|
|
| 70 |
- return |
|
| 71 |
- } |
|
| 72 |
- |
|
| 73 |
- b.contextPath = tmpdirPath |
|
| 74 |
- return |
|
| 75 |
-} |
|
| 76 |
- |
|
| 77 |
-func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
|
|
| 78 |
- if b.disableCommit {
|
|
| 79 |
- return nil |
|
| 80 |
- } |
|
| 81 |
- if b.image == "" && !b.noBaseImage {
|
|
| 82 |
- return fmt.Errorf("Please provide a source image with `from` prior to commit")
|
|
| 83 |
- } |
|
| 84 |
- b.Config.Image = b.image |
|
| 85 |
- if id == "" {
|
|
| 86 |
- cmd := b.Config.Cmd |
|
| 87 |
- if runtime.GOOS != "windows" {
|
|
| 88 |
- b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
|
|
| 89 |
- } else {
|
|
| 90 |
- b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", "REM (nop) "+comment)
|
|
| 91 |
- } |
|
| 92 |
- defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 93 |
- |
|
| 94 |
- hit, err := b.probeCache() |
|
| 95 |
- if err != nil {
|
|
| 96 |
- return err |
|
| 97 |
- } |
|
| 98 |
- if hit {
|
|
| 99 |
- return nil |
|
| 100 |
- } |
|
| 101 |
- |
|
| 102 |
- container, err := b.create() |
|
| 103 |
- if err != nil {
|
|
| 104 |
- return err |
|
| 105 |
- } |
|
| 106 |
- id = container.ID |
|
| 107 |
- |
|
| 108 |
- if err := container.Mount(); err != nil {
|
|
| 109 |
- return err |
|
| 110 |
- } |
|
| 111 |
- defer container.Unmount() |
|
| 112 |
- } |
|
| 113 |
- container, err := b.Daemon.Get(id) |
|
| 114 |
- if err != nil {
|
|
| 115 |
- return err |
|
| 116 |
- } |
|
| 117 |
- |
|
| 118 |
- // Note: Actually copy the struct |
|
| 119 |
- autoConfig := *b.Config |
|
| 120 |
- autoConfig.Cmd = autoCmd |
|
| 121 |
- |
|
| 122 |
- commitCfg := &daemon.ContainerCommitConfig{
|
|
| 123 |
- Author: b.maintainer, |
|
| 124 |
- Pause: true, |
|
| 125 |
- Config: &autoConfig, |
|
| 126 |
- } |
|
| 127 |
- |
|
| 128 |
- // Commit the container |
|
| 129 |
- image, err := b.Daemon.Commit(container, commitCfg) |
|
| 130 |
- if err != nil {
|
|
| 131 |
- return err |
|
| 132 |
- } |
|
| 133 |
- b.Daemon.Graph().Retain(b.id, image.ID) |
|
| 134 |
- b.activeImages = append(b.activeImages, image.ID) |
|
| 135 |
- b.image = image.ID |
|
| 136 |
- return nil |
|
| 137 |
-} |
|
| 138 |
- |
|
| 139 |
-type copyInfo struct {
|
|
| 140 |
- origPath string |
|
| 141 |
- destPath string |
|
| 142 |
- hash string |
|
| 143 |
- decompress bool |
|
| 144 |
- tmpDir string |
|
| 145 |
-} |
|
| 146 |
- |
|
| 147 |
-func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
|
|
| 148 |
- if b.context == nil {
|
|
| 149 |
- return fmt.Errorf("No context given. Impossible to use %s", cmdName)
|
|
| 150 |
- } |
|
| 151 |
- |
|
| 152 |
- if len(args) < 2 {
|
|
| 153 |
- return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
|
|
| 154 |
- } |
|
| 155 |
- |
|
| 156 |
- // Work in daemon-specific filepath semantics |
|
| 157 |
- dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest |
|
| 158 |
- |
|
| 159 |
- copyInfos := []*copyInfo{}
|
|
| 160 |
- |
|
| 161 |
- b.Config.Image = b.image |
|
| 162 |
- |
|
| 163 |
- defer func() {
|
|
| 164 |
- for _, ci := range copyInfos {
|
|
| 165 |
- if ci.tmpDir != "" {
|
|
| 166 |
- os.RemoveAll(ci.tmpDir) |
|
| 167 |
- } |
|
| 168 |
- } |
|
| 169 |
- }() |
|
| 170 |
- |
|
| 171 |
- // Loop through each src file and calculate the info we need to |
|
| 172 |
- // do the copy (e.g. hash value if cached). Don't actually do |
|
| 173 |
- // the copy until we've looked at all src files |
|
| 174 |
- for _, orig := range args[0 : len(args)-1] {
|
|
| 175 |
- if err := calcCopyInfo( |
|
| 176 |
- b, |
|
| 177 |
- cmdName, |
|
| 178 |
- ©Infos, |
|
| 179 |
- orig, |
|
| 180 |
- dest, |
|
| 181 |
- allowRemote, |
|
| 182 |
- allowDecompression, |
|
| 183 |
- true, |
|
| 184 |
- ); err != nil {
|
|
| 185 |
- return err |
|
| 186 |
- } |
|
| 187 |
- } |
|
| 188 |
- |
|
| 189 |
- if len(copyInfos) == 0 {
|
|
| 190 |
- return fmt.Errorf("No source files were specified")
|
|
| 191 |
- } |
|
| 192 |
- if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
|
|
| 193 |
- return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
|
|
| 194 |
- } |
|
| 195 |
- |
|
| 196 |
- // For backwards compat, if there's just one CI then use it as the |
|
| 197 |
- // cache look-up string, otherwise hash 'em all into one |
|
| 198 |
- var srcHash string |
|
| 199 |
- var origPaths string |
|
| 200 |
- |
|
| 201 |
- if len(copyInfos) == 1 {
|
|
| 202 |
- srcHash = copyInfos[0].hash |
|
| 203 |
- origPaths = copyInfos[0].origPath |
|
| 204 |
- } else {
|
|
| 205 |
- var hashs []string |
|
| 206 |
- var origs []string |
|
| 207 |
- for _, ci := range copyInfos {
|
|
| 208 |
- hashs = append(hashs, ci.hash) |
|
| 209 |
- origs = append(origs, ci.origPath) |
|
| 210 |
- } |
|
| 211 |
- hasher := sha256.New() |
|
| 212 |
- hasher.Write([]byte(strings.Join(hashs, ","))) |
|
| 213 |
- srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) |
|
| 214 |
- origPaths = strings.Join(origs, " ") |
|
| 215 |
- } |
|
| 216 |
- |
|
| 217 |
- cmd := b.Config.Cmd |
|
| 218 |
- if runtime.GOOS != "windows" {
|
|
| 219 |
- b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 220 |
- } else {
|
|
| 221 |
- b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 222 |
- } |
|
| 223 |
- defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 224 |
- |
|
| 225 |
- hit, err := b.probeCache() |
|
| 226 |
- if err != nil {
|
|
| 227 |
- return err |
|
| 228 |
- } |
|
| 229 |
- |
|
| 230 |
- if hit {
|
|
| 231 |
- return nil |
|
| 232 |
- } |
|
| 233 |
- |
|
| 234 |
- ccr, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
|
|
| 235 |
- if err != nil {
|
|
| 236 |
- return err |
|
| 237 |
- } |
|
| 238 |
- container, err := b.Daemon.Get(ccr.ID) |
|
| 239 |
- if err != nil {
|
|
| 240 |
- return err |
|
| 241 |
- } |
|
| 242 |
- |
|
| 243 |
- b.TmpContainers[container.ID] = struct{}{}
|
|
| 244 |
- |
|
| 245 |
- if err := container.Mount(); err != nil {
|
|
| 246 |
- return err |
|
| 247 |
- } |
|
| 248 |
- defer container.Unmount() |
|
| 249 |
- |
|
| 250 |
- for _, ci := range copyInfos {
|
|
| 251 |
- if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
|
|
| 252 |
- return err |
|
| 253 |
- } |
|
| 254 |
- } |
|
| 255 |
- |
|
| 256 |
- if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
|
|
| 257 |
- return err |
|
| 258 |
- } |
|
| 259 |
- return nil |
|
| 260 |
-} |
|
| 261 |
- |
|
| 262 |
-func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
|
|
| 263 |
- |
|
| 264 |
- // Work in daemon-specific OS filepath semantics. However, we save |
|
| 265 |
- // the origPath passed in here, as it might also be a URL which |
|
| 266 |
- // we need to check for in this function. |
|
| 267 |
- passedInOrigPath := origPath |
|
| 268 |
- origPath = filepath.FromSlash(origPath) |
|
| 269 |
- destPath = filepath.FromSlash(destPath) |
|
| 270 |
- |
|
| 271 |
- if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
|
|
| 272 |
- origPath = origPath[1:] |
|
| 273 |
- } |
|
| 274 |
- origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) |
|
| 275 |
- |
|
| 276 |
- // Twiddle the destPath when its a relative path - meaning, make it |
|
| 277 |
- // relative to the WORKINGDIR |
|
| 278 |
- if !system.IsAbs(destPath) {
|
|
| 279 |
- hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator)) |
|
| 280 |
- destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath) |
|
| 281 |
- |
|
| 282 |
- // Make sure we preserve any trailing slash |
|
| 283 |
- if hasSlash {
|
|
| 284 |
- destPath += string(os.PathSeparator) |
|
| 285 |
- } |
|
| 286 |
- } |
|
| 287 |
- |
|
| 288 |
- // In the remote/URL case, download it and gen its hashcode |
|
| 289 |
- if urlutil.IsURL(passedInOrigPath) {
|
|
| 290 |
- |
|
| 291 |
- // As it's a URL, we go back to processing on what was passed in |
|
| 292 |
- // to this function |
|
| 293 |
- origPath = passedInOrigPath |
|
| 294 |
- |
|
| 295 |
- if !allowRemote {
|
|
| 296 |
- return fmt.Errorf("Source can't be a URL for %s", cmdName)
|
|
| 297 |
- } |
|
| 298 |
- |
|
| 299 |
- ci := copyInfo{}
|
|
| 300 |
- ci.origPath = origPath |
|
| 301 |
- ci.hash = origPath // default to this but can change |
|
| 302 |
- ci.destPath = destPath |
|
| 303 |
- ci.decompress = false |
|
| 304 |
- *cInfos = append(*cInfos, &ci) |
|
| 305 |
- |
|
| 306 |
- // Initiate the download |
|
| 307 |
- resp, err := httputils.Download(ci.origPath) |
|
| 308 |
- if err != nil {
|
|
| 309 |
- return err |
|
| 310 |
- } |
|
| 311 |
- |
|
| 312 |
- // Create a tmp dir |
|
| 313 |
- tmpDirName, err := getTempDir(b.contextPath, "docker-remote") |
|
| 314 |
- if err != nil {
|
|
| 315 |
- return err |
|
| 316 |
- } |
|
| 317 |
- ci.tmpDir = tmpDirName |
|
| 318 |
- |
|
| 319 |
- // Create a tmp file within our tmp dir |
|
| 320 |
- tmpFileName := filepath.Join(tmpDirName, "tmp") |
|
| 321 |
- tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) |
|
| 322 |
- if err != nil {
|
|
| 323 |
- return err |
|
| 324 |
- } |
|
| 325 |
- |
|
| 326 |
- // Download and dump result to tmp file |
|
| 327 |
- if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
|
|
| 328 |
- In: resp.Body, |
|
| 329 |
- Out: b.OutOld, |
|
| 330 |
- Formatter: b.StreamFormatter, |
|
| 331 |
- Size: resp.ContentLength, |
|
| 332 |
- NewLines: true, |
|
| 333 |
- ID: "", |
|
| 334 |
- Action: "Downloading", |
|
| 335 |
- })); err != nil {
|
|
| 336 |
- tmpFile.Close() |
|
| 337 |
- return err |
|
| 338 |
- } |
|
| 339 |
- fmt.Fprintf(b.OutStream, "\n") |
|
| 340 |
- tmpFile.Close() |
|
| 341 |
- |
|
| 342 |
- // Set the mtime to the Last-Modified header value if present |
|
| 343 |
- // Otherwise just remove atime and mtime |
|
| 344 |
- mTime := time.Time{}
|
|
| 345 |
- |
|
| 346 |
- lastMod := resp.Header.Get("Last-Modified")
|
|
| 347 |
- if lastMod != "" {
|
|
| 348 |
- // If we can't parse it then just let it default to 'zero' |
|
| 349 |
- // otherwise use the parsed time value |
|
| 350 |
- if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
|
| 351 |
- mTime = parsedMTime |
|
| 352 |
- } |
|
| 353 |
- } |
|
| 354 |
- |
|
| 355 |
- if err := system.Chtimes(tmpFileName, time.Time{}, mTime); err != nil {
|
|
| 356 |
- return err |
|
| 357 |
- } |
|
| 358 |
- |
|
| 359 |
- ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) |
|
| 360 |
- |
|
| 361 |
- // If the destination is a directory, figure out the filename. |
|
| 362 |
- if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
|
|
| 363 |
- u, err := url.Parse(origPath) |
|
| 364 |
- if err != nil {
|
|
| 365 |
- return err |
|
| 366 |
- } |
|
| 367 |
- path := filepath.FromSlash(u.Path) // Ensure in platform semantics |
|
| 368 |
- if strings.HasSuffix(path, string(os.PathSeparator)) {
|
|
| 369 |
- path = path[:len(path)-1] |
|
| 370 |
- } |
|
| 371 |
- parts := strings.Split(path, string(os.PathSeparator)) |
|
| 372 |
- filename := parts[len(parts)-1] |
|
| 373 |
- if filename == "" {
|
|
| 374 |
- return fmt.Errorf("cannot determine filename from url: %s", u)
|
|
| 375 |
- } |
|
| 376 |
- ci.destPath = ci.destPath + filename |
|
| 377 |
- } |
|
| 378 |
- |
|
| 379 |
- // Calc the checksum, even if we're using the cache |
|
| 380 |
- r, err := archive.Tar(tmpFileName, archive.Uncompressed) |
|
| 381 |
- if err != nil {
|
|
| 382 |
- return err |
|
| 383 |
- } |
|
| 384 |
- tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) |
|
| 385 |
- if err != nil {
|
|
| 386 |
- return err |
|
| 387 |
- } |
|
| 388 |
- if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
|
|
| 389 |
- return err |
|
| 390 |
- } |
|
| 391 |
- ci.hash = tarSum.Sum(nil) |
|
| 392 |
- r.Close() |
|
| 393 |
- |
|
| 394 |
- return nil |
|
| 395 |
- } |
|
| 396 |
- |
|
| 397 |
- // Deal with wildcards |
|
| 398 |
- if allowWildcards && containsWildcards(origPath) {
|
|
| 399 |
- for _, fileInfo := range b.context.GetSums() {
|
|
| 400 |
- if fileInfo.Name() == "" {
|
|
| 401 |
- continue |
|
| 402 |
- } |
|
| 403 |
- match, _ := filepath.Match(origPath, fileInfo.Name()) |
|
| 404 |
- if !match {
|
|
| 405 |
- continue |
|
| 406 |
- } |
|
| 407 |
- |
|
| 408 |
- // Note we set allowWildcards to false in case the name has |
|
| 409 |
- // a * in it |
|
| 410 |
- calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false) |
|
| 411 |
- } |
|
| 412 |
- return nil |
|
| 413 |
- } |
|
| 414 |
- |
|
| 415 |
- // Must be a dir or a file |
|
| 416 |
- |
|
| 417 |
- if err := b.checkPathForAddition(origPath); err != nil {
|
|
| 418 |
- return err |
|
| 419 |
- } |
|
| 420 |
- fi, _ := os.Stat(filepath.Join(b.contextPath, origPath)) |
|
| 421 |
- |
|
| 422 |
- ci := copyInfo{}
|
|
| 423 |
- ci.origPath = origPath |
|
| 424 |
- ci.hash = origPath |
|
| 425 |
- ci.destPath = destPath |
|
| 426 |
- ci.decompress = allowDecompression |
|
| 427 |
- *cInfos = append(*cInfos, &ci) |
|
| 428 |
- |
|
| 429 |
- // Deal with the single file case |
|
| 430 |
- if !fi.IsDir() {
|
|
| 431 |
- // This will match first file in sums of the archive |
|
| 432 |
- fis := b.context.GetSums().GetFile(ci.origPath) |
|
| 433 |
- if fis != nil {
|
|
| 434 |
- ci.hash = "file:" + fis.Sum() |
|
| 435 |
- } |
|
| 436 |
- return nil |
|
| 437 |
- } |
|
| 438 |
- |
|
| 439 |
- // Must be a dir |
|
| 440 |
- var subfiles []string |
|
| 441 |
- absOrigPath := filepath.Join(b.contextPath, ci.origPath) |
|
| 442 |
- |
|
| 443 |
- // Add a trailing / to make sure we only pick up nested files under |
|
| 444 |
- // the dir and not sibling files of the dir that just happen to |
|
| 445 |
- // start with the same chars |
|
| 446 |
- if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
|
|
| 447 |
- absOrigPath += string(os.PathSeparator) |
|
| 448 |
- } |
|
| 449 |
- |
|
| 450 |
- // Need path w/o slash too to find matching dir w/o trailing slash |
|
| 451 |
- absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] |
|
| 452 |
- |
|
| 453 |
- for _, fileInfo := range b.context.GetSums() {
|
|
| 454 |
- absFile := filepath.Join(b.contextPath, fileInfo.Name()) |
|
| 455 |
- // Any file in the context that starts with the given path will be |
|
| 456 |
- // picked up and its hashcode used. However, we'll exclude the |
|
| 457 |
- // root dir itself. We do this for a couple of reasons: |
|
| 458 |
- // 1 - ADD/COPY will not copy the dir itself, just its children |
|
| 459 |
- // so there's no reason to include it in the hash calc |
|
| 460 |
- // 2 - the metadata on the dir will change when any child file |
|
| 461 |
- // changes. This will lead to a miss in the cache check if that |
|
| 462 |
- // child file is in the .dockerignore list. |
|
| 463 |
- if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
|
|
| 464 |
- subfiles = append(subfiles, fileInfo.Sum()) |
|
| 465 |
- } |
|
| 466 |
- } |
|
| 467 |
- sort.Strings(subfiles) |
|
| 468 |
- hasher := sha256.New() |
|
| 469 |
- hasher.Write([]byte(strings.Join(subfiles, ","))) |
|
| 470 |
- ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) |
|
| 471 |
- |
|
| 472 |
- return nil |
|
| 473 |
-} |
|
| 474 |
- |
|
| 475 |
-func containsWildcards(name string) bool {
|
|
| 476 |
- for i := 0; i < len(name); i++ {
|
|
| 477 |
- ch := name[i] |
|
| 478 |
- if ch == '\\' {
|
|
| 479 |
- i++ |
|
| 480 |
- } else if ch == '*' || ch == '?' || ch == '[' {
|
|
| 481 |
- return true |
|
| 482 |
- } |
|
| 483 |
- } |
|
| 484 |
- return false |
|
| 485 |
-} |
|
| 486 |
- |
|
| 487 |
-func (b *builder) pullImage(name string) (*image.Image, error) {
|
|
| 488 |
- remote, tag := parsers.ParseRepositoryTag(name) |
|
| 489 |
- if tag == "" {
|
|
| 490 |
- tag = "latest" |
|
| 491 |
- } |
|
| 492 |
- |
|
| 493 |
- pullRegistryAuth := &cliconfig.AuthConfig{}
|
|
| 494 |
- if len(b.AuthConfigs) > 0 {
|
|
| 495 |
- // The request came with a full auth config file, we prefer to use that |
|
| 496 |
- repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote) |
|
| 497 |
- if err != nil {
|
|
| 498 |
- return nil, err |
|
| 499 |
- } |
|
| 500 |
- |
|
| 501 |
- resolvedConfig := registry.ResolveAuthConfig( |
|
| 502 |
- &cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
|
|
| 503 |
- repoInfo.Index, |
|
| 504 |
- ) |
|
| 505 |
- pullRegistryAuth = &resolvedConfig |
|
| 506 |
- } |
|
| 507 |
- |
|
| 508 |
- imagePullConfig := &graph.ImagePullConfig{
|
|
| 509 |
- AuthConfig: pullRegistryAuth, |
|
| 510 |
- OutStream: ioutils.NopWriteCloser(b.OutOld), |
|
| 511 |
- } |
|
| 512 |
- |
|
| 513 |
- if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
|
|
| 514 |
- return nil, err |
|
| 515 |
- } |
|
| 516 |
- |
|
| 517 |
- image, err := b.Daemon.Repositories().LookupImage(name) |
|
| 518 |
- if err != nil {
|
|
| 519 |
- return nil, err |
|
| 520 |
- } |
|
| 521 |
- |
|
| 522 |
- return image, nil |
|
| 523 |
-} |
|
| 524 |
- |
|
| 525 |
-func (b *builder) processImageFrom(img *image.Image) error {
|
|
| 526 |
- b.image = img.ID |
|
| 527 |
- |
|
| 528 |
- if img.Config != nil {
|
|
| 529 |
- b.Config = img.Config |
|
| 530 |
- } |
|
| 531 |
- |
|
| 532 |
- // The default path will be blank on Windows (set by HCS) |
|
| 533 |
- if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
|
|
| 534 |
- b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) |
|
| 535 |
- } |
|
| 536 |
- |
|
| 537 |
- // Process ONBUILD triggers if they exist |
|
| 538 |
- if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
|
|
| 539 |
- word := "trigger" |
|
| 540 |
- if nTriggers > 1 {
|
|
| 541 |
- word = "triggers" |
|
| 542 |
- } |
|
| 543 |
- fmt.Fprintf(b.ErrStream, "# Executing %d build %s...\n", nTriggers, word) |
|
| 544 |
- } |
|
| 545 |
- |
|
| 546 |
- // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. |
|
| 547 |
- onBuildTriggers := b.Config.OnBuild |
|
| 548 |
- b.Config.OnBuild = []string{}
|
|
| 549 |
- |
|
| 550 |
- // parse the ONBUILD triggers by invoking the parser |
|
| 551 |
- for _, step := range onBuildTriggers {
|
|
| 552 |
- ast, err := parser.Parse(strings.NewReader(step)) |
|
| 553 |
- if err != nil {
|
|
| 554 |
- return err |
|
| 555 |
- } |
|
| 556 |
- |
|
| 557 |
- for i, n := range ast.Children {
|
|
| 558 |
- switch strings.ToUpper(n.Value) {
|
|
| 559 |
- case "ONBUILD": |
|
| 560 |
- return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
|
|
| 561 |
- case "MAINTAINER", "FROM": |
|
| 562 |
- return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value)
|
|
| 563 |
- } |
|
| 564 |
- |
|
| 565 |
- if err := b.dispatch(i, n); err != nil {
|
|
| 566 |
- return err |
|
| 567 |
- } |
|
| 568 |
- } |
|
| 569 |
- } |
|
| 570 |
- |
|
| 571 |
- return nil |
|
| 572 |
-} |
|
| 573 |
- |
|
| 574 |
-// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`) |
|
| 575 |
-// and if so attempts to look up the current `b.image` and `b.Config` pair |
|
| 576 |
-// in the current server `b.Daemon`. If an image is found, probeCache returns |
|
| 577 |
-// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there |
|
| 578 |
-// is any error, it returns `(false, err)`. |
|
| 579 |
-func (b *builder) probeCache() (bool, error) {
|
|
| 580 |
- if !b.UtilizeCache || b.cacheBusted {
|
|
| 581 |
- return false, nil |
|
| 582 |
- } |
|
| 583 |
- |
|
| 584 |
- cache, err := b.Daemon.ImageGetCached(b.image, b.Config) |
|
| 585 |
- if err != nil {
|
|
| 586 |
- return false, err |
|
| 587 |
- } |
|
| 588 |
- if cache == nil {
|
|
| 589 |
- logrus.Debugf("[BUILDER] Cache miss")
|
|
| 590 |
- b.cacheBusted = true |
|
| 591 |
- return false, nil |
|
| 592 |
- } |
|
| 593 |
- |
|
| 594 |
- fmt.Fprintf(b.OutStream, " ---> Using cache\n") |
|
| 595 |
- logrus.Debugf("[BUILDER] Use cached version")
|
|
| 596 |
- b.image = cache.ID |
|
| 597 |
- b.Daemon.Graph().Retain(b.id, cache.ID) |
|
| 598 |
- b.activeImages = append(b.activeImages, cache.ID) |
|
| 599 |
- return true, nil |
|
| 600 |
-} |
|
| 601 |
- |
|
| 602 |
-func (b *builder) create() (*daemon.Container, error) {
|
|
| 603 |
- if b.image == "" && !b.noBaseImage {
|
|
| 604 |
- return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
|
| 605 |
- } |
|
| 606 |
- b.Config.Image = b.image |
|
| 607 |
- |
|
| 608 |
- hostConfig := &runconfig.HostConfig{
|
|
| 609 |
- CPUShares: b.cpuShares, |
|
| 610 |
- CPUPeriod: b.cpuPeriod, |
|
| 611 |
- CPUQuota: b.cpuQuota, |
|
| 612 |
- CpusetCpus: b.cpuSetCpus, |
|
| 613 |
- CpusetMems: b.cpuSetMems, |
|
| 614 |
- CgroupParent: b.cgroupParent, |
|
| 615 |
- Memory: b.memory, |
|
| 616 |
- MemorySwap: b.memorySwap, |
|
| 617 |
- Ulimits: b.ulimits, |
|
| 618 |
- } |
|
| 619 |
- |
|
| 620 |
- config := *b.Config |
|
| 621 |
- |
|
| 622 |
- // Create the container |
|
| 623 |
- ccr, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
|
|
| 624 |
- if err != nil {
|
|
| 625 |
- return nil, err |
|
| 626 |
- } |
|
| 627 |
- for _, warning := range ccr.Warnings {
|
|
| 628 |
- fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) |
|
| 629 |
- } |
|
| 630 |
- c, err := b.Daemon.Get(ccr.ID) |
|
| 631 |
- if err != nil {
|
|
| 632 |
- return nil, err |
|
| 633 |
- } |
|
| 634 |
- |
|
| 635 |
- b.TmpContainers[c.ID] = struct{}{}
|
|
| 636 |
- fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID)) |
|
| 637 |
- |
|
| 638 |
- if config.Cmd.Len() > 0 {
|
|
| 639 |
- // override the entry point that may have been picked up from the base image |
|
| 640 |
- s := config.Cmd.Slice() |
|
| 641 |
- c.Path = s[0] |
|
| 642 |
- c.Args = s[1:] |
|
| 643 |
- } else {
|
|
| 644 |
- config.Cmd = stringutils.NewStrSlice() |
|
| 645 |
- } |
|
| 646 |
- |
|
| 647 |
- return c, nil |
|
| 648 |
-} |
|
| 649 |
- |
|
| 650 |
-func (b *builder) run(c *daemon.Container) error {
|
|
| 651 |
- var errCh chan error |
|
| 652 |
- if b.Verbose {
|
|
| 653 |
- errCh = c.Attach(nil, b.OutStream, b.ErrStream) |
|
| 654 |
- } |
|
| 655 |
- |
|
| 656 |
- //start the container |
|
| 657 |
- if err := c.Start(); err != nil {
|
|
| 658 |
- return err |
|
| 659 |
- } |
|
| 660 |
- |
|
| 661 |
- finished := make(chan struct{})
|
|
| 662 |
- defer close(finished) |
|
| 663 |
- go func() {
|
|
| 664 |
- select {
|
|
| 665 |
- case <-b.cancelled: |
|
| 666 |
- logrus.Debugln("Build cancelled, killing container:", c.ID)
|
|
| 667 |
- c.Kill() |
|
| 668 |
- case <-finished: |
|
| 669 |
- } |
|
| 670 |
- }() |
|
| 671 |
- |
|
| 672 |
- if b.Verbose {
|
|
| 673 |
- // Block on reading output from container, stop on err or chan closed |
|
| 674 |
- if err := <-errCh; err != nil {
|
|
| 675 |
- return err |
|
| 676 |
- } |
|
| 677 |
- } |
|
| 678 |
- |
|
| 679 |
- // Wait for it to finish |
|
| 680 |
- if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
|
|
| 681 |
- return &jsonmessage.JSONError{
|
|
| 682 |
- Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
|
|
| 683 |
- Code: ret, |
|
| 684 |
- } |
|
| 685 |
- } |
|
| 686 |
- |
|
| 687 |
- return nil |
|
| 688 |
-} |
|
| 689 |
- |
|
| 690 |
-func (b *builder) checkPathForAddition(orig string) error {
|
|
| 691 |
- origPath := filepath.Join(b.contextPath, orig) |
|
| 692 |
- origPath, err := symlink.EvalSymlinks(origPath) |
|
| 693 |
- if err != nil {
|
|
| 694 |
- if os.IsNotExist(err) {
|
|
| 695 |
- return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 696 |
- } |
|
| 697 |
- return err |
|
| 698 |
- } |
|
| 699 |
- contextPath, err := symlink.EvalSymlinks(b.contextPath) |
|
| 700 |
- if err != nil {
|
|
| 701 |
- return err |
|
| 702 |
- } |
|
| 703 |
- if !strings.HasPrefix(origPath, contextPath) {
|
|
| 704 |
- return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
|
| 705 |
- } |
|
| 706 |
- if _, err := os.Stat(origPath); err != nil {
|
|
| 707 |
- if os.IsNotExist(err) {
|
|
| 708 |
- return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 709 |
- } |
|
| 710 |
- return err |
|
| 711 |
- } |
|
| 712 |
- return nil |
|
| 713 |
-} |
|
| 714 |
- |
|
| 715 |
-func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
|
|
| 716 |
- var ( |
|
| 717 |
- err error |
|
| 718 |
- destExists = true |
|
| 719 |
- origPath = filepath.Join(b.contextPath, orig) |
|
| 720 |
- destPath string |
|
| 721 |
- ) |
|
| 722 |
- |
|
| 723 |
- // Work in daemon-local OS specific file paths |
|
| 724 |
- dest = filepath.FromSlash(dest) |
|
| 725 |
- |
|
| 726 |
- destPath, err = container.GetResourcePath(dest) |
|
| 727 |
- if err != nil {
|
|
| 728 |
- return err |
|
| 729 |
- } |
|
| 730 |
- |
|
| 731 |
- // Preserve the trailing slash |
|
| 732 |
- if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
|
|
| 733 |
- destPath = destPath + string(os.PathSeparator) |
|
| 734 |
- } |
|
| 735 |
- |
|
| 736 |
- destStat, err := os.Stat(destPath) |
|
| 737 |
- if err != nil {
|
|
| 738 |
- if !os.IsNotExist(err) {
|
|
| 739 |
- logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
|
|
| 740 |
- return err |
|
| 741 |
- } |
|
| 742 |
- destExists = false |
|
| 743 |
- } |
|
| 744 |
- |
|
| 745 |
- fi, err := os.Stat(origPath) |
|
| 746 |
- if err != nil {
|
|
| 747 |
- if os.IsNotExist(err) {
|
|
| 748 |
- return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 749 |
- } |
|
| 750 |
- return err |
|
| 751 |
- } |
|
| 752 |
- |
|
| 753 |
- if fi.IsDir() {
|
|
| 754 |
- return copyAsDirectory(origPath, destPath, destExists) |
|
| 755 |
- } |
|
| 756 |
- |
|
| 757 |
- // If we are adding a remote file (or we've been told not to decompress), do not try to untar it |
|
| 758 |
- if decompress {
|
|
| 759 |
- // First try to unpack the source as an archive. |
|
| 760 |
- // To support the untar feature we need to clean up the path a little |
|
| 761 |
- // because tar is very forgiving. First we strip off the archive's |
|
| 762 |
- // filename from the path, but only if it does not end in a slash. |
|
| 763 |
- tarDest := destPath |
|
| 764 |
- if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
|
|
| 765 |
- tarDest = filepath.Dir(destPath) |
|
| 766 |
- } |
|
| 767 |
- |
|
| 768 |
- // try to successfully untar the orig |
|
| 769 |
- if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
|
|
| 770 |
- return nil |
|
| 771 |
- } else if err != io.EOF {
|
|
| 772 |
- logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
|
|
| 773 |
- } |
|
| 774 |
- } |
|
| 775 |
- |
|
| 776 |
- if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
|
|
| 777 |
- return err |
|
| 778 |
- } |
|
| 779 |
- if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
|
|
| 780 |
- return err |
|
| 781 |
- } |
|
| 782 |
- |
|
| 783 |
- resPath := destPath |
|
| 784 |
- if destExists && destStat.IsDir() {
|
|
| 785 |
- resPath = filepath.Join(destPath, filepath.Base(origPath)) |
|
| 786 |
- } |
|
| 787 |
- |
|
| 788 |
- return fixPermissions(origPath, resPath, 0, 0, destExists) |
|
| 789 |
-} |
|
| 790 |
- |
|
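The trailing-slash bookkeeping in addContext is subtle: a trailing separator (or ".") means "copy into this directory", and when untarring the would-be filename is stripped back off so tar extracts into the directory itself. A small sketch of only those path fix-ups; `destinationFor` is a hypothetical helper and skips the container-scoped GetResourcePath resolution:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// destinationFor mirrors addContext's path fix-ups: Clean stands in for
// GetResourcePath, the trailing separator is restored to preserve the
// "copy into directory" meaning, and tarDest drops back to the directory.
func destinationFor(dest string) (destPath, tarDest string) {
	destPath = filepath.Clean(dest)
	if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
		destPath += string(os.PathSeparator)
	}
	tarDest = destPath
	if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
		tarDest = filepath.Dir(destPath)
	}
	return destPath, tarDest
}

func main() {
	fmt.Println(destinationFor("/opt/app/"))           // /opt/app/ /opt/app
	fmt.Println(destinationFor("/opt/app/archive.tgz")) // same path for both
}
```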
| 791 |
-func copyAsDirectory(source, destination string, destExisted bool) error {
|
|
| 792 |
- if err := chrootarchive.CopyWithTar(source, destination); err != nil {
|
|
| 793 |
- return err |
|
| 794 |
- } |
|
| 795 |
- return fixPermissions(source, destination, 0, 0, destExisted) |
|
| 796 |
-} |
|
| 797 |
- |
|
| 798 |
-func (b *builder) clearTmp() {
|
|
| 799 |
- for c := range b.TmpContainers {
|
|
| 800 |
- rmConfig := &daemon.ContainerRmConfig{
|
|
| 801 |
- ForceRemove: true, |
|
| 802 |
- RemoveVolume: true, |
|
| 803 |
- } |
|
| 804 |
- if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
|
|
| 805 |
- fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) |
|
| 806 |
- return |
|
| 807 |
- } |
|
| 808 |
- delete(b.TmpContainers, c) |
|
| 809 |
- fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c)) |
|
| 810 |
- } |
|
| 811 |
-} |
| 812 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,45 +0,0 @@ |
| 1 |
-// +build freebsd linux |
|
| 2 |
- |
|
| 3 |
-package builder |
|
| 4 |
- |
|
| 5 |
-import ( |
|
| 6 |
- "io/ioutil" |
|
| 7 |
- "os" |
|
| 8 |
- "path/filepath" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-func getTempDir(dir, prefix string) (string, error) {
|
|
| 12 |
- return ioutil.TempDir(dir, prefix) |
|
| 13 |
-} |
|
| 14 |
- |
|
| 15 |
-func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
|
|
| 16 |
- // If the destination didn't already exist, or the destination isn't a |
|
| 17 |
- // directory, then we should Lchown the destination. Otherwise, we shouldn't |
|
| 18 |
- // Lchown the destination. |
|
| 19 |
- destStat, err := os.Stat(destination) |
|
| 20 |
- if err != nil {
|
|
| 21 |
- // This should *never* be reached, because the destination must've already |
|
| 22 |
- // been created while untar-ing the context. |
|
| 23 |
- return err |
|
| 24 |
- } |
|
| 25 |
- doChownDestination := !destExisted || !destStat.IsDir() |
|
| 26 |
- |
|
| 27 |
- // We Walk on the source rather than on the destination because we don't |
|
| 28 |
- // want to change permissions on things we haven't created or modified. |
|
| 29 |
- return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
|
|
| 30 |
- // Do not alter the walk root if it existed before, as it doesn't fall under |
|
| 31 |
- // the domain of "things we should chown". |
|
| 32 |
- if !doChownDestination && (source == fullpath) {
|
|
| 33 |
- return nil |
|
| 34 |
- } |
|
| 35 |
- |
|
| 36 |
- // Path is prefixed by source: substitute with destination instead. |
|
| 37 |
- cleaned, err := filepath.Rel(source, fullpath) |
|
| 38 |
- if err != nil {
|
|
| 39 |
- return err |
|
| 40 |
- } |
|
| 41 |
- |
|
| 42 |
- fullpath = filepath.Join(destination, cleaned) |
|
| 43 |
- return os.Lchown(fullpath, uid, gid) |
|
| 44 |
- }) |
|
| 45 |
-} |
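The key step in the Walk callback above is remapping each walked source path onto its destination counterpart before chowning. Extracted as a runnable sketch (`destFor` is a hypothetical name for the inline logic):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// destFor maps a path discovered while walking source onto the matching
// path under destination, exactly as the Walk callback does before
// calling os.Lchown.
func destFor(source, destination, fullpath string) (string, error) {
	rel, err := filepath.Rel(source, fullpath)
	if err != nil {
		return "", err
	}
	return filepath.Join(destination, rel), nil
}

func main() {
	p, _ := destFor("/ctx/app", "/container/opt/app", "/ctx/app/bin/run.sh")
	fmt.Println(p) // /container/opt/app/bin/run.sh
}
```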
| 46 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,22 +0,0 @@ |
| 1 |
-// +build windows |
|
| 2 |
- |
|
| 3 |
-package builder |
|
| 4 |
- |
|
| 5 |
-import ( |
|
| 6 |
- "io/ioutil" |
|
| 7 |
- |
|
| 8 |
- "github.com/docker/docker/pkg/longpath" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-func getTempDir(dir, prefix string) (string, error) {
|
|
| 12 |
- tempDir, err := ioutil.TempDir(dir, prefix) |
|
| 13 |
- if err != nil {
|
|
| 14 |
- return "", err |
|
| 15 |
- } |
|
| 16 |
- return longpath.AddPrefix(tempDir), nil |
|
| 17 |
-} |
|
| 18 |
- |
|
| 19 |
-func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
|
|
| 20 |
- // chown is not supported on Windows |
|
| 21 |
- return nil |
|
| 22 |
-} |
| 23 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,376 +0,0 @@ |
| 1 |
-package builder |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "errors" |
|
| 6 |
- "fmt" |
|
| 7 |
- "io" |
|
| 8 |
- "io/ioutil" |
|
| 9 |
- "os" |
|
| 10 |
- "runtime" |
|
| 11 |
- "strings" |
|
| 12 |
- "sync" |
|
| 13 |
- |
|
| 14 |
- "github.com/docker/docker/api" |
|
| 15 |
- "github.com/docker/docker/builder/parser" |
|
| 16 |
- "github.com/docker/docker/cliconfig" |
|
| 17 |
- "github.com/docker/docker/daemon" |
|
| 18 |
- "github.com/docker/docker/graph/tags" |
|
| 19 |
- "github.com/docker/docker/pkg/archive" |
|
| 20 |
- "github.com/docker/docker/pkg/httputils" |
|
| 21 |
- "github.com/docker/docker/pkg/parsers" |
|
| 22 |
- "github.com/docker/docker/pkg/progressreader" |
|
| 23 |
- "github.com/docker/docker/pkg/streamformatter" |
|
| 24 |
- "github.com/docker/docker/pkg/stringid" |
|
| 25 |
- "github.com/docker/docker/pkg/ulimit" |
|
| 26 |
- "github.com/docker/docker/pkg/urlutil" |
|
| 27 |
- "github.com/docker/docker/registry" |
|
| 28 |
- "github.com/docker/docker/runconfig" |
|
| 29 |
- "github.com/docker/docker/utils" |
|
| 30 |
-) |
|
| 31 |
- |
|
| 32 |
-// When downloading remote contexts, limit the amount (in bytes) |
|
| 33 |
-// to be read from the response body in order to detect its Content-Type |
|
| 34 |
-const maxPreambleLength = 100 |
|
| 35 |
- |
|
| 36 |
-// whitelist of commands allowed for a commit/import |
|
| 37 |
-var validCommitCommands = map[string]bool{
|
|
| 38 |
- "cmd": true, |
|
| 39 |
- "entrypoint": true, |
|
| 40 |
- "env": true, |
|
| 41 |
- "expose": true, |
|
| 42 |
- "label": true, |
|
| 43 |
- "onbuild": true, |
|
| 44 |
- "user": true, |
|
| 45 |
- "volume": true, |
|
| 46 |
- "workdir": true, |
|
| 47 |
-} |
|
| 48 |
- |
|
| 49 |
-// BuiltinAllowedBuildArgs is list of built-in allowed build args |
|
| 50 |
-var BuiltinAllowedBuildArgs = map[string]bool{
|
|
| 51 |
- "HTTP_PROXY": true, |
|
| 52 |
- "http_proxy": true, |
|
| 53 |
- "HTTPS_PROXY": true, |
|
| 54 |
- "https_proxy": true, |
|
| 55 |
- "FTP_PROXY": true, |
|
| 56 |
- "ftp_proxy": true, |
|
| 57 |
- "NO_PROXY": true, |
|
| 58 |
- "no_proxy": true, |
|
| 59 |
-} |
|
| 60 |
- |
|
| 61 |
-// Config contains all configs for a build job |
|
| 62 |
-type Config struct {
|
|
| 63 |
- DockerfileName string |
|
| 64 |
- RemoteURL string |
|
| 65 |
- RepoName string |
|
| 66 |
- SuppressOutput bool |
|
| 67 |
- NoCache bool |
|
| 68 |
- Remove bool |
|
| 69 |
- ForceRemove bool |
|
| 70 |
- Pull bool |
|
| 71 |
- Memory int64 |
|
| 72 |
- MemorySwap int64 |
|
| 73 |
- CPUShares int64 |
|
| 74 |
- CPUPeriod int64 |
|
| 75 |
- CPUQuota int64 |
|
| 76 |
- CPUSetCpus string |
|
| 77 |
- CPUSetMems string |
|
| 78 |
- CgroupParent string |
|
| 79 |
- Ulimits []*ulimit.Ulimit |
|
| 80 |
- AuthConfigs map[string]cliconfig.AuthConfig |
|
| 81 |
- BuildArgs map[string]string |
|
| 82 |
- |
|
| 83 |
- Stdout io.Writer |
|
| 84 |
- Context io.ReadCloser |
|
| 85 |
- // When closed, the job has been cancelled. |
|
| 86 |
- // Note: not all jobs implement cancellation. |
|
| 87 |
- // See Config.Cancel() and Config.WaitCancelled() |
|
| 88 |
- cancelled chan struct{}
|
|
| 89 |
- cancelOnce sync.Once |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-// Cancel signals the build job to cancel |
|
| 93 |
-func (b *Config) Cancel() {
|
|
| 94 |
- b.cancelOnce.Do(func() {
|
|
| 95 |
- close(b.cancelled) |
|
| 96 |
- }) |
|
| 97 |
-} |
|
| 98 |
- |
|
| 99 |
-// WaitCancelled returns a channel which is closed ("never blocks") when
|
|
| 100 |
-// the job is cancelled. |
|
| 101 |
-func (b *Config) WaitCancelled() <-chan struct{} {
|
|
| 102 |
- return b.cancelled |
|
| 103 |
-} |
|
| 104 |
- |
|
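Closing an already-closed channel panics in Go, which is why Cancel() wraps the close in sync.Once; and once closed, the channel returned by WaitCancelled() never blocks a receiver. A self-contained sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sync"
)

type job struct {
	cancelled  chan struct{}
	cancelOnce sync.Once
}

// Cancel is safe to call from any number of goroutines, any number of
// times: the sync.Once ensures the channel is closed exactly once.
func (j *job) Cancel() {
	j.cancelOnce.Do(func() { close(j.cancelled) })
}

func main() {
	j := &job{cancelled: make(chan struct{})}
	j.Cancel()
	j.Cancel() // no panic thanks to sync.Once
	<-j.cancelled
	fmt.Println("cancelled; receives on a closed channel never block")
}
```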
| 105 |
-// NewBuildConfig returns a new Config struct |
|
| 106 |
-func NewBuildConfig() *Config {
|
|
| 107 |
- return &Config{
|
|
| 108 |
- AuthConfigs: map[string]cliconfig.AuthConfig{},
|
|
| 109 |
- cancelled: make(chan struct{}),
|
|
| 110 |
- } |
|
| 111 |
-} |
|
| 112 |
- |
|
| 113 |
-// Build is the main interface of the package; it gathers the builder |
|
| 114 |
-// struct and calls builder.Run() to do the real build work. |
|
| 115 |
-func Build(d *daemon.Daemon, buildConfig *Config) error {
|
|
| 116 |
- var ( |
|
| 117 |
- repoName string |
|
| 118 |
- tag string |
|
| 119 |
- context io.ReadCloser |
|
| 120 |
- ) |
|
| 121 |
- sf := streamformatter.NewJSONStreamFormatter() |
|
| 122 |
- |
|
| 123 |
- repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName) |
|
| 124 |
- if repoName != "" {
|
|
| 125 |
- if err := registry.ValidateRepositoryName(repoName); err != nil {
|
|
| 126 |
- return err |
|
| 127 |
- } |
|
| 128 |
- if len(tag) > 0 {
|
|
| 129 |
- if err := tags.ValidateTagName(tag); err != nil {
|
|
| 130 |
- return err |
|
| 131 |
- } |
|
| 132 |
- } |
|
| 133 |
- } |
|
| 134 |
- |
|
| 135 |
- if buildConfig.RemoteURL == "" {
|
|
| 136 |
- context = ioutil.NopCloser(buildConfig.Context) |
|
| 137 |
- } else if urlutil.IsGitURL(buildConfig.RemoteURL) {
|
|
| 138 |
- root, err := utils.GitClone(buildConfig.RemoteURL) |
|
| 139 |
- if err != nil {
|
|
| 140 |
- return err |
|
| 141 |
- } |
|
| 142 |
- defer os.RemoveAll(root) |
|
| 143 |
- |
|
| 144 |
- c, err := archive.Tar(root, archive.Uncompressed) |
|
| 145 |
- if err != nil {
|
|
| 146 |
- return err |
|
| 147 |
- } |
|
| 148 |
- context = c |
|
| 149 |
- } else if urlutil.IsURL(buildConfig.RemoteURL) {
|
|
| 150 |
- f, err := httputils.Download(buildConfig.RemoteURL) |
|
| 151 |
- if err != nil {
|
|
| 152 |
- return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
|
|
| 153 |
- } |
|
| 154 |
- defer f.Body.Close() |
|
| 155 |
- ct := f.Header.Get("Content-Type")
|
|
| 156 |
- clen := f.ContentLength |
|
| 157 |
- contentType, bodyReader, err := inspectResponse(ct, f.Body, clen) |
|
| 158 |
- |
|
| 159 |
- defer bodyReader.Close() |
|
| 160 |
- |
|
| 161 |
- if err != nil {
|
|
| 162 |
- return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
|
|
| 163 |
- } |
|
| 164 |
- if contentType == httputils.MimeTypes.TextPlain {
|
|
| 165 |
- dockerFile, err := ioutil.ReadAll(bodyReader) |
|
| 166 |
- if err != nil {
|
|
| 167 |
- return err |
|
| 168 |
- } |
|
| 169 |
- |
|
| 170 |
- // When we're downloading just a Dockerfile, store it under |
|
| 171 |
- // the default name - don't allow the client to move/specify it |
|
| 172 |
- buildConfig.DockerfileName = api.DefaultDockerfileName |
|
| 173 |
- |
|
| 174 |
- c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile)) |
|
| 175 |
- if err != nil {
|
|
| 176 |
- return err |
|
| 177 |
- } |
|
| 178 |
- context = c |
|
| 179 |
- } else {
|
|
| 180 |
- // Pass through - this is a pre-packaged context, presumably |
|
| 181 |
- // with a Dockerfile with the right name inside it. |
|
| 182 |
- prCfg := progressreader.Config{
|
|
| 183 |
- In: bodyReader, |
|
| 184 |
- Out: buildConfig.Stdout, |
|
| 185 |
- Formatter: sf, |
|
| 186 |
- Size: clen, |
|
| 187 |
- NewLines: true, |
|
| 188 |
- ID: "Downloading context", |
|
| 189 |
- Action: buildConfig.RemoteURL, |
|
| 190 |
- } |
|
| 191 |
- context = progressreader.New(prCfg) |
|
| 192 |
- } |
|
| 193 |
- } |
|
| 194 |
- |
|
| 195 |
- defer context.Close() |
|
| 196 |
- |
|
| 197 |
- builder := &builder{
|
|
| 198 |
- Daemon: d, |
|
| 199 |
- OutStream: &streamformatter.StdoutFormatter{
|
|
| 200 |
- Writer: buildConfig.Stdout, |
|
| 201 |
- StreamFormatter: sf, |
|
| 202 |
- }, |
|
| 203 |
- ErrStream: &streamformatter.StderrFormatter{
|
|
| 204 |
- Writer: buildConfig.Stdout, |
|
| 205 |
- StreamFormatter: sf, |
|
| 206 |
- }, |
|
| 207 |
- Verbose: !buildConfig.SuppressOutput, |
|
| 208 |
- UtilizeCache: !buildConfig.NoCache, |
|
| 209 |
- Remove: buildConfig.Remove, |
|
| 210 |
- ForceRemove: buildConfig.ForceRemove, |
|
| 211 |
- Pull: buildConfig.Pull, |
|
| 212 |
- OutOld: buildConfig.Stdout, |
|
| 213 |
- StreamFormatter: sf, |
|
| 214 |
- AuthConfigs: buildConfig.AuthConfigs, |
|
| 215 |
- dockerfileName: buildConfig.DockerfileName, |
|
| 216 |
- cpuShares: buildConfig.CPUShares, |
|
| 217 |
- cpuPeriod: buildConfig.CPUPeriod, |
|
| 218 |
- cpuQuota: buildConfig.CPUQuota, |
|
| 219 |
- cpuSetCpus: buildConfig.CPUSetCpus, |
|
| 220 |
- cpuSetMems: buildConfig.CPUSetMems, |
|
| 221 |
- cgroupParent: buildConfig.CgroupParent, |
|
| 222 |
- memory: buildConfig.Memory, |
|
| 223 |
- memorySwap: buildConfig.MemorySwap, |
|
| 224 |
- ulimits: buildConfig.Ulimits, |
|
| 225 |
- cancelled: buildConfig.WaitCancelled(), |
|
| 226 |
- id: stringid.GenerateRandomID(), |
|
| 227 |
- buildArgs: buildConfig.BuildArgs, |
|
| 228 |
- allowedBuildArgs: make(map[string]bool), |
|
| 229 |
- } |
|
| 230 |
- |
|
| 231 |
- defer func() {
|
|
| 232 |
- builder.Daemon.Graph().Release(builder.id, builder.activeImages...) |
|
| 233 |
- }() |
|
| 234 |
- |
|
| 235 |
- id, err := builder.Run(context) |
|
| 236 |
- if err != nil {
|
|
| 237 |
- return err |
|
| 238 |
- } |
|
| 239 |
- if repoName != "" {
|
|
| 240 |
- return d.Repositories().Tag(repoName, tag, id, true) |
|
| 241 |
- } |
|
| 242 |
- return nil |
|
| 243 |
-} |
|
| 244 |
- |
|
| 245 |
-// BuildFromConfig builds directly from the 'changes' parameter, which comes |
|
| 246 |
-// from Dockerfile entries. It will: |
|
| 247 |
-// |
|
| 248 |
-// - call parser.Parse() to get the AST root from the Dockerfile entries |
|
| 249 |
-// - build by calling builder.dispatch() to run each entry's handling routine |
|
| 250 |
-func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
|
|
| 251 |
- ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) |
|
| 252 |
- if err != nil {
|
|
| 253 |
- return nil, err |
|
| 254 |
- } |
|
| 255 |
- |
|
| 256 |
- // ensure that the commands are valid |
|
| 257 |
- for _, n := range ast.Children {
|
|
| 258 |
- if !validCommitCommands[n.Value] {
|
|
| 259 |
- return nil, fmt.Errorf("%s is not a valid change command", n.Value)
|
|
| 260 |
- } |
|
| 261 |
- } |
|
| 262 |
- |
|
| 263 |
- builder := &builder{
|
|
| 264 |
- Daemon: d, |
|
| 265 |
- Config: c, |
|
| 266 |
- OutStream: ioutil.Discard, |
|
| 267 |
- ErrStream: ioutil.Discard, |
|
| 268 |
- disableCommit: true, |
|
| 269 |
- } |
|
| 270 |
- |
|
| 271 |
- for i, n := range ast.Children {
|
|
| 272 |
- if err := builder.dispatch(i, n); err != nil {
|
|
| 273 |
- return nil, err |
|
| 274 |
- } |
|
| 275 |
- } |
|
| 276 |
- |
|
| 277 |
- return builder.Config, nil |
|
| 278 |
-} |
|
| 279 |
- |
|
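BuildFromConfig's whitelist loop is what backs `docker commit --change` validation. A standalone sketch using the first word of each change line where the real code uses the parsed node.Value (`validateChanges` is a hypothetical stand-in, not the builder's API):

```go
package main

import (
	"fmt"
	"strings"
)

// Whitelist copied from the builder above.
var validCommitCommands = map[string]bool{
	"cmd": true, "entrypoint": true, "env": true, "expose": true,
	"label": true, "onbuild": true, "user": true, "volume": true, "workdir": true,
}

// validateChanges mirrors the loop over ast.Children, using the first word
// of each change line in place of node.Value.
func validateChanges(changes []string) error {
	for _, c := range changes {
		fields := strings.Fields(c)
		if len(fields) == 0 {
			continue // blank change lines carry no command to validate
		}
		if cmd := strings.ToLower(fields[0]); !validCommitCommands[cmd] {
			return fmt.Errorf("%s is not a valid change command", cmd)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateChanges([]string{"ENV FOO=bar", "EXPOSE 8080"})) // <nil>
	fmt.Println(validateChanges([]string{"RUN make"}))                   // error
}
```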
| 280 |
-// CommitConfig contains build configs for commit operation |
|
| 281 |
-type CommitConfig struct {
|
|
| 282 |
- Pause bool |
|
| 283 |
- Repo string |
|
| 284 |
- Tag string |
|
| 285 |
- Author string |
|
| 286 |
- Comment string |
|
| 287 |
- Changes []string |
|
| 288 |
- Config *runconfig.Config |
|
| 289 |
-} |
|
| 290 |
- |
|
| 291 |
-// Commit will create a new image from a container's changes |
|
| 292 |
-func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
|
|
| 293 |
- container, err := d.Get(name) |
|
| 294 |
- if err != nil {
|
|
| 295 |
- return "", err |
|
| 296 |
- } |
|
| 297 |
- |
|
| 298 |
- // It is not possible to commit a running container on Windows |
|
| 299 |
- if runtime.GOOS == "windows" && container.IsRunning() {
|
|
| 300 |
- return "", fmt.Errorf("Windows does not support commit of a running container")
|
|
| 301 |
- } |
|
| 302 |
- |
|
| 303 |
- if c.Config == nil {
|
|
| 304 |
- c.Config = &runconfig.Config{}
|
|
| 305 |
- } |
|
| 306 |
- |
|
| 307 |
- newConfig, err := BuildFromConfig(d, c.Config, c.Changes) |
|
| 308 |
- if err != nil {
|
|
| 309 |
- return "", err |
|
| 310 |
- } |
|
| 311 |
- |
|
| 312 |
- if err := runconfig.Merge(newConfig, container.Config); err != nil {
|
|
| 313 |
- return "", err |
|
| 314 |
- } |
|
| 315 |
- |
|
| 316 |
- commitCfg := &daemon.ContainerCommitConfig{
|
|
| 317 |
- Pause: c.Pause, |
|
| 318 |
- Repo: c.Repo, |
|
| 319 |
- Tag: c.Tag, |
|
| 320 |
- Author: c.Author, |
|
| 321 |
- Comment: c.Comment, |
|
| 322 |
- Config: newConfig, |
|
| 323 |
- } |
|
| 324 |
- |
|
| 325 |
- img, err := d.Commit(container, commitCfg) |
|
| 326 |
- if err != nil {
|
|
| 327 |
- return "", err |
|
| 328 |
- } |
|
| 329 |
- |
|
| 330 |
- return img.ID, nil |
|
| 331 |
-} |
|
| 332 |
- |
|
| 333 |
-// inspectResponse looks into the http response data at r to determine whether its |
|
| 334 |
-// content-type is on the list of acceptable content types for remote build contexts. |
|
| 335 |
-// This function returns: |
|
| 336 |
-// - a string representation of the detected content-type |
|
| 337 |
-// - an io.Reader for the response body |
|
| 338 |
-// - an error value which will be non-nil either when something goes wrong while |
|
| 339 |
-// reading bytes from r or when the detected content-type is not acceptable. |
|
| 340 |
-func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
|
|
| 341 |
- plen := clen |
|
| 342 |
- if plen <= 0 || plen > maxPreambleLength {
|
|
| 343 |
- plen = maxPreambleLength |
|
| 344 |
- } |
|
| 345 |
- |
|
| 346 |
- preamble := make([]byte, plen) |
|
| 347 |
- rlen, err := r.Read(preamble) |
|
| 348 |
- if rlen == 0 {
|
|
| 349 |
- return ct, r, errors.New("Empty response")
|
|
| 350 |
- } |
|
| 351 |
- if err != nil && err != io.EOF {
|
|
| 352 |
- return ct, r, err |
|
| 353 |
- } |
|
| 354 |
- |
|
| 355 |
- preambleR := bytes.NewReader(preamble) |
|
| 356 |
- bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) |
|
| 357 |
- // Some web servers will use application/octet-stream as the default |
|
| 358 |
- // content type for files without an extension (e.g. 'Dockerfile') |
|
| 359 |
- // so if we receive this value, we'd better check for text content |
|
| 360 |
- contentType := ct |
|
| 361 |
- if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
|
|
| 362 |
- contentType, _, err = httputils.DetectContentType(preamble) |
|
| 363 |
- if err != nil {
|
|
| 364 |
- return contentType, bodyReader, err |
|
| 365 |
- } |
|
| 366 |
- } |
|
| 367 |
- |
|
| 368 |
- contentType = selectAcceptableMIME(contentType) |
|
| 369 |
- var cterr error |
|
| 370 |
- if len(contentType) == 0 {
|
|
| 371 |
- cterr = fmt.Errorf("unsupported Content-Type %q", ct)
|
|
| 372 |
- contentType = ct |
|
| 373 |
- } |
|
| 374 |
- |
|
| 375 |
- return contentType, bodyReader, cterr |
|
| 376 |
-} |
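The reusable trick in inspectResponse is consuming a preamble to sniff the content type, then stitching it back onto the stream with io.MultiReader so downstream readers still see the whole body. A minimal sketch; unlike the original it trims the buffer to the bytes actually read:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// peek reads up to n bytes to inspect a stream, then returns a reader that
// replays those bytes ahead of the rest — the same MultiReader trick as
// inspectResponse above.
func peek(r io.Reader, n int) ([]byte, io.Reader, error) {
	preamble := make([]byte, n)
	read, err := r.Read(preamble) // a single Read is enough for sniffing
	if err != nil && err != io.EOF {
		return nil, nil, err
	}
	preamble = preamble[:read]
	return preamble, io.MultiReader(bytes.NewReader(preamble), r), nil
}

func main() {
	head, body, _ := peek(strings.NewReader("FROM busybox\nRUN true\n"), 4)
	fmt.Printf("sniffed %q\n", head)
	rest, _ := ioutil.ReadAll(body)
	fmt.Printf("full body intact: %q\n", rest)
}
```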
| 377 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,113 +0,0 @@ |
| 1 |
-package builder |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "io/ioutil" |
|
| 6 |
- "testing" |
|
| 7 |
-) |
|
| 8 |
- |
|
| 9 |
-var textPlainDockerfile = "FROM busybox" |
|
| 10 |
-var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic
|
|
| 11 |
- |
|
| 12 |
-func TestInspectEmptyResponse(t *testing.T) {
|
|
| 13 |
- ct := "application/octet-stream" |
|
| 14 |
- br := ioutil.NopCloser(bytes.NewReader([]byte("")))
|
|
| 15 |
- contentType, bReader, err := inspectResponse(ct, br, 0) |
|
| 16 |
- if err == nil {
|
|
| 17 |
- t.Fatalf("Should have generated an error for an empty response")
|
|
| 18 |
- } |
|
| 19 |
- if contentType != "application/octet-stream" {
|
|
| 20 |
- t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 21 |
- } |
|
| 22 |
- body, err := ioutil.ReadAll(bReader) |
|
| 23 |
- if err != nil {
|
|
| 24 |
- t.Fatal(err) |
|
| 25 |
- } |
|
| 26 |
- if len(body) != 0 {
|
|
| 27 |
- t.Fatal("response body should remain empty")
|
|
| 28 |
- } |
|
| 29 |
-} |
|
| 30 |
- |
|
| 31 |
-func TestInspectResponseBinary(t *testing.T) {
|
|
| 32 |
- ct := "application/octet-stream" |
|
| 33 |
- br := ioutil.NopCloser(bytes.NewReader(binaryContext)) |
|
| 34 |
- contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) |
|
| 35 |
- if err != nil {
|
|
| 36 |
- t.Fatal(err) |
|
| 37 |
- } |
|
| 38 |
- if contentType != "application/octet-stream" {
|
|
| 39 |
- t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 40 |
- } |
|
| 41 |
- body, err := ioutil.ReadAll(bReader) |
|
| 42 |
- if err != nil {
|
|
| 43 |
- t.Fatal(err) |
|
| 44 |
- } |
|
| 45 |
- if len(body) != len(binaryContext) {
|
|
| 46 |
- t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body))
|
|
| 47 |
- } |
|
| 48 |
- for i := range body {
|
|
| 49 |
- if body[i] != binaryContext[i] {
|
|
| 50 |
- t.Fatalf("Corrupted response body at byte index %d", i)
|
|
| 51 |
- } |
|
| 52 |
- } |
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 |
-func TestResponseUnsupportedContentType(t *testing.T) {
|
|
| 56 |
- content := []byte(textPlainDockerfile) |
|
| 57 |
- ct := "application/json" |
|
| 58 |
- br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 59 |
- contentType, bReader, err := inspectResponse(ct, br, int64(len(textPlainDockerfile))) |
|
| 60 |
- |
|
| 61 |
- if err == nil {
|
|
| 62 |
- t.Fatal("Should have returned an error on content-type 'application/json'")
|
|
| 63 |
- } |
|
| 64 |
- if contentType != ct {
|
|
| 65 |
- t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType)
|
|
| 66 |
- } |
|
| 67 |
- body, err := ioutil.ReadAll(bReader) |
|
| 68 |
- if err != nil {
|
|
| 69 |
- t.Fatal(err) |
|
| 70 |
- } |
|
| 71 |
- if string(body) != textPlainDockerfile {
|
|
| 72 |
- t.Fatalf("Corrupted response body %s", body)
|
|
| 73 |
- } |
|
| 74 |
-} |
|
| 75 |
- |
|
| 76 |
-func TestInspectResponseTextSimple(t *testing.T) {
|
|
| 77 |
- content := []byte(textPlainDockerfile) |
|
| 78 |
- ct := "text/plain" |
|
| 79 |
- br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 80 |
- contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) |
|
| 81 |
- if err != nil {
|
|
| 82 |
- t.Fatal(err) |
|
| 83 |
- } |
|
| 84 |
- if contentType != "text/plain" {
|
|
| 85 |
- t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 86 |
- } |
|
| 87 |
- body, err := ioutil.ReadAll(bReader) |
|
| 88 |
- if err != nil {
|
|
| 89 |
- t.Fatal(err) |
|
| 90 |
- } |
|
| 91 |
- if string(body) != textPlainDockerfile {
|
|
| 92 |
- t.Fatalf("Corrupted response body %s", body)
|
|
| 93 |
- } |
|
| 94 |
-} |
|
| 95 |
- |
|
| 96 |
-func TestInspectResponseEmptyContentType(t *testing.T) {
|
|
| 97 |
- content := []byte(textPlainDockerfile) |
|
| 98 |
- br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 99 |
- contentType, bodyReader, err := inspectResponse("", br, int64(len(content)))
|
|
| 100 |
- if err != nil {
|
|
| 101 |
- t.Fatal(err) |
|
| 102 |
- } |
|
| 103 |
- if contentType != "text/plain" {
|
|
| 104 |
- t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 105 |
- } |
|
| 106 |
- body, err := ioutil.ReadAll(bodyReader) |
|
| 107 |
- if err != nil {
|
|
| 108 |
- t.Fatal(err) |
|
| 109 |
- } |
|
| 110 |
- if string(body) != textPlainDockerfile {
|
|
| 111 |
- t.Fatalf("Corrupted response body %s", body)
|
|
| 112 |
- } |
|
| 113 |
-} |
| 114 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,32 +0,0 @@ |
| 1 |
-package main |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "os" |
|
| 6 |
- |
|
| 7 |
- "github.com/docker/docker/builder/parser" |
|
| 8 |
-) |
|
| 9 |
- |
|
| 10 |
-func main() {
|
|
| 11 |
- var f *os.File |
|
| 12 |
- var err error |
|
| 13 |
- |
|
| 14 |
- if len(os.Args) < 2 {
|
|
| 15 |
- fmt.Println("please supply filename(s)")
|
|
| 16 |
- os.Exit(1) |
|
| 17 |
- } |
|
| 18 |
- |
|
| 19 |
- for _, fn := range os.Args[1:] {
|
|
| 20 |
- f, err = os.Open(fn) |
|
| 21 |
- if err != nil {
|
|
| 22 |
- panic(err) |
|
| 23 |
- } |
|
| 24 |
- |
|
| 25 |
- ast, err := parser.Parse(f) |
|
| 26 |
- if err != nil {
|
|
| 27 |
- panic(err) |
|
| 28 |
- } else {
|
|
| 29 |
- fmt.Println(ast.Dump()) |
|
| 30 |
- } |
|
| 31 |
- } |
|
| 32 |
-} |
| 33 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,55 +0,0 @@ |
| 1 |
-package parser |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "testing" |
|
| 5 |
-) |
|
| 6 |
- |
|
| 7 |
-var invalidJSONArraysOfStrings = []string{
|
|
| 8 |
- `["a",42,"b"]`, |
|
| 9 |
- `["a",123.456,"b"]`, |
|
| 10 |
- `["a",{},"b"]`,
|
|
| 11 |
- `["a",{"c": "d"},"b"]`,
|
|
| 12 |
- `["a",["c"],"b"]`, |
|
| 13 |
- `["a",true,"b"]`, |
|
| 14 |
- `["a",false,"b"]`, |
|
| 15 |
- `["a",null,"b"]`, |
|
| 16 |
-} |
|
| 17 |
- |
|
| 18 |
-var validJSONArraysOfStrings = map[string][]string{
|
|
| 19 |
- `[]`: {},
|
|
| 20 |
- `[""]`: {""},
|
|
| 21 |
- `["a"]`: {"a"},
|
|
| 22 |
- `["a","b"]`: {"a", "b"},
|
|
| 23 |
- `[ "a", "b" ]`: {"a", "b"},
|
|
| 24 |
- `[ "a", "b" ]`: {"a", "b"},
|
|
| 25 |
- ` [ "a", "b" ] `: {"a", "b"},
|
|
| 26 |
- `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"},
|
|
| 27 |
-} |
|
| 28 |
- |
|
| 29 |
-func TestJSONArraysOfStrings(t *testing.T) {
|
|
| 30 |
- for json, expected := range validJSONArraysOfStrings {
|
|
| 31 |
- if node, _, err := parseJSON(json); err != nil {
|
|
| 32 |
- t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err)
|
|
| 33 |
- } else {
|
|
| 34 |
- i := 0 |
|
| 35 |
- for node != nil {
|
|
| 36 |
- if i >= len(expected) {
|
|
| 37 |
- t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json)
|
|
| 38 |
- } |
|
| 39 |
- if node.Value != expected[i] {
|
|
| 40 |
- t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i)
|
|
| 41 |
- } |
|
| 42 |
- node = node.Next |
|
| 43 |
- i++ |
|
| 44 |
- } |
|
| 45 |
- if i != len(expected) {
|
|
| 46 |
- t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json)
|
|
| 47 |
- } |
|
| 48 |
- } |
|
| 49 |
- } |
|
| 50 |
- for _, json := range invalidJSONArraysOfStrings {
|
|
| 51 |
- if _, _, err := parseJSON(json); err != errDockerfileNotStringArray {
|
|
| 52 |
- t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json)
|
|
| 53 |
- } |
|
| 54 |
- } |
|
| 55 |
-} |
| 56 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,331 +0,0 @@ |
| 1 |
-package parser |
|
| 2 |
- |
|
| 3 |
-// line parsers are dispatch calls that parse a single unit of text into a |
|
| 4 |
-// Node object which contains the whole statement. Dockerfiles have varied |
|
| 5 |
-// (but not usually unique, see ONBUILD for a unique example) parsing rules |
|
| 6 |
-// per-command, and these unify the processing in a way that makes it |
|
| 7 |
-// manageable. |
|
| 8 |
- |
|
| 9 |
-import ( |
|
| 10 |
- "encoding/json" |
|
| 11 |
- "errors" |
|
| 12 |
- "fmt" |
|
| 13 |
- "strings" |
|
| 14 |
- "unicode" |
|
| 15 |
-) |
|
| 16 |
- |
|
| 17 |
-var ( |
|
| 18 |
- errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.")
|
|
| 19 |
-) |
|
| 20 |
- |
|
| 21 |
-// ignore the current argument. This will still leave a command parsed, but |
|
| 22 |
-// will not incorporate the arguments into the ast. |
|
| 23 |
-func parseIgnore(rest string) (*Node, map[string]bool, error) {
|
|
| 24 |
- return &Node{}, nil, nil
|
|
| 25 |
-} |
|
| 26 |
- |
|
| 27 |
-// used for onbuild. Could potentially be used for anything that represents a |
|
| 28 |
-// statement with sub-statements. |
|
| 29 |
-// |
|
| 30 |
-// ONBUILD RUN foo bar -> (onbuild (run foo bar)) |
|
| 31 |
-// |
|
| 32 |
-func parseSubCommand(rest string) (*Node, map[string]bool, error) {
|
|
| 33 |
- if rest == "" {
|
|
| 34 |
- return nil, nil, nil |
|
| 35 |
- } |
|
| 36 |
- |
|
| 37 |
- _, child, err := parseLine(rest) |
|
| 38 |
- if err != nil {
|
|
| 39 |
- return nil, nil, err |
|
| 40 |
- } |
|
| 41 |
- |
|
| 42 |
- return &Node{Children: []*Node{child}}, nil, nil
|
|
| 43 |
-} |
|
| 44 |
- |
|
| 45 |
-// helper to parse words (i.e. space-delimited or quoted strings) in a statement. |
|
| 46 |
-// The quotes are preserved as part of this function and they are stripped later |
|
| 47 |
-// as part of processWords(). |
|
| 48 |
-func parseWords(rest string) []string {
|
|
| 49 |
- const ( |
|
| 50 |
- inSpaces = iota // looking for start of a word |
|
| 51 |
- inWord |
|
| 52 |
- inQuote |
|
| 53 |
- ) |
|
| 54 |
- |
|
| 55 |
- words := []string{}
|
|
| 56 |
- phase := inSpaces |
|
| 57 |
- word := "" |
|
| 58 |
- quote := '\000' |
|
| 59 |
- blankOK := false |
|
| 60 |
- var ch rune |
|
| 61 |
- |
|
| 62 |
- for pos := 0; pos <= len(rest); pos++ {
|
|
| 63 |
- if pos != len(rest) {
|
|
| 64 |
- ch = rune(rest[pos]) |
|
| 65 |
- } |
|
| 66 |
- |
|
| 67 |
- if phase == inSpaces { // Looking for start of word
|
|
| 68 |
- if pos == len(rest) { // end of input
|
|
| 69 |
- break |
|
| 70 |
- } |
|
| 71 |
- if unicode.IsSpace(ch) { // skip spaces
|
|
| 72 |
- continue |
|
| 73 |
- } |
|
| 74 |
- phase = inWord // found it, fall thru |
|
| 75 |
- } |
|
| 76 |
- if (phase == inWord || phase == inQuote) && (pos == len(rest)) {
|
|
| 77 |
- if blankOK || len(word) > 0 {
|
|
| 78 |
- words = append(words, word) |
|
| 79 |
- } |
|
| 80 |
- break |
|
| 81 |
- } |
|
| 82 |
- if phase == inWord {
|
|
| 83 |
- if unicode.IsSpace(ch) {
|
|
| 84 |
- phase = inSpaces |
|
| 85 |
- if blankOK || len(word) > 0 {
|
|
| 86 |
- words = append(words, word) |
|
| 87 |
- } |
|
| 88 |
- word = "" |
|
| 89 |
- blankOK = false |
|
| 90 |
- continue |
|
| 91 |
- } |
|
| 92 |
- if ch == '\'' || ch == '"' {
|
|
| 93 |
- quote = ch |
|
| 94 |
- blankOK = true |
|
| 95 |
- phase = inQuote |
|
| 96 |
- } |
|
| 97 |
- if ch == '\\' {
|
|
| 98 |
- if pos+1 == len(rest) {
|
|
| 99 |
- continue // just skip \ at end |
|
| 100 |
- } |
|
| 101 |
- // If we're not quoted and we see a \, then always just |
|
| 102 |
- // add \ plus the char to the word, even if the char |
|
| 103 |
- // is a quote. |
|
| 104 |
- word += string(ch) |
|
| 105 |
- pos++ |
|
| 106 |
- ch = rune(rest[pos]) |
|
| 107 |
- } |
|
| 108 |
- word += string(ch) |
|
| 109 |
- continue |
|
| 110 |
- } |
|
| 111 |
- if phase == inQuote {
|
|
| 112 |
- if ch == quote {
|
|
| 113 |
- phase = inWord |
|
| 114 |
- } |
|
| 115 |
- // \ is special except for ' quotes - can't escape anything for ' |
|
| 116 |
- if ch == '\\' && quote != '\'' {
|
|
| 117 |
- if pos+1 == len(rest) {
|
|
| 118 |
- phase = inWord |
|
| 119 |
- continue // just skip \ at end |
|
| 120 |
- } |
|
| 121 |
- pos++ |
|
| 122 |
- nextCh := rune(rest[pos]) |
|
| 123 |
- word += string(ch) |
|
| 124 |
- ch = nextCh |
|
| 125 |
- } |
|
| 126 |
- word += string(ch) |
|
| 127 |
- } |
|
| 128 |
- } |
|
| 129 |
- |
|
| 130 |
- return words |
|
| 131 |
-} |
|
| 132 |
- |
|
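parseWords keeps the quotes around quoted words so processWords can strip them after variable expansion. To make the state machine's output concrete, here is a much-reduced stand-in handling only unquoted spaces and double quotes (no single quotes or backslash escapes):

```go
package main

import (
	"fmt"
	"unicode"
)

// splitWords is a reduced stand-in for parseWords above: it splits on
// unquoted whitespace and keeps double quotes in the output, as the real
// parser does, but omits single quotes and backslash escapes.
func splitWords(rest string) []string {
	var words []string
	word, inQuote := "", false
	for _, ch := range rest {
		switch {
		case ch == '"':
			inQuote = !inQuote
			word += string(ch) // quotes are preserved for processWords
		case unicode.IsSpace(ch) && !inQuote:
			if word != "" {
				words = append(words, word)
				word = ""
			}
		default:
			word += string(ch)
		}
	}
	if word != "" {
		words = append(words, word)
	}
	return words
}

func main() {
	fmt.Printf("%q\n", splitWords(`FOO=bar BAZ="a b c"`))
	// ["FOO=bar" "BAZ=\"a b c\""]
}
```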
| 133 |
-// parse environment-like statements. Note that this does *not* handle |
|
| 134 |
-// variable interpolation, which will be handled in the evaluator. |
|
| 135 |
-func parseNameVal(rest string, key string) (*Node, map[string]bool, error) {
|
|
| 136 |
- // This is kind of tricky because we need to support the old |
|
| 137 |
- // variant: KEY name value |
|
| 138 |
- // as well as the new one: KEY name=value ... |
|
| 139 |
- // The trigger to know which one is being used will be whether we hit |
|
| 140 |
- // a space or = first. space ==> old, "=" ==> new |
|
| 141 |
- |
|
| 142 |
- words := parseWords(rest) |
|
| 143 |
- if len(words) == 0 {
|
|
| 144 |
- return nil, nil, nil |
|
| 145 |
- } |
|
| 146 |
- |
|
| 147 |
- var rootnode *Node |
|
| 148 |
- |
|
| 149 |
- // Old format (KEY name value) |
|
| 150 |
- if !strings.Contains(words[0], "=") {
|
|
| 151 |
- node := &Node{}
|
|
| 152 |
- rootnode = node |
|
| 153 |
- strs := tokenWhitespace.Split(rest, 2) |
|
| 154 |
- |
|
| 155 |
- if len(strs) < 2 {
|
|
| 156 |
- return nil, nil, fmt.Errorf(key + " must have two arguments") |
|
| 157 |
- } |
|
| 158 |
- |
|
| 159 |
- node.Value = strs[0] |
|
| 160 |
- node.Next = &Node{}
|
|
| 161 |
- node.Next.Value = strs[1] |
|
| 162 |
- } else {
|
|
| 163 |
- var prevNode *Node |
|
| 164 |
- for i, word := range words {
|
|
| 165 |
- if !strings.Contains(word, "=") {
|
|
| 166 |
- return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word)
|
|
| 167 |
- } |
|
| 168 |
- parts := strings.SplitN(word, "=", 2) |
|
| 169 |
- |
|
| 170 |
- name := &Node{}
|
|
| 171 |
- value := &Node{}
|
|
| 172 |
- |
|
| 173 |
- name.Next = value |
|
| 174 |
- name.Value = parts[0] |
|
| 175 |
- value.Value = parts[1] |
|
| 176 |
- |
|
| 177 |
- if i == 0 {
|
|
| 178 |
- rootnode = name |
|
| 179 |
- } else {
|
|
| 180 |
- prevNode.Next = name |
|
| 181 |
- } |
|
| 182 |
- prevNode = value |
|
| 183 |
- } |
|
| 184 |
- } |
|
| 185 |
- |
|
| 186 |
- return rootnode, nil, nil |
|
| 187 |
-} |
|
| 188 |
- |
|
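The space-versus-`=` trigger gives `ENV key value` and `ENV key=value ...` different parses. A hedged illustration returning a plain map instead of the linked Node list; `envPairs` is hypothetical, and it uses strings.Fields, so unlike parseWords it ignores quoting:

```go
package main

import (
	"fmt"
	"strings"
)

// envPairs illustrates parseNameVal's two formats: old style
// "KEY name value" (one pair, value may contain spaces) versus new style
// "KEY name=value ..." (many pairs). The trigger is whether the first
// word contains '='.
func envPairs(rest string) (map[string]string, error) {
	words := strings.Fields(rest)
	if len(words) == 0 {
		return nil, nil
	}
	pairs := map[string]string{}
	if !strings.Contains(words[0], "=") { // old format
		if len(words) < 2 {
			return nil, fmt.Errorf("must have two arguments")
		}
		pairs[words[0]] = strings.Join(words[1:], " ")
		return pairs, nil
	}
	for _, w := range words { // new format
		parts := strings.SplitN(w, "=", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("can't find = in %q", w)
		}
		pairs[parts[0]] = parts[1]
	}
	return pairs, nil
}

func main() {
	fmt.Println(envPairs("PATH /usr/local/bin"))   // old: one pair
	fmt.Println(envPairs("GOPATH=/go GOOS=linux")) // new: two pairs
}
```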
| 189 |
-func parseEnv(rest string) (*Node, map[string]bool, error) {
|
|
| 190 |
- return parseNameVal(rest, "ENV") |
|
| 191 |
-} |
|
| 192 |
- |
|
| 193 |
-func parseLabel(rest string) (*Node, map[string]bool, error) {
|
|
| 194 |
- return parseNameVal(rest, "LABEL") |
|
| 195 |
-} |
|
| 196 |
- |
|
| 197 |
-// parses a statement containing one or more keyword definition(s) and/or |
|
| 198 |
-// value assignments, like `name1 name2= name3="" name4=value`. |
|
| 199 |
-// Note that this is a stricter format than the old assignment format |
|
| 200 |
-// allowed by parseNameVal(): it only allows assignments of the |
|
| 201 |
-// form `keyword=[<value>]` like `name2=`, `name3=""`, and `name4=value` above. |
|
| 202 |
-// In addition, a keyword definition alone is of the form `keyword` like `name1` |
|
| 203 |
-// above. And the assignments `name2=` and `name3=""` are equivalent and |
|
| 204 |
-// assign an empty value to the respective keywords. |
|
| 205 |
-func parseNameOrNameVal(rest string) (*Node, map[string]bool, error) {
|
|
| 206 |
- words := parseWords(rest) |
|
| 207 |
- if len(words) == 0 {
|
|
| 208 |
- return nil, nil, nil |
|
| 209 |
- } |
|
| 210 |
- |
|
| 211 |
- var ( |
|
| 212 |
- rootnode *Node |
|
| 213 |
- prevNode *Node |
|
| 214 |
- ) |
|
| 215 |
- for i, word := range words {
|
|
| 216 |
- node := &Node{}
|
|
| 217 |
- node.Value = word |
|
| 218 |
- if i == 0 {
|
|
| 219 |
- rootnode = node |
|
| 220 |
- } else {
|
|
| 221 |
- prevNode.Next = node |
|
| 222 |
- } |
|
| 223 |
- prevNode = node |
|
| 224 |
- } |
|
| 225 |
- |
|
| 226 |
- return rootnode, nil, nil |
|
| 227 |
-} |
|
| 228 |
- |
|
| 229 |
-// parses a whitespace-delimited set of arguments. The result is effectively a |
|
| 230 |
-// linked list of string arguments. |
|
| 231 |
-func parseStringsWhitespaceDelimited(rest string) (*Node, map[string]bool, error) {
|
|
| 232 |
- if rest == "" {
|
|
| 233 |
- return nil, nil, nil |
|
| 234 |
- } |
|
| 235 |
- |
|
| 236 |
- node := &Node{}
|
|
| 237 |
- rootnode := node |
|
| 238 |
- prevnode := node |
|
| 239 |
- for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp
|
|
| 240 |
- prevnode = node |
|
| 241 |
- node.Value = str |
|
| 242 |
- node.Next = &Node{}
|
|
| 243 |
- node = node.Next |
|
| 244 |
- } |
|
| 245 |
- |
|
| 246 |
- // XXX to get around regexp.Split *always* providing an empty string at the |
|
| 247 |
- // end due to how our loop is constructed, nil out the last node in the |
|
| 248 |
- // chain. |
|
| 249 |
- prevnode.Next = nil |
|
| 250 |
- |
|
| 251 |
- return rootnode, nil, nil |
|
| 252 |
-} |
|
| 253 |
- |
|
| 254 |
-// parseString just stores the rest of the line as a single node's value. |
|
| 255 |
-func parseString(rest string) (*Node, map[string]bool, error) {
|
|
| 256 |
- if rest == "" {
|
|
| 257 |
- return nil, nil, nil |
|
| 258 |
- } |
|
| 259 |
- n := &Node{}
|
|
| 260 |
- n.Value = rest |
|
| 261 |
- return n, nil, nil |
|
| 262 |
-} |
|
| 263 |
- |
|
| 264 |
-// parseJSON converts JSON arrays to an AST. |
|
| 265 |
-func parseJSON(rest string) (*Node, map[string]bool, error) {
|
|
| 266 |
- rest = strings.TrimLeftFunc(rest, unicode.IsSpace) |
|
| 267 |
- if !strings.HasPrefix(rest, "[") {
|
|
| 268 |
- return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) |
|
| 269 |
- } |
|
| 270 |
- |
|
| 271 |
- var myJSON []interface{}
|
|
| 272 |
- if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil {
|
|
| 273 |
- return nil, nil, err |
|
| 274 |
- } |
|
| 275 |
- |
|
| 276 |
- var top, prev *Node |
|
| 277 |
- for _, str := range myJSON {
|
|
| 278 |
- s, ok := str.(string) |
|
| 279 |
- if !ok {
|
|
| 280 |
- return nil, nil, errDockerfileNotStringArray |
|
| 281 |
- } |
|
| 282 |
- |
|
| 283 |
- node := &Node{Value: s}
|
|
| 284 |
- if prev == nil {
|
|
| 285 |
- top = node |
|
| 286 |
- } else {
|
|
| 287 |
- prev.Next = node |
|
| 288 |
- } |
|
| 289 |
- prev = node |
|
| 290 |
- } |
|
| 291 |
- |
|
| 292 |
- return top, map[string]bool{"json": true}, nil
|
|
| 293 |
-} |
|
| 294 |
- |
|
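Decoding into `[]interface{}` and type-asserting every element is what enforces the strings-only rule for JSON-form instructions. The same validation as a standalone sketch, returning a slice instead of the Node chain:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

var errNotStringArray = errors.New("arrays must be comprised of strings only")

// stringArray mirrors parseJSON's validation: decode into []interface{},
// then require every element to type-assert to string.
func stringArray(rest string) ([]string, error) {
	var raw []interface{}
	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&raw); err != nil {
		return nil, err
	}
	out := make([]string, 0, len(raw))
	for _, v := range raw {
		s, ok := v.(string)
		if !ok {
			return nil, errNotStringArray
		}
		out = append(out, s)
	}
	return out, nil
}

func main() {
	fmt.Println(stringArray(`["/bin/sh", "-c", "echo hi"]`)) // ok
	fmt.Println(stringArray(`["a", 42]`))                    // errNotStringArray
}
```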
| 295 |
-// parseMaybeJSON determines if the argument appears to be a JSON array. If |
|
| 296 |
-// so, passes to parseJSON; if not, quotes the result and returns a single |
|
| 297 |
-// node. |
|
| 298 |
-func parseMaybeJSON(rest string) (*Node, map[string]bool, error) {
|
|
| 299 |
- if rest == "" {
|
|
| 300 |
- return nil, nil, nil |
|
| 301 |
- } |
|
| 302 |
- |
|
| 303 |
- node, attrs, err := parseJSON(rest) |
|
| 304 |
- |
|
| 305 |
- if err == nil {
|
|
| 306 |
- return node, attrs, nil |
|
| 307 |
- } |
|
| 308 |
- if err == errDockerfileNotStringArray {
|
|
| 309 |
- return nil, nil, err |
|
| 310 |
- } |
|
| 311 |
- |
|
| 312 |
- node = &Node{}
|
|
| 313 |
- node.Value = rest |
|
| 314 |
- return node, nil, nil |
|
| 315 |
-} |
|
| 316 |
- |
|
| 317 |
-// parseMaybeJSONToList determines if the argument appears to be a JSON array. If |
|
| 318 |
-// so, passes to parseJSON; if not, attempts to parse it as a whitespace |
|
| 319 |
-// delimited string. |
|
| 320 |
-func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) {
|
|
| 321 |
- node, attrs, err := parseJSON(rest) |
|
| 322 |
- |
|
| 323 |
- if err == nil {
|
|
| 324 |
- return node, attrs, nil |
|
| 325 |
- } |
|
| 326 |
- if err == errDockerfileNotStringArray {
|
|
| 327 |
- return nil, nil, err |
|
| 328 |
- } |
|
| 329 |
- |
|
| 330 |
- return parseStringsWhitespaceDelimited(rest) |
|
| 331 |
-} |
| 332 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,145 +0,0 @@ |
| 1 |
-// Package parser implements a parser and parse tree dumper for Dockerfiles. |
|
| 2 |
-package parser |
|
| 3 |
- |
|
| 4 |
-import ( |
|
| 5 |
- "bufio" |
|
| 6 |
- "io" |
|
| 7 |
- "regexp" |
|
| 8 |
- "strings" |
|
| 9 |
- "unicode" |
|
| 10 |
- |
|
| 11 |
- "github.com/docker/docker/builder/command" |
|
| 12 |
-) |
|
| 13 |
- |
|
| 14 |
-// Node is a structure used to represent a parse tree. |
|
| 15 |
-// |
|
| 16 |
-// In the node there are three fields, Value, Next, and Children. Value is the |
|
| 17 |
-// current token's string value. Next is always the next non-child token, and |
|
| 18 |
-// children contains all the children. Here's an example: |
|
| 19 |
-// |
|
| 20 |
-// (value next (child child-next child-next-next) next-next) |
|
| 21 |
-// |
|
| 22 |
-// This data structure is frankly pretty lousy for handling complex languages, |
|
| 23 |
-// but lucky for us the Dockerfile isn't very complicated. This structure |
|
| 24 |
-// works a little more effectively than a "proper" parse tree for our needs. |
|
| 25 |
-// |
|
| 26 |
-type Node struct {
|
|
| 27 |
- Value string // actual content |
|
| 28 |
- Next *Node // the next item in the current sexp |
|
| 29 |
- Children []*Node // the children of this sexp |
|
| 30 |
- Attributes map[string]bool // special attributes for this node |
|
| 31 |
- Original string // original line used before parsing |
|
| 32 |
- Flags []string // only top Node should have this set |
|
| 33 |
-} |
|
| 34 |
- |
|
| 35 |
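To make the Value/Next/Children shape concrete, here is a hand-built node chain for `COPY . /app` and a dump in the s-expression style the test result files use (Node is redeclared locally; `dump` is a simplified stand-in for ast.Dump):

```go
package main

import "fmt"

// Node is a local copy of the parser's parse-tree shape above.
type Node struct {
	Value    string
	Next     *Node
	Children []*Node
}

// dump walks a node the way ast.Dump does: a command's arguments hang off
// Next, while whole sub-statements nest in Children.
func dump(n *Node) string {
	out := "(" + n.Value
	for arg := n.Next; arg != nil; arg = arg.Next {
		out += " " + fmt.Sprintf("%q", arg.Value)
	}
	return out + ")"
}

func main() {
	// Hand-built AST for: COPY . /app
	copyNode := &Node{
		Value: "copy",
		Next:  &Node{Value: ".", Next: &Node{Value: "/app"}},
	}
	fmt.Println(dump(copyNode)) // (copy "." "/app")
}
```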
-var ( |
|
| 36 |
- dispatch map[string]func(string) (*Node, map[string]bool, error) |
|
| 37 |
- tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) |
|
| 38 |
- tokenLineContinuation = regexp.MustCompile(`\\[ \t]*$`) |
|
| 39 |
- tokenComment = regexp.MustCompile(`^#.*$`) |
|
| 40 |
-) |
|
| 41 |
- |
|
| 42 |
-func init() {
|
|
| 43 |
- // Dispatch Table. see line_parsers.go for the parse functions. |
|
| 44 |
- // The command is parsed and mapped to the line parser. The line parser |
|
| 45 |
- // receives the arguments but not the command, and returns an AST after |
|
| 46 |
- // reformulating the arguments according to the rules in the parser |
|
| 47 |
- // functions. Errors are propagated up by Parse() and the resulting AST can |
|
| 48 |
- // be incorporated directly into the existing AST as a next. |
|
| 49 |
- dispatch = map[string]func(string) (*Node, map[string]bool, error){
|
|
| 50 |
- command.User: parseString, |
|
| 51 |
- command.Onbuild: parseSubCommand, |
|
| 52 |
- command.Workdir: parseString, |
|
| 53 |
- command.Env: parseEnv, |
|
| 54 |
- command.Label: parseLabel, |
|
| 55 |
- command.Maintainer: parseString, |
|
| 56 |
- command.From: parseString, |
|
| 57 |
- command.Add: parseMaybeJSONToList, |
|
| 58 |
- command.Copy: parseMaybeJSONToList, |
|
| 59 |
- command.Run: parseMaybeJSON, |
|
| 60 |
- command.Cmd: parseMaybeJSON, |
|
| 61 |
- command.Entrypoint: parseMaybeJSON, |
|
| 62 |
- command.Expose: parseStringsWhitespaceDelimited, |
|
| 63 |
- command.Volume: parseMaybeJSONToList, |
|
| 64 |
- command.StopSignal: parseString, |
|
| 65 |
- command.Arg: parseNameOrNameVal, |
|
| 66 |
- } |
|
| 67 |
-} |
|
| 68 |
- |
|
| 69 |
-// parse a line and return the remainder. |
|
| 70 |
-func parseLine(line string) (string, *Node, error) {
|
|
| 71 |
- if line = stripComments(line); line == "" {
|
|
| 72 |
- return "", nil, nil |
|
| 73 |
- } |
|
| 74 |
- |
|
| 75 |
- if tokenLineContinuation.MatchString(line) {
|
|
| 76 |
- line = tokenLineContinuation.ReplaceAllString(line, "") |
|
| 77 |
- return line, nil, nil |
|
| 78 |
- } |
|
| 79 |
- |
|
| 80 |
- cmd, flags, args, err := splitCommand(line) |
|
| 81 |
- if err != nil {
|
|
| 82 |
- return "", nil, err |
|
| 83 |
- } |
|
| 84 |
- |
|
| 85 |
- node := &Node{}
|
|
| 86 |
- node.Value = cmd |
|
| 87 |
- |
|
| 88 |
- sexp, attrs, err := fullDispatch(cmd, args) |
|
| 89 |
- if err != nil {
|
|
| 90 |
- return "", nil, err |
|
| 91 |
- } |
|
| 92 |
- |
|
| 93 |
- node.Next = sexp |
|
| 94 |
- node.Attributes = attrs |
|
| 95 |
- node.Original = line |
|
| 96 |
- node.Flags = flags |
|
| 97 |
- |
|
| 98 |
- return "", node, nil |
|
| 99 |
-} |
|
| 100 |
- |
|
| 101 |
-// Parse is the main parse routine. |
|
| 102 |
-// It handles an io.ReadWriteCloser and returns the root of the AST. |
|
| 103 |
-func Parse(rwc io.Reader) (*Node, error) {
|
|
| 104 |
- root := &Node{}
|
|
| 105 |
- scanner := bufio.NewScanner(rwc) |
|
| 106 |
- |
|
| 107 |
- for scanner.Scan() {
|
|
| 108 |
- scannedLine := strings.TrimLeftFunc(scanner.Text(), unicode.IsSpace) |
|
| 109 |
- line, child, err := parseLine(scannedLine) |
|
| 110 |
- if err != nil {
|
|
| 111 |
- return nil, err |
|
| 112 |
- } |
|
| 113 |
- |
|
| 114 |
- if line != "" && child == nil {
|
|
| 115 |
- for scanner.Scan() {
|
|
| 116 |
- newline := scanner.Text() |
|
| 117 |
- |
|
| 118 |
- if stripComments(strings.TrimSpace(newline)) == "" {
|
|
| 119 |
- continue |
|
| 120 |
- } |
|
| 121 |
- |
|
| 122 |
- line, child, err = parseLine(line + newline) |
|
| 123 |
- if err != nil {
|
|
| 124 |
- return nil, err |
|
| 125 |
- } |
|
| 126 |
- |
|
| 127 |
- if child != nil {
|
|
| 128 |
- break |
|
| 129 |
- } |
|
| 130 |
- } |
|
| 131 |
- if child == nil && line != "" {
|
|
| 132 |
- line, child, err = parseLine(line) |
|
| 133 |
- if err != nil {
|
|
| 134 |
- return nil, err |
|
| 135 |
- } |
|
| 136 |
- } |
|
| 137 |
- } |
|
| 138 |
- |
|
| 139 |
- if child != nil {
|
|
| 140 |
- root.Children = append(root.Children, child) |
|
| 141 |
- } |
|
| 142 |
- } |
|
| 143 |
- |
|
| 144 |
- return root, nil |
|
| 145 |
-} |
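Parse's inner loop keeps accumulating physical lines while parseLine signals a continuation. A trimmed, runnable version of just that joining step (`joinContinuations` is hypothetical; it reuses the tokenLineContinuation regexp and skips comment stripping and dispatch):

```go
package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

var lineContinuation = regexp.MustCompile(`\\[ \t]*$`)

// joinContinuations folds backslash-continued physical lines into logical
// ones, the same joining Parse performs before dispatching each statement.
// Like the real parser, it joins without inserting a space, so "goodbye\"
// plus "frog" becomes "goodbyefrog".
func joinContinuations(input string) []string {
	var logical []string
	line := ""
	scanner := bufio.NewScanner(strings.NewReader(input))
	for scanner.Scan() {
		text := strings.TrimSpace(scanner.Text())
		if lineContinuation.MatchString(text) {
			line += lineContinuation.ReplaceAllString(text, "")
			continue
		}
		logical = append(logical, line+text)
		line = ""
	}
	return logical
}

func main() {
	df := "FROM ubuntu:14.04\nRUN echo hello \\\n    world\n"
	fmt.Printf("%q\n", joinContinuations(df))
	// ["FROM ubuntu:14.04" "RUN echo hello world"]
}
```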
| 146 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,112 +0,0 @@ |
| 1 |
-package parser |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "io/ioutil" |
|
| 6 |
- "os" |
|
| 7 |
- "path/filepath" |
|
| 8 |
- "testing" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-const testDir = "testfiles" |
|
| 12 |
-const negativeTestDir = "testfiles-negative" |
|
| 13 |
- |
|
| 14 |
-func getDirs(t *testing.T, dir string) []string {
|
|
| 15 |
- f, err := os.Open(dir) |
|
| 16 |
- if err != nil {
|
|
| 17 |
- t.Fatal(err) |
|
| 18 |
- } |
|
| 19 |
- |
|
| 20 |
- defer f.Close() |
|
| 21 |
- |
|
| 22 |
- dirs, err := f.Readdirnames(0) |
|
| 23 |
- if err != nil {
|
|
| 24 |
- t.Fatal(err) |
|
| 25 |
- } |
|
| 26 |
- |
|
| 27 |
- return dirs |
|
| 28 |
-} |
|
| 29 |
- |
|
| 30 |
-func TestTestNegative(t *testing.T) {
|
|
| 31 |
- for _, dir := range getDirs(t, negativeTestDir) {
|
|
| 32 |
- dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") |
|
| 33 |
- |
|
| 34 |
- df, err := os.Open(dockerfile) |
|
| 35 |
- if err != nil {
|
|
| 36 |
- t.Fatalf("Dockerfile missing for %s: %v", dir, err)
|
|
| 37 |
- } |
|
| 38 |
- |
|
| 39 |
- _, err = Parse(df) |
|
| 40 |
- if err == nil {
|
|
| 41 |
- t.Fatalf("No error parsing broken dockerfile for %s", dir)
|
|
| 42 |
- } |
|
| 43 |
- |
|
| 44 |
- df.Close() |
|
| 45 |
- } |
|
| 46 |
-} |
|
| 47 |
- |
|
| 48 |
-func TestTestData(t *testing.T) {
|
|
| 49 |
- for _, dir := range getDirs(t, testDir) {
|
|
| 50 |
- dockerfile := filepath.Join(testDir, dir, "Dockerfile") |
|
| 51 |
- resultfile := filepath.Join(testDir, dir, "result") |
|
| 52 |
- |
|
| 53 |
- df, err := os.Open(dockerfile) |
|
| 54 |
- if err != nil {
|
|
| 55 |
- t.Fatalf("Dockerfile missing for %s: %v", dir, err)
|
|
| 56 |
- } |
|
| 57 |
- defer df.Close() |
|
| 58 |
- |
|
| 59 |
- ast, err := Parse(df) |
|
| 60 |
- if err != nil {
|
|
| 61 |
- t.Fatalf("Error parsing %s's dockerfile: %v", dir, err)
|
|
| 62 |
- } |
|
| 63 |
- |
|
| 64 |
- content, err := ioutil.ReadFile(resultfile) |
|
| 65 |
- if err != nil {
|
|
| 66 |
- t.Fatalf("Error reading %s's result file: %v", dir, err)
|
|
| 67 |
- } |
|
| 68 |
- |
|
| 69 |
- if ast.Dump()+"\n" != string(content) {
|
|
| 70 |
- fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) |
|
| 71 |
- fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) |
|
| 72 |
- t.Fatalf("%s: AST dump of dockerfile does not match result", dir)
|
|
| 73 |
- } |
|
| 74 |
- } |
|
| 75 |
-} |
|
| 76 |
- |
|
| 77 |
-func TestParseWords(t *testing.T) {
|
|
| 78 |
- tests := []map[string][]string{
|
|
| 79 |
- {
|
|
| 80 |
- "input": {"foo"},
|
|
| 81 |
- "expect": {"foo"},
|
|
| 82 |
- }, |
|
| 83 |
- {
|
|
| 84 |
- "input": {"foo bar"},
|
|
| 85 |
- "expect": {"foo", "bar"},
|
|
| 86 |
- }, |
|
| 87 |
- {
|
|
| 88 |
- "input": {"foo=bar"},
|
|
| 89 |
- "expect": {"foo=bar"},
|
|
| 90 |
- }, |
|
| 91 |
- {
|
|
| 92 |
- "input": {"foo bar 'abc xyz'"},
|
|
| 93 |
- "expect": {"foo", "bar", "'abc xyz'"},
|
|
| 94 |
- }, |
|
| 95 |
- {
|
|
| 96 |
- "input": {`foo bar "abc xyz"`},
|
|
| 97 |
- "expect": {"foo", "bar", `"abc xyz"`},
|
|
| 98 |
- }, |
|
| 99 |
- } |
|
| 100 |
- |
|
| 101 |
- for _, test := range tests {
|
|
| 102 |
- words := parseWords(test["input"][0]) |
|
| 103 |
- if len(words) != len(test["expect"]) {
|
|
| 104 |
- t.Fatalf("length check failed. input: %v, expect: %v, output: %v", test["input"][0], test["expect"], words)
|
|
| 105 |
- } |
|
| 106 |
- for i, word := range words {
|
|
| 107 |
- if word != test["expect"][i] {
|
|
| 108 |
- t.Fatalf("word check failed for word: %q. input: %v, expect: %v, output: %v", word, test["input"][0], test["expect"], words)
|
|
| 109 |
- } |
|
| 110 |
- } |
|
| 111 |
- } |
|
| 112 |
-} |
| 2 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,11 +0,0 @@ |
| 1 |
-FROM ubuntu:14.04 |
|
| 2 |
-MAINTAINER Seongyeol Lim <seongyeol37@gmail.com> |
|
| 3 |
- |
|
| 4 |
-COPY . /go/src/github.com/docker/docker |
|
| 5 |
-ADD . / |
|
| 6 |
-ADD null / |
|
| 7 |
-COPY nullfile /tmp |
|
| 8 |
-ADD [ "vimrc", "/tmp" ] |
|
| 9 |
-COPY [ "bashrc", "/tmp" ] |
|
| 10 |
-COPY [ "test file", "/tmp" ] |
|
| 11 |
-ADD [ "test file", "/tmp/test file" ] |
| 12 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,10 +0,0 @@ |
| 1 |
-(from "ubuntu:14.04") |
|
| 2 |
-(maintainer "Seongyeol Lim <seongyeol37@gmail.com>") |
|
| 3 |
-(copy "." "/go/src/github.com/docker/docker") |
|
| 4 |
-(add "." "/") |
|
| 5 |
-(add "null" "/") |
|
| 6 |
-(copy "nullfile" "/tmp") |
|
| 7 |
-(add "vimrc" "/tmp") |
|
| 8 |
-(copy "bashrc" "/tmp") |
|
| 9 |
-(copy "test file" "/tmp") |
|
| 10 |
-(add "test file" "/tmp/test file") |
| 11 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,25 +0,0 @@ |
| 1 |
-FROM brimstone/ubuntu:14.04 |
|
| 2 |
- |
|
| 3 |
-MAINTAINER brimstone@the.narro.ws |
|
| 4 |
- |
|
| 5 |
-# TORUN -v /var/run/docker.sock:/var/run/docker.sock |
|
| 6 |
- |
|
| 7 |
-ENV GOPATH /go |
|
| 8 |
- |
|
| 9 |
-# Set our command |
|
| 10 |
-ENTRYPOINT ["/usr/local/bin/consuldock"] |
|
| 11 |
- |
|
| 12 |
-# Install the packages we need, clean up after them and us |
|
| 13 |
-RUN apt-get update \ |
|
| 14 |
- && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
|
| 15 |
- && apt-get install -y --no-install-recommends git golang ca-certificates \ |
|
| 16 |
- && apt-get clean \ |
|
| 17 |
- && rm -rf /var/lib/apt/lists \ |
|
| 18 |
- |
|
| 19 |
- && go get -v github.com/brimstone/consuldock \ |
|
| 20 |
- && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ |
|
| 21 |
- |
|
| 22 |
- && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
|
| 23 |
- && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
|
| 24 |
- && rm /tmp/dpkg.* \ |
|
| 25 |
- && rm -rf $GOPATH |
| 26 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,5 +0,0 @@ |
| 1 |
-(from "brimstone/ubuntu:14.04") |
|
| 2 |
-(maintainer "brimstone@the.narro.ws") |
|
| 3 |
-(env "GOPATH" "/go") |
|
| 4 |
-(entrypoint "/usr/local/bin/consuldock") |
|
| 5 |
-(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
|
| 6 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,52 +0,0 @@ |
| 1 |
-FROM brimstone/ubuntu:14.04 |
|
| 2 |
- |
|
| 3 |
-CMD [] |
|
| 4 |
- |
|
| 5 |
-ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] |
|
| 6 |
- |
|
| 7 |
-EXPOSE 8500 8600 8400 8301 8302 |
|
| 8 |
- |
|
| 9 |
-RUN apt-get update \ |
|
| 10 |
- && apt-get install -y unzip wget \ |
|
| 11 |
- && apt-get clean \ |
|
| 12 |
- && rm -rf /var/lib/apt/lists |
|
| 13 |
- |
|
| 14 |
-RUN cd /tmp \ |
|
| 15 |
- && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ |
|
| 16 |
- -O web_ui.zip \ |
|
| 17 |
- && unzip web_ui.zip \ |
|
| 18 |
- && mv dist /webui \ |
|
| 19 |
- && rm web_ui.zip |
|
| 20 |
- |
|
| 21 |
-RUN apt-get update \ |
|
| 22 |
- && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
|
| 23 |
- && apt-get install -y --no-install-recommends unzip wget \ |
|
| 24 |
- && apt-get clean \ |
|
| 25 |
- && rm -rf /var/lib/apt/lists \ |
|
| 26 |
- |
|
| 27 |
- && cd /tmp \ |
|
| 28 |
- && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ |
|
| 29 |
- -O web_ui.zip \ |
|
| 30 |
- && unzip web_ui.zip \ |
|
| 31 |
- && mv dist /webui \ |
|
| 32 |
- && rm web_ui.zip \ |
|
| 33 |
- |
|
| 34 |
- && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
|
| 35 |
- && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
|
| 36 |
- && rm /tmp/dpkg.* |
|
| 37 |
- |
|
| 38 |
-ENV GOPATH /go |
|
| 39 |
- |
|
| 40 |
-RUN apt-get update \ |
|
| 41 |
- && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \
|
|
| 42 |
- && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ |
|
| 43 |
- && apt-get clean \ |
|
| 44 |
- && rm -rf /var/lib/apt/lists \ |
|
| 45 |
- |
|
| 46 |
- && go get -v github.com/hashicorp/consul \ |
|
| 47 |
- && mv $GOPATH/bin/consul /usr/bin/consul \ |
|
| 48 |
- |
|
| 49 |
- && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \
|
|
| 50 |
- && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \
|
|
| 51 |
- && rm /tmp/dpkg.* \ |
|
| 52 |
- && rm -rf $GOPATH |
| 53 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,9 +0,0 @@ |
| 1 |
-(from "brimstone/ubuntu:14.04") |
|
| 2 |
-(cmd) |
|
| 3 |
-(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") |
|
| 4 |
-(expose "8500" "8600" "8400" "8301" "8302") |
|
| 5 |
-(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") |
|
| 6 |
-(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") |
|
| 7 |
-(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*")
|
|
| 8 |
-(env "GOPATH" "/go") |
|
| 9 |
-(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH")
|
| 10 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,36 +0,0 @@ |
| 1 |
-FROM ubuntu:14.04 |
|
| 2 |
- |
|
| 3 |
-RUN echo hello\ |
|
| 4 |
- world\ |
|
| 5 |
- goodnight \ |
|
| 6 |
- moon\ |
|
| 7 |
- light\ |
|
| 8 |
-ning |
|
| 9 |
-RUN echo hello \ |
|
| 10 |
- world |
|
| 11 |
-RUN echo hello \ |
|
| 12 |
-world |
|
| 13 |
-RUN echo hello \ |
|
| 14 |
-goodbye\ |
|
| 15 |
-frog |
|
| 16 |
-RUN echo hello \ |
|
| 17 |
-world |
|
| 18 |
-RUN echo hi \ |
|
| 19 |
- \ |
|
| 20 |
- world \ |
|
| 21 |
-\ |
|
| 22 |
- good\ |
|
| 23 |
-\ |
|
| 24 |
-night |
|
| 25 |
-RUN echo goodbye\ |
|
| 26 |
-frog |
|
| 27 |
-RUN echo good\ |
|
| 28 |
-bye\ |
|
| 29 |
-frog |
|
| 30 |
- |
|
| 31 |
-RUN echo hello \ |
|
| 32 |
-# this is a comment |
|
| 33 |
- |
|
| 34 |
-# this is a comment with a blank line surrounding it |
|
| 35 |
- |
|
| 36 |
-this is some more useful stuff |
| 37 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,10 +0,0 @@ |
| 1 |
-(from "ubuntu:14.04") |
|
| 2 |
-(run "echo hello world goodnight moon lightning") |
|
| 3 |
-(run "echo hello world") |
|
| 4 |
-(run "echo hello world") |
|
| 5 |
-(run "echo hello goodbyefrog") |
|
| 6 |
-(run "echo hello world") |
|
| 7 |
-(run "echo hi world goodnight") |
|
| 8 |
-(run "echo goodbyefrog") |
|
| 9 |
-(run "echo goodbyefrog") |
|
| 10 |
-(run "echo hello this is some more useful stuff") |
| 11 | 1 |
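This Dockerfile/result pair documents the parser's line-joining rules: a trailing backslash splices the next line in as-is, so any separating space has to come from the continuation line itself ("light\" followed by "ning" joins to "lightning", while " world" keeps its leading space), and comment lines or blank lines appearing between continuations are dropped entirely.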
deleted file mode 100644 |
| ... | ... |
@@ -1,54 +0,0 @@ |
| 1 |
-FROM cpuguy83/ubuntu |
|
| 2 |
-ENV NAGIOS_HOME /opt/nagios |
|
| 3 |
-ENV NAGIOS_USER nagios |
|
| 4 |
-ENV NAGIOS_GROUP nagios |
|
| 5 |
-ENV NAGIOS_CMDUSER nagios |
|
| 6 |
-ENV NAGIOS_CMDGROUP nagios |
|
| 7 |
-ENV NAGIOSADMIN_USER nagiosadmin |
|
| 8 |
-ENV NAGIOSADMIN_PASS nagios |
|
| 9 |
-ENV APACHE_RUN_USER nagios |
|
| 10 |
-ENV APACHE_RUN_GROUP nagios |
|
| 11 |
-ENV NAGIOS_TIMEZONE UTC |
|
| 12 |
- |
|
| 13 |
-RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list |
|
| 14 |
-RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx |
|
| 15 |
-RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP )
|
|
| 16 |
-RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) |
|
| 17 |
- |
|
| 18 |
-ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz |
|
| 19 |
-RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf
|
|
| 20 |
-ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ |
|
| 21 |
-RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install
|
|
| 22 |
- |
|
| 23 |
-RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars |
|
| 24 |
-RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default |
|
| 25 |
- |
|
| 26 |
-RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo
|
|
| 27 |
- |
|
| 28 |
-RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf
|
|
| 29 |
- |
|
| 30 |
-RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs
|
|
| 31 |
-RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg
|
|
| 32 |
-RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg
|
|
| 33 |
-RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf |
|
| 34 |
- |
|
| 35 |
-RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ |
|
| 36 |
- sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg |
|
| 37 |
-RUN cp /etc/services /var/spool/postfix/etc/ |
|
| 38 |
- |
|
| 39 |
-RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix |
|
| 40 |
-ADD nagios.init /etc/sv/nagios/run |
|
| 41 |
-ADD apache.init /etc/sv/apache/run |
|
| 42 |
-ADD postfix.init /etc/sv/postfix/run |
|
| 43 |
-ADD postfix.stop /etc/sv/postfix/finish |
|
| 44 |
- |
|
| 45 |
-ADD start.sh /usr/local/bin/start_nagios |
|
| 46 |
- |
|
| 47 |
-ENV APACHE_LOCK_DIR /var/run |
|
| 48 |
-ENV APACHE_LOG_DIR /var/log/apache2 |
|
| 49 |
- |
|
| 50 |
-EXPOSE 80 |
|
| 51 |
- |
|
| 52 |
-VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] |
|
| 53 |
- |
|
| 54 |
-CMD ["/usr/local/bin/start_nagios"] |
| 55 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,40 +0,0 @@ |
| 1 |
-(from "cpuguy83/ubuntu") |
|
| 2 |
-(env "NAGIOS_HOME" "/opt/nagios") |
|
| 3 |
-(env "NAGIOS_USER" "nagios") |
|
| 4 |
-(env "NAGIOS_GROUP" "nagios") |
|
| 5 |
-(env "NAGIOS_CMDUSER" "nagios") |
|
| 6 |
-(env "NAGIOS_CMDGROUP" "nagios") |
|
| 7 |
-(env "NAGIOSADMIN_USER" "nagiosadmin") |
|
| 8 |
-(env "NAGIOSADMIN_PASS" "nagios") |
|
| 9 |
-(env "APACHE_RUN_USER" "nagios") |
|
| 10 |
-(env "APACHE_RUN_GROUP" "nagios") |
|
| 11 |
-(env "NAGIOS_TIMEZONE" "UTC") |
|
| 12 |
-(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") |
|
| 13 |
-(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") |
|
| 14 |
-(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )")
|
|
| 15 |
-(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") |
|
| 16 |
-(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") |
|
| 17 |
-(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf")
|
|
| 18 |
-(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") |
|
| 19 |
-(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install")
|
|
| 20 |
-(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") |
|
| 21 |
-(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") |
|
| 22 |
-(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo")
|
|
| 23 |
-(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf")
|
|
| 24 |
-(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs")
|
|
| 25 |
-(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
|
|
| 26 |
-(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg")
|
|
| 27 |
-(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") |
|
| 28 |
-(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") |
|
| 29 |
-(run "cp /etc/services /var/spool/postfix/etc/") |
|
| 30 |
-(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") |
|
| 31 |
-(add "nagios.init" "/etc/sv/nagios/run") |
|
| 32 |
-(add "apache.init" "/etc/sv/apache/run") |
|
| 33 |
-(add "postfix.init" "/etc/sv/postfix/run") |
|
| 34 |
-(add "postfix.stop" "/etc/sv/postfix/finish") |
|
| 35 |
-(add "start.sh" "/usr/local/bin/start_nagios") |
|
| 36 |
-(env "APACHE_LOCK_DIR" "/var/run") |
|
| 37 |
-(env "APACHE_LOG_DIR" "/var/log/apache2") |
|
| 38 |
-(expose "80") |
|
| 39 |
-(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") |
|
| 40 |
-(cmd "/usr/local/bin/start_nagios") |
| 41 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,104 +0,0 @@ |
| 1 |
-# This file describes the standard way to build Docker, using docker |
|
| 2 |
-# |
|
| 3 |
-# Usage: |
|
| 4 |
-# |
|
| 5 |
-# # Assemble the full dev environment. This is slow the first time. |
|
| 6 |
-# docker build -t docker . |
|
| 7 |
-# |
|
| 8 |
-# # Mount your source in an interactive container for quick testing: |
|
| 9 |
-# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash |
|
| 10 |
-# |
|
| 11 |
-# # Run the test suite: |
|
| 12 |
-# docker run --privileged docker hack/make.sh test |
|
| 13 |
-# |
|
| 14 |
-# # Publish a release: |
|
| 15 |
-# docker run --privileged \ |
|
| 16 |
-# -e AWS_S3_BUCKET=baz \ |
|
| 17 |
-# -e AWS_ACCESS_KEY=foo \ |
|
| 18 |
-# -e AWS_SECRET_KEY=bar \ |
|
| 19 |
-# -e GPG_PASSPHRASE=gloubiboulga \ |
|
| 20 |
-# docker hack/release.sh |
|
| 21 |
-# |
|
| 22 |
-# Note: AppArmor used to mess with privileged mode, but this is no longer |
|
| 23 |
-# the case. Therefore, you don't have to disable it anymore. |
|
| 24 |
-# |
|
| 25 |
- |
|
| 26 |
-FROM ubuntu:14.04 |
|
| 27 |
-MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon) |
|
| 28 |
- |
|
| 29 |
-# Packaged dependencies |
|
| 30 |
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ |
|
| 31 |
- apt-utils \ |
|
| 32 |
- aufs-tools \ |
|
| 33 |
- automake \ |
|
| 34 |
- btrfs-tools \ |
|
| 35 |
- build-essential \ |
|
| 36 |
- curl \ |
|
| 37 |
- dpkg-sig \ |
|
| 38 |
- git \ |
|
| 39 |
- iptables \ |
|
| 40 |
- libapparmor-dev \ |
|
| 41 |
- libcap-dev \ |
|
| 42 |
- libsqlite3-dev \ |
|
| 43 |
- lxc=1.0* \ |
|
| 44 |
- mercurial \ |
|
| 45 |
- pandoc \ |
|
| 46 |
- parallel \ |
|
| 47 |
- reprepro \ |
|
| 48 |
- ruby1.9.1 \ |
|
| 49 |
- ruby1.9.1-dev \ |
|
| 50 |
- s3cmd=1.1.0* \ |
|
| 51 |
- --no-install-recommends |
|
| 52 |
- |
|
| 53 |
-# Get lvm2 source for compiling statically |
|
| 54 |
-RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 |
|
| 55 |
-# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags |
|
| 56 |
-# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly |
|
| 57 |
- |
|
| 58 |
-# Compile and install lvm2 |
|
| 59 |
-RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper |
|
| 60 |
-# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL |
|
| 61 |
- |
|
| 62 |
-# Install Go |
|
| 63 |
-RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz |
|
| 64 |
-ENV PATH /usr/local/go/bin:$PATH |
|
| 65 |
-ENV GOPATH /go:/go/src/github.com/docker/docker/vendor |
|
| 66 |
-RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 |
|
| 67 |
- |
|
| 68 |
-# Compile Go for cross compilation |
|
| 69 |
-ENV DOCKER_CROSSPLATFORMS \ |
|
| 70 |
- linux/386 linux/arm \ |
|
| 71 |
- darwin/amd64 darwin/386 \ |
|
| 72 |
- freebsd/amd64 freebsd/386 freebsd/arm |
|
| 73 |
-# (set an explicit GOARM of 5 for maximum compatibility) |
|
| 74 |
-ENV GOARM 5 |
|
| 75 |
-RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
|
|
| 76 |
- |
|
| 77 |
-# Grab Go's cover tool for dead-simple code coverage testing |
|
| 78 |
-RUN go get golang.org/x/tools/cmd/cover |
|
| 79 |
- |
|
| 80 |
-# TODO replace FPM with some very minimal debhelper stuff |
|
| 81 |
-RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 |
|
| 82 |
- |
|
| 83 |
-# Get the "busybox" image source so we can build locally instead of pulling |
|
| 84 |
-RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox |
|
| 85 |
- |
|
| 86 |
-# Setup s3cmd config |
|
| 87 |
-RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg |
|
| 88 |
- |
|
| 89 |
-# Set user.email so crosbymichael's in-container merge commits go smoothly |
|
| 90 |
-RUN git config --global user.email 'docker-dummy@example.com' |
|
| 91 |
- |
|
| 92 |
-# Add an unprivileged user to be used for tests which need it |
|
| 93 |
-RUN groupadd -r docker |
|
| 94 |
-RUN useradd --create-home --gid docker unprivilegeduser |
|
| 95 |
- |
|
| 96 |
-VOLUME /var/lib/docker |
|
| 97 |
-WORKDIR /go/src/github.com/docker/docker |
|
| 98 |
-ENV DOCKER_BUILDTAGS apparmor selinux |
|
| 99 |
- |
|
| 100 |
-# Wrap all commands in the "docker-in-docker" script to allow nested containers |
|
| 101 |
-ENTRYPOINT ["hack/dind"] |
|
| 102 |
- |
|
| 103 |
-# Upload docker source |
|
| 104 |
-COPY . /go/src/github.com/docker/docker |
| 105 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,24 +0,0 @@ |
| 1 |
-(from "ubuntu:14.04") |
|
| 2 |
-(maintainer "Tianon Gravi <admwiggin@gmail.com> (@tianon)") |
|
| 3 |
-(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tlxc=1.0* \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") |
|
| 4 |
-(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") |
|
| 5 |
-(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") |
|
| 6 |
-(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") |
|
| 7 |
-(env "PATH" "/usr/local/go/bin:$PATH") |
|
| 8 |
-(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") |
|
| 9 |
-(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") |
|
| 10 |
-(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") |
|
| 11 |
-(env "GOARM" "5") |
|
| 12 |
-(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
|
|
| 13 |
-(run "go get golang.org/x/tools/cmd/cover") |
|
| 14 |
-(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") |
|
| 15 |
-(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") |
|
| 16 |
-(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") |
|
| 17 |
-(run "git config --global user.email 'docker-dummy@example.com'") |
|
| 18 |
-(run "groupadd -r docker") |
|
| 19 |
-(run "useradd --create-home --gid docker unprivilegeduser") |
|
| 20 |
-(volume "/var/lib/docker") |
|
| 21 |
-(workdir "/go/src/github.com/docker/docker") |
|
| 22 |
-(env "DOCKER_BUILDTAGS" "apparmor selinux") |
|
| 23 |
-(entrypoint "hack/dind") |
|
| 24 |
-(copy "." "/go/src/github.com/docker/docker") |
| 25 | 1 |
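Each of these deleted test pairs follows the same convention: a Dockerfile sits next to a result file holding the expected Node.Dump() output, one sexp per instruction. Below is a minimal sketch of how such a pair can be checked; it assumes the post-move import path (github.com/docker/docker/builder/dockerfile/parser), the Parse(io.Reader) signature used in this era of the tree, and a hypothetical fixture location.

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"os"

    	"github.com/docker/docker/builder/dockerfile/parser" // post-move path, assumed
    )

    func main() {
    	df, err := os.Open("testfiles/docker/Dockerfile") // hypothetical fixture path
    	if err != nil {
    		panic(err)
    	}
    	defer df.Close()

    	// Parse the Dockerfile into an AST.
    	ast, err := parser.Parse(df)
    	if err != nil {
    		panic(err)
    	}

    	expected, err := ioutil.ReadFile("testfiles/docker/result")
    	if err != nil {
    		panic(err)
    	}

    	// Dump() renders one sexp per instruction; result files end in a newline.
    	fmt.Println("match:", ast.Dump()+"\n" == string(expected))
    }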
deleted file mode 100644 |
| ... | ... |
@@ -1,23 +0,0 @@ |
| 1 |
-FROM ubuntu |
|
| 2 |
-ENV name value |
|
| 3 |
-ENV name=value |
|
| 4 |
-ENV name=value name2=value2 |
|
| 5 |
-ENV name="value value1" |
|
| 6 |
-ENV name=value\ value2 |
|
| 7 |
-ENV name="value'quote space'value2" |
|
| 8 |
-ENV name='value"double quote"value2' |
|
| 9 |
-ENV name=value\ value2 name2=value2\ value3 |
|
| 10 |
-ENV name="a\"b" |
|
| 11 |
-ENV name="a\'b" |
|
| 12 |
-ENV name='a\'b' |
|
| 13 |
-ENV name='a\'b'' |
|
| 14 |
-ENV name='a\"b' |
|
| 15 |
-ENV name="''" |
|
| 16 |
-# don't put anything after the next line - it must be the last line of the |
|
| 17 |
-# Dockerfile and it must end with \ |
|
| 18 |
-ENV name=value \ |
|
| 19 |
- name1=value1 \ |
|
| 20 |
- name2="value2a \ |
|
| 21 |
- value2b" \ |
|
| 22 |
- name3="value3a\n\"value3b\"" \ |
|
| 23 |
- name4="value4a\\nvalue4b" \ |
| 24 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,16 +0,0 @@ |
| 1 |
-(from "ubuntu") |
|
| 2 |
-(env "name" "value") |
|
| 3 |
-(env "name" "value") |
|
| 4 |
-(env "name" "value" "name2" "value2") |
|
| 5 |
-(env "name" "\"value value1\"") |
|
| 6 |
-(env "name" "value\\ value2") |
|
| 7 |
-(env "name" "\"value'quote space'value2\"") |
|
| 8 |
-(env "name" "'value\"double quote\"value2'") |
|
| 9 |
-(env "name" "value\\ value2" "name2" "value2\\ value3") |
|
| 10 |
-(env "name" "\"a\\\"b\"") |
|
| 11 |
-(env "name" "\"a\\'b\"") |
|
| 12 |
-(env "name" "'a\\'b'") |
|
| 13 |
-(env "name" "'a\\'b''") |
|
| 14 |
-(env "name" "'a\\\"b'") |
|
| 15 |
-(env "name" "\"''\"") |
|
| 16 |
-(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") |
| 17 | 1 |
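The dump shows how multi-pair ENV lines flatten into alternating name/value tokens with the original quoting preserved. A hedged sketch (not the builder's actual code) of folding those alternating words back into pairs:

    package main

    import "fmt"

    // pairs folds the alternating name/value words of an (env ...) sexp
    // back into a map; it mirrors how ENV name=value name2=value2 is stored.
    func pairs(words []string) map[string]string {
    	m := map[string]string{}
    	for i := 0; i+1 < len(words); i += 2 {
    		m[words[i]] = words[i+1]
    	}
    	return m
    }

    func main() {
    	// Mirrors the fixture's ENV name=value name2=value2 line.
    	fmt.Println(pairs([]string{"name", "value", "name2", "value2"}))
    }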
deleted file mode 100644 |
| ... | ... |
@@ -1,14 +0,0 @@ |
| 1 |
-FROM ubuntu:14.04 |
|
| 2 |
-MAINTAINER Erik \\Hollensbe <erik@hollensbe.org>\" |
|
| 3 |
- |
|
| 4 |
-RUN apt-get \update && \ |
|
| 5 |
- apt-get \"install znc -y |
|
| 6 |
-ADD \conf\\" /.znc |
|
| 7 |
- |
|
| 8 |
-RUN foo \ |
|
| 9 |
- |
|
| 10 |
-bar \ |
|
| 11 |
- |
|
| 12 |
-baz |
|
| 13 |
- |
|
| 14 |
-CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] |
| 7 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,10 +0,0 @@ |
| 1 |
-FROM scratch |
|
| 2 |
-COPY foo /tmp/ |
|
| 3 |
-COPY --user=me foo /tmp/ |
|
| 4 |
-COPY --doit=true foo /tmp/ |
|
| 5 |
-COPY --user=me --doit=true foo /tmp/ |
|
| 6 |
-COPY --doit=true -- foo /tmp/ |
|
| 7 |
-COPY -- foo /tmp/ |
|
| 8 |
-CMD --doit [ "a", "b" ] |
|
| 9 |
-CMD --doit=true -- [ "a", "b" ] |
|
| 10 |
-CMD --doit -- [ ] |
| 11 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,10 +0,0 @@ |
| 1 |
-(from "scratch") |
|
| 2 |
-(copy "foo" "/tmp/") |
|
| 3 |
-(copy ["--user=me"] "foo" "/tmp/") |
|
| 4 |
-(copy ["--doit=true"] "foo" "/tmp/") |
|
| 5 |
-(copy ["--user=me" "--doit=true"] "foo" "/tmp/") |
|
| 6 |
-(copy ["--doit=true"] "foo" "/tmp/") |
|
| 7 |
-(copy "foo" "/tmp/") |
|
| 8 |
-(cmd ["--doit"] "a" "b") |
|
| 9 |
-(cmd ["--doit=true"] "a" "b") |
|
| 10 |
-(cmd ["--doit"]) |
| 11 | 1 |
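This pair drives the builder-flag handling implemented by extractBuilderFlags further down in this diff: leading --key=value tokens are lifted into the node's flag list, and a bare -- ends flag parsing so later arguments may start with dashes. A simplified, illustrative re-implementation of that peeling rule (not the real parser):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // splitFlags peels leading "--key[=value]" tokens off an instruction's
    // arguments; a bare "--" terminates flag parsing, as in the fixture.
    func splitFlags(args []string) (flags, rest []string) {
    	for i, a := range args {
    		if a == "--" {
    			return flags, args[i+1:]
    		}
    		if !strings.HasPrefix(a, "--") {
    			return flags, args[i:]
    		}
    		flags = append(flags, a)
    	}
    	return flags, nil
    }

    func main() {
    	f, r := splitFlags([]string{"--user=me", "--doit=true", "foo", "/tmp/"})
    	fmt.Println(f, r) // [--user=me --doit=true] [foo /tmp/]
    }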
deleted file mode 100644 |
| ... | ... |
@@ -1,15 +0,0 @@ |
| 1 |
-FROM ubuntu:14.04 |
|
| 2 |
- |
|
| 3 |
-RUN apt-get update && apt-get install wget -y |
|
| 4 |
-RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb |
|
| 5 |
-RUN dpkg -i influxdb_latest_amd64.deb |
|
| 6 |
-RUN rm -r /opt/influxdb/shared |
|
| 7 |
- |
|
| 8 |
-VOLUME /opt/influxdb/shared |
|
| 9 |
- |
|
| 10 |
-CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml |
|
| 11 |
- |
|
| 12 |
-EXPOSE 8083 |
|
| 13 |
-EXPOSE 8086 |
|
| 14 |
-EXPOSE 8090 |
|
| 15 |
-EXPOSE 8099 |
| 16 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,11 +0,0 @@ |
| 1 |
-(from "ubuntu:14.04") |
|
| 2 |
-(run "apt-get update && apt-get install wget -y") |
|
| 3 |
-(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") |
|
| 4 |
-(run "dpkg -i influxdb_latest_amd64.deb") |
|
| 5 |
-(run "rm -r /opt/influxdb/shared") |
|
| 6 |
-(volume "/opt/influxdb/shared") |
|
| 7 |
-(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") |
|
| 8 |
-(expose "8083") |
|
| 9 |
-(expose "8086") |
|
| 10 |
-(expose "8090") |
|
| 11 |
-(expose "8099") |
| 8 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,7 +0,0 @@ |
| 1 |
-(from "ubuntu:14.04") |
|
| 2 |
-(maintainer "James Turnbull \"james@example.com\"") |
|
| 3 |
-(env "REFRESHED_AT" "2014-06-01") |
|
| 4 |
-(run "apt-get update") |
|
| 5 |
-(run "apt-get -y install redis-server redis-tools") |
|
| 6 |
-(expose "6379") |
|
| 7 |
-(entrypoint "/usr/bin/redis-server") |
| 8 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,48 +0,0 @@ |
| 1 |
-FROM busybox:buildroot-2014.02 |
|
| 2 |
- |
|
| 3 |
-MAINTAINER docker <docker@docker.io> |
|
| 4 |
- |
|
| 5 |
-ONBUILD RUN ["echo", "test"] |
|
| 6 |
-ONBUILD RUN echo test |
|
| 7 |
-ONBUILD COPY . / |
|
| 8 |
- |
|
| 9 |
- |
|
| 10 |
-# RUN Commands \ |
|
| 11 |
-# linebreak in comment \ |
|
| 12 |
-RUN ["ls", "-la"] |
|
| 13 |
-RUN ["echo", "'1234'"] |
|
| 14 |
-RUN echo "1234" |
|
| 15 |
-RUN echo 1234 |
|
| 16 |
-RUN echo '1234' && \ |
|
| 17 |
- echo "456" && \ |
|
| 18 |
- echo 789 |
|
| 19 |
-RUN sh -c 'echo root:testpass \ |
|
| 20 |
- > /tmp/passwd' |
|
| 21 |
-RUN mkdir -p /test /test2 /test3/test |
|
| 22 |
- |
|
| 23 |
-# ENV \ |
|
| 24 |
-ENV SCUBA 1 DUBA 3 |
|
| 25 |
-ENV SCUBA "1 DUBA 3" |
|
| 26 |
- |
|
| 27 |
-# CMD \ |
|
| 28 |
-CMD ["echo", "test"] |
|
| 29 |
-CMD echo test |
|
| 30 |
-CMD echo "test" |
|
| 31 |
-CMD echo 'test' |
|
| 32 |
-CMD echo 'test' | wc - |
|
| 33 |
- |
|
| 34 |
-#EXPOSE\ |
|
| 35 |
-EXPOSE 3000 |
|
| 36 |
-EXPOSE 9000 5000 6000 |
|
| 37 |
- |
|
| 38 |
-USER docker |
|
| 39 |
-USER docker:root |
|
| 40 |
- |
|
| 41 |
-VOLUME ["/test"] |
|
| 42 |
-VOLUME ["/test", "/test2"] |
|
| 43 |
-VOLUME /test3 |
|
| 44 |
- |
|
| 45 |
-WORKDIR /test |
|
| 46 |
- |
|
| 47 |
-ADD . / |
|
| 48 |
-COPY . copy |
| 49 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,29 +0,0 @@ |
| 1 |
-(from "busybox:buildroot-2014.02") |
|
| 2 |
-(maintainer "docker <docker@docker.io>") |
|
| 3 |
-(onbuild (run "echo" "test")) |
|
| 4 |
-(onbuild (run "echo test")) |
|
| 5 |
-(onbuild (copy "." "/")) |
|
| 6 |
-(run "ls" "-la") |
|
| 7 |
-(run "echo" "'1234'") |
|
| 8 |
-(run "echo \"1234\"") |
|
| 9 |
-(run "echo 1234") |
|
| 10 |
-(run "echo '1234' && echo \"456\" && echo 789") |
|
| 11 |
-(run "sh -c 'echo root:testpass > /tmp/passwd'") |
|
| 12 |
-(run "mkdir -p /test /test2 /test3/test") |
|
| 13 |
-(env "SCUBA" "1 DUBA 3") |
|
| 14 |
-(env "SCUBA" "\"1 DUBA 3\"") |
|
| 15 |
-(cmd "echo" "test") |
|
| 16 |
-(cmd "echo test") |
|
| 17 |
-(cmd "echo \"test\"") |
|
| 18 |
-(cmd "echo 'test'") |
|
| 19 |
-(cmd "echo 'test' | wc -") |
|
| 20 |
-(expose "3000") |
|
| 21 |
-(expose "9000" "5000" "6000") |
|
| 22 |
-(user "docker") |
|
| 23 |
-(user "docker:root") |
|
| 24 |
-(volume "/test") |
|
| 25 |
-(volume "/test" "/test2") |
|
| 26 |
-(volume "/test3") |
|
| 27 |
-(workdir "/test") |
|
| 28 |
-(add "." "/") |
|
| 29 |
-(copy "." "copy") |
| 30 | 1 |
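Note the two RUN forms in this pair: JSON (exec-form) instructions keep each argument as a separate quoted token in the dump, as in (run "ls" "-la"), while shell-form lines collapse into a single string token, as in (run "echo test"); ONBUILD simply wraps the nested instruction's sexp. This is exactly the distinction that handleJSONArgs, later in this diff, preserves via the "json" attribute.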
deleted file mode 100644 |
| ... | ... |
@@ -1,16 +0,0 @@ |
| 1 |
-FROM ubuntu:14.04 |
|
| 2 |
- |
|
| 3 |
-RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y |
|
| 4 |
-ADD .muttrc / |
|
| 5 |
-ADD .offlineimaprc / |
|
| 6 |
-ADD .tmux.conf / |
|
| 7 |
-ADD mutt /.mutt |
|
| 8 |
-ADD vim /.vim |
|
| 9 |
-ADD vimrc /.vimrc |
|
| 10 |
-ADD crontab /etc/crontab |
|
| 11 |
-RUN chmod 644 /etc/crontab |
|
| 12 |
-RUN mkdir /Mail |
|
| 13 |
-RUN mkdir /.offlineimap |
|
| 14 |
-RUN echo "export TERM=screen-256color" >/.zshenv |
|
| 15 |
- |
|
| 16 |
-CMD setsid cron; tmux -2 |
| 17 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,14 +0,0 @@ |
| 1 |
-(from "ubuntu:14.04") |
|
| 2 |
-(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") |
|
| 3 |
-(add ".muttrc" "/") |
|
| 4 |
-(add ".offlineimaprc" "/") |
|
| 5 |
-(add ".tmux.conf" "/") |
|
| 6 |
-(add "mutt" "/.mutt") |
|
| 7 |
-(add "vim" "/.vim") |
|
| 8 |
-(add "vimrc" "/.vimrc") |
|
| 9 |
-(add "crontab" "/etc/crontab") |
|
| 10 |
-(run "chmod 644 /etc/crontab") |
|
| 11 |
-(run "mkdir /Mail") |
|
| 12 |
-(run "mkdir /.offlineimap") |
|
| 13 |
-(run "echo \"export TERM=screen-256color\" >/.zshenv") |
|
| 14 |
-(cmd "setsid cron; tmux -2") |
| 5 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,14 +0,0 @@ |
| 1 |
-FROM ubuntu:14.04 |
|
| 2 |
-MAINTAINER Erik Hollensbe <erik@hollensbe.org> |
|
| 3 |
- |
|
| 4 |
-RUN apt-get update && apt-get install nginx-full -y |
|
| 5 |
-RUN rm -rf /etc/nginx |
|
| 6 |
-ADD etc /etc/nginx |
|
| 7 |
-RUN chown -R root:root /etc/nginx |
|
| 8 |
-RUN /usr/sbin/nginx -qt |
|
| 9 |
-RUN mkdir /www |
|
| 10 |
- |
|
| 11 |
-CMD ["/usr/sbin/nginx"] |
|
| 12 |
- |
|
| 13 |
-VOLUME /www |
|
| 14 |
-EXPOSE 80 |
| 15 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,11 +0,0 @@ |
| 1 |
-(from "ubuntu:14.04") |
|
| 2 |
-(maintainer "Erik Hollensbe <erik@hollensbe.org>") |
|
| 3 |
-(run "apt-get update && apt-get install nginx-full -y") |
|
| 4 |
-(run "rm -rf /etc/nginx") |
|
| 5 |
-(add "etc" "/etc/nginx") |
|
| 6 |
-(run "chown -R root:root /etc/nginx") |
|
| 7 |
-(run "/usr/sbin/nginx -qt") |
|
| 8 |
-(run "mkdir /www") |
|
| 9 |
-(cmd "/usr/sbin/nginx") |
|
| 10 |
-(volume "/www") |
|
| 11 |
-(expose "80") |
| 12 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,23 +0,0 @@ |
| 1 |
-FROM ubuntu:12.04 |
|
| 2 |
- |
|
| 3 |
-EXPOSE 27015 |
|
| 4 |
-EXPOSE 27005 |
|
| 5 |
-EXPOSE 26901 |
|
| 6 |
-EXPOSE 27020 |
|
| 7 |
- |
|
| 8 |
-RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y |
|
| 9 |
-RUN mkdir -p /steam |
|
| 10 |
-RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam |
|
| 11 |
-ADD ./script /steam/script |
|
| 12 |
-RUN /steam/steamcmd.sh +runscript /steam/script |
|
| 13 |
-RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf |
|
| 14 |
-RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf |
|
| 15 |
-ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg |
|
| 16 |
-ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg |
|
| 17 |
-ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg |
|
| 18 |
-RUN rm -r /steam/tf2/tf/addons/sourcemod/configs |
|
| 19 |
-ADD ./configs /steam/tf2/tf/addons/sourcemod/configs |
|
| 20 |
-RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en |
|
| 21 |
-RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en |
|
| 22 |
- |
|
| 23 |
-CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill |
| 24 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,20 +0,0 @@ |
| 1 |
-(from "ubuntu:12.04") |
|
| 2 |
-(expose "27015") |
|
| 3 |
-(expose "27005") |
|
| 4 |
-(expose "26901") |
|
| 5 |
-(expose "27020") |
|
| 6 |
-(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") |
|
| 7 |
-(run "mkdir -p /steam") |
|
| 8 |
-(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") |
|
| 9 |
-(add "./script" "/steam/script") |
|
| 10 |
-(run "/steam/steamcmd.sh +runscript /steam/script") |
|
| 11 |
-(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") |
|
| 12 |
-(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") |
|
| 13 |
-(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") |
|
| 14 |
-(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") |
|
| 15 |
-(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") |
|
| 16 |
-(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") |
|
| 17 |
-(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") |
|
| 18 |
-(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") |
|
| 19 |
-(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") |
|
| 20 |
-(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") |
| 6 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,176 +0,0 @@ |
| 1 |
-package parser |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "strconv" |
|
| 6 |
- "strings" |
|
| 7 |
- "unicode" |
|
| 8 |
-) |
|
| 9 |
- |
|
| 10 |
-// Dump dumps the AST defined by `node` as a list of sexps. |
|
| 11 |
-// Returns a string suitable for printing. |
|
| 12 |
-func (node *Node) Dump() string {
|
|
| 13 |
- str := "" |
|
| 14 |
- str += node.Value |
|
| 15 |
- |
|
| 16 |
- if len(node.Flags) > 0 {
|
|
| 17 |
- str += fmt.Sprintf(" %q", node.Flags)
|
|
| 18 |
- } |
|
| 19 |
- |
|
| 20 |
- for _, n := range node.Children {
|
|
| 21 |
- str += "(" + n.Dump() + ")\n"
|
|
| 22 |
- } |
|
| 23 |
- |
|
| 24 |
- if node.Next != nil {
|
|
| 25 |
- for n := node.Next; n != nil; n = n.Next {
|
|
| 26 |
- if len(n.Children) > 0 {
|
|
| 27 |
- str += " " + n.Dump() |
|
| 28 |
- } else {
|
|
| 29 |
- str += " " + strconv.Quote(n.Value) |
|
| 30 |
- } |
|
| 31 |
- } |
|
| 32 |
- } |
|
| 33 |
- |
|
| 34 |
- return strings.TrimSpace(str) |
|
| 35 |
-} |
|
| 36 |
- |
|
| 37 |
-// performs the dispatch based on the two primary strings, cmd and args. Please |
|
| 38 |
-// look at the dispatch table in parser.go to see how these dispatchers work. |
|
| 39 |
-func fullDispatch(cmd, args string) (*Node, map[string]bool, error) {
|
|
| 40 |
- fn := dispatch[cmd] |
|
| 41 |
- |
|
| 42 |
- // Ignore invalid Dockerfile instructions |
|
| 43 |
- if fn == nil {
|
|
| 44 |
- fn = parseIgnore |
|
| 45 |
- } |
|
| 46 |
- |
|
| 47 |
- sexp, attrs, err := fn(args) |
|
| 48 |
- if err != nil {
|
|
| 49 |
- return nil, nil, err |
|
| 50 |
- } |
|
| 51 |
- |
|
| 52 |
- return sexp, attrs, nil |
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 |
-// splitCommand takes a single line of text and parses out the cmd and args, |
|
| 56 |
-// which are used for dispatching to more exact parsing functions. |
|
| 57 |
-func splitCommand(line string) (string, []string, string, error) {
|
|
| 58 |
- var args string |
|
| 59 |
- var flags []string |
|
| 60 |
- |
|
| 61 |
- // Make sure we get the same results irrespective of leading/trailing spaces |
|
| 62 |
- cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) |
|
| 63 |
- cmd := strings.ToLower(cmdline[0]) |
|
| 64 |
- |
|
| 65 |
- if len(cmdline) == 2 {
|
|
| 66 |
- var err error |
|
| 67 |
- args, flags, err = extractBuilderFlags(cmdline[1]) |
|
| 68 |
- if err != nil {
|
|
| 69 |
- return "", nil, "", err |
|
| 70 |
- } |
|
| 71 |
- } |
|
| 72 |
- |
|
| 73 |
- return cmd, flags, strings.TrimSpace(args), nil |
|
| 74 |
-} |
|
| 75 |
- |
|
| 76 |
-// covers comments and empty lines. Lines should be trimmed before passing to |
|
| 77 |
-// this function. |
|
| 78 |
-func stripComments(line string) string {
|
|
| 79 |
- // string is already trimmed at this point |
|
| 80 |
- if tokenComment.MatchString(line) {
|
|
| 81 |
- return tokenComment.ReplaceAllString(line, "") |
|
| 82 |
- } |
|
| 83 |
- |
|
| 84 |
- return line |
|
| 85 |
-} |
|
| 86 |
- |
|
| 87 |
-func extractBuilderFlags(line string) (string, []string, error) {
|
|
| 88 |
- // Parses the BuilderFlags and returns the remaining part of the line |
|
| 89 |
- |
|
| 90 |
- const ( |
|
| 91 |
- inSpaces = iota // looking for start of a word |
|
| 92 |
- inWord |
|
| 93 |
- inQuote |
|
| 94 |
- ) |
|
| 95 |
- |
|
| 96 |
- words := []string{}
|
|
| 97 |
- phase := inSpaces |
|
| 98 |
- word := "" |
|
| 99 |
- quote := '\000' |
|
| 100 |
- blankOK := false |
|
| 101 |
- var ch rune |
|
| 102 |
- |
|
| 103 |
- for pos := 0; pos <= len(line); pos++ {
|
|
| 104 |
- if pos != len(line) {
|
|
| 105 |
- ch = rune(line[pos]) |
|
| 106 |
- } |
|
| 107 |
- |
|
| 108 |
- if phase == inSpaces { // Looking for start of word
|
|
| 109 |
- if pos == len(line) { // end of input
|
|
| 110 |
- break |
|
| 111 |
- } |
|
| 112 |
- if unicode.IsSpace(ch) { // skip spaces
|
|
| 113 |
- continue |
|
| 114 |
- } |
|
| 115 |
- |
|
| 116 |
- // Only keep going if the next word starts with -- |
|
| 117 |
- if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' {
|
|
| 118 |
- return line[pos:], words, nil |
|
| 119 |
- } |
|
| 120 |
- |
|
| 121 |
- phase = inWord // found something with "--", fall through |
|
| 122 |
- } |
|
| 123 |
- if (phase == inWord || phase == inQuote) && (pos == len(line)) {
|
|
| 124 |
- if word != "--" && (blankOK || len(word) > 0) {
|
|
| 125 |
- words = append(words, word) |
|
| 126 |
- } |
|
| 127 |
- break |
|
| 128 |
- } |
|
| 129 |
- if phase == inWord {
|
|
| 130 |
- if unicode.IsSpace(ch) {
|
|
| 131 |
- phase = inSpaces |
|
| 132 |
- if word == "--" {
|
|
| 133 |
- return line[pos:], words, nil |
|
| 134 |
- } |
|
| 135 |
- if blankOK || len(word) > 0 {
|
|
| 136 |
- words = append(words, word) |
|
| 137 |
- } |
|
| 138 |
- word = "" |
|
| 139 |
- blankOK = false |
|
| 140 |
- continue |
|
| 141 |
- } |
|
| 142 |
- if ch == '\'' || ch == '"' {
|
|
| 143 |
- quote = ch |
|
| 144 |
- blankOK = true |
|
| 145 |
- phase = inQuote |
|
| 146 |
- continue |
|
| 147 |
- } |
|
| 148 |
- if ch == '\\' {
|
|
| 149 |
- if pos+1 == len(line) {
|
|
| 150 |
- continue // just skip \ at end |
|
| 151 |
- } |
|
| 152 |
- pos++ |
|
| 153 |
- ch = rune(line[pos]) |
|
| 154 |
- } |
|
| 155 |
- word += string(ch) |
|
| 156 |
- continue |
|
| 157 |
- } |
|
| 158 |
- if phase == inQuote {
|
|
| 159 |
- if ch == quote {
|
|
| 160 |
- phase = inWord |
|
| 161 |
- continue |
|
| 162 |
- } |
|
| 163 |
- if ch == '\\' {
|
|
| 164 |
- if pos+1 == len(line) {
|
|
| 165 |
- phase = inWord |
|
| 166 |
- continue // just skip \ at end |
|
| 167 |
- } |
|
| 168 |
- pos++ |
|
| 169 |
- ch = rune(line[pos]) |
|
| 170 |
- } |
|
| 171 |
- word += string(ch) |
|
| 172 |
- } |
|
| 173 |
- } |
|
| 174 |
- |
|
| 175 |
- return "", words, nil |
|
| 176 |
-} |
| 177 | 1 |
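splitCommand and extractBuilderFlags are unexported, so the sketch below is written as an in-package test; the expected values mirror the flags fixture earlier in this diff.

    package parser

    import "testing"

    // A hedged sketch of splitCommand's behavior on one fixture line.
    func TestSplitCommandSketch(t *testing.T) {
    	cmd, flags, args, err := splitCommand("COPY --user=me foo /tmp/")
    	if err != nil {
    		t.Fatal(err)
    	}
    	if cmd != "copy" {
    		t.Fatalf("keyword should be lowercased, got %q", cmd)
    	}
    	if len(flags) != 1 || flags[0] != "--user=me" {
    		t.Fatalf("flags not extracted: %q", flags)
    	}
    	if args != "foo /tmp/" {
    		t.Fatalf("remaining args wrong: %q", args)
    	}
    }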
deleted file mode 100644 |
| ... | ... |
@@ -1,243 +0,0 @@ |
| 1 |
-package builder |
|
| 2 |
- |
|
| 3 |
-// This will take a single word and an array of env variables and |
|
| 4 |
-// process all quotes (" and ') as well as $xxx and ${xxx} env variable
|
|
| 5 |
-// tokens. It tries to mimic how the bash shell processes them. |
|
| 6 |
-// It doesn't support all flavors of ${xx:...} formats but new ones can
|
|
| 7 |
-// be added by adding code to the "special ${} format processing" section
|
|
| 8 |
- |
|
| 9 |
-import ( |
|
| 10 |
- "fmt" |
|
| 11 |
- "strings" |
|
| 12 |
- "unicode" |
|
| 13 |
-) |
|
| 14 |
- |
|
| 15 |
-type shellWord struct {
|
|
| 16 |
- word string |
|
| 17 |
- envs []string |
|
| 18 |
- pos int |
|
| 19 |
-} |
|
| 20 |
- |
|
| 21 |
-// ProcessWord will use the 'env' list of environment variables, |
|
| 22 |
-// and replace any env var references in 'word'. |
|
| 23 |
-func ProcessWord(word string, env []string) (string, error) {
|
|
| 24 |
- sw := &shellWord{
|
|
| 25 |
- word: word, |
|
| 26 |
- envs: env, |
|
| 27 |
- pos: 0, |
|
| 28 |
- } |
|
| 29 |
- return sw.process() |
|
| 30 |
-} |
|
| 31 |
- |
|
| 32 |
-func (sw *shellWord) process() (string, error) {
|
|
| 33 |
- return sw.processStopOn('\000')
|
|
| 34 |
-} |
|
| 35 |
- |
|
| 36 |
-// Process the word, starting at 'pos', and stop when we get to the |
|
| 37 |
-// end of the word or the 'stopChar' character |
|
| 38 |
-func (sw *shellWord) processStopOn(stopChar rune) (string, error) {
|
|
| 39 |
- var result string |
|
| 40 |
- var charFuncMapping = map[rune]func() (string, error){
|
|
| 41 |
- '\'': sw.processSingleQuote, |
|
| 42 |
- '"': sw.processDoubleQuote, |
|
| 43 |
- '$': sw.processDollar, |
|
| 44 |
- } |
|
| 45 |
- |
|
| 46 |
- for sw.pos < len(sw.word) {
|
|
| 47 |
- ch := sw.peek() |
|
| 48 |
- if stopChar != '\000' && ch == stopChar {
|
|
| 49 |
- sw.next() |
|
| 50 |
- break |
|
| 51 |
- } |
|
| 52 |
- if fn, ok := charFuncMapping[ch]; ok {
|
|
| 53 |
- // Call special processing func for certain chars |
|
| 54 |
- tmp, err := fn() |
|
| 55 |
- if err != nil {
|
|
| 56 |
- return "", err |
|
| 57 |
- } |
|
| 58 |
- result += tmp |
|
| 59 |
- } else {
|
|
| 60 |
- // Not special, just add it to the result |
|
| 61 |
- ch = sw.next() |
|
| 62 |
- if ch == '\\' {
|
|
| 63 |
- // '\' escapes, except end of line |
|
| 64 |
- ch = sw.next() |
|
| 65 |
- if ch == '\000' {
|
|
| 66 |
- continue |
|
| 67 |
- } |
|
| 68 |
- } |
|
| 69 |
- result += string(ch) |
|
| 70 |
- } |
|
| 71 |
- } |
|
| 72 |
- |
|
| 73 |
- return result, nil |
|
| 74 |
-} |
|
| 75 |
- |
|
| 76 |
-func (sw *shellWord) peek() rune {
|
|
| 77 |
- if sw.pos == len(sw.word) {
|
|
| 78 |
- return '\000' |
|
| 79 |
- } |
|
| 80 |
- return rune(sw.word[sw.pos]) |
|
| 81 |
-} |
|
| 82 |
- |
|
| 83 |
-func (sw *shellWord) next() rune {
|
|
| 84 |
- if sw.pos == len(sw.word) {
|
|
| 85 |
- return '\000' |
|
| 86 |
- } |
|
| 87 |
- ch := rune(sw.word[sw.pos]) |
|
| 88 |
- sw.pos++ |
|
| 89 |
- return ch |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-func (sw *shellWord) processSingleQuote() (string, error) {
|
|
| 93 |
- // All chars between single quotes are taken as-is |
|
| 94 |
- // Note, you can't escape ' |
|
| 95 |
- var result string |
|
| 96 |
- |
|
| 97 |
- sw.next() |
|
| 98 |
- |
|
| 99 |
- for {
|
|
| 100 |
- ch := sw.next() |
|
| 101 |
- if ch == '\000' || ch == '\'' {
|
|
| 102 |
- break |
|
| 103 |
- } |
|
| 104 |
- result += string(ch) |
|
| 105 |
- } |
|
| 106 |
- return result, nil |
|
| 107 |
-} |
|
| 108 |
- |
|
| 109 |
-func (sw *shellWord) processDoubleQuote() (string, error) {
|
|
| 110 |
- // All chars up to the next " are taken as-is, even ', except any $ chars |
|
| 111 |
- // But you can escape " with a \ |
|
| 112 |
- var result string |
|
| 113 |
- |
|
| 114 |
- sw.next() |
|
| 115 |
- |
|
| 116 |
- for sw.pos < len(sw.word) {
|
|
| 117 |
- ch := sw.peek() |
|
| 118 |
- if ch == '"' {
|
|
| 119 |
- sw.next() |
|
| 120 |
- break |
|
| 121 |
- } |
|
| 122 |
- if ch == '$' {
|
|
| 123 |
- tmp, err := sw.processDollar() |
|
| 124 |
- if err != nil {
|
|
| 125 |
- return "", err |
|
| 126 |
- } |
|
| 127 |
- result += tmp |
|
| 128 |
- } else {
|
|
| 129 |
- ch = sw.next() |
|
| 130 |
- if ch == '\\' {
|
|
| 131 |
- chNext := sw.peek() |
|
| 132 |
- |
|
| 133 |
- if chNext == '\000' {
|
|
| 134 |
- // Ignore \ at end of word |
|
| 135 |
- continue |
|
| 136 |
- } |
|
| 137 |
- |
|
| 138 |
- if chNext == '"' || chNext == '$' {
|
|
| 139 |
- // \" and \$ can be escaped, all other \'s are left as-is |
|
| 140 |
- ch = sw.next() |
|
| 141 |
- } |
|
| 142 |
- } |
|
| 143 |
- result += string(ch) |
|
| 144 |
- } |
|
| 145 |
- } |
|
| 146 |
- |
|
| 147 |
- return result, nil |
|
| 148 |
-} |
|
| 149 |
- |
|
| 150 |
-func (sw *shellWord) processDollar() (string, error) {
|
|
| 151 |
- sw.next() |
|
| 152 |
- ch := sw.peek() |
|
| 153 |
- if ch == '{' {
|
|
| 154 |
- sw.next() |
|
| 155 |
- name := sw.processName() |
|
| 156 |
- ch = sw.peek() |
|
| 157 |
- if ch == '}' {
|
|
| 158 |
- // Normal ${xx} case
|
|
| 159 |
- sw.next() |
|
| 160 |
- return sw.getEnv(name), nil |
|
| 161 |
- } |
|
| 162 |
- if ch == ':' {
|
|
| 163 |
- // Special ${xx:...} format processing
|
|
| 164 |
- // Yes it allows for recursive $'s in the ... spot |
|
| 165 |
- |
|
| 166 |
- sw.next() // skip over : |
|
| 167 |
- modifier := sw.next() |
|
| 168 |
- |
|
| 169 |
- word, err := sw.processStopOn('}')
|
|
| 170 |
- if err != nil {
|
|
| 171 |
- return "", err |
|
| 172 |
- } |
|
| 173 |
- |
|
| 174 |
- // Grab the current value of the variable in question so we |
|
| 175 |
- // can use it to determine what to do based on the modifier |
|
| 176 |
- newValue := sw.getEnv(name) |
|
| 177 |
- |
|
| 178 |
- switch modifier {
|
|
| 179 |
- case '+': |
|
| 180 |
- if newValue != "" {
|
|
| 181 |
- newValue = word |
|
| 182 |
- } |
|
| 183 |
- return newValue, nil |
|
| 184 |
- |
|
| 185 |
- case '-': |
|
| 186 |
- if newValue == "" {
|
|
| 187 |
- newValue = word |
|
| 188 |
- } |
|
| 189 |
- return newValue, nil |
|
| 190 |
- |
|
| 191 |
- default: |
|
| 192 |
- return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word)
|
|
| 193 |
- } |
|
| 194 |
- } |
|
| 195 |
- return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word)
|
|
| 196 |
- } |
|
| 197 |
- // $xxx case |
|
| 198 |
- name := sw.processName() |
|
| 199 |
- if name == "" {
|
|
| 200 |
- return "$", nil |
|
| 201 |
- } |
|
| 202 |
- return sw.getEnv(name), nil |
|
| 203 |
-} |
|
| 204 |
- |
|
| 205 |
-func (sw *shellWord) processName() string {
|
|
| 206 |
- // Read in a name (alphanumeric or _) |
|
| 207 |
- // If it starts with a digit then just return that one character (as in $1) |
|
| 208 |
- var name string |
|
| 209 |
- |
|
| 210 |
- for sw.pos < len(sw.word) {
|
|
| 211 |
- ch := sw.peek() |
|
| 212 |
- if len(name) == 0 && unicode.IsDigit(ch) {
|
|
| 213 |
- ch = sw.next() |
|
| 214 |
- return string(ch) |
|
| 215 |
- } |
|
| 216 |
- if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
|
|
| 217 |
- break |
|
| 218 |
- } |
|
| 219 |
- ch = sw.next() |
|
| 220 |
- name += string(ch) |
|
| 221 |
- } |
|
| 222 |
- |
|
| 223 |
- return name |
|
| 224 |
-} |
|
| 225 |
- |
|
| 226 |
-func (sw *shellWord) getEnv(name string) string {
|
|
| 227 |
- for _, env := range sw.envs {
|
|
| 228 |
- i := strings.Index(env, "=") |
|
| 229 |
- if i < 0 {
|
|
| 230 |
- if name == env {
|
|
| 231 |
- // Should probably never get here, but just in case treat |
|
| 232 |
- // it like "var" and "var=" are the same |
|
| 233 |
- return "" |
|
| 234 |
- } |
|
| 235 |
- continue |
|
| 236 |
- } |
|
| 237 |
- if name != env[:i] {
|
|
| 238 |
- continue |
|
| 239 |
- } |
|
| 240 |
- return env[i+1:] |
|
| 241 |
- } |
|
| 242 |
- return "" |
|
| 243 |
-} |
| 244 | 1 |
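ProcessWord is the one exported entry point of this file. A minimal usage sketch follows, assuming the post-move import path github.com/docker/docker/builder/dockerfile (before this PR the same function lived in github.com/docker/docker/builder, as the package clause above shows); the input and expected output come from the "words" table at the end of this diff.

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/builder/dockerfile" // post-move path, assumed
    )

    func main() {
    	// Same environment the shell-parser test uses.
    	envs := []string{"PWD=/home", "SHELL=bash"}

    	// ${XXX:-...} falls back because XXX is unset; the fallback itself
    	// expands ${PWD:-yyy} to /home.
    	out, err := dockerfile.ProcessWord("he${XXX:-${PWD:-yyy}}xx", envs)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out) // he/homexx
    }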
deleted file mode 100644 |
| ... | ... |
@@ -1,93 +0,0 @@ |
| 1 |
-package builder |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bufio" |
|
| 5 |
- "os" |
|
| 6 |
- "strings" |
|
| 7 |
- "testing" |
|
| 8 |
-) |
|
| 9 |
- |
|
| 10 |
-func TestShellParser(t *testing.T) {
|
|
| 11 |
- file, err := os.Open("words")
|
|
| 12 |
- if err != nil {
|
|
| 13 |
- t.Fatalf("Can't open 'words': %s", err)
|
|
| 14 |
- } |
|
| 15 |
- defer file.Close() |
|
| 16 |
- |
|
| 17 |
- scanner := bufio.NewScanner(file) |
|
| 18 |
- envs := []string{"PWD=/home", "SHELL=bash"}
|
|
| 19 |
- for scanner.Scan() {
|
|
| 20 |
- line := scanner.Text() |
|
| 21 |
- |
|
| 22 |
- // Trim comments and blank lines |
|
| 23 |
- i := strings.Index(line, "#") |
|
| 24 |
- if i >= 0 {
|
|
| 25 |
- line = line[:i] |
|
| 26 |
- } |
|
| 27 |
- line = strings.TrimSpace(line) |
|
| 28 |
- |
|
| 29 |
- if line == "" {
|
|
| 30 |
- continue |
|
| 31 |
- } |
|
| 32 |
- |
|
| 33 |
- words := strings.Split(line, "|") |
|
| 34 |
- if len(words) != 2 {
|
|
| 35 |
- t.Fatalf("Error in 'words' - should be 2 words:%q", words)
|
|
| 36 |
- } |
|
| 37 |
- |
|
| 38 |
- words[0] = strings.TrimSpace(words[0]) |
|
| 39 |
- words[1] = strings.TrimSpace(words[1]) |
|
| 40 |
- |
|
| 41 |
- newWord, err := ProcessWord(words[0], envs) |
|
| 42 |
- |
|
| 43 |
- if err != nil {
|
|
| 44 |
- newWord = "error" |
|
| 45 |
- } |
|
| 46 |
- |
|
| 47 |
- if newWord != words[1] {
|
|
| 48 |
- t.Fatalf("Error. Src: %s Calc: %s Expected: %s", words[0], newWord, words[1])
|
|
| 49 |
- } |
|
| 50 |
- } |
|
| 51 |
-} |
|
| 52 |
- |
|
| 53 |
-func TestGetEnv(t *testing.T) {
|
|
| 54 |
- sw := &shellWord{
|
|
| 55 |
- word: "", |
|
| 56 |
- envs: nil, |
|
| 57 |
- pos: 0, |
|
| 58 |
- } |
|
| 59 |
- |
|
| 60 |
- sw.envs = []string{}
|
|
| 61 |
- if sw.getEnv("foo") != "" {
|
|
| 62 |
- t.Fatalf("2 - 'foo' should map to ''")
|
|
| 63 |
- } |
|
| 64 |
- |
|
| 65 |
- sw.envs = []string{"foo"}
|
|
| 66 |
- if sw.getEnv("foo") != "" {
|
|
| 67 |
- t.Fatalf("3 - 'foo' should map to ''")
|
|
| 68 |
- } |
|
| 69 |
- |
|
| 70 |
- sw.envs = []string{"foo="}
|
|
| 71 |
- if sw.getEnv("foo") != "" {
|
|
| 72 |
- t.Fatalf("4 - 'foo' should map to ''")
|
|
| 73 |
- } |
|
| 74 |
- |
|
| 75 |
- sw.envs = []string{"foo=bar"}
|
|
| 76 |
- if sw.getEnv("foo") != "bar" {
|
|
| 77 |
- t.Fatalf("5 - 'foo' should map to 'bar'")
|
|
| 78 |
- } |
|
| 79 |
- |
|
| 80 |
- sw.envs = []string{"foo=bar", "car=hat"}
|
|
| 81 |
- if sw.getEnv("foo") != "bar" {
|
|
| 82 |
- t.Fatalf("6 - 'foo' should map to 'bar'")
|
|
| 83 |
- } |
|
| 84 |
- if sw.getEnv("car") != "hat" {
|
|
| 85 |
- t.Fatalf("7 - 'car' should map to 'hat'")
|
|
| 86 |
- } |
|
| 87 |
- |
|
| 88 |
- // Make sure we grab the first 'car' in the list |
|
| 89 |
- sw.envs = []string{"foo=bar", "car=hat", "car=bike"}
|
|
| 90 |
- if sw.getEnv("car") != "hat" {
|
|
| 91 |
- t.Fatalf("8 - 'car' should map to 'hat'")
|
|
| 92 |
- } |
|
| 93 |
-} |
| 94 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,27 +0,0 @@ |
| 1 |
-package builder |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "regexp" |
|
| 5 |
- "strings" |
|
| 6 |
-) |
|
| 7 |
- |
|
| 8 |
-const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` |
|
| 9 |
- |
|
| 10 |
-var mimeRe = regexp.MustCompile(acceptableRemoteMIME) |
|
| 11 |
- |
|
| 12 |
-func selectAcceptableMIME(ct string) string {
|
|
| 13 |
- return mimeRe.FindString(ct) |
|
| 14 |
-} |
|
| 15 |
- |
|
| 16 |
-func handleJSONArgs(args []string, attributes map[string]bool) []string {
|
|
| 17 |
- if len(args) == 0 {
|
|
| 18 |
- return []string{}
|
|
| 19 |
- } |
|
| 20 |
- |
|
| 21 |
- if attributes != nil && attributes["json"] {
|
|
| 22 |
- return args |
|
| 23 |
- } |
|
| 24 |
- |
|
| 25 |
- // literal string command, not an exec array |
|
| 26 |
- return []string{strings.Join(args, " ")}
|
|
| 27 |
-} |
| 28 | 1 |
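Both helpers above are unexported, so this sketch sits in the same (pre-move) package; the MIME values mirror the test file that follows, and the second pair shows the exec-form/shell-form split discussed alongside the busybox fixture above.

    package builder

    import "fmt"

    // demoSupport illustrates the two helpers above; it is a sketch, not
    // part of the real package.
    func demoSupport() {
    	fmt.Println(selectAcceptableMIME("application/x-gzip"))     // accepted as-is
    	fmt.Println(selectAcceptableMIME("application/json") == "") // true: rejected

    	// With the "json" attribute (exec form) args pass through untouched;
    	// otherwise they are joined into one shell-form string.
    	fmt.Println(handleJSONArgs([]string{"echo", "hi"}, map[string]bool{"json": true})) // [echo hi]
    	fmt.Println(handleJSONArgs([]string{"echo", "hi"}, nil))                           // [echo hi] (a single "echo hi" element)
    }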
deleted file mode 100644 |
| ... | ... |
@@ -1,41 +0,0 @@ |
| 1 |
-package builder |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "testing" |
|
| 6 |
-) |
|
| 7 |
- |
|
| 8 |
-func TestSelectAcceptableMIME(t *testing.T) {
|
|
| 9 |
- validMimeStrings := []string{
|
|
| 10 |
- "application/x-bzip2", |
|
| 11 |
- "application/bzip2", |
|
| 12 |
- "application/gzip", |
|
| 13 |
- "application/x-gzip", |
|
| 14 |
- "application/x-xz", |
|
| 15 |
- "application/xz", |
|
| 16 |
- "application/tar", |
|
| 17 |
- "application/x-tar", |
|
| 18 |
- "application/octet-stream", |
|
| 19 |
- "text/plain", |
|
| 20 |
- } |
|
| 21 |
- |
|
| 22 |
- invalidMimeStrings := []string{
|
|
| 23 |
- "", |
|
| 24 |
- "application/octet", |
|
| 25 |
- "application/json", |
|
| 26 |
- } |
|
| 27 |
- |
|
| 28 |
- for _, m := range invalidMimeStrings {
|
|
| 29 |
- if len(selectAcceptableMIME(m)) > 0 {
|
|
| 30 |
- err := fmt.Errorf("Should not have accepted %q", m)
|
|
| 31 |
- t.Fatal(err) |
|
| 32 |
- } |
|
| 33 |
- } |
|
| 34 |
- |
|
| 35 |
- for _, m := range validMimeStrings {
|
|
| 36 |
- if str := selectAcceptableMIME(m); str == "" {
|
|
| 37 |
- err := fmt.Errorf("Should have accepted %q", m)
|
|
| 38 |
- t.Fatal(err) |
|
| 39 |
- } |
|
| 40 |
- } |
|
| 41 |
-} |
| 42 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,58 +0,0 @@ |
| 1 |
-hello | hello |
|
| 2 |
-he'll'o | hello |
|
| 3 |
-he'llo | hello |
|
| 4 |
-he\'llo | he'llo |
|
| 5 |
-he\\'llo | he\llo |
|
| 6 |
-abc\tdef | abctdef |
|
| 7 |
-"abc\tdef" | abc\tdef |
|
| 8 |
-'abc\tdef' | abc\tdef |
|
| 9 |
-hello\ | hello |
|
| 10 |
-hello\\ | hello\ |
|
| 11 |
-"hello | hello |
|
| 12 |
-"hello\" | hello" |
|
| 13 |
-"hel'lo" | hel'lo |
|
| 14 |
-'hello | hello |
|
| 15 |
-'hello\' | hello\ |
|
| 16 |
-"''" | '' |
|
| 17 |
-$. | $. |
|
| 18 |
-$1 | |
|
| 19 |
-he$1x | hex |
|
| 20 |
-he$.x | he$.x |
|
| 21 |
-he$pwd. | he. |
|
| 22 |
-he$PWD | he/home |
|
| 23 |
-he\$PWD | he$PWD |
|
| 24 |
-he\\$PWD | he\/home |
|
| 25 |
-he\${} | he${}
|
|
| 26 |
-he\${}xx | he${}xx
|
|
| 27 |
-he${} | he
|
|
| 28 |
-he${}xx | hexx
|
|
| 29 |
-he${hi} | he
|
|
| 30 |
-he${hi}xx | hexx
|
|
| 31 |
-he${PWD} | he/home
|
|
| 32 |
-he${.} | error
|
|
| 33 |
-he${XXX:-000}xx | he000xx
|
|
| 34 |
-he${PWD:-000}xx | he/homexx
|
|
| 35 |
-he${XXX:-$PWD}xx | he/homexx
|
|
| 36 |
-he${XXX:-${PWD:-yyy}}xx | he/homexx
|
|
| 37 |
-he${XXX:-${YYY:-yyy}}xx | heyyyxx
|
|
| 38 |
-he${XXX:YYY} | error
|
|
| 39 |
-he${XXX:+${PWD}}xx | hexx
|
|
| 40 |
-he${PWD:+${XXX}}xx | hexx
|
|
| 41 |
-he${PWD:+${SHELL}}xx | hebashxx
|
|
| 42 |
-he${XXX:+000}xx | hexx
|
|
| 43 |
-he${PWD:+000}xx | he000xx
|
|
| 44 |
-'he${XX}' | he${XX}
|
|
| 45 |
-"he${PWD}" | he/home
|
|
| 46 |
-"he'$PWD'" | he'/home' |
|
| 47 |
-"$PWD" | /home |
|
| 48 |
-'$PWD' | $PWD |
|
| 49 |
-'\$PWD' | \$PWD |
|
| 50 |
-'"hello"' | "hello" |
|
| 51 |
-he\$PWD | he$PWD |
|
| 52 |
-"he\$PWD" | he$PWD |
|
| 53 |
-'he\$PWD' | he\$PWD |
|
| 54 |
-he${PWD | error
|
|
| 55 |
-he${PWD:=000}xx | error
|
|
| 56 |
-he${PWD:+${PWD}:}xx | he/home:xx
|
|
| 57 |
-he${XXX:-\$PWD:}xx | he$PWD:xx
|
|
| 58 |
-he${XXX:-\${PWD}z}xx | he${PWDz}xx
|
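Each row above is consumed by TestShellParser earlier in this diff: the left column is the input word, the right column is the expected expansion under the test environment (PWD=/home, SHELL=bash), and a right-hand value of "error" marks inputs for which ProcessWord must return an error.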
| ... | ... |
@@ -18,7 +18,7 @@ import ( |
| 18 | 18 |
"text/template" |
| 19 | 19 |
"time" |
| 20 | 20 |
|
| 21 |
- "github.com/docker/docker/builder/command" |
|
| 21 |
+ "github.com/docker/docker/builder/dockerfile/command" |
|
| 22 | 22 |
"github.com/docker/docker/pkg/archive" |
| 23 | 23 |
"github.com/docker/docker/pkg/stringutils" |
| 24 | 24 |
"github.com/go-check/check" |