This patch creates interfaces in builder/ for building Docker images.
It is a first step in a series of patches to remove the daemon
dependency on builder and later allow a client-side Dockerfile builder
as well as potential builder plugins.
It is needed because we cannot remove the /build API endpoint, so we
need to keep the server-side Dockerfile builder, but we also want to
reuse the same Dockerfile parser and evaluator for both server-side and
client-side.
builder/dockerfile/ and api/server/builder.go contain implementations
of those interfaces as a refactoring of the current code.
Signed-off-by: Tibor Vass <tibor@docker.com>
| ... | ... |
@@ -12,7 +12,6 @@ import ( |
| 12 | 12 |
"net/url" |
| 13 | 13 |
"os" |
| 14 | 14 |
"os/exec" |
| 15 |
- "path" |
|
| 16 | 15 |
"path/filepath" |
| 17 | 16 |
"regexp" |
| 18 | 17 |
"runtime" |
| ... | ... |
@@ -131,13 +130,19 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
| 131 | 131 |
return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
|
| 132 | 132 |
} |
| 133 | 133 |
|
| 134 |
- var includes = []string{"."}
|
|
| 135 |
- |
|
| 136 |
- excludes, err := utils.ReadDockerIgnore(path.Join(contextDir, ".dockerignore")) |
|
| 137 |
- if err != nil {
|
|
| 134 |
+ f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) |
|
| 135 |
+ if err != nil && !os.IsNotExist(err) {
|
|
| 138 | 136 |
return err |
| 139 | 137 |
} |
| 140 | 138 |
|
| 139 |
+ var excludes []string |
|
| 140 |
+ if err == nil {
|
|
| 141 |
+ excludes, err = utils.ReadDockerIgnore(f) |
|
| 142 |
+ if err != nil {
|
|
| 143 |
+ return err |
|
| 144 |
+ } |
|
| 145 |
+ } |
|
| 146 |
+ |
|
| 141 | 147 |
if err := utils.ValidateContextDirectory(contextDir, excludes); err != nil {
|
| 142 | 148 |
return fmt.Errorf("Error checking context: '%s'.", err)
|
| 143 | 149 |
} |
| ... | ... |
@@ -149,6 +154,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
|
| 149 | 149 |
// removed. The daemon will remove them for us, if needed, after it |
| 150 | 150 |
// parses the Dockerfile. Ignore errors here, as they will have been |
| 151 | 151 |
// caught by ValidateContextDirectory above. |
| 152 |
+ var includes = []string{"."}
|
|
| 152 | 153 |
keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
|
| 153 | 154 |
keepThem2, _ := fileutils.Matches(relDockerfile, excludes) |
| 154 | 155 |
if keepThem1 || keepThem2 {
|
| ... | ... |
@@ -12,13 +12,18 @@ import ( |
| 12 | 12 |
"github.com/Sirupsen/logrus" |
| 13 | 13 |
"github.com/docker/docker/api/server/httputils" |
| 14 | 14 |
"github.com/docker/docker/api/types" |
| 15 |
+ "github.com/docker/docker/builder" |
|
| 15 | 16 |
"github.com/docker/docker/builder/dockerfile" |
| 16 | 17 |
"github.com/docker/docker/cliconfig" |
| 18 |
+ "github.com/docker/docker/daemon/daemonbuilder" |
|
| 17 | 19 |
"github.com/docker/docker/graph" |
| 20 |
+ "github.com/docker/docker/graph/tags" |
|
| 18 | 21 |
"github.com/docker/docker/pkg/ioutils" |
| 19 | 22 |
"github.com/docker/docker/pkg/parsers" |
| 23 |
+ "github.com/docker/docker/pkg/progressreader" |
|
| 20 | 24 |
"github.com/docker/docker/pkg/streamformatter" |
| 21 | 25 |
"github.com/docker/docker/pkg/ulimit" |
| 26 |
+ "github.com/docker/docker/registry" |
|
| 22 | 27 |
"github.com/docker/docker/runconfig" |
| 23 | 28 |
"github.com/docker/docker/utils" |
| 24 | 29 |
"golang.org/x/net/context" |
| ... | ... |
@@ -56,13 +61,18 @@ func (s *router) postCommit(ctx context.Context, w http.ResponseWriter, r *http. |
| 56 | 56 |
Config: c, |
| 57 | 57 |
} |
| 58 | 58 |
|
| 59 |
- imgID, err := dockerfile.Commit(cname, s.daemon, commitCfg) |
|
| 59 |
+ container, err := s.daemon.Get(cname) |
|
| 60 |
+ if err != nil {
|
|
| 61 |
+ return err |
|
| 62 |
+ } |
|
| 63 |
+ |
|
| 64 |
+ imgID, err := dockerfile.Commit(container, s.daemon, commitCfg) |
|
| 60 | 65 |
if err != nil {
|
| 61 | 66 |
return err |
| 62 | 67 |
} |
| 63 | 68 |
|
| 64 | 69 |
return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{
|
| 65 |
- ID: imgID, |
|
| 70 |
+ ID: string(imgID), |
|
| 66 | 71 |
}) |
| 67 | 72 |
} |
| 68 | 73 |
|
| ... | ... |
@@ -125,7 +135,7 @@ func (s *router) postImagesCreate(ctx context.Context, w http.ResponseWriter, r |
| 125 | 125 |
// generated from the download to be available to the output |
| 126 | 126 |
// stream processing below |
| 127 | 127 |
var newConfig *runconfig.Config |
| 128 |
- newConfig, err = dockerfile.BuildFromConfig(s.daemon, &runconfig.Config{}, r.Form["changes"])
|
|
| 128 |
+ newConfig, err = dockerfile.BuildFromConfig(&runconfig.Config{}, r.Form["changes"])
|
|
| 129 | 129 |
if err != nil {
|
| 130 | 130 |
return err |
| 131 | 131 |
} |
| ... | ... |
@@ -269,7 +279,7 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R |
| 269 | 269 |
var ( |
| 270 | 270 |
authConfigs = map[string]cliconfig.AuthConfig{}
|
| 271 | 271 |
authConfigsEncoded = r.Header.Get("X-Registry-Config")
|
| 272 |
- buildConfig = dockerfile.NewBuildConfig() |
|
| 272 |
+ buildConfig = &dockerfile.Config{}
|
|
| 273 | 273 |
) |
| 274 | 274 |
|
| 275 | 275 |
if authConfigsEncoded != "" {
|
| ... | ... |
@@ -284,6 +294,21 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R |
| 284 | 284 |
w.Header().Set("Content-Type", "application/json")
|
| 285 | 285 |
|
| 286 | 286 |
version := httputils.VersionFromContext(ctx) |
| 287 |
+ output := ioutils.NewWriteFlusher(w) |
|
| 288 |
+ sf := streamformatter.NewJSONStreamFormatter() |
|
| 289 |
+ errf := func(err error) error {
|
|
| 290 |
+ // Do not write the error in the http output if it's still empty. |
|
| 291 |
+ // This prevents writing a 200(OK) when there is an internal error. |
|
| 292 |
+ if !output.Flushed() {
|
|
| 293 |
+ return err |
|
| 294 |
+ } |
|
| 295 |
+ _, err = w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err)))) |
|
| 296 |
+ if err != nil {
|
|
| 297 |
+ logrus.Warnf("could not write error response: %v", err)
|
|
| 298 |
+ } |
|
| 299 |
+ return nil |
|
| 300 |
+ } |
|
| 301 |
+ |
|
| 287 | 302 |
if httputils.BoolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") {
|
| 288 | 303 |
buildConfig.Remove = true |
| 289 | 304 |
} else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") {
|
| ... | ... |
@@ -295,17 +320,22 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R |
| 295 | 295 |
buildConfig.Pull = true |
| 296 | 296 |
} |
| 297 | 297 |
|
| 298 |
- output := ioutils.NewWriteFlusher(w) |
|
| 299 |
- buildConfig.Stdout = output |
|
| 300 |
- buildConfig.Context = r.Body |
|
| 298 |
+ repoName, tag := parsers.ParseRepositoryTag(r.FormValue("t"))
|
|
| 299 |
+ if repoName != "" {
|
|
| 300 |
+ if err := registry.ValidateRepositoryName(repoName); err != nil {
|
|
| 301 |
+ return errf(err) |
|
| 302 |
+ } |
|
| 303 |
+ if len(tag) > 0 {
|
|
| 304 |
+ if err := tags.ValidateTagName(tag); err != nil {
|
|
| 305 |
+ return errf(err) |
|
| 306 |
+ } |
|
| 307 |
+ } |
|
| 308 |
+ } |
|
| 301 | 309 |
|
| 302 |
- buildConfig.RemoteURL = r.FormValue("remote")
|
|
| 303 | 310 |
buildConfig.DockerfileName = r.FormValue("dockerfile")
|
| 304 |
- buildConfig.RepoName = r.FormValue("t")
|
|
| 305 |
- buildConfig.SuppressOutput = httputils.BoolValue(r, "q") |
|
| 306 |
- buildConfig.NoCache = httputils.BoolValue(r, "nocache") |
|
| 311 |
+ buildConfig.Verbose = !httputils.BoolValue(r, "q") |
|
| 312 |
+ buildConfig.UseCache = !httputils.BoolValue(r, "nocache") |
|
| 307 | 313 |
buildConfig.ForceRemove = httputils.BoolValue(r, "forcerm") |
| 308 |
- buildConfig.AuthConfigs = authConfigs |
|
| 309 | 314 |
buildConfig.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") |
| 310 | 315 |
buildConfig.Memory = httputils.Int64ValueOrZero(r, "memory") |
| 311 | 316 |
buildConfig.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") |
| ... | ... |
@@ -319,7 +349,7 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R |
| 319 | 319 |
ulimitsJSON := r.FormValue("ulimits")
|
| 320 | 320 |
if ulimitsJSON != "" {
|
| 321 | 321 |
if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil {
|
| 322 |
- return err |
|
| 322 |
+ return errf(err) |
|
| 323 | 323 |
} |
| 324 | 324 |
buildConfig.Ulimits = buildUlimits |
| 325 | 325 |
} |
| ... | ... |
@@ -328,12 +358,50 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R |
| 328 | 328 |
buildArgsJSON := r.FormValue("buildargs")
|
| 329 | 329 |
if buildArgsJSON != "" {
|
| 330 | 330 |
if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil {
|
| 331 |
- return err |
|
| 331 |
+ return errf(err) |
|
| 332 | 332 |
} |
| 333 |
+ buildConfig.BuildArgs = buildArgs |
|
| 333 | 334 |
} |
| 334 |
- buildConfig.BuildArgs = buildArgs |
|
| 335 | 335 |
|
| 336 |
- // Job cancellation. Note: not all job types support this. |
|
| 336 |
+ remoteURL := r.FormValue("remote")
|
|
| 337 |
+ |
|
| 338 |
+ // Currently, only used if context is from a remote url. |
|
| 339 |
+ // The field `In` is set by DetectContextFromRemoteURL. |
|
| 340 |
+ // Look at code in DetectContextFromRemoteURL for more information. |
|
| 341 |
+ pReader := &progressreader.Config{
|
|
| 342 |
+ // TODO: make progressreader streamformatter-agnostic |
|
| 343 |
+ Out: output, |
|
| 344 |
+ Formatter: sf, |
|
| 345 |
+ Size: r.ContentLength, |
|
| 346 |
+ NewLines: true, |
|
| 347 |
+ ID: "Downloading context", |
|
| 348 |
+ Action: remoteURL, |
|
| 349 |
+ } |
|
| 350 |
+ |
|
| 351 |
+ var ( |
|
| 352 |
+ context builder.ModifiableContext |
|
| 353 |
+ dockerfileName string |
|
| 354 |
+ err error |
|
| 355 |
+ ) |
|
| 356 |
+ context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, pReader) |
|
| 357 |
+ if err != nil {
|
|
| 358 |
+ return errf(err) |
|
| 359 |
+ } |
|
| 360 |
+ defer func() {
|
|
| 361 |
+ if err := context.Close(); err != nil {
|
|
| 362 |
+ logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
|
|
| 363 |
+ } |
|
| 364 |
+ }() |
|
| 365 |
+ |
|
| 366 |
+ docker := daemonbuilder.Docker{s.daemon, output, authConfigs}
|
|
| 367 |
+ |
|
| 368 |
+ b, err := dockerfile.NewBuilder(buildConfig, docker, builder.DockerIgnoreContext{context}, nil)
|
|
| 369 |
+ if err != nil {
|
|
| 370 |
+ return errf(err) |
|
| 371 |
+ } |
|
| 372 |
+ b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
|
|
| 373 |
+ b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf}
|
|
| 374 |
+ |
|
| 337 | 375 |
if closeNotifier, ok := w.(http.CloseNotifier); ok {
|
| 338 | 376 |
finished := make(chan struct{})
|
| 339 | 377 |
defer close(finished) |
| ... | ... |
@@ -342,20 +410,26 @@ func (s *router) postBuild(ctx context.Context, w http.ResponseWriter, r *http.R |
| 342 | 342 |
case <-finished: |
| 343 | 343 |
case <-closeNotifier.CloseNotify(): |
| 344 | 344 |
logrus.Infof("Client disconnected, cancelling job: build")
|
| 345 |
- buildConfig.Cancel() |
|
| 345 |
+ b.Cancel() |
|
| 346 | 346 |
} |
| 347 | 347 |
}() |
| 348 | 348 |
} |
| 349 | 349 |
|
| 350 |
- if err := dockerfile.Build(s.daemon, buildConfig); err != nil {
|
|
| 351 |
- // Do not write the error in the http output if it's still empty. |
|
| 352 |
- // This prevents from writing a 200(OK) when there is an interal error. |
|
| 353 |
- if !output.Flushed() {
|
|
| 354 |
- return err |
|
| 350 |
+ if len(dockerfileName) > 0 {
|
|
| 351 |
+ b.DockerfileName = dockerfileName |
|
| 352 |
+ } |
|
| 353 |
+ |
|
| 354 |
+ imgID, err := b.Build() |
|
| 355 |
+ if err != nil {
|
|
| 356 |
+ return errf(err) |
|
| 357 |
+ } |
|
| 358 |
+ |
|
| 359 |
+ if repoName != "" {
|
|
| 360 |
+ if err := s.daemon.Repositories().Tag(repoName, tag, string(imgID), true); err != nil {
|
|
| 361 |
+ return errf(err) |
|
| 355 | 362 |
} |
| 356 |
- sf := streamformatter.NewJSONStreamFormatter() |
|
| 357 |
- w.Write(sf.FormatError(errors.New(utils.GetErrorMessage(err)))) |
|
| 358 | 363 |
} |
| 364 |
+ |
|
| 359 | 365 |
return nil |
| 360 | 366 |
} |
| 361 | 367 |
|
| 362 | 368 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,139 @@ |
| 0 |
// Package builder defines interfaces for any Docker builder to implement.
//
// Historically, only server-side Dockerfile interpreters existed.
// This package allows for other implementations of Docker builders.
package builder

import (
	"io"
	"os"

	// TODO: remove dependency on daemon
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/image"
	"github.com/docker/docker/runconfig"
)

// Builder abstracts a Docker builder whose only purpose is to build a Docker image referenced by an imageID.
type Builder interface {
	// Build builds a Docker image referenced by an imageID string.
	//
	// Note: Tagging an image should not be done by a Builder, it should instead be done
	// by the caller.
	//
	// NOTE(review): this signature has no error return, while the concrete
	// dockerfile.Builder.Build returns (string, error) — confirm intended.
	// TODO: make this return a reference instead of string
	Build() (imageID string)
}

// Context represents a file system tree.
type Context interface {
	// Close signals that the filesystem tree won't be used anymore.
	// For Context implementations using a temporary directory, it is recommended to
	// delete the temporary directory in Close().
	Close() error
	// Stat returns an entry corresponding to path, if any.
	// It is recommended to return an error if path was not found.
	Stat(path string) (FileInfo, error)
	// Open opens path from the context and returns a readable stream of it.
	Open(path string) (io.ReadCloser, error)
	// Walk walks the tree of the context with the function passed to it.
	Walk(root string, walkFn WalkFunc) error
}

// WalkFunc is the type of the function called for each file or directory visited by Context.Walk().
type WalkFunc func(path string, fi FileInfo, err error) error

// ModifiableContext represents a modifiable Context.
// TODO: remove this interface once we can get rid of Remove()
type ModifiableContext interface {
	Context
	// Remove deletes the entry specified by `path`.
	// It is usual for directory entries to delete all of their sub-entries.
	Remove(path string) error
}

// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file.
// TODO: remove this interface once pkg/archive exposes a walk function that Context can use.
type FileInfo interface {
	os.FileInfo
	// Path returns the absolute path to the file.
	Path() string
}

// PathFileInfo is a convenience struct that implements the FileInfo interface.
type PathFileInfo struct {
	os.FileInfo
	// FilePath holds the absolute path to the file.
	FilePath string
}

// Path returns the absolute path to the file.
func (fi PathFileInfo) Path() string {
	return fi.FilePath
}

// Hashed defines an extra method intended for implementations of os.FileInfo.
type Hashed interface {
	// Hash returns the hash of a file.
	Hash() string
	// SetHash stores the hash of a file.
	SetHash(string)
}

// HashedFileInfo is a convenience struct that augments FileInfo with a hash field.
type HashedFileInfo struct {
	FileInfo
	// FileHash represents the hash of a file.
	FileHash string
}

// Hash returns the hash of a file.
func (fi HashedFileInfo) Hash() string {
	return fi.FileHash
}

// SetHash sets the hash of a file.
func (fi *HashedFileInfo) SetHash(h string) {
	fi.FileHash = h
}

// Docker abstracts calls to a Docker Daemon.
type Docker interface {
	// TODO: use digest reference instead of name

	// LookupImage looks up a Docker image referenced by `name`.
	LookupImage(name string) (*image.Image, error)
	// Pull tells Docker to pull image referenced by `name`.
	Pull(name string) (*image.Image, error)

	// TODO: move daemon.Container to its own package

	// Container looks up a Docker container referenced by `id`.
	Container(id string) (*daemon.Container, error)
	// Create creates a new Docker container and returns potential warnings.
	// TODO: put warnings in the error
	Create(*runconfig.Config, *runconfig.HostConfig) (*daemon.Container, []string, error)
	// Remove removes a container specified by `id`.
	Remove(id string, cfg *daemon.ContainerRmConfig) error
	// Commit creates a new Docker image from an existing Docker container.
	Commit(*daemon.Container, *daemon.ContainerCommitConfig) (*image.Image, error)
	// Copy copies/extracts a source FileInfo to a destination path inside a container
	// specified by a container object.
	// TODO: make an Extract method instead of passing `decompress`
	// TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used
	// with Context.Walk
	Copy(c *daemon.Container, destPath string, src FileInfo, decompress bool) error

	// Retain retains an image, preventing it from being removed or overwritten until a corresponding Release() call.
	// TODO: remove
	Retain(sessionID, imgID string)
	// Release releases a list of images that were retained for the time of a build.
	// TODO: remove
	Release(sessionID string, activeImages []string)
}

// ImageCache abstracts an image cache store.
// (parent image, child runconfig) -> child image
type ImageCache interface {
	// GetCachedImage returns a reference to a cached image whose parent equals `parent`
	// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
	GetCachedImage(parentID string, cfg *runconfig.Config) (imageID string, err error)
}
| 0 | 139 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,292 @@ |
| 0 |
// Package dockerfile implements the server-side Dockerfile builder on top of
// the interfaces defined in the builder package.
package dockerfile

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"
	"strings"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/dockerfile/parser"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/ulimit"
	"github.com/docker/docker/runconfig"
)

// validCommitCommands lists the Dockerfile instructions accepted in a commit
// "changes" list (see BuildFromConfig); any other instruction is rejected.
var validCommitCommands = map[string]bool{
	"cmd":        true,
	"entrypoint": true,
	"env":        true,
	"expose":     true,
	"label":      true,
	"onbuild":    true,
	"user":       true,
	"volume":     true,
	"workdir":    true,
}

// BuiltinAllowedBuildArgs is the list of built-in allowed build args.
var BuiltinAllowedBuildArgs = map[string]bool{
	"HTTP_PROXY":  true,
	"http_proxy":  true,
	"HTTPS_PROXY": true,
	"https_proxy": true,
	"FTP_PROXY":   true,
	"ftp_proxy":   true,
	"NO_PROXY":    true,
	"no_proxy":    true,
}

// Config constitutes the configuration for a Dockerfile builder.
type Config struct {
	// DockerfileName is only used if the Dockerfile has to be extracted from the Context.
	DockerfileName string

	Verbose     bool
	UseCache    bool
	Remove      bool
	ForceRemove bool
	Pull        bool
	BuildArgs   map[string]string // build-time args received in build context for expansion/substitution and commands in 'run'.

	// resource constraints
	// TODO: factor out to be reused with Run ?

	Memory       int64
	MemorySwap   int64
	CPUShares    int64
	CPUPeriod    int64
	CPUQuota     int64
	CPUSetCpus   string
	CPUSetMems   string
	CgroupParent string
	Ulimits      []*ulimit.Ulimit
}

// Builder is a Dockerfile builder.
// It implements the builder.Builder interface.
type Builder struct {
	*Config

	Stdout io.Writer
	Stderr io.Writer

	docker  builder.Docker
	context builder.Context

	dockerfile       *parser.Node
	runConfig        *runconfig.Config // runconfig for cmd, run, entrypoint etc.
	flags            *BFlags
	tmpContainers    map[string]struct{}
	image            string // imageID
	noBaseImage      bool
	maintainer       string
	cmdSet           bool
	disableCommit    bool
	cacheBusted      bool
	cancelled        chan struct{} // closed (once) by Cancel; checked between instructions in Build
	cancelOnce       sync.Once
	allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'.

	// TODO: remove once docker.Commit can receive a tag
	id           string
	activeImages []string
}

// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config.
// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName
// will be read from the Context passed to Build().
func NewBuilder(config *Config, docker builder.Docker, context builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) {
	if config == nil {
		config = new(Config)
	}
	if config.BuildArgs == nil {
		config.BuildArgs = make(map[string]string)
	}
	b = &Builder{
		Config:           config,
		Stdout:           os.Stdout,
		Stderr:           os.Stderr,
		docker:           docker,
		context:          context,
		runConfig:        new(runconfig.Config),
		tmpContainers:    map[string]struct{}{},
		cancelled:        make(chan struct{}),
		id:               stringid.GenerateNonCryptoID(),
		allowedBuildArgs: make(map[string]bool),
	}
	if dockerfile != nil {
		b.dockerfile, err = parser.Parse(dockerfile)
		if err != nil {
			return nil, err
		}
	}

	return b, nil
}

// Build runs the Dockerfile builder from a context and a docker object that allows to make calls
// to Docker.
//
// This will (barring errors):
//
// * read the dockerfile from context
// * parse the dockerfile if not already parsed
// * walk the AST and execute it by dispatching to handlers. If Remove
//   or ForceRemove is set, additional cleanup around containers happens after
//   processing.
// * Print a happy message and return the image ID.
// * NOT tag the image, that is responsibility of the caller.
//
func (b *Builder) Build() (string, error) {
	// Release retained images even on error.
	// TODO: remove once b.docker.Commit can take a tag parameter.
	defer func() {
		b.docker.Release(b.id, b.activeImages)
	}()

	// If Dockerfile was not parsed yet, extract it from the Context
	if b.dockerfile == nil {
		if err := b.readDockerfile(); err != nil {
			return "", err
		}
	}

	var shortImgID string
	for i, n := range b.dockerfile.Children {
		// Check for cancellation between instructions so a Cancel() (e.g. from
		// a disconnected client) stops the build promptly.
		select {
		case <-b.cancelled:
			logrus.Debug("Builder: build cancelled!")
			fmt.Fprintf(b.Stdout, "Build cancelled")
			return "", fmt.Errorf("Build cancelled")
		default:
			// Not cancelled yet, keep going...
		}
		if err := b.dispatch(i, n); err != nil {
			if b.ForceRemove {
				b.clearTmp()
			}
			return "", err
		}
		shortImgID = stringid.TruncateID(b.image)
		fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID)
		if b.Remove {
			b.clearTmp()
		}
	}

	// check if there are any leftover build-args that were passed but not
	// consumed during build. Return an error, if there are any.
	leftoverArgs := []string{}
	for arg := range b.BuildArgs {
		if !b.isBuildArgAllowed(arg) {
			leftoverArgs = append(leftoverArgs, arg)
		}
	}
	if len(leftoverArgs) > 0 {
		return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs)
	}

	if b.image == "" {
		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
	}

	fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID)
	return b.image, nil
}

// Cancel cancels an ongoing Dockerfile build.
// Safe to call multiple times: only the first call closes the cancellation channel.
func (b *Builder) Cancel() {
	b.cancelOnce.Do(func() {
		close(b.cancelled)
	})
}

// CommitConfig contains build configs for commit operation.
type CommitConfig struct {
	Pause   bool
	Repo    string
	Tag     string
	Author  string
	Comment string
	Changes []string
	Config  *runconfig.Config
}

// BuildFromConfig will do build directly from parameter 'changes', which comes
// from Dockerfile entries, it will:
// - call parse.Parse() to get AST root from Dockerfile entries
// - do build by calling builder.dispatch() to call all entries' handling routines
// TODO: remove?
func BuildFromConfig(config *runconfig.Config, changes []string) (*runconfig.Config, error) {
	ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
	if err != nil {
		return nil, err
	}

	// ensure that the commands are valid
	for _, n := range ast.Children {
		if !validCommitCommands[n.Value] {
			return nil, fmt.Errorf("%s is not a valid change command", n.Value)
		}
	}

	b, err := NewBuilder(nil, nil, nil, nil)
	if err != nil {
		return nil, err
	}
	// This builder only mutates the runconfig; discard output and skip commits.
	b.runConfig = config
	b.Stdout = ioutil.Discard
	b.Stderr = ioutil.Discard
	b.disableCommit = true

	for i, n := range ast.Children {
		if err := b.dispatch(i, n); err != nil {
			return nil, err
		}
	}

	return b.runConfig, nil
}

// Commit will create a new image from a container's changes.
// TODO: remove daemon, make Commit a method on *Builder ?
func Commit(container *daemon.Container, d *daemon.Daemon, c *CommitConfig) (string, error) {
	// It is not possible to commit a running container on Windows
	if runtime.GOOS == "windows" && container.IsRunning() {
		return "", fmt.Errorf("Windows does not support commit of a running container")
	}

	if c.Config == nil {
		c.Config = &runconfig.Config{}
	}

	// Apply the requested changes on top of an (optionally provided) base config,
	// then merge with the container's own config.
	newConfig, err := BuildFromConfig(c.Config, c.Changes)
	if err != nil {
		return "", err
	}

	if err := runconfig.Merge(newConfig, container.Config); err != nil {
		return "", err
	}

	commitCfg := &daemon.ContainerCommitConfig{
		Pause:   c.Pause,
		Repo:    c.Repo,
		Tag:     c.Tag,
		Author:  c.Author,
		Comment: c.Comment,
		Config:  newConfig,
	}

	img, err := d.Commit(container, commitCfg)
	if err != nil {
		return "", err
	}
	return img.ID, nil
}
| ... | ... |
@@ -19,6 +19,7 @@ import ( |
| 19 | 19 |
|
| 20 | 20 |
"github.com/Sirupsen/logrus" |
| 21 | 21 |
derr "github.com/docker/docker/errors" |
| 22 |
+ "github.com/docker/docker/image" |
|
| 22 | 23 |
flag "github.com/docker/docker/pkg/mflag" |
| 23 | 24 |
"github.com/docker/docker/pkg/nat" |
| 24 | 25 |
"github.com/docker/docker/pkg/signal" |
| ... | ... |
@@ -34,7 +35,7 @@ const ( |
| 34 | 34 |
) |
| 35 | 35 |
|
| 36 | 36 |
// dispatch with no layer / parsing. This is effectively not a command.
// It matches the standard dispatcher signature (builder, args, attributes,
// original line) and simply returns nil.
func nullDispatch(b *Builder, args []string, attributes map[string]bool, original string) error {
	return nil
}
| 40 | 40 |
|
| ... | ... |
@@ -43,7 +44,7 @@ func nullDispatch(b *builder, args []string, attributes map[string]bool, origina |
| 43 | 43 |
// Sets the environment variable foo to bar, also makes interpolation |
| 44 | 44 |
// in the dockerfile available from the next statement on via ${foo}.
|
| 45 | 45 |
// |
| 46 |
-func env(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 46 |
+func env(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 47 | 47 |
if len(args) == 0 {
|
| 48 | 48 |
return derr.ErrorCodeAtLeastOneArg.WithArgs("ENV")
|
| 49 | 49 |
} |
| ... | ... |
@@ -53,7 +54,7 @@ func env(b *builder, args []string, attributes map[string]bool, original string) |
| 53 | 53 |
return derr.ErrorCodeTooManyArgs.WithArgs("ENV")
|
| 54 | 54 |
} |
| 55 | 55 |
|
| 56 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 56 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 57 | 57 |
return err |
| 58 | 58 |
} |
| 59 | 59 |
|
| ... | ... |
@@ -62,10 +63,10 @@ func env(b *builder, args []string, attributes map[string]bool, original string) |
| 62 | 62 |
// context of a builder command. Will remove once we actually add |
| 63 | 63 |
// a builder command to something! |
| 64 | 64 |
/* |
| 65 |
- flBool1 := b.BuilderFlags.AddBool("bool1", false)
|
|
| 66 |
- flStr1 := b.BuilderFlags.AddString("str1", "HI")
|
|
| 65 |
+ flBool1 := b.flags.AddBool("bool1", false)
|
|
| 66 |
+ flStr1 := b.flags.AddString("str1", "HI")
|
|
| 67 | 67 |
|
| 68 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 68 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 69 | 69 |
return err |
| 70 | 70 |
} |
| 71 | 71 |
|
| ... | ... |
@@ -82,44 +83,44 @@ func env(b *builder, args []string, attributes map[string]bool, original string) |
| 82 | 82 |
commitStr += " " + newVar |
| 83 | 83 |
|
| 84 | 84 |
gotOne := false |
| 85 |
- for i, envVar := range b.Config.Env {
|
|
| 85 |
+ for i, envVar := range b.runConfig.Env {
|
|
| 86 | 86 |
envParts := strings.SplitN(envVar, "=", 2) |
| 87 | 87 |
if envParts[0] == args[j] {
|
| 88 |
- b.Config.Env[i] = newVar |
|
| 88 |
+ b.runConfig.Env[i] = newVar |
|
| 89 | 89 |
gotOne = true |
| 90 | 90 |
break |
| 91 | 91 |
} |
| 92 | 92 |
} |
| 93 | 93 |
if !gotOne {
|
| 94 |
- b.Config.Env = append(b.Config.Env, newVar) |
|
| 94 |
+ b.runConfig.Env = append(b.runConfig.Env, newVar) |
|
| 95 | 95 |
} |
| 96 | 96 |
j++ |
| 97 | 97 |
} |
| 98 | 98 |
|
| 99 |
- return b.commit("", b.Config.Cmd, commitStr)
|
|
| 99 |
+ return b.commit("", b.runConfig.Cmd, commitStr)
|
|
| 100 | 100 |
} |
| 101 | 101 |
|
| 102 | 102 |
// MAINTAINER some text <maybe@an.email.address> |
| 103 | 103 |
// |
| 104 | 104 |
// Sets the maintainer metadata. |
| 105 |
-func maintainer(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 105 |
+func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 106 | 106 |
if len(args) != 1 {
|
| 107 | 107 |
return derr.ErrorCodeExactlyOneArg.WithArgs("MAINTAINER")
|
| 108 | 108 |
} |
| 109 | 109 |
|
| 110 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 110 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 111 | 111 |
return err |
| 112 | 112 |
} |
| 113 | 113 |
|
| 114 | 114 |
b.maintainer = args[0] |
| 115 |
- return b.commit("", b.Config.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
|
|
| 115 |
+ return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer))
|
|
| 116 | 116 |
} |
| 117 | 117 |
|
| 118 | 118 |
// LABEL some json data describing the image |
| 119 | 119 |
// |
| 120 | 120 |
// Sets the Label variable foo to bar, |
| 121 | 121 |
// |
| 122 |
-func label(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 122 |
+func label(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 123 | 123 |
if len(args) == 0 {
|
| 124 | 124 |
return derr.ErrorCodeAtLeastOneArg.WithArgs("LABEL")
|
| 125 | 125 |
} |
| ... | ... |
@@ -128,14 +129,14 @@ func label(b *builder, args []string, attributes map[string]bool, original strin |
| 128 | 128 |
return derr.ErrorCodeTooManyArgs.WithArgs("LABEL")
|
| 129 | 129 |
} |
| 130 | 130 |
|
| 131 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 131 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 132 | 132 |
return err |
| 133 | 133 |
} |
| 134 | 134 |
|
| 135 | 135 |
commitStr := "LABEL" |
| 136 | 136 |
|
| 137 |
- if b.Config.Labels == nil {
|
|
| 138 |
- b.Config.Labels = map[string]string{}
|
|
| 137 |
+ if b.runConfig.Labels == nil {
|
|
| 138 |
+ b.runConfig.Labels = map[string]string{}
|
|
| 139 | 139 |
} |
| 140 | 140 |
|
| 141 | 141 |
for j := 0; j < len(args); j++ {
|
| ... | ... |
@@ -144,10 +145,10 @@ func label(b *builder, args []string, attributes map[string]bool, original strin |
| 144 | 144 |
newVar := args[j] + "=" + args[j+1] + "" |
| 145 | 145 |
commitStr += " " + newVar |
| 146 | 146 |
|
| 147 |
- b.Config.Labels[args[j]] = args[j+1] |
|
| 147 |
+ b.runConfig.Labels[args[j]] = args[j+1] |
|
| 148 | 148 |
j++ |
| 149 | 149 |
} |
| 150 |
- return b.commit("", b.Config.Cmd, commitStr)
|
|
| 150 |
+ return b.commit("", b.runConfig.Cmd, commitStr)
|
|
| 151 | 151 |
} |
| 152 | 152 |
|
| 153 | 153 |
// ADD foo /path |
| ... | ... |
@@ -155,12 +156,12 @@ func label(b *builder, args []string, attributes map[string]bool, original strin |
| 155 | 155 |
// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling |
| 156 | 156 |
// exist here. If you do not wish to have this automatic handling, use COPY. |
| 157 | 157 |
// |
| 158 |
-func add(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 158 |
+func add(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 159 | 159 |
if len(args) < 2 {
|
| 160 | 160 |
return derr.ErrorCodeAtLeastTwoArgs.WithArgs("ADD")
|
| 161 | 161 |
} |
| 162 | 162 |
|
| 163 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 163 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 164 | 164 |
return err |
| 165 | 165 |
} |
| 166 | 166 |
|
| ... | ... |
@@ -171,12 +172,12 @@ func add(b *builder, args []string, attributes map[string]bool, original string) |
| 171 | 171 |
// |
| 172 | 172 |
// Same as 'ADD' but without the tar and remote url handling. |
| 173 | 173 |
// |
| 174 |
-func dispatchCopy(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 174 |
+func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 175 | 175 |
if len(args) < 2 {
|
| 176 | 176 |
return derr.ErrorCodeAtLeastTwoArgs.WithArgs("COPY")
|
| 177 | 177 |
} |
| 178 | 178 |
|
| 179 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 179 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 180 | 180 |
return err |
| 181 | 181 |
} |
| 182 | 182 |
|
| ... | ... |
@@ -187,12 +188,12 @@ func dispatchCopy(b *builder, args []string, attributes map[string]bool, origina |
| 187 | 187 |
// |
| 188 | 188 |
// This sets the image the dockerfile will build on top of. |
| 189 | 189 |
// |
| 190 |
-func from(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 190 |
+func from(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 191 | 191 |
if len(args) != 1 {
|
| 192 | 192 |
return derr.ErrorCodeExactlyOneArg.WithArgs("FROM")
|
| 193 | 193 |
} |
| 194 | 194 |
|
| 195 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 195 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 196 | 196 |
return err |
| 197 | 197 |
} |
| 198 | 198 |
|
| ... | ... |
@@ -208,25 +209,21 @@ func from(b *builder, args []string, attributes map[string]bool, original string |
| 208 | 208 |
return nil |
| 209 | 209 |
} |
| 210 | 210 |
|
| 211 |
- image, err := b.Daemon.Repositories().LookupImage(name) |
|
| 212 |
- if b.Pull {
|
|
| 213 |
- image, err = b.pullImage(name) |
|
| 214 |
- if err != nil {
|
|
| 215 |
- return err |
|
| 216 |
- } |
|
| 211 |
+ var ( |
|
| 212 |
+ image *image.Image |
|
| 213 |
+ err error |
|
| 214 |
+ ) |
|
| 215 |
+ // TODO: don't use `name`, instead resolve it to a digest |
|
| 216 |
+ if !b.Pull {
|
|
| 217 |
+ image, err = b.docker.LookupImage(name) |
|
| 218 |
+ // TODO: shouldn't we error out if error is different from "not found" ? |
|
| 217 | 219 |
} |
| 218 |
- if err != nil {
|
|
| 219 |
- if b.Daemon.Graph().IsNotExist(err, name) {
|
|
| 220 |
- image, err = b.pullImage(name) |
|
| 221 |
- } |
|
| 222 |
- |
|
| 223 |
- // note that the top level err will still be !nil here if IsNotExist is |
|
| 224 |
- // not the error. This approach just simplifies the logic a bit. |
|
| 220 |
+ if image == nil {
|
|
| 221 |
+ image, err = b.docker.Pull(name) |
|
| 225 | 222 |
if err != nil {
|
| 226 | 223 |
return err |
| 227 | 224 |
} |
| 228 | 225 |
} |
| 229 |
- |
|
| 230 | 226 |
return b.processImageFrom(image) |
| 231 | 227 |
} |
| 232 | 228 |
|
| ... | ... |
@@ -239,12 +236,12 @@ func from(b *builder, args []string, attributes map[string]bool, original string |
| 239 | 239 |
// special cases. search for 'OnBuild' in internals.go for additional special |
| 240 | 240 |
// cases. |
| 241 | 241 |
// |
| 242 |
-func onbuild(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 242 |
+func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 243 | 243 |
if len(args) == 0 {
|
| 244 | 244 |
return derr.ErrorCodeAtLeastOneArg.WithArgs("ONBUILD")
|
| 245 | 245 |
} |
| 246 | 246 |
|
| 247 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 247 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 248 | 248 |
return err |
| 249 | 249 |
} |
| 250 | 250 |
|
| ... | ... |
@@ -258,20 +255,20 @@ func onbuild(b *builder, args []string, attributes map[string]bool, original str |
| 258 | 258 |
|
| 259 | 259 |
original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") |
| 260 | 260 |
|
| 261 |
- b.Config.OnBuild = append(b.Config.OnBuild, original) |
|
| 262 |
- return b.commit("", b.Config.Cmd, fmt.Sprintf("ONBUILD %s", original))
|
|
| 261 |
+ b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) |
|
| 262 |
+ return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original))
|
|
| 263 | 263 |
} |
| 264 | 264 |
|
| 265 | 265 |
// WORKDIR /tmp |
| 266 | 266 |
// |
| 267 | 267 |
// Set the working directory for future RUN/CMD/etc statements. |
| 268 | 268 |
// |
| 269 |
-func workdir(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 269 |
+func workdir(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 270 | 270 |
if len(args) != 1 {
|
| 271 | 271 |
return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
|
| 272 | 272 |
} |
| 273 | 273 |
|
| 274 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 274 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 275 | 275 |
return err |
| 276 | 276 |
} |
| 277 | 277 |
|
| ... | ... |
@@ -280,13 +277,13 @@ func workdir(b *builder, args []string, attributes map[string]bool, original str |
| 280 | 280 |
workdir := filepath.FromSlash(args[0]) |
| 281 | 281 |
|
| 282 | 282 |
if !system.IsAbs(workdir) {
|
| 283 |
- current := filepath.FromSlash(b.Config.WorkingDir) |
|
| 283 |
+ current := filepath.FromSlash(b.runConfig.WorkingDir) |
|
| 284 | 284 |
workdir = filepath.Join(string(os.PathSeparator), current, workdir) |
| 285 | 285 |
} |
| 286 | 286 |
|
| 287 |
- b.Config.WorkingDir = workdir |
|
| 287 |
+ b.runConfig.WorkingDir = workdir |
|
| 288 | 288 |
|
| 289 |
- return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
|
|
| 289 |
+ return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
|
|
| 290 | 290 |
} |
| 291 | 291 |
|
| 292 | 292 |
// RUN some command yo |
| ... | ... |
@@ -299,12 +296,12 @@ func workdir(b *builder, args []string, attributes map[string]bool, original str |
| 299 | 299 |
// RUN echo hi # cmd /S /C echo hi (Windows) |
| 300 | 300 |
// RUN [ "echo", "hi" ] # echo hi |
| 301 | 301 |
// |
| 302 |
-func run(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 302 |
+func run(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 303 | 303 |
if b.image == "" && !b.noBaseImage {
|
| 304 | 304 |
return derr.ErrorCodeMissingFrom |
| 305 | 305 |
} |
| 306 | 306 |
|
| 307 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 307 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 308 | 308 |
return err |
| 309 | 309 |
} |
| 310 | 310 |
|
| ... | ... |
@@ -328,13 +325,13 @@ func run(b *builder, args []string, attributes map[string]bool, original string) |
| 328 | 328 |
} |
| 329 | 329 |
|
| 330 | 330 |
// stash the cmd |
| 331 |
- cmd := b.Config.Cmd |
|
| 332 |
- runconfig.Merge(b.Config, config) |
|
| 331 |
+ cmd := b.runConfig.Cmd |
|
| 332 |
+ runconfig.Merge(b.runConfig, config) |
|
| 333 | 333 |
// stash the config environment |
| 334 |
- env := b.Config.Env |
|
| 334 |
+ env := b.runConfig.Env |
|
| 335 | 335 |
|
| 336 |
- defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 337 |
- defer func(env []string) { b.Config.Env = env }(env)
|
|
| 336 |
+ defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
|
|
| 337 |
+ defer func(env []string) { b.runConfig.Env = env }(env)
|
|
| 338 | 338 |
|
| 339 | 339 |
// derive the net build-time environment for this run. We let config |
| 340 | 340 |
// environment override the build time environment. |
| ... | ... |
@@ -350,8 +347,8 @@ func run(b *builder, args []string, attributes map[string]bool, original string) |
| 350 | 350 |
// of RUN, without leaking it to the final image. It also aids cache |
| 351 | 351 |
// lookup for same image built with same build time environment. |
| 352 | 352 |
cmdBuildEnv := []string{}
|
| 353 |
- configEnv := runconfig.ConvertKVStringsToMap(b.Config.Env) |
|
| 354 |
- for key, val := range b.buildArgs {
|
|
| 353 |
+ configEnv := runconfig.ConvertKVStringsToMap(b.runConfig.Env) |
|
| 354 |
+ for key, val := range b.BuildArgs {
|
|
| 355 | 355 |
if !b.isBuildArgAllowed(key) {
|
| 356 | 356 |
// skip build-args that are not in allowed list, meaning they have |
| 357 | 357 |
// not been defined by an "ARG" Dockerfile command yet. |
| ... | ... |
@@ -379,7 +376,7 @@ func run(b *builder, args []string, attributes map[string]bool, original string) |
| 379 | 379 |
saveCmd = stringutils.NewStrSlice(append(tmpEnv, saveCmd.Slice()...)...) |
| 380 | 380 |
} |
| 381 | 381 |
|
| 382 |
- b.Config.Cmd = saveCmd |
|
| 382 |
+ b.runConfig.Cmd = saveCmd |
|
| 383 | 383 |
hit, err := b.probeCache() |
| 384 | 384 |
if err != nil {
|
| 385 | 385 |
return err |
| ... | ... |
@@ -389,11 +386,11 @@ func run(b *builder, args []string, attributes map[string]bool, original string) |
| 389 | 389 |
} |
| 390 | 390 |
|
| 391 | 391 |
// set Cmd manually, this is special case only for Dockerfiles |
| 392 |
- b.Config.Cmd = config.Cmd |
|
| 392 |
+ b.runConfig.Cmd = config.Cmd |
|
| 393 | 393 |
// set build-time environment for 'run'. |
| 394 |
- b.Config.Env = append(b.Config.Env, cmdBuildEnv...) |
|
| 394 |
+ b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) |
|
| 395 | 395 |
|
| 396 |
- logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd)
|
|
| 396 |
+ logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd)
|
|
| 397 | 397 |
|
| 398 | 398 |
c, err := b.create() |
| 399 | 399 |
if err != nil {
|
| ... | ... |
@@ -413,8 +410,8 @@ func run(b *builder, args []string, attributes map[string]bool, original string) |
| 413 | 413 |
// revert to original config environment and set the command string to |
| 414 | 414 |
// have the build-time env vars in it (if any) so that future cache look-ups |
| 415 | 415 |
// properly match it. |
| 416 |
- b.Config.Env = env |
|
| 417 |
- b.Config.Cmd = saveCmd |
|
| 416 |
+ b.runConfig.Env = env |
|
| 417 |
+ b.runConfig.Cmd = saveCmd |
|
| 418 | 418 |
if err := b.commit(c.ID, cmd, "run"); err != nil {
|
| 419 | 419 |
return err |
| 420 | 420 |
} |
| ... | ... |
@@ -427,8 +424,8 @@ func run(b *builder, args []string, attributes map[string]bool, original string) |
| 427 | 427 |
// Set the default command to run in the container (which may be empty). |
| 428 | 428 |
// Argument handling is the same as RUN. |
| 429 | 429 |
// |
| 430 |
-func cmd(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 431 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 430 |
+func cmd(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 431 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 432 | 432 |
return err |
| 433 | 433 |
} |
| 434 | 434 |
|
| ... | ... |
@@ -442,9 +439,9 @@ func cmd(b *builder, args []string, attributes map[string]bool, original string) |
| 442 | 442 |
} |
| 443 | 443 |
} |
| 444 | 444 |
|
| 445 |
- b.Config.Cmd = stringutils.NewStrSlice(cmdSlice...) |
|
| 445 |
+ b.runConfig.Cmd = stringutils.NewStrSlice(cmdSlice...) |
|
| 446 | 446 |
|
| 447 |
- if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
|
|
| 447 |
+ if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
|
|
| 448 | 448 |
return err |
| 449 | 449 |
} |
| 450 | 450 |
|
| ... | ... |
@@ -460,11 +457,11 @@ func cmd(b *builder, args []string, attributes map[string]bool, original string) |
| 460 | 460 |
// Set the entrypoint (which defaults to sh -c on linux, or cmd /S /C on Windows) to |
| 461 | 461 |
// /usr/sbin/nginx. Will accept the CMD as the arguments to /usr/sbin/nginx. |
| 462 | 462 |
// |
| 463 |
-// Handles command processing similar to CMD and RUN, only b.Config.Entrypoint |
|
| 463 |
+// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint |
|
| 464 | 464 |
// is initialized at NewBuilder time instead of through argument parsing. |
| 465 | 465 |
// |
| 466 |
-func entrypoint(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 467 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 466 |
+func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 467 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 468 | 468 |
return err |
| 469 | 469 |
} |
| 470 | 470 |
|
| ... | ... |
@@ -473,26 +470,26 @@ func entrypoint(b *builder, args []string, attributes map[string]bool, original |
| 473 | 473 |
switch {
|
| 474 | 474 |
case attributes["json"]: |
| 475 | 475 |
// ENTRYPOINT ["echo", "hi"] |
| 476 |
- b.Config.Entrypoint = stringutils.NewStrSlice(parsed...) |
|
| 476 |
+ b.runConfig.Entrypoint = stringutils.NewStrSlice(parsed...) |
|
| 477 | 477 |
case len(parsed) == 0: |
| 478 | 478 |
// ENTRYPOINT [] |
| 479 |
- b.Config.Entrypoint = nil |
|
| 479 |
+ b.runConfig.Entrypoint = nil |
|
| 480 | 480 |
default: |
| 481 | 481 |
// ENTRYPOINT echo hi |
| 482 | 482 |
if runtime.GOOS != "windows" {
|
| 483 |
- b.Config.Entrypoint = stringutils.NewStrSlice("/bin/sh", "-c", parsed[0])
|
|
| 483 |
+ b.runConfig.Entrypoint = stringutils.NewStrSlice("/bin/sh", "-c", parsed[0])
|
|
| 484 | 484 |
} else {
|
| 485 |
- b.Config.Entrypoint = stringutils.NewStrSlice("cmd", "/S", "/C", parsed[0])
|
|
| 485 |
+ b.runConfig.Entrypoint = stringutils.NewStrSlice("cmd", "/S", "/C", parsed[0])
|
|
| 486 | 486 |
} |
| 487 | 487 |
} |
| 488 | 488 |
|
| 489 | 489 |
// when setting the entrypoint if a CMD was not explicitly set then |
| 490 | 490 |
// set the command to nil |
| 491 | 491 |
if !b.cmdSet {
|
| 492 |
- b.Config.Cmd = nil |
|
| 492 |
+ b.runConfig.Cmd = nil |
|
| 493 | 493 |
} |
| 494 | 494 |
|
| 495 |
- if err := b.commit("", b.Config.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.Config.Entrypoint)); err != nil {
|
|
| 495 |
+ if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil {
|
|
| 496 | 496 |
return err |
| 497 | 497 |
} |
| 498 | 498 |
|
| ... | ... |
@@ -502,21 +499,21 @@ func entrypoint(b *builder, args []string, attributes map[string]bool, original |
| 502 | 502 |
// EXPOSE 6667/tcp 7000/tcp |
| 503 | 503 |
// |
| 504 | 504 |
// Expose ports for links and port mappings. This all ends up in |
| 505 |
-// b.Config.ExposedPorts for runconfig. |
|
| 505 |
+// b.runConfig.ExposedPorts for runconfig. |
|
| 506 | 506 |
// |
| 507 |
-func expose(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 507 |
+func expose(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 508 | 508 |
portsTab := args |
| 509 | 509 |
|
| 510 | 510 |
if len(args) == 0 {
|
| 511 | 511 |
return derr.ErrorCodeAtLeastOneArg.WithArgs("EXPOSE")
|
| 512 | 512 |
} |
| 513 | 513 |
|
| 514 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 514 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 515 | 515 |
return err |
| 516 | 516 |
} |
| 517 | 517 |
|
| 518 |
- if b.Config.ExposedPorts == nil {
|
|
| 519 |
- b.Config.ExposedPorts = make(nat.PortSet) |
|
| 518 |
+ if b.runConfig.ExposedPorts == nil {
|
|
| 519 |
+ b.runConfig.ExposedPorts = make(nat.PortSet) |
|
| 520 | 520 |
} |
| 521 | 521 |
|
| 522 | 522 |
ports, _, err := nat.ParsePortSpecs(portsTab) |
| ... | ... |
@@ -530,14 +527,14 @@ func expose(b *builder, args []string, attributes map[string]bool, original stri |
| 530 | 530 |
portList := make([]string, len(ports)) |
| 531 | 531 |
var i int |
| 532 | 532 |
for port := range ports {
|
| 533 |
- if _, exists := b.Config.ExposedPorts[port]; !exists {
|
|
| 534 |
- b.Config.ExposedPorts[port] = struct{}{}
|
|
| 533 |
+ if _, exists := b.runConfig.ExposedPorts[port]; !exists {
|
|
| 534 |
+ b.runConfig.ExposedPorts[port] = struct{}{}
|
|
| 535 | 535 |
} |
| 536 | 536 |
portList[i] = string(port) |
| 537 | 537 |
i++ |
| 538 | 538 |
} |
| 539 | 539 |
sort.Strings(portList) |
| 540 |
- return b.commit("", b.Config.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
|
|
| 540 |
+ return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " ")))
|
|
| 541 | 541 |
} |
| 542 | 542 |
|
| 543 | 543 |
// USER foo |
| ... | ... |
@@ -545,43 +542,43 @@ func expose(b *builder, args []string, attributes map[string]bool, original stri |
| 545 | 545 |
// Set the user to 'foo' for future commands and when running the |
| 546 | 546 |
// ENTRYPOINT/CMD at container run time. |
| 547 | 547 |
// |
| 548 |
-func user(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 548 |
+func user(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 549 | 549 |
if len(args) != 1 {
|
| 550 | 550 |
return derr.ErrorCodeExactlyOneArg.WithArgs("USER")
|
| 551 | 551 |
} |
| 552 | 552 |
|
| 553 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 553 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 554 | 554 |
return err |
| 555 | 555 |
} |
| 556 | 556 |
|
| 557 |
- b.Config.User = args[0] |
|
| 558 |
- return b.commit("", b.Config.Cmd, fmt.Sprintf("USER %v", args))
|
|
| 557 |
+ b.runConfig.User = args[0] |
|
| 558 |
+ return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args))
|
|
| 559 | 559 |
} |
| 560 | 560 |
|
| 561 | 561 |
// VOLUME /foo |
| 562 | 562 |
// |
| 563 | 563 |
// Expose the volume /foo for use. Will also accept the JSON array form. |
| 564 | 564 |
// |
| 565 |
-func volume(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 565 |
+func volume(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 566 | 566 |
if len(args) == 0 {
|
| 567 | 567 |
return derr.ErrorCodeAtLeastOneArg.WithArgs("VOLUME")
|
| 568 | 568 |
} |
| 569 | 569 |
|
| 570 |
- if err := b.BuilderFlags.Parse(); err != nil {
|
|
| 570 |
+ if err := b.flags.Parse(); err != nil {
|
|
| 571 | 571 |
return err |
| 572 | 572 |
} |
| 573 | 573 |
|
| 574 |
- if b.Config.Volumes == nil {
|
|
| 575 |
- b.Config.Volumes = map[string]struct{}{}
|
|
| 574 |
+ if b.runConfig.Volumes == nil {
|
|
| 575 |
+ b.runConfig.Volumes = map[string]struct{}{}
|
|
| 576 | 576 |
} |
| 577 | 577 |
for _, v := range args {
|
| 578 | 578 |
v = strings.TrimSpace(v) |
| 579 | 579 |
if v == "" {
|
| 580 | 580 |
return derr.ErrorCodeVolumeEmpty |
| 581 | 581 |
} |
| 582 |
- b.Config.Volumes[v] = struct{}{}
|
|
| 582 |
+ b.runConfig.Volumes[v] = struct{}{}
|
|
| 583 | 583 |
} |
| 584 |
- if err := b.commit("", b.Config.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
|
|
| 584 |
+ if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil {
|
|
| 585 | 585 |
return err |
| 586 | 586 |
} |
| 587 | 587 |
return nil |
| ... | ... |
@@ -590,7 +587,7 @@ func volume(b *builder, args []string, attributes map[string]bool, original stri |
| 590 | 590 |
// STOPSIGNAL signal |
| 591 | 591 |
// |
| 592 | 592 |
// Set the signal that will be used to kill the container. |
| 593 |
-func stopSignal(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 593 |
+func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 594 | 594 |
if len(args) != 1 {
|
| 595 | 595 |
return fmt.Errorf("STOPSIGNAL requires exactly one argument")
|
| 596 | 596 |
} |
| ... | ... |
@@ -601,8 +598,8 @@ func stopSignal(b *builder, args []string, attributes map[string]bool, original |
| 601 | 601 |
return err |
| 602 | 602 |
} |
| 603 | 603 |
|
| 604 |
- b.Config.StopSignal = sig |
|
| 605 |
- return b.commit("", b.Config.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
|
|
| 604 |
+ b.runConfig.StopSignal = sig |
|
| 605 |
+ return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args))
|
|
| 606 | 606 |
} |
| 607 | 607 |
|
| 608 | 608 |
// ARG name[=value] |
| ... | ... |
@@ -610,7 +607,7 @@ func stopSignal(b *builder, args []string, attributes map[string]bool, original |
| 610 | 610 |
// Adds the variable foo to the trusted list of variables that can be passed |
| 611 | 611 |
// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. |
| 612 | 612 |
// Dockerfile author may optionally set a default value of this variable. |
| 613 |
-func arg(b *builder, args []string, attributes map[string]bool, original string) error {
|
|
| 613 |
+func arg(b *Builder, args []string, attributes map[string]bool, original string) error {
|
|
| 614 | 614 |
if len(args) != 1 {
|
| 615 | 615 |
return fmt.Errorf("ARG requires exactly one argument definition")
|
| 616 | 616 |
} |
| ... | ... |
@@ -642,9 +639,9 @@ func arg(b *builder, args []string, attributes map[string]bool, original string) |
| 642 | 642 |
// If there is a default value associated with this arg then add it to the |
| 643 | 643 |
// b.buildArgs if one is not already passed to the builder. The args passed |
| 644 | 644 |
// to builder override the defaut value of 'arg'. |
| 645 |
- if _, ok := b.buildArgs[name]; !ok && hasDefault {
|
|
| 646 |
- b.buildArgs[name] = value |
|
| 645 |
+ if _, ok := b.BuildArgs[name]; !ok && hasDefault {
|
|
| 646 |
+ b.BuildArgs[name] = value |
|
| 647 | 647 |
} |
| 648 | 648 |
|
| 649 |
- return b.commit("", b.Config.Cmd, fmt.Sprintf("ARG %s", arg))
|
|
| 649 |
+ return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg))
|
|
| 650 | 650 |
} |
| ... | ... |
@@ -21,26 +21,11 @@ package dockerfile |
| 21 | 21 |
|
| 22 | 22 |
import ( |
| 23 | 23 |
"fmt" |
| 24 |
- "io" |
|
| 25 |
- "os" |
|
| 26 |
- "path/filepath" |
|
| 27 | 24 |
"runtime" |
| 28 | 25 |
"strings" |
| 29 | 26 |
|
| 30 |
- "github.com/Sirupsen/logrus" |
|
| 31 |
- "github.com/docker/docker/api" |
|
| 32 | 27 |
"github.com/docker/docker/builder/dockerfile/command" |
| 33 | 28 |
"github.com/docker/docker/builder/dockerfile/parser" |
| 34 |
- "github.com/docker/docker/cliconfig" |
|
| 35 |
- "github.com/docker/docker/daemon" |
|
| 36 |
- "github.com/docker/docker/pkg/fileutils" |
|
| 37 |
- "github.com/docker/docker/pkg/streamformatter" |
|
| 38 |
- "github.com/docker/docker/pkg/stringid" |
|
| 39 |
- "github.com/docker/docker/pkg/symlink" |
|
| 40 |
- "github.com/docker/docker/pkg/tarsum" |
|
| 41 |
- "github.com/docker/docker/pkg/ulimit" |
|
| 42 |
- "github.com/docker/docker/runconfig" |
|
| 43 |
- "github.com/docker/docker/utils" |
|
| 44 | 29 |
) |
| 45 | 30 |
|
| 46 | 31 |
// Environment variable interpolation will happen on these statements only. |
| ... | ... |
@@ -57,10 +42,10 @@ var replaceEnvAllowed = map[string]struct{}{
|
| 57 | 57 |
command.Arg: {},
|
| 58 | 58 |
} |
| 59 | 59 |
|
| 60 |
-var evaluateTable map[string]func(*builder, []string, map[string]bool, string) error |
|
| 60 |
+var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error |
|
| 61 | 61 |
|
| 62 | 62 |
func init() {
|
| 63 |
- evaluateTable = map[string]func(*builder, []string, map[string]bool, string) error{
|
|
| 63 |
+ evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{
|
|
| 64 | 64 |
command.Env: env, |
| 65 | 65 |
command.Label: label, |
| 66 | 66 |
command.Maintainer: maintainer, |
| ... | ... |
@@ -80,223 +65,6 @@ func init() {
|
| 80 | 80 |
} |
| 81 | 81 |
} |
| 82 | 82 |
|
| 83 |
-// builder is an internal struct, used to maintain configuration of the Dockerfile's |
|
| 84 |
-// processing as it evaluates the parsing result. |
|
| 85 |
-type builder struct {
|
|
| 86 |
- Daemon *daemon.Daemon |
|
| 87 |
- |
|
| 88 |
- // effectively stdio for the run. Because it is not stdio, I said |
|
| 89 |
- // "Effectively". Do not use stdio anywhere in this package for any reason. |
|
| 90 |
- OutStream io.Writer |
|
| 91 |
- ErrStream io.Writer |
|
| 92 |
- |
|
| 93 |
- Verbose bool |
|
| 94 |
- UtilizeCache bool |
|
| 95 |
- cacheBusted bool |
|
| 96 |
- |
|
| 97 |
- // controls how images and containers are handled between steps. |
|
| 98 |
- Remove bool |
|
| 99 |
- ForceRemove bool |
|
| 100 |
- Pull bool |
|
| 101 |
- |
|
| 102 |
- // set this to true if we want the builder to not commit between steps. |
|
| 103 |
- // This is useful when we only want to use the evaluator table to generate |
|
| 104 |
- // the final configs of the Dockerfile but dont want the layers |
|
| 105 |
- disableCommit bool |
|
| 106 |
- |
|
| 107 |
- // Registry server auth configs used to pull images when handling `FROM`. |
|
| 108 |
- AuthConfigs map[string]cliconfig.AuthConfig |
|
| 109 |
- |
|
| 110 |
- // Deprecated, original writer used for ImagePull. To be removed. |
|
| 111 |
- OutOld io.Writer |
|
| 112 |
- StreamFormatter *streamformatter.StreamFormatter |
|
| 113 |
- |
|
| 114 |
- Config *runconfig.Config // runconfig for cmd, run, entrypoint etc. |
|
| 115 |
- |
|
| 116 |
- buildArgs map[string]string // build-time args received in build context for expansion/substitution and commands in 'run'. |
|
| 117 |
- allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. |
|
| 118 |
- |
|
| 119 |
- // both of these are controlled by the Remove and ForceRemove options in BuildOpts |
|
| 120 |
- TmpContainers map[string]struct{} // a map of containers used for removes
|
|
| 121 |
- |
|
| 122 |
- dockerfileName string // name of Dockerfile |
|
| 123 |
- dockerfile *parser.Node // the syntax tree of the dockerfile |
|
| 124 |
- image string // image name for commit processing |
|
| 125 |
- maintainer string // maintainer name. could probably be removed. |
|
| 126 |
- cmdSet bool // indicates is CMD was set in current Dockerfile |
|
| 127 |
- BuilderFlags *BFlags // current cmd's BuilderFlags - temporary |
|
| 128 |
- context tarsum.TarSum // the context is a tarball that is uploaded by the client |
|
| 129 |
- contextPath string // the path of the temporary directory the local context is unpacked to (server side) |
|
| 130 |
- noBaseImage bool // indicates that this build does not start from any base image, but is being built from an empty file system. |
|
| 131 |
- |
|
| 132 |
- // Set resource restrictions for build containers |
|
| 133 |
- cpuSetCpus string |
|
| 134 |
- cpuSetMems string |
|
| 135 |
- cpuShares int64 |
|
| 136 |
- cpuPeriod int64 |
|
| 137 |
- cpuQuota int64 |
|
| 138 |
- cgroupParent string |
|
| 139 |
- memory int64 |
|
| 140 |
- memorySwap int64 |
|
| 141 |
- ulimits []*ulimit.Ulimit |
|
| 142 |
- |
|
| 143 |
- cancelled <-chan struct{} // When closed, job was cancelled.
|
|
| 144 |
- |
|
| 145 |
- activeImages []string |
|
| 146 |
- id string // Used to hold reference images |
|
| 147 |
-} |
|
| 148 |
- |
|
| 149 |
-// Run the builder with the context. This is the lynchpin of this package. This |
|
| 150 |
-// will (barring errors): |
|
| 151 |
-// |
|
| 152 |
-// * call readContext() which will set up the temporary directory and unpack |
|
| 153 |
-// the context into it. |
|
| 154 |
-// * read the dockerfile |
|
| 155 |
-// * parse the dockerfile |
|
| 156 |
-// * walk the parse tree and execute it by dispatching to handlers. If Remove |
|
| 157 |
-// or ForceRemove is set, additional cleanup around containers happens after |
|
| 158 |
-// processing. |
|
| 159 |
-// * Print a happy message and return the image ID. |
|
| 160 |
-// |
|
| 161 |
-func (b *builder) Run(context io.Reader) (string, error) {
|
|
| 162 |
- if err := b.readContext(context); err != nil {
|
|
| 163 |
- return "", err |
|
| 164 |
- } |
|
| 165 |
- |
|
| 166 |
- defer func() {
|
|
| 167 |
- if err := os.RemoveAll(b.contextPath); err != nil {
|
|
| 168 |
- logrus.Debugf("[BUILDER] failed to remove temporary context: %s", err)
|
|
| 169 |
- } |
|
| 170 |
- }() |
|
| 171 |
- |
|
| 172 |
- if err := b.readDockerfile(); err != nil {
|
|
| 173 |
- return "", err |
|
| 174 |
- } |
|
| 175 |
- |
|
| 176 |
- // some initializations that would not have been supplied by the caller. |
|
| 177 |
- b.Config = &runconfig.Config{}
|
|
| 178 |
- |
|
| 179 |
- b.TmpContainers = map[string]struct{}{}
|
|
| 180 |
- |
|
| 181 |
- for i, n := range b.dockerfile.Children {
|
|
| 182 |
- select {
|
|
| 183 |
- case <-b.cancelled: |
|
| 184 |
- logrus.Debug("Builder: build cancelled!")
|
|
| 185 |
- fmt.Fprintf(b.OutStream, "Build cancelled") |
|
| 186 |
- return "", fmt.Errorf("Build cancelled")
|
|
| 187 |
- default: |
|
| 188 |
- // Not cancelled yet, keep going... |
|
| 189 |
- } |
|
| 190 |
- if err := b.dispatch(i, n); err != nil {
|
|
| 191 |
- if b.ForceRemove {
|
|
| 192 |
- b.clearTmp() |
|
| 193 |
- } |
|
| 194 |
- return "", err |
|
| 195 |
- } |
|
| 196 |
- fmt.Fprintf(b.OutStream, " ---> %s\n", stringid.TruncateID(b.image)) |
|
| 197 |
- if b.Remove {
|
|
| 198 |
- b.clearTmp() |
|
| 199 |
- } |
|
| 200 |
- } |
|
| 201 |
- |
|
| 202 |
- // check if there are any leftover build-args that were passed but not |
|
| 203 |
- // consumed during build. Return an error, if there are any. |
|
| 204 |
- leftoverArgs := []string{}
|
|
| 205 |
- for arg := range b.buildArgs {
|
|
| 206 |
- if !b.isBuildArgAllowed(arg) {
|
|
| 207 |
- leftoverArgs = append(leftoverArgs, arg) |
|
| 208 |
- } |
|
| 209 |
- } |
|
| 210 |
- if len(leftoverArgs) > 0 {
|
|
| 211 |
- return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs)
|
|
| 212 |
- } |
|
| 213 |
- |
|
| 214 |
- if b.image == "" {
|
|
| 215 |
- return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
|
|
| 216 |
- } |
|
| 217 |
- |
|
| 218 |
- fmt.Fprintf(b.OutStream, "Successfully built %s\n", stringid.TruncateID(b.image)) |
|
| 219 |
- return b.image, nil |
|
| 220 |
-} |
|
| 221 |
- |
|
| 222 |
-// Reads a Dockerfile from the current context. It assumes that the |
|
| 223 |
-// 'filename' is a relative path from the root of the context |
|
| 224 |
-func (b *builder) readDockerfile() error {
|
|
| 225 |
- // If no -f was specified then look for 'Dockerfile'. If we can't find |
|
| 226 |
- // that then look for 'dockerfile'. If neither are found then default |
|
| 227 |
- // back to 'Dockerfile' and use that in the error message. |
|
| 228 |
- if b.dockerfileName == "" {
|
|
| 229 |
- b.dockerfileName = api.DefaultDockerfileName |
|
| 230 |
- tmpFN := filepath.Join(b.contextPath, api.DefaultDockerfileName) |
|
| 231 |
- if _, err := os.Lstat(tmpFN); err != nil {
|
|
| 232 |
- tmpFN = filepath.Join(b.contextPath, strings.ToLower(api.DefaultDockerfileName)) |
|
| 233 |
- if _, err := os.Lstat(tmpFN); err == nil {
|
|
| 234 |
- b.dockerfileName = strings.ToLower(api.DefaultDockerfileName) |
|
| 235 |
- } |
|
| 236 |
- } |
|
| 237 |
- } |
|
| 238 |
- |
|
| 239 |
- origFile := b.dockerfileName |
|
| 240 |
- |
|
| 241 |
- filename, err := symlink.FollowSymlinkInScope(filepath.Join(b.contextPath, origFile), b.contextPath) |
|
| 242 |
- if err != nil {
|
|
| 243 |
- return fmt.Errorf("The Dockerfile (%s) must be within the build context", origFile)
|
|
| 244 |
- } |
|
| 245 |
- |
|
| 246 |
- fi, err := os.Lstat(filename) |
|
| 247 |
- if os.IsNotExist(err) {
|
|
| 248 |
- return fmt.Errorf("Cannot locate specified Dockerfile: %s", origFile)
|
|
| 249 |
- } |
|
| 250 |
- if fi.Size() == 0 {
|
|
| 251 |
- return fmt.Errorf("The Dockerfile (%s) cannot be empty", origFile)
|
|
| 252 |
- } |
|
| 253 |
- |
|
| 254 |
- f, err := os.Open(filename) |
|
| 255 |
- if err != nil {
|
|
| 256 |
- return err |
|
| 257 |
- } |
|
| 258 |
- |
|
| 259 |
- b.dockerfile, err = parser.Parse(f) |
|
| 260 |
- f.Close() |
|
| 261 |
- |
|
| 262 |
- if err != nil {
|
|
| 263 |
- return err |
|
| 264 |
- } |
|
| 265 |
- |
|
| 266 |
- // After the Dockerfile has been parsed, we need to check the .dockerignore |
|
| 267 |
- // file for either "Dockerfile" or ".dockerignore", and if either are |
|
| 268 |
- // present then erase them from the build context. These files should never |
|
| 269 |
- // have been sent from the client but we did send them to make sure that |
|
| 270 |
- // we had the Dockerfile to actually parse, and then we also need the |
|
| 271 |
- // .dockerignore file to know whether either file should be removed. |
|
| 272 |
- // Note that this assumes the Dockerfile has been read into memory and |
|
| 273 |
- // is now safe to be removed. |
|
| 274 |
- |
|
| 275 |
- excludes, _ := utils.ReadDockerIgnore(filepath.Join(b.contextPath, ".dockerignore")) |
|
| 276 |
- if rm, _ := fileutils.Matches(".dockerignore", excludes); rm == true {
|
|
| 277 |
- os.Remove(filepath.Join(b.contextPath, ".dockerignore")) |
|
| 278 |
- b.context.(tarsum.BuilderContext).Remove(".dockerignore")
|
|
| 279 |
- } |
|
| 280 |
- if rm, _ := fileutils.Matches(b.dockerfileName, excludes); rm == true {
|
|
| 281 |
- os.Remove(filepath.Join(b.contextPath, b.dockerfileName)) |
|
| 282 |
- b.context.(tarsum.BuilderContext).Remove(b.dockerfileName) |
|
| 283 |
- } |
|
| 284 |
- |
|
| 285 |
- return nil |
|
| 286 |
-} |
|
| 287 |
- |
|
| 288 |
-// determine if build arg is part of built-in args or user |
|
| 289 |
-// defined args in Dockerfile at any point in time. |
|
| 290 |
-func (b *builder) isBuildArgAllowed(arg string) bool {
|
|
| 291 |
- if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
|
|
| 292 |
- return true |
|
| 293 |
- } |
|
| 294 |
- if _, ok := b.allowedBuildArgs[arg]; ok {
|
|
| 295 |
- return true |
|
| 296 |
- } |
|
| 297 |
- return false |
|
| 298 |
-} |
|
| 299 |
- |
|
| 300 | 83 |
// This method is the entrypoint to all statement handling routines. |
| 301 | 84 |
// |
| 302 | 85 |
// Almost all nodes will have this structure: |
| ... | ... |
@@ -311,8 +79,9 @@ func (b *builder) isBuildArgAllowed(arg string) bool {
|
| 311 | 311 |
// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to |
| 312 | 312 |
// deal with that, at least until it becomes more of a general concern with new |
| 313 | 313 |
// features. |
| 314 |
-func (b *builder) dispatch(stepN int, ast *parser.Node) error {
|
|
| 314 |
+func (b *Builder) dispatch(stepN int, ast *parser.Node) error {
|
|
| 315 | 315 |
cmd := ast.Value |
| 316 |
+ upperCasedCmd := strings.ToUpper(cmd) |
|
| 316 | 317 |
|
| 317 | 318 |
// To ensure the user is given a decent error message if the platform |
| 318 | 319 |
// on which the daemon is running does not support a builder command. |
| ... | ... |
@@ -324,7 +93,7 @@ func (b *builder) dispatch(stepN int, ast *parser.Node) error {
|
| 324 | 324 |
original := ast.Original |
| 325 | 325 |
flags := ast.Flags |
| 326 | 326 |
strs := []string{}
|
| 327 |
- msg := fmt.Sprintf("Step %d : %s", stepN+1, strings.ToUpper(cmd))
|
|
| 327 |
+ msg := fmt.Sprintf("Step %d : %s", stepN+1, upperCasedCmd)
|
|
| 328 | 328 |
|
| 329 | 329 |
if len(ast.Flags) > 0 {
|
| 330 | 330 |
msg += " " + strings.Join(ast.Flags, " ") |
| ... | ... |
@@ -368,8 +137,8 @@ func (b *builder) dispatch(stepN int, ast *parser.Node) error {
|
| 368 | 368 |
// stop on the first occurrence of a variable name and not notice |
| 369 | 369 |
// a subsequent one. So, putting the buildArgs list after the Config.Env |
| 370 | 370 |
// list, in 'envs', is safe. |
| 371 |
- envs := b.Config.Env |
|
| 372 |
- for key, val := range b.buildArgs {
|
|
| 371 |
+ envs := b.runConfig.Env |
|
| 372 |
+ for key, val := range b.BuildArgs {
|
|
| 373 | 373 |
if !b.isBuildArgAllowed(key) {
|
| 374 | 374 |
// skip build-args that are not in allowed list, meaning they have |
| 375 | 375 |
// not been defined by an "ARG" Dockerfile command yet. |
| ... | ... |
@@ -397,17 +166,17 @@ func (b *builder) dispatch(stepN int, ast *parser.Node) error {
|
| 397 | 397 |
} |
| 398 | 398 |
|
| 399 | 399 |
msg += " " + strings.Join(msgList, " ") |
| 400 |
- fmt.Fprintln(b.OutStream, msg) |
|
| 400 |
+ fmt.Fprintln(b.Stdout, msg) |
|
| 401 | 401 |
|
| 402 | 402 |
// XXX yes, we skip any cmds that are not valid; the parser should have |
| 403 | 403 |
// picked these out already. |
| 404 | 404 |
if f, ok := evaluateTable[cmd]; ok {
|
| 405 |
- b.BuilderFlags = NewBFlags() |
|
| 406 |
- b.BuilderFlags.Args = flags |
|
| 405 |
+ b.flags = NewBFlags() |
|
| 406 |
+ b.flags.Args = flags |
|
| 407 | 407 |
return f(b, strList, attrs, original) |
| 408 | 408 |
} |
| 409 | 409 |
|
| 410 |
- return fmt.Errorf("Unknown instruction: %s", strings.ToUpper(cmd))
|
|
| 410 |
+ return fmt.Errorf("Unknown instruction: %s", upperCasedCmd)
|
|
| 411 | 411 |
} |
| 412 | 412 |
|
| 413 | 413 |
// platformSupports is a short-term function to give users a quality error |
| ... | ... |
@@ -19,83 +19,45 @@ import ( |
| 19 | 19 |
"time" |
| 20 | 20 |
|
| 21 | 21 |
"github.com/Sirupsen/logrus" |
| 22 |
+ "github.com/docker/docker/api" |
|
| 23 |
+ "github.com/docker/docker/builder" |
|
| 22 | 24 |
"github.com/docker/docker/builder/dockerfile/parser" |
| 23 |
- "github.com/docker/docker/cliconfig" |
|
| 24 | 25 |
"github.com/docker/docker/daemon" |
| 25 |
- "github.com/docker/docker/graph" |
|
| 26 | 26 |
"github.com/docker/docker/image" |
| 27 | 27 |
"github.com/docker/docker/pkg/archive" |
| 28 |
- "github.com/docker/docker/pkg/chrootarchive" |
|
| 29 | 28 |
"github.com/docker/docker/pkg/httputils" |
| 30 | 29 |
"github.com/docker/docker/pkg/ioutils" |
| 31 | 30 |
"github.com/docker/docker/pkg/jsonmessage" |
| 32 |
- "github.com/docker/docker/pkg/parsers" |
|
| 33 | 31 |
"github.com/docker/docker/pkg/progressreader" |
| 32 |
+ "github.com/docker/docker/pkg/streamformatter" |
|
| 34 | 33 |
"github.com/docker/docker/pkg/stringid" |
| 35 | 34 |
"github.com/docker/docker/pkg/stringutils" |
| 36 |
- "github.com/docker/docker/pkg/symlink" |
|
| 37 | 35 |
"github.com/docker/docker/pkg/system" |
| 38 | 36 |
"github.com/docker/docker/pkg/tarsum" |
| 39 | 37 |
"github.com/docker/docker/pkg/urlutil" |
| 40 |
- "github.com/docker/docker/registry" |
|
| 41 | 38 |
"github.com/docker/docker/runconfig" |
| 42 | 39 |
) |
| 43 | 40 |
|
| 44 |
-func (b *builder) readContext(context io.Reader) (err error) {
|
|
| 45 |
- tmpdirPath, err := getTempDir("", "docker-build")
|
|
| 46 |
- if err != nil {
|
|
| 47 |
- return |
|
| 48 |
- } |
|
| 49 |
- |
|
| 50 |
- // Make sure we clean-up upon error. In the happy case the caller |
|
| 51 |
- // is expected to manage the clean-up |
|
| 52 |
- defer func() {
|
|
| 53 |
- if err != nil {
|
|
| 54 |
- if e := os.RemoveAll(tmpdirPath); e != nil {
|
|
| 55 |
- logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
|
|
| 56 |
- } |
|
| 57 |
- } |
|
| 58 |
- }() |
|
| 59 |
- |
|
| 60 |
- decompressedStream, err := archive.DecompressStream(context) |
|
| 61 |
- if err != nil {
|
|
| 62 |
- return |
|
| 63 |
- } |
|
| 64 |
- |
|
| 65 |
- if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
|
|
| 66 |
- return |
|
| 67 |
- } |
|
| 68 |
- |
|
| 69 |
- if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
|
|
| 70 |
- return |
|
| 71 |
- } |
|
| 72 |
- |
|
| 73 |
- b.contextPath = tmpdirPath |
|
| 74 |
- return |
|
| 75 |
-} |
|
| 76 |
- |
|
| 77 |
-func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
|
|
| 41 |
+func (b *Builder) commit(id string, autoCmd *stringutils.StrSlice, comment string) error {
|
|
| 78 | 42 |
if b.disableCommit {
|
| 79 | 43 |
return nil |
| 80 | 44 |
} |
| 81 | 45 |
if b.image == "" && !b.noBaseImage {
|
| 82 | 46 |
return fmt.Errorf("Please provide a source image with `from` prior to commit")
|
| 83 | 47 |
} |
| 84 |
- b.Config.Image = b.image |
|
| 48 |
+ b.runConfig.Image = b.image |
|
| 85 | 49 |
if id == "" {
|
| 86 |
- cmd := b.Config.Cmd |
|
| 50 |
+ cmd := b.runConfig.Cmd |
|
| 87 | 51 |
if runtime.GOOS != "windows" {
|
| 88 |
- b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
|
|
| 52 |
+ b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", "#(nop) "+comment)
|
|
| 89 | 53 |
} else {
|
| 90 |
- b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", "REM (nop) "+comment)
|
|
| 54 |
+ b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S /C", "REM (nop) "+comment)
|
|
| 91 | 55 |
} |
| 92 |
- defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 56 |
+ defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
|
|
| 93 | 57 |
|
| 94 |
- hit, err := b.probeCache() |
|
| 95 |
- if err != nil {
|
|
| 58 |
+ if hit, err := b.probeCache(); err != nil {
|
|
| 96 | 59 |
return err |
| 97 |
- } |
|
| 98 |
- if hit {
|
|
| 60 |
+ } else if hit {
|
|
| 99 | 61 |
return nil |
| 100 | 62 |
} |
| 101 | 63 |
|
| ... | ... |
@@ -110,13 +72,14 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin |
| 110 | 110 |
} |
| 111 | 111 |
defer container.Unmount() |
| 112 | 112 |
} |
| 113 |
- container, err := b.Daemon.Get(id) |
|
| 113 |
+ |
|
| 114 |
+ container, err := b.docker.Container(id) |
|
| 114 | 115 |
if err != nil {
|
| 115 | 116 |
return err |
| 116 | 117 |
} |
| 117 | 118 |
|
| 118 | 119 |
// Note: Actually copy the struct |
| 119 |
- autoConfig := *b.Config |
|
| 120 |
+ autoConfig := *b.runConfig |
|
| 120 | 121 |
autoConfig.Cmd = autoCmd |
| 121 | 122 |
|
| 122 | 123 |
commitCfg := &daemon.ContainerCommitConfig{
|
| ... | ... |
@@ -126,25 +89,22 @@ func (b *builder) commit(id string, autoCmd *stringutils.StrSlice, comment strin |
| 126 | 126 |
} |
| 127 | 127 |
|
| 128 | 128 |
// Commit the container |
| 129 |
- image, err := b.Daemon.Commit(container, commitCfg) |
|
| 129 |
+ image, err := b.docker.Commit(container, commitCfg) |
|
| 130 | 130 |
if err != nil {
|
| 131 | 131 |
return err |
| 132 | 132 |
} |
| 133 |
- b.Daemon.Graph().Retain(b.id, image.ID) |
|
| 133 |
+ b.docker.Retain(b.id, image.ID) |
|
| 134 | 134 |
b.activeImages = append(b.activeImages, image.ID) |
| 135 | 135 |
b.image = image.ID |
| 136 | 136 |
return nil |
| 137 | 137 |
} |
| 138 | 138 |
|
| 139 | 139 |
type copyInfo struct {
|
| 140 |
- origPath string |
|
| 141 |
- destPath string |
|
| 142 |
- hash string |
|
| 140 |
+ builder.FileInfo |
|
| 143 | 141 |
decompress bool |
| 144 |
- tmpDir string |
|
| 145 | 142 |
} |
| 146 | 143 |
|
| 147 |
-func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecompression bool, cmdName string) error {
|
|
| 144 |
+func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
|
|
| 148 | 145 |
if b.context == nil {
|
| 149 | 146 |
return fmt.Errorf("No context given. Impossible to use %s", cmdName)
|
| 150 | 147 |
} |
| ... | ... |
@@ -156,57 +116,66 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp |
| 156 | 156 |
// Work in daemon-specific filepath semantics |
| 157 | 157 |
dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest |
| 158 | 158 |
|
| 159 |
- copyInfos := []*copyInfo{}
|
|
| 160 |
- |
|
| 161 |
- b.Config.Image = b.image |
|
| 159 |
+ b.runConfig.Image = b.image |
|
| 162 | 160 |
|
| 163 |
- defer func() {
|
|
| 164 |
- for _, ci := range copyInfos {
|
|
| 165 |
- if ci.tmpDir != "" {
|
|
| 166 |
- os.RemoveAll(ci.tmpDir) |
|
| 167 |
- } |
|
| 168 |
- } |
|
| 169 |
- }() |
|
| 161 |
+ var infos []copyInfo |
|
| 170 | 162 |
|
| 171 | 163 |
// Loop through each src file and calculate the info we need to |
| 172 | 164 |
// do the copy (e.g. hash value if cached). Don't actually do |
| 173 | 165 |
// the copy until we've looked at all src files |
| 166 |
+ var err error |
|
| 174 | 167 |
for _, orig := range args[0 : len(args)-1] {
|
| 175 |
- if err := calcCopyInfo( |
|
| 176 |
- b, |
|
| 177 |
- cmdName, |
|
| 178 |
- ©Infos, |
|
| 179 |
- orig, |
|
| 180 |
- dest, |
|
| 181 |
- allowRemote, |
|
| 182 |
- allowDecompression, |
|
| 183 |
- true, |
|
| 184 |
- ); err != nil {
|
|
| 168 |
+ var fi builder.FileInfo |
|
| 169 |
+ decompress := allowLocalDecompression |
|
| 170 |
+ if urlutil.IsURL(orig) {
|
|
| 171 |
+ if !allowRemote {
|
|
| 172 |
+ return fmt.Errorf("Source can't be a URL for %s", cmdName)
|
|
| 173 |
+ } |
|
| 174 |
+ fi, err = b.download(orig) |
|
| 175 |
+ if err != nil {
|
|
| 176 |
+ return err |
|
| 177 |
+ } |
|
| 178 |
+ defer os.RemoveAll(filepath.Dir(fi.Path())) |
|
| 179 |
+ decompress = false |
|
| 180 |
+ infos = append(infos, copyInfo{fi, decompress})
|
|
| 181 |
+ continue |
|
| 182 |
+ } |
|
| 183 |
+ // not a URL |
|
| 184 |
+ subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) |
|
| 185 |
+ if err != nil {
|
|
| 185 | 186 |
return err |
| 186 | 187 |
} |
| 188 |
+ |
|
| 189 |
+ infos = append(infos, subInfos...) |
|
| 187 | 190 |
} |
| 188 | 191 |
|
| 189 |
- if len(copyInfos) == 0 {
|
|
| 192 |
+ if len(infos) == 0 {
|
|
| 190 | 193 |
return fmt.Errorf("No source files were specified")
|
| 191 | 194 |
} |
| 192 |
- if len(copyInfos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
|
|
| 195 |
+ if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
|
|
| 193 | 196 |
return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
|
| 194 | 197 |
} |
| 195 | 198 |
|
| 196 |
- // For backwards compat, if there's just one CI then use it as the |
|
| 199 |
+ // For backwards compat, if there's just one info then use it as the |
|
| 197 | 200 |
// cache look-up string, otherwise hash 'em all into one |
| 198 | 201 |
var srcHash string |
| 199 | 202 |
var origPaths string |
| 200 | 203 |
|
| 201 |
- if len(copyInfos) == 1 {
|
|
| 202 |
- srcHash = copyInfos[0].hash |
|
| 203 |
- origPaths = copyInfos[0].origPath |
|
| 204 |
+ if len(infos) == 1 {
|
|
| 205 |
+ fi := infos[0].FileInfo |
|
| 206 |
+ origPaths = fi.Name() |
|
| 207 |
+ if hfi, ok := fi.(builder.Hashed); ok {
|
|
| 208 |
+ srcHash = hfi.Hash() |
|
| 209 |
+ } |
|
| 204 | 210 |
} else {
|
| 205 | 211 |
var hashs []string |
| 206 | 212 |
var origs []string |
| 207 |
- for _, ci := range copyInfos {
|
|
| 208 |
- hashs = append(hashs, ci.hash) |
|
| 209 |
- origs = append(origs, ci.origPath) |
|
| 213 |
+ for _, info := range infos {
|
|
| 214 |
+ fi := info.FileInfo |
|
| 215 |
+ origs = append(origs, fi.Name()) |
|
| 216 |
+ if hfi, ok := fi.(builder.Hashed); ok {
|
|
| 217 |
+ hashs = append(hashs, hfi.Hash()) |
|
| 218 |
+ } |
|
| 210 | 219 |
} |
| 211 | 220 |
hasher := sha256.New() |
| 212 | 221 |
hasher.Write([]byte(strings.Join(hashs, ","))) |
| ... | ... |
@@ -214,262 +183,225 @@ func (b *builder) runContextCommand(args []string, allowRemote bool, allowDecomp |
| 214 | 214 |
origPaths = strings.Join(origs, " ") |
| 215 | 215 |
} |
| 216 | 216 |
|
| 217 |
- cmd := b.Config.Cmd |
|
| 217 |
+ cmd := b.runConfig.Cmd |
|
| 218 | 218 |
if runtime.GOOS != "windows" {
|
| 219 |
- b.Config.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 219 |
+ b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 220 | 220 |
} else {
|
| 221 |
- b.Config.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 221 |
+ b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S /C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
|
|
| 222 | 222 |
} |
| 223 |
- defer func(cmd *stringutils.StrSlice) { b.Config.Cmd = cmd }(cmd)
|
|
| 223 |
+ defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)
|
|
| 224 | 224 |
|
| 225 |
- hit, err := b.probeCache() |
|
| 226 |
- if err != nil {
|
|
| 225 |
+ if hit, err := b.probeCache(); err != nil {
|
|
| 227 | 226 |
return err |
| 228 |
- } |
|
| 229 |
- |
|
| 230 |
- if hit {
|
|
| 227 |
+ } else if hit {
|
|
| 231 | 228 |
return nil |
| 232 | 229 |
} |
| 233 | 230 |
|
| 234 |
- ccr, err := b.Daemon.ContainerCreate("", b.Config, nil, true)
|
|
| 235 |
- if err != nil {
|
|
| 236 |
- return err |
|
| 237 |
- } |
|
| 238 |
- container, err := b.Daemon.Get(ccr.ID) |
|
| 231 |
+ container, _, err := b.docker.Create(b.runConfig, nil) |
|
| 239 | 232 |
if err != nil {
|
| 240 | 233 |
return err |
| 241 | 234 |
} |
| 235 |
+ defer container.Unmount() |
|
| 236 |
+ b.tmpContainers[container.ID] = struct{}{}
|
|
| 242 | 237 |
|
| 243 |
- b.TmpContainers[container.ID] = struct{}{}
|
|
| 238 |
+ comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)
|
|
| 244 | 239 |
|
| 245 |
- if err := container.Mount(); err != nil {
|
|
| 246 |
- return err |
|
| 240 |
+ // Twiddle the destination when its a relative path - meaning, make it |
|
| 241 |
+ // relative to the WORKINGDIR |
|
| 242 |
+ if !system.IsAbs(dest) {
|
|
| 243 |
+ hasSlash := strings.HasSuffix(dest, string(os.PathSeparator)) |
|
| 244 |
+ dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest) |
|
| 245 |
+ |
|
| 246 |
+ // Make sure we preserve any trailing slash |
|
| 247 |
+ if hasSlash {
|
|
| 248 |
+ dest += string(os.PathSeparator) |
|
| 249 |
+ } |
|
| 247 | 250 |
} |
| 248 |
- defer container.Unmount() |
|
| 249 | 251 |
|
| 250 |
- for _, ci := range copyInfos {
|
|
| 251 |
- if err := b.addContext(container, ci.origPath, ci.destPath, ci.decompress); err != nil {
|
|
| 252 |
+ for _, info := range infos {
|
|
| 253 |
+ if err := b.docker.Copy(container, dest, info.FileInfo, info.decompress); err != nil {
|
|
| 252 | 254 |
return err |
| 253 | 255 |
} |
| 254 | 256 |
} |
| 255 | 257 |
|
| 256 |
- if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)); err != nil {
|
|
| 258 |
+ if err := b.commit(container.ID, cmd, comment); err != nil {
|
|
| 257 | 259 |
return err |
| 258 | 260 |
} |
| 259 | 261 |
return nil |
| 260 | 262 |
} |
| 261 | 263 |
|
| 262 |
-func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {
|
|
| 263 |
- |
|
| 264 |
- // Work in daemon-specific OS filepath semantics. However, we save |
|
| 265 |
- // the the origPath passed in here, as it might also be a URL which |
|
| 266 |
- // we need to check for in this function. |
|
| 267 |
- passedInOrigPath := origPath |
|
| 268 |
- origPath = filepath.FromSlash(origPath) |
|
| 269 |
- destPath = filepath.FromSlash(destPath) |
|
| 270 |
- |
|
| 271 |
- if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
|
|
| 272 |
- origPath = origPath[1:] |
|
| 264 |
+func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
|
|
| 265 |
+ // get filename from URL |
|
| 266 |
+ u, err := url.Parse(srcURL) |
|
| 267 |
+ if err != nil {
|
|
| 268 |
+ return |
|
| 273 | 269 |
} |
| 274 |
- origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) |
|
| 275 |
- |
|
| 276 |
- // Twiddle the destPath when its a relative path - meaning, make it |
|
| 277 |
- // relative to the WORKINGDIR |
|
| 278 |
- if !system.IsAbs(destPath) {
|
|
| 279 |
- hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator)) |
|
| 280 |
- destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath) |
|
| 281 |
- |
|
| 282 |
- // Make sure we preserve any trailing slash |
|
| 283 |
- if hasSlash {
|
|
| 284 |
- destPath += string(os.PathSeparator) |
|
| 285 |
- } |
|
| 270 |
+ path := u.Path |
|
| 271 |
+ if strings.HasSuffix(path, string(os.PathSeparator)) {
|
|
| 272 |
+ path = path[:len(path)-1] |
|
| 273 |
+ } |
|
| 274 |
+ parts := strings.Split(path, string(os.PathSeparator)) |
|
| 275 |
+ filename := parts[len(parts)-1] |
|
| 276 |
+ if filename == "" {
|
|
| 277 |
+ err = fmt.Errorf("cannot determine filename from url: %s", u)
|
|
| 278 |
+ return |
|
| 286 | 279 |
} |
| 287 | 280 |
|
| 288 |
- // In the remote/URL case, download it and gen its hashcode |
|
| 289 |
- if urlutil.IsURL(passedInOrigPath) {
|
|
| 290 |
- |
|
| 291 |
- // As it's a URL, we go back to processing on what was passed in |
|
| 292 |
- // to this function |
|
| 293 |
- origPath = passedInOrigPath |
|
| 294 |
- |
|
| 295 |
- if !allowRemote {
|
|
| 296 |
- return fmt.Errorf("Source can't be a URL for %s", cmdName)
|
|
| 297 |
- } |
|
| 298 |
- |
|
| 299 |
- ci := copyInfo{}
|
|
| 300 |
- ci.origPath = origPath |
|
| 301 |
- ci.hash = origPath // default to this but can change |
|
| 302 |
- ci.destPath = destPath |
|
| 303 |
- ci.decompress = false |
|
| 304 |
- *cInfos = append(*cInfos, &ci) |
|
| 305 |
- |
|
| 306 |
- // Initiate the download |
|
| 307 |
- resp, err := httputils.Download(ci.origPath) |
|
| 308 |
- if err != nil {
|
|
| 309 |
- return err |
|
| 310 |
- } |
|
| 311 |
- |
|
| 312 |
- // Create a tmp dir |
|
| 313 |
- tmpDirName, err := getTempDir(b.contextPath, "docker-remote") |
|
| 314 |
- if err != nil {
|
|
| 315 |
- return err |
|
| 316 |
- } |
|
| 317 |
- ci.tmpDir = tmpDirName |
|
| 281 |
+ // Initiate the download |
|
| 282 |
+ resp, err := httputils.Download(srcURL) |
|
| 283 |
+ if err != nil {
|
|
| 284 |
+ return |
|
| 285 |
+ } |
|
| 318 | 286 |
|
| 319 |
- // Create a tmp file within our tmp dir |
|
| 320 |
- tmpFileName := filepath.Join(tmpDirName, "tmp") |
|
| 321 |
- tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) |
|
| 287 |
+ // Prepare file in a tmp dir |
|
| 288 |
+ tmpDir, err := ioutils.TempDir("", "docker-remote")
|
|
| 289 |
+ if err != nil {
|
|
| 290 |
+ return |
|
| 291 |
+ } |
|
| 292 |
+ defer func() {
|
|
| 322 | 293 |
if err != nil {
|
| 323 |
- return err |
|
| 294 |
+ os.RemoveAll(tmpDir) |
|
| 324 | 295 |
} |
| 296 |
+ }() |
|
| 297 |
+ tmpFileName := filepath.Join(tmpDir, filename) |
|
| 298 |
+ tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) |
|
| 299 |
+ if err != nil {
|
|
| 300 |
+ return |
|
| 301 |
+ } |
|
| 325 | 302 |
|
| 326 |
- // Download and dump result to tmp file |
|
| 327 |
- if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
|
|
| 328 |
- In: resp.Body, |
|
| 329 |
- Out: b.OutOld, |
|
| 330 |
- Formatter: b.StreamFormatter, |
|
| 331 |
- Size: resp.ContentLength, |
|
| 332 |
- NewLines: true, |
|
| 333 |
- ID: "", |
|
| 334 |
- Action: "Downloading", |
|
| 335 |
- })); err != nil {
|
|
| 336 |
- tmpFile.Close() |
|
| 337 |
- return err |
|
| 338 |
- } |
|
| 339 |
- fmt.Fprintf(b.OutStream, "\n") |
|
| 303 |
+ // Download and dump result to tmp file |
|
| 304 |
+ if _, err = io.Copy(tmpFile, progressreader.New(progressreader.Config{
|
|
| 305 |
+ In: resp.Body, |
|
| 306 |
+ // TODO: make progressreader streamformatter agnostic |
|
| 307 |
+ Out: b.Stdout.(*streamformatter.StdoutFormatter).Writer, |
|
| 308 |
+ Formatter: b.Stdout.(*streamformatter.StdoutFormatter).StreamFormatter, |
|
| 309 |
+ Size: resp.ContentLength, |
|
| 310 |
+ NewLines: true, |
|
| 311 |
+ ID: "", |
|
| 312 |
+ Action: "Downloading", |
|
| 313 |
+ })); err != nil {
|
|
| 340 | 314 |
tmpFile.Close() |
| 315 |
+ return |
|
| 316 |
+ } |
|
| 317 |
+ fmt.Fprintln(b.Stdout) |
|
| 318 |
+ // ignoring error because the file was already opened successfully |
|
| 319 |
+ tmpFileSt, err := tmpFile.Stat() |
|
| 320 |
+ if err != nil {
|
|
| 321 |
+ return |
|
| 322 |
+ } |
|
| 323 |
+ tmpFile.Close() |
|
| 341 | 324 |
|
| 342 |
- // Set the mtime to the Last-Modified header value if present |
|
| 343 |
- // Otherwise just remove atime and mtime |
|
| 344 |
- mTime := time.Time{}
|
|
| 325 |
+ // Set the mtime to the Last-Modified header value if present |
|
| 326 |
+ // Otherwise just remove atime and mtime |
|
| 327 |
+ mTime := time.Time{}
|
|
| 345 | 328 |
|
| 346 |
- lastMod := resp.Header.Get("Last-Modified")
|
|
| 347 |
- if lastMod != "" {
|
|
| 348 |
- // If we can't parse it then just let it default to 'zero' |
|
| 349 |
- // otherwise use the parsed time value |
|
| 350 |
- if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
|
| 351 |
- mTime = parsedMTime |
|
| 352 |
- } |
|
| 329 |
+ lastMod := resp.Header.Get("Last-Modified")
|
|
| 330 |
+ if lastMod != "" {
|
|
| 331 |
+ // If we can't parse it then just let it default to 'zero' |
|
| 332 |
+ // otherwise use the parsed time value |
|
| 333 |
+ if parsedMTime, err := http.ParseTime(lastMod); err == nil {
|
|
| 334 |
+ mTime = parsedMTime |
|
| 353 | 335 |
} |
| 336 |
+ } |
|
| 354 | 337 |
|
| 355 |
- if err := system.Chtimes(tmpFileName, time.Time{}, mTime); err != nil {
|
|
| 356 |
- return err |
|
| 357 |
- } |
|
| 338 |
+ if err = system.Chtimes(tmpFileName, time.Time{}, mTime); err != nil {
|
|
| 339 |
+ return |
|
| 340 |
+ } |
|
| 358 | 341 |
|
| 359 |
- ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) |
|
| 342 |
+ // Calc the checksum, even if we're using the cache |
|
| 343 |
+ r, err := archive.Tar(tmpFileName, archive.Uncompressed) |
|
| 344 |
+ if err != nil {
|
|
| 345 |
+ return |
|
| 346 |
+ } |
|
| 347 |
+ tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) |
|
| 348 |
+ if err != nil {
|
|
| 349 |
+ return |
|
| 350 |
+ } |
|
| 351 |
+ if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
|
|
| 352 |
+ return |
|
| 353 |
+ } |
|
| 354 |
+ hash := tarSum.Sum(nil) |
|
| 355 |
+ r.Close() |
|
| 356 |
+ return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
|
|
| 357 |
+} |
|
| 360 | 358 |
|
| 361 |
- // If the destination is a directory, figure out the filename. |
|
| 362 |
- if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
|
|
| 363 |
- u, err := url.Parse(origPath) |
|
| 364 |
- if err != nil {
|
|
| 365 |
- return err |
|
| 366 |
- } |
|
| 367 |
- path := filepath.FromSlash(u.Path) // Ensure in platform semantics |
|
| 368 |
- if strings.HasSuffix(path, string(os.PathSeparator)) {
|
|
| 369 |
- path = path[:len(path)-1] |
|
| 370 |
- } |
|
| 371 |
- parts := strings.Split(path, string(os.PathSeparator)) |
|
| 372 |
- filename := parts[len(parts)-1] |
|
| 373 |
- if filename == "" {
|
|
| 374 |
- return fmt.Errorf("cannot determine filename from url: %s", u)
|
|
| 375 |
- } |
|
| 376 |
- ci.destPath = ci.destPath + filename |
|
| 377 |
- } |
|
| 359 |
+func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) {
|
|
| 378 | 360 |
|
| 379 |
- // Calc the checksum, even if we're using the cache |
|
| 380 |
- r, err := archive.Tar(tmpFileName, archive.Uncompressed) |
|
| 381 |
- if err != nil {
|
|
| 382 |
- return err |
|
| 383 |
- } |
|
| 384 |
- tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) |
|
| 385 |
- if err != nil {
|
|
| 386 |
- return err |
|
| 387 |
- } |
|
| 388 |
- if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
|
|
| 389 |
- return err |
|
| 390 |
- } |
|
| 391 |
- ci.hash = tarSum.Sum(nil) |
|
| 392 |
- r.Close() |
|
| 361 |
+ // Work in daemon-specific OS filepath semantics |
|
| 362 |
+ origPath = filepath.FromSlash(origPath) |
|
| 393 | 363 |
|
| 394 |
- return nil |
|
| 364 |
+ if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
|
|
| 365 |
+ origPath = origPath[1:] |
|
| 395 | 366 |
} |
| 367 |
+ origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) |
|
| 396 | 368 |
|
| 397 | 369 |
// Deal with wildcards |
| 398 | 370 |
if allowWildcards && containsWildcards(origPath) {
|
| 399 |
- for _, fileInfo := range b.context.GetSums() {
|
|
| 400 |
- if fileInfo.Name() == "" {
|
|
| 401 |
- continue |
|
| 371 |
+ var copyInfos []copyInfo |
|
| 372 |
+ if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error {
|
|
| 373 |
+ if err != nil {
|
|
| 374 |
+ return err |
|
| 375 |
+ } |
|
| 376 |
+ if info.Name() == "" {
|
|
| 377 |
+ // Why are we doing this check? |
|
| 378 |
+ return nil |
|
| 402 | 379 |
} |
| 403 |
- match, _ := filepath.Match(origPath, fileInfo.Name()) |
|
| 404 |
- if !match {
|
|
| 405 |
- continue |
|
| 380 |
+ if match, _ := filepath.Match(origPath, path); !match {
|
|
| 381 |
+ return nil |
|
| 406 | 382 |
} |
| 407 | 383 |
|
| 408 | 384 |
// Note we set allowWildcards to false in case the name has |
| 409 | 385 |
// a * in it |
| 410 |
- calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false) |
|
| 386 |
+ subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) |
|
| 387 |
+ if err != nil {
|
|
| 388 |
+ return err |
|
| 389 |
+ } |
|
| 390 |
+ copyInfos = append(copyInfos, subInfos...) |
|
| 391 |
+ return nil |
|
| 392 |
+ }); err != nil {
|
|
| 393 |
+ return nil, err |
|
| 411 | 394 |
} |
| 412 |
- return nil |
|
| 395 |
+ return copyInfos, nil |
|
| 413 | 396 |
} |
| 414 | 397 |
|
| 415 | 398 |
// Must be a dir or a file |
| 416 | 399 |
|
| 417 |
- if err := b.checkPathForAddition(origPath); err != nil {
|
|
| 418 |
- return err |
|
| 400 |
+ fi, err := b.context.Stat(origPath) |
|
| 401 |
+ if err != nil {
|
|
| 402 |
+ return nil, err |
|
| 419 | 403 |
} |
| 420 |
- fi, _ := os.Stat(filepath.Join(b.contextPath, origPath)) |
|
| 421 | 404 |
|
| 422 |
- ci := copyInfo{}
|
|
| 423 |
- ci.origPath = origPath |
|
| 424 |
- ci.hash = origPath |
|
| 425 |
- ci.destPath = destPath |
|
| 426 |
- ci.decompress = allowDecompression |
|
| 427 |
- *cInfos = append(*cInfos, &ci) |
|
| 405 |
+ copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}}
|
|
| 406 |
+ |
|
| 407 |
+ hfi, handleHash := fi.(builder.Hashed) |
|
| 408 |
+ if !handleHash {
|
|
| 409 |
+ return copyInfos, nil |
|
| 410 |
+ } |
|
| 428 | 411 |
|
| 429 | 412 |
// Deal with the single file case |
| 430 | 413 |
if !fi.IsDir() {
|
| 431 |
- // This will match first file in sums of the archive |
|
| 432 |
- fis := b.context.GetSums().GetFile(ci.origPath) |
|
| 433 |
- if fis != nil {
|
|
| 434 |
- ci.hash = "file:" + fis.Sum() |
|
| 435 |
- } |
|
| 436 |
- return nil |
|
| 414 |
+ hfi.SetHash("file:" + hfi.Hash())
|
|
| 415 |
+ return copyInfos, nil |
|
| 437 | 416 |
} |
| 438 | 417 |
|
| 439 | 418 |
// Must be a dir |
| 419 |
+ |
|
| 440 | 420 |
var subfiles []string |
| 441 |
- absOrigPath := filepath.Join(b.contextPath, ci.origPath) |
|
| 442 |
- |
|
| 443 |
- // Add a trailing / to make sure we only pick up nested files under |
|
| 444 |
- // the dir and not sibling files of the dir that just happen to |
|
| 445 |
- // start with the same chars |
|
| 446 |
- if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
|
|
| 447 |
- absOrigPath += string(os.PathSeparator) |
|
| 448 |
- } |
|
| 449 |
- |
|
| 450 |
- // Need path w/o slash too to find matching dir w/o trailing slash |
|
| 451 |
- absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1] |
|
| 452 |
- |
|
| 453 |
- for _, fileInfo := range b.context.GetSums() {
|
|
| 454 |
- absFile := filepath.Join(b.contextPath, fileInfo.Name()) |
|
| 455 |
- // Any file in the context that starts with the given path will be |
|
| 456 |
- // picked up and its hashcode used. However, we'll exclude the |
|
| 457 |
- // root dir itself. We do this for a coupel of reasons: |
|
| 458 |
- // 1 - ADD/COPY will not copy the dir itself, just its children |
|
| 459 |
- // so there's no reason to include it in the hash calc |
|
| 460 |
- // 2 - the metadata on the dir will change when any child file |
|
| 461 |
- // changes. This will lead to a miss in the cache check if that |
|
| 462 |
- // child file is in the .dockerignore list. |
|
| 463 |
- if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
|
|
| 464 |
- subfiles = append(subfiles, fileInfo.Sum()) |
|
| 421 |
+ b.context.Walk(origPath, func(path string, info builder.FileInfo, err error) error {
|
|
| 422 |
+ if err != nil {
|
|
| 423 |
+ return err |
|
| 465 | 424 |
} |
| 466 |
- } |
|
| 425 |
+ // we already checked handleHash above |
|
| 426 |
+ subfiles = append(subfiles, info.(builder.Hashed).Hash()) |
|
| 427 |
+ return nil |
|
| 428 |
+ }) |
|
| 429 |
+ |
|
| 467 | 430 |
sort.Strings(subfiles) |
| 468 | 431 |
hasher := sha256.New() |
| 469 | 432 |
hasher.Write([]byte(strings.Join(subfiles, ","))) |
| 470 |
- ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) |
|
| 433 |
+ hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil)))
|
|
| 471 | 434 |
|
| 472 |
- return nil |
|
| 435 |
+ return copyInfos, nil |
|
| 473 | 436 |
} |
| 474 | 437 |
|
| 475 | 438 |
func containsWildcards(name string) bool {
|
| ... | ... |
@@ -484,68 +416,30 @@ func containsWildcards(name string) bool {
|
| 484 | 484 |
return false |
| 485 | 485 |
} |
| 486 | 486 |
|
| 487 |
-func (b *builder) pullImage(name string) (*image.Image, error) {
|
|
| 488 |
- remote, tag := parsers.ParseRepositoryTag(name) |
|
| 489 |
- if tag == "" {
|
|
| 490 |
- tag = "latest" |
|
| 491 |
- } |
|
| 492 |
- |
|
| 493 |
- pullRegistryAuth := &cliconfig.AuthConfig{}
|
|
| 494 |
- if len(b.AuthConfigs) > 0 {
|
|
| 495 |
- // The request came with a full auth config file, we prefer to use that |
|
| 496 |
- repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote) |
|
| 497 |
- if err != nil {
|
|
| 498 |
- return nil, err |
|
| 499 |
- } |
|
| 500 |
- |
|
| 501 |
- resolvedConfig := registry.ResolveAuthConfig( |
|
| 502 |
- &cliconfig.ConfigFile{AuthConfigs: b.AuthConfigs},
|
|
| 503 |
- repoInfo.Index, |
|
| 504 |
- ) |
|
| 505 |
- pullRegistryAuth = &resolvedConfig |
|
| 506 |
- } |
|
| 507 |
- |
|
| 508 |
- imagePullConfig := &graph.ImagePullConfig{
|
|
| 509 |
- AuthConfig: pullRegistryAuth, |
|
| 510 |
- OutStream: ioutils.NopWriteCloser(b.OutOld), |
|
| 511 |
- } |
|
| 512 |
- |
|
| 513 |
- if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
|
|
| 514 |
- return nil, err |
|
| 515 |
- } |
|
| 516 |
- |
|
| 517 |
- image, err := b.Daemon.Repositories().LookupImage(name) |
|
| 518 |
- if err != nil {
|
|
| 519 |
- return nil, err |
|
| 520 |
- } |
|
| 521 |
- |
|
| 522 |
- return image, nil |
|
| 523 |
-} |
|
| 524 |
- |
|
| 525 |
-func (b *builder) processImageFrom(img *image.Image) error {
|
|
| 487 |
+func (b *Builder) processImageFrom(img *image.Image) error {
|
|
| 526 | 488 |
b.image = img.ID |
| 527 | 489 |
|
| 528 | 490 |
if img.Config != nil {
|
| 529 |
- b.Config = img.Config |
|
| 491 |
+ b.runConfig = img.Config |
|
| 530 | 492 |
} |
| 531 | 493 |
|
| 532 | 494 |
// The default path will be blank on Windows (set by HCS) |
| 533 |
- if len(b.Config.Env) == 0 && daemon.DefaultPathEnv != "" {
|
|
| 534 |
- b.Config.Env = append(b.Config.Env, "PATH="+daemon.DefaultPathEnv) |
|
| 495 |
+ if len(b.runConfig.Env) == 0 && daemon.DefaultPathEnv != "" {
|
|
| 496 |
+ b.runConfig.Env = append(b.runConfig.Env, "PATH="+daemon.DefaultPathEnv) |
|
| 535 | 497 |
} |
| 536 | 498 |
|
| 537 | 499 |
// Process ONBUILD triggers if they exist |
| 538 |
- if nTriggers := len(b.Config.OnBuild); nTriggers != 0 {
|
|
| 500 |
+ if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 {
|
|
| 539 | 501 |
word := "trigger" |
| 540 | 502 |
if nTriggers > 1 {
|
| 541 | 503 |
word = "triggers" |
| 542 | 504 |
} |
| 543 |
- fmt.Fprintf(b.ErrStream, "# Executing %d build %s...\n", nTriggers, word) |
|
| 505 |
+ fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) |
|
| 544 | 506 |
} |
| 545 | 507 |
|
| 546 | 508 |
// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. |
| 547 |
- onBuildTriggers := b.Config.OnBuild |
|
| 548 |
- b.Config.OnBuild = []string{}
|
|
| 509 |
+ onBuildTriggers := b.runConfig.OnBuild |
|
| 510 |
+ b.runConfig.OnBuild = []string{}
|
|
| 549 | 511 |
|
| 550 | 512 |
// parse the ONBUILD triggers by invoking the parser |
| 551 | 513 |
for _, step := range onBuildTriggers {
|
| ... | ... |
@@ -571,86 +465,86 @@ func (b *builder) processImageFrom(img *image.Image) error {
|
| 571 | 571 |
return nil |
| 572 | 572 |
} |
| 573 | 573 |
|
| 574 |
-// probeCache checks to see if image-caching is enabled (`b.UtilizeCache`) |
|
| 575 |
-// and if so attempts to look up the current `b.image` and `b.Config` pair |
|
| 576 |
-// in the current server `b.Daemon`. If an image is found, probeCache returns |
|
| 577 |
-// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there |
|
| 578 |
-// is any error, it returns `(false, err)`. |
|
| 579 |
-func (b *builder) probeCache() (bool, error) {
|
|
| 580 |
- if !b.UtilizeCache || b.cacheBusted {
|
|
| 574 |
+// probeCache checks if `b.docker` implements builder.ImageCache and image-caching |
|
| 575 |
+// is enabled (`b.UseCache`). |
|
| 576 |
+// If so attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`. |
|
| 577 |
+// If an image is found, probeCache returns `(true, nil)`. |
|
| 578 |
+// If no image is found, it returns `(false, nil)`. |
|
| 579 |
+// If there is any error, it returns `(false, err)`. |
|
| 580 |
+func (b *Builder) probeCache() (bool, error) {
|
|
| 581 |
+ c, ok := b.docker.(builder.ImageCache) |
|
| 582 |
+ if !ok || !b.UseCache || b.cacheBusted {
|
|
| 581 | 583 |
return false, nil |
| 582 | 584 |
} |
| 583 |
- |
|
| 584 |
- cache, err := b.Daemon.ImageGetCached(b.image, b.Config) |
|
| 585 |
+ cache, err := c.GetCachedImage(b.image, b.runConfig) |
|
| 585 | 586 |
if err != nil {
|
| 586 | 587 |
return false, err |
| 587 | 588 |
} |
| 588 |
- if cache == nil {
|
|
| 589 |
+ if len(cache) == 0 {
|
|
| 589 | 590 |
logrus.Debugf("[BUILDER] Cache miss")
|
| 590 | 591 |
b.cacheBusted = true |
| 591 | 592 |
return false, nil |
| 592 | 593 |
} |
| 593 | 594 |
|
| 594 |
- fmt.Fprintf(b.OutStream, " ---> Using cache\n") |
|
| 595 |
+ fmt.Fprintf(b.Stdout, " ---> Using cache\n") |
|
| 595 | 596 |
logrus.Debugf("[BUILDER] Use cached version")
|
| 596 |
- b.image = cache.ID |
|
| 597 |
- b.Daemon.Graph().Retain(b.id, cache.ID) |
|
| 598 |
- b.activeImages = append(b.activeImages, cache.ID) |
|
| 597 |
+ b.image = string(cache) |
|
| 598 |
+ |
|
| 599 |
+ // TODO: remove once Commit can take a tag parameter. |
|
| 600 |
+ b.docker.Retain(b.id, b.image) |
|
| 601 |
+ b.activeImages = append(b.activeImages, b.image) |
|
| 602 |
+ |
|
| 599 | 603 |
return true, nil |
| 600 | 604 |
} |
| 601 | 605 |
|
| 602 |
-func (b *builder) create() (*daemon.Container, error) {
|
|
| 606 |
+func (b *Builder) create() (*daemon.Container, error) {
|
|
| 603 | 607 |
if b.image == "" && !b.noBaseImage {
|
| 604 | 608 |
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
| 605 | 609 |
} |
| 606 |
- b.Config.Image = b.image |
|
| 610 |
+ b.runConfig.Image = b.image |
|
| 607 | 611 |
|
| 612 |
+ // TODO: why not embed a hostconfig in builder? |
|
| 608 | 613 |
hostConfig := &runconfig.HostConfig{
|
| 609 |
- CPUShares: b.cpuShares, |
|
| 610 |
- CPUPeriod: b.cpuPeriod, |
|
| 611 |
- CPUQuota: b.cpuQuota, |
|
| 612 |
- CpusetCpus: b.cpuSetCpus, |
|
| 613 |
- CpusetMems: b.cpuSetMems, |
|
| 614 |
- CgroupParent: b.cgroupParent, |
|
| 615 |
- Memory: b.memory, |
|
| 616 |
- MemorySwap: b.memorySwap, |
|
| 617 |
- Ulimits: b.ulimits, |
|
| 614 |
+ CPUShares: b.CPUShares, |
|
| 615 |
+ CPUPeriod: b.CPUPeriod, |
|
| 616 |
+ CPUQuota: b.CPUQuota, |
|
| 617 |
+ CpusetCpus: b.CPUSetCpus, |
|
| 618 |
+ CpusetMems: b.CPUSetMems, |
|
| 619 |
+ CgroupParent: b.CgroupParent, |
|
| 620 |
+ Memory: b.Memory, |
|
| 621 |
+ MemorySwap: b.MemorySwap, |
|
| 622 |
+ Ulimits: b.Ulimits, |
|
| 618 | 623 |
} |
| 619 | 624 |
|
| 620 |
- config := *b.Config |
|
| 625 |
+ config := *b.runConfig |
|
| 621 | 626 |
|
| 622 | 627 |
// Create the container |
| 623 |
- ccr, err := b.Daemon.ContainerCreate("", b.Config, hostConfig, true)
|
|
| 628 |
+ c, warnings, err := b.docker.Create(b.runConfig, hostConfig) |
|
| 624 | 629 |
if err != nil {
|
| 625 | 630 |
return nil, err |
| 626 | 631 |
} |
| 627 |
- for _, warning := range ccr.Warnings {
|
|
| 628 |
- fmt.Fprintf(b.OutStream, " ---> [Warning] %s\n", warning) |
|
| 629 |
- } |
|
| 630 |
- c, err := b.Daemon.Get(ccr.ID) |
|
| 631 |
- if err != nil {
|
|
| 632 |
- return nil, err |
|
| 632 |
+ defer c.Unmount() |
|
| 633 |
+ for _, warning := range warnings {
|
|
| 634 |
+ fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) |
|
| 633 | 635 |
} |
| 634 | 636 |
|
| 635 |
- b.TmpContainers[c.ID] = struct{}{}
|
|
| 636 |
- fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID)) |
|
| 637 |
+ b.tmpContainers[c.ID] = struct{}{}
|
|
| 638 |
+ fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) |
|
| 637 | 639 |
|
| 638 | 640 |
if config.Cmd.Len() > 0 {
|
| 639 | 641 |
// override the entry point that may have been picked up from the base image |
| 640 | 642 |
s := config.Cmd.Slice() |
| 641 | 643 |
c.Path = s[0] |
| 642 | 644 |
c.Args = s[1:] |
| 643 |
- } else {
|
|
| 644 |
- config.Cmd = stringutils.NewStrSlice() |
|
| 645 | 645 |
} |
| 646 | 646 |
|
| 647 | 647 |
return c, nil |
| 648 | 648 |
} |
| 649 | 649 |
|
| 650 |
-func (b *builder) run(c *daemon.Container) error {
|
|
| 650 |
+func (b *Builder) run(c *daemon.Container) error {
|
|
| 651 | 651 |
var errCh chan error |
| 652 | 652 |
if b.Verbose {
|
| 653 |
- errCh = c.Attach(nil, b.OutStream, b.ErrStream) |
|
| 653 |
+ errCh = c.Attach(nil, b.Stdout, b.Stderr) |
|
| 654 | 654 |
} |
| 655 | 655 |
|
| 656 | 656 |
//start the container |
| ... | ... |
@@ -678,8 +572,9 @@ func (b *builder) run(c *daemon.Container) error {
|
| 678 | 678 |
|
| 679 | 679 |
// Wait for it to finish |
| 680 | 680 |
if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 {
|
| 681 |
+ // TODO: change error type, because jsonmessage.JSONError assumes HTTP |
|
| 681 | 682 |
return &jsonmessage.JSONError{
|
| 682 |
- Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.Config.Cmd.ToString(), ret),
|
|
| 683 |
+ Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", b.runConfig.Cmd.ToString(), ret),
|
|
| 683 | 684 |
Code: ret, |
| 684 | 685 |
} |
| 685 | 686 |
} |
| ... | ... |
@@ -687,125 +582,81 @@ func (b *builder) run(c *daemon.Container) error {
|
| 687 | 687 |
return nil |
| 688 | 688 |
} |
| 689 | 689 |
|
| 690 |
-func (b *builder) checkPathForAddition(orig string) error {
|
|
| 691 |
- origPath := filepath.Join(b.contextPath, orig) |
|
| 692 |
- origPath, err := symlink.EvalSymlinks(origPath) |
|
| 693 |
- if err != nil {
|
|
| 694 |
- if os.IsNotExist(err) {
|
|
| 695 |
- return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 690 |
+func (b *Builder) clearTmp() {
|
|
| 691 |
+ for c := range b.tmpContainers {
|
|
| 692 |
+ rmConfig := &daemon.ContainerRmConfig{
|
|
| 693 |
+ ForceRemove: true, |
|
| 694 |
+ RemoveVolume: true, |
|
| 696 | 695 |
} |
| 697 |
- return err |
|
| 698 |
- } |
|
| 699 |
- contextPath, err := symlink.EvalSymlinks(b.contextPath) |
|
| 700 |
- if err != nil {
|
|
| 701 |
- return err |
|
| 702 |
- } |
|
| 703 |
- if !strings.HasPrefix(origPath, contextPath) {
|
|
| 704 |
- return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
|
| 705 |
- } |
|
| 706 |
- if _, err := os.Stat(origPath); err != nil {
|
|
| 707 |
- if os.IsNotExist(err) {
|
|
| 708 |
- return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 696 |
+ if err := b.docker.Remove(c, rmConfig); err != nil {
|
|
| 697 |
+ fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) |
|
| 698 |
+ return |
|
| 709 | 699 |
} |
| 710 |
- return err |
|
| 700 |
+ delete(b.tmpContainers, c) |
|
| 701 |
+ fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) |
|
| 711 | 702 |
} |
| 712 |
- return nil |
|
| 713 | 703 |
} |
| 714 | 704 |
|
| 715 |
-func (b *builder) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
|
|
| 716 |
- var ( |
|
| 717 |
- err error |
|
| 718 |
- destExists = true |
|
| 719 |
- origPath = filepath.Join(b.contextPath, orig) |
|
| 720 |
- destPath string |
|
| 721 |
- ) |
|
| 722 |
- |
|
| 723 |
- // Work in daemon-local OS specific file paths |
|
| 724 |
- dest = filepath.FromSlash(dest) |
|
| 725 |
- |
|
| 726 |
- destPath, err = container.GetResourcePath(dest) |
|
| 727 |
- if err != nil {
|
|
| 728 |
- return err |
|
| 729 |
- } |
|
| 730 |
- |
|
| 731 |
- // Preserve the trailing slash |
|
| 732 |
- if strings.HasSuffix(dest, string(os.PathSeparator)) || dest == "." {
|
|
| 733 |
- destPath = destPath + string(os.PathSeparator) |
|
| 734 |
- } |
|
| 735 |
- |
|
| 736 |
- destStat, err := os.Stat(destPath) |
|
| 737 |
- if err != nil {
|
|
| 738 |
- if !os.IsNotExist(err) {
|
|
| 739 |
- logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
|
|
| 740 |
- return err |
|
| 705 |
+// readDockerfile reads a Dockerfile from the current context. |
|
| 706 |
+func (b *Builder) readDockerfile() error {
|
|
| 707 |
+ // If no -f was specified then look for 'Dockerfile'. If we can't find |
|
| 708 |
+ // that then look for 'dockerfile'. If neither are found then default |
|
| 709 |
+ // back to 'Dockerfile' and use that in the error message. |
|
| 710 |
+ if b.DockerfileName == "" {
|
|
| 711 |
+ b.DockerfileName = api.DefaultDockerfileName |
|
| 712 |
+ if _, err := b.context.Stat(b.DockerfileName); os.IsNotExist(err) {
|
|
| 713 |
+ lowercase := strings.ToLower(b.DockerfileName) |
|
| 714 |
+ if _, err := b.context.Stat(lowercase); err == nil {
|
|
| 715 |
+ b.DockerfileName = lowercase |
|
| 716 |
+ } |
|
| 741 | 717 |
} |
| 742 |
- destExists = false |
|
| 743 | 718 |
} |
| 744 | 719 |
|
| 745 |
- fi, err := os.Stat(origPath) |
|
| 720 |
+ f, err := b.context.Open(b.DockerfileName) |
|
| 746 | 721 |
if err != nil {
|
| 747 | 722 |
if os.IsNotExist(err) {
|
| 748 |
- return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 723 |
+ return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.DockerfileName)
|
|
| 749 | 724 |
} |
| 750 | 725 |
return err |
| 751 | 726 |
} |
| 752 |
- |
|
| 753 |
- if fi.IsDir() {
|
|
| 754 |
- return copyAsDirectory(origPath, destPath, destExists) |
|
| 755 |
- } |
|
| 756 |
- |
|
| 757 |
- // If we are adding a remote file (or we've been told not to decompress), do not try to untar it |
|
| 758 |
- if decompress {
|
|
| 759 |
- // First try to unpack the source as an archive |
|
| 760 |
- // to support the untar feature we need to clean up the path a little bit |
|
| 761 |
- // because tar is very forgiving. First we need to strip off the archive's |
|
| 762 |
- // filename from the path but this is only added if it does not end in slash |
|
| 763 |
- tarDest := destPath |
|
| 764 |
- if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
|
|
| 765 |
- tarDest = filepath.Dir(destPath) |
|
| 727 |
+ if f, ok := f.(*os.File); ok {
|
|
| 728 |
+ // ignoring error because Open already succeeded |
|
| 729 |
+ fi, err := f.Stat() |
|
| 730 |
+ if err != nil {
|
|
| 731 |
+ return fmt.Errorf("Unexpected error reading Dockerfile: %v", err)
|
|
| 766 | 732 |
} |
| 767 |
- |
|
| 768 |
- // try to successfully untar the orig |
|
| 769 |
- if err := chrootarchive.UntarPath(origPath, tarDest); err == nil {
|
|
| 770 |
- return nil |
|
| 771 |
- } else if err != io.EOF {
|
|
| 772 |
- logrus.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
|
|
| 733 |
+ if fi.Size() == 0 {
|
|
| 734 |
+ return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.DockerfileName)
|
|
| 773 | 735 |
} |
| 774 | 736 |
} |
| 775 |
- |
|
| 776 |
- if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
|
|
| 777 |
- return err |
|
| 778 |
- } |
|
| 779 |
- if err := chrootarchive.CopyWithTar(origPath, destPath); err != nil {
|
|
| 737 |
+ b.dockerfile, err = parser.Parse(f) |
|
| 738 |
+ f.Close() |
|
| 739 |
+ if err != nil {
|
|
| 780 | 740 |
return err |
| 781 | 741 |
} |
| 782 | 742 |
|
| 783 |
- resPath := destPath |
|
| 784 |
- if destExists && destStat.IsDir() {
|
|
| 785 |
- resPath = filepath.Join(destPath, filepath.Base(origPath)) |
|
| 743 |
+ // After the Dockerfile has been parsed, we need to check the .dockerignore |
|
| 744 |
+ // file for either "Dockerfile" or ".dockerignore", and if either are |
|
| 745 |
+ // present then erase them from the build context. These files should never |
|
| 746 |
+ // have been sent from the client but we did send them to make sure that |
|
| 747 |
+ // we had the Dockerfile to actually parse, and then we also need the |
|
| 748 |
+ // .dockerignore file to know whether either file should be removed. |
|
| 749 |
+ // Note that this assumes the Dockerfile has been read into memory and |
|
| 750 |
+ // is now safe to be removed. |
|
| 751 |
+ if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok {
|
|
| 752 |
+ dockerIgnore.Process([]string{b.DockerfileName})
|
|
| 786 | 753 |
} |
| 787 |
- |
|
| 788 |
- return fixPermissions(origPath, resPath, 0, 0, destExists) |
|
| 754 |
+ return nil |
|
| 789 | 755 |
} |
| 790 | 756 |
|
| 791 |
-func copyAsDirectory(source, destination string, destExisted bool) error {
|
|
| 792 |
- if err := chrootarchive.CopyWithTar(source, destination); err != nil {
|
|
| 793 |
- return err |
|
| 757 |
+// determine if build arg is part of built-in args or user |
|
| 758 |
+// defined args in Dockerfile at any point in time. |
|
| 759 |
+func (b *Builder) isBuildArgAllowed(arg string) bool {
|
|
| 760 |
+ if _, ok := BuiltinAllowedBuildArgs[arg]; ok {
|
|
| 761 |
+ return true |
|
| 794 | 762 |
} |
| 795 |
- return fixPermissions(source, destination, 0, 0, destExisted) |
|
| 796 |
-} |
|
| 797 |
- |
|
| 798 |
-func (b *builder) clearTmp() {
|
|
| 799 |
- for c := range b.TmpContainers {
|
|
| 800 |
- rmConfig := &daemon.ContainerRmConfig{
|
|
| 801 |
- ForceRemove: true, |
|
| 802 |
- RemoveVolume: true, |
|
| 803 |
- } |
|
| 804 |
- if err := b.Daemon.ContainerRm(c, rmConfig); err != nil {
|
|
| 805 |
- fmt.Fprintf(b.OutStream, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) |
|
| 806 |
- return |
|
| 807 |
- } |
|
| 808 |
- delete(b.TmpContainers, c) |
|
| 809 |
- fmt.Fprintf(b.OutStream, "Removing intermediate container %s\n", stringid.TruncateID(c)) |
|
| 763 |
+ if _, ok := b.allowedBuildArgs[arg]; ok {
|
|
| 764 |
+ return true |
|
| 810 | 765 |
} |
| 766 |
+ return false |
|
| 811 | 767 |
} |
| ... | ... |
@@ -1,17 +1,12 @@ |
| 1 |
-// +build freebsd linux |
|
| 1 |
+// +build !windows |
|
| 2 | 2 |
|
| 3 | 3 |
package dockerfile |
| 4 | 4 |
|
| 5 | 5 |
import ( |
| 6 |
- "io/ioutil" |
|
| 7 | 6 |
"os" |
| 8 | 7 |
"path/filepath" |
| 9 | 8 |
) |
| 10 | 9 |
|
| 11 |
-func getTempDir(dir, prefix string) (string, error) {
|
|
| 12 |
- return ioutil.TempDir(dir, prefix) |
|
| 13 |
-} |
|
| 14 |
- |
|
| 15 | 10 |
func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
|
| 16 | 11 |
// If the destination didn't already exist, or the destination isn't a |
| 17 | 12 |
// directory, then we should Lchown the destination. Otherwise, we shouldn't |
| ... | ... |
@@ -2,20 +2,6 @@ |
| 2 | 2 |
|
| 3 | 3 |
package dockerfile |
| 4 | 4 |
|
| 5 |
-import ( |
|
| 6 |
- "io/ioutil" |
|
| 7 |
- |
|
| 8 |
- "github.com/docker/docker/pkg/longpath" |
|
| 9 |
-) |
|
| 10 |
- |
|
| 11 |
-func getTempDir(dir, prefix string) (string, error) {
|
|
| 12 |
- tempDir, err := ioutil.TempDir(dir, prefix) |
|
| 13 |
- if err != nil {
|
|
| 14 |
- return "", err |
|
| 15 |
- } |
|
| 16 |
- return longpath.AddPrefix(tempDir), nil |
|
| 17 |
-} |
|
| 18 |
- |
|
| 19 | 5 |
func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
|
| 20 | 6 |
// chown is not supported on Windows |
| 21 | 7 |
return nil |
| 22 | 8 |
deleted file mode 100644 |
| ... | ... |
@@ -1,376 +0,0 @@ |
| 1 |
-package dockerfile |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "errors" |
|
| 6 |
- "fmt" |
|
| 7 |
- "io" |
|
| 8 |
- "io/ioutil" |
|
| 9 |
- "os" |
|
| 10 |
- "runtime" |
|
| 11 |
- "strings" |
|
| 12 |
- "sync" |
|
| 13 |
- |
|
| 14 |
- "github.com/docker/docker/api" |
|
| 15 |
- "github.com/docker/docker/builder/dockerfile/parser" |
|
| 16 |
- "github.com/docker/docker/cliconfig" |
|
| 17 |
- "github.com/docker/docker/daemon" |
|
| 18 |
- "github.com/docker/docker/graph/tags" |
|
| 19 |
- "github.com/docker/docker/pkg/archive" |
|
| 20 |
- "github.com/docker/docker/pkg/httputils" |
|
| 21 |
- "github.com/docker/docker/pkg/parsers" |
|
| 22 |
- "github.com/docker/docker/pkg/progressreader" |
|
| 23 |
- "github.com/docker/docker/pkg/streamformatter" |
|
| 24 |
- "github.com/docker/docker/pkg/stringid" |
|
| 25 |
- "github.com/docker/docker/pkg/ulimit" |
|
| 26 |
- "github.com/docker/docker/pkg/urlutil" |
|
| 27 |
- "github.com/docker/docker/registry" |
|
| 28 |
- "github.com/docker/docker/runconfig" |
|
| 29 |
- "github.com/docker/docker/utils" |
|
| 30 |
-) |
|
| 31 |
- |
|
| 32 |
-// When downloading remote contexts, limit the amount (in bytes) |
|
| 33 |
-// to be read from the response body in order to detect its Content-Type |
|
| 34 |
-const maxPreambleLength = 100 |
|
| 35 |
- |
|
| 36 |
-// whitelist of commands allowed for a commit/import |
|
| 37 |
-var validCommitCommands = map[string]bool{
|
|
| 38 |
- "cmd": true, |
|
| 39 |
- "entrypoint": true, |
|
| 40 |
- "env": true, |
|
| 41 |
- "expose": true, |
|
| 42 |
- "label": true, |
|
| 43 |
- "onbuild": true, |
|
| 44 |
- "user": true, |
|
| 45 |
- "volume": true, |
|
| 46 |
- "workdir": true, |
|
| 47 |
-} |
|
| 48 |
- |
|
| 49 |
-// BuiltinAllowedBuildArgs is list of built-in allowed build args |
|
| 50 |
-var BuiltinAllowedBuildArgs = map[string]bool{
|
|
| 51 |
- "HTTP_PROXY": true, |
|
| 52 |
- "http_proxy": true, |
|
| 53 |
- "HTTPS_PROXY": true, |
|
| 54 |
- "https_proxy": true, |
|
| 55 |
- "FTP_PROXY": true, |
|
| 56 |
- "ftp_proxy": true, |
|
| 57 |
- "NO_PROXY": true, |
|
| 58 |
- "no_proxy": true, |
|
| 59 |
-} |
|
| 60 |
- |
|
| 61 |
-// Config contains all configs for a build job |
|
| 62 |
-type Config struct {
|
|
| 63 |
- DockerfileName string |
|
| 64 |
- RemoteURL string |
|
| 65 |
- RepoName string |
|
| 66 |
- SuppressOutput bool |
|
| 67 |
- NoCache bool |
|
| 68 |
- Remove bool |
|
| 69 |
- ForceRemove bool |
|
| 70 |
- Pull bool |
|
| 71 |
- Memory int64 |
|
| 72 |
- MemorySwap int64 |
|
| 73 |
- CPUShares int64 |
|
| 74 |
- CPUPeriod int64 |
|
| 75 |
- CPUQuota int64 |
|
| 76 |
- CPUSetCpus string |
|
| 77 |
- CPUSetMems string |
|
| 78 |
- CgroupParent string |
|
| 79 |
- Ulimits []*ulimit.Ulimit |
|
| 80 |
- AuthConfigs map[string]cliconfig.AuthConfig |
|
| 81 |
- BuildArgs map[string]string |
|
| 82 |
- |
|
| 83 |
- Stdout io.Writer |
|
| 84 |
- Context io.ReadCloser |
|
| 85 |
- // When closed, the job has been cancelled. |
|
| 86 |
- // Note: not all jobs implement cancellation. |
|
| 87 |
- // See Job.Cancel() and Job.WaitCancelled() |
|
| 88 |
- cancelled chan struct{}
|
|
| 89 |
- cancelOnce sync.Once |
|
| 90 |
-} |
|
| 91 |
- |
|
| 92 |
-// Cancel signals the build job to cancel |
|
| 93 |
-func (b *Config) Cancel() {
|
|
| 94 |
- b.cancelOnce.Do(func() {
|
|
| 95 |
- close(b.cancelled) |
|
| 96 |
- }) |
|
| 97 |
-} |
|
| 98 |
- |
|
| 99 |
-// WaitCancelled returns a channel which is closed ("never blocks") when
|
|
| 100 |
-// the job is cancelled. |
|
| 101 |
-func (b *Config) WaitCancelled() <-chan struct{} {
|
|
| 102 |
- return b.cancelled |
|
| 103 |
-} |
|
| 104 |
- |
|
| 105 |
-// NewBuildConfig returns a new Config struct |
|
| 106 |
-func NewBuildConfig() *Config {
|
|
| 107 |
- return &Config{
|
|
| 108 |
- AuthConfigs: map[string]cliconfig.AuthConfig{},
|
|
| 109 |
- cancelled: make(chan struct{}),
|
|
| 110 |
- } |
|
| 111 |
-} |
|
| 112 |
- |
|
| 113 |
-// Build is the main interface of the package, it gathers the Builder |
|
| 114 |
-// struct and calls builder.Run() to do all the real build job. |
|
| 115 |
-func Build(d *daemon.Daemon, buildConfig *Config) error {
|
|
| 116 |
- var ( |
|
| 117 |
- repoName string |
|
| 118 |
- tag string |
|
| 119 |
- context io.ReadCloser |
|
| 120 |
- ) |
|
| 121 |
- sf := streamformatter.NewJSONStreamFormatter() |
|
| 122 |
- |
|
| 123 |
- repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName) |
|
| 124 |
- if repoName != "" {
|
|
| 125 |
- if err := registry.ValidateRepositoryName(repoName); err != nil {
|
|
| 126 |
- return err |
|
| 127 |
- } |
|
| 128 |
- if len(tag) > 0 {
|
|
| 129 |
- if err := tags.ValidateTagName(tag); err != nil {
|
|
| 130 |
- return err |
|
| 131 |
- } |
|
| 132 |
- } |
|
| 133 |
- } |
|
| 134 |
- |
|
| 135 |
- if buildConfig.RemoteURL == "" {
|
|
| 136 |
- context = ioutil.NopCloser(buildConfig.Context) |
|
| 137 |
- } else if urlutil.IsGitURL(buildConfig.RemoteURL) {
|
|
| 138 |
- root, err := utils.GitClone(buildConfig.RemoteURL) |
|
| 139 |
- if err != nil {
|
|
| 140 |
- return err |
|
| 141 |
- } |
|
| 142 |
- defer os.RemoveAll(root) |
|
| 143 |
- |
|
| 144 |
- c, err := archive.Tar(root, archive.Uncompressed) |
|
| 145 |
- if err != nil {
|
|
| 146 |
- return err |
|
| 147 |
- } |
|
| 148 |
- context = c |
|
| 149 |
- } else if urlutil.IsURL(buildConfig.RemoteURL) {
|
|
| 150 |
- f, err := httputils.Download(buildConfig.RemoteURL) |
|
| 151 |
- if err != nil {
|
|
| 152 |
- return fmt.Errorf("Error downloading remote context %s: %v", buildConfig.RemoteURL, err)
|
|
| 153 |
- } |
|
| 154 |
- defer f.Body.Close() |
|
| 155 |
- ct := f.Header.Get("Content-Type")
|
|
| 156 |
- clen := f.ContentLength |
|
| 157 |
- contentType, bodyReader, err := inspectResponse(ct, f.Body, clen) |
|
| 158 |
- |
|
| 159 |
- defer bodyReader.Close() |
|
| 160 |
- |
|
| 161 |
- if err != nil {
|
|
| 162 |
- return fmt.Errorf("Error detecting content type for remote %s: %v", buildConfig.RemoteURL, err)
|
|
| 163 |
- } |
|
| 164 |
- if contentType == httputils.MimeTypes.TextPlain {
|
|
| 165 |
- dockerFile, err := ioutil.ReadAll(bodyReader) |
|
| 166 |
- if err != nil {
|
|
| 167 |
- return err |
|
| 168 |
- } |
|
| 169 |
- |
|
| 170 |
- // When we're downloading just a Dockerfile put it in |
|
| 171 |
- // the default name - don't allow the client to move/specify it |
|
| 172 |
- buildConfig.DockerfileName = api.DefaultDockerfileName |
|
| 173 |
- |
|
| 174 |
- c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile)) |
|
| 175 |
- if err != nil {
|
|
| 176 |
- return err |
|
| 177 |
- } |
|
| 178 |
- context = c |
|
| 179 |
- } else {
|
|
| 180 |
- // Pass through - this is a pre-packaged context, presumably |
|
| 181 |
- // with a Dockerfile with the right name inside it. |
|
| 182 |
- prCfg := progressreader.Config{
|
|
| 183 |
- In: bodyReader, |
|
| 184 |
- Out: buildConfig.Stdout, |
|
| 185 |
- Formatter: sf, |
|
| 186 |
- Size: clen, |
|
| 187 |
- NewLines: true, |
|
| 188 |
- ID: "Downloading context", |
|
| 189 |
- Action: buildConfig.RemoteURL, |
|
| 190 |
- } |
|
| 191 |
- context = progressreader.New(prCfg) |
|
| 192 |
- } |
|
| 193 |
- } |
|
| 194 |
- |
|
| 195 |
- defer context.Close() |
|
| 196 |
- |
|
| 197 |
- builder := &builder{
|
|
| 198 |
- Daemon: d, |
|
| 199 |
- OutStream: &streamformatter.StdoutFormatter{
|
|
| 200 |
- Writer: buildConfig.Stdout, |
|
| 201 |
- StreamFormatter: sf, |
|
| 202 |
- }, |
|
| 203 |
- ErrStream: &streamformatter.StderrFormatter{
|
|
| 204 |
- Writer: buildConfig.Stdout, |
|
| 205 |
- StreamFormatter: sf, |
|
| 206 |
- }, |
|
| 207 |
- Verbose: !buildConfig.SuppressOutput, |
|
| 208 |
- UtilizeCache: !buildConfig.NoCache, |
|
| 209 |
- Remove: buildConfig.Remove, |
|
| 210 |
- ForceRemove: buildConfig.ForceRemove, |
|
| 211 |
- Pull: buildConfig.Pull, |
|
| 212 |
- OutOld: buildConfig.Stdout, |
|
| 213 |
- StreamFormatter: sf, |
|
| 214 |
- AuthConfigs: buildConfig.AuthConfigs, |
|
| 215 |
- dockerfileName: buildConfig.DockerfileName, |
|
| 216 |
- cpuShares: buildConfig.CPUShares, |
|
| 217 |
- cpuPeriod: buildConfig.CPUPeriod, |
|
| 218 |
- cpuQuota: buildConfig.CPUQuota, |
|
| 219 |
- cpuSetCpus: buildConfig.CPUSetCpus, |
|
| 220 |
- cpuSetMems: buildConfig.CPUSetMems, |
|
| 221 |
- cgroupParent: buildConfig.CgroupParent, |
|
| 222 |
- memory: buildConfig.Memory, |
|
| 223 |
- memorySwap: buildConfig.MemorySwap, |
|
| 224 |
- ulimits: buildConfig.Ulimits, |
|
| 225 |
- cancelled: buildConfig.WaitCancelled(), |
|
| 226 |
- id: stringid.GenerateRandomID(), |
|
| 227 |
- buildArgs: buildConfig.BuildArgs, |
|
| 228 |
- allowedBuildArgs: make(map[string]bool), |
|
| 229 |
- } |
|
| 230 |
- |
|
| 231 |
- defer func() {
|
|
| 232 |
- builder.Daemon.Graph().Release(builder.id, builder.activeImages...) |
|
| 233 |
- }() |
|
| 234 |
- |
|
| 235 |
- id, err := builder.Run(context) |
|
| 236 |
- if err != nil {
|
|
| 237 |
- return err |
|
| 238 |
- } |
|
| 239 |
- if repoName != "" {
|
|
| 240 |
- return d.Repositories().Tag(repoName, tag, id, true) |
|
| 241 |
- } |
|
| 242 |
- return nil |
|
| 243 |
-} |
|
| 244 |
- |
|
| 245 |
-// BuildFromConfig will do build directly from parameter 'changes', which comes |
|
| 246 |
-// from Dockerfile entries, it will: |
|
| 247 |
-// |
|
| 248 |
-// - call parse.Parse() to get AST root from Dockerfile entries |
|
| 249 |
-// - do build by calling builder.dispatch() to call all entries' handling routines |
|
| 250 |
-func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) {
|
|
| 251 |
- ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) |
|
| 252 |
- if err != nil {
|
|
| 253 |
- return nil, err |
|
| 254 |
- } |
|
| 255 |
- |
|
| 256 |
- // ensure that the commands are valid |
|
| 257 |
- for _, n := range ast.Children {
|
|
| 258 |
- if !validCommitCommands[n.Value] {
|
|
| 259 |
- return nil, fmt.Errorf("%s is not a valid change command", n.Value)
|
|
| 260 |
- } |
|
| 261 |
- } |
|
| 262 |
- |
|
| 263 |
- builder := &builder{
|
|
| 264 |
- Daemon: d, |
|
| 265 |
- Config: c, |
|
| 266 |
- OutStream: ioutil.Discard, |
|
| 267 |
- ErrStream: ioutil.Discard, |
|
| 268 |
- disableCommit: true, |
|
| 269 |
- } |
|
| 270 |
- |
|
| 271 |
- for i, n := range ast.Children {
|
|
| 272 |
- if err := builder.dispatch(i, n); err != nil {
|
|
| 273 |
- return nil, err |
|
| 274 |
- } |
|
| 275 |
- } |
|
| 276 |
- |
|
| 277 |
- return builder.Config, nil |
|
| 278 |
-} |
|
| 279 |
- |
|
| 280 |
-// CommitConfig contains build configs for commit operation |
|
| 281 |
-type CommitConfig struct {
|
|
| 282 |
- Pause bool |
|
| 283 |
- Repo string |
|
| 284 |
- Tag string |
|
| 285 |
- Author string |
|
| 286 |
- Comment string |
|
| 287 |
- Changes []string |
|
| 288 |
- Config *runconfig.Config |
|
| 289 |
-} |
|
| 290 |
- |
|
| 291 |
-// Commit will create a new image from a container's changes |
|
| 292 |
-func Commit(name string, d *daemon.Daemon, c *CommitConfig) (string, error) {
|
|
| 293 |
- container, err := d.Get(name) |
|
| 294 |
- if err != nil {
|
|
| 295 |
- return "", err |
|
| 296 |
- } |
|
| 297 |
- |
|
| 298 |
- // It is not possible to commit a running container on Windows |
|
| 299 |
- if runtime.GOOS == "windows" && container.IsRunning() {
|
|
| 300 |
- return "", fmt.Errorf("Windows does not support commit of a running container")
|
|
| 301 |
- } |
|
| 302 |
- |
|
| 303 |
- if c.Config == nil {
|
|
| 304 |
- c.Config = &runconfig.Config{}
|
|
| 305 |
- } |
|
| 306 |
- |
|
| 307 |
- newConfig, err := BuildFromConfig(d, c.Config, c.Changes) |
|
| 308 |
- if err != nil {
|
|
| 309 |
- return "", err |
|
| 310 |
- } |
|
| 311 |
- |
|
| 312 |
- if err := runconfig.Merge(newConfig, container.Config); err != nil {
|
|
| 313 |
- return "", err |
|
| 314 |
- } |
|
| 315 |
- |
|
| 316 |
- commitCfg := &daemon.ContainerCommitConfig{
|
|
| 317 |
- Pause: c.Pause, |
|
| 318 |
- Repo: c.Repo, |
|
| 319 |
- Tag: c.Tag, |
|
| 320 |
- Author: c.Author, |
|
| 321 |
- Comment: c.Comment, |
|
| 322 |
- Config: newConfig, |
|
| 323 |
- } |
|
| 324 |
- |
|
| 325 |
- img, err := d.Commit(container, commitCfg) |
|
| 326 |
- if err != nil {
|
|
| 327 |
- return "", err |
|
| 328 |
- } |
|
| 329 |
- |
|
| 330 |
- return img.ID, nil |
|
| 331 |
-} |
|
| 332 |
- |
|
| 333 |
-// inspectResponse looks into the http response data at r to determine whether its |
|
| 334 |
-// content-type is on the list of acceptable content types for remote build contexts. |
|
| 335 |
-// This function returns: |
|
| 336 |
-// - a string representation of the detected content-type |
|
| 337 |
-// - an io.Reader for the response body |
|
| 338 |
-// - an error value which will be non-nil either when something goes wrong while |
|
| 339 |
-// reading bytes from r or when the detected content-type is not acceptable. |
|
| 340 |
-func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
|
|
| 341 |
- plen := clen |
|
| 342 |
- if plen <= 0 || plen > maxPreambleLength {
|
|
| 343 |
- plen = maxPreambleLength |
|
| 344 |
- } |
|
| 345 |
- |
|
| 346 |
- preamble := make([]byte, plen, plen) |
|
| 347 |
- rlen, err := r.Read(preamble) |
|
| 348 |
- if rlen == 0 {
|
|
| 349 |
- return ct, r, errors.New("Empty response")
|
|
| 350 |
- } |
|
| 351 |
- if err != nil && err != io.EOF {
|
|
| 352 |
- return ct, r, err |
|
| 353 |
- } |
|
| 354 |
- |
|
| 355 |
- preambleR := bytes.NewReader(preamble) |
|
| 356 |
- bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) |
|
| 357 |
- // Some web servers will use application/octet-stream as the default |
|
| 358 |
- // content type for files without an extension (e.g. 'Dockerfile') |
|
| 359 |
- // so if we receive this value we better check for text content |
|
| 360 |
- contentType := ct |
|
| 361 |
- if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
|
|
| 362 |
- contentType, _, err = httputils.DetectContentType(preamble) |
|
| 363 |
- if err != nil {
|
|
| 364 |
- return contentType, bodyReader, err |
|
| 365 |
- } |
|
| 366 |
- } |
|
| 367 |
- |
|
| 368 |
- contentType = selectAcceptableMIME(contentType) |
|
| 369 |
- var cterr error |
|
| 370 |
- if len(contentType) == 0 {
|
|
| 371 |
- cterr = fmt.Errorf("unsupported Content-Type %q", ct)
|
|
| 372 |
- contentType = ct |
|
| 373 |
- } |
|
| 374 |
- |
|
| 375 |
- return contentType, bodyReader, cterr |
|
| 376 |
-} |
| 377 | 1 |
deleted file mode 100644 |
| ... | ... |
@@ -1,113 +0,0 @@ |
| 1 |
-package dockerfile |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "bytes" |
|
| 5 |
- "io/ioutil" |
|
| 6 |
- "testing" |
|
| 7 |
-) |
|
| 8 |
- |
|
| 9 |
-var textPlainDockerfile = "FROM busybox" |
|
| 10 |
-var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic
|
|
| 11 |
- |
|
| 12 |
-func TestInspectEmptyResponse(t *testing.T) {
|
|
| 13 |
- ct := "application/octet-stream" |
|
| 14 |
- br := ioutil.NopCloser(bytes.NewReader([]byte("")))
|
|
| 15 |
- contentType, bReader, err := inspectResponse(ct, br, 0) |
|
| 16 |
- if err == nil {
|
|
| 17 |
- t.Fatalf("Should have generated an error for an empty response")
|
|
| 18 |
- } |
|
| 19 |
- if contentType != "application/octet-stream" {
|
|
| 20 |
- t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 21 |
- } |
|
| 22 |
- body, err := ioutil.ReadAll(bReader) |
|
| 23 |
- if err != nil {
|
|
| 24 |
- t.Fatal(err) |
|
| 25 |
- } |
|
| 26 |
- if len(body) != 0 {
|
|
| 27 |
- t.Fatal("response body should remain empty")
|
|
| 28 |
- } |
|
| 29 |
-} |
|
| 30 |
- |
|
| 31 |
-func TestInspectResponseBinary(t *testing.T) {
|
|
| 32 |
- ct := "application/octet-stream" |
|
| 33 |
- br := ioutil.NopCloser(bytes.NewReader(binaryContext)) |
|
| 34 |
- contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) |
|
| 35 |
- if err != nil {
|
|
| 36 |
- t.Fatal(err) |
|
| 37 |
- } |
|
| 38 |
- if contentType != "application/octet-stream" {
|
|
| 39 |
- t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 40 |
- } |
|
| 41 |
- body, err := ioutil.ReadAll(bReader) |
|
| 42 |
- if err != nil {
|
|
| 43 |
- t.Fatal(err) |
|
| 44 |
- } |
|
| 45 |
- if len(body) != len(binaryContext) {
|
|
| 46 |
- t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body))
|
|
| 47 |
- } |
|
| 48 |
- for i := range body {
|
|
| 49 |
- if body[i] != binaryContext[i] {
|
|
| 50 |
- t.Fatalf("Corrupted response body at byte index %d", i)
|
|
| 51 |
- } |
|
| 52 |
- } |
|
| 53 |
-} |
|
| 54 |
- |
|
| 55 |
-func TestResponseUnsupportedContentType(t *testing.T) {
|
|
| 56 |
- content := []byte(textPlainDockerfile) |
|
| 57 |
- ct := "application/json" |
|
| 58 |
- br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 59 |
- contentType, bReader, err := inspectResponse(ct, br, int64(len(textPlainDockerfile))) |
|
| 60 |
- |
|
| 61 |
- if err == nil {
|
|
| 62 |
- t.Fatal("Should have returned an error on content-type 'application/json'")
|
|
| 63 |
- } |
|
| 64 |
- if contentType != ct {
|
|
| 65 |
- t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType)
|
|
| 66 |
- } |
|
| 67 |
- body, err := ioutil.ReadAll(bReader) |
|
| 68 |
- if err != nil {
|
|
| 69 |
- t.Fatal(err) |
|
| 70 |
- } |
|
| 71 |
- if string(body) != textPlainDockerfile {
|
|
| 72 |
- t.Fatalf("Corrupted response body %s", body)
|
|
| 73 |
- } |
|
| 74 |
-} |
|
| 75 |
- |
|
| 76 |
-func TestInspectResponseTextSimple(t *testing.T) {
|
|
| 77 |
- content := []byte(textPlainDockerfile) |
|
| 78 |
- ct := "text/plain" |
|
| 79 |
- br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 80 |
- contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) |
|
| 81 |
- if err != nil {
|
|
| 82 |
- t.Fatal(err) |
|
| 83 |
- } |
|
| 84 |
- if contentType != "text/plain" {
|
|
| 85 |
- t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 86 |
- } |
|
| 87 |
- body, err := ioutil.ReadAll(bReader) |
|
| 88 |
- if err != nil {
|
|
| 89 |
- t.Fatal(err) |
|
| 90 |
- } |
|
| 91 |
- if string(body) != textPlainDockerfile {
|
|
| 92 |
- t.Fatalf("Corrupted response body %s", body)
|
|
| 93 |
- } |
|
| 94 |
-} |
|
| 95 |
- |
|
| 96 |
-func TestInspectResponseEmptyContentType(t *testing.T) {
|
|
| 97 |
- content := []byte(textPlainDockerfile) |
|
| 98 |
- br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 99 |
- contentType, bodyReader, err := inspectResponse("", br, int64(len(content)))
|
|
| 100 |
- if err != nil {
|
|
| 101 |
- t.Fatal(err) |
|
| 102 |
- } |
|
| 103 |
- if contentType != "text/plain" {
|
|
| 104 |
- t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 105 |
- } |
|
| 106 |
- body, err := ioutil.ReadAll(bodyReader) |
|
| 107 |
- if err != nil {
|
|
| 108 |
- t.Fatal(err) |
|
| 109 |
- } |
|
| 110 |
- if string(body) != textPlainDockerfile {
|
|
| 111 |
- t.Fatalf("Corrupted response body %s", body)
|
|
| 112 |
- } |
|
| 113 |
-} |
| ... | ... |
@@ -1,17 +1,6 @@ |
| 1 | 1 |
package dockerfile |
| 2 | 2 |
|
| 3 |
-import ( |
|
| 4 |
- "regexp" |
|
| 5 |
- "strings" |
|
| 6 |
-) |
|
| 7 |
- |
|
| 8 |
-const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` |
|
| 9 |
- |
|
| 10 |
-var mimeRe = regexp.MustCompile(acceptableRemoteMIME) |
|
| 11 |
- |
|
| 12 |
-func selectAcceptableMIME(ct string) string {
|
|
| 13 |
- return mimeRe.FindString(ct) |
|
| 14 |
-} |
|
| 3 |
+import "strings" |
|
| 15 | 4 |
|
| 16 | 5 |
func handleJSONArgs(args []string, attributes map[string]bool) []string {
|
| 17 | 6 |
if len(args) == 0 {
|
| 18 | 7 |
deleted file mode 100644 |
| ... | ... |
@@ -1,41 +0,0 @@ |
| 1 |
-package dockerfile |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "fmt" |
|
| 5 |
- "testing" |
|
| 6 |
-) |
|
| 7 |
- |
|
| 8 |
-func TestSelectAcceptableMIME(t *testing.T) {
|
|
| 9 |
- validMimeStrings := []string{
|
|
| 10 |
- "application/x-bzip2", |
|
| 11 |
- "application/bzip2", |
|
| 12 |
- "application/gzip", |
|
| 13 |
- "application/x-gzip", |
|
| 14 |
- "application/x-xz", |
|
| 15 |
- "application/xz", |
|
| 16 |
- "application/tar", |
|
| 17 |
- "application/x-tar", |
|
| 18 |
- "application/octet-stream", |
|
| 19 |
- "text/plain", |
|
| 20 |
- } |
|
| 21 |
- |
|
| 22 |
- invalidMimeStrings := []string{
|
|
| 23 |
- "", |
|
| 24 |
- "application/octet", |
|
| 25 |
- "application/json", |
|
| 26 |
- } |
|
| 27 |
- |
|
| 28 |
- for _, m := range invalidMimeStrings {
|
|
| 29 |
- if len(selectAcceptableMIME(m)) > 0 {
|
|
| 30 |
- err := fmt.Errorf("Should not have accepted %q", m)
|
|
| 31 |
- t.Fatal(err) |
|
| 32 |
- } |
|
| 33 |
- } |
|
| 34 |
- |
|
| 35 |
- for _, m := range validMimeStrings {
|
|
| 36 |
- if str := selectAcceptableMIME(m); str == "" {
|
|
| 37 |
- err := fmt.Errorf("Should have accepted %q", m)
|
|
| 38 |
- t.Fatal(err) |
|
| 39 |
- } |
|
| 40 |
- } |
|
| 41 |
-} |
| 42 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,47 @@ |
| 0 |
+package builder |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "os" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/docker/docker/pkg/fileutils" |
|
| 6 |
+ "github.com/docker/docker/utils" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+// DockerIgnoreContext wraps a ModifiableContext to add a method |
|
| 10 |
+// for handling the .dockerignore file at the root of the context. |
|
| 11 |
+type DockerIgnoreContext struct {
|
|
| 12 |
+ ModifiableContext |
|
| 13 |
+} |
|
| 14 |
+ |
|
| 15 |
+// Process reads the .dockerignore file at the root of the embedded context. |
|
| 16 |
+// If .dockerignore does not exist in the context, then nil is returned. |
|
| 17 |
+// |
|
| 18 |
+// It can take a list of files to be removed after .dockerignore is removed. |
|
| 19 |
+// This is used for server-side implementations of builders that need to send |
|
| 20 |
+// the .dockerignore file as well as the special files specified in filesToRemove, |
|
| 21 |
+// but expect them to be excluded from the context after they were processed. |
|
| 22 |
+// |
|
| 23 |
+// For example, server-side Dockerfile builders are expected to pass in the name |
|
| 24 |
+// of the Dockerfile to be removed after it was parsed. |
|
| 25 |
+// |
|
| 26 |
+// TODO: Don't require a ModifiableContext (use Context instead) and don't remove |
|
| 27 |
+// files, instead handle a list of files to be excluded from the context. |
|
| 28 |
+func (c DockerIgnoreContext) Process(filesToRemove []string) error {
|
|
| 29 |
+ dockerignore, err := c.Open(".dockerignore")
|
|
| 30 |
+ // Note that a missing .dockerignore file isn't treated as an error |
|
| 31 |
+ if err != nil {
|
|
| 32 |
+ if os.IsNotExist(err) {
|
|
| 33 |
+ return nil |
|
| 34 |
+ } |
|
| 35 |
+ return err |
|
| 36 |
+ } |
|
| 37 |
+ excludes, _ := utils.ReadDockerIgnore(dockerignore) |
|
| 38 |
+ filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
|
|
| 39 |
+ for _, fileToRemove := range filesToRemove {
|
|
| 40 |
+ rm, _ := fileutils.Matches(fileToRemove, excludes) |
|
| 41 |
+ if rm {
|
|
| 42 |
+ c.Remove(fileToRemove) |
|
| 43 |
+ } |
|
| 44 |
+ } |
|
| 45 |
+ return nil |
|
| 46 |
+} |
| 0 | 47 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,28 @@ |
| 0 |
+package builder |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "os" |
|
| 4 |
+ |
|
| 5 |
+ "github.com/docker/docker/pkg/archive" |
|
| 6 |
+ "github.com/docker/docker/utils" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. |
|
| 10 |
+func MakeGitContext(gitURL string) (ModifiableContext, error) {
|
|
| 11 |
+ root, err := utils.GitClone(gitURL) |
|
| 12 |
+ if err != nil {
|
|
| 13 |
+ return nil, err |
|
| 14 |
+ } |
|
| 15 |
+ |
|
| 16 |
+ c, err := archive.Tar(root, archive.Uncompressed) |
|
| 17 |
+ if err != nil {
|
|
| 18 |
+ return nil, err |
|
| 19 |
+ } |
|
| 20 |
+ |
|
| 21 |
+ defer func() {
|
|
| 22 |
+ // TODO: print errors? |
|
| 23 |
+ c.Close() |
|
| 24 |
+ os.RemoveAll(root) |
|
| 25 |
+ }() |
|
| 26 |
+ return MakeTarSumContext(c) |
|
| 27 |
+} |
| 0 | 28 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,115 @@ |
| 0 |
+package builder |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "errors" |
|
| 5 |
+ "fmt" |
|
| 6 |
+ "io" |
|
| 7 |
+ "io/ioutil" |
|
| 8 |
+ "regexp" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/docker/docker/pkg/httputils" |
|
| 11 |
+) |
|
| 12 |
+ |
|
| 13 |
+// When downloading remote contexts, limit the amount (in bytes) |
|
| 14 |
+// to be read from the response body in order to detect its Content-Type |
|
| 15 |
+const maxPreambleLength = 100 |
|
| 16 |
+ |
|
| 17 |
+const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` |
|
| 18 |
+ |
|
| 19 |
+var mimeRe = regexp.MustCompile(acceptableRemoteMIME) |
|
| 20 |
+ |
|
| 21 |
+// MakeRemoteContext downloads a context from remoteURL and returns it. |
|
| 22 |
+// |
|
| 23 |
+// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of |
|
| 24 |
+// maxPreambleLength bytes from the body to help detecting the MIME type. |
|
| 25 |
+// Look at acceptableRemoteMIME for more details. |
|
| 26 |
+// |
|
| 27 |
+// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected |
|
| 28 |
+// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). |
|
| 29 |
+// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. |
|
| 30 |
+func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) {
|
|
| 31 |
+ f, err := httputils.Download(remoteURL) |
|
| 32 |
+ if err != nil {
|
|
| 33 |
+ return nil, fmt.Errorf("Error downloading remote context %s: %v", remoteURL, err)
|
|
| 34 |
+ } |
|
| 35 |
+ defer f.Body.Close() |
|
| 36 |
+ |
|
| 37 |
+ var contextReader io.ReadCloser |
|
| 38 |
+ if contentTypeHandlers != nil {
|
|
| 39 |
+ contentType := f.Header.Get("Content-Type")
|
|
| 40 |
+ clen := f.ContentLength |
|
| 41 |
+ |
|
| 42 |
+ contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) |
|
| 43 |
+ if err != nil {
|
|
| 44 |
+ return nil, fmt.Errorf("Error detecting content type for remote %s: %v", remoteURL, err)
|
|
| 45 |
+ } |
|
| 46 |
+ defer contextReader.Close() |
|
| 47 |
+ |
|
| 48 |
+ // This loop tries to find a content-type handler for the detected content-type. |
|
| 49 |
+ // If it could not find one from the caller-supplied map, it tries the empty content-type `""` |
|
| 50 |
+ // which is interpreted as a fallback handler (usually used for raw tar contexts). |
|
| 51 |
+ for _, ct := range []string{contentType, ""} {
|
|
| 52 |
+ if fn, ok := contentTypeHandlers[ct]; ok {
|
|
| 53 |
+ defer contextReader.Close() |
|
| 54 |
+ if contextReader, err = fn(contextReader); err != nil {
|
|
| 55 |
+ return nil, err |
|
| 56 |
+ } |
|
| 57 |
+ break |
|
| 58 |
+ } |
|
| 59 |
+ } |
|
| 60 |
+ } |
|
| 61 |
+ |
|
| 62 |
+ // Pass through - this is a pre-packaged context, presumably |
|
| 63 |
+ // with a Dockerfile with the right name inside it. |
|
| 64 |
+ return MakeTarSumContext(contextReader) |
|
| 65 |
+} |
|
| 66 |
+ |
|
| 67 |
+// inspectResponse looks into the http response data at r to determine whether its |
|
| 68 |
+// content-type is on the list of acceptable content types for remote build contexts. |
|
| 69 |
+// This function returns: |
|
| 70 |
+// - a string representation of the detected content-type |
|
| 71 |
+// - an io.Reader for the response body |
|
| 72 |
+// - an error value which will be non-nil either when something goes wrong while |
|
| 73 |
+// reading bytes from r or when the detected content-type is not acceptable. |
|
| 74 |
+func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) {
|
|
| 75 |
+ plen := clen |
|
| 76 |
+ if plen <= 0 || plen > maxPreambleLength {
|
|
| 77 |
+ plen = maxPreambleLength |
|
| 78 |
+ } |
|
| 79 |
+ |
|
| 80 |
+ preamble := make([]byte, plen, plen) |
|
| 81 |
+ rlen, err := r.Read(preamble) |
|
| 82 |
+ if rlen == 0 {
|
|
| 83 |
+ return ct, r, errors.New("Empty response")
|
|
| 84 |
+ } |
|
| 85 |
+ if err != nil && err != io.EOF {
|
|
| 86 |
+ return ct, r, err |
|
| 87 |
+ } |
|
| 88 |
+ |
|
| 89 |
+ preambleR := bytes.NewReader(preamble) |
|
| 90 |
+ bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) |
|
| 91 |
+ // Some web servers will use application/octet-stream as the default |
|
| 92 |
+ // content type for files without an extension (e.g. 'Dockerfile') |
|
| 93 |
+ // so if we receive this value we better check for text content |
|
| 94 |
+ contentType := ct |
|
| 95 |
+ if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream {
|
|
| 96 |
+ contentType, _, err = httputils.DetectContentType(preamble) |
|
| 97 |
+ if err != nil {
|
|
| 98 |
+ return contentType, bodyReader, err |
|
| 99 |
+ } |
|
| 100 |
+ } |
|
| 101 |
+ |
|
| 102 |
+ contentType = selectAcceptableMIME(contentType) |
|
| 103 |
+ var cterr error |
|
| 104 |
+ if len(contentType) == 0 {
|
|
| 105 |
+ cterr = fmt.Errorf("unsupported Content-Type %q", ct)
|
|
| 106 |
+ contentType = ct |
|
| 107 |
+ } |
|
| 108 |
+ |
|
| 109 |
+ return contentType, bodyReader, cterr |
|
| 110 |
+} |
|
| 111 |
+ |
|
| 112 |
+func selectAcceptableMIME(ct string) string {
|
|
| 113 |
+ return mimeRe.FindString(ct) |
|
| 114 |
+} |
| 0 | 115 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,149 @@ |
| 0 |
+package builder |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "io/ioutil" |
|
| 6 |
+ "testing" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+var textPlainDockerfile = "FROM busybox" |
|
| 10 |
+var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic
|
|
| 11 |
+ |
|
| 12 |
+func TestSelectAcceptableMIME(t *testing.T) {
|
|
| 13 |
+ validMimeStrings := []string{
|
|
| 14 |
+ "application/x-bzip2", |
|
| 15 |
+ "application/bzip2", |
|
| 16 |
+ "application/gzip", |
|
| 17 |
+ "application/x-gzip", |
|
| 18 |
+ "application/x-xz", |
|
| 19 |
+ "application/xz", |
|
| 20 |
+ "application/tar", |
|
| 21 |
+ "application/x-tar", |
|
| 22 |
+ "application/octet-stream", |
|
| 23 |
+ "text/plain", |
|
| 24 |
+ } |
|
| 25 |
+ |
|
| 26 |
+ invalidMimeStrings := []string{
|
|
| 27 |
+ "", |
|
| 28 |
+ "application/octet", |
|
| 29 |
+ "application/json", |
|
| 30 |
+ } |
|
| 31 |
+ |
|
| 32 |
+ for _, m := range invalidMimeStrings {
|
|
| 33 |
+ if len(selectAcceptableMIME(m)) > 0 {
|
|
| 34 |
+ err := fmt.Errorf("Should not have accepted %q", m)
|
|
| 35 |
+ t.Fatal(err) |
|
| 36 |
+ } |
|
| 37 |
+ } |
|
| 38 |
+ |
|
| 39 |
+ for _, m := range validMimeStrings {
|
|
| 40 |
+ if str := selectAcceptableMIME(m); str == "" {
|
|
| 41 |
+ err := fmt.Errorf("Should have accepted %q", m)
|
|
| 42 |
+ t.Fatal(err) |
|
| 43 |
+ } |
|
| 44 |
+ } |
|
| 45 |
+} |
|
| 46 |
+ |
|
| 47 |
+func TestInspectEmptyResponse(t *testing.T) {
|
|
| 48 |
+ ct := "application/octet-stream" |
|
| 49 |
+ br := ioutil.NopCloser(bytes.NewReader([]byte("")))
|
|
| 50 |
+ contentType, bReader, err := inspectResponse(ct, br, 0) |
|
| 51 |
+ if err == nil {
|
|
| 52 |
+ t.Fatalf("Should have generated an error for an empty response")
|
|
| 53 |
+ } |
|
| 54 |
+ if contentType != "application/octet-stream" {
|
|
| 55 |
+ t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 56 |
+ } |
|
| 57 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 58 |
+ if err != nil {
|
|
| 59 |
+ t.Fatal(err) |
|
| 60 |
+ } |
|
| 61 |
+ if len(body) != 0 {
|
|
| 62 |
+ t.Fatal("response body should remain empty")
|
|
| 63 |
+ } |
|
| 64 |
+} |
|
| 65 |
+ |
|
| 66 |
+func TestInspectResponseBinary(t *testing.T) {
|
|
| 67 |
+ ct := "application/octet-stream" |
|
| 68 |
+ br := ioutil.NopCloser(bytes.NewReader(binaryContext)) |
|
| 69 |
+ contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) |
|
| 70 |
+ if err != nil {
|
|
| 71 |
+ t.Fatal(err) |
|
| 72 |
+ } |
|
| 73 |
+ if contentType != "application/octet-stream" {
|
|
| 74 |
+ t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType)
|
|
| 75 |
+ } |
|
| 76 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 77 |
+ if err != nil {
|
|
| 78 |
+ t.Fatal(err) |
|
| 79 |
+ } |
|
| 80 |
+ if len(body) != len(binaryContext) {
|
|
| 81 |
+ t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body))
|
|
| 82 |
+ } |
|
| 83 |
+ for i := range body {
|
|
| 84 |
+ if body[i] != binaryContext[i] {
|
|
| 85 |
+ t.Fatalf("Corrupted response body at byte index %d", i)
|
|
| 86 |
+ } |
|
| 87 |
+ } |
|
| 88 |
+} |
|
| 89 |
+ |
|
| 90 |
+func TestResponseUnsupportedContentType(t *testing.T) {
|
|
| 91 |
+ content := []byte(textPlainDockerfile) |
|
| 92 |
+ ct := "application/json" |
|
| 93 |
+ br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 94 |
+ contentType, bReader, err := inspectResponse(ct, br, int64(len(textPlainDockerfile))) |
|
| 95 |
+ |
|
| 96 |
+ if err == nil {
|
|
| 97 |
+ t.Fatal("Should have returned an error on content-type 'application/json'")
|
|
| 98 |
+ } |
|
| 99 |
+ if contentType != ct {
|
|
| 100 |
+ t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType)
|
|
| 101 |
+ } |
|
| 102 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 103 |
+ if err != nil {
|
|
| 104 |
+ t.Fatal(err) |
|
| 105 |
+ } |
|
| 106 |
+ if string(body) != textPlainDockerfile {
|
|
| 107 |
+ t.Fatalf("Corrupted response body %s", body)
|
|
| 108 |
+ } |
|
| 109 |
+} |
|
| 110 |
+ |
|
| 111 |
+func TestInspectResponseTextSimple(t *testing.T) {
|
|
| 112 |
+ content := []byte(textPlainDockerfile) |
|
| 113 |
+ ct := "text/plain" |
|
| 114 |
+ br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 115 |
+ contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) |
|
| 116 |
+ if err != nil {
|
|
| 117 |
+ t.Fatal(err) |
|
| 118 |
+ } |
|
| 119 |
+ if contentType != "text/plain" {
|
|
| 120 |
+ t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 121 |
+ } |
|
| 122 |
+ body, err := ioutil.ReadAll(bReader) |
|
| 123 |
+ if err != nil {
|
|
| 124 |
+ t.Fatal(err) |
|
| 125 |
+ } |
|
| 126 |
+ if string(body) != textPlainDockerfile {
|
|
| 127 |
+ t.Fatalf("Corrupted response body %s", body)
|
|
| 128 |
+ } |
|
| 129 |
+} |
|
| 130 |
+ |
|
| 131 |
+func TestInspectResponseEmptyContentType(t *testing.T) {
|
|
| 132 |
+ content := []byte(textPlainDockerfile) |
|
| 133 |
+ br := ioutil.NopCloser(bytes.NewReader(content)) |
|
| 134 |
+ contentType, bodyReader, err := inspectResponse("", br, int64(len(content)))
|
|
| 135 |
+ if err != nil {
|
|
| 136 |
+ t.Fatal(err) |
|
| 137 |
+ } |
|
| 138 |
+ if contentType != "text/plain" {
|
|
| 139 |
+ t.Fatalf("Content type should be 'text/plain' but is %q", contentType)
|
|
| 140 |
+ } |
|
| 141 |
+ body, err := ioutil.ReadAll(bodyReader) |
|
| 142 |
+ if err != nil {
|
|
| 143 |
+ t.Fatal(err) |
|
| 144 |
+ } |
|
| 145 |
+ if string(body) != textPlainDockerfile {
|
|
| 146 |
+ t.Fatalf("Corrupted response body %s", body)
|
|
| 147 |
+ } |
|
| 148 |
+} |
| 0 | 149 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,165 @@ |
| 0 |
+package builder |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "io" |
|
| 5 |
+ "os" |
|
| 6 |
+ "path/filepath" |
|
| 7 |
+ "strings" |
|
| 8 |
+ |
|
| 9 |
+ "github.com/docker/docker/pkg/archive" |
|
| 10 |
+ "github.com/docker/docker/pkg/chrootarchive" |
|
| 11 |
+ "github.com/docker/docker/pkg/ioutils" |
|
| 12 |
+ "github.com/docker/docker/pkg/symlink" |
|
| 13 |
+ "github.com/docker/docker/pkg/tarsum" |
|
| 14 |
+) |
|
| 15 |
+ |
|
| 16 |
+type tarSumContext struct {
|
|
| 17 |
+ root string |
|
| 18 |
+ sums tarsum.FileInfoSums |
|
| 19 |
+} |
|
| 20 |
+ |
|
| 21 |
+func (c *tarSumContext) Close() error {
|
|
| 22 |
+ return os.RemoveAll(c.root) |
|
| 23 |
+} |
|
| 24 |
+ |
|
| 25 |
+func convertPathError(err error, cleanpath string) error {
|
|
| 26 |
+ if err, ok := err.(*os.PathError); ok {
|
|
| 27 |
+ err.Path = cleanpath |
|
| 28 |
+ return err |
|
| 29 |
+ } |
|
| 30 |
+ return err |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+func (c *tarSumContext) Open(path string) (io.ReadCloser, error) {
|
|
| 34 |
+ cleanpath, fullpath, err := c.normalize(path) |
|
| 35 |
+ if err != nil {
|
|
| 36 |
+ return nil, err |
|
| 37 |
+ } |
|
| 38 |
+ r, err := os.Open(fullpath) |
|
| 39 |
+ if err != nil {
|
|
| 40 |
+ return nil, convertPathError(err, cleanpath) |
|
| 41 |
+ } |
|
| 42 |
+ return r, nil |
|
| 43 |
+} |
|
| 44 |
+ |
|
| 45 |
+func (c *tarSumContext) Stat(path string) (fi FileInfo, err error) {
|
|
| 46 |
+ cleanpath, fullpath, err := c.normalize(path) |
|
| 47 |
+ if err != nil {
|
|
| 48 |
+ return nil, err |
|
| 49 |
+ } |
|
| 50 |
+ |
|
| 51 |
+ st, err := os.Lstat(fullpath) |
|
| 52 |
+ if err != nil {
|
|
| 53 |
+ return nil, convertPathError(err, cleanpath) |
|
| 54 |
+ } |
|
| 55 |
+ |
|
| 56 |
+ fi = PathFileInfo{st, fullpath}
|
|
| 57 |
+ // we set sum to path by default for the case where GetFile returns nil. |
|
| 58 |
+ // The usual case is if cleanpath is empty. |
|
| 59 |
+ sum := path |
|
| 60 |
+ if tsInfo := c.sums.GetFile(cleanpath); tsInfo != nil {
|
|
| 61 |
+ sum = tsInfo.Sum() |
|
| 62 |
+ } |
|
| 63 |
+ fi = &HashedFileInfo{fi, sum}
|
|
| 64 |
+ return fi, nil |
|
| 65 |
+} |
|
| 66 |
+ |
|
| 67 |
+// MakeTarSumContext returns a build Context from a tar stream. |
|
| 68 |
+// |
|
| 69 |
+// It extracts the tar stream to a temporary folder that is deleted as soon as |
|
| 70 |
+// the Context is closed. |
|
| 71 |
+// As the extraction happens, a tarsum is calculated for every file, and the set of |
|
| 72 |
+// all those sums then becomes the source of truth for all operations on this Context. |
|
| 73 |
+// |
|
| 74 |
+// Closing tarStream has to be done by the caller. |
|
| 75 |
+func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) {
|
|
| 76 |
+ root, err := ioutils.TempDir("", "docker-builder")
|
|
| 77 |
+ if err != nil {
|
|
| 78 |
+ return nil, err |
|
| 79 |
+ } |
|
| 80 |
+ |
|
| 81 |
+ tsc := &tarSumContext{root: root}
|
|
| 82 |
+ |
|
| 83 |
+ // Make sure we clean-up upon error. In the happy case the caller |
|
| 84 |
+ // is expected to manage the clean-up |
|
| 85 |
+ defer func() {
|
|
| 86 |
+ if err != nil {
|
|
| 87 |
+ tsc.Close() |
|
| 88 |
+ } |
|
| 89 |
+ }() |
|
| 90 |
+ |
|
| 91 |
+ decompressedStream, err := archive.DecompressStream(tarStream) |
|
| 92 |
+ if err != nil {
|
|
| 93 |
+ return nil, err |
|
| 94 |
+ } |
|
| 95 |
+ |
|
| 96 |
+ sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) |
|
| 97 |
+ if err != nil {
|
|
| 98 |
+ return nil, err |
|
| 99 |
+ } |
|
| 100 |
+ |
|
| 101 |
+ if err := chrootarchive.Untar(sum, root, nil); err != nil {
|
|
| 102 |
+ return nil, err |
|
| 103 |
+ } |
|
| 104 |
+ |
|
| 105 |
+ tsc.sums = sum.GetSums() |
|
| 106 |
+ |
|
| 107 |
+ return tsc, nil |
|
| 108 |
+} |
|
| 109 |
+ |
|
| 110 |
+func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) {
|
|
| 111 |
+ cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] |
|
| 112 |
+ fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) |
|
| 113 |
+ if err != nil {
|
|
| 114 |
+ return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath)
|
|
| 115 |
+ } |
|
| 116 |
+ _, err = os.Stat(fullpath) |
|
| 117 |
+ if err != nil {
|
|
| 118 |
+ return "", "", convertPathError(err, path) |
|
| 119 |
+ } |
|
| 120 |
+ return |
|
| 121 |
+} |
|
| 122 |
+ |
|
| 123 |
+func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error {
|
|
| 124 |
+ for _, tsInfo := range c.sums {
|
|
| 125 |
+ path := tsInfo.Name() |
|
| 126 |
+ path, fullpath, err := c.normalize(path) |
|
| 127 |
+ if err != nil {
|
|
| 128 |
+ return err |
|
| 129 |
+ } |
|
| 130 |
+ |
|
| 131 |
+ // Any file in the context that starts with the given path will be |
|
| 132 |
+ // picked up and its hashcode used. However, we'll exclude the |
|
| 133 |
+ // root dir itself. We do this for a coupel of reasons: |
|
| 134 |
+ // 1 - ADD/COPY will not copy the dir itself, just its children |
|
| 135 |
+ // so there's no reason to include it in the hash calc |
|
| 136 |
+ // 2 - the metadata on the dir will change when any child file |
|
| 137 |
+ // changes. This will lead to a miss in the cache check if that |
|
| 138 |
+ // child file is in the .dockerignore list. |
|
| 139 |
+ if rel, err := filepath.Rel(root, path); err != nil {
|
|
| 140 |
+ return err |
|
| 141 |
+ } else if rel == "." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
|
| 142 |
+ continue |
|
| 143 |
+ } |
|
| 144 |
+ |
|
| 145 |
+ info, err := os.Lstat(fullpath) |
|
| 146 |
+ if err != nil {
|
|
| 147 |
+ return convertPathError(err, path) |
|
| 148 |
+ } |
|
| 149 |
+ // TODO check context breakout? |
|
| 150 |
+ fi := &HashedFileInfo{PathFileInfo{info, fullpath}, tsInfo.Sum()}
|
|
| 151 |
+ if err := walkFn(path, fi, nil); err != nil {
|
|
| 152 |
+ return err |
|
| 153 |
+ } |
|
| 154 |
+ } |
|
| 155 |
+ return nil |
|
| 156 |
+} |
|
| 157 |
+ |
|
| 158 |
+func (c *tarSumContext) Remove(path string) error {
|
|
| 159 |
+ _, fullpath, err := c.normalize(path) |
|
| 160 |
+ if err != nil {
|
|
| 161 |
+ return err |
|
| 162 |
+ } |
|
| 163 |
+ return os.RemoveAll(fullpath) |
|
| 164 |
+} |
| ... | ... |
@@ -27,7 +27,7 @@ func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hos |
| 27 | 27 |
|
| 28 | 28 |
daemon.adaptContainerSettings(hostConfig, adjustCPUShares) |
| 29 | 29 |
|
| 30 |
- container, buildWarnings, err := daemon.Create(config, hostConfig, name) |
|
| 30 |
+ container, err := daemon.Create(config, hostConfig, name) |
|
| 31 | 31 |
if err != nil {
|
| 32 | 32 |
if daemon.Graph().IsNotExist(err, config.Image) {
|
| 33 | 33 |
if strings.Contains(config.Image, "@") {
|
| ... | ... |
@@ -42,16 +42,13 @@ func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hos |
| 42 | 42 |
return types.ContainerCreateResponse{"", warnings}, err
|
| 43 | 43 |
} |
| 44 | 44 |
|
| 45 |
- warnings = append(warnings, buildWarnings...) |
|
| 46 |
- |
|
| 47 | 45 |
return types.ContainerCreateResponse{container.ID, warnings}, nil
|
| 48 | 46 |
} |
| 49 | 47 |
|
| 50 | 48 |
// Create creates a new container from the given configuration with a given name. |
| 51 |
-func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retS []string, retErr error) {
|
|
| 49 |
+func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.HostConfig, name string) (retC *Container, retErr error) {
|
|
| 52 | 50 |
var ( |
| 53 | 51 |
container *Container |
| 54 |
- warnings []string |
|
| 55 | 52 |
img *image.Image |
| 56 | 53 |
imgID string |
| 57 | 54 |
err error |
| ... | ... |
@@ -60,16 +57,16 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos |
| 60 | 60 |
if config.Image != "" {
|
| 61 | 61 |
img, err = daemon.repositories.LookupImage(config.Image) |
| 62 | 62 |
if err != nil {
|
| 63 |
- return nil, nil, err |
|
| 63 |
+ return nil, err |
|
| 64 | 64 |
} |
| 65 | 65 |
if err = daemon.graph.CheckDepth(img); err != nil {
|
| 66 |
- return nil, nil, err |
|
| 66 |
+ return nil, err |
|
| 67 | 67 |
} |
| 68 | 68 |
imgID = img.ID |
| 69 | 69 |
} |
| 70 | 70 |
|
| 71 | 71 |
if err := daemon.mergeAndVerifyConfig(config, img); err != nil {
|
| 72 |
- return nil, nil, err |
|
| 72 |
+ return nil, err |
|
| 73 | 73 |
} |
| 74 | 74 |
|
| 75 | 75 |
if hostConfig == nil {
|
| ... | ... |
@@ -78,11 +75,11 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos |
| 78 | 78 |
if hostConfig.SecurityOpt == nil {
|
| 79 | 79 |
hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode) |
| 80 | 80 |
if err != nil {
|
| 81 |
- return nil, nil, err |
|
| 81 |
+ return nil, err |
|
| 82 | 82 |
} |
| 83 | 83 |
} |
| 84 | 84 |
if container, err = daemon.newContainer(name, config, imgID); err != nil {
|
| 85 |
- return nil, nil, err |
|
| 85 |
+ return nil, err |
|
| 86 | 86 |
} |
| 87 | 87 |
defer func() {
|
| 88 | 88 |
if retErr != nil {
|
| ... | ... |
@@ -93,13 +90,13 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos |
| 93 | 93 |
}() |
| 94 | 94 |
|
| 95 | 95 |
if err := daemon.Register(container); err != nil {
|
| 96 |
- return nil, nil, err |
|
| 96 |
+ return nil, err |
|
| 97 | 97 |
} |
| 98 | 98 |
if err := daemon.createRootfs(container); err != nil {
|
| 99 |
- return nil, nil, err |
|
| 99 |
+ return nil, err |
|
| 100 | 100 |
} |
| 101 | 101 |
if err := daemon.setHostConfig(container, hostConfig); err != nil {
|
| 102 |
- return nil, nil, err |
|
| 102 |
+ return nil, err |
|
| 103 | 103 |
} |
| 104 | 104 |
defer func() {
|
| 105 | 105 |
if retErr != nil {
|
| ... | ... |
@@ -109,20 +106,20 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos |
| 109 | 109 |
} |
| 110 | 110 |
}() |
| 111 | 111 |
if err := container.Mount(); err != nil {
|
| 112 |
- return nil, nil, err |
|
| 112 |
+ return nil, err |
|
| 113 | 113 |
} |
| 114 | 114 |
defer container.Unmount() |
| 115 | 115 |
|
| 116 | 116 |
if err := createContainerPlatformSpecificSettings(container, config, hostConfig, img); err != nil {
|
| 117 |
- return nil, nil, err |
|
| 117 |
+ return nil, err |
|
| 118 | 118 |
} |
| 119 | 119 |
|
| 120 | 120 |
if err := container.toDiskLocking(); err != nil {
|
| 121 | 121 |
logrus.Errorf("Error saving new container to disk: %v", err)
|
| 122 |
- return nil, nil, err |
|
| 122 |
+ return nil, err |
|
| 123 | 123 |
} |
| 124 | 124 |
container.logEvent("create")
|
| 125 |
- return container, warnings, nil |
|
| 125 |
+ return container, nil |
|
| 126 | 126 |
} |
| 127 | 127 |
|
| 128 | 128 |
func (daemon *Daemon) generateSecurityOpt(ipcMode runconfig.IpcMode, pidMode runconfig.PidMode) ([]string, error) {
|
| 129 | 129 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,238 @@ |
| 0 |
+package daemonbuilder |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "io" |
|
| 5 |
+ "io/ioutil" |
|
| 6 |
+ "os" |
|
| 7 |
+ "path/filepath" |
|
| 8 |
+ "strings" |
|
| 9 |
+ |
|
| 10 |
+ "github.com/Sirupsen/logrus" |
|
| 11 |
+ "github.com/docker/docker/api" |
|
| 12 |
+ "github.com/docker/docker/builder" |
|
| 13 |
+ "github.com/docker/docker/cliconfig" |
|
| 14 |
+ "github.com/docker/docker/daemon" |
|
| 15 |
+ "github.com/docker/docker/graph" |
|
| 16 |
+ "github.com/docker/docker/image" |
|
| 17 |
+ "github.com/docker/docker/pkg/archive" |
|
| 18 |
+ "github.com/docker/docker/pkg/chrootarchive" |
|
| 19 |
+ "github.com/docker/docker/pkg/httputils" |
|
| 20 |
+ "github.com/docker/docker/pkg/ioutils" |
|
| 21 |
+ "github.com/docker/docker/pkg/parsers" |
|
| 22 |
+ "github.com/docker/docker/pkg/progressreader" |
|
| 23 |
+ "github.com/docker/docker/pkg/system" |
|
| 24 |
+ "github.com/docker/docker/pkg/urlutil" |
|
| 25 |
+ "github.com/docker/docker/registry" |
|
| 26 |
+ "github.com/docker/docker/runconfig" |
|
| 27 |
+) |
|
| 28 |
+ |
|
| 29 |
// Docker implements builder.Docker for the docker Daemon object.
type Docker struct {
	// Daemon is the daemon instance all builder operations are delegated to.
	Daemon *daemon.Daemon
	// OutOld is the writer image-pull progress is streamed to (see Pull).
	// NOTE(review): only the pull OutStream use is visible here — confirm other uses.
	OutOld io.Writer
	// AuthConfigs maps registry addresses to credentials; when non-empty it is
	// preferred for resolving registry auth during Pull.
	AuthConfigs map[string]cliconfig.AuthConfig
}

// ensure Docker implements builder.Docker
var _ builder.Docker = Docker{}
|
|
| 38 |
+ |
|
| 39 |
+// LookupImage looks up a Docker image referenced by `name`. |
|
| 40 |
+func (d Docker) LookupImage(name string) (*image.Image, error) {
|
|
| 41 |
+ return d.Daemon.Repositories().LookupImage(name) |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+// Pull tells Docker to pull image referenced by `name`. |
|
| 45 |
+func (d Docker) Pull(name string) (*image.Image, error) {
|
|
| 46 |
+ remote, tag := parsers.ParseRepositoryTag(name) |
|
| 47 |
+ if tag == "" {
|
|
| 48 |
+ tag = "latest" |
|
| 49 |
+ } |
|
| 50 |
+ |
|
| 51 |
+ pullRegistryAuth := &cliconfig.AuthConfig{}
|
|
| 52 |
+ if len(d.AuthConfigs) > 0 {
|
|
| 53 |
+ // The request came with a full auth config file, we prefer to use that |
|
| 54 |
+ repoInfo, err := d.Daemon.RegistryService.ResolveRepository(remote) |
|
| 55 |
+ if err != nil {
|
|
| 56 |
+ return nil, err |
|
| 57 |
+ } |
|
| 58 |
+ |
|
| 59 |
+ resolvedConfig := registry.ResolveAuthConfig( |
|
| 60 |
+ &cliconfig.ConfigFile{AuthConfigs: d.AuthConfigs},
|
|
| 61 |
+ repoInfo.Index, |
|
| 62 |
+ ) |
|
| 63 |
+ pullRegistryAuth = &resolvedConfig |
|
| 64 |
+ } |
|
| 65 |
+ |
|
| 66 |
+ imagePullConfig := &graph.ImagePullConfig{
|
|
| 67 |
+ AuthConfig: pullRegistryAuth, |
|
| 68 |
+ OutStream: ioutils.NopWriteCloser(d.OutOld), |
|
| 69 |
+ } |
|
| 70 |
+ |
|
| 71 |
+ if err := d.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil {
|
|
| 72 |
+ return nil, err |
|
| 73 |
+ } |
|
| 74 |
+ |
|
| 75 |
+ return d.Daemon.Repositories().LookupImage(name) |
|
| 76 |
+} |
|
| 77 |
+ |
|
| 78 |
+// Container looks up a Docker container referenced by `id`. |
|
| 79 |
+func (d Docker) Container(id string) (*daemon.Container, error) {
|
|
| 80 |
+ return d.Daemon.Get(id) |
|
| 81 |
+} |
|
| 82 |
+ |
|
| 83 |
+// Create creates a new Docker container and returns potential warnings |
|
| 84 |
+func (d Docker) Create(cfg *runconfig.Config, hostCfg *runconfig.HostConfig) (*daemon.Container, []string, error) {
|
|
| 85 |
+ ccr, err := d.Daemon.ContainerCreate("", cfg, hostCfg, true)
|
|
| 86 |
+ if err != nil {
|
|
| 87 |
+ return nil, nil, err |
|
| 88 |
+ } |
|
| 89 |
+ container, err := d.Daemon.Get(ccr.ID) |
|
| 90 |
+ if err != nil {
|
|
| 91 |
+ return nil, ccr.Warnings, err |
|
| 92 |
+ } |
|
| 93 |
+ return container, ccr.Warnings, container.Mount() |
|
| 94 |
+} |
|
| 95 |
+ |
|
| 96 |
+// Remove removes a container specified by `id`. |
|
| 97 |
+func (d Docker) Remove(id string, cfg *daemon.ContainerRmConfig) error {
|
|
| 98 |
+ return d.Daemon.ContainerRm(id, cfg) |
|
| 99 |
+} |
|
| 100 |
+ |
|
| 101 |
+// Commit creates a new Docker image from an existing Docker container. |
|
| 102 |
+func (d Docker) Commit(c *daemon.Container, cfg *daemon.ContainerCommitConfig) (*image.Image, error) {
|
|
| 103 |
+ return d.Daemon.Commit(c, cfg) |
|
| 104 |
+} |
|
| 105 |
+ |
|
| 106 |
+// Retain retains an image avoiding it to be removed or overwritten until a corresponding Release() call. |
|
| 107 |
+func (d Docker) Retain(sessionID, imgID string) {
|
|
| 108 |
+ d.Daemon.Graph().Retain(sessionID, imgID) |
|
| 109 |
+} |
|
| 110 |
+ |
|
| 111 |
+// Release releases a list of images that were retained for the time of a build. |
|
| 112 |
+func (d Docker) Release(sessionID string, activeImages []string) {
|
|
| 113 |
+ d.Daemon.Graph().Release(sessionID, activeImages...) |
|
| 114 |
+} |
|
| 115 |
+ |
|
| 116 |
// Copy copies/extracts a source FileInfo to a destination path inside a container
// specified by a container object.
// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already).
// Copy should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths.
// When decompress is true and src is an archive file, it is unpacked into the
// destination instead of copied verbatim (the ADD remote-file case).
func (d Docker) Copy(c *daemon.Container, destPath string, src builder.FileInfo, decompress bool) error {
	srcPath := src.Path()
	destExists := true

	// Work in daemon-local OS specific file paths
	destPath = filepath.FromSlash(destPath)

	// Resolve destPath to an absolute path inside the container's rootfs.
	dest, err := c.GetResourcePath(destPath)
	if err != nil {
		return err
	}

	// Preserve the trailing slash
	// TODO: why are we appending another path separator if there was already one?
	if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." {
		dest += string(os.PathSeparator)
	}

	destPath = dest

	// destExists/destStat drive both the untar target choice and the final
	// ownership fixup below.
	destStat, err := os.Stat(destPath)
	if err != nil {
		if !os.IsNotExist(err) {
			logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
			return err
		}
		destExists = false
	}

	if src.IsDir() {
		// copy as directory
		if err := chrootarchive.CopyWithTar(srcPath, destPath); err != nil {
			return err
		}
		return fixPermissions(srcPath, destPath, 0, 0, destExists)
	}
	if decompress {
		// Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file)

		// First try to unpack the source as an archive
		// to support the untar feature we need to clean up the path a little bit
		// because tar is very forgiving. First we need to strip off the archive's
		// filename from the path but this is only added if it does not end in slash
		tarDest := destPath
		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
			tarDest = filepath.Dir(destPath)
		}

		// try to successfully untar the orig
		// NOTE(review): io.EOF appears to be treated as "not an archive" and is
		// not logged — confirm against chrootarchive.UntarPath's contract.
		if err := chrootarchive.UntarPath(srcPath, tarDest); err == nil {
			return nil
		} else if err != io.EOF {
			logrus.Debugf("Couldn't untar to %s: %v", tarDest, err)
		}
	}

	// only needed for fixPermissions, but might as well put it before CopyFileWithTar
	if destExists && destStat.IsDir() {
		destPath = filepath.Join(destPath, filepath.Base(srcPath))
	}

	if err := system.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
		return err
	}
	if err := chrootarchive.CopyFileWithTar(srcPath, destPath); err != nil {
		return err
	}

	return fixPermissions(srcPath, destPath, 0, 0, destExists)
}
|
| 190 |
+ |
|
| 191 |
+// GetCachedImage returns a reference to a cached image whose parent equals `parent` |
|
| 192 |
+// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. |
|
| 193 |
+func (d Docker) GetCachedImage(imgID string, cfg *runconfig.Config) (string, error) {
|
|
| 194 |
+ cache, err := d.Daemon.ImageGetCached(string(imgID), cfg) |
|
| 195 |
+ if cache == nil || err != nil {
|
|
| 196 |
+ return "", err |
|
| 197 |
+ } |
|
| 198 |
+ return cache.ID, nil |
|
| 199 |
+} |
|
| 200 |
+ |
|
| 201 |
+// Following is specific to builder contexts |
|
| 202 |
+ |
|
| 203 |
// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
// irrespective of user input.
// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, progressReader *progressreader.Config) (context builder.ModifiableContext, dockerfileName string, err error) {
	switch {
	case remoteURL == "":
		// No remote: r itself is the build-context tar stream.
		context, err = builder.MakeTarSumContext(r)
	case urlutil.IsGitURL(remoteURL):
		context, err = builder.MakeGitContext(remoteURL)
	case urlutil.IsURL(remoteURL):
		// Content-type-keyed handlers decide how the fetched body becomes a context.
		context, err = builder.MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
			// A plain-text response is interpreted as a lone Dockerfile and
			// wrapped into a generated single-file tar context.
			httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
				dockerfile, err := ioutil.ReadAll(rc)
				if err != nil {
					return nil, err
				}

				// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
				// should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
				dockerfileName = api.DefaultDockerfileName

				// TODO: return a context without tarsum
				return archive.Generate(dockerfileName, string(dockerfile))
			},
			// fallback handler (tar context)
			"": func(rc io.ReadCloser) (io.ReadCloser, error) {
				progressReader.In = rc
				return progressReader, nil
			},
		})
	default:
		err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
	}
	return
}
| 0 | 238 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,40 @@ |
| 0 |
+// +build freebsd linux |
|
| 1 |
+ |
|
| 2 |
+package daemonbuilder |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "os" |
|
| 6 |
+ "path/filepath" |
|
| 7 |
+) |
|
| 8 |
+ |
|
| 9 |
+func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
|
|
| 10 |
+ // If the destination didn't already exist, or the destination isn't a |
|
| 11 |
+ // directory, then we should Lchown the destination. Otherwise, we shouldn't |
|
| 12 |
+ // Lchown the destination. |
|
| 13 |
+ destStat, err := os.Stat(destination) |
|
| 14 |
+ if err != nil {
|
|
| 15 |
+ // This should *never* be reached, because the destination must've already |
|
| 16 |
+ // been created while untar-ing the context. |
|
| 17 |
+ return err |
|
| 18 |
+ } |
|
| 19 |
+ doChownDestination := !destExisted || !destStat.IsDir() |
|
| 20 |
+ |
|
| 21 |
+ // We Walk on the source rather than on the destination because we don't |
|
| 22 |
+ // want to change permissions on things we haven't created or modified. |
|
| 23 |
+ return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
|
|
| 24 |
+ // Do not alter the walk root iff. it existed before, as it doesn't fall under |
|
| 25 |
+ // the domain of "things we should chown". |
|
| 26 |
+ if !doChownDestination && (source == fullpath) {
|
|
| 27 |
+ return nil |
|
| 28 |
+ } |
|
| 29 |
+ |
|
| 30 |
+ // Path is prefixed by source: substitute with destination instead. |
|
| 31 |
+ cleaned, err := filepath.Rel(source, fullpath) |
|
| 32 |
+ if err != nil {
|
|
| 33 |
+ return err |
|
| 34 |
+ } |
|
| 35 |
+ |
|
| 36 |
+ fullpath = filepath.Join(destination, cleaned) |
|
| 37 |
+ return os.Lchown(fullpath, uid, gid) |
|
| 38 |
+ }) |
|
| 39 |
+} |
| ... | ... |
@@ -39,7 +39,7 @@ func (s *DockerSuite) TestBuildApiDockerfilePath(c *check.C) {
|
| 39 | 39 |
c.Fatal(err) |
| 40 | 40 |
} |
| 41 | 41 |
|
| 42 |
- if !strings.Contains(string(out), "must be within the build context") {
|
|
| 42 |
+ if !strings.Contains(string(out), "Forbidden path outside the build context") {
|
|
| 43 | 43 |
c.Fatalf("Didn't complain about leaving build context: %s", out)
|
| 44 | 44 |
} |
| 45 | 45 |
} |
| 0 | 10 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,18 @@ |
| 0 |
+// +build windows |
|
| 1 |
+ |
|
| 2 |
+package ioutils |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "io/ioutil" |
|
| 6 |
+ |
|
| 7 |
+ "github.com/docker/docker/pkg/longpath" |
|
| 8 |
+) |
|
| 9 |
+ |
|
| 10 |
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. |
|
| 11 |
+func TempDir(dir, prefix string) (string, error) {
|
|
| 12 |
+ tempDir, err := ioutil.TempDir(dir, prefix) |
|
| 13 |
+ if err != nil {
|
|
| 14 |
+ return "", err |
|
| 15 |
+ } |
|
| 16 |
+ return longpath.AddPrefix(tempDir), nil |
|
| 17 |
+} |
| ... | ... |
@@ -247,17 +247,11 @@ func ValidateContextDirectory(srcPath string, excludes []string) error {
|
| 247 | 247 |
// ReadDockerIgnore reads a .dockerignore file and returns the list of file patterns |
| 248 | 248 |
// to ignore. Note this will trim whitespace from each line as well |
| 249 | 249 |
// as use GO's "clean" func to get the shortest/cleanest path for each. |
| 250 |
-func ReadDockerIgnore(path string) ([]string, error) {
|
|
| 251 |
- // Note that a missing .dockerignore file isn't treated as an error |
|
| 252 |
- reader, err := os.Open(path) |
|
| 253 |
- if err != nil {
|
|
| 254 |
- if !os.IsNotExist(err) {
|
|
| 255 |
- return nil, fmt.Errorf("Error reading '%s': %v", path, err)
|
|
| 256 |
- } |
|
| 250 |
+func ReadDockerIgnore(reader io.ReadCloser) ([]string, error) {
|
|
| 251 |
+ if reader == nil {
|
|
| 257 | 252 |
return nil, nil |
| 258 | 253 |
} |
| 259 | 254 |
defer reader.Close() |
| 260 |
- |
|
| 261 | 255 |
scanner := bufio.NewScanner(reader) |
| 262 | 256 |
var excludes []string |
| 263 | 257 |
|
| ... | ... |
@@ -269,8 +263,8 @@ func ReadDockerIgnore(path string) ([]string, error) {
|
| 269 | 269 |
pattern = filepath.Clean(pattern) |
| 270 | 270 |
excludes = append(excludes, pattern) |
| 271 | 271 |
} |
| 272 |
- if err = scanner.Err(); err != nil {
|
|
| 273 |
- return nil, fmt.Errorf("Error reading '%s': %v", path, err)
|
|
| 272 |
+ if err := scanner.Err(); err != nil {
|
|
| 273 |
+ return nil, fmt.Errorf("Error reading .dockerignore: %v", err)
|
|
| 274 | 274 |
} |
| 275 | 275 |
return excludes, nil |
| 276 | 276 |
} |
| ... | ... |
@@ -63,24 +63,27 @@ func TestReadDockerIgnore(t *testing.T) {
|
| 63 | 63 |
} |
| 64 | 64 |
defer os.RemoveAll(tmpDir) |
| 65 | 65 |
|
| 66 |
- diName := filepath.Join(tmpDir, ".dockerignore") |
|
| 67 |
- |
|
| 68 |
- di, err := ReadDockerIgnore(diName) |
|
| 66 |
+ di, err := ReadDockerIgnore(nil) |
|
| 69 | 67 |
if err != nil {
|
| 70 |
- t.Fatalf("Expected not to have error, got %s", err)
|
|
| 68 |
+ t.Fatalf("Expected not to have error, got %v", err)
|
|
| 71 | 69 |
} |
| 72 | 70 |
|
| 73 | 71 |
if diLen := len(di); diLen != 0 {
|
| 74 | 72 |
t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen)
|
| 75 | 73 |
} |
| 76 | 74 |
|
| 75 |
+ diName := filepath.Join(tmpDir, ".dockerignore") |
|
| 77 | 76 |
content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile")
|
| 78 | 77 |
err = ioutil.WriteFile(diName, []byte(content), 0777) |
| 79 | 78 |
if err != nil {
|
| 80 | 79 |
t.Fatal(err) |
| 81 | 80 |
} |
| 82 | 81 |
|
| 83 |
- di, err = ReadDockerIgnore(diName) |
|
| 82 |
+ diFd, err := os.Open(diName) |
|
| 83 |
+ if err != nil {
|
|
| 84 |
+ t.Fatal(err) |
|
| 85 |
+ } |
|
| 86 |
+ di, err = ReadDockerIgnore(diFd) |
|
| 84 | 87 |
if err != nil {
|
| 85 | 88 |
t.Fatal(err) |
| 86 | 89 |
} |