Currently, the daemonbuilder package (part of daemon) implements the
builder backend. However, it is a very thin wrapper around daemon
methods and creates an implementation dependency for the api/server
build endpoint. The api/server build router should only know about the
backend that implements the /build API endpoint.

Removing daemonbuilder involves moving the build-specific methods to
their respective files in the daemon, where they fit naturally.
Signed-off-by: Anusha Ragunathan <anusha@docker.com>
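
For orientation, a minimal, self-contained sketch (not part of the patch) of the shape this change gives the code: the API build router depends only on a small Backend interface, and a build manager in the dockerfile package is the concrete implementation wired in when the server initializes its routers. The real Build signature takes *types.ImageBuildOptions, a builder.Context, stream writers, and a client-gone channel, as the diff below shows; the simplified signature and the stub implementation here are illustrative only.

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // Backend is all the API build router needs to know about: a single Build
    // entry point. It mirrors the interface added to the build router package,
    // with a deliberately simplified signature.
    type Backend interface {
    	Build(dockerfile string, stdout, stderr io.Writer) (imageID string, err error)
    }

    // buildRouter holds only the Backend interface, not a *daemon.Daemon.
    type buildRouter struct {
    	backend Backend
    }

    // buildManager stands in for the dockerfile package's BuildManager: the
    // concrete Backend implementation that the server wires into the router.
    type buildManager struct{}

    func (bm *buildManager) Build(dockerfile string, stdout, stderr io.Writer) (string, error) {
    	fmt.Fprintf(stdout, "building from: %s\n", dockerfile)
    	return "sha256:dummy", nil // a real manager would create a Builder and run it
    }

    func main() {
    	// Mirrors s.addRouter(build.NewRouter(dockerfile.NewBuildManager(d))).
    	r := &buildRouter{backend: &buildManager{}}
    	imgID, err := r.backend.Build("FROM scratch", os.Stdout, os.Stderr)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("built", imgID)
    }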
@@ -1,5 +1,11 @@
 package build
 
+import (
+	"github.com/docker/docker/builder"
+	"github.com/docker/engine-api/types"
+	"io"
+)
+
 // Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID.
 type Backend interface {
 	// Build builds a Docker image referenced by an imageID string.
@@ -8,5 +14,5 @@ type Backend interface {
 	// by the caller.
 	//
 	// TODO: make this return a reference instead of string
-	Build() (imageID string)
+	Build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error)
 }
@@ -3,17 +3,16 @@ package build
 import (
 	"github.com/docker/docker/api/server/router"
 	"github.com/docker/docker/api/server/router/local"
-	"github.com/docker/docker/daemon"
 )
 
 // buildRouter is a router to talk with the build controller
 type buildRouter struct {
-	backend *daemon.Daemon
+	backend Backend
 	routes  []router.Route
 }
 
 // NewRouter initializes a new build router
-func NewRouter(b *daemon.Daemon) router.Router {
+func NewRouter(b Backend) router.Router {
 	r := &buildRouter{
 		backend: b,
 	}
@@ -14,12 +14,9 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builder/dockerfile"
-	"github.com/docker/docker/daemon/daemonbuilder"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/streamformatter"
-	"github.com/docker/docker/reference"
 	"github.com/docker/docker/utils"
 	"github.com/docker/engine-api/types"
 	"github.com/docker/engine-api/types/container"
@@ -27,45 +24,6 @@ import (
 	"golang.org/x/net/context"
 )
 
-// sanitizeRepoAndTags parses the raw "t" parameter received from the client
-// to a slice of repoAndTag.
-// It also validates each repoName and tag.
-func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
-	var (
-		repoAndTags []reference.Named
-		// This map is used for deduplicating the "-t" parameter.
-		uniqNames = make(map[string]struct{})
-	)
-	for _, repo := range names {
-		if repo == "" {
-			continue
-		}
-
-		ref, err := reference.ParseNamed(repo)
-		if err != nil {
-			return nil, err
-		}
-
-		ref = reference.WithDefaultTag(ref)
-
-		if _, isCanonical := ref.(reference.Canonical); isCanonical {
-			return nil, errors.New("build tag cannot contain a digest")
-		}
-
-		if _, isTagged := ref.(reference.NamedTagged); !isTagged {
-			ref, err = reference.WithTag(ref, reference.DefaultTag)
-		}
-
-		nameWithTag := ref.String()
-
-		if _, exists := uniqNames[nameWithTag]; !exists {
-			uniqNames[nameWithTag] = struct{}{}
-			repoAndTags = append(repoAndTags, ref)
-		}
-	}
-	return repoAndTags, nil
-}
-
 func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) {
 	version := httputils.VersionFromContext(ctx)
 	options := &types.ImageBuildOptions{}
@@ -92,6 +50,7 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 	options.CPUSetCPUs = r.FormValue("cpusetcpus")
 	options.CPUSetMems = r.FormValue("cpusetmems")
 	options.CgroupParent = r.FormValue("cgroupparent")
+	options.Tags = r.Form["t"]
 
 	if r.Form.Get("shmsize") != "" {
 		shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
@@ -170,11 +129,6 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
 		return errf(err)
 	}
 
-	repoAndTags, err := sanitizeRepoAndTags(r.Form["t"])
-	if err != nil {
-		return errf(err)
-	}
-
 	remoteURL := r.FormValue("remote")
 
 	// Currently, only used if context is from a remote url.
@@ -190,8 +144,9 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
 	var (
 		context        builder.ModifiableContext
 		dockerfileName string
+		out            io.Writer
 	)
-	context, dockerfileName, err = daemonbuilder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader)
+	context, dockerfileName, err = builder.DetectContextFromRemoteURL(r.Body, remoteURL, createProgressReader)
 	if err != nil {
 		return errf(err)
 	}
@@ -204,51 +159,26 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
 		buildOptions.Dockerfile = dockerfileName
 	}
 
-	b, err := dockerfile.NewBuilder(
-		buildOptions, // result of newBuildConfig
-		&daemonbuilder.Docker{br.backend},
-		builder.DockerIgnoreContext{ModifiableContext: context},
-		nil)
-	if err != nil {
-		return errf(err)
-	}
-	if buildOptions.SuppressOutput {
-		b.Output = notVerboseBuffer
-	} else {
-		b.Output = output
-	}
-	b.Stdout = &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf}
-	b.Stderr = &streamformatter.StderrFormatter{Writer: output, StreamFormatter: sf}
+	out = output
 	if buildOptions.SuppressOutput {
-		b.Stdout = &streamformatter.StdoutFormatter{Writer: notVerboseBuffer, StreamFormatter: sf}
-		b.Stderr = &streamformatter.StderrFormatter{Writer: notVerboseBuffer, StreamFormatter: sf}
+		out = notVerboseBuffer
 	}
+	stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf}
+	stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf}
 
-	if closeNotifier, ok := w.(http.CloseNotifier); ok {
-		finished := make(chan struct{})
-		defer close(finished)
-		clientGone := closeNotifier.CloseNotify()
-		go func() {
-			select {
-			case <-finished:
-			case <-clientGone:
-				logrus.Infof("Client disconnected, cancelling job: build")
-				b.Cancel()
-			}
-		}()
+	closeNotifier := make(<-chan bool)
+	if notifier, ok := w.(http.CloseNotifier); ok {
+		closeNotifier = notifier.CloseNotify()
 	}
 
-	imgID, err := b.Build()
+	imgID, err := br.backend.Build(buildOptions,
+		builder.DockerIgnoreContext{ModifiableContext: context},
+		stdout, stderr, out,
+		closeNotifier)
 	if err != nil {
 		return errf(err)
 	}
 
-	for _, rt := range repoAndTags {
-		if err := br.backend.TagImage(rt, imgID); err != nil {
-			return errf(err)
-		}
-	}
-
 	// Everything worked so if -q was provided the output from the daemon
 	// should be just the image ID and we'll print that to stdout.
 	if buildOptions.SuppressOutput {
@@ -15,6 +15,7 @@ import (
 	"github.com/docker/docker/api/server/router/network"
 	"github.com/docker/docker/api/server/router/system"
 	"github.com/docker/docker/api/server/router/volume"
+	"github.com/docker/docker/builder/dockerfile"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/pkg/authorization"
 	"github.com/docker/docker/utils"
@@ -180,7 +181,7 @@ func (s *Server) InitRouters(d *daemon.Daemon) {
 	s.addRouter(network.NewRouter(d))
 	s.addRouter(system.NewRouter(d))
 	s.addRouter(volume.NewRouter(d))
-	s.addRouter(build.NewRouter(d))
+	s.addRouter(build.NewRouter(dockerfile.NewBuildManager(d)))
 }
 
 // addRouter adds a new router to the server.
@@ -9,6 +9,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/docker/docker/reference"
 	"github.com/docker/engine-api/types"
 	"github.com/docker/engine-api/types/container"
 )
@@ -99,11 +100,13 @@ type Backend interface {
 	// TODO: use digest reference instead of name
 
 	// GetImage looks up a Docker image referenced by `name`.
-	GetImage(name string) (Image, error)
+	GetImageOnBuild(name string) (Image, error)
+	// Tag an image with newTag
+	TagImage(newTag reference.Named, imageName string) error
 	// Pull tells Docker to pull image referenced by `name`.
-	Pull(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error)
+	PullOnBuild(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error)
 	// ContainerAttach attaches to container.
-	ContainerAttach(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
+	ContainerAttachOnBuild(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error
 	// ContainerCreate creates a new Docker container and returns potential warnings
 	ContainerCreate(types.ContainerCreateConfig) (types.ContainerCreateResponse, error)
 	// ContainerRm removes a container specified by `id`.
@@ -116,9 +119,8 @@ type Backend interface {
 	ContainerStart(containerID string, hostConfig *container.HostConfig) error
 	// ContainerWait stops processing until the given container is stopped.
 	ContainerWait(containerID string, timeout time.Duration) (int, error)
-
 	// ContainerUpdateCmd updates container.Path and container.Args
-	ContainerUpdateCmd(containerID string, cmd []string) error
+	ContainerUpdateCmdOnBuild(containerID string, cmd []string) error
 
 	// ContainerCopy copies/extracts a source FileInfo to a destination path inside a container
 	// specified by a container object.
@@ -127,7 +129,13 @@ type Backend interface {
 	// with Context.Walk
 	//ContainerCopy(name string, res string) (io.ReadCloser, error)
 	// TODO: use copyBackend api
-	BuilderCopy(containerID string, destPath string, src FileInfo, decompress bool) error
+	CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error
+}
+
+// Image represents a Docker image used by the builder.
+type Image interface {
+	ImageID() string
+	RunConfig() *container.Config
 }
 
 // ImageCache abstracts an image cache store.
@@ -135,5 +143,5 @@ type Backend interface {
 type ImageCache interface {
 	// GetCachedImage returns a reference to a cached image whose parent equals `parent`
 	// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
-	GetCachedImage(parentID string, cfg *container.Config) (imageID string, err error)
+	GetCachedImageOnBuild(parentID string, cfg *container.Config) (imageID string, err error)
 }
@@ -2,6 +2,7 @@ package dockerfile
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -13,6 +14,7 @@ import (
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/dockerfile/parser"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/reference"
 	"github.com/docker/engine-api/types"
 	"github.com/docker/engine-api/types/container"
 )
@@ -48,6 +50,7 @@ type Builder struct {
 
 	Stdout io.Writer
 	Stderr io.Writer
+	Output io.Writer
 
 	docker  builder.Backend
 	context builder.Context
@@ -67,8 +70,17 @@ type Builder struct {
 	allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'.
 
 	// TODO: remove once docker.Commit can receive a tag
-	id     string
-	Output io.Writer
+	id string
+}
+
+// BuildManager implements builder.Backend and is shared across all Builder objects.
+type BuildManager struct {
+	backend builder.Backend
+}
+
+// NewBuildManager creates a BuildManager.
+func NewBuildManager(b builder.Backend) (bm *BuildManager) {
+	return &BuildManager{backend: b}
 }
 
 // NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config.
@@ -103,7 +115,57 @@ func NewBuilder(config *types.ImageBuildOptions, backend builder.Backend, contex
 	return b, nil
 }
 
-// Build runs the Dockerfile builder from a context and a docker object that allows to make calls
+// sanitizeRepoAndTags parses the raw "t" parameter received from the client
+// to a slice of repoAndTag.
+// It also validates each repoName and tag.
+func sanitizeRepoAndTags(names []string) ([]reference.Named, error) {
+	var (
+		repoAndTags []reference.Named
+		// This map is used for deduplicating the "-t" parameter.
+		uniqNames = make(map[string]struct{})
+	)
+	for _, repo := range names {
+		if repo == "" {
+			continue
+		}
+
+		ref, err := reference.ParseNamed(repo)
+		if err != nil {
+			return nil, err
+		}
+
+		ref = reference.WithDefaultTag(ref)
+
+		if _, isCanonical := ref.(reference.Canonical); isCanonical {
+			return nil, errors.New("build tag cannot contain a digest")
+		}
+
+		if _, isTagged := ref.(reference.NamedTagged); !isTagged {
+			ref, err = reference.WithTag(ref, reference.DefaultTag)
+		}
+
+		nameWithTag := ref.String()
+
+		if _, exists := uniqNames[nameWithTag]; !exists {
+			uniqNames[nameWithTag] = struct{}{}
+			repoAndTags = append(repoAndTags, ref)
+		}
+	}
+	return repoAndTags, nil
+}
+
+// Build creates a NewBuilder, which builds the image.
+func (bm *BuildManager) Build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) {
+	b, err := NewBuilder(config, bm.backend, context, nil)
+	if err != nil {
+		return "", err
+	}
+	img, err := b.build(config, context, stdout, stderr, out, clientGone)
+	return img, err
+
+}
+
+// build runs the Dockerfile builder from a context and a docker object that allows to make calls
 // to Docker.
 //
 // This will (barring errors):
@@ -113,10 +175,16 @@ func NewBuilder(config *types.ImageBuildOptions, backend builder.Backend, contex
 // * walk the AST and execute it by dispatching to handlers. If Remove
 // or ForceRemove is set, additional cleanup around containers happens after
 // processing.
+// * Tag image, if applicable.
 // * Print a happy message and return the image ID.
-// * NOT tag the image, that is responsibility of the caller.
 //
-func (b *Builder) Build() (string, error) {
+func (b *Builder) build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer, clientGone <-chan bool) (string, error) {
+	b.options = config
+	b.context = context
+	b.Stdout = stdout
+	b.Stderr = stderr
+	b.Output = out
+
 	// If Dockerfile was not parsed yet, extract it from the Context
 	if b.dockerfile == nil {
 		if err := b.readDockerfile(); err != nil {
@@ -124,6 +192,24 @@ func (b *Builder) Build() (string, error) {
 		}
 	}
 
+	finished := make(chan struct{})
+	defer close(finished)
+	go func() {
+		select {
+		case <-finished:
+		case <-clientGone:
+			b.cancelOnce.Do(func() {
+				close(b.cancelled)
+			})
+		}
+
+	}()
+
+	repoAndTags, err := sanitizeRepoAndTags(config.Tags)
+	if err != nil {
+		return "", err
+	}
+
 	var shortImgID string
 	for i, n := range b.dockerfile.Children {
 		select {
@@ -163,6 +249,12 @@ func (b *Builder) Build() (string, error) {
 		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
 	}
 
+	for _, rt := range repoAndTags {
+		if err := b.docker.TagImage(rt, b.image); err != nil {
+			return "", err
+		}
+	}
+
 	fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID)
 	return b.image, nil
 }
@@ -208,11 +208,11 @@ func from(b *Builder, args []string, attributes map[string]bool, original string
 	} else {
 		// TODO: don't use `name`, instead resolve it to a digest
 		if !b.options.PullParent {
-			image, err = b.docker.GetImage(name)
+			image, err = b.docker.GetImageOnBuild(name)
 			// TODO: shouldn't we error out if error is different from "not found" ?
 		}
 		if image == nil {
-			image, err = b.docker.Pull(name, b.options.AuthConfigs, b.Output)
+			image, err = b.docker.PullOnBuild(name, b.options.AuthConfigs, b.Output)
 			if err != nil {
 				return err
 			}
@@ -205,7 +205,7 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalD
 	}
 
 	for _, info := range infos {
-		if err := b.docker.BuilderCopy(container.ID, dest, info.FileInfo, info.decompress); err != nil {
+		if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
 			return err
 		}
 	}
@@ -396,10 +396,10 @@ func containsWildcards(name string) bool {
 
 func (b *Builder) processImageFrom(img builder.Image) error {
 	if img != nil {
-		b.image = img.ID()
+		b.image = img.ImageID()
 
-		if img.Config() != nil {
-			b.runConfig = img.Config()
+		if img.RunConfig() != nil {
+			b.runConfig = img.RunConfig()
 		}
 	}
 
@@ -469,7 +469,7 @@ func (b *Builder) probeCache() (bool, error) {
 	if !ok || b.options.NoCache || b.cacheBusted {
 		return false, nil
 	}
-	cache, err := c.GetCachedImage(b.image, b.runConfig)
+	cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig)
 	if err != nil {
 		return false, err
 	}
@@ -530,7 +530,7 @@ func (b *Builder) create() (string, error) {
 
 	if config.Cmd.Len() > 0 {
 		// override the entry point that may have been picked up from the base image
-		if err := b.docker.ContainerUpdateCmd(c.ID, config.Cmd.Slice()); err != nil {
+		if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd.Slice()); err != nil {
 			return "", err
 		}
 	}
@@ -541,7 +541,7 @@ func (b *Builder) create() (string, error) {
 func (b *Builder) run(cID string) (err error) {
 	errCh := make(chan error)
 	go func() {
-		errCh <- b.docker.ContainerAttach(cID, nil, b.Stdout, b.Stderr, true)
+		errCh <- b.docker.ContainerAttachOnBuild(cID, nil, b.Stdout, b.Stderr, true)
 	}()
 
 	finished := make(chan struct{})
@@ -8,7 +8,10 @@ import (
 	"io/ioutil"
 	"regexp"
 
+	"github.com/docker/docker/api"
+	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/httputils"
+	"github.com/docker/docker/pkg/urlutil"
 )
 
 // When downloading remote contexts, limit the amount (in bytes)
@@ -65,6 +68,41 @@ func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.
 	return MakeTarSumContext(contextReader)
 }
 
+// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
+// irrespective of user input.
+// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
+func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) {
+	switch {
+	case remoteURL == "":
+		context, err = MakeTarSumContext(r)
+	case urlutil.IsGitURL(remoteURL):
+		context, err = MakeGitContext(remoteURL)
+	case urlutil.IsURL(remoteURL):
+		context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
+			httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
+				dockerfile, err := ioutil.ReadAll(rc)
+				if err != nil {
+					return nil, err
+				}
+
+				// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
+				// should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
+				dockerfileName = api.DefaultDockerfileName
+
+				// TODO: return a context without tarsum
+				return archive.Generate(dockerfileName, string(dockerfile))
+			},
+			// fallback handler (tar context)
+			"": func(rc io.ReadCloser) (io.ReadCloser, error) {
+				return createProgressReader(rc), nil
+			},
+		})
+	default:
+		err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
+	}
+	return
+}
+
 // inspectResponse looks into the http response data at r to determine whether its
 // content-type is on the list of acceptable content types for remote build contexts.
 // This function returns:
@@ -7,9 +7,11 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/docker/docker/builder"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/engine-api/types"
 )
@@ -328,3 +330,100 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
 	daemon.LogContainerEvent(container, "copy")
 	return reader, nil
 }
+
+// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container
+// specified by a container object.
+// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already).
+// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths.
+func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error {
+	srcPath := src.Path()
+	destExists := true
+	destDir := false
+	rootUID, rootGID := daemon.GetRemappedUIDGID()
+
+	// Work in daemon-local OS specific file paths
+	destPath = filepath.FromSlash(destPath)
+
+	c, err := daemon.GetContainer(cID)
+	if err != nil {
+		return err
+	}
+	err = daemon.Mount(c)
+	if err != nil {
+		return err
+	}
+	defer daemon.Unmount(c)
+
+	dest, err := c.GetResourcePath(destPath)
+	if err != nil {
+		return err
+	}
+
+	// Preserve the trailing slash
+	// TODO: why are we appending another path separator if there was already one?
+	if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." {
+		destDir = true
+		dest += string(os.PathSeparator)
+	}
+
+	destPath = dest
+
+	destStat, err := os.Stat(destPath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			//logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
+			return err
+		}
+		destExists = false
+	}
+
+	uidMaps, gidMaps := daemon.GetUIDGIDMaps()
+	archiver := &archive.Archiver{
+		Untar:   chrootarchive.Untar,
+		UIDMaps: uidMaps,
+		GIDMaps: gidMaps,
+	}
+
+	if src.IsDir() {
+		// copy as directory
+		if err := archiver.CopyWithTar(srcPath, destPath); err != nil {
+			return err
+		}
+		return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
+	}
+	if decompress && archive.IsArchivePath(srcPath) {
+		// Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file)
+
+		// First try to unpack the source as an archive
+		// to support the untar feature we need to clean up the path a little bit
+		// because tar is very forgiving. First we need to strip off the archive's
+		// filename from the path but this is only added if it does not end in slash
+		tarDest := destPath
+		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
+			tarDest = filepath.Dir(destPath)
+		}
+
+		// try to successfully untar the orig
+		err := archiver.UntarPath(srcPath, tarDest)
+		/*
+			if err != nil {
+				logrus.Errorf("Couldn't untar to %s: %v", tarDest, err)
+			}
+		*/
+		return err
+	}
+
+	// only needed for fixPermissions, but might as well put it before CopyFileWithTar
+	if destDir || (destExists && destStat.IsDir()) {
+		destPath = filepath.Join(destPath, src.Name())
+	}
+
+	if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil {
+		return err
+	}
+	if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil {
+		return err
+	}
+
+	return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
+}
@@ -2,7 +2,11 @@
 
 package daemon
 
-import "github.com/docker/docker/container"
+import (
+	"github.com/docker/docker/container"
+	"os"
+	"path/filepath"
+)
 
 // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
 // cannot be in a read-only volume. If it is not in a volume, the container
@@ -19,3 +23,35 @@ func checkIfPathIsInAVolume(container *container.Container, absPath string) (boo
 	}
 	return toVolume, nil
 }
+
+func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
+	// If the destination didn't already exist, or the destination isn't a
+	// directory, then we should Lchown the destination. Otherwise, we shouldn't
+	// Lchown the destination.
+	destStat, err := os.Stat(destination)
+	if err != nil {
+		// This should *never* be reached, because the destination must've already
+		// been created while untar-ing the context.
+		return err
+	}
+	doChownDestination := !destExisted || !destStat.IsDir()
+
+	// We Walk on the source rather than on the destination because we don't
+	// want to change permissions on things we haven't created or modified.
+	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
+		// Do not alter the walk root iff. it existed before, as it doesn't fall under
+		// the domain of "things we should chown".
+		if !doChownDestination && (source == fullpath) {
+			return nil
+		}
+
+		// Path is prefixed by source: substitute with destination instead.
+		cleaned, err := filepath.Rel(source, fullpath)
+		if err != nil {
+			return err
+		}
+
+		fullpath = filepath.Join(destination, cleaned)
+		return os.Lchown(fullpath, uid, gid)
+	})
+}
@@ -11,3 +11,8 @@ import "github.com/docker/docker/container"
 func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
 	return false, nil
 }
+
+func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
+	// chown is not supported on Windows
+	return nil
+}
@@ -100,6 +100,16 @@ func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *Containe
 	return daemon.attachWithLogs(container, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream, c.DetachKeys)
 }
 
+// ContainerAttachOnBuild attaches streams to the container cID. If stream is true, it streams the output.
+func (daemon *Daemon) ContainerAttachOnBuild(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
+	return daemon.ContainerWsAttachWithLogs(cID, &ContainerWsAttachWithLogsConfig{
+		InStream:  stdin,
+		OutStream: stdout,
+		ErrStream: stderr,
+		Stream:    stream,
+	})
+}
+
 func (daemon *Daemon) attachWithLogs(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error {
 	if logs {
 		logDriver, err := daemon.getLogger(container)
@@ -22,6 +22,7 @@ import (
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/api"
+	"github.com/docker/docker/builder"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/exec"
@@ -1035,6 +1036,35 @@ func (daemon *Daemon) PullImage(ref reference.Named, metaHeaders map[string][]st
 	return err
 }
 
+// PullOnBuild tells Docker to pull image referenced by `name`.
+func (daemon *Daemon) PullOnBuild(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) {
+	ref, err := reference.ParseNamed(name)
+	if err != nil {
+		return nil, err
+	}
+	ref = reference.WithDefaultTag(ref)
+
+	pullRegistryAuth := &types.AuthConfig{}
+	if len(authConfigs) > 0 {
+		// The request came with a full auth config file, we prefer to use that
+		repoInfo, err := daemon.RegistryService.ResolveRepository(ref)
+		if err != nil {
+			return nil, err
+		}
+
+		resolvedConfig := registry.ResolveAuthConfig(
+			authConfigs,
+			repoInfo.Index,
+		)
+		pullRegistryAuth = &resolvedConfig
+	}
+
+	if err := daemon.PullImage(ref, nil, pullRegistryAuth, output); err != nil {
+		return nil, err
+	}
+	return daemon.GetImage(name)
+}
+
 // ExportImage exports a list of images to the given output stream. The
 // exported images are archived into a tar when written to the output
 // stream. All images with the given tag and all versions containing
@@ -1275,6 +1305,15 @@ func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
 	return daemon.imageStore.Get(imgID)
 }
 
+// GetImageOnBuild looks up a Docker image referenced by `name`.
+func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) {
+	img, err := daemon.GetImage(name)
+	if err != nil {
+		return nil, err
+	}
+	return img, nil
+}
+
 // GraphDriverName returns the name of the graph driver used by the layer.Store
 func (daemon *Daemon) GraphDriverName() string {
 	return daemon.layerStore.DriverName()
@@ -1301,11 +1340,11 @@ func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
 	return uid, gid
 }
 
-// ImageGetCached returns the most recent created image that is a child
+// GetCachedImage returns the most recent created image that is a child
 // of the image with imgID, that had the same config when it was
 // created. nil is returned if a child cannot be found. An error is
 // returned if the parent image cannot be found.
-func (daemon *Daemon) ImageGetCached(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
+func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) {
 	// Loop on the children of the given image and check the config
 	getMatch := func(siblings []image.ID) (*image.Image, error) {
 		var match *image.Image
@@ -1342,6 +1381,16 @@ func (daemon *Daemon) ImageGetCached(imgID image.ID, config *containertypes.Conf
 	return getMatch(siblings)
 }
 
+// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent`
+// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
+func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) {
+	cache, err := daemon.GetCachedImage(image.ID(imgID), cfg)
+	if cache == nil || err != nil {
+		return "", err
+	}
+	return cache.ID().String(), nil
+}
+
 // tempDir returns the default directory to use for temporary files.
 func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
 	var tmpDir string
deleted file mode 100644
@@ -1,235 +0,0 @@
-package daemonbuilder
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api"
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/daemon"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/httputils"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/urlutil"
-	"github.com/docker/docker/reference"
-	"github.com/docker/docker/registry"
-	"github.com/docker/engine-api/types"
-	"github.com/docker/engine-api/types/container"
-)
-
-// Docker implements builder.Backend for the docker Daemon object.
-type Docker struct {
-	*daemon.Daemon
-}
-
-// ensure Docker implements builder.Backend
-var _ builder.Backend = Docker{}
-
-// Pull tells Docker to pull image referenced by `name`.
-func (d Docker) Pull(name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) {
-	ref, err := reference.ParseNamed(name)
-	if err != nil {
-		return nil, err
-	}
-	ref = reference.WithDefaultTag(ref)
-
-	pullRegistryAuth := &types.AuthConfig{}
-	if len(authConfigs) > 0 {
-		// The request came with a full auth config file, we prefer to use that
-		repoInfo, err := d.Daemon.RegistryService.ResolveRepository(ref)
-		if err != nil {
-			return nil, err
-		}
-
-		resolvedConfig := registry.ResolveAuthConfig(
-			authConfigs,
-			repoInfo.Index,
-		)
-		pullRegistryAuth = &resolvedConfig
-	}
-
-	if err := d.Daemon.PullImage(ref, nil, pullRegistryAuth, ioutils.NopWriteCloser(output)); err != nil {
-		return nil, err
-	}
-	return d.GetImage(name)
-}
-
-// GetImage looks up a Docker image referenced by `name`.
-func (d Docker) GetImage(name string) (builder.Image, error) {
-	img, err := d.Daemon.GetImage(name)
-	if err != nil {
-		return nil, err
-	}
-	return imgWrap{img}, nil
-}
-
-// ContainerUpdateCmd updates Path and Args for the container with ID cID.
-func (d Docker) ContainerUpdateCmd(cID string, cmd []string) error {
-	c, err := d.Daemon.GetContainer(cID)
-	if err != nil {
-		return err
-	}
-	c.Path = cmd[0]
-	c.Args = cmd[1:]
-	return nil
-}
-
-// ContainerAttach attaches streams to the container cID. If stream is true, it streams the output.
-func (d Docker) ContainerAttach(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error {
-	return d.Daemon.ContainerWsAttachWithLogs(cID, &daemon.ContainerWsAttachWithLogsConfig{
-		InStream:  stdin,
-		OutStream: stdout,
-		ErrStream: stderr,
-		Stream:    stream,
-	})
-}
-
-// BuilderCopy copies/extracts a source FileInfo to a destination path inside a container
-// specified by a container object.
-// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already).
-// BuilderCopy should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths.
-func (d Docker) BuilderCopy(cID string, destPath string, src builder.FileInfo, decompress bool) error {
-	srcPath := src.Path()
-	destExists := true
-	destDir := false
-	rootUID, rootGID := d.Daemon.GetRemappedUIDGID()
-
-	// Work in daemon-local OS specific file paths
-	destPath = filepath.FromSlash(destPath)
-
-	c, err := d.Daemon.GetContainer(cID)
-	if err != nil {
-		return err
-	}
-	err = d.Daemon.Mount(c)
-	if err != nil {
-		return err
-	}
-	defer d.Daemon.Unmount(c)
-
-	dest, err := c.GetResourcePath(destPath)
-	if err != nil {
-		return err
-	}
-
-	// Preserve the trailing slash
-	// TODO: why are we appending another path separator if there was already one?
-	if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." {
-		destDir = true
-		dest += string(os.PathSeparator)
-	}
-
-	destPath = dest
-
-	destStat, err := os.Stat(destPath)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err)
-			return err
-		}
-		destExists = false
-	}
-
-	uidMaps, gidMaps := d.Daemon.GetUIDGIDMaps()
-	archiver := &archive.Archiver{
-		Untar:   chrootarchive.Untar,
-		UIDMaps: uidMaps,
-		GIDMaps: gidMaps,
-	}
-
-	if src.IsDir() {
-		// copy as directory
-		if err := archiver.CopyWithTar(srcPath, destPath); err != nil {
-			return err
-		}
-		return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
-	}
-	if decompress && archive.IsArchivePath(srcPath) {
-		// Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file)
-
-		// First try to unpack the source as an archive
-		// to support the untar feature we need to clean up the path a little bit
-		// because tar is very forgiving. First we need to strip off the archive's
-		// filename from the path but this is only added if it does not end in slash
-		tarDest := destPath
-		if strings.HasSuffix(tarDest, string(os.PathSeparator)) {
-			tarDest = filepath.Dir(destPath)
-		}
-
-		// try to successfully untar the orig
-		err := archiver.UntarPath(srcPath, tarDest)
-		if err != nil {
-			logrus.Errorf("Couldn't untar to %s: %v", tarDest, err)
-		}
-		return err
-	}
-
-	// only needed for fixPermissions, but might as well put it before CopyFileWithTar
-	if destDir || (destExists && destStat.IsDir()) {
-		destPath = filepath.Join(destPath, src.Name())
-	}
-
-	if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil {
-		return err
-	}
-	if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil {
-		return err
-	}
-
-	return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists)
-}
-
-// GetCachedImage returns a reference to a cached image whose parent equals `parent`
-// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
-func (d Docker) GetCachedImage(imgID string, cfg *container.Config) (string, error) {
-	cache, err := d.Daemon.ImageGetCached(image.ID(imgID), cfg)
-	if cache == nil || err != nil {
-		return "", err
-	}
-	return cache.ID().String(), nil
-}
-
-// Following is specific to builder contexts
-
-// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used
-// irrespective of user input.
-// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint).
-func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context builder.ModifiableContext, dockerfileName string, err error) {
-	switch {
-	case remoteURL == "":
-		context, err = builder.MakeTarSumContext(r)
-	case urlutil.IsGitURL(remoteURL):
-		context, err = builder.MakeGitContext(remoteURL)
-	case urlutil.IsURL(remoteURL):
-		context, err = builder.MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){
-			httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
-				dockerfile, err := ioutil.ReadAll(rc)
-				if err != nil {
-					return nil, err
-				}
-
-				// dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller
-				// should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input.
-				dockerfileName = api.DefaultDockerfileName
-
-				// TODO: return a context without tarsum
-				return archive.Generate(dockerfileName, string(dockerfile))
-			},
-			// fallback handler (tar context)
-			"": func(rc io.ReadCloser) (io.ReadCloser, error) {
-				return createProgressReader(rc), nil
-			},
-		})
-	default:
-		err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
-	}
-	return
-}
deleted file mode 100644
@@ -1,40 +0,0 @@
-// +build freebsd linux
-
-package daemonbuilder
-
-import (
-	"os"
-	"path/filepath"
-)
-
-func fixPermissions(source, destination string, uid, gid int, destExisted bool) error {
-	// If the destination didn't already exist, or the destination isn't a
-	// directory, then we should Lchown the destination. Otherwise, we shouldn't
-	// Lchown the destination.
-	destStat, err := os.Stat(destination)
-	if err != nil {
-		// This should *never* be reached, because the destination must've already
-		// been created while untar-ing the context.
-		return err
-	}
-	doChownDestination := !destExisted || !destStat.IsDir()
-
-	// We Walk on the source rather than on the destination because we don't
-	// want to change permissions on things we haven't created or modified.
-	return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error {
-		// Do not alter the walk root iff. it existed before, as it doesn't fall under
-		// the domain of "things we should chown".
-		if !doChownDestination && (source == fullpath) {
-			return nil
-		}
-
-		// Path is prefixed by source: substitute with destination instead.
-		cleaned, err := filepath.Rel(source, fullpath)
-		if err != nil {
-			return err
-		}
-
-		fullpath = filepath.Join(destination, cleaned)
-		return os.Lchown(fullpath, uid, gid)
-	})
-}
deleted file mode 100644
@@ -1,18 +0,0 @@
-package daemonbuilder
-
-import (
-	"github.com/docker/docker/image"
-	"github.com/docker/engine-api/types/container"
-)
-
-type imgWrap struct {
-	inner *image.Image
-}
-
-func (img imgWrap) ID() string {
-	return string(img.inner.ID())
-}
-
-func (img imgWrap) Config() *container.Config {
-	return img.inner.Config
-}
@@ -22,6 +22,17 @@ func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostCon
 	return warnings, nil
 }
 
+// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID.
+func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error {
+	c, err := daemon.GetContainer(cID)
+	if err != nil {
+		return err
+	}
+	c.Path = cmd[0]
+	c.Args = cmd[1:]
+	return nil
+}
+
 func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
 	if hostConfig == nil {
 		return nil
@@ -70,6 +70,16 @@ func (img *Image) ID() ID {
 	return img.computedID
 }
 
+// ImageID stringizes ID.
+func (img *Image) ImageID() string {
+	return string(img.ID())
+}
+
+// RunConfig returns the image's container config.
+func (img *Image) RunConfig() *container.Config {
+	return img.Config
+}
+
 // MarshalJSON serializes the image to JSON. It sorts the top-level keys so
 // that JSON that's been manipulated by a push/pull cycle with a legacy
 // registry won't end up with a different key order.