
server/buildfile.go -> builder/builder.go; add maintainers file

Docker-DCO-1.1-Signed-off-by: Erik Hollensbe <github@hollensbe.org> (github: erikh)

Erik Hollensbe authored on 2014/07/29 14:22:58
Showing 6 changed files
new file mode 100644
@@ -0,0 +1,2 @@
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
+Erik Hollensbe <github@hollensbe.org> (@erikh)
0 2
new file mode 100644
... ...
@@ -0,0 +1,911 @@
0
+package builder
1
+
2
+import (
3
+	"crypto/sha256"
4
+	"encoding/hex"
5
+	"encoding/json"
6
+	"errors"
7
+	"fmt"
8
+	"io"
9
+	"io/ioutil"
10
+	"net/url"
11
+	"os"
12
+	"path"
13
+	"path/filepath"
14
+	"reflect"
15
+	"regexp"
16
+	"sort"
17
+	"strings"
18
+	"syscall"
19
+	"time"
20
+
21
+	"github.com/docker/docker/archive"
22
+	"github.com/docker/docker/daemon"
23
+	"github.com/docker/docker/engine"
24
+	"github.com/docker/docker/nat"
25
+	"github.com/docker/docker/pkg/symlink"
26
+	"github.com/docker/docker/pkg/system"
27
+	"github.com/docker/docker/registry"
28
+	"github.com/docker/docker/runconfig"
29
+	"github.com/docker/docker/utils"
30
+)
31
+
32
+var (
33
+	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
34
+)
35
+
36
+type BuildFile interface {
37
+	Build(io.Reader) (string, error)
38
+	CmdFrom(string) error
39
+	CmdRun(string) error
40
+}
41
+
42
+type buildFile struct {
43
+	daemon *daemon.Daemon
44
+	eng    *engine.Engine
45
+
46
+	image      string
47
+	maintainer string
48
+	config     *runconfig.Config
49
+
50
+	contextPath string
51
+	context     *utils.TarSum
52
+
53
+	verbose      bool
54
+	utilizeCache bool
55
+	rm           bool
56
+	forceRm      bool
57
+
58
+	authConfig *registry.AuthConfig
59
+	configFile *registry.ConfigFile
60
+
61
+	tmpContainers map[string]struct{}
62
+	tmpImages     map[string]struct{}
63
+
64
+	outStream io.Writer
65
+	errStream io.Writer
66
+
67
+	// Deprecated, original writer used for ImagePull. To be removed.
68
+	outOld io.Writer
69
+	sf     *utils.StreamFormatter
70
+}
71
+
72
+func (b *buildFile) clearTmp(containers map[string]struct{}) {
73
+	for c := range containers {
74
+		tmp := b.daemon.Get(c)
75
+		if err := b.daemon.Destroy(tmp); err != nil {
76
+			fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
77
+		} else {
78
+			delete(containers, c)
79
+			fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
80
+		}
81
+	}
82
+}
83
+
84
+func (b *buildFile) CmdFrom(name string) error {
85
+	image, err := b.daemon.Repositories().LookupImage(name)
86
+	if err != nil {
87
+		if b.daemon.Graph().IsNotExist(err) {
88
+			remote, tag := utils.ParseRepositoryTag(name)
89
+			pullRegistryAuth := b.authConfig
90
+			if len(b.configFile.Configs) > 0 {
91
+				// The request came with a full auth config file, we prefer to use that
92
+				endpoint, _, err := registry.ResolveRepositoryName(remote)
93
+				if err != nil {
94
+					return err
95
+				}
96
+				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
97
+				pullRegistryAuth = &resolvedAuth
98
+			}
99
+			job := b.eng.Job("pull", remote, tag)
100
+			job.SetenvBool("json", b.sf.Json())
101
+			job.SetenvBool("parallel", true)
102
+			job.SetenvJson("authConfig", pullRegistryAuth)
103
+			job.Stdout.Add(b.outOld)
104
+			if err := job.Run(); err != nil {
105
+				return err
106
+			}
107
+			image, err = b.daemon.Repositories().LookupImage(name)
108
+			if err != nil {
109
+				return err
110
+			}
111
+		} else {
112
+			return err
113
+		}
114
+	}
115
+	b.image = image.ID
116
+	b.config = &runconfig.Config{}
117
+	if image.Config != nil {
118
+		b.config = image.Config
119
+	}
120
+	if b.config.Env == nil || len(b.config.Env) == 0 {
121
+		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+daemon.DefaultPathEnv)
122
+	}
123
+	// Process ONBUILD triggers if they exist
124
+	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
125
+		fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
126
+	}
127
+
128
+	// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
129
+	onBuildTriggers := b.config.OnBuild
130
+	b.config.OnBuild = []string{}
131
+
132
+	for n, step := range onBuildTriggers {
133
+		splitStep := strings.Split(step, " ")
134
+		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
135
+		switch stepInstruction {
136
+		case "ONBUILD":
137
+			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
138
+		case "MAINTAINER", "FROM":
139
+			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
140
+		}
141
+		if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
142
+			return err
143
+		}
144
+	}
145
+	return nil
146
+}
147
+
148
+// The ONBUILD command declares a build instruction to be executed in any future build
149
+// using the current image as a base.
150
+func (b *buildFile) CmdOnbuild(trigger string) error {
151
+	splitTrigger := strings.Split(trigger, " ")
152
+	triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " "))
153
+	switch triggerInstruction {
154
+	case "ONBUILD":
155
+		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
156
+	case "MAINTAINER", "FROM":
157
+		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
158
+	}
159
+	b.config.OnBuild = append(b.config.OnBuild, trigger)
160
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
161
+}
162
+
163
+func (b *buildFile) CmdMaintainer(name string) error {
164
+	b.maintainer = name
165
+	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
166
+}
167
+
168
+// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
169
+// and if so attempts to look up the current `b.image` and `b.config` pair
170
+// in the current server `b.daemon`. If an image is found, probeCache returns
171
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
172
+// is any error, it returns `(false, err)`.
173
+func (b *buildFile) probeCache() (bool, error) {
174
+	if b.utilizeCache {
175
+		if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil {
176
+			return false, err
177
+		} else if cache != nil {
178
+			fmt.Fprintf(b.outStream, " ---> Using cache\n")
179
+			utils.Debugf("[BUILDER] Use cached version")
180
+			b.image = cache.ID
181
+			return true, nil
182
+		} else {
183
+			utils.Debugf("[BUILDER] Cache miss")
184
+		}
185
+	}
186
+	return false, nil
187
+}
188
+
189
+func (b *buildFile) CmdRun(args string) error {
190
+	if b.image == "" {
191
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
192
+	}
193
+	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
194
+	if err != nil {
195
+		return err
196
+	}
197
+
198
+	cmd := b.config.Cmd
199
+	b.config.Cmd = nil
200
+	runconfig.Merge(b.config, config)
201
+
202
+	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
203
+
204
+	utils.Debugf("Command to be executed: %v", b.config.Cmd)
205
+
206
+	hit, err := b.probeCache()
207
+	if err != nil {
208
+		return err
209
+	}
210
+	if hit {
211
+		return nil
212
+	}
213
+
214
+	c, err := b.create()
215
+	if err != nil {
216
+		return err
217
+	}
218
+	// Ensure that we keep the container mounted until the commit
219
+	// to avoid unmounting and then mounting directly again
220
+	c.Mount()
221
+	defer c.Unmount()
222
+
223
+	err = b.run(c)
224
+	if err != nil {
225
+		return err
226
+	}
227
+	if err := b.commit(c.ID, cmd, "run"); err != nil {
228
+		return err
229
+	}
230
+
231
+	return nil
232
+}
233
+
234
+func (b *buildFile) FindEnvKey(key string) int {
235
+	for k, envVar := range b.config.Env {
236
+		envParts := strings.SplitN(envVar, "=", 2)
237
+		if key == envParts[0] {
238
+			return k
239
+		}
240
+	}
241
+	return -1
242
+}
243
+
244
+func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
245
+	exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
246
+	if err != nil {
247
+		return value, err
248
+	}
249
+	matches := exp.FindAllString(value, -1)
250
+	for _, match := range matches {
251
+		match = match[strings.Index(match, "$"):]
252
+		matchKey := strings.Trim(match, "${}")
253
+
254
+		for _, envVar := range b.config.Env {
255
+			envParts := strings.SplitN(envVar, "=", 2)
256
+			envKey := envParts[0]
257
+			envValue := envParts[1]
258
+
259
+			if envKey == matchKey {
260
+				value = strings.Replace(value, match, envValue, -1)
261
+				break
262
+			}
263
+		}
264
+	}
265
+	return value, nil
266
+}
267
+
268
+func (b *buildFile) CmdEnv(args string) error {
269
+	tmp := strings.SplitN(args, " ", 2)
270
+	if len(tmp) != 2 {
271
+		return fmt.Errorf("Invalid ENV format")
272
+	}
273
+	key := strings.Trim(tmp[0], " \t")
274
+	value := strings.Trim(tmp[1], " \t")
275
+
276
+	envKey := b.FindEnvKey(key)
277
+	replacedValue, err := b.ReplaceEnvMatches(value)
278
+	if err != nil {
279
+		return err
280
+	}
281
+	replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
282
+
283
+	if envKey >= 0 {
284
+		b.config.Env[envKey] = replacedVar
285
+	} else {
286
+		b.config.Env = append(b.config.Env, replacedVar)
287
+	}
288
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
289
+}
290
+
291
+func (b *buildFile) buildCmdFromJson(args string) []string {
292
+	var cmd []string
293
+	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
294
+		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
295
+		cmd = []string{"/bin/sh", "-c", args}
296
+	}
297
+	return cmd
298
+}
299
+
300
+func (b *buildFile) CmdCmd(args string) error {
301
+	cmd := b.buildCmdFromJson(args)
302
+	b.config.Cmd = cmd
303
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
304
+		return err
305
+	}
306
+	return nil
307
+}
308
+
309
+func (b *buildFile) CmdEntrypoint(args string) error {
310
+	entrypoint := b.buildCmdFromJson(args)
311
+	b.config.Entrypoint = entrypoint
312
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
313
+		return err
314
+	}
315
+	return nil
316
+}
317
+
318
+func (b *buildFile) CmdExpose(args string) error {
319
+	portsTab := strings.Split(args, " ")
320
+
321
+	if b.config.ExposedPorts == nil {
322
+		b.config.ExposedPorts = make(nat.PortSet)
323
+	}
324
+	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
325
+	if err != nil {
326
+		return err
327
+	}
328
+	for port := range ports {
329
+		if _, exists := b.config.ExposedPorts[port]; !exists {
330
+			b.config.ExposedPorts[port] = struct{}{}
331
+		}
332
+	}
333
+	b.config.PortSpecs = nil
334
+
335
+	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
336
+}
337
+
338
+func (b *buildFile) CmdUser(args string) error {
339
+	b.config.User = args
340
+	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
341
+}
342
+
343
+func (b *buildFile) CmdInsert(args string) error {
344
+	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
345
+}
346
+
347
+func (b *buildFile) CmdCopy(args string) error {
348
+	return b.runContextCommand(args, false, false, "COPY")
349
+}
350
+
351
+func (b *buildFile) CmdWorkdir(workdir string) error {
352
+	if workdir[0] == '/' {
353
+		b.config.WorkingDir = workdir
354
+	} else {
355
+		if b.config.WorkingDir == "" {
356
+			b.config.WorkingDir = "/"
357
+		}
358
+		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
359
+	}
360
+	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
361
+}
362
+
363
+func (b *buildFile) CmdVolume(args string) error {
364
+	if args == "" {
365
+		return fmt.Errorf("Volume cannot be empty")
366
+	}
367
+
368
+	var volume []string
369
+	if err := json.Unmarshal([]byte(args), &volume); err != nil {
370
+		volume = []string{args}
371
+	}
372
+	if b.config.Volumes == nil {
373
+		b.config.Volumes = map[string]struct{}{}
374
+	}
375
+	for _, v := range volume {
376
+		b.config.Volumes[v] = struct{}{}
377
+	}
378
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
379
+		return err
380
+	}
381
+	return nil
382
+}
383
+
384
+func (b *buildFile) checkPathForAddition(orig string) error {
385
+	origPath := path.Join(b.contextPath, orig)
386
+	if p, err := filepath.EvalSymlinks(origPath); err != nil {
387
+		if os.IsNotExist(err) {
388
+			return fmt.Errorf("%s: no such file or directory", orig)
389
+		}
390
+		return err
391
+	} else {
392
+		origPath = p
393
+	}
394
+	if !strings.HasPrefix(origPath, b.contextPath) {
395
+		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
396
+	}
397
+	_, err := os.Stat(origPath)
398
+	if err != nil {
399
+		if os.IsNotExist(err) {
400
+			return fmt.Errorf("%s: no such file or directory", orig)
401
+		}
402
+		return err
403
+	}
404
+	return nil
405
+}
406
+
407
+func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
408
+	var (
409
+		err        error
410
+		destExists = true
411
+		origPath   = path.Join(b.contextPath, orig)
412
+		destPath   = path.Join(container.RootfsPath(), dest)
413
+	)
414
+
415
+	if destPath != container.RootfsPath() {
416
+		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
417
+		if err != nil {
418
+			return err
419
+		}
420
+	}
421
+
422
+	// Preserve the trailing '/'
423
+	if strings.HasSuffix(dest, "/") || dest == "." {
424
+		destPath = destPath + "/"
425
+	}
426
+
427
+	destStat, err := os.Stat(destPath)
428
+	if err != nil {
429
+		if !os.IsNotExist(err) {
430
+			return err
431
+		}
432
+		destExists = false
433
+	}
434
+
435
+	fi, err := os.Stat(origPath)
436
+	if err != nil {
437
+		if os.IsNotExist(err) {
438
+			return fmt.Errorf("%s: no such file or directory", orig)
439
+		}
440
+		return err
441
+	}
442
+
443
+	if fi.IsDir() {
444
+		return copyAsDirectory(origPath, destPath, destExists)
445
+	}
446
+
447
+	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
448
+	if decompress {
449
+		// First try to unpack the source as an archive
450
+		// to support the untar feature we need to clean up the path a little bit
451
+		// because tar is very forgiving.  First we need to strip off the archive's
452
+		// filename from the path but this is only added if it does not end in / .
453
+		tarDest := destPath
454
+		if strings.HasSuffix(tarDest, "/") {
455
+			tarDest = filepath.Dir(destPath)
456
+		}
457
+
458
+		// try to successfully untar the orig
459
+		if err := archive.UntarPath(origPath, tarDest); err == nil {
460
+			return nil
461
+		} else if err != io.EOF {
462
+			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
463
+		}
464
+	}
465
+
466
+	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
467
+		return err
468
+	}
469
+	if err := archive.CopyWithTar(origPath, destPath); err != nil {
470
+		return err
471
+	}
472
+
473
+	resPath := destPath
474
+	if destExists && destStat.IsDir() {
475
+		resPath = path.Join(destPath, path.Base(origPath))
476
+	}
477
+
478
+	return fixPermissions(resPath, 0, 0)
479
+}
480
+
481
+func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
482
+	if b.context == nil {
483
+		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
484
+	}
485
+	tmp := strings.SplitN(args, " ", 2)
486
+	if len(tmp) != 2 {
487
+		return fmt.Errorf("Invalid %s format", cmdName)
488
+	}
489
+
490
+	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
491
+	if err != nil {
492
+		return err
493
+	}
494
+
495
+	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
496
+	if err != nil {
497
+		return err
498
+	}
499
+
500
+	cmd := b.config.Cmd
501
+	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
502
+	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
503
+	b.config.Image = b.image
504
+
505
+	var (
506
+		origPath   = orig
507
+		destPath   = dest
508
+		remoteHash string
509
+		isRemote   bool
510
+		decompress = true
511
+	)
512
+
513
+	isRemote = utils.IsURL(orig)
514
+	if isRemote && !allowRemote {
515
+		return fmt.Errorf("Source can't be an URL for %s", cmdName)
516
+	} else if utils.IsURL(orig) {
517
+		// Initiate the download
518
+		resp, err := utils.Download(orig)
519
+		if err != nil {
520
+			return err
521
+		}
522
+
523
+		// Create a tmp dir
524
+		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
525
+		if err != nil {
526
+			return err
527
+		}
528
+
529
+		// Create a tmp file within our tmp dir
530
+		tmpFileName := path.Join(tmpDirName, "tmp")
531
+		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
532
+		if err != nil {
533
+			return err
534
+		}
535
+		defer os.RemoveAll(tmpDirName)
536
+
537
+		// Download and dump result to tmp file
538
+		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
539
+			tmpFile.Close()
540
+			return err
541
+		}
542
+		tmpFile.Close()
543
+
544
+		// Remove the mtime of the newly created tmp file
545
+		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
546
+			return err
547
+		}
548
+
549
+		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
550
+
551
+		// Process the checksum
552
+		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
553
+		if err != nil {
554
+			return err
555
+		}
556
+		tarSum := &utils.TarSum{Reader: r, DisableCompression: true}
557
+		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
558
+			return err
559
+		}
560
+		remoteHash = tarSum.Sum(nil)
561
+		r.Close()
562
+
563
+		// If the destination is a directory, figure out the filename.
564
+		if strings.HasSuffix(dest, "/") {
565
+			u, err := url.Parse(orig)
566
+			if err != nil {
567
+				return err
568
+			}
569
+			path := u.Path
570
+			if strings.HasSuffix(path, "/") {
571
+				path = path[:len(path)-1]
572
+			}
573
+			parts := strings.Split(path, "/")
574
+			filename := parts[len(parts)-1]
575
+			if filename == "" {
576
+				return fmt.Errorf("cannot determine filename from url: %s", u)
577
+			}
578
+			destPath = dest + filename
579
+		}
580
+	}
581
+
582
+	if err := b.checkPathForAddition(origPath); err != nil {
583
+		return err
584
+	}
585
+
586
+	// Hash path and check the cache
587
+	if b.utilizeCache {
588
+		var (
589
+			hash string
590
+			sums = b.context.GetSums()
591
+		)
592
+
593
+		if remoteHash != "" {
594
+			hash = remoteHash
595
+		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
596
+			return err
597
+		} else if fi.IsDir() {
598
+			var subfiles []string
599
+			for file, sum := range sums {
600
+				absFile := path.Join(b.contextPath, file)
601
+				absOrigPath := path.Join(b.contextPath, origPath)
602
+				if strings.HasPrefix(absFile, absOrigPath) {
603
+					subfiles = append(subfiles, sum)
604
+				}
605
+			}
606
+			sort.Strings(subfiles)
607
+			hasher := sha256.New()
608
+			hasher.Write([]byte(strings.Join(subfiles, ",")))
609
+			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
610
+		} else {
611
+			if origPath[0] == '/' && len(origPath) > 1 {
612
+				origPath = origPath[1:]
613
+			}
614
+			origPath = strings.TrimPrefix(origPath, "./")
615
+			if h, ok := sums[origPath]; ok {
616
+				hash = "file:" + h
617
+			}
618
+		}
619
+		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
620
+		hit, err := b.probeCache()
621
+		if err != nil {
622
+			return err
623
+		}
624
+		// If we do not have a hash, never use the cache
625
+		if hit && hash != "" {
626
+			return nil
627
+		}
628
+	}
629
+
630
+	// Create the container
631
+	container, _, err := b.daemon.Create(b.config, "")
632
+	if err != nil {
633
+		return err
634
+	}
635
+	b.tmpContainers[container.ID] = struct{}{}
636
+
637
+	if err := container.Mount(); err != nil {
638
+		return err
639
+	}
640
+	defer container.Unmount()
641
+
642
+	if !allowDecompression || isRemote {
643
+		decompress = false
644
+	}
645
+	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
646
+		return err
647
+	}
648
+
649
+	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
650
+		return err
651
+	}
652
+	return nil
653
+}
654
+
655
+func (b *buildFile) CmdAdd(args string) error {
656
+	return b.runContextCommand(args, true, true, "ADD")
657
+}
658
+
659
+func (b *buildFile) create() (*daemon.Container, error) {
660
+	if b.image == "" {
661
+		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
662
+	}
663
+	b.config.Image = b.image
664
+
665
+	// Create the container
666
+	c, _, err := b.daemon.Create(b.config, "")
667
+	if err != nil {
668
+		return nil, err
669
+	}
670
+	b.tmpContainers[c.ID] = struct{}{}
671
+	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
672
+
673
+	// override the entry point that may have been picked up from the base image
674
+	c.Path = b.config.Cmd[0]
675
+	c.Args = b.config.Cmd[1:]
676
+
677
+	return c, nil
678
+}
679
+
680
+func (b *buildFile) run(c *daemon.Container) error {
681
+	var errCh chan error
682
+	if b.verbose {
683
+		errCh = utils.Go(func() error {
684
+			return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream)
685
+		})
686
+	}
687
+
688
+	//start the container
689
+	if err := c.Start(); err != nil {
690
+		return err
691
+	}
692
+
693
+	if errCh != nil {
694
+		if err := <-errCh; err != nil {
695
+			return err
696
+		}
697
+	}
698
+
699
+	// Wait for it to finish
700
+	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
701
+		err := &utils.JSONError{
702
+			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
703
+			Code:    ret,
704
+		}
705
+		return err
706
+	}
707
+
708
+	return nil
709
+}
710
+
711
+// Commit the container <id> with the autorun command <autoCmd>
712
+func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
713
+	if b.image == "" {
714
+		return fmt.Errorf("Please provide a source image with `from` prior to commit")
715
+	}
716
+	b.config.Image = b.image
717
+	if id == "" {
718
+		cmd := b.config.Cmd
719
+		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
720
+		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
721
+
722
+		hit, err := b.probeCache()
723
+		if err != nil {
724
+			return err
725
+		}
726
+		if hit {
727
+			return nil
728
+		}
729
+
730
+		container, warnings, err := b.daemon.Create(b.config, "")
731
+		if err != nil {
732
+			return err
733
+		}
734
+		for _, warning := range warnings {
735
+			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
736
+		}
737
+		b.tmpContainers[container.ID] = struct{}{}
738
+		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
739
+		id = container.ID
740
+
741
+		if err := container.Mount(); err != nil {
742
+			return err
743
+		}
744
+		defer container.Unmount()
745
+	}
746
+	container := b.daemon.Get(id)
747
+	if container == nil {
748
+		return fmt.Errorf("An error occured while creating the container")
749
+	}
750
+
751
+	// Note: Actually copy the struct
752
+	autoConfig := *b.config
753
+	autoConfig.Cmd = autoCmd
754
+	// Commit the container
755
+	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
756
+	if err != nil {
757
+		return err
758
+	}
759
+	b.tmpImages[image.ID] = struct{}{}
760
+	b.image = image.ID
761
+	return nil
762
+}
763
+
764
+// Long lines can be split with a backslash
765
+var lineContinuation = regexp.MustCompile(`\\\s*\n`)
766
+
767
+func (b *buildFile) Build(context io.Reader) (string, error) {
768
+	tmpdirPath, err := ioutil.TempDir("", "docker-build")
769
+	if err != nil {
770
+		return "", err
771
+	}
772
+
773
+	decompressedStream, err := archive.DecompressStream(context)
774
+	if err != nil {
775
+		return "", err
776
+	}
777
+
778
+	b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
779
+	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
780
+		return "", err
781
+	}
782
+	defer os.RemoveAll(tmpdirPath)
783
+
784
+	b.contextPath = tmpdirPath
785
+	filename := path.Join(tmpdirPath, "Dockerfile")
786
+	if _, err := os.Stat(filename); os.IsNotExist(err) {
787
+		return "", fmt.Errorf("Can't build a directory with no Dockerfile")
788
+	}
789
+	fileBytes, err := ioutil.ReadFile(filename)
790
+	if err != nil {
791
+		return "", err
792
+	}
793
+	if len(fileBytes) == 0 {
794
+		return "", ErrDockerfileEmpty
795
+	}
796
+	var (
797
+		dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
798
+		stepN      = 0
799
+	)
800
+	for _, line := range strings.Split(dockerfile, "\n") {
801
+		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
802
+		if len(line) == 0 {
803
+			continue
804
+		}
805
+		if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
806
+			if b.forceRm {
807
+				b.clearTmp(b.tmpContainers)
808
+			}
809
+			return "", err
810
+		} else if b.rm {
811
+			b.clearTmp(b.tmpContainers)
812
+		}
813
+		stepN += 1
814
+	}
815
+	if b.image != "" {
816
+		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
817
+		return b.image, nil
818
+	}
819
+	return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
820
+}
821
+
822
+// BuildStep parses a single build step from `instruction` and executes it in the current context.
823
+func (b *buildFile) BuildStep(name, expression string) error {
824
+	fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
825
+	tmp := strings.SplitN(expression, " ", 2)
826
+	if len(tmp) != 2 {
827
+		return fmt.Errorf("Invalid Dockerfile format")
828
+	}
829
+	instruction := strings.ToLower(strings.Trim(tmp[0], " "))
830
+	arguments := strings.Trim(tmp[1], " ")
831
+
832
+	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
833
+	if !exists {
834
+		fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
835
+		return nil
836
+	}
837
+
838
+	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
839
+	if ret != nil {
840
+		return ret.(error)
841
+	}
842
+
843
+	fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
844
+	return nil
845
+}
846
+
847
+func stripComments(raw []byte) string {
848
+	var (
849
+		out   []string
850
+		lines = strings.Split(string(raw), "\n")
851
+	)
852
+	for _, l := range lines {
853
+		if len(l) == 0 || l[0] == '#' {
854
+			continue
855
+		}
856
+		out = append(out, l)
857
+	}
858
+	return strings.Join(out, "\n")
859
+}
860
+
861
+func copyAsDirectory(source, destination string, destinationExists bool) error {
862
+	if err := archive.CopyWithTar(source, destination); err != nil {
863
+		return err
864
+	}
865
+
866
+	if destinationExists {
867
+		files, err := ioutil.ReadDir(source)
868
+		if err != nil {
869
+			return err
870
+		}
871
+
872
+		for _, file := range files {
873
+			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
874
+				return err
875
+			}
876
+		}
877
+		return nil
878
+	}
879
+
880
+	return fixPermissions(destination, 0, 0)
881
+}
882
+
883
+func fixPermissions(destination string, uid, gid int) error {
884
+	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
885
+		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
886
+			return err
887
+		}
888
+		return nil
889
+	})
890
+}
891
+
892
+func NewBuildFile(d *daemon.Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
893
+	return &buildFile{
894
+		daemon:        d,
895
+		eng:           eng,
896
+		config:        &runconfig.Config{},
897
+		outStream:     outStream,
898
+		errStream:     errStream,
899
+		tmpContainers: make(map[string]struct{}),
900
+		tmpImages:     make(map[string]struct{}),
901
+		verbose:       verbose,
902
+		utilizeCache:  utilizeCache,
903
+		rm:            rm,
904
+		forceRm:       forceRm,
905
+		sf:            sf,
906
+		authConfig:    auth,
907
+		configFile:    authConfigFile,
908
+		outOld:        outOld,
909
+	}
910
+}
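
The new package's exported surface is small: BuildFile, NewBuildFile, and ErrDockerfileEmpty. As a reading aid only (not part of this commit), here is a minimal caller sketch against the signatures above; the wrapper function, its parameter choices, and the flag values are illustrative assumptions, roughly mirroring what server.Build is changed to do further down in this diff.

// Illustrative sketch, not from this commit: drive builder.NewBuildFile against
// an already-wired daemon and engine, as the server package does after this change.
package example

import (
	"io"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/registry"
	"github.com/docker/docker/utils"
)

func buildFromTar(d *daemon.Daemon, eng *engine.Engine, context io.Reader, out io.Writer) (string, error) {
	sf := utils.NewStreamFormatter(false) // plain-text progress; server.Build keys this off the "json" job env
	b := builder.NewBuildFile(d, eng,
		out, out, // outStream, errStream
		true,     // verbose: stream the output of each RUN container
		true,     // utilizeCache
		true,     // rm: remove intermediate containers on success
		false,    // forceRm
		out, sf,  // outOld (deprecated pull writer), stream formatter
		&registry.AuthConfig{}, &registry.ConfigFile{}) // anonymous pulls, no config file
	// context must be a (possibly compressed) tar stream containing a Dockerfile.
	return b.Build(context)
}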
... ...
@@ -513,7 +513,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
 	if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
 		// FIXME: here is race condition between two RUN instructions in Dockerfile
 		// because they share same runconfig and change image. Must be fixed
-		// in server/buildfile.go
+		// in builder/builder.go
 		if err := container.toDisk(); err != nil {
 			utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
 		}
... ...
@@ -1099,3 +1099,35 @@ func (daemon *Daemon) checkLocaldns() error {
 	}
 	return nil
 }
+
+func (daemon *Daemon) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
+	// Retrieve all images
+	images, err := daemon.Graph().Map()
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the tree in a map of map (map[parentId][childId])
+	imageMap := make(map[string]map[string]struct{})
+	for _, img := range images {
+		if _, exists := imageMap[img.Parent]; !exists {
+			imageMap[img.Parent] = make(map[string]struct{})
+		}
+		imageMap[img.Parent][img.ID] = struct{}{}
+	}
+
+	// Loop on the children of the given image and check the config
+	var match *image.Image
+	for elem := range imageMap[imgID] {
+		img, err := daemon.Graph().Get(elem)
+		if err != nil {
+			return nil, err
+		}
+		if runconfig.Compare(&img.ContainerConfig, config) {
+			if match == nil || match.Created.Before(img.Created) {
+				match = img
+			}
+		}
+	}
+	return match, nil
+}
deleted file mode 100644
... ...
@@ -1,910 +0,0 @@
1
-package server
2
-
3
-import (
4
-	"crypto/sha256"
5
-	"encoding/hex"
6
-	"encoding/json"
7
-	"errors"
8
-	"fmt"
9
-	"io"
10
-	"io/ioutil"
11
-	"net/url"
12
-	"os"
13
-	"path"
14
-	"path/filepath"
15
-	"reflect"
16
-	"regexp"
17
-	"sort"
18
-	"strings"
19
-	"syscall"
20
-	"time"
21
-
22
-	"github.com/docker/docker/archive"
23
-	"github.com/docker/docker/daemon"
24
-	"github.com/docker/docker/nat"
25
-	"github.com/docker/docker/pkg/symlink"
26
-	"github.com/docker/docker/pkg/system"
27
-	"github.com/docker/docker/registry"
28
-	"github.com/docker/docker/runconfig"
29
-	"github.com/docker/docker/utils"
30
-)
31
-
32
-var (
33
-	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
34
-)
35
-
36
-type BuildFile interface {
37
-	Build(io.Reader) (string, error)
38
-	CmdFrom(string) error
39
-	CmdRun(string) error
40
-}
41
-
42
-type buildFile struct {
43
-	daemon *daemon.Daemon
44
-	srv    *Server
45
-
46
-	image      string
47
-	maintainer string
48
-	config     *runconfig.Config
49
-
50
-	contextPath string
51
-	context     *utils.TarSum
52
-
53
-	verbose      bool
54
-	utilizeCache bool
55
-	rm           bool
56
-	forceRm      bool
57
-
58
-	authConfig *registry.AuthConfig
59
-	configFile *registry.ConfigFile
60
-
61
-	tmpContainers map[string]struct{}
62
-	tmpImages     map[string]struct{}
63
-
64
-	outStream io.Writer
65
-	errStream io.Writer
66
-
67
-	// Deprecated, original writer used for ImagePull. To be removed.
68
-	outOld io.Writer
69
-	sf     *utils.StreamFormatter
70
-}
71
-
72
-func (b *buildFile) clearTmp(containers map[string]struct{}) {
73
-	for c := range containers {
74
-		tmp := b.daemon.Get(c)
75
-		if err := b.daemon.Destroy(tmp); err != nil {
76
-			fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
77
-		} else {
78
-			delete(containers, c)
79
-			fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
80
-		}
81
-	}
82
-}
83
-
84
-func (b *buildFile) CmdFrom(name string) error {
85
-	image, err := b.daemon.Repositories().LookupImage(name)
86
-	if err != nil {
87
-		if b.daemon.Graph().IsNotExist(err) {
88
-			remote, tag := utils.ParseRepositoryTag(name)
89
-			pullRegistryAuth := b.authConfig
90
-			if len(b.configFile.Configs) > 0 {
91
-				// The request came with a full auth config file, we prefer to use that
92
-				endpoint, _, err := registry.ResolveRepositoryName(remote)
93
-				if err != nil {
94
-					return err
95
-				}
96
-				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
97
-				pullRegistryAuth = &resolvedAuth
98
-			}
99
-			job := b.srv.Eng.Job("pull", remote, tag)
100
-			job.SetenvBool("json", b.sf.Json())
101
-			job.SetenvBool("parallel", true)
102
-			job.SetenvJson("authConfig", pullRegistryAuth)
103
-			job.Stdout.Add(b.outOld)
104
-			if err := job.Run(); err != nil {
105
-				return err
106
-			}
107
-			image, err = b.daemon.Repositories().LookupImage(name)
108
-			if err != nil {
109
-				return err
110
-			}
111
-		} else {
112
-			return err
113
-		}
114
-	}
115
-	b.image = image.ID
116
-	b.config = &runconfig.Config{}
117
-	if image.Config != nil {
118
-		b.config = image.Config
119
-	}
120
-	if b.config.Env == nil || len(b.config.Env) == 0 {
121
-		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+daemon.DefaultPathEnv)
122
-	}
123
-	// Process ONBUILD triggers if they exist
124
-	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
125
-		fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
126
-	}
127
-
128
-	// Copy the ONBUILD triggers, and remove them from the config, since the config will be commited.
129
-	onBuildTriggers := b.config.OnBuild
130
-	b.config.OnBuild = []string{}
131
-
132
-	for n, step := range onBuildTriggers {
133
-		splitStep := strings.Split(step, " ")
134
-		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
135
-		switch stepInstruction {
136
-		case "ONBUILD":
137
-			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
138
-		case "MAINTAINER", "FROM":
139
-			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
140
-		}
141
-		if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
142
-			return err
143
-		}
144
-	}
145
-	return nil
146
-}
147
-
148
-// The ONBUILD command declares a build instruction to be executed in any future build
149
-// using the current image as a base.
150
-func (b *buildFile) CmdOnbuild(trigger string) error {
151
-	splitTrigger := strings.Split(trigger, " ")
152
-	triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " "))
153
-	switch triggerInstruction {
154
-	case "ONBUILD":
155
-		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
156
-	case "MAINTAINER", "FROM":
157
-		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
158
-	}
159
-	b.config.OnBuild = append(b.config.OnBuild, trigger)
160
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
161
-}
162
-
163
-func (b *buildFile) CmdMaintainer(name string) error {
164
-	b.maintainer = name
165
-	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
166
-}
167
-
168
-// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
169
-// and if so attempts to look up the current `b.image` and `b.config` pair
170
-// in the current server `b.srv`. If an image is found, probeCache returns
171
-// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
172
-// is any error, it returns `(false, err)`.
173
-func (b *buildFile) probeCache() (bool, error) {
174
-	if b.utilizeCache {
175
-		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
176
-			return false, err
177
-		} else if cache != nil {
178
-			fmt.Fprintf(b.outStream, " ---> Using cache\n")
179
-			utils.Debugf("[BUILDER] Use cached version")
180
-			b.image = cache.ID
181
-			return true, nil
182
-		} else {
183
-			utils.Debugf("[BUILDER] Cache miss")
184
-		}
185
-	}
186
-	return false, nil
187
-}
188
-
189
-func (b *buildFile) CmdRun(args string) error {
190
-	if b.image == "" {
191
-		return fmt.Errorf("Please provide a source image with `from` prior to run")
192
-	}
193
-	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
194
-	if err != nil {
195
-		return err
196
-	}
197
-
198
-	cmd := b.config.Cmd
199
-	b.config.Cmd = nil
200
-	runconfig.Merge(b.config, config)
201
-
202
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
203
-
204
-	utils.Debugf("Command to be executed: %v", b.config.Cmd)
205
-
206
-	hit, err := b.probeCache()
207
-	if err != nil {
208
-		return err
209
-	}
210
-	if hit {
211
-		return nil
212
-	}
213
-
214
-	c, err := b.create()
215
-	if err != nil {
216
-		return err
217
-	}
218
-	// Ensure that we keep the container mounted until the commit
219
-	// to avoid unmounting and then mounting directly again
220
-	c.Mount()
221
-	defer c.Unmount()
222
-
223
-	err = b.run(c)
224
-	if err != nil {
225
-		return err
226
-	}
227
-	if err := b.commit(c.ID, cmd, "run"); err != nil {
228
-		return err
229
-	}
230
-
231
-	return nil
232
-}
233
-
234
-func (b *buildFile) FindEnvKey(key string) int {
235
-	for k, envVar := range b.config.Env {
236
-		envParts := strings.SplitN(envVar, "=", 2)
237
-		if key == envParts[0] {
238
-			return k
239
-		}
240
-	}
241
-	return -1
242
-}
243
-
244
-func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
245
-	exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
246
-	if err != nil {
247
-		return value, err
248
-	}
249
-	matches := exp.FindAllString(value, -1)
250
-	for _, match := range matches {
251
-		match = match[strings.Index(match, "$"):]
252
-		matchKey := strings.Trim(match, "${}")
253
-
254
-		for _, envVar := range b.config.Env {
255
-			envParts := strings.SplitN(envVar, "=", 2)
256
-			envKey := envParts[0]
257
-			envValue := envParts[1]
258
-
259
-			if envKey == matchKey {
260
-				value = strings.Replace(value, match, envValue, -1)
261
-				break
262
-			}
263
-		}
264
-	}
265
-	return value, nil
266
-}
267
-
268
-func (b *buildFile) CmdEnv(args string) error {
269
-	tmp := strings.SplitN(args, " ", 2)
270
-	if len(tmp) != 2 {
271
-		return fmt.Errorf("Invalid ENV format")
272
-	}
273
-	key := strings.Trim(tmp[0], " \t")
274
-	value := strings.Trim(tmp[1], " \t")
275
-
276
-	envKey := b.FindEnvKey(key)
277
-	replacedValue, err := b.ReplaceEnvMatches(value)
278
-	if err != nil {
279
-		return err
280
-	}
281
-	replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
282
-
283
-	if envKey >= 0 {
284
-		b.config.Env[envKey] = replacedVar
285
-	} else {
286
-		b.config.Env = append(b.config.Env, replacedVar)
287
-	}
288
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
289
-}
290
-
291
-func (b *buildFile) buildCmdFromJson(args string) []string {
292
-	var cmd []string
293
-	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
294
-		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
295
-		cmd = []string{"/bin/sh", "-c", args}
296
-	}
297
-	return cmd
298
-}
299
-
300
-func (b *buildFile) CmdCmd(args string) error {
301
-	cmd := b.buildCmdFromJson(args)
302
-	b.config.Cmd = cmd
303
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
304
-		return err
305
-	}
306
-	return nil
307
-}
308
-
309
-func (b *buildFile) CmdEntrypoint(args string) error {
310
-	entrypoint := b.buildCmdFromJson(args)
311
-	b.config.Entrypoint = entrypoint
312
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
313
-		return err
314
-	}
315
-	return nil
316
-}
317
-
318
-func (b *buildFile) CmdExpose(args string) error {
319
-	portsTab := strings.Split(args, " ")
320
-
321
-	if b.config.ExposedPorts == nil {
322
-		b.config.ExposedPorts = make(nat.PortSet)
323
-	}
324
-	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
325
-	if err != nil {
326
-		return err
327
-	}
328
-	for port := range ports {
329
-		if _, exists := b.config.ExposedPorts[port]; !exists {
330
-			b.config.ExposedPorts[port] = struct{}{}
331
-		}
332
-	}
333
-	b.config.PortSpecs = nil
334
-
335
-	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
336
-}
337
-
338
-func (b *buildFile) CmdUser(args string) error {
339
-	b.config.User = args
340
-	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
341
-}
342
-
343
-func (b *buildFile) CmdInsert(args string) error {
344
-	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
345
-}
346
-
347
-func (b *buildFile) CmdCopy(args string) error {
348
-	return b.runContextCommand(args, false, false, "COPY")
349
-}
350
-
351
-func (b *buildFile) CmdWorkdir(workdir string) error {
352
-	if workdir[0] == '/' {
353
-		b.config.WorkingDir = workdir
354
-	} else {
355
-		if b.config.WorkingDir == "" {
356
-			b.config.WorkingDir = "/"
357
-		}
358
-		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
359
-	}
360
-	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
361
-}
362
-
363
-func (b *buildFile) CmdVolume(args string) error {
364
-	if args == "" {
365
-		return fmt.Errorf("Volume cannot be empty")
366
-	}
367
-
368
-	var volume []string
369
-	if err := json.Unmarshal([]byte(args), &volume); err != nil {
370
-		volume = []string{args}
371
-	}
372
-	if b.config.Volumes == nil {
373
-		b.config.Volumes = map[string]struct{}{}
374
-	}
375
-	for _, v := range volume {
376
-		b.config.Volumes[v] = struct{}{}
377
-	}
378
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
379
-		return err
380
-	}
381
-	return nil
382
-}
383
-
384
-func (b *buildFile) checkPathForAddition(orig string) error {
385
-	origPath := path.Join(b.contextPath, orig)
386
-	if p, err := filepath.EvalSymlinks(origPath); err != nil {
387
-		if os.IsNotExist(err) {
388
-			return fmt.Errorf("%s: no such file or directory", orig)
389
-		}
390
-		return err
391
-	} else {
392
-		origPath = p
393
-	}
394
-	if !strings.HasPrefix(origPath, b.contextPath) {
395
-		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
396
-	}
397
-	_, err := os.Stat(origPath)
398
-	if err != nil {
399
-		if os.IsNotExist(err) {
400
-			return fmt.Errorf("%s: no such file or directory", orig)
401
-		}
402
-		return err
403
-	}
404
-	return nil
405
-}
406
-
407
-func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
408
-	var (
409
-		err        error
410
-		destExists = true
411
-		origPath   = path.Join(b.contextPath, orig)
412
-		destPath   = path.Join(container.RootfsPath(), dest)
413
-	)
414
-
415
-	if destPath != container.RootfsPath() {
416
-		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
417
-		if err != nil {
418
-			return err
419
-		}
420
-	}
421
-
422
-	// Preserve the trailing '/'
423
-	if strings.HasSuffix(dest, "/") || dest == "." {
424
-		destPath = destPath + "/"
425
-	}
426
-
427
-	destStat, err := os.Stat(destPath)
428
-	if err != nil {
429
-		if !os.IsNotExist(err) {
430
-			return err
431
-		}
432
-		destExists = false
433
-	}
434
-
435
-	fi, err := os.Stat(origPath)
436
-	if err != nil {
437
-		if os.IsNotExist(err) {
438
-			return fmt.Errorf("%s: no such file or directory", orig)
439
-		}
440
-		return err
441
-	}
442
-
443
-	if fi.IsDir() {
444
-		return copyAsDirectory(origPath, destPath, destExists)
445
-	}
446
-
447
-	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
448
-	if decompress {
449
-		// First try to unpack the source as an archive
450
-		// to support the untar feature we need to clean up the path a little bit
451
-		// because tar is very forgiving.  First we need to strip off the archive's
452
-		// filename from the path but this is only added if it does not end in / .
453
-		tarDest := destPath
454
-		if strings.HasSuffix(tarDest, "/") {
455
-			tarDest = filepath.Dir(destPath)
456
-		}
457
-
458
-		// try to successfully untar the orig
459
-		if err := archive.UntarPath(origPath, tarDest); err == nil {
460
-			return nil
461
-		} else if err != io.EOF {
462
-			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
463
-		}
464
-	}
465
-
466
-	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
467
-		return err
468
-	}
469
-	if err := archive.CopyWithTar(origPath, destPath); err != nil {
470
-		return err
471
-	}
472
-
473
-	resPath := destPath
474
-	if destExists && destStat.IsDir() {
475
-		resPath = path.Join(destPath, path.Base(origPath))
476
-	}
477
-
478
-	return fixPermissions(resPath, 0, 0)
479
-}
480
-
481
-func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
482
-	if b.context == nil {
483
-		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
484
-	}
485
-	tmp := strings.SplitN(args, " ", 2)
486
-	if len(tmp) != 2 {
487
-		return fmt.Errorf("Invalid %s format", cmdName)
488
-	}
489
-
490
-	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
491
-	if err != nil {
492
-		return err
493
-	}
494
-
495
-	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
496
-	if err != nil {
497
-		return err
498
-	}
499
-
500
-	cmd := b.config.Cmd
501
-	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
502
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
503
-	b.config.Image = b.image
504
-
505
-	var (
506
-		origPath   = orig
507
-		destPath   = dest
508
-		remoteHash string
509
-		isRemote   bool
510
-		decompress = true
511
-	)
512
-
513
-	isRemote = utils.IsURL(orig)
514
-	if isRemote && !allowRemote {
515
-		return fmt.Errorf("Source can't be an URL for %s", cmdName)
516
-	} else if utils.IsURL(orig) {
517
-		// Initiate the download
518
-		resp, err := utils.Download(orig)
519
-		if err != nil {
520
-			return err
521
-		}
522
-
523
-		// Create a tmp dir
524
-		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
525
-		if err != nil {
526
-			return err
527
-		}
528
-
529
-		// Create a tmp file within our tmp dir
530
-		tmpFileName := path.Join(tmpDirName, "tmp")
531
-		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
532
-		if err != nil {
533
-			return err
534
-		}
535
-		defer os.RemoveAll(tmpDirName)
536
-
537
-		// Download and dump result to tmp file
538
-		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
539
-			tmpFile.Close()
540
-			return err
541
-		}
542
-		tmpFile.Close()
543
-
544
-		// Remove the mtime of the newly created tmp file
545
-		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
546
-			return err
547
-		}
548
-
549
-		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
550
-
551
-		// Process the checksum
552
-		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
553
-		if err != nil {
554
-			return err
555
-		}
556
-		tarSum := &utils.TarSum{Reader: r, DisableCompression: true}
557
-		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
558
-			return err
559
-		}
560
-		remoteHash = tarSum.Sum(nil)
561
-		r.Close()
562
-
563
-		// If the destination is a directory, figure out the filename.
564
-		if strings.HasSuffix(dest, "/") {
565
-			u, err := url.Parse(orig)
566
-			if err != nil {
567
-				return err
568
-			}
569
-			path := u.Path
570
-			if strings.HasSuffix(path, "/") {
571
-				path = path[:len(path)-1]
572
-			}
573
-			parts := strings.Split(path, "/")
574
-			filename := parts[len(parts)-1]
575
-			if filename == "" {
576
-				return fmt.Errorf("cannot determine filename from url: %s", u)
577
-			}
578
-			destPath = dest + filename
579
-		}
580
-	}
581
-
582
-	if err := b.checkPathForAddition(origPath); err != nil {
583
-		return err
584
-	}
585
-
586
-	// Hash path and check the cache
587
-	if b.utilizeCache {
588
-		var (
589
-			hash string
590
-			sums = b.context.GetSums()
591
-		)
592
-
593
-		if remoteHash != "" {
594
-			hash = remoteHash
595
-		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
596
-			return err
597
-		} else if fi.IsDir() {
598
-			var subfiles []string
599
-			for file, sum := range sums {
600
-				absFile := path.Join(b.contextPath, file)
601
-				absOrigPath := path.Join(b.contextPath, origPath)
602
-				if strings.HasPrefix(absFile, absOrigPath) {
603
-					subfiles = append(subfiles, sum)
604
-				}
605
-			}
606
-			sort.Strings(subfiles)
607
-			hasher := sha256.New()
608
-			hasher.Write([]byte(strings.Join(subfiles, ",")))
609
-			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
610
-		} else {
611
-			if origPath[0] == '/' && len(origPath) > 1 {
612
-				origPath = origPath[1:]
613
-			}
614
-			origPath = strings.TrimPrefix(origPath, "./")
615
-			if h, ok := sums[origPath]; ok {
616
-				hash = "file:" + h
617
-			}
618
-		}
619
-		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
620
-		hit, err := b.probeCache()
621
-		if err != nil {
622
-			return err
623
-		}
624
-		// If we do not have a hash, never use the cache
625
-		if hit && hash != "" {
626
-			return nil
627
-		}
628
-	}
629
-
630
-	// Create the container
631
-	container, _, err := b.daemon.Create(b.config, "")
632
-	if err != nil {
633
-		return err
634
-	}
635
-	b.tmpContainers[container.ID] = struct{}{}
636
-
637
-	if err := container.Mount(); err != nil {
638
-		return err
639
-	}
640
-	defer container.Unmount()
641
-
642
-	if !allowDecompression || isRemote {
643
-		decompress = false
644
-	}
645
-	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
646
-		return err
647
-	}
648
-
649
-	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
650
-		return err
651
-	}
652
-	return nil
653
-}
654
-
655
-func (b *buildFile) CmdAdd(args string) error {
656
-	return b.runContextCommand(args, true, true, "ADD")
657
-}
658
-
659
-func (b *buildFile) create() (*daemon.Container, error) {
660
-	if b.image == "" {
661
-		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
662
-	}
663
-	b.config.Image = b.image
664
-
665
-	// Create the container
666
-	c, _, err := b.daemon.Create(b.config, "")
667
-	if err != nil {
668
-		return nil, err
669
-	}
670
-	b.tmpContainers[c.ID] = struct{}{}
671
-	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
672
-
673
-	// override the entry point that may have been picked up from the base image
674
-	c.Path = b.config.Cmd[0]
675
-	c.Args = b.config.Cmd[1:]
676
-
677
-	return c, nil
678
-}
679
-
680
-func (b *buildFile) run(c *daemon.Container) error {
681
-	var errCh chan error
682
-	if b.verbose {
683
-		errCh = utils.Go(func() error {
684
-			return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream)
685
-		})
686
-	}
687
-
688
-	//start the container
689
-	if err := c.Start(); err != nil {
690
-		return err
691
-	}
692
-
693
-	if errCh != nil {
694
-		if err := <-errCh; err != nil {
695
-			return err
696
-		}
697
-	}
698
-
699
-	// Wait for it to finish
700
-	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
701
-		err := &utils.JSONError{
702
-			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
703
-			Code:    ret,
704
-		}
705
-		return err
706
-	}
707
-
708
-	return nil
709
-}
710
-
711
-// Commit the container <id> with the autorun command <autoCmd>
712
-func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
713
-	if b.image == "" {
714
-		return fmt.Errorf("Please provide a source image with `from` prior to commit")
715
-	}
716
-	b.config.Image = b.image
717
-	if id == "" {
718
-		cmd := b.config.Cmd
719
-		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
720
-		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
721
-
722
-		hit, err := b.probeCache()
723
-		if err != nil {
724
-			return err
725
-		}
726
-		if hit {
727
-			return nil
728
-		}
729
-
730
-		container, warnings, err := b.daemon.Create(b.config, "")
731
-		if err != nil {
732
-			return err
733
-		}
734
-		for _, warning := range warnings {
735
-			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
736
-		}
737
-		b.tmpContainers[container.ID] = struct{}{}
738
-		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
739
-		id = container.ID
740
-
741
-		if err := container.Mount(); err != nil {
742
-			return err
743
-		}
744
-		defer container.Unmount()
745
-	}
746
-	container := b.daemon.Get(id)
747
-	if container == nil {
748
-		return fmt.Errorf("An error occured while creating the container")
749
-	}
750
-
751
-	// Note: Actually copy the struct
752
-	autoConfig := *b.config
753
-	autoConfig.Cmd = autoCmd
754
-	// Commit the container
755
-	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
756
-	if err != nil {
757
-		return err
758
-	}
759
-	b.tmpImages[image.ID] = struct{}{}
760
-	b.image = image.ID
761
-	return nil
762
-}
763
-
764
-// Long lines can be split with a backslash
765
-var lineContinuation = regexp.MustCompile(`\\\s*\n`)
766
-
767
-func (b *buildFile) Build(context io.Reader) (string, error) {
768
-	tmpdirPath, err := ioutil.TempDir("", "docker-build")
769
-	if err != nil {
770
-		return "", err
771
-	}
772
-
773
-	decompressedStream, err := archive.DecompressStream(context)
774
-	if err != nil {
775
-		return "", err
776
-	}
777
-
778
-	b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
779
-	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
780
-		return "", err
781
-	}
782
-	defer os.RemoveAll(tmpdirPath)
783
-
784
-	b.contextPath = tmpdirPath
785
-	filename := path.Join(tmpdirPath, "Dockerfile")
786
-	if _, err := os.Stat(filename); os.IsNotExist(err) {
787
-		return "", fmt.Errorf("Can't build a directory with no Dockerfile")
788
-	}
789
-	fileBytes, err := ioutil.ReadFile(filename)
790
-	if err != nil {
791
-		return "", err
792
-	}
793
-	if len(fileBytes) == 0 {
794
-		return "", ErrDockerfileEmpty
795
-	}
796
-	var (
797
-		dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
798
-		stepN      = 0
799
-	)
800
-	for _, line := range strings.Split(dockerfile, "\n") {
801
-		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
802
-		if len(line) == 0 {
803
-			continue
804
-		}
805
-		if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
806
-			if b.forceRm {
807
-				b.clearTmp(b.tmpContainers)
808
-			}
809
-			return "", err
810
-		} else if b.rm {
811
-			b.clearTmp(b.tmpContainers)
812
-		}
813
-		stepN += 1
814
-	}
815
-	if b.image != "" {
816
-		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
817
-		return b.image, nil
818
-	}
819
-	return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
820
-}
821
-
822
-// BuildStep parses a single build step from `instruction` and executes it in the current context.
823
-func (b *buildFile) BuildStep(name, expression string) error {
824
-	fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
825
-	tmp := strings.SplitN(expression, " ", 2)
826
-	if len(tmp) != 2 {
827
-		return fmt.Errorf("Invalid Dockerfile format")
828
-	}
829
-	instruction := strings.ToLower(strings.Trim(tmp[0], " "))
830
-	arguments := strings.Trim(tmp[1], " ")
831
-
832
-	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
833
-	if !exists {
834
-		fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
835
-		return nil
836
-	}
837
-
838
-	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
839
-	if ret != nil {
840
-		return ret.(error)
841
-	}
842
-
843
-	fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
844
-	return nil
845
-}
846
-
847
-func stripComments(raw []byte) string {
848
-	var (
849
-		out   []string
850
-		lines = strings.Split(string(raw), "\n")
851
-	)
852
-	for _, l := range lines {
853
-		if len(l) == 0 || l[0] == '#' {
854
-			continue
855
-		}
856
-		out = append(out, l)
857
-	}
858
-	return strings.Join(out, "\n")
859
-}
860
-
861
-func copyAsDirectory(source, destination string, destinationExists bool) error {
862
-	if err := archive.CopyWithTar(source, destination); err != nil {
863
-		return err
864
-	}
865
-
866
-	if destinationExists {
867
-		files, err := ioutil.ReadDir(source)
868
-		if err != nil {
869
-			return err
870
-		}
871
-
872
-		for _, file := range files {
873
-			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
874
-				return err
875
-			}
876
-		}
877
-		return nil
878
-	}
879
-
880
-	return fixPermissions(destination, 0, 0)
881
-}
882
-
883
-func fixPermissions(destination string, uid, gid int) error {
884
-	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
885
-		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
886
-			return err
887
-		}
888
-		return nil
889
-	})
890
-}
891
-
892
-func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
893
-	return &buildFile{
894
-		daemon:        srv.daemon,
895
-		srv:           srv,
896
-		config:        &runconfig.Config{},
897
-		outStream:     outStream,
898
-		errStream:     errStream,
899
-		tmpContainers: make(map[string]struct{}),
900
-		tmpImages:     make(map[string]struct{}),
901
-		verbose:       verbose,
902
-		utilizeCache:  utilizeCache,
903
-		rm:            rm,
904
-		forceRm:       forceRm,
905
-		sf:            sf,
906
-		authConfig:    auth,
907
-		configFile:    authConfigFile,
908
-		outOld:        outOld,
909
-	}
910
-}
... ...
@@ -46,6 +46,7 @@ import (
 	"time"
 
 	"github.com/docker/docker/archive"
+	"github.com/docker/docker/builder"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/daemonconfig"
 	"github.com/docker/docker/dockerversion"
... ...
@@ -534,7 +535,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
 	defer context.Close()
 
 	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
-	b := NewBuildFile(srv,
+	b := builder.NewBuildFile(srv.daemon, srv.Eng,
 		&utils.StdoutFormater{
 			Writer:          job.Stdout,
 			StreamFormatter: sf,
... ...
@@ -2058,38 +2059,6 @@ func (srv *Server) canDeleteImage(imgID string, force, untagged bool) error {
 	return nil
 }
 
-func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
-	// Retrieve all images
-	images, err := srv.daemon.Graph().Map()
-	if err != nil {
-		return nil, err
-	}
-
-	// Store the tree in a map of map (map[parentId][childId])
-	imageMap := make(map[string]map[string]struct{})
-	for _, img := range images {
-		if _, exists := imageMap[img.Parent]; !exists {
-			imageMap[img.Parent] = make(map[string]struct{})
-		}
-		imageMap[img.Parent][img.ID] = struct{}{}
-	}
-
-	// Loop on the children of the given image and check the config
-	var match *image.Image
-	for elem := range imageMap[imgID] {
-		img, err := srv.daemon.Graph().Get(elem)
-		if err != nil {
-			return nil, err
-		}
-		if runconfig.Compare(&img.ContainerConfig, config) {
-			if match == nil || match.Created.Before(img.Created) {
-				match = img
-			}
-		}
-	}
-	return match, nil
-}
-
 func (srv *Server) setHostConfig(container *daemon.Container, hostConfig *runconfig.HostConfig) error {
 	// Validate the HostConfig binds. Make sure that:
 	// the source exists