
Move "build" to daemon/build.go

Signed-off-by: Solomon Hykes <solomon@docker.com>

Solomon Hykes authored on 2014/08/08 17:00:16
Showing 5 changed files
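Only two of the five changed files appear in this excerpt: the deleted builder package file and the new daemon/build.go. The remaining changes presumably wire the relocated handler into the engine's job table; the following is a minimal sketch of that registration, assuming the engine's `Register(name, handler)` API of the period — the actual hook-up lives in files not shown here.

package daemon

import "github.com/docker/docker/engine"

// Sketch only: register the relocated build handler as an engine job.
// Where this call sits (e.g. the daemon's install/engine-setup step) is an
// assumption; it is not part of the diff below.
func (daemon *Daemon) registerBuildJob(eng *engine.Engine) error {
	// CmdBuild (added in daemon/build.go below) matches the engine.Handler
	// signature func(*engine.Job) engine.Status, so it can be registered
	// directly under the "build" job name.
	return eng.Register("build", daemon.CmdBuild)
}

A caller would then reach it as an engine job (eng.Job("build"), with parameters supplied via Setenv/SetenvBool, matching the Getenv calls CmdBuild makes below).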
deleted file mode 100644
@@ -1,927 +0,0 @@
1
-package builder
2
-
3
-import (
4
-	"crypto/sha256"
5
-	"encoding/hex"
6
-	"encoding/json"
7
-	"errors"
8
-	"fmt"
9
-	"io"
10
-	"io/ioutil"
11
-	"net/url"
12
-	"os"
13
-	"path"
14
-	"path/filepath"
15
-	"reflect"
16
-	"regexp"
17
-	"sort"
18
-	"strings"
19
-	"syscall"
20
-	"time"
21
-
22
-	"github.com/docker/docker/archive"
23
-	"github.com/docker/docker/daemon"
24
-	"github.com/docker/docker/engine"
25
-	"github.com/docker/docker/nat"
26
-	"github.com/docker/docker/pkg/parsers"
27
-	"github.com/docker/docker/pkg/symlink"
28
-	"github.com/docker/docker/pkg/system"
29
-	"github.com/docker/docker/pkg/tarsum"
30
-	"github.com/docker/docker/registry"
31
-	"github.com/docker/docker/runconfig"
32
-	"github.com/docker/docker/utils"
33
-)
34
-
35
-var (
36
-	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
37
-)
38
-
39
-type BuildFile interface {
40
-	Build(io.Reader) (string, error)
41
-	CmdFrom(string) error
42
-	CmdRun(string) error
43
-}
44
-
45
-type buildFile struct {
46
-	daemon *daemon.Daemon
47
-	eng    *engine.Engine
48
-
49
-	image      string
50
-	maintainer string
51
-	config     *runconfig.Config
52
-
53
-	contextPath string
54
-	context     *tarsum.TarSum
55
-
56
-	verbose      bool
57
-	utilizeCache bool
58
-	rm           bool
59
-	forceRm      bool
60
-
61
-	authConfig *registry.AuthConfig
62
-	configFile *registry.ConfigFile
63
-
64
-	tmpContainers map[string]struct{}
65
-	tmpImages     map[string]struct{}
66
-
67
-	outStream io.Writer
68
-	errStream io.Writer
69
-
70
-	// Deprecated, original writer used for ImagePull. To be removed.
71
-	outOld io.Writer
72
-	sf     *utils.StreamFormatter
73
-
74
-	// cmdSet indicates whether CMD was set in the current Dockerfile
75
-	cmdSet bool
76
-}
77
-
78
-func (b *buildFile) clearTmp(containers map[string]struct{}) {
79
-	for c := range containers {
80
-		tmp := b.daemon.Get(c)
81
-		if err := b.daemon.Destroy(tmp); err != nil {
82
-			fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
83
-		} else {
84
-			delete(containers, c)
85
-			fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
86
-		}
87
-	}
88
-}
89
-
90
-func (b *buildFile) CmdFrom(name string) error {
91
-	image, err := b.daemon.Repositories().LookupImage(name)
92
-	if err != nil {
93
-		if b.daemon.Graph().IsNotExist(err) {
94
-			remote, tag := parsers.ParseRepositoryTag(name)
95
-			pullRegistryAuth := b.authConfig
96
-			if len(b.configFile.Configs) > 0 {
97
-				// The request came with a full auth config file, we prefer to use that
98
-				endpoint, _, err := registry.ResolveRepositoryName(remote)
99
-				if err != nil {
100
-					return err
101
-				}
102
-				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
103
-				pullRegistryAuth = &resolvedAuth
104
-			}
105
-			job := b.eng.Job("pull", remote, tag)
106
-			job.SetenvBool("json", b.sf.Json())
107
-			job.SetenvBool("parallel", true)
108
-			job.SetenvJson("authConfig", pullRegistryAuth)
109
-			job.Stdout.Add(b.outOld)
110
-			if err := job.Run(); err != nil {
111
-				return err
112
-			}
113
-			image, err = b.daemon.Repositories().LookupImage(name)
114
-			if err != nil {
115
-				return err
116
-			}
117
-		} else {
118
-			return err
119
-		}
120
-	}
121
-	b.image = image.ID
122
-	b.config = &runconfig.Config{}
123
-	if image.Config != nil {
124
-		b.config = image.Config
125
-	}
126
-	if b.config.Env == nil || len(b.config.Env) == 0 {
127
-		b.config.Env = append(b.config.Env, "PATH="+daemon.DefaultPathEnv)
128
-	}
129
-	// Process ONBUILD triggers if they exist
130
-	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
131
-		fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
132
-	}
133
-
134
-	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
135
-	onBuildTriggers := b.config.OnBuild
136
-	b.config.OnBuild = []string{}
137
-
138
-	for n, step := range onBuildTriggers {
139
-		splitStep := strings.Split(step, " ")
140
-		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
141
-		switch stepInstruction {
142
-		case "ONBUILD":
143
-			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
144
-		case "MAINTAINER", "FROM":
145
-			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
146
-		}
147
-		if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
148
-			return err
149
-		}
150
-	}
151
-	return nil
152
-}
153
-
154
-// The ONBUILD command declares a build instruction to be executed in any future build
155
-// using the current image as a base.
156
-func (b *buildFile) CmdOnbuild(trigger string) error {
157
-	splitTrigger := strings.Split(trigger, " ")
158
-	triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " "))
159
-	switch triggerInstruction {
160
-	case "ONBUILD":
161
-		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
162
-	case "MAINTAINER", "FROM":
163
-		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
164
-	}
165
-	b.config.OnBuild = append(b.config.OnBuild, trigger)
166
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
167
-}
168
-
169
-func (b *buildFile) CmdMaintainer(name string) error {
170
-	b.maintainer = name
171
-	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
172
-}
173
-
174
-// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
175
-// and if so attempts to look up the current `b.image` and `b.config` pair
176
-// in the current server `b.daemon`. If an image is found, probeCache returns
177
-// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
178
-// is any error, it returns `(false, err)`.
179
-func (b *buildFile) probeCache() (bool, error) {
180
-	if b.utilizeCache {
181
-		if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil {
182
-			return false, err
183
-		} else if cache != nil {
184
-			fmt.Fprintf(b.outStream, " ---> Using cache\n")
185
-			utils.Debugf("[BUILDER] Use cached version")
186
-			b.image = cache.ID
187
-			return true, nil
188
-		} else {
189
-			utils.Debugf("[BUILDER] Cache miss")
190
-		}
191
-	}
192
-	return false, nil
193
-}
194
-
195
-func (b *buildFile) CmdRun(args string) error {
196
-	if b.image == "" {
197
-		return fmt.Errorf("Please provide a source image with `from` prior to run")
198
-	}
199
-	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
200
-	if err != nil {
201
-		return err
202
-	}
203
-
204
-	cmd := b.config.Cmd
205
-	// set Cmd manually, this is special case only for Dockerfiles
206
-	b.config.Cmd = config.Cmd
207
-	runconfig.Merge(b.config, config)
208
-
209
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
210
-
211
-	utils.Debugf("Command to be executed: %v", b.config.Cmd)
212
-
213
-	hit, err := b.probeCache()
214
-	if err != nil {
215
-		return err
216
-	}
217
-	if hit {
218
-		return nil
219
-	}
220
-
221
-	c, err := b.create()
222
-	if err != nil {
223
-		return err
224
-	}
225
-	// Ensure that we keep the container mounted until the commit
226
-	// to avoid unmounting and then mounting directly again
227
-	c.Mount()
228
-	defer c.Unmount()
229
-
230
-	err = b.run(c)
231
-	if err != nil {
232
-		return err
233
-	}
234
-	if err := b.commit(c.ID, cmd, "run"); err != nil {
235
-		return err
236
-	}
237
-
238
-	return nil
239
-}
240
-
241
-func (b *buildFile) FindEnvKey(key string) int {
242
-	for k, envVar := range b.config.Env {
243
-		envParts := strings.SplitN(envVar, "=", 2)
244
-		if key == envParts[0] {
245
-			return k
246
-		}
247
-	}
248
-	return -1
249
-}
250
-
251
-func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
252
-	exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
253
-	if err != nil {
254
-		return value, err
255
-	}
256
-	matches := exp.FindAllString(value, -1)
257
-	for _, match := range matches {
258
-		match = match[strings.Index(match, "$"):]
259
-		matchKey := strings.Trim(match, "${}")
260
-
261
-		for _, envVar := range b.config.Env {
262
-			envParts := strings.SplitN(envVar, "=", 2)
263
-			envKey := envParts[0]
264
-			envValue := envParts[1]
265
-
266
-			if envKey == matchKey {
267
-				value = strings.Replace(value, match, envValue, -1)
268
-				break
269
-			}
270
-		}
271
-	}
272
-	return value, nil
273
-}
274
-
275
-func (b *buildFile) CmdEnv(args string) error {
276
-	tmp := strings.SplitN(args, " ", 2)
277
-	if len(tmp) != 2 {
278
-		return fmt.Errorf("Invalid ENV format")
279
-	}
280
-	key := strings.Trim(tmp[0], " \t")
281
-	value := strings.Trim(tmp[1], " \t")
282
-
283
-	envKey := b.FindEnvKey(key)
284
-	replacedValue, err := b.ReplaceEnvMatches(value)
285
-	if err != nil {
286
-		return err
287
-	}
288
-	replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
289
-
290
-	if envKey >= 0 {
291
-		b.config.Env[envKey] = replacedVar
292
-	} else {
293
-		b.config.Env = append(b.config.Env, replacedVar)
294
-	}
295
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
296
-}
297
-
298
-func (b *buildFile) buildCmdFromJson(args string) []string {
299
-	var cmd []string
300
-	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
301
-		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
302
-		cmd = []string{"/bin/sh", "-c", args}
303
-	}
304
-	return cmd
305
-}
306
-
307
-func (b *buildFile) CmdCmd(args string) error {
308
-	cmd := b.buildCmdFromJson(args)
309
-	b.config.Cmd = cmd
310
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
311
-		return err
312
-	}
313
-	b.cmdSet = true
314
-	return nil
315
-}
316
-
317
-func (b *buildFile) CmdEntrypoint(args string) error {
318
-	entrypoint := b.buildCmdFromJson(args)
319
-	b.config.Entrypoint = entrypoint
320
-	// if there is no cmd in current Dockerfile - cleanup cmd
321
-	if !b.cmdSet {
322
-		b.config.Cmd = nil
323
-	}
324
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
325
-		return err
326
-	}
327
-	return nil
328
-}
329
-
330
-func (b *buildFile) CmdExpose(args string) error {
331
-	portsTab := strings.Split(args, " ")
332
-
333
-	if b.config.ExposedPorts == nil {
334
-		b.config.ExposedPorts = make(nat.PortSet)
335
-	}
336
-	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
337
-	if err != nil {
338
-		return err
339
-	}
340
-	for port := range ports {
341
-		if _, exists := b.config.ExposedPorts[port]; !exists {
342
-			b.config.ExposedPorts[port] = struct{}{}
343
-		}
344
-	}
345
-	b.config.PortSpecs = nil
346
-
347
-	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
348
-}
349
-
350
-func (b *buildFile) CmdUser(args string) error {
351
-	b.config.User = args
352
-	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
353
-}
354
-
355
-func (b *buildFile) CmdInsert(args string) error {
356
-	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
357
-}
358
-
359
-func (b *buildFile) CmdCopy(args string) error {
360
-	return b.runContextCommand(args, false, false, "COPY")
361
-}
362
-
363
-func (b *buildFile) CmdWorkdir(workdir string) error {
364
-	if workdir[0] == '/' {
365
-		b.config.WorkingDir = workdir
366
-	} else {
367
-		if b.config.WorkingDir == "" {
368
-			b.config.WorkingDir = "/"
369
-		}
370
-		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
371
-	}
372
-	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
373
-}
374
-
375
-func (b *buildFile) CmdVolume(args string) error {
376
-	if args == "" {
377
-		return fmt.Errorf("Volume cannot be empty")
378
-	}
379
-
380
-	var volume []string
381
-	if err := json.Unmarshal([]byte(args), &volume); err != nil {
382
-		volume = []string{args}
383
-	}
384
-	if b.config.Volumes == nil {
385
-		b.config.Volumes = map[string]struct{}{}
386
-	}
387
-	for _, v := range volume {
388
-		b.config.Volumes[v] = struct{}{}
389
-	}
390
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
391
-		return err
392
-	}
393
-	return nil
394
-}
395
-
396
-func (b *buildFile) checkPathForAddition(orig string) error {
397
-	origPath := path.Join(b.contextPath, orig)
398
-	if p, err := filepath.EvalSymlinks(origPath); err != nil {
399
-		if os.IsNotExist(err) {
400
-			return fmt.Errorf("%s: no such file or directory", orig)
401
-		}
402
-		return err
403
-	} else {
404
-		origPath = p
405
-	}
406
-	if !strings.HasPrefix(origPath, b.contextPath) {
407
-		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
408
-	}
409
-	_, err := os.Stat(origPath)
410
-	if err != nil {
411
-		if os.IsNotExist(err) {
412
-			return fmt.Errorf("%s: no such file or directory", orig)
413
-		}
414
-		return err
415
-	}
416
-	return nil
417
-}
418
-
419
-func (b *buildFile) addContext(container *daemon.Container, orig, dest string, decompress bool) error {
420
-	var (
421
-		err        error
422
-		destExists = true
423
-		origPath   = path.Join(b.contextPath, orig)
424
-		destPath   = path.Join(container.RootfsPath(), dest)
425
-	)
426
-
427
-	if destPath != container.RootfsPath() {
428
-		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
429
-		if err != nil {
430
-			return err
431
-		}
432
-	}
433
-
434
-	// Preserve the trailing '/'
435
-	if strings.HasSuffix(dest, "/") || dest == "." {
436
-		destPath = destPath + "/"
437
-	}
438
-
439
-	destStat, err := os.Stat(destPath)
440
-	if err != nil {
441
-		if !os.IsNotExist(err) {
442
-			return err
443
-		}
444
-		destExists = false
445
-	}
446
-
447
-	fi, err := os.Stat(origPath)
448
-	if err != nil {
449
-		if os.IsNotExist(err) {
450
-			return fmt.Errorf("%s: no such file or directory", orig)
451
-		}
452
-		return err
453
-	}
454
-
455
-	if fi.IsDir() {
456
-		return copyAsDirectory(origPath, destPath, destExists)
457
-	}
458
-
459
-	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
460
-	if decompress {
461
-		// First try to unpack the source as an archive
462
-		// to support the untar feature we need to clean up the path a little bit
463
-		// because tar is very forgiving.  First we need to strip off the archive's
464
-		// filename from the path but this is only added if it does not end in / .
465
-		tarDest := destPath
466
-		if strings.HasSuffix(tarDest, "/") {
467
-			tarDest = filepath.Dir(destPath)
468
-		}
469
-
470
-		// try to successfully untar the orig
471
-		if err := archive.UntarPath(origPath, tarDest); err == nil {
472
-			return nil
473
-		} else if err != io.EOF {
474
-			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
475
-		}
476
-	}
477
-
478
-	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
479
-		return err
480
-	}
481
-	if err := archive.CopyWithTar(origPath, destPath); err != nil {
482
-		return err
483
-	}
484
-
485
-	resPath := destPath
486
-	if destExists && destStat.IsDir() {
487
-		resPath = path.Join(destPath, path.Base(origPath))
488
-	}
489
-
490
-	return fixPermissions(resPath, 0, 0)
491
-}
492
-
493
-func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
494
-	if b.context == nil {
495
-		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
496
-	}
497
-	tmp := strings.SplitN(args, " ", 2)
498
-	if len(tmp) != 2 {
499
-		return fmt.Errorf("Invalid %s format", cmdName)
500
-	}
501
-
502
-	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
503
-	if err != nil {
504
-		return err
505
-	}
506
-
507
-	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
508
-	if err != nil {
509
-		return err
510
-	}
511
-
512
-	cmd := b.config.Cmd
513
-	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
514
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
515
-	b.config.Image = b.image
516
-
517
-	var (
518
-		origPath   = orig
519
-		destPath   = dest
520
-		remoteHash string
521
-		isRemote   bool
522
-		decompress = true
523
-	)
524
-
525
-	isRemote = utils.IsURL(orig)
526
-	if isRemote && !allowRemote {
527
-		return fmt.Errorf("Source can't be an URL for %s", cmdName)
528
-	} else if utils.IsURL(orig) {
529
-		// Initiate the download
530
-		resp, err := utils.Download(orig)
531
-		if err != nil {
532
-			return err
533
-		}
534
-
535
-		// Create a tmp dir
536
-		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
537
-		if err != nil {
538
-			return err
539
-		}
540
-
541
-		// Create a tmp file within our tmp dir
542
-		tmpFileName := path.Join(tmpDirName, "tmp")
543
-		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
544
-		if err != nil {
545
-			return err
546
-		}
547
-		defer os.RemoveAll(tmpDirName)
548
-
549
-		// Download and dump result to tmp file
550
-		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
551
-			tmpFile.Close()
552
-			return err
553
-		}
554
-		tmpFile.Close()
555
-
556
-		// Remove the mtime of the newly created tmp file
557
-		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
558
-			return err
559
-		}
560
-
561
-		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
562
-
563
-		// Process the checksum
564
-		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
565
-		if err != nil {
566
-			return err
567
-		}
568
-		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
569
-		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
570
-			return err
571
-		}
572
-		remoteHash = tarSum.Sum(nil)
573
-		r.Close()
574
-
575
-		// If the destination is a directory, figure out the filename.
576
-		if strings.HasSuffix(dest, "/") {
577
-			u, err := url.Parse(orig)
578
-			if err != nil {
579
-				return err
580
-			}
581
-			path := u.Path
582
-			if strings.HasSuffix(path, "/") {
583
-				path = path[:len(path)-1]
584
-			}
585
-			parts := strings.Split(path, "/")
586
-			filename := parts[len(parts)-1]
587
-			if filename == "" {
588
-				return fmt.Errorf("cannot determine filename from url: %s", u)
589
-			}
590
-			destPath = dest + filename
591
-		}
592
-	}
593
-
594
-	if err := b.checkPathForAddition(origPath); err != nil {
595
-		return err
596
-	}
597
-
598
-	// Hash path and check the cache
599
-	if b.utilizeCache {
600
-		var (
601
-			hash string
602
-			sums = b.context.GetSums()
603
-		)
604
-
605
-		if remoteHash != "" {
606
-			hash = remoteHash
607
-		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
608
-			return err
609
-		} else if fi.IsDir() {
610
-			var subfiles []string
611
-			for file, sum := range sums {
612
-				absFile := path.Join(b.contextPath, file)
613
-				absOrigPath := path.Join(b.contextPath, origPath)
614
-				if strings.HasPrefix(absFile, absOrigPath) {
615
-					subfiles = append(subfiles, sum)
616
-				}
617
-			}
618
-			sort.Strings(subfiles)
619
-			hasher := sha256.New()
620
-			hasher.Write([]byte(strings.Join(subfiles, ",")))
621
-			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
622
-		} else {
623
-			if origPath[0] == '/' && len(origPath) > 1 {
624
-				origPath = origPath[1:]
625
-			}
626
-			origPath = strings.TrimPrefix(origPath, "./")
627
-			if h, ok := sums[origPath]; ok {
628
-				hash = "file:" + h
629
-			}
630
-		}
631
-		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
632
-		hit, err := b.probeCache()
633
-		if err != nil {
634
-			return err
635
-		}
636
-		// If we do not have a hash, never use the cache
637
-		if hit && hash != "" {
638
-			return nil
639
-		}
640
-	}
641
-
642
-	// Create the container
643
-	container, _, err := b.daemon.Create(b.config, "")
644
-	if err != nil {
645
-		return err
646
-	}
647
-	b.tmpContainers[container.ID] = struct{}{}
648
-
649
-	if err := container.Mount(); err != nil {
650
-		return err
651
-	}
652
-	defer container.Unmount()
653
-
654
-	if !allowDecompression || isRemote {
655
-		decompress = false
656
-	}
657
-	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
658
-		return err
659
-	}
660
-
661
-	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
662
-		return err
663
-	}
664
-	return nil
665
-}
666
-
667
-func (b *buildFile) CmdAdd(args string) error {
668
-	return b.runContextCommand(args, true, true, "ADD")
669
-}
670
-
671
-func (b *buildFile) create() (*daemon.Container, error) {
672
-	if b.image == "" {
673
-		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
674
-	}
675
-	b.config.Image = b.image
676
-
677
-	// Create the container
678
-	c, _, err := b.daemon.Create(b.config, "")
679
-	if err != nil {
680
-		return nil, err
681
-	}
682
-	b.tmpContainers[c.ID] = struct{}{}
683
-	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
684
-
685
-	// override the entry point that may have been picked up from the base image
686
-	c.Path = b.config.Cmd[0]
687
-	c.Args = b.config.Cmd[1:]
688
-
689
-	return c, nil
690
-}
691
-
692
-func (b *buildFile) run(c *daemon.Container) error {
693
-	var errCh chan error
694
-	if b.verbose {
695
-		errCh = utils.Go(func() error {
696
-			// FIXME: call the 'attach' job so that daemon.Attach can be made private
697
-			//
698
-			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
699
-			// but without hijacking for stdin. Also, with attach there can be race
700
-			// condition because of some output already was printed before it.
701
-			return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream)
702
-		})
703
-	}
704
-
705
-	//start the container
706
-	if err := c.Start(); err != nil {
707
-		return err
708
-	}
709
-
710
-	if errCh != nil {
711
-		if err := <-errCh; err != nil {
712
-			return err
713
-		}
714
-	}
715
-
716
-	// Wait for it to finish
717
-	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
718
-		err := &utils.JSONError{
719
-			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
720
-			Code:    ret,
721
-		}
722
-		return err
723
-	}
724
-
725
-	return nil
726
-}
727
-
728
-// Commit the container <id> with the autorun command <autoCmd>
729
-func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
730
-	if b.image == "" {
731
-		return fmt.Errorf("Please provide a source image with `from` prior to commit")
732
-	}
733
-	b.config.Image = b.image
734
-	if id == "" {
735
-		cmd := b.config.Cmd
736
-		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
737
-		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
738
-
739
-		hit, err := b.probeCache()
740
-		if err != nil {
741
-			return err
742
-		}
743
-		if hit {
744
-			return nil
745
-		}
746
-
747
-		container, warnings, err := b.daemon.Create(b.config, "")
748
-		if err != nil {
749
-			return err
750
-		}
751
-		for _, warning := range warnings {
752
-			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
753
-		}
754
-		b.tmpContainers[container.ID] = struct{}{}
755
-		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
756
-		id = container.ID
757
-
758
-		if err := container.Mount(); err != nil {
759
-			return err
760
-		}
761
-		defer container.Unmount()
762
-	}
763
-	container := b.daemon.Get(id)
764
-	if container == nil {
765
-		return fmt.Errorf("An error occured while creating the container")
766
-	}
767
-
768
-	// Note: Actually copy the struct
769
-	autoConfig := *b.config
770
-	autoConfig.Cmd = autoCmd
771
-	// Commit the container
772
-	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
773
-	if err != nil {
774
-		return err
775
-	}
776
-	b.tmpImages[image.ID] = struct{}{}
777
-	b.image = image.ID
778
-	return nil
779
-}
780
-
781
-// Long lines can be split with a backslash
782
-var lineContinuation = regexp.MustCompile(`\\\s*\n`)
783
-
784
-func (b *buildFile) Build(context io.Reader) (string, error) {
785
-	tmpdirPath, err := ioutil.TempDir("", "docker-build")
786
-	if err != nil {
787
-		return "", err
788
-	}
789
-
790
-	decompressedStream, err := archive.DecompressStream(context)
791
-	if err != nil {
792
-		return "", err
793
-	}
794
-
795
-	b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true}
796
-	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
797
-		return "", err
798
-	}
799
-	defer os.RemoveAll(tmpdirPath)
800
-
801
-	b.contextPath = tmpdirPath
802
-	filename := path.Join(tmpdirPath, "Dockerfile")
803
-	if _, err := os.Stat(filename); os.IsNotExist(err) {
804
-		return "", fmt.Errorf("Can't build a directory with no Dockerfile")
805
-	}
806
-	fileBytes, err := ioutil.ReadFile(filename)
807
-	if err != nil {
808
-		return "", err
809
-	}
810
-	if len(fileBytes) == 0 {
811
-		return "", ErrDockerfileEmpty
812
-	}
813
-	var (
814
-		dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
815
-		stepN      = 0
816
-	)
817
-	for _, line := range strings.Split(dockerfile, "\n") {
818
-		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
819
-		if len(line) == 0 {
820
-			continue
821
-		}
822
-		if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
823
-			if b.forceRm {
824
-				b.clearTmp(b.tmpContainers)
825
-			}
826
-			return "", err
827
-		} else if b.rm {
828
-			b.clearTmp(b.tmpContainers)
829
-		}
830
-		stepN += 1
831
-	}
832
-	if b.image != "" {
833
-		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
834
-		return b.image, nil
835
-	}
836
-	return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
837
-}
838
-
839
-// BuildStep parses a single build step from `instruction` and executes it in the current context.
840
-func (b *buildFile) BuildStep(name, expression string) error {
841
-	fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
842
-	tmp := strings.SplitN(expression, " ", 2)
843
-	if len(tmp) != 2 {
844
-		return fmt.Errorf("Invalid Dockerfile format")
845
-	}
846
-	instruction := strings.ToLower(strings.Trim(tmp[0], " "))
847
-	arguments := strings.Trim(tmp[1], " ")
848
-
849
-	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
850
-	if !exists {
851
-		fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
852
-		return nil
853
-	}
854
-
855
-	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
856
-	if ret != nil {
857
-		return ret.(error)
858
-	}
859
-
860
-	fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
861
-	return nil
862
-}
863
-
864
-func stripComments(raw []byte) string {
865
-	var (
866
-		out   []string
867
-		lines = strings.Split(string(raw), "\n")
868
-	)
869
-	for _, l := range lines {
870
-		if len(l) == 0 || l[0] == '#' {
871
-			continue
872
-		}
873
-		out = append(out, l)
874
-	}
875
-	return strings.Join(out, "\n")
876
-}
877
-
878
-func copyAsDirectory(source, destination string, destinationExists bool) error {
879
-	if err := archive.CopyWithTar(source, destination); err != nil {
880
-		return err
881
-	}
882
-
883
-	if destinationExists {
884
-		files, err := ioutil.ReadDir(source)
885
-		if err != nil {
886
-			return err
887
-		}
888
-
889
-		for _, file := range files {
890
-			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
891
-				return err
892
-			}
893
-		}
894
-		return nil
895
-	}
896
-
897
-	return fixPermissions(destination, 0, 0)
898
-}
899
-
900
-func fixPermissions(destination string, uid, gid int) error {
901
-	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
902
-		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
903
-			return err
904
-		}
905
-		return nil
906
-	})
907
-}
908
-
909
-func NewBuildFile(d *daemon.Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
910
-	return &buildFile{
911
-		daemon:        d,
912
-		eng:           eng,
913
-		config:        &runconfig.Config{},
914
-		outStream:     outStream,
915
-		errStream:     errStream,
916
-		tmpContainers: make(map[string]struct{}),
917
-		tmpImages:     make(map[string]struct{}),
918
-		verbose:       verbose,
919
-		utilizeCache:  utilizeCache,
920
-		rm:            rm,
921
-		forceRm:       forceRm,
922
-		sf:            sf,
923
-		authConfig:    auth,
924
-		configFile:    authConfigFile,
925
-		outOld:        outOld,
926
-	}
927
-}
new file mode 100644
@@ -0,0 +1,1007 @@
0
+package daemon
1
+
2
+import (
3
+	"crypto/sha256"
4
+	"encoding/hex"
5
+	"encoding/json"
6
+	"errors"
7
+	"fmt"
8
+	"io"
9
+	"io/ioutil"
10
+	"net/url"
11
+	"os"
12
+	"os/exec"
13
+	"path"
14
+	"path/filepath"
15
+	"reflect"
16
+	"regexp"
17
+	"sort"
18
+	"strings"
19
+	"syscall"
20
+	"time"
21
+
22
+	"github.com/docker/docker/archive"
23
+	"github.com/docker/docker/engine"
24
+	"github.com/docker/docker/nat"
25
+	"github.com/docker/docker/pkg/parsers"
26
+	"github.com/docker/docker/pkg/symlink"
27
+	"github.com/docker/docker/pkg/system"
28
+	"github.com/docker/docker/pkg/tarsum"
29
+	"github.com/docker/docker/registry"
30
+	"github.com/docker/docker/runconfig"
31
+	"github.com/docker/docker/utils"
32
+)
33
+
34
+func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status {
35
+	if len(job.Args) != 0 {
36
+		return job.Errorf("Usage: %s\n", job.Name)
37
+	}
38
+	var (
39
+		remoteURL      = job.Getenv("remote")
40
+		repoName       = job.Getenv("t")
41
+		suppressOutput = job.GetenvBool("q")
42
+		noCache        = job.GetenvBool("nocache")
43
+		rm             = job.GetenvBool("rm")
44
+		forceRm        = job.GetenvBool("forcerm")
45
+		authConfig     = &registry.AuthConfig{}
46
+		configFile     = &registry.ConfigFile{}
47
+		tag            string
48
+		context        io.ReadCloser
49
+	)
50
+	job.GetenvJson("authConfig", authConfig)
51
+	job.GetenvJson("configFile", configFile)
52
+	repoName, tag = parsers.ParseRepositoryTag(repoName)
53
+
54
+	if remoteURL == "" {
55
+		context = ioutil.NopCloser(job.Stdin)
56
+	} else if utils.IsGIT(remoteURL) {
57
+		if !strings.HasPrefix(remoteURL, "git://") {
58
+			remoteURL = "https://" + remoteURL
59
+		}
60
+		root, err := ioutil.TempDir("", "docker-build-git")
61
+		if err != nil {
62
+			return job.Error(err)
63
+		}
64
+		defer os.RemoveAll(root)
65
+
66
+		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
67
+			return job.Errorf("Error trying to use git: %s (%s)", err, output)
68
+		}
69
+
70
+		c, err := archive.Tar(root, archive.Uncompressed)
71
+		if err != nil {
72
+			return job.Error(err)
73
+		}
74
+		context = c
75
+	} else if utils.IsURL(remoteURL) {
76
+		f, err := utils.Download(remoteURL)
77
+		if err != nil {
78
+			return job.Error(err)
79
+		}
80
+		defer f.Body.Close()
81
+		dockerFile, err := ioutil.ReadAll(f.Body)
82
+		if err != nil {
83
+			return job.Error(err)
84
+		}
85
+		c, err := archive.Generate("Dockerfile", string(dockerFile))
86
+		if err != nil {
87
+			return job.Error(err)
88
+		}
89
+		context = c
90
+	}
91
+	defer context.Close()
92
+
93
+	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
94
+	b := NewBuildFile(daemon, daemon.eng,
95
+		&utils.StdoutFormater{
96
+			Writer:          job.Stdout,
97
+			StreamFormatter: sf,
98
+		},
99
+		&utils.StderrFormater{
100
+			Writer:          job.Stdout,
101
+			StreamFormatter: sf,
102
+		},
103
+		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
104
+	id, err := b.Build(context)
105
+	if err != nil {
106
+		return job.Error(err)
107
+	}
108
+	if repoName != "" {
109
+		daemon.Repositories().Set(repoName, tag, id, false)
110
+	}
111
+	return engine.StatusOK
112
+}
113
+
114
+var (
115
+	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
116
+)
117
+
118
+type BuildFile interface {
119
+	Build(io.Reader) (string, error)
120
+	CmdFrom(string) error
121
+	CmdRun(string) error
122
+}
123
+
124
+type buildFile struct {
125
+	daemon *Daemon
126
+	eng    *engine.Engine
127
+
128
+	image      string
129
+	maintainer string
130
+	config     *runconfig.Config
131
+
132
+	contextPath string
133
+	context     *tarsum.TarSum
134
+
135
+	verbose      bool
136
+	utilizeCache bool
137
+	rm           bool
138
+	forceRm      bool
139
+
140
+	authConfig *registry.AuthConfig
141
+	configFile *registry.ConfigFile
142
+
143
+	tmpContainers map[string]struct{}
144
+	tmpImages     map[string]struct{}
145
+
146
+	outStream io.Writer
147
+	errStream io.Writer
148
+
149
+	// Deprecated, original writer used for ImagePull. To be removed.
150
+	outOld io.Writer
151
+	sf     *utils.StreamFormatter
152
+
153
+	// cmdSet indicates whether CMD was set in the current Dockerfile
154
+	cmdSet bool
155
+}
156
+
157
+func (b *buildFile) clearTmp(containers map[string]struct{}) {
158
+	for c := range containers {
159
+		tmp := b.daemon.Get(c)
160
+		if err := b.daemon.Destroy(tmp); err != nil {
161
+			fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
162
+		} else {
163
+			delete(containers, c)
164
+			fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
165
+		}
166
+	}
167
+}
168
+
169
+func (b *buildFile) CmdFrom(name string) error {
170
+	image, err := b.daemon.Repositories().LookupImage(name)
171
+	if err != nil {
172
+		if b.daemon.Graph().IsNotExist(err) {
173
+			remote, tag := parsers.ParseRepositoryTag(name)
174
+			pullRegistryAuth := b.authConfig
175
+			if len(b.configFile.Configs) > 0 {
176
+				// The request came with a full auth config file, we prefer to use that
177
+				endpoint, _, err := registry.ResolveRepositoryName(remote)
178
+				if err != nil {
179
+					return err
180
+				}
181
+				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
182
+				pullRegistryAuth = &resolvedAuth
183
+			}
184
+			job := b.eng.Job("pull", remote, tag)
185
+			job.SetenvBool("json", b.sf.Json())
186
+			job.SetenvBool("parallel", true)
187
+			job.SetenvJson("authConfig", pullRegistryAuth)
188
+			job.Stdout.Add(b.outOld)
189
+			if err := job.Run(); err != nil {
190
+				return err
191
+			}
192
+			image, err = b.daemon.Repositories().LookupImage(name)
193
+			if err != nil {
194
+				return err
195
+			}
196
+		} else {
197
+			return err
198
+		}
199
+	}
200
+	b.image = image.ID
201
+	b.config = &runconfig.Config{}
202
+	if image.Config != nil {
203
+		b.config = image.Config
204
+	}
205
+	if b.config.Env == nil || len(b.config.Env) == 0 {
206
+		b.config.Env = append(b.config.Env, "PATH="+DefaultPathEnv)
207
+	}
208
+	// Process ONBUILD triggers if they exist
209
+	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
210
+		fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
211
+	}
212
+
213
+	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
214
+	onBuildTriggers := b.config.OnBuild
215
+	b.config.OnBuild = []string{}
216
+
217
+	for n, step := range onBuildTriggers {
218
+		splitStep := strings.Split(step, " ")
219
+		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
220
+		switch stepInstruction {
221
+		case "ONBUILD":
222
+			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
223
+		case "MAINTAINER", "FROM":
224
+			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
225
+		}
226
+		if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
227
+			return err
228
+		}
229
+	}
230
+	return nil
231
+}
232
+
233
+// The ONBUILD command declares a build instruction to be executed in any future build
234
+// using the current image as a base.
235
+func (b *buildFile) CmdOnbuild(trigger string) error {
236
+	splitTrigger := strings.Split(trigger, " ")
237
+	triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " "))
238
+	switch triggerInstruction {
239
+	case "ONBUILD":
240
+		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
241
+	case "MAINTAINER", "FROM":
242
+		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
243
+	}
244
+	b.config.OnBuild = append(b.config.OnBuild, trigger)
245
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
246
+}
247
+
248
+func (b *buildFile) CmdMaintainer(name string) error {
249
+	b.maintainer = name
250
+	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
251
+}
252
+
253
+// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
254
+// and if so attempts to look up the current `b.image` and `b.config` pair
255
+// in the current server `b.daemon`. If an image is found, probeCache returns
256
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
257
+// is any error, it returns `(false, err)`.
258
+func (b *buildFile) probeCache() (bool, error) {
259
+	if b.utilizeCache {
260
+		if cache, err := b.daemon.ImageGetCached(b.image, b.config); err != nil {
261
+			return false, err
262
+		} else if cache != nil {
263
+			fmt.Fprintf(b.outStream, " ---> Using cache\n")
264
+			utils.Debugf("[BUILDER] Use cached version")
265
+			b.image = cache.ID
266
+			return true, nil
267
+		} else {
268
+			utils.Debugf("[BUILDER] Cache miss")
269
+		}
270
+	}
271
+	return false, nil
272
+}
273
+
274
+func (b *buildFile) CmdRun(args string) error {
275
+	if b.image == "" {
276
+		return fmt.Errorf("Please provide a source image with `from` prior to run")
277
+	}
278
+	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
279
+	if err != nil {
280
+		return err
281
+	}
282
+
283
+	cmd := b.config.Cmd
284
+	// set Cmd manually, this is special case only for Dockerfiles
285
+	b.config.Cmd = config.Cmd
286
+	runconfig.Merge(b.config, config)
287
+
288
+	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
289
+
290
+	utils.Debugf("Command to be executed: %v", b.config.Cmd)
291
+
292
+	hit, err := b.probeCache()
293
+	if err != nil {
294
+		return err
295
+	}
296
+	if hit {
297
+		return nil
298
+	}
299
+
300
+	c, err := b.create()
301
+	if err != nil {
302
+		return err
303
+	}
304
+	// Ensure that we keep the container mounted until the commit
305
+	// to avoid unmounting and then mounting directly again
306
+	c.Mount()
307
+	defer c.Unmount()
308
+
309
+	err = b.run(c)
310
+	if err != nil {
311
+		return err
312
+	}
313
+	if err := b.commit(c.ID, cmd, "run"); err != nil {
314
+		return err
315
+	}
316
+
317
+	return nil
318
+}
319
+
320
+func (b *buildFile) FindEnvKey(key string) int {
321
+	for k, envVar := range b.config.Env {
322
+		envParts := strings.SplitN(envVar, "=", 2)
323
+		if key == envParts[0] {
324
+			return k
325
+		}
326
+	}
327
+	return -1
328
+}
329
+
330
+func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
331
+	exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
332
+	if err != nil {
333
+		return value, err
334
+	}
335
+	matches := exp.FindAllString(value, -1)
336
+	for _, match := range matches {
337
+		match = match[strings.Index(match, "$"):]
338
+		matchKey := strings.Trim(match, "${}")
339
+
340
+		for _, envVar := range b.config.Env {
341
+			envParts := strings.SplitN(envVar, "=", 2)
342
+			envKey := envParts[0]
343
+			envValue := envParts[1]
344
+
345
+			if envKey == matchKey {
346
+				value = strings.Replace(value, match, envValue, -1)
347
+				break
348
+			}
349
+		}
350
+	}
351
+	return value, nil
352
+}
353
+
354
+func (b *buildFile) CmdEnv(args string) error {
355
+	tmp := strings.SplitN(args, " ", 2)
356
+	if len(tmp) != 2 {
357
+		return fmt.Errorf("Invalid ENV format")
358
+	}
359
+	key := strings.Trim(tmp[0], " \t")
360
+	value := strings.Trim(tmp[1], " \t")
361
+
362
+	envKey := b.FindEnvKey(key)
363
+	replacedValue, err := b.ReplaceEnvMatches(value)
364
+	if err != nil {
365
+		return err
366
+	}
367
+	replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
368
+
369
+	if envKey >= 0 {
370
+		b.config.Env[envKey] = replacedVar
371
+	} else {
372
+		b.config.Env = append(b.config.Env, replacedVar)
373
+	}
374
+	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
375
+}
376
+
377
+func (b *buildFile) buildCmdFromJson(args string) []string {
378
+	var cmd []string
379
+	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
380
+		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
381
+		cmd = []string{"/bin/sh", "-c", args}
382
+	}
383
+	return cmd
384
+}
385
+
386
+func (b *buildFile) CmdCmd(args string) error {
387
+	cmd := b.buildCmdFromJson(args)
388
+	b.config.Cmd = cmd
389
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
390
+		return err
391
+	}
392
+	b.cmdSet = true
393
+	return nil
394
+}
395
+
396
+func (b *buildFile) CmdEntrypoint(args string) error {
397
+	entrypoint := b.buildCmdFromJson(args)
398
+	b.config.Entrypoint = entrypoint
399
+	// if there is no cmd in current Dockerfile - cleanup cmd
400
+	if !b.cmdSet {
401
+		b.config.Cmd = nil
402
+	}
403
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
404
+		return err
405
+	}
406
+	return nil
407
+}
408
+
409
+func (b *buildFile) CmdExpose(args string) error {
410
+	portsTab := strings.Split(args, " ")
411
+
412
+	if b.config.ExposedPorts == nil {
413
+		b.config.ExposedPorts = make(nat.PortSet)
414
+	}
415
+	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
416
+	if err != nil {
417
+		return err
418
+	}
419
+	for port := range ports {
420
+		if _, exists := b.config.ExposedPorts[port]; !exists {
421
+			b.config.ExposedPorts[port] = struct{}{}
422
+		}
423
+	}
424
+	b.config.PortSpecs = nil
425
+
426
+	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
427
+}
428
+
429
+func (b *buildFile) CmdUser(args string) error {
430
+	b.config.User = args
431
+	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
432
+}
433
+
434
+func (b *buildFile) CmdInsert(args string) error {
435
+	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
436
+}
437
+
438
+func (b *buildFile) CmdCopy(args string) error {
439
+	return b.runContextCommand(args, false, false, "COPY")
440
+}
441
+
442
+func (b *buildFile) CmdWorkdir(workdir string) error {
443
+	if workdir[0] == '/' {
444
+		b.config.WorkingDir = workdir
445
+	} else {
446
+		if b.config.WorkingDir == "" {
447
+			b.config.WorkingDir = "/"
448
+		}
449
+		b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
450
+	}
451
+	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
452
+}
453
+
454
+func (b *buildFile) CmdVolume(args string) error {
455
+	if args == "" {
456
+		return fmt.Errorf("Volume cannot be empty")
457
+	}
458
+
459
+	var volume []string
460
+	if err := json.Unmarshal([]byte(args), &volume); err != nil {
461
+		volume = []string{args}
462
+	}
463
+	if b.config.Volumes == nil {
464
+		b.config.Volumes = map[string]struct{}{}
465
+	}
466
+	for _, v := range volume {
467
+		b.config.Volumes[v] = struct{}{}
468
+	}
469
+	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
470
+		return err
471
+	}
472
+	return nil
473
+}
474
+
475
+func (b *buildFile) checkPathForAddition(orig string) error {
476
+	origPath := path.Join(b.contextPath, orig)
477
+	if p, err := filepath.EvalSymlinks(origPath); err != nil {
478
+		if os.IsNotExist(err) {
479
+			return fmt.Errorf("%s: no such file or directory", orig)
480
+		}
481
+		return err
482
+	} else {
483
+		origPath = p
484
+	}
485
+	if !strings.HasPrefix(origPath, b.contextPath) {
486
+		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
487
+	}
488
+	_, err := os.Stat(origPath)
489
+	if err != nil {
490
+		if os.IsNotExist(err) {
491
+			return fmt.Errorf("%s: no such file or directory", orig)
492
+		}
493
+		return err
494
+	}
495
+	return nil
496
+}
497
+
498
+func (b *buildFile) addContext(container *Container, orig, dest string, decompress bool) error {
499
+	var (
500
+		err        error
501
+		destExists = true
502
+		origPath   = path.Join(b.contextPath, orig)
503
+		destPath   = path.Join(container.RootfsPath(), dest)
504
+	)
505
+
506
+	if destPath != container.RootfsPath() {
507
+		destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath())
508
+		if err != nil {
509
+			return err
510
+		}
511
+	}
512
+
513
+	// Preserve the trailing '/'
514
+	if strings.HasSuffix(dest, "/") || dest == "." {
515
+		destPath = destPath + "/"
516
+	}
517
+
518
+	destStat, err := os.Stat(destPath)
519
+	if err != nil {
520
+		if !os.IsNotExist(err) {
521
+			return err
522
+		}
523
+		destExists = false
524
+	}
525
+
526
+	fi, err := os.Stat(origPath)
527
+	if err != nil {
528
+		if os.IsNotExist(err) {
529
+			return fmt.Errorf("%s: no such file or directory", orig)
530
+		}
531
+		return err
532
+	}
533
+
534
+	if fi.IsDir() {
535
+		return copyAsDirectory(origPath, destPath, destExists)
536
+	}
537
+
538
+	// If we are adding a remote file (or we've been told not to decompress), do not try to untar it
539
+	if decompress {
540
+		// First try to unpack the source as an archive
541
+		// to support the untar feature we need to clean up the path a little bit
542
+		// because tar is very forgiving.  First we need to strip off the archive's
543
+		// filename from the path but this is only added if it does not end in / .
544
+		tarDest := destPath
545
+		if strings.HasSuffix(tarDest, "/") {
546
+			tarDest = filepath.Dir(destPath)
547
+		}
548
+
549
+		// try to successfully untar the orig
550
+		if err := archive.UntarPath(origPath, tarDest); err == nil {
551
+			return nil
552
+		} else if err != io.EOF {
553
+			utils.Debugf("Couldn't untar %s to %s: %s", origPath, tarDest, err)
554
+		}
555
+	}
556
+
557
+	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
558
+		return err
559
+	}
560
+	if err := archive.CopyWithTar(origPath, destPath); err != nil {
561
+		return err
562
+	}
563
+
564
+	resPath := destPath
565
+	if destExists && destStat.IsDir() {
566
+		resPath = path.Join(destPath, path.Base(origPath))
567
+	}
568
+
569
+	return fixPermissions(resPath, 0, 0)
570
+}
571
+
572
+func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
573
+	if b.context == nil {
574
+		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
575
+	}
576
+	tmp := strings.SplitN(args, " ", 2)
577
+	if len(tmp) != 2 {
578
+		return fmt.Errorf("Invalid %s format", cmdName)
579
+	}
580
+
581
+	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
582
+	if err != nil {
583
+		return err
584
+	}
585
+
586
+	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
587
+	if err != nil {
588
+		return err
589
+	}
590
+
591
+	cmd := b.config.Cmd
592
+	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
593
+	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
594
+	b.config.Image = b.image
595
+
596
+	var (
597
+		origPath   = orig
598
+		destPath   = dest
599
+		remoteHash string
600
+		isRemote   bool
601
+		decompress = true
602
+	)
603
+
604
+	isRemote = utils.IsURL(orig)
605
+	if isRemote && !allowRemote {
606
+		return fmt.Errorf("Source can't be an URL for %s", cmdName)
607
+	} else if utils.IsURL(orig) {
608
+		// Initiate the download
609
+		resp, err := utils.Download(orig)
610
+		if err != nil {
611
+			return err
612
+		}
613
+
614
+		// Create a tmp dir
615
+		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
616
+		if err != nil {
617
+			return err
618
+		}
619
+
620
+		// Create a tmp file within our tmp dir
621
+		tmpFileName := path.Join(tmpDirName, "tmp")
622
+		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
623
+		if err != nil {
624
+			return err
625
+		}
626
+		defer os.RemoveAll(tmpDirName)
627
+
628
+		// Download and dump result to tmp file
629
+		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
630
+			tmpFile.Close()
631
+			return err
632
+		}
633
+		tmpFile.Close()
634
+
635
+		// Remove the mtime of the newly created tmp file
636
+		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
637
+			return err
638
+		}
639
+
640
+		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
641
+
642
+		// Process the checksum
643
+		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
644
+		if err != nil {
645
+			return err
646
+		}
647
+		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
648
+		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
649
+			return err
650
+		}
651
+		remoteHash = tarSum.Sum(nil)
652
+		r.Close()
653
+
654
+		// If the destination is a directory, figure out the filename.
655
+		if strings.HasSuffix(dest, "/") {
656
+			u, err := url.Parse(orig)
657
+			if err != nil {
658
+				return err
659
+			}
660
+			path := u.Path
661
+			if strings.HasSuffix(path, "/") {
662
+				path = path[:len(path)-1]
663
+			}
664
+			parts := strings.Split(path, "/")
665
+			filename := parts[len(parts)-1]
666
+			if filename == "" {
667
+				return fmt.Errorf("cannot determine filename from url: %s", u)
668
+			}
669
+			destPath = dest + filename
670
+		}
671
+	}
672
+
673
+	if err := b.checkPathForAddition(origPath); err != nil {
674
+		return err
675
+	}
676
+
677
+	// Hash path and check the cache
678
+	if b.utilizeCache {
679
+		var (
680
+			hash string
681
+			sums = b.context.GetSums()
682
+		)
683
+
684
+		if remoteHash != "" {
685
+			hash = remoteHash
686
+		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
687
+			return err
688
+		} else if fi.IsDir() {
689
+			var subfiles []string
690
+			for file, sum := range sums {
691
+				absFile := path.Join(b.contextPath, file)
692
+				absOrigPath := path.Join(b.contextPath, origPath)
693
+				if strings.HasPrefix(absFile, absOrigPath) {
694
+					subfiles = append(subfiles, sum)
695
+				}
696
+			}
697
+			sort.Strings(subfiles)
698
+			hasher := sha256.New()
699
+			hasher.Write([]byte(strings.Join(subfiles, ",")))
700
+			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
701
+		} else {
702
+			if origPath[0] == '/' && len(origPath) > 1 {
703
+				origPath = origPath[1:]
704
+			}
705
+			origPath = strings.TrimPrefix(origPath, "./")
706
+			if h, ok := sums[origPath]; ok {
707
+				hash = "file:" + h
708
+			}
709
+		}
710
+		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
711
+		hit, err := b.probeCache()
712
+		if err != nil {
713
+			return err
714
+		}
715
+		// If we do not have a hash, never use the cache
716
+		if hit && hash != "" {
717
+			return nil
718
+		}
719
+	}
720
+
721
+	// Create the container
722
+	container, _, err := b.daemon.Create(b.config, "")
723
+	if err != nil {
724
+		return err
725
+	}
726
+	b.tmpContainers[container.ID] = struct{}{}
727
+
728
+	if err := container.Mount(); err != nil {
729
+		return err
730
+	}
731
+	defer container.Unmount()
732
+
733
+	if !allowDecompression || isRemote {
734
+		decompress = false
735
+	}
736
+	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
737
+		return err
738
+	}
739
+
740
+	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
741
+		return err
742
+	}
743
+	return nil
744
+}
745
+
746
+func (b *buildFile) CmdAdd(args string) error {
747
+	return b.runContextCommand(args, true, true, "ADD")
748
+}
749
+
750
+func (b *buildFile) create() (*Container, error) {
751
+	if b.image == "" {
752
+		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
753
+	}
754
+	b.config.Image = b.image
755
+
756
+	// Create the container
757
+	c, _, err := b.daemon.Create(b.config, "")
758
+	if err != nil {
759
+		return nil, err
760
+	}
761
+	b.tmpContainers[c.ID] = struct{}{}
762
+	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
763
+
764
+	// override the entry point that may have been picked up from the base image
765
+	c.Path = b.config.Cmd[0]
766
+	c.Args = b.config.Cmd[1:]
767
+
768
+	return c, nil
769
+}
770
+
771
+func (b *buildFile) run(c *Container) error {
772
+	var errCh chan error
773
+	if b.verbose {
774
+		errCh = utils.Go(func() error {
775
+			// FIXME: call the 'attach' job so that daemon.Attach can be made private
776
+			//
777
+			// FIXME (LK4D4): Also, maybe makes sense to call "logs" job, it is like attach
778
+			// but without hijacking for stdin. Also, with attach there can be race
779
+			// condition because of some output already was printed before it.
780
+			return <-b.daemon.Attach(c, nil, nil, b.outStream, b.errStream)
781
+		})
782
+	}
783
+
784
+	//start the container
785
+	if err := c.Start(); err != nil {
786
+		return err
787
+	}
788
+
789
+	if errCh != nil {
790
+		if err := <-errCh; err != nil {
791
+			return err
792
+		}
793
+	}
794
+
795
+	// Wait for it to finish
796
+	if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
797
+		err := &utils.JSONError{
798
+			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
799
+			Code:    ret,
800
+		}
801
+		return err
802
+	}
803
+
804
+	return nil
805
+}
806
+
807
+// Commit the container <id> with the autorun command <autoCmd>
808
+func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
809
+	if b.image == "" {
810
+		return fmt.Errorf("Please provide a source image with `from` prior to commit")
811
+	}
812
+	b.config.Image = b.image
813
+	if id == "" {
814
+		cmd := b.config.Cmd
815
+		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
816
+		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
817
+
818
+		hit, err := b.probeCache()
819
+		if err != nil {
820
+			return err
821
+		}
822
+		if hit {
823
+			return nil
824
+		}
825
+
826
+		container, warnings, err := b.daemon.Create(b.config, "")
827
+		if err != nil {
828
+			return err
829
+		}
830
+		for _, warning := range warnings {
831
+			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
832
+		}
833
+		b.tmpContainers[container.ID] = struct{}{}
834
+		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
835
+		id = container.ID
836
+
837
+		if err := container.Mount(); err != nil {
838
+			return err
839
+		}
840
+		defer container.Unmount()
841
+	}
842
+	container := b.daemon.Get(id)
843
+	if container == nil {
844
+		return fmt.Errorf("An error occured while creating the container")
845
+	}
846
+
847
+	// Note: Actually copy the struct
848
+	autoConfig := *b.config
849
+	autoConfig.Cmd = autoCmd
850
+	// Commit the container
851
+	image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
852
+	if err != nil {
853
+		return err
854
+	}
855
+	b.tmpImages[image.ID] = struct{}{}
856
+	b.image = image.ID
857
+	return nil
858
+}
859
+
860
+// Long lines can be split with a backslash
861
+var lineContinuation = regexp.MustCompile(`\\\s*\n`)
862
+
863
+func (b *buildFile) Build(context io.Reader) (string, error) {
864
+	tmpdirPath, err := ioutil.TempDir("", "docker-build")
865
+	if err != nil {
866
+		return "", err
867
+	}
868
+
869
+	decompressedStream, err := archive.DecompressStream(context)
870
+	if err != nil {
871
+		return "", err
872
+	}
873
+
874
+	b.context = &tarsum.TarSum{Reader: decompressedStream, DisableCompression: true}
875
+	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
876
+		return "", err
877
+	}
878
+	defer os.RemoveAll(tmpdirPath)
879
+
880
+	b.contextPath = tmpdirPath
881
+	filename := path.Join(tmpdirPath, "Dockerfile")
882
+	if _, err := os.Stat(filename); os.IsNotExist(err) {
883
+		return "", fmt.Errorf("Can't build a directory with no Dockerfile")
884
+	}
885
+	fileBytes, err := ioutil.ReadFile(filename)
886
+	if err != nil {
887
+		return "", err
888
+	}
889
+	if len(fileBytes) == 0 {
890
+		return "", ErrDockerfileEmpty
891
+	}
892
+	var (
893
+		dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
894
+		stepN      = 0
895
+	)
896
+	for _, line := range strings.Split(dockerfile, "\n") {
897
+		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
898
+		if len(line) == 0 {
899
+			continue
900
+		}
901
+		if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
902
+			if b.forceRm {
903
+				b.clearTmp(b.tmpContainers)
904
+			}
905
+			return "", err
906
+		} else if b.rm {
907
+			b.clearTmp(b.tmpContainers)
908
+		}
909
+		stepN++
910
+	}
911
+	if b.image != "" {
912
+		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
913
+		return b.image, nil
914
+	}
915
+	return "", fmt.Errorf("No image was generated. This may be because the Dockerfile contains no instructions")
916
+}
917
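Build therefore expects a tar context with a Dockerfile at its root; after comments are stripped and continuations joined, every remaining non-empty line becomes one numbered step. A rough sketch of that preprocessing and step loop on an in-memory Dockerfile (stripComments is re-declared here so the example stands alone; it mirrors the helper defined further down):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var lineContinuation = regexp.MustCompile(`\\\s*\n`)

// stripComments drops empty lines and lines starting with '#'.
func stripComments(raw []byte) string {
	var out []string
	for _, l := range strings.Split(string(raw), "\n") {
		if len(l) == 0 || l[0] == '#' {
			continue
		}
		out = append(out, l)
	}
	return strings.Join(out, "\n")
}

func main() {
	raw := []byte("# builder demo\nFROM busybox\nRUN echo hello\nCMD [\"true\"]\n")
	dockerfile := lineContinuation.ReplaceAllString(stripComments(raw), "")
	stepN := 0
	for _, line := range strings.Split(dockerfile, "\n") {
		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
		if len(line) == 0 {
			continue
		}
		fmt.Printf("Step %d : %s\n", stepN, line)
		stepN++
	}
	// Step 0 : FROM busybox
	// Step 1 : RUN echo hello
	// Step 2 : CMD ["true"]
}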
+
918
+// BuildStep parses a single build step from `expression` and executes it in the current context.
919
+func (b *buildFile) BuildStep(name, expression string) error {
920
+	fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
921
+	tmp := strings.SplitN(expression, " ", 2)
922
+	if len(tmp) != 2 {
923
+		return fmt.Errorf("Invalid Dockerfile format")
924
+	}
925
+	instruction := strings.ToLower(strings.Trim(tmp[0], " "))
926
+	arguments := strings.Trim(tmp[1], " ")
927
+
928
+	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
929
+	if !exists {
930
+		fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
931
+		return nil
932
+	}
933
+
934
+	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
935
+	if ret != nil {
936
+		return ret.(error)
937
+	}
938
+
939
+	fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
940
+	return nil
941
+}
942
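BuildStep dispatches on the first word of each line by looking up a method named Cmd&lt;Instruction&gt; via reflection, which is why the supported instruction set is exactly the set of Cmd* methods on buildFile. A standalone sketch of that dispatch with a hypothetical miniBuilder type:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type miniBuilder struct{}

func (b *miniBuilder) CmdRun(args string) error {
	fmt.Println("would run:", args)
	return nil
}

// dispatch mirrors BuildStep's lookup: "RUN echo hi" -> method "CmdRun".
func dispatch(b *miniBuilder, expression string) error {
	parts := strings.SplitN(expression, " ", 2)
	if len(parts) != 2 {
		return fmt.Errorf("Invalid Dockerfile format")
	}
	instruction := strings.ToLower(strings.Trim(parts[0], " "))
	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + instruction[1:])
	if !exists {
		fmt.Printf("# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
		return nil
	}
	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(strings.Trim(parts[1], " "))})[0].Interface()
	if ret != nil {
		return ret.(error)
	}
	return nil
}

func main() {
	b := &miniBuilder{}
	_ = dispatch(b, "RUN echo hi")        // would run: echo hi
	_ = dispatch(b, "HEALTHCHECK exit 0") // # Skipping unknown instruction HEALTHCHECK
}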
+
943
+func stripComments(raw []byte) string {
944
+	var (
945
+		out   []string
946
+		lines = strings.Split(string(raw), "\n")
947
+	)
948
+	for _, l := range lines {
949
+		if len(l) == 0 || l[0] == '#' {
950
+			continue
951
+		}
952
+		out = append(out, l)
953
+	}
954
+	return strings.Join(out, "\n")
955
+}
956
+
957
+func copyAsDirectory(source, destination string, destinationExists bool) error {
958
+	if err := archive.CopyWithTar(source, destination); err != nil {
959
+		return err
960
+	}
961
+
962
+	if destinationExists {
963
+		files, err := ioutil.ReadDir(source)
964
+		if err != nil {
965
+			return err
966
+		}
967
+
968
+		for _, file := range files {
969
+			if err := fixPermissions(filepath.Join(destination, file.Name()), 0, 0); err != nil {
970
+				return err
971
+			}
972
+		}
973
+		return nil
974
+	}
975
+
976
+	return fixPermissions(destination, 0, 0)
977
+}
978
+
979
+func fixPermissions(destination string, uid, gid int) error {
980
+	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
981
+		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
982
+			return err
983
+		}
984
+		return nil
985
+	})
986
+}
987
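fixPermissions walks the copied tree and chowns every entry to the requested uid/gid, ignoring paths that disappear mid-walk; os.Lchown is used so a symlink's own ownership is changed rather than its target's. A standalone sketch of the same walk, chowning to the current user so it can run unprivileged:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func fixPermissions(destination string, uid, gid int) error {
	return filepath.Walk(destination, func(path string, info os.FileInfo, err error) error {
		if err := os.Lchown(path, uid, gid); err != nil && !os.IsNotExist(err) {
			return err
		}
		return nil
	})
}

func main() {
	dir, err := ioutil.TempDir("", "fixperms-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := ioutil.WriteFile(filepath.Join(dir, "file"), []byte("x"), 0644); err != nil {
		panic(err)
	}
	// The builder passes 0, 0 (root); use the current uid/gid here instead.
	if err := fixPermissions(dir, os.Getuid(), os.Getgid()); err != nil {
		panic(err)
	}
	fmt.Println("ownership fixed under", dir)
}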
+
988
+func NewBuildFile(d *Daemon, eng *engine.Engine, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, forceRm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
989
+	return &buildFile{
990
+		daemon:        d,
991
+		eng:           eng,
992
+		config:        &runconfig.Config{},
993
+		outStream:     outStream,
994
+		errStream:     errStream,
995
+		tmpContainers: make(map[string]struct{}),
996
+		tmpImages:     make(map[string]struct{}),
997
+		verbose:       verbose,
998
+		utilizeCache:  utilizeCache,
999
+		rm:            rm,
1000
+		forceRm:       forceRm,
1001
+		sf:            sf,
1002
+		authConfig:    auth,
1003
+		configFile:    authConfigFile,
1004
+		outOld:        outOld,
1005
+	}
1006
+}
... ...
@@ -109,6 +109,7 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
109 109
 	// FIXME: remove ImageDelete's dependency on Daemon, then move to graph/
110 110
 	for name, method := range map[string]engine.Handler{
111 111
 		"attach":            daemon.ContainerAttach,
112
+		"build":             daemon.CmdBuild,
112 113
 		"commit":            daemon.ContainerCommit,
113 114
 		"container_changes": daemon.ContainerChanges,
114 115
 		"container_copy":    daemon.ContainerCopy,
115 116
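With "build" registered on the daemon's engine, callers drive it the same way the builder drives "pull" above: create a job by name, set options through its environment, attach streams, and run it. A hedged sketch of such a call, assuming an initialized *engine.Engine with the daemon installed; the tag, flags, and context are illustrative, using the option names the handler reads ("t", "rm", "json", build context on stdin):

package demo

import (
	"io"
	"os"

	"github.com/docker/docker/engine"
)

// runBuildJob sketches a caller of the newly registered "build" handler.
// context is expected to be a tarred build context.
func runBuildJob(eng *engine.Engine, context io.Reader) error {
	job := eng.Job("build")
	job.Setenv("t", "example/demo:latest") // repository to tag on success
	job.SetenvBool("rm", true)             // remove intermediate containers
	job.SetenvBool("json", false)          // plain progress output
	job.Stdin.Add(context)
	job.Stdout.Add(os.Stdout)
	return job.Run()
}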
deleted file mode 100644
... ...
@@ -1,100 +0,0 @@
1
-// DEPRECATION NOTICE. PLEASE DO NOT ADD ANYTHING TO THIS FILE.
2
-//
3
-// For additional comments see server/server.go
4
-//
5
-package server
6
-
7
-import (
8
-	"io"
9
-	"io/ioutil"
10
-	"os"
11
-	"os/exec"
12
-	"strings"
13
-
14
-	"github.com/docker/docker/archive"
15
-	"github.com/docker/docker/builder"
16
-	"github.com/docker/docker/engine"
17
-	"github.com/docker/docker/pkg/parsers"
18
-	"github.com/docker/docker/registry"
19
-	"github.com/docker/docker/utils"
20
-)
21
-
22
-func (srv *Server) Build(job *engine.Job) engine.Status {
23
-	if len(job.Args) != 0 {
24
-		return job.Errorf("Usage: %s\n", job.Name)
25
-	}
26
-	var (
27
-		remoteURL      = job.Getenv("remote")
28
-		repoName       = job.Getenv("t")
29
-		suppressOutput = job.GetenvBool("q")
30
-		noCache        = job.GetenvBool("nocache")
31
-		rm             = job.GetenvBool("rm")
32
-		forceRm        = job.GetenvBool("forcerm")
33
-		authConfig     = &registry.AuthConfig{}
34
-		configFile     = &registry.ConfigFile{}
35
-		tag            string
36
-		context        io.ReadCloser
37
-	)
38
-	job.GetenvJson("authConfig", authConfig)
39
-	job.GetenvJson("configFile", configFile)
40
-	repoName, tag = parsers.ParseRepositoryTag(repoName)
41
-
42
-	if remoteURL == "" {
43
-		context = ioutil.NopCloser(job.Stdin)
44
-	} else if utils.IsGIT(remoteURL) {
45
-		if !strings.HasPrefix(remoteURL, "git://") {
46
-			remoteURL = "https://" + remoteURL
47
-		}
48
-		root, err := ioutil.TempDir("", "docker-build-git")
49
-		if err != nil {
50
-			return job.Error(err)
51
-		}
52
-		defer os.RemoveAll(root)
53
-
54
-		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
55
-			return job.Errorf("Error trying to use git: %s (%s)", err, output)
56
-		}
57
-
58
-		c, err := archive.Tar(root, archive.Uncompressed)
59
-		if err != nil {
60
-			return job.Error(err)
61
-		}
62
-		context = c
63
-	} else if utils.IsURL(remoteURL) {
64
-		f, err := utils.Download(remoteURL)
65
-		if err != nil {
66
-			return job.Error(err)
67
-		}
68
-		defer f.Body.Close()
69
-		dockerFile, err := ioutil.ReadAll(f.Body)
70
-		if err != nil {
71
-			return job.Error(err)
72
-		}
73
-		c, err := archive.Generate("Dockerfile", string(dockerFile))
74
-		if err != nil {
75
-			return job.Error(err)
76
-		}
77
-		context = c
78
-	}
79
-	defer context.Close()
80
-
81
-	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
82
-	b := builder.NewBuildFile(srv.daemon, srv.Eng,
83
-		&utils.StdoutFormater{
84
-			Writer:          job.Stdout,
85
-			StreamFormatter: sf,
86
-		},
87
-		&utils.StderrFormater{
88
-			Writer:          job.Stdout,
89
-			StreamFormatter: sf,
90
-		},
91
-		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
92
-	id, err := b.Build(context)
93
-	if err != nil {
94
-		return job.Error(err)
95
-	}
96
-	if repoName != "" {
97
-		srv.daemon.Repositories().Set(repoName, tag, id, false)
98
-	}
99
-	return engine.StatusOK
100
-}
... ...
@@ -31,9 +31,7 @@ func InitServer(job *engine.Job) engine.Status {
31 31
 	job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
32 32
 	job.Eng.Hack_SetGlobalVar("httpapi.daemon", srv.daemon)
33 33
 
34
-	for name, handler := range map[string]engine.Handler{
35
-		"build": srv.Build,
36
-	} {
34
+	for name, handler := range map[string]engine.Handler{} {
37 35
 		if err := job.Eng.Register(name, srv.handlerWrap(handler)); err != nil {
38 36
 			return job.Error(err)
39 37
 		}