Remove pathCache from imageContexts
Extract validateCopySourcePath
Extract copyWithWildcards
Extract copyInfoForFile and walkSource from calcCopyInfo
Move copy internals to copy.go
remove source from Builder
Signed-off-by: Daniel Nephin <dnephin@docker.com>
... | ... |
@@ -98,7 +98,6 @@ type Builder struct { |
98 | 98 |
Output io.Writer |
99 | 99 |
|
100 | 100 |
docker builder.Backend |
101 |
- source builder.Source |
|
102 | 101 |
clientCtx context.Context |
103 | 102 |
|
104 | 103 |
tmpContainers map[string]struct{} |
... | ... |
@@ -108,6 +107,7 @@ type Builder struct { |
108 | 108 |
buildArgs *buildArgs |
109 | 109 |
imageCache builder.ImageCache |
110 | 110 |
imageSources *imageSources |
111 |
+ pathCache pathCache |
|
111 | 112 |
} |
112 | 113 |
|
113 | 114 |
// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options. |
... | ... |
@@ -128,6 +128,7 @@ func newBuilder(clientCtx context.Context, options builderOptions) *Builder { |
128 | 128 |
buildArgs: newBuildArgs(config.BuildArgs), |
129 | 129 |
buildStages: newBuildStages(), |
130 | 130 |
imageSources: newImageSources(clientCtx, options), |
131 |
+ pathCache: options.PathCache, |
|
131 | 132 |
} |
132 | 133 |
return b |
133 | 134 |
} |
... | ... |
@@ -144,9 +145,6 @@ func (b *Builder) resetImageCache() { |
144 | 144 |
func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { |
145 | 145 |
defer b.imageSources.Unmount() |
146 | 146 |
|
147 |
- // TODO: Remove source field from Builder |
|
148 |
- b.source = source |
|
149 |
- |
|
150 | 147 |
addNodesForLabelOption(dockerfile.AST, b.options.Labels) |
151 | 148 |
|
152 | 149 |
if err := checkDispatchDockerfile(dockerfile.AST); err != nil { |
... | ... |
@@ -154,7 +152,7 @@ func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*buil |
154 | 154 |
return nil, err |
155 | 155 |
} |
156 | 156 |
|
157 |
- dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile) |
|
157 |
+ dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile, source) |
|
158 | 158 |
if err != nil { |
159 | 159 |
return nil, err |
160 | 160 |
} |
... | ... |
@@ -180,7 +178,7 @@ func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error |
180 | 180 |
return aux.Emit(types.BuildResult{ID: state.imageID}) |
181 | 181 |
} |
182 | 182 |
|
183 |
-func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result) (*dispatchState, error) { |
|
183 |
+func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result, source builder.Source) (*dispatchState, error) { |
|
184 | 184 |
shlex := NewShellLex(dockerfile.EscapeToken) |
185 | 185 |
state := newDispatchState() |
186 | 186 |
total := len(dockerfile.AST.Children) |
... | ... |
@@ -214,6 +212,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result) |
214 | 214 |
stepMsg: formatStep(i, total), |
215 | 215 |
node: n, |
216 | 216 |
shlex: shlex, |
217 |
+ source: source, |
|
217 | 218 |
} |
218 | 219 |
if state, err = b.dispatch(opts); err != nil { |
219 | 220 |
if b.options.ForceRemove { |
220 | 221 |
new file mode 100644 |
... | ... |
@@ -0,0 +1,360 @@ |
0 |
+package dockerfile |
|
1 |
+ |
|
2 |
+import ( |
|
3 |
+ "fmt" |
|
4 |
+ "io" |
|
5 |
+ "net/http" |
|
6 |
+ "net/url" |
|
7 |
+ "os" |
|
8 |
+ "path/filepath" |
|
9 |
+ "sort" |
|
10 |
+ "strings" |
|
11 |
+ "time" |
|
12 |
+ |
|
13 |
+ "github.com/docker/docker/builder" |
|
14 |
+ "github.com/docker/docker/builder/remotecontext" |
|
15 |
+ "github.com/docker/docker/pkg/httputils" |
|
16 |
+ "github.com/docker/docker/pkg/ioutils" |
|
17 |
+ "github.com/docker/docker/pkg/progress" |
|
18 |
+ "github.com/docker/docker/pkg/streamformatter" |
|
19 |
+ "github.com/docker/docker/pkg/system" |
|
20 |
+ "github.com/docker/docker/pkg/urlutil" |
|
21 |
+ "github.com/pkg/errors" |
|
22 |
+) |
|
23 |
+ |
|
24 |
+type pathCache interface { |
|
25 |
+ Load(key interface{}) (value interface{}, ok bool) |
|
26 |
+ Store(key, value interface{}) |
|
27 |
+} |
|
28 |
+ |
|
29 |
+// copyInfo is a data object which stores the metadata about each source file in |
|
30 |
+// a copyInstruction |
|
31 |
+type copyInfo struct { |
|
32 |
+ root string |
|
33 |
+ path string |
|
34 |
+ hash string |
|
35 |
+} |
|
36 |
+ |
|
37 |
+func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { |
|
38 |
+ return copyInfo{root: source.Root(), path: path, hash: hash} |
|
39 |
+} |
|
40 |
+ |
|
41 |
+func newCopyInfos(copyInfos ...copyInfo) []copyInfo { |
|
42 |
+ return copyInfos |
|
43 |
+} |
|
44 |
+ |
|
45 |
+// copyInstruction is a fully parsed COPY or ADD command that is passed to |
|
46 |
+// Builder.performCopy to copy files into the image filesystem |
|
47 |
+type copyInstruction struct { |
|
48 |
+ cmdName string |
|
49 |
+ infos []copyInfo |
|
50 |
+ dest string |
|
51 |
+ allowLocalDecompression bool |
|
52 |
+} |
|
53 |
+ |
|
54 |
+// copier reads a raw COPY or ADD command, fetches remote sources using a downloader, |
|
55 |
+// and creates a copyInstruction |
|
56 |
+type copier struct { |
|
57 |
+ imageSource *imageMount |
|
58 |
+ source builder.Source |
|
59 |
+ pathCache pathCache |
|
60 |
+ download sourceDownloader |
|
61 |
+ tmpPaths []string |
|
62 |
+} |
|
63 |
+ |
|
64 |
+func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { |
|
65 |
+ return copier{ |
|
66 |
+ source: req.source, |
|
67 |
+ pathCache: req.builder.pathCache, |
|
68 |
+ download: download, |
|
69 |
+ imageSource: imageSource, |
|
70 |
+ } |
|
71 |
+} |
|
72 |
+ |
|
73 |
+func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) { |
|
74 |
+ inst := copyInstruction{cmdName: cmdName} |
|
75 |
+ last := len(args) - 1 |
|
76 |
+ |
|
77 |
+ // Work in daemon-specific filepath semantics |
|
78 |
+ inst.dest = filepath.FromSlash(args[last]) |
|
79 |
+ |
|
80 |
+ infos, err := o.getCopyInfosForSourcePaths(args[0:last]) |
|
81 |
+ if err != nil { |
|
82 |
+ return inst, errors.Wrapf(err, "%s failed", cmdName) |
|
83 |
+ } |
|
84 |
+ if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) { |
|
85 |
+ return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) |
|
86 |
+ } |
|
87 |
+ inst.infos = infos |
|
88 |
+ return inst, nil |
|
89 |
+} |
|
90 |
+ |
|
91 |
+// getCopyInfosForSourcePaths iterates over the source files and calculate the info |
|
92 |
+// needed to copy (e.g. hash value if cached) |
|
93 |
+func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) { |
|
94 |
+ var infos []copyInfo |
|
95 |
+ for _, orig := range sources { |
|
96 |
+ subinfos, err := o.getCopyInfoForSourcePath(orig) |
|
97 |
+ if err != nil { |
|
98 |
+ return nil, err |
|
99 |
+ } |
|
100 |
+ infos = append(infos, subinfos...) |
|
101 |
+ } |
|
102 |
+ |
|
103 |
+ if len(infos) == 0 { |
|
104 |
+ return nil, errors.New("no source files were specified") |
|
105 |
+ } |
|
106 |
+ return infos, nil |
|
107 |
+} |
|
108 |
+ |
|
109 |
+func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) { |
|
110 |
+ if !urlutil.IsURL(orig) { |
|
111 |
+ return o.calcCopyInfo(orig, true) |
|
112 |
+ } |
|
113 |
+ remote, path, err := o.download(orig) |
|
114 |
+ if err != nil { |
|
115 |
+ return nil, err |
|
116 |
+ } |
|
117 |
+ o.tmpPaths = append(o.tmpPaths, remote.Root()) |
|
118 |
+ |
|
119 |
+ hash, err := remote.Hash(path) |
|
120 |
+ return newCopyInfos(newCopyInfoFromSource(remote, path, hash)), err |
|
121 |
+} |
|
122 |
+ |
|
123 |
+// Cleanup removes any temporary directories created as part of downloading |
|
124 |
+// remote files. |
|
125 |
+func (o *copier) Cleanup() { |
|
126 |
+ for _, path := range o.tmpPaths { |
|
127 |
+ os.RemoveAll(path) |
|
128 |
+ } |
|
129 |
+ o.tmpPaths = []string{} |
|
130 |
+} |
|
131 |
+ |
|
132 |
+// TODO: allowWildcards can probably be removed by refactoring this function further. |
|
133 |
+func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { |
|
134 |
+ imageSource := o.imageSource |
|
135 |
+ if err := validateCopySourcePath(imageSource, origPath); err != nil { |
|
136 |
+ return nil, err |
|
137 |
+ } |
|
138 |
+ |
|
139 |
+ // Work in daemon-specific OS filepath semantics |
|
140 |
+ origPath = filepath.FromSlash(origPath) |
|
141 |
+ origPath = strings.TrimPrefix(origPath, string(os.PathSeparator)) |
|
142 |
+ origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) |
|
143 |
+ |
|
144 |
+ // TODO: do this when creating copier. Requires validateCopySourcePath |
|
145 |
+ // (and others below) to be aware of the different sources. Why is it only |
|
146 |
+ // done on image Source? |
|
147 |
+ if imageSource != nil { |
|
148 |
+ var err error |
|
149 |
+ o.source, err = imageSource.Source() |
|
150 |
+ if err != nil { |
|
151 |
+ return nil, errors.Wrapf(err, "failed to copy") |
|
152 |
+ } |
|
153 |
+ } |
|
154 |
+ |
|
155 |
+ if o.source == nil { |
|
156 |
+ return nil, errors.Errorf("missing build context") |
|
157 |
+ } |
|
158 |
+ |
|
159 |
+ // Deal with wildcards |
|
160 |
+ if allowWildcards && containsWildcards(origPath) { |
|
161 |
+ return o.copyWithWildcards(origPath) |
|
162 |
+ } |
|
163 |
+ |
|
164 |
+ if imageSource != nil && imageSource.ImageID() != "" { |
|
165 |
+ // return a cached copy if one exists |
|
166 |
+ if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { |
|
167 |
+ return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil |
|
168 |
+ } |
|
169 |
+ } |
|
170 |
+ |
|
171 |
+ // Deal with the single file case |
|
172 |
+ copyInfo, err := copyInfoForFile(o.source, origPath) |
|
173 |
+ switch { |
|
174 |
+ case err != nil: |
|
175 |
+ return nil, err |
|
176 |
+ case copyInfo.hash != "": |
|
177 |
+ o.storeInPathCache(imageSource, origPath, copyInfo.hash) |
|
178 |
+ return newCopyInfos(copyInfo), err |
|
179 |
+ } |
|
180 |
+ |
|
181 |
+ // TODO: remove, handle dirs in Hash() |
|
182 |
+ subfiles, err := walkSource(o.source, origPath) |
|
183 |
+ if err != nil { |
|
184 |
+ return nil, err |
|
185 |
+ } |
|
186 |
+ |
|
187 |
+ hash := hashStringSlice("dir", subfiles) |
|
188 |
+ o.storeInPathCache(imageSource, origPath, hash) |
|
189 |
+ return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil |
|
190 |
+} |
|
191 |
+ |
|
192 |
+func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { |
|
193 |
+ if im != nil { |
|
194 |
+ o.pathCache.Store(im.ImageID()+path, hash) |
|
195 |
+ } |
|
196 |
+} |
|
197 |
+ |
|
198 |
+func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { |
|
199 |
+ var copyInfos []copyInfo |
|
200 |
+ if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error { |
|
201 |
+ if err != nil { |
|
202 |
+ return err |
|
203 |
+ } |
|
204 |
+ rel, err := remotecontext.Rel(o.source.Root(), path) |
|
205 |
+ if err != nil { |
|
206 |
+ return err |
|
207 |
+ } |
|
208 |
+ |
|
209 |
+ if rel == "." { |
|
210 |
+ return nil |
|
211 |
+ } |
|
212 |
+ if match, _ := filepath.Match(origPath, rel); !match { |
|
213 |
+ return nil |
|
214 |
+ } |
|
215 |
+ |
|
216 |
+ // Note we set allowWildcards to false in case the name has |
|
217 |
+ // a * in it |
|
218 |
+ subInfos, err := o.calcCopyInfo(rel, false) |
|
219 |
+ if err != nil { |
|
220 |
+ return err |
|
221 |
+ } |
|
222 |
+ copyInfos = append(copyInfos, subInfos...) |
|
223 |
+ return nil |
|
224 |
+ }); err != nil { |
|
225 |
+ return nil, err |
|
226 |
+ } |
|
227 |
+ return copyInfos, nil |
|
228 |
+} |
|
229 |
+ |
|
230 |
+func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { |
|
231 |
+ fi, err := remotecontext.StatAt(source, path) |
|
232 |
+ if err != nil { |
|
233 |
+ return copyInfo{}, err |
|
234 |
+ } |
|
235 |
+ |
|
236 |
+ if fi.IsDir() { |
|
237 |
+ return copyInfo{}, nil |
|
238 |
+ } |
|
239 |
+ hash, err := source.Hash(path) |
|
240 |
+ if err != nil { |
|
241 |
+ return copyInfo{}, err |
|
242 |
+ } |
|
243 |
+ return newCopyInfoFromSource(source, path, "file:"+hash), nil |
|
244 |
+} |
|
245 |
+ |
|
246 |
+// TODO: dedupe with copyWithWildcards() |
|
247 |
+func walkSource(source builder.Source, origPath string) ([]string, error) { |
|
248 |
+ fp, err := remotecontext.FullPath(source, origPath) |
|
249 |
+ if err != nil { |
|
250 |
+ return nil, err |
|
251 |
+ } |
|
252 |
+ // Must be a dir |
|
253 |
+ var subfiles []string |
|
254 |
+ err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { |
|
255 |
+ if err != nil { |
|
256 |
+ return err |
|
257 |
+ } |
|
258 |
+ rel, err := remotecontext.Rel(source.Root(), path) |
|
259 |
+ if err != nil { |
|
260 |
+ return err |
|
261 |
+ } |
|
262 |
+ if rel == "." { |
|
263 |
+ return nil |
|
264 |
+ } |
|
265 |
+ hash, err := source.Hash(rel) |
|
266 |
+ if err != nil { |
|
267 |
+ return nil |
|
268 |
+ } |
|
269 |
+ // NOTE(review): stale comment — no handleHash exists in this patch; a Hash error above silently skips the entry |
|
270 |
+ subfiles = append(subfiles, hash) |
|
271 |
+ return nil |
|
272 |
+ }) |
|
273 |
+ if err != nil { |
|
274 |
+ return nil, err |
|
275 |
+ } |
|
276 |
+ |
|
277 |
+ sort.Strings(subfiles) |
|
278 |
+ return subfiles, nil |
|
279 |
+} |
|
280 |
+ |
|
281 |
+type sourceDownloader func(string) (builder.Source, string, error) |
|
282 |
+ |
|
283 |
+func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { |
|
284 |
+ return func(url string) (builder.Source, string, error) { |
|
285 |
+ return downloadSource(output, stdout, url) |
|
286 |
+ } |
|
287 |
+} |
|
288 |
+ |
|
289 |
+func errOnSourceDownload(_ string) (builder.Source, string, error) { |
|
290 |
+ return nil, "", errors.New("source can't be a URL for COPY") |
|
291 |
+} |
|
292 |
+ |
|
293 |
+func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { |
|
294 |
+ // get filename from URL |
|
295 |
+ u, err := url.Parse(srcURL) |
|
296 |
+ if err != nil { |
|
297 |
+ return |
|
298 |
+ } |
|
299 |
+ filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics |
|
300 |
+ if filename == "" { |
|
301 |
+ err = errors.Errorf("cannot determine filename from url: %s", u) |
|
302 |
+ return |
|
303 |
+ } |
|
304 |
+ |
|
305 |
+ // Initiate the download |
|
306 |
+ resp, err := httputils.Download(srcURL) |
|
307 |
+ if err != nil { |
|
308 |
+ return |
|
309 |
+ } |
|
310 |
+ |
|
311 |
+ // Prepare file in a tmp dir |
|
312 |
+ tmpDir, err := ioutils.TempDir("", "docker-remote") |
|
313 |
+ if err != nil { |
|
314 |
+ return |
|
315 |
+ } |
|
316 |
+ defer func() { |
|
317 |
+ if err != nil { |
|
318 |
+ os.RemoveAll(tmpDir) |
|
319 |
+ } |
|
320 |
+ }() |
|
321 |
+ tmpFileName := filepath.Join(tmpDir, filename) |
|
322 |
+ tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) |
|
323 |
+ if err != nil { |
|
324 |
+ return |
|
325 |
+ } |
|
326 |
+ |
|
327 |
+ progressOutput := streamformatter.NewJSONProgressOutput(output, true) |
|
328 |
+ progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") |
|
329 |
+ // Download and dump result to tmp file |
|
330 |
+ // TODO: add filehash directly |
|
331 |
+ if _, err = io.Copy(tmpFile, progressReader); err != nil { |
|
332 |
+ tmpFile.Close() |
|
333 |
+ return |
|
334 |
+ } |
|
335 |
+ // TODO: how important is this random blank line to the output? |
|
336 |
+ fmt.Fprintln(stdout) |
|
337 |
+ |
|
338 |
+ // Set the mtime to the Last-Modified header value if present |
|
339 |
+ // Otherwise just remove atime and mtime |
|
340 |
+ mTime := time.Time{} |
|
341 |
+ |
|
342 |
+ lastMod := resp.Header.Get("Last-Modified") |
|
343 |
+ if lastMod != "" { |
|
344 |
+ // If we can't parse it then just let it default to 'zero' |
|
345 |
+ // otherwise use the parsed time value |
|
346 |
+ if parsedMTime, err := http.ParseTime(lastMod); err == nil { |
|
347 |
+ mTime = parsedMTime |
|
348 |
+ } |
|
349 |
+ } |
|
350 |
+ |
|
351 |
+ tmpFile.Close() |
|
352 |
+ |
|
353 |
+ if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { |
|
354 |
+ return |
|
355 |
+ } |
|
356 |
+ |
|
357 |
+ lc, err := remotecontext.NewLazyContext(tmpDir) |
|
358 |
+ return lc, filename, err |
|
359 |
+} |
... | ... |
@@ -148,7 +148,16 @@ func add(req dispatchRequest) error { |
148 | 148 |
return err |
149 | 149 |
} |
150 | 150 |
|
151 |
- return req.builder.runContextCommand(req, true, true, "ADD", nil) |
|
151 |
+ downloader := newRemoteSourceDownloader(req.builder.Output, req.builder.Stdout) |
|
152 |
+ copier := copierFromDispatchRequest(req, downloader, nil) |
|
153 |
+ defer copier.Cleanup() |
|
154 |
+ copyInstruction, err := copier.createCopyInstruction(req.args, "ADD") |
|
155 |
+ if err != nil { |
|
156 |
+ return err |
|
157 |
+ } |
|
158 |
+ copyInstruction.allowLocalDecompression = true |
|
159 |
+ |
|
160 |
+ return req.builder.performCopy(req.state, copyInstruction) |
|
152 | 161 |
} |
153 | 162 |
|
154 | 163 |
// COPY foo /path |
... | ... |
@@ -169,7 +178,15 @@ func dispatchCopy(req dispatchRequest) error { |
169 | 169 |
if err != nil { |
170 | 170 |
return errors.Wrapf(err, "invalid from flag value %s", flFrom.Value) |
171 | 171 |
} |
172 |
- return req.builder.runContextCommand(req, false, false, "COPY", im) |
|
172 |
+ |
|
173 |
+ copier := copierFromDispatchRequest(req, errOnSourceDownload, im) |
|
174 |
+ defer copier.Cleanup() |
|
175 |
+ copyInstruction, err := copier.createCopyInstruction(req.args, "COPY") |
|
176 |
+ if err != nil { |
|
177 |
+ return err |
|
178 |
+ } |
|
179 |
+ |
|
180 |
+ return req.builder.performCopy(req.state, copyInstruction) |
|
173 | 181 |
} |
174 | 182 |
|
175 | 183 |
func (b *Builder) getImageMount(fromFlag *Flag) (*imageMount, error) { |
... | ... |
@@ -68,6 +68,7 @@ type dispatchRequest struct { |
68 | 68 |
original string |
69 | 69 |
shlex *ShellLex |
70 | 70 |
state *dispatchState |
71 |
+ source builder.Source |
|
71 | 72 |
} |
72 | 73 |
|
73 | 74 |
func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, args []string) dispatchRequest { |
... | ... |
@@ -79,6 +80,7 @@ func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, ar |
79 | 79 |
flags: NewBFlagsWithArgs(options.node.Flags), |
80 | 80 |
shlex: options.shlex, |
81 | 81 |
state: options.state, |
82 |
+ source: options.source, |
|
82 | 83 |
} |
83 | 84 |
} |
84 | 85 |
|
... | ... |
@@ -181,6 +183,7 @@ type dispatchOptions struct { |
181 | 181 |
stepMsg string |
182 | 182 |
node *parser.Node |
183 | 183 |
shlex *ShellLex |
184 |
+ source builder.Source |
|
184 | 185 |
} |
185 | 186 |
|
186 | 187 |
// dispatchState is a data object which is modified by dispatchers |
... | ... |
@@ -105,13 +105,13 @@ func initDispatchTestCases() []dispatchTestCase { |
105 | 105 |
{ |
106 | 106 |
name: "COPY wildcard no files", |
107 | 107 |
dockerfile: `COPY file*.txt /tmp/`, |
108 |
- expectedError: "No source files were specified", |
|
108 |
+ expectedError: "COPY failed: no source files were specified", |
|
109 | 109 |
files: nil, |
110 | 110 |
}, |
111 | 111 |
{ |
112 | 112 |
name: "COPY url", |
113 | 113 |
dockerfile: `COPY https://index.docker.io/robots.txt /`, |
114 |
- expectedError: "Source can't be a URL for COPY", |
|
114 |
+ expectedError: "source can't be a URL for COPY", |
|
115 | 115 |
files: nil, |
116 | 116 |
}, |
117 | 117 |
{ |
... | ... |
@@ -184,7 +184,6 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { |
184 | 184 |
b := &Builder{ |
185 | 185 |
options: options, |
186 | 186 |
Stdout: ioutil.Discard, |
187 |
- source: context, |
|
188 | 187 |
buildArgs: newBuildArgs(options.BuildArgs), |
189 | 188 |
} |
190 | 189 |
|
... | ... |
@@ -196,6 +195,7 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { |
196 | 196 |
stepMsg: formatStep(0, len(n.Children)), |
197 | 197 |
node: n.Children[0], |
198 | 198 |
shlex: shlex, |
199 |
+ source: context, |
|
199 | 200 |
} |
200 | 201 |
state, err = b.dispatch(opts) |
201 | 202 |
|
... | ... |
@@ -13,11 +13,6 @@ import ( |
13 | 13 |
"golang.org/x/net/context" |
14 | 14 |
) |
15 | 15 |
|
16 |
-type pathCache interface { |
|
17 |
- Load(key interface{}) (value interface{}, ok bool) |
|
18 |
- Store(key, value interface{}) |
|
19 |
-} |
|
20 |
- |
|
21 | 16 |
type buildStage struct { |
22 | 17 |
id string |
23 | 18 |
config *container.Config |
... | ... |
@@ -148,23 +143,6 @@ func (m *imageSources) Unmount() (retErr error) { |
148 | 148 |
return |
149 | 149 |
} |
150 | 150 |
|
151 |
-// TODO: remove getCache/setCache from imageSources |
|
152 |
-func (m *imageSources) getCache(id, path string) (interface{}, bool) { |
|
153 |
- if m.cache != nil { |
|
154 |
- if id == "" { |
|
155 |
- return nil, false |
|
156 |
- } |
|
157 |
- return m.cache.Load(id + path) |
|
158 |
- } |
|
159 |
- return nil, false |
|
160 |
-} |
|
161 |
- |
|
162 |
-func (m *imageSources) setCache(id, path string, v interface{}) { |
|
163 |
- if m.cache != nil { |
|
164 |
- m.cache.Store(id+path, v) |
|
165 |
- } |
|
166 |
-} |
|
167 |
- |
|
168 | 151 |
// imageMount is a reference to an image that can be used as a builder.Source |
169 | 152 |
type imageMount struct { |
170 | 153 |
image builder.Image |
... | ... |
@@ -208,3 +186,7 @@ func (im *imageMount) unmount() error { |
208 | 208 |
func (im *imageMount) Image() builder.Image { |
209 | 209 |
return im.image |
210 | 210 |
} |
211 |
+ |
|
212 |
+func (im *imageMount) ImageID() string { |
|
213 |
+ return im.image.ImageID() |
|
214 |
+} |
... | ... |
@@ -7,31 +7,15 @@ import ( |
7 | 7 |
"crypto/sha256" |
8 | 8 |
"encoding/hex" |
9 | 9 |
"fmt" |
10 |
- "io" |
|
11 |
- "net/http" |
|
12 |
- "net/url" |
|
13 |
- "os" |
|
14 |
- "path/filepath" |
|
15 |
- "runtime" |
|
16 |
- "sort" |
|
17 | 10 |
"strings" |
18 |
- "time" |
|
19 | 11 |
|
20 | 12 |
"github.com/Sirupsen/logrus" |
21 | 13 |
"github.com/docker/docker/api/types" |
22 | 14 |
"github.com/docker/docker/api/types/backend" |
23 | 15 |
"github.com/docker/docker/api/types/container" |
24 |
- "github.com/docker/docker/builder" |
|
25 |
- "github.com/docker/docker/builder/remotecontext" |
|
26 | 16 |
containerpkg "github.com/docker/docker/container" |
27 |
- "github.com/docker/docker/pkg/httputils" |
|
28 |
- "github.com/docker/docker/pkg/ioutils" |
|
29 | 17 |
"github.com/docker/docker/pkg/jsonmessage" |
30 |
- "github.com/docker/docker/pkg/progress" |
|
31 |
- "github.com/docker/docker/pkg/streamformatter" |
|
32 | 18 |
"github.com/docker/docker/pkg/stringid" |
33 |
- "github.com/docker/docker/pkg/system" |
|
34 |
- "github.com/docker/docker/pkg/urlutil" |
|
35 | 19 |
"github.com/pkg/errors" |
36 | 20 |
) |
37 | 21 |
|
... | ... |
@@ -83,90 +67,14 @@ func (b *Builder) commitContainer(dispatchState *dispatchState, id string, conta |
83 | 83 |
return nil |
84 | 84 |
} |
85 | 85 |
|
86 |
-type copyInfo struct { |
|
87 |
- root string |
|
88 |
- path string |
|
89 |
- hash string |
|
90 |
- decompress bool |
|
91 |
-} |
|
92 |
- |
|
93 |
-// TODO: this needs to be split so that a Builder method doesn't accept req |
|
94 |
-func (b *Builder) runContextCommand(req dispatchRequest, allowRemote bool, allowLocalDecompression bool, cmdName string, imageSource *imageMount) error { |
|
95 |
- args := req.args |
|
96 |
- if len(args) < 2 { |
|
97 |
- return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) |
|
98 |
- } |
|
99 |
- |
|
100 |
- // Work in daemon-specific filepath semantics |
|
101 |
- dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest |
|
102 |
- |
|
103 |
- var infos []copyInfo |
|
104 |
- |
|
105 |
- // Loop through each src file and calculate the info we need to |
|
106 |
- // do the copy (e.g. hash value if cached). Don't actually do |
|
107 |
- // the copy until we've looked at all src files |
|
108 |
- var err error |
|
109 |
- for _, orig := range args[0 : len(args)-1] { |
|
110 |
- if urlutil.IsURL(orig) { |
|
111 |
- if !allowRemote { |
|
112 |
- return fmt.Errorf("Source can't be a URL for %s", cmdName) |
|
113 |
- } |
|
114 |
- remote, path, err := b.download(orig) |
|
115 |
- if err != nil { |
|
116 |
- return err |
|
117 |
- } |
|
118 |
- defer os.RemoveAll(remote.Root()) |
|
119 |
- h, err := remote.Hash(path) |
|
120 |
- if err != nil { |
|
121 |
- return err |
|
122 |
- } |
|
123 |
- infos = append(infos, copyInfo{ |
|
124 |
- root: remote.Root(), |
|
125 |
- path: path, |
|
126 |
- hash: h, |
|
127 |
- }) |
|
128 |
- continue |
|
129 |
- } |
|
130 |
- // not a URL |
|
131 |
- subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true, imageSource) |
|
132 |
- if err != nil { |
|
133 |
- return err |
|
134 |
- } |
|
135 |
- |
|
136 |
- infos = append(infos, subInfos...) |
|
137 |
- } |
|
138 |
- |
|
139 |
- if len(infos) == 0 { |
|
140 |
- return errors.New("No source files were specified") |
|
141 |
- } |
|
142 |
- if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { |
|
143 |
- return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) |
|
144 |
- } |
|
145 |
- |
|
146 |
- // For backwards compat, if there's just one info then use it as the |
|
147 |
- // cache look-up string, otherwise hash 'em all into one |
|
148 |
- var srcHash string |
|
149 |
- |
|
150 |
- if len(infos) == 1 { |
|
151 |
- info := infos[0] |
|
152 |
- srcHash = info.hash |
|
153 |
- } else { |
|
154 |
- var hashs []string |
|
155 |
- var origs []string |
|
156 |
- for _, info := range infos { |
|
157 |
- origs = append(origs, info.path) |
|
158 |
- hashs = append(hashs, info.hash) |
|
159 |
- } |
|
160 |
- hasher := sha256.New() |
|
161 |
- hasher.Write([]byte(strings.Join(hashs, ","))) |
|
162 |
- srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) |
|
163 |
- } |
|
86 |
+func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error { |
|
87 |
+ srcHash := getSourceHashFromInfos(inst.infos) |
|
164 | 88 |
|
165 | 89 |
// TODO: should this have been using origPaths instead of srcHash in the comment? |
166 | 90 |
runConfigWithCommentCmd := copyRunConfig( |
167 |
- req.state.runConfig, |
|
168 |
- withCmdCommentString(fmt.Sprintf("%s %s in %s ", cmdName, srcHash, dest))) |
|
169 |
- if hit, err := b.probeCache(req.state, runConfigWithCommentCmd); err != nil || hit { |
|
91 |
+ state.runConfig, |
|
92 |
+ withCmdCommentString(fmt.Sprintf("%s %s in %s ", inst.cmdName, srcHash, inst.dest))) |
|
93 |
+ if hit, err := b.probeCache(state, runConfigWithCommentCmd); err != nil || hit { |
|
170 | 94 |
return err |
171 | 95 |
} |
172 | 96 |
|
... | ... |
@@ -182,17 +90,36 @@ func (b *Builder) runContextCommand(req dispatchRequest, allowRemote bool, allow |
182 | 182 |
|
183 | 183 |
// Twiddle the destination when it's a relative path - meaning, make it |
184 | 184 |
// relative to the WORKINGDIR |
185 |
- if dest, err = normaliseDest(cmdName, req.state.runConfig.WorkingDir, dest); err != nil { |
|
185 |
+ dest, err := normaliseDest(inst.cmdName, state.runConfig.WorkingDir, inst.dest) |
|
186 |
+ if err != nil { |
|
186 | 187 |
return err |
187 | 188 |
} |
188 | 189 |
|
189 |
- for _, info := range infos { |
|
190 |
- if err := b.docker.CopyOnBuild(container.ID, dest, info.root, info.path, info.decompress); err != nil { |
|
190 |
+ for _, info := range inst.infos { |
|
191 |
+ if err := b.docker.CopyOnBuild(container.ID, dest, info.root, info.path, inst.allowLocalDecompression); err != nil { |
|
191 | 192 |
return err |
192 | 193 |
} |
193 | 194 |
} |
195 |
+ return b.commitContainer(state, container.ID, runConfigWithCommentCmd) |
|
196 |
+} |
|
197 |
+ |
|
198 |
+// For backwards compat, if there's just one info then use it as the |
|
199 |
+// cache look-up string, otherwise hash 'em all into one |
|
200 |
+func getSourceHashFromInfos(infos []copyInfo) string { |
|
201 |
+ if len(infos) == 1 { |
|
202 |
+ return infos[0].hash |
|
203 |
+ } |
|
204 |
+ var hashs []string |
|
205 |
+ for _, info := range infos { |
|
206 |
+ hashs = append(hashs, info.hash) |
|
207 |
+ } |
|
208 |
+ return hashStringSlice("multi", hashs) |
|
209 |
+} |
|
194 | 210 |
|
195 |
- return b.commitContainer(req.state, container.ID, runConfigWithCommentCmd) |
|
211 |
+func hashStringSlice(prefix string, slice []string) string { |
|
212 |
+ hasher := sha256.New() |
|
213 |
+ hasher.Write([]byte(strings.Join(slice, ","))) |
|
214 |
+ return prefix + ":" + hex.EncodeToString(hasher.Sum(nil)) |
|
196 | 215 |
} |
197 | 216 |
|
198 | 217 |
type runConfigModifier func(*container.Config) |
... | ... |
@@ -259,227 +186,6 @@ func getShell(c *container.Config) []string { |
259 | 259 |
return append([]string{}, c.Shell[:]...) |
260 | 260 |
} |
261 | 261 |
|
262 |
-func (b *Builder) download(srcURL string) (remote builder.Source, p string, err error) { |
|
263 |
- // get filename from URL |
|
264 |
- u, err := url.Parse(srcURL) |
|
265 |
- if err != nil { |
|
266 |
- return |
|
267 |
- } |
|
268 |
- path := filepath.FromSlash(u.Path) // Ensure in platform semantics |
|
269 |
- if strings.HasSuffix(path, string(os.PathSeparator)) { |
|
270 |
- path = path[:len(path)-1] |
|
271 |
- } |
|
272 |
- parts := strings.Split(path, string(os.PathSeparator)) |
|
273 |
- filename := parts[len(parts)-1] |
|
274 |
- if filename == "" { |
|
275 |
- err = fmt.Errorf("cannot determine filename from url: %s", u) |
|
276 |
- return |
|
277 |
- } |
|
278 |
- |
|
279 |
- // Initiate the download |
|
280 |
- resp, err := httputils.Download(srcURL) |
|
281 |
- if err != nil { |
|
282 |
- return |
|
283 |
- } |
|
284 |
- |
|
285 |
- // Prepare file in a tmp dir |
|
286 |
- tmpDir, err := ioutils.TempDir("", "docker-remote") |
|
287 |
- if err != nil { |
|
288 |
- return |
|
289 |
- } |
|
290 |
- defer func() { |
|
291 |
- if err != nil { |
|
292 |
- os.RemoveAll(tmpDir) |
|
293 |
- } |
|
294 |
- }() |
|
295 |
- tmpFileName := filepath.Join(tmpDir, filename) |
|
296 |
- tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) |
|
297 |
- if err != nil { |
|
298 |
- return |
|
299 |
- } |
|
300 |
- |
|
301 |
- progressOutput := streamformatter.NewJSONProgressOutput(b.Output, true) |
|
302 |
- progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") |
|
303 |
- // Download and dump result to tmp file |
|
304 |
- // TODO: add filehash directly |
|
305 |
- if _, err = io.Copy(tmpFile, progressReader); err != nil { |
|
306 |
- tmpFile.Close() |
|
307 |
- return |
|
308 |
- } |
|
309 |
- fmt.Fprintln(b.Stdout) |
|
310 |
- |
|
311 |
- // Set the mtime to the Last-Modified header value if present |
|
312 |
- // Otherwise just remove atime and mtime |
|
313 |
- mTime := time.Time{} |
|
314 |
- |
|
315 |
- lastMod := resp.Header.Get("Last-Modified") |
|
316 |
- if lastMod != "" { |
|
317 |
- // If we can't parse it then just let it default to 'zero' |
|
318 |
- // otherwise use the parsed time value |
|
319 |
- if parsedMTime, err := http.ParseTime(lastMod); err == nil { |
|
320 |
- mTime = parsedMTime |
|
321 |
- } |
|
322 |
- } |
|
323 |
- |
|
324 |
- tmpFile.Close() |
|
325 |
- |
|
326 |
- if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { |
|
327 |
- return |
|
328 |
- } |
|
329 |
- |
|
330 |
- lc, err := remotecontext.NewLazyContext(tmpDir) |
|
331 |
- if err != nil { |
|
332 |
- return |
|
333 |
- } |
|
334 |
- |
|
335 |
- return lc, filename, nil |
|
336 |
-} |
|
337 |
- |
|
338 |
-var windowsBlacklist = map[string]bool{ |
|
339 |
- "c:\\": true, |
|
340 |
- "c:\\windows": true, |
|
341 |
-} |
|
342 |
- |
|
343 |
-func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool, imageSource *imageMount) ([]copyInfo, error) { |
|
344 |
- |
|
345 |
- // Work in daemon-specific OS filepath semantics |
|
346 |
- origPath = filepath.FromSlash(origPath) |
|
347 |
- // validate windows paths from other images |
|
348 |
- if imageSource != nil && runtime.GOOS == "windows" { |
|
349 |
- p := strings.ToLower(filepath.Clean(origPath)) |
|
350 |
- if !filepath.IsAbs(p) { |
|
351 |
- if filepath.VolumeName(p) != "" { |
|
352 |
- if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths |
|
353 |
- p = p[:len(p)-1] |
|
354 |
- } |
|
355 |
- p += "\\" |
|
356 |
- } else { |
|
357 |
- p = filepath.Join("c:\\", p) |
|
358 |
- } |
|
359 |
- } |
|
360 |
- if _, blacklisted := windowsBlacklist[p]; blacklisted { |
|
361 |
- return nil, errors.New("copy from c:\\ or c:\\windows is not allowed on windows") |
|
362 |
- } |
|
363 |
- } |
|
364 |
- |
|
365 |
- if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { |
|
366 |
- origPath = origPath[1:] |
|
367 |
- } |
|
368 |
- origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) |
|
369 |
- |
|
370 |
- source := b.source |
|
371 |
- var err error |
|
372 |
- if imageSource != nil { |
|
373 |
- source, err = imageSource.Source() |
|
374 |
- if err != nil { |
|
375 |
- return nil, errors.Wrapf(err, "failed to copy") |
|
376 |
- } |
|
377 |
- } |
|
378 |
- |
|
379 |
- if source == nil { |
|
380 |
- return nil, errors.Errorf("No context given. Impossible to use %s", cmdName) |
|
381 |
- } |
|
382 |
- |
|
383 |
- // Deal with wildcards |
|
384 |
- if allowWildcards && containsWildcards(origPath) { |
|
385 |
- var copyInfos []copyInfo |
|
386 |
- if err := filepath.Walk(source.Root(), func(path string, info os.FileInfo, err error) error { |
|
387 |
- if err != nil { |
|
388 |
- return err |
|
389 |
- } |
|
390 |
- rel, err := remotecontext.Rel(source.Root(), path) |
|
391 |
- if err != nil { |
|
392 |
- return err |
|
393 |
- } |
|
394 |
- if rel == "." { |
|
395 |
- return nil |
|
396 |
- } |
|
397 |
- if match, _ := filepath.Match(origPath, rel); !match { |
|
398 |
- return nil |
|
399 |
- } |
|
400 |
- |
|
401 |
- // Note we set allowWildcards to false in case the name has |
|
402 |
- // a * in it |
|
403 |
- subInfos, err := b.calcCopyInfo(cmdName, rel, allowLocalDecompression, false, imageSource) |
|
404 |
- if err != nil { |
|
405 |
- return err |
|
406 |
- } |
|
407 |
- copyInfos = append(copyInfos, subInfos...) |
|
408 |
- return nil |
|
409 |
- }); err != nil { |
|
410 |
- return nil, err |
|
411 |
- } |
|
412 |
- return copyInfos, nil |
|
413 |
- } |
|
414 |
- |
|
415 |
- // Must be a dir or a file |
|
416 |
- hash, err := source.Hash(origPath) |
|
417 |
- if err != nil { |
|
418 |
- return nil, err |
|
419 |
- } |
|
420 |
- |
|
421 |
- fi, err := remotecontext.StatAt(source, origPath) |
|
422 |
- if err != nil { |
|
423 |
- return nil, err |
|
424 |
- } |
|
425 |
- |
|
426 |
- // TODO: remove, handle dirs in Hash() |
|
427 |
- copyInfos := []copyInfo{{root: source.Root(), path: origPath, hash: hash, decompress: allowLocalDecompression}} |
|
428 |
- |
|
429 |
- if imageSource != nil { |
|
430 |
- // fast-cache based on imageID |
|
431 |
- if h, ok := b.imageSources.getCache(imageSource.Image().ImageID(), origPath); ok { |
|
432 |
- copyInfos[0].hash = h.(string) |
|
433 |
- return copyInfos, nil |
|
434 |
- } |
|
435 |
- } |
|
436 |
- |
|
437 |
- // Deal with the single file case |
|
438 |
- if !fi.IsDir() { |
|
439 |
- copyInfos[0].hash = "file:" + copyInfos[0].hash |
|
440 |
- return copyInfos, nil |
|
441 |
- } |
|
442 |
- |
|
443 |
- fp, err := remotecontext.FullPath(source, origPath) |
|
444 |
- if err != nil { |
|
445 |
- return nil, err |
|
446 |
- } |
|
447 |
- // Must be a dir |
|
448 |
- var subfiles []string |
|
449 |
- err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { |
|
450 |
- if err != nil { |
|
451 |
- return err |
|
452 |
- } |
|
453 |
- rel, err := remotecontext.Rel(source.Root(), path) |
|
454 |
- if err != nil { |
|
455 |
- return err |
|
456 |
- } |
|
457 |
- if rel == "." { |
|
458 |
- return nil |
|
459 |
- } |
|
460 |
- hash, err := source.Hash(rel) |
|
461 |
- if err != nil { |
|
462 |
- return nil |
|
463 |
- } |
|
464 |
- // we already checked handleHash above |
|
465 |
- subfiles = append(subfiles, hash) |
|
466 |
- return nil |
|
467 |
- }) |
|
468 |
- if err != nil { |
|
469 |
- return nil, err |
|
470 |
- } |
|
471 |
- |
|
472 |
- sort.Strings(subfiles) |
|
473 |
- hasher := sha256.New() |
|
474 |
- hasher.Write([]byte(strings.Join(subfiles, ","))) |
|
475 |
- copyInfos[0].hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) |
|
476 |
- if imageSource != nil { |
|
477 |
- b.imageSources.setCache(imageSource.Image().ImageID(), origPath, copyInfos[0].hash) |
|
478 |
- } |
|
479 |
- |
|
480 |
- return copyInfos, nil |
|
481 |
-} |
|
482 |
- |
|
483 | 262 |
// probeCache checks if cache match can be found for current build instruction. |
484 | 263 |
// If an image is found, probeCache returns `(true, nil)`. |
485 | 264 |
// If no image is found, it returns `(false, nil)`. |
... | ... |
@@ -7,6 +7,7 @@ import ( |
7 | 7 |
"strings" |
8 | 8 |
|
9 | 9 |
"github.com/docker/docker/pkg/system" |
10 |
+ "github.com/pkg/errors" |
|
10 | 11 |
) |
11 | 12 |
|
12 | 13 |
// normaliseDest normalises the destination of a COPY/ADD command in a |
... | ... |
@@ -64,3 +65,31 @@ func containsWildcards(name string) bool { |
64 | 64 |
} |
65 | 65 |
return false |
66 | 66 |
} |
67 |
+ |
|
68 |
+var pathBlacklist = map[string]bool{ |
|
69 |
+ "c:\\": true, |
|
70 |
+ "c:\\windows": true, |
|
71 |
+} |
|
72 |
+ |
|
73 |
+func validateCopySourcePath(imageSource *imageMount, origPath string) error { |
|
74 |
+ // validate windows paths from other images |
|
75 |
+ if imageSource == nil { |
|
76 |
+ return nil |
|
77 |
+ } |
|
78 |
+ origPath = filepath.FromSlash(origPath) |
|
79 |
+ p := strings.ToLower(filepath.Clean(origPath)) |
|
80 |
+ if !filepath.IsAbs(p) { |
|
81 |
+ if filepath.VolumeName(p) != "" { |
|
82 |
+ if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths |
|
83 |
+ p = p[:len(p)-1] |
|
84 |
+ } |
|
85 |
+ p += "\\" |
|
86 |
+ } else { |
|
87 |
+ p = filepath.Join("c:\\", p) |
|
88 |
+ } |
|
89 |
+ } |
|
90 |
+ if _, blacklisted := pathBlacklist[p]; blacklisted { |
|
91 |
+ return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") |
|
92 |
+ } |
|
93 |
+ return nil |
|
94 |
+} |