deleted file mode 100644
@@ -1,827 +0,0 @@
-package docker
-
-import (
-	"crypto/sha256"
-	"encoding/hex"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"github.com/dotcloud/docker/archive"
-	"github.com/dotcloud/docker/nat"
-	"github.com/dotcloud/docker/registry"
-	"github.com/dotcloud/docker/runconfig"
-	"github.com/dotcloud/docker/runtime"
-	"github.com/dotcloud/docker/utils"
-	"io"
-	"io/ioutil"
-	"net/url"
-	"os"
-	"path"
-	"path/filepath"
-	"reflect"
-	"regexp"
-	"sort"
-	"strings"
-)
-
-var (
-	ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
-)
-
-type BuildFile interface {
-	Build(io.Reader) (string, error)
-	CmdFrom(string) error
-	CmdRun(string) error
-}
-
-type buildFile struct {
-	runtime *runtime.Runtime
-	srv     *Server
-
-	image      string
-	maintainer string
-	config     *runconfig.Config
-
-	contextPath string
-	context     *utils.TarSum
-
-	verbose      bool
-	utilizeCache bool
-	rm           bool
-
-	authConfig *registry.AuthConfig
-	configFile *registry.ConfigFile
-
-	tmpContainers map[string]struct{}
-	tmpImages     map[string]struct{}
-
-	outStream io.Writer
-	errStream io.Writer
-
-	// Deprecated, original writer used for ImagePull. To be removed.
-	outOld io.Writer
-	sf     *utils.StreamFormatter
-}
-
-func (b *buildFile) clearTmp(containers map[string]struct{}) {
-	for c := range containers {
-		tmp := b.runtime.Get(c)
-		if err := b.runtime.Destroy(tmp); err != nil {
-			fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error())
-		} else {
-			fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c))
-		}
-	}
-}
-
-func (b *buildFile) CmdFrom(name string) error {
-	image, err := b.runtime.Repositories().LookupImage(name)
-	if err != nil {
-		if b.runtime.Graph().IsNotExist(err) {
-			remote, tag := utils.ParseRepositoryTag(name)
-			pullRegistryAuth := b.authConfig
-			if len(b.configFile.Configs) > 0 {
-				// The request came with a full auth config file, we prefer to use that
-				endpoint, _, err := registry.ResolveRepositoryName(remote)
-				if err != nil {
-					return err
-				}
-				resolvedAuth := b.configFile.ResolveAuthConfig(endpoint)
-				pullRegistryAuth = &resolvedAuth
-			}
-			job := b.srv.Eng.Job("pull", remote, tag)
-			job.SetenvBool("json", b.sf.Json())
-			job.SetenvBool("parallel", true)
-			job.SetenvJson("authConfig", pullRegistryAuth)
-			job.Stdout.Add(b.outOld)
-			if err := job.Run(); err != nil {
-				return err
-			}
-			image, err = b.runtime.Repositories().LookupImage(name)
-			if err != nil {
-				return err
-			}
-		} else {
-			return err
-		}
-	}
-	b.image = image.ID
-	b.config = &runconfig.Config{}
-	if image.Config != nil {
-		b.config = image.Config
-	}
-	if b.config.Env == nil || len(b.config.Env) == 0 {
-		b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv)
-	}
-	// Process ONBUILD triggers if they exist
-	if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
-		fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers)
-	}
-	for n, step := range b.config.OnBuild {
-		splitStep := strings.Split(step, " ")
-		stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " "))
-		switch stepInstruction {
-		case "ONBUILD":
-			return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
-		case "MAINTAINER", "FROM":
-			return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
-		}
-		if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
-			return err
-		}
-	}
-	b.config.OnBuild = []string{}
-	return nil
-}
-
-// The ONBUILD command declares a build instruction to be executed in any future build
-// using the current image as a base.
-func (b *buildFile) CmdOnbuild(trigger string) error {
-	splitTrigger := strings.Split(trigger, " ")
-	triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " "))
-	switch triggerInstruction {
-	case "ONBUILD":
-		return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
-	case "MAINTAINER", "FROM":
-		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
-	}
-	b.config.OnBuild = append(b.config.OnBuild, trigger)
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
-}
-
-func (b *buildFile) CmdMaintainer(name string) error {
-	b.maintainer = name
-	return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
-}
-
-// probeCache checks to see if image-caching is enabled (`b.utilizeCache`)
-// and if so attempts to look up the current `b.image` and `b.config` pair
-// in the current server `b.srv`. If an image is found, probeCache returns
-// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there
-// is any error, it returns `(false, err)`.
-func (b *buildFile) probeCache() (bool, error) {
-	if b.utilizeCache {
-		if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
-			return false, err
-		} else if cache != nil {
-			fmt.Fprintf(b.outStream, " ---> Using cache\n")
-			utils.Debugf("[BUILDER] Use cached version")
-			b.image = cache.ID
-			return true, nil
-		} else {
-			utils.Debugf("[BUILDER] Cache miss")
-		}
-	}
-	return false, nil
-}
-
-func (b *buildFile) CmdRun(args string) error {
-	if b.image == "" {
-		return fmt.Errorf("Please provide a source image with `from` prior to run")
-	}
-	config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
-	if err != nil {
-		return err
-	}
-
-	cmd := b.config.Cmd
-	b.config.Cmd = nil
-	runconfig.Merge(b.config, config)
-
-	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
-
-	utils.Debugf("Command to be executed: %v", b.config.Cmd)
-
-	hit, err := b.probeCache()
-	if err != nil {
-		return err
-	}
-	if hit {
-		return nil
-	}
-
-	c, err := b.create()
-	if err != nil {
-		return err
-	}
-	// Ensure that we keep the container mounted until the commit
-	// to avoid unmounting and then mounting directly again
-	c.Mount()
-	defer c.Unmount()
-
-	err = b.run(c)
-	if err != nil {
-		return err
-	}
-	if err := b.commit(c.ID, cmd, "run"); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (b *buildFile) FindEnvKey(key string) int {
-	for k, envVar := range b.config.Env {
-		envParts := strings.SplitN(envVar, "=", 2)
-		if key == envParts[0] {
-			return k
-		}
-	}
-	return -1
-}
-
-func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
-	exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
-	if err != nil {
-		return value, err
-	}
-	matches := exp.FindAllString(value, -1)
-	for _, match := range matches {
-		match = match[strings.Index(match, "$"):]
-		matchKey := strings.Trim(match, "${}")
-
-		for _, envVar := range b.config.Env {
-			envParts := strings.SplitN(envVar, "=", 2)
-			envKey := envParts[0]
-			envValue := envParts[1]
-
-			if envKey == matchKey {
-				value = strings.Replace(value, match, envValue, -1)
-				break
-			}
-		}
-	}
-	return value, nil
-}
-
-func (b *buildFile) CmdEnv(args string) error {
-	tmp := strings.SplitN(args, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid ENV format")
-	}
-	key := strings.Trim(tmp[0], " \t")
-	value := strings.Trim(tmp[1], " \t")
-
-	envKey := b.FindEnvKey(key)
-	replacedValue, err := b.ReplaceEnvMatches(value)
-	if err != nil {
-		return err
-	}
-	replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
-
-	if envKey >= 0 {
-		b.config.Env[envKey] = replacedVar
-	} else {
-		b.config.Env = append(b.config.Env, replacedVar)
-	}
-	return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
-}
-
-func (b *buildFile) buildCmdFromJson(args string) []string {
-	var cmd []string
-	if err := json.Unmarshal([]byte(args), &cmd); err != nil {
-		utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
-		cmd = []string{"/bin/sh", "-c", args}
-	}
-	return cmd
-}
-
-func (b *buildFile) CmdCmd(args string) error {
-	cmd := b.buildCmdFromJson(args)
-	b.config.Cmd = cmd
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) CmdEntrypoint(args string) error {
-	entrypoint := b.buildCmdFromJson(args)
-	b.config.Entrypoint = entrypoint
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) CmdExpose(args string) error {
-	portsTab := strings.Split(args, " ")
-
-	if b.config.ExposedPorts == nil {
-		b.config.ExposedPorts = make(nat.PortSet)
-	}
-	ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
-	if err != nil {
-		return err
-	}
-	for port := range ports {
-		if _, exists := b.config.ExposedPorts[port]; !exists {
-			b.config.ExposedPorts[port] = struct{}{}
-		}
-	}
-	b.config.PortSpecs = nil
-
-	return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
-}
-
-func (b *buildFile) CmdUser(args string) error {
-	b.config.User = args
-	return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
-}
-
-func (b *buildFile) CmdInsert(args string) error {
-	return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
-}
-
-func (b *buildFile) CmdCopy(args string) error {
-	return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
-}
-
-func (b *buildFile) CmdWorkdir(workdir string) error {
-	b.config.WorkingDir = workdir
-	return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
-}
-
-func (b *buildFile) CmdVolume(args string) error {
-	if args == "" {
-		return fmt.Errorf("Volume cannot be empty")
-	}
-
-	var volume []string
-	if err := json.Unmarshal([]byte(args), &volume); err != nil {
-		volume = []string{args}
-	}
-	if b.config.Volumes == nil {
-		b.config.Volumes = map[string]struct{}{}
-	}
-	for _, v := range volume {
-		b.config.Volumes[v] = struct{}{}
-	}
-	if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) checkPathForAddition(orig string) error {
-	origPath := path.Join(b.contextPath, orig)
-	if p, err := filepath.EvalSymlinks(origPath); err != nil {
-		if os.IsNotExist(err) {
-			return fmt.Errorf("%s: no such file or directory", orig)
-		}
-		return err
-	} else {
-		origPath = p
-	}
-	if !strings.HasPrefix(origPath, b.contextPath) {
-		return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
-	}
-	_, err := os.Stat(origPath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return fmt.Errorf("%s: no such file or directory", orig)
-		}
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
-	var (
-		origPath = path.Join(b.contextPath, orig)
-		destPath = path.Join(container.RootfsPath(), dest)
-	)
-	// Preserve the trailing '/'
-	if strings.HasSuffix(dest, "/") {
-		destPath = destPath + "/"
-	}
-	fi, err := os.Stat(origPath)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return fmt.Errorf("%s: no such file or directory", orig)
-		}
-		return err
-	}
-
-	if fi.IsDir() {
-		if err := archive.CopyWithTar(origPath, destPath); err != nil {
-			return err
-		}
-		return nil
-	}
-
-	// First try to unpack the source as an archive
-	// to support the untar feature we need to clean up the path a little bit
-	// because tar is very forgiving. First we need to strip off the archive's
-	// filename from the path but this is only added if it does not end in / .
-	tarDest := destPath
-	if strings.HasSuffix(tarDest, "/") {
-		tarDest = filepath.Dir(destPath)
-	}
-
-	// If we are adding a remote file, do not try to untar it
-	if !remote {
-		// try to successfully untar the orig
-		if err := archive.UntarPath(origPath, tarDest); err == nil {
-			return nil
-		}
-		utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err)
-	}
-
-	// If that fails, just copy it as a regular file
-	// but do not use all the magic path handling for the tar path
-	if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
-		return err
-	}
-	if err := archive.CopyWithTar(origPath, destPath); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (b *buildFile) CmdAdd(args string) error {
-	if b.context == nil {
-		return fmt.Errorf("No context given. Impossible to use ADD")
-	}
-	tmp := strings.SplitN(args, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid ADD format")
-	}
-
-	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
-	if err != nil {
-		return err
-	}
-
-	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
-	if err != nil {
-		return err
-	}
-
-	cmd := b.config.Cmd
-	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)}
-	b.config.Image = b.image
-
-	var (
-		origPath   = orig
-		destPath   = dest
-		remoteHash string
-		isRemote   bool
-	)
-
-	if utils.IsURL(orig) {
-		isRemote = true
-		resp, err := utils.Download(orig)
-		if err != nil {
-			return err
-		}
-		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
-		if err != nil {
-			return err
-		}
-		tmpFileName := path.Join(tmpDirName, "tmp")
-		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
-		if err != nil {
-			return err
-		}
-		defer os.RemoveAll(tmpDirName)
-		if _, err = io.Copy(tmpFile, resp.Body); err != nil {
-			tmpFile.Close()
-			return err
-		}
-		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
-		tmpFile.Close()
-
-		// Process the checksum
-		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
-		if err != nil {
-			return err
-		}
-		tarSum := utils.TarSum{Reader: r, DisableCompression: true}
-		remoteHash = tarSum.Sum(nil)
-		r.Close()
-
-		// If the destination is a directory, figure out the filename.
-		if strings.HasSuffix(dest, "/") {
-			u, err := url.Parse(orig)
-			if err != nil {
-				return err
-			}
-			path := u.Path
-			if strings.HasSuffix(path, "/") {
-				path = path[:len(path)-1]
-			}
-			parts := strings.Split(path, "/")
-			filename := parts[len(parts)-1]
-			if filename == "" {
-				return fmt.Errorf("cannot determine filename from url: %s", u)
-			}
-			destPath = dest + filename
-		}
-	}
-
-	if err := b.checkPathForAddition(origPath); err != nil {
-		return err
-	}
-
-	// Hash path and check the cache
-	if b.utilizeCache {
-		var (
-			hash string
-			sums = b.context.GetSums()
-		)
-
-		if remoteHash != "" {
-			hash = remoteHash
-		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
-			return err
-		} else if fi.IsDir() {
-			var subfiles []string
-			for file, sum := range sums {
-				absFile := path.Join(b.contextPath, file)
-				absOrigPath := path.Join(b.contextPath, origPath)
-				if strings.HasPrefix(absFile, absOrigPath) {
-					subfiles = append(subfiles, sum)
-				}
-			}
-			sort.Strings(subfiles)
-			hasher := sha256.New()
-			hasher.Write([]byte(strings.Join(subfiles, ",")))
-			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
-		} else {
-			if origPath[0] == '/' && len(origPath) > 1 {
-				origPath = origPath[1:]
-			}
-			origPath = strings.TrimPrefix(origPath, "./")
-			if h, ok := sums[origPath]; ok {
-				hash = "file:" + h
-			}
-		}
-		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
-		hit, err := b.probeCache()
-		if err != nil {
-			return err
-		}
-		// If we do not have a hash, never use the cache
-		if hit && hash != "" {
-			return nil
-		}
-	}
-
-	// Create the container and start it
-	container, _, err := b.runtime.Create(b.config, "")
-	if err != nil {
-		return err
-	}
-	b.tmpContainers[container.ID] = struct{}{}
-
-	if err := container.Mount(); err != nil {
-		return err
-	}
-	defer container.Unmount()
-
-	if err := b.addContext(container, origPath, destPath, isRemote); err != nil {
-		return err
-	}
-
-	if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil {
-		return err
-	}
-	b.config.Cmd = cmd
-	return nil
-}
-
-type StdoutFormater struct {
-	io.Writer
-	*utils.StreamFormatter
-}
-
-func (sf *StdoutFormater) Write(buf []byte) (int, error) {
-	formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
-	n, err := sf.Writer.Write(formattedBuf)
-	if n != len(formattedBuf) {
-		return n, io.ErrShortWrite
-	}
-	return len(buf), err
-}
-
-type StderrFormater struct {
-	io.Writer
-	*utils.StreamFormatter
-}
-
-func (sf *StderrFormater) Write(buf []byte) (int, error) {
-	formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
-	n, err := sf.Writer.Write(formattedBuf)
-	if n != len(formattedBuf) {
-		return n, io.ErrShortWrite
-	}
-	return len(buf), err
-}
-
-func (b *buildFile) create() (*runtime.Container, error) {
-	if b.image == "" {
-		return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
-	}
-	b.config.Image = b.image
-
-	// Create the container and start it
-	c, _, err := b.runtime.Create(b.config, "")
-	if err != nil {
-		return nil, err
-	}
-	b.tmpContainers[c.ID] = struct{}{}
-	fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID))
-
-	// override the entry point that may have been picked up from the base image
-	c.Path = b.config.Cmd[0]
-	c.Args = b.config.Cmd[1:]
-
-	return c, nil
-}
-
-func (b *buildFile) run(c *runtime.Container) error {
-	var errCh chan error
-
-	if b.verbose {
-		errCh = utils.Go(func() error {
-			return <-c.Attach(nil, nil, b.outStream, b.errStream)
-		})
-	}
-
-	//start the container
-	if err := c.Start(); err != nil {
-		return err
-	}
-
-	if errCh != nil {
-		if err := <-errCh; err != nil {
-			return err
-		}
-	}
-
-	// Wait for it to finish
-	if ret := c.Wait(); ret != 0 {
-		err := &utils.JSONError{
-			Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
-			Code:    ret,
-		}
-		return err
-	}
-
-	return nil
-}
-
-// Commit the container <id> with the autorun command <autoCmd>
-func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
-	if b.image == "" {
-		return fmt.Errorf("Please provide a source image with `from` prior to commit")
-	}
-	b.config.Image = b.image
-	if id == "" {
-		cmd := b.config.Cmd
-		b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
-		defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
-
-		hit, err := b.probeCache()
-		if err != nil {
-			return err
-		}
-		if hit {
-			return nil
-		}
-
-		container, warnings, err := b.runtime.Create(b.config, "")
-		if err != nil {
-			return err
-		}
-		for _, warning := range warnings {
-			fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning)
-		}
-		b.tmpContainers[container.ID] = struct{}{}
-		fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
-		id = container.ID
-
-		if err := container.Mount(); err != nil {
-			return err
-		}
-		defer container.Unmount()
-	}
-	container := b.runtime.Get(id)
-	if container == nil {
-		return fmt.Errorf("An error occured while creating the container")
-	}
-
-	// Note: Actually copy the struct
-	autoConfig := *b.config
-	autoConfig.Cmd = autoCmd
-	// Commit the container
-	image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig)
-	if err != nil {
-		return err
-	}
-	b.tmpImages[image.ID] = struct{}{}
-	b.image = image.ID
-	return nil
-}
-
-// Long lines can be split with a backslash
-var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`)
-
-func (b *buildFile) Build(context io.Reader) (string, error) {
-	tmpdirPath, err := ioutil.TempDir("", "docker-build")
-	if err != nil {
-		return "", err
-	}
-
-	decompressedStream, err := archive.DecompressStream(context)
-	if err != nil {
-		return "", err
-	}
-
-	b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
-	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
-		return "", err
-	}
-	defer os.RemoveAll(tmpdirPath)
-
-	b.contextPath = tmpdirPath
-	filename := path.Join(tmpdirPath, "Dockerfile")
-	if _, err := os.Stat(filename); os.IsNotExist(err) {
-		return "", fmt.Errorf("Can't build a directory with no Dockerfile")
-	}
-	fileBytes, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return "", err
-	}
-	if len(fileBytes) == 0 {
-		return "", ErrDockerfileEmpty
-	}
-	dockerfile := string(fileBytes)
-	dockerfile = lineContinuation.ReplaceAllString(dockerfile, "")
-	stepN := 0
-	for _, line := range strings.Split(dockerfile, "\n") {
-		line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
-		// Skip comments and empty line
-		if len(line) == 0 || line[0] == '#' {
-			continue
-		}
-		if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
-			return "", err
-		}
-		stepN += 1
-
-	}
-	if b.image != "" {
-		fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
-		if b.rm {
-			b.clearTmp(b.tmpContainers)
-		}
-		return b.image, nil
-	}
-	return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
-}
-
-// BuildStep parses a single build step from `instruction` and executes it in the current context.
-func (b *buildFile) BuildStep(name, expression string) error {
-	fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression)
-	tmp := strings.SplitN(expression, " ", 2)
-	if len(tmp) != 2 {
-		return fmt.Errorf("Invalid Dockerfile format")
-	}
-	instruction := strings.ToLower(strings.Trim(tmp[0], " "))
-	arguments := strings.Trim(tmp[1], " ")
-
-	method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
-	if !exists {
-		fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction))
-		return nil
-	}
-
-	ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
-	if ret != nil {
-		return ret.(error)
-	}
-
-	fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image))
-	return nil
-}
-
-func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
-	return &buildFile{
-		runtime:       srv.runtime,
-		srv:           srv,
-		config:        &runconfig.Config{},
-		outStream:     outStream,
-		errStream:     errStream,
-		tmpContainers: make(map[string]struct{}),
-		tmpImages:     make(map[string]struct{}),
-		verbose:       verbose,
-		utilizeCache:  utilizeCache,
-		rm:            rm,
-		sf:            sf,
-		authConfig:    auth,
-		configFile:    authConfigFile,
-		outOld:        outOld,
-	}
-}
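For context, the deleted buildfile.go above exposes the builder through the BuildFile interface: NewBuildFile constructs one and Build drives a tar-archived context through each Dockerfile instruction via BuildStep. A minimal sketch of a caller, using only signatures taken from the deleted file; the *Server value srv and the tar stream context are stand-ins, not part of this patch (srv.Build in the server.go deletion below does the real wiring):

	sf := utils.NewStreamFormatter(false)
	b := NewBuildFile(srv,
		&StdoutFormater{Writer: os.Stdout, StreamFormatter: sf}, // formats build output
		&StderrFormater{Writer: os.Stdout, StreamFormatter: sf}, // formats error output
		true,  // verbose: attach to each RUN container
		true,  // utilizeCache: reuse committed layers via probeCache
		false, // rm: keep intermediate containers
		os.Stdout, sf, &registry.AuthConfig{}, &registry.ConfigFile{})
	id, err := b.Build(context) // context: uncompressed tar stream with a Dockerfile at its root
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Successfully built", id) // Build returns the ID of the final committed image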
@@ -3,9 +3,9 @@ package builtins
 
 import (
 	"github.com/dotcloud/docker/engine"
 
-	"github.com/dotcloud/docker"
 	"github.com/dotcloud/docker/api"
 	"github.com/dotcloud/docker/networkdriver/lxc"
+	"github.com/dotcloud/docker/server"
 )
 
 func Register(eng *engine.Engine) {
@@ -34,6 +34,6 @@ func remote(eng *engine.Engine) {
 // These components should be broken off into plugins of their own.
 //
 func daemon(eng *engine.Engine) {
-	eng.Register("initserver", docker.InitServer)
+	eng.Register("initserver", server.InitServer)
 	eng.Register("init_networkdriver", lxc.InitDriver)
 }
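The two hunks above retarget the "initserver" builtin from the monolithic docker package to the new server package; only the import and the handler reference change. The handler contract itself is unchanged: an engine.Handler takes a *engine.Job and returns an engine.Status, exactly as every srv.* method registered by InitServer in the deletion below does. A minimal sketch of that contract, with a hypothetical "hello" job (illustrative only, not part of this patch):

	// An engine handler receives a *engine.Job and reports engine.Status.
	func hello(job *engine.Job) engine.Status {
		if len(job.Args) != 1 {
			return job.Errorf("Usage: %s NAME", job.Name)
		}
		job.Printf("hello, %s\n", job.Args[0])
		return engine.StatusOK
	}

	// Registered the same way as initserver above:
	//	eng.Register("hello", hello)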
deleted file mode 100644 |
| ... | ... |
@@ -1,2426 +0,0 @@ |
| 1 |
-package docker |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "encoding/json" |
|
| 5 |
- "fmt" |
|
| 6 |
- "github.com/dotcloud/docker/archive" |
|
| 7 |
- "github.com/dotcloud/docker/daemonconfig" |
|
| 8 |
- "github.com/dotcloud/docker/dockerversion" |
|
| 9 |
- "github.com/dotcloud/docker/engine" |
|
| 10 |
- "github.com/dotcloud/docker/graph" |
|
| 11 |
- "github.com/dotcloud/docker/image" |
|
| 12 |
- "github.com/dotcloud/docker/pkg/graphdb" |
|
| 13 |
- "github.com/dotcloud/docker/pkg/signal" |
|
| 14 |
- "github.com/dotcloud/docker/registry" |
|
| 15 |
- "github.com/dotcloud/docker/runconfig" |
|
| 16 |
- "github.com/dotcloud/docker/runtime" |
|
| 17 |
- "github.com/dotcloud/docker/utils" |
|
| 18 |
- "io" |
|
| 19 |
- "io/ioutil" |
|
| 20 |
- "log" |
|
| 21 |
- "net/http" |
|
| 22 |
- "net/url" |
|
| 23 |
- "os" |
|
| 24 |
- "os/exec" |
|
| 25 |
- gosignal "os/signal" |
|
| 26 |
- "path" |
|
| 27 |
- "path/filepath" |
|
| 28 |
- goruntime "runtime" |
|
| 29 |
- "strconv" |
|
| 30 |
- "strings" |
|
| 31 |
- "sync" |
|
| 32 |
- "syscall" |
|
| 33 |
- "time" |
|
| 34 |
-) |
|
| 35 |
- |
|
| 36 |
-// jobInitApi runs the remote api server `srv` as a daemon, |
|
| 37 |
-// Only one api server can run at the same time - this is enforced by a pidfile. |
|
| 38 |
-// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. |
|
| 39 |
-func InitServer(job *engine.Job) engine.Status {
|
|
| 40 |
- job.Logf("Creating server")
|
|
| 41 |
- srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job)) |
|
| 42 |
- if err != nil {
|
|
| 43 |
- return job.Error(err) |
|
| 44 |
- } |
|
| 45 |
- if srv.runtime.Config().Pidfile != "" {
|
|
| 46 |
- job.Logf("Creating pidfile")
|
|
| 47 |
- if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil {
|
|
| 48 |
- // FIXME: do we need fatal here instead of returning a job error? |
|
| 49 |
- log.Fatal(err) |
|
| 50 |
- } |
|
| 51 |
- } |
|
| 52 |
- job.Logf("Setting up signal traps")
|
|
| 53 |
- c := make(chan os.Signal, 1) |
|
| 54 |
- gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) |
|
| 55 |
- go func() {
|
|
| 56 |
- sig := <-c |
|
| 57 |
- log.Printf("Received signal '%v', exiting\n", sig)
|
|
| 58 |
- utils.RemovePidFile(srv.runtime.Config().Pidfile) |
|
| 59 |
- srv.Close() |
|
| 60 |
- os.Exit(0) |
|
| 61 |
- }() |
|
| 62 |
- job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
|
|
| 63 |
- job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
|
|
| 64 |
- |
|
| 65 |
- for name, handler := range map[string]engine.Handler{
|
|
| 66 |
- "export": srv.ContainerExport, |
|
| 67 |
- "create": srv.ContainerCreate, |
|
| 68 |
- "stop": srv.ContainerStop, |
|
| 69 |
- "restart": srv.ContainerRestart, |
|
| 70 |
- "start": srv.ContainerStart, |
|
| 71 |
- "kill": srv.ContainerKill, |
|
| 72 |
- "wait": srv.ContainerWait, |
|
| 73 |
- "tag": srv.ImageTag, |
|
| 74 |
- "resize": srv.ContainerResize, |
|
| 75 |
- "commit": srv.ContainerCommit, |
|
| 76 |
- "info": srv.DockerInfo, |
|
| 77 |
- "container_delete": srv.ContainerDestroy, |
|
| 78 |
- "image_export": srv.ImageExport, |
|
| 79 |
- "images": srv.Images, |
|
| 80 |
- "history": srv.ImageHistory, |
|
| 81 |
- "viz": srv.ImagesViz, |
|
| 82 |
- "container_copy": srv.ContainerCopy, |
|
| 83 |
- "insert": srv.ImageInsert, |
|
| 84 |
- "attach": srv.ContainerAttach, |
|
| 85 |
- "search": srv.ImagesSearch, |
|
| 86 |
- "changes": srv.ContainerChanges, |
|
| 87 |
- "top": srv.ContainerTop, |
|
| 88 |
- "version": srv.DockerVersion, |
|
| 89 |
- "load": srv.ImageLoad, |
|
| 90 |
- "build": srv.Build, |
|
| 91 |
- "pull": srv.ImagePull, |
|
| 92 |
- "import": srv.ImageImport, |
|
| 93 |
- "image_delete": srv.ImageDelete, |
|
| 94 |
- "inspect": srv.JobInspect, |
|
| 95 |
- "events": srv.Events, |
|
| 96 |
- "push": srv.ImagePush, |
|
| 97 |
- "containers": srv.Containers, |
|
| 98 |
- "auth": srv.Auth, |
|
| 99 |
- } {
|
|
| 100 |
- if err := job.Eng.Register(name, handler); err != nil {
|
|
| 101 |
- return job.Error(err) |
|
| 102 |
- } |
|
| 103 |
- } |
|
| 104 |
- return engine.StatusOK |
|
| 105 |
-} |
|
| 106 |
- |
|
| 107 |
-// simpleVersionInfo is a simple implementation of |
|
| 108 |
-// the interface VersionInfo, which is used |
|
| 109 |
-// to provide version information for some product, |
|
| 110 |
-// component, etc. It stores the product name and the version |
|
| 111 |
-// in string and returns them on calls to Name() and Version(). |
|
| 112 |
-type simpleVersionInfo struct {
|
|
| 113 |
- name string |
|
| 114 |
- version string |
|
| 115 |
-} |
|
| 116 |
- |
|
| 117 |
-func (v *simpleVersionInfo) Name() string {
|
|
| 118 |
- return v.name |
|
| 119 |
-} |
|
| 120 |
- |
|
| 121 |
-func (v *simpleVersionInfo) Version() string {
|
|
| 122 |
- return v.version |
|
| 123 |
-} |
|
| 124 |
- |
|
| 125 |
-// ContainerKill send signal to the container |
|
| 126 |
-// If no signal is given (sig 0), then Kill with SIGKILL and wait |
|
| 127 |
-// for the container to exit. |
|
| 128 |
-// If a signal is given, then just send it to the container and return. |
|
| 129 |
-func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
|
|
| 130 |
- if n := len(job.Args); n < 1 || n > 2 {
|
|
| 131 |
- return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
|
|
| 132 |
- } |
|
| 133 |
- var ( |
|
| 134 |
- name = job.Args[0] |
|
| 135 |
- sig uint64 |
|
| 136 |
- err error |
|
| 137 |
- ) |
|
| 138 |
- |
|
| 139 |
- // If we have a signal, look at it. Otherwise, do nothing |
|
| 140 |
- if len(job.Args) == 2 && job.Args[1] != "" {
|
|
| 141 |
- // Check if we passed the signal as a number: |
|
| 142 |
- // The largest legal signal is 31, so let's parse on 5 bits |
|
| 143 |
- sig, err = strconv.ParseUint(job.Args[1], 10, 5) |
|
| 144 |
- if err != nil {
|
|
| 145 |
- // The signal is not a number, treat it as a string |
|
| 146 |
- sig = uint64(signal.SignalMap[job.Args[1]]) |
|
| 147 |
- if sig == 0 {
|
|
| 148 |
- return job.Errorf("Invalid signal: %s", job.Args[1])
|
|
| 149 |
- } |
|
| 150 |
- |
|
| 151 |
- } |
|
| 152 |
- } |
|
| 153 |
- |
|
| 154 |
- if container := srv.runtime.Get(name); container != nil {
|
|
| 155 |
- // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) |
|
| 156 |
- if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
|
|
| 157 |
- if err := container.Kill(); err != nil {
|
|
| 158 |
- return job.Errorf("Cannot kill container %s: %s", name, err)
|
|
| 159 |
- } |
|
| 160 |
- srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 161 |
- } else {
|
|
| 162 |
- // Otherwise, just send the requested signal |
|
| 163 |
- if err := container.KillSig(int(sig)); err != nil {
|
|
| 164 |
- return job.Errorf("Cannot kill container %s: %s", name, err)
|
|
| 165 |
- } |
|
| 166 |
- // FIXME: Add event for signals |
|
| 167 |
- } |
|
| 168 |
- } else {
|
|
| 169 |
- return job.Errorf("No such container: %s", name)
|
|
| 170 |
- } |
|
| 171 |
- return engine.StatusOK |
|
| 172 |
-} |
|
| 173 |
- |
|
| 174 |
-func (srv *Server) Auth(job *engine.Job) engine.Status {
|
|
| 175 |
- var ( |
|
| 176 |
- err error |
|
| 177 |
- authConfig = ®istry.AuthConfig{}
|
|
| 178 |
- ) |
|
| 179 |
- |
|
| 180 |
- job.GetenvJson("authConfig", authConfig)
|
|
| 181 |
- // TODO: this is only done here because auth and registry need to be merged into one pkg |
|
| 182 |
- if addr := authConfig.ServerAddress; addr != "" && addr != registry.IndexServerAddress() {
|
|
| 183 |
- addr, err = registry.ExpandAndVerifyRegistryUrl(addr) |
|
| 184 |
- if err != nil {
|
|
| 185 |
- return job.Error(err) |
|
| 186 |
- } |
|
| 187 |
- authConfig.ServerAddress = addr |
|
| 188 |
- } |
|
| 189 |
- status, err := registry.Login(authConfig, srv.HTTPRequestFactory(nil)) |
|
| 190 |
- if err != nil {
|
|
| 191 |
- return job.Error(err) |
|
| 192 |
- } |
|
| 193 |
- job.Printf("%s\n", status)
|
|
| 194 |
- return engine.StatusOK |
|
| 195 |
-} |
|
| 196 |
- |
|
| 197 |
-func (srv *Server) Events(job *engine.Job) engine.Status {
|
|
| 198 |
- if len(job.Args) != 1 {
|
|
| 199 |
- return job.Errorf("Usage: %s FROM", job.Name)
|
|
| 200 |
- } |
|
| 201 |
- |
|
| 202 |
- var ( |
|
| 203 |
- from = job.Args[0] |
|
| 204 |
- since = job.GetenvInt64("since")
|
|
| 205 |
- ) |
|
| 206 |
- sendEvent := func(event *utils.JSONMessage) error {
|
|
| 207 |
- b, err := json.Marshal(event) |
|
| 208 |
- if err != nil {
|
|
| 209 |
- return fmt.Errorf("JSON error")
|
|
| 210 |
- } |
|
| 211 |
- _, err = job.Stdout.Write(b) |
|
| 212 |
- if err != nil {
|
|
| 213 |
- // On error, evict the listener |
|
| 214 |
- utils.Errorf("%s", err)
|
|
| 215 |
- srv.Lock() |
|
| 216 |
- delete(srv.listeners, from) |
|
| 217 |
- srv.Unlock() |
|
| 218 |
- return err |
|
| 219 |
- } |
|
| 220 |
- return nil |
|
| 221 |
- } |
|
| 222 |
- |
|
| 223 |
- listener := make(chan utils.JSONMessage) |
|
| 224 |
- srv.Lock() |
|
| 225 |
- srv.listeners[from] = listener |
|
| 226 |
- srv.Unlock() |
|
| 227 |
- job.Stdout.Write(nil) // flush |
|
| 228 |
- if since != 0 {
|
|
| 229 |
- // If since, send previous events that happened after the timestamp |
|
| 230 |
- for _, event := range srv.GetEvents() {
|
|
| 231 |
- if event.Time >= since {
|
|
| 232 |
- err := sendEvent(&event) |
|
| 233 |
- if err != nil && err.Error() == "JSON error" {
|
|
| 234 |
- continue |
|
| 235 |
- } |
|
| 236 |
- if err != nil {
|
|
| 237 |
- job.Error(err) |
|
| 238 |
- return engine.StatusErr |
|
| 239 |
- } |
|
| 240 |
- } |
|
| 241 |
- } |
|
| 242 |
- } |
|
| 243 |
- for event := range listener {
|
|
| 244 |
- err := sendEvent(&event) |
|
| 245 |
- if err != nil && err.Error() == "JSON error" {
|
|
| 246 |
- continue |
|
| 247 |
- } |
|
| 248 |
- if err != nil {
|
|
| 249 |
- return job.Error(err) |
|
| 250 |
- } |
|
| 251 |
- } |
|
| 252 |
- return engine.StatusOK |
|
| 253 |
-} |
|
| 254 |
- |
|
| 255 |
-func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
|
|
| 256 |
- if len(job.Args) != 1 {
|
|
| 257 |
- return job.Errorf("Usage: %s container_id", job.Name)
|
|
| 258 |
- } |
|
| 259 |
- name := job.Args[0] |
|
| 260 |
- if container := srv.runtime.Get(name); container != nil {
|
|
| 261 |
- data, err := container.Export() |
|
| 262 |
- if err != nil {
|
|
| 263 |
- return job.Errorf("%s: %s", name, err)
|
|
| 264 |
- } |
|
| 265 |
- defer data.Close() |
|
| 266 |
- |
|
| 267 |
- // Stream the entire contents of the container (basically a volatile snapshot) |
|
| 268 |
- if _, err := io.Copy(job.Stdout, data); err != nil {
|
|
| 269 |
- return job.Errorf("%s: %s", name, err)
|
|
| 270 |
- } |
|
| 271 |
- // FIXME: factor job-specific LogEvent to engine.Job.Run() |
|
| 272 |
- srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 273 |
- return engine.StatusOK |
|
| 274 |
- } |
|
| 275 |
- return job.Errorf("No such container: %s", name)
|
|
| 276 |
-} |
|
| 277 |
- |
|
| 278 |
-// ImageExport exports all images with the given tag. All versions |
|
| 279 |
-// containing the same tag are exported. The resulting output is an |
|
| 280 |
-// uncompressed tar ball. |
|
| 281 |
-// name is the set of tags to export. |
|
| 282 |
-// out is the writer where the images are written to. |
|
| 283 |
-func (srv *Server) ImageExport(job *engine.Job) engine.Status {
|
|
| 284 |
- if len(job.Args) != 1 {
|
|
| 285 |
- return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 286 |
- } |
|
| 287 |
- name := job.Args[0] |
|
| 288 |
- // get image json |
|
| 289 |
- tempdir, err := ioutil.TempDir("", "docker-export-")
|
|
| 290 |
- if err != nil {
|
|
| 291 |
- return job.Error(err) |
|
| 292 |
- } |
|
| 293 |
- defer os.RemoveAll(tempdir) |
|
| 294 |
- |
|
| 295 |
- utils.Debugf("Serializing %s", name)
|
|
| 296 |
- |
|
| 297 |
- rootRepo, err := srv.runtime.Repositories().Get(name) |
|
| 298 |
- if err != nil {
|
|
| 299 |
- return job.Error(err) |
|
| 300 |
- } |
|
| 301 |
- if rootRepo != nil {
|
|
| 302 |
- for _, id := range rootRepo {
|
|
| 303 |
- image, err := srv.ImageInspect(id) |
|
| 304 |
- if err != nil {
|
|
| 305 |
- return job.Error(err) |
|
| 306 |
- } |
|
| 307 |
- |
|
| 308 |
- if err := srv.exportImage(image, tempdir); err != nil {
|
|
| 309 |
- return job.Error(err) |
|
| 310 |
- } |
|
| 311 |
- } |
|
| 312 |
- |
|
| 313 |
- // write repositories |
|
| 314 |
- rootRepoMap := map[string]graph.Repository{}
|
|
| 315 |
- rootRepoMap[name] = rootRepo |
|
| 316 |
- rootRepoJson, _ := json.Marshal(rootRepoMap) |
|
| 317 |
- |
|
| 318 |
- if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil {
|
|
| 319 |
- return job.Error(err) |
|
| 320 |
- } |
|
| 321 |
- } else {
|
|
| 322 |
- image, err := srv.ImageInspect(name) |
|
| 323 |
- if err != nil {
|
|
| 324 |
- return job.Error(err) |
|
| 325 |
- } |
|
| 326 |
- if err := srv.exportImage(image, tempdir); err != nil {
|
|
| 327 |
- return job.Error(err) |
|
| 328 |
- } |
|
| 329 |
- } |
|
| 330 |
- |
|
| 331 |
- fs, err := archive.Tar(tempdir, archive.Uncompressed) |
|
| 332 |
- if err != nil {
|
|
| 333 |
- return job.Error(err) |
|
| 334 |
- } |
|
| 335 |
- defer fs.Close() |
|
| 336 |
- |
|
| 337 |
- if _, err := io.Copy(job.Stdout, fs); err != nil {
|
|
| 338 |
- return job.Error(err) |
|
| 339 |
- } |
|
| 340 |
- return engine.StatusOK |
|
| 341 |
-} |
|
| 342 |
- |
|
| 343 |
-func (srv *Server) exportImage(img *image.Image, tempdir string) error {
|
|
| 344 |
- for i := img; i != nil; {
|
|
| 345 |
- // temporary directory |
|
| 346 |
- tmpImageDir := path.Join(tempdir, i.ID) |
|
| 347 |
- if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil {
|
|
| 348 |
- if os.IsExist(err) {
|
|
| 349 |
- return nil |
|
| 350 |
- } |
|
| 351 |
- return err |
|
| 352 |
- } |
|
| 353 |
- |
|
| 354 |
- var version = "1.0" |
|
| 355 |
- var versionBuf = []byte(version) |
|
| 356 |
- |
|
| 357 |
- if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil {
|
|
| 358 |
- return err |
|
| 359 |
- } |
|
| 360 |
- |
|
| 361 |
- // serialize json |
|
| 362 |
- b, err := json.Marshal(i) |
|
| 363 |
- if err != nil {
|
|
| 364 |
- return err |
|
| 365 |
- } |
|
| 366 |
- if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil {
|
|
| 367 |
- return err |
|
| 368 |
- } |
|
| 369 |
- |
|
| 370 |
- // serialize filesystem |
|
| 371 |
- fs, err := i.TarLayer() |
|
| 372 |
- if err != nil {
|
|
| 373 |
- return err |
|
| 374 |
- } |
|
| 375 |
- defer fs.Close() |
|
| 376 |
- |
|
| 377 |
- fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) |
|
| 378 |
- if err != nil {
|
|
| 379 |
- return err |
|
| 380 |
- } |
|
| 381 |
- if _, err = io.Copy(fsTar, fs); err != nil {
|
|
| 382 |
- return err |
|
| 383 |
- } |
|
| 384 |
- fsTar.Close() |
|
| 385 |
- |
|
| 386 |
- // find parent |
|
| 387 |
- if i.Parent != "" {
|
|
| 388 |
- i, err = srv.ImageInspect(i.Parent) |
|
| 389 |
- if err != nil {
|
|
| 390 |
- return err |
|
| 391 |
- } |
|
| 392 |
- } else {
|
|
| 393 |
- i = nil |
|
| 394 |
- } |
|
| 395 |
- } |
|
| 396 |
- return nil |
|
| 397 |
-} |
|
| 398 |
- |
|
| 399 |
-func (srv *Server) Build(job *engine.Job) engine.Status {
|
|
| 400 |
- if len(job.Args) != 0 {
|
|
| 401 |
- return job.Errorf("Usage: %s\n", job.Name)
|
|
| 402 |
- } |
|
| 403 |
- var ( |
|
| 404 |
- remoteURL = job.Getenv("remote")
|
|
| 405 |
- repoName = job.Getenv("t")
|
|
| 406 |
- suppressOutput = job.GetenvBool("q")
|
|
| 407 |
- noCache = job.GetenvBool("nocache")
|
|
| 408 |
- rm = job.GetenvBool("rm")
|
|
| 409 |
- authConfig = ®istry.AuthConfig{}
|
|
| 410 |
- configFile = ®istry.ConfigFile{}
|
|
| 411 |
- tag string |
|
| 412 |
- context io.ReadCloser |
|
| 413 |
- ) |
|
| 414 |
- job.GetenvJson("authConfig", authConfig)
|
|
| 415 |
- job.GetenvJson("configFile", configFile)
|
|
| 416 |
- repoName, tag = utils.ParseRepositoryTag(repoName) |
|
| 417 |
- |
|
| 418 |
- if remoteURL == "" {
|
|
| 419 |
- context = ioutil.NopCloser(job.Stdin) |
|
| 420 |
- } else if utils.IsGIT(remoteURL) {
|
|
| 421 |
- if !strings.HasPrefix(remoteURL, "git://") {
|
|
| 422 |
- remoteURL = "https://" + remoteURL |
|
| 423 |
- } |
|
| 424 |
- root, err := ioutil.TempDir("", "docker-build-git")
|
|
| 425 |
- if err != nil {
|
|
| 426 |
- return job.Error(err) |
|
| 427 |
- } |
|
| 428 |
- defer os.RemoveAll(root) |
|
| 429 |
- |
|
| 430 |
- if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
|
|
| 431 |
- return job.Errorf("Error trying to use git: %s (%s)", err, output)
|
|
| 432 |
- } |
|
| 433 |
- |
|
| 434 |
- c, err := archive.Tar(root, archive.Uncompressed) |
|
| 435 |
- if err != nil {
|
|
| 436 |
- return job.Error(err) |
|
| 437 |
- } |
|
| 438 |
- context = c |
|
| 439 |
- } else if utils.IsURL(remoteURL) {
|
|
| 440 |
- f, err := utils.Download(remoteURL) |
|
| 441 |
- if err != nil {
|
|
| 442 |
- return job.Error(err) |
|
| 443 |
- } |
|
| 444 |
- defer f.Body.Close() |
|
| 445 |
- dockerFile, err := ioutil.ReadAll(f.Body) |
|
| 446 |
- if err != nil {
|
|
| 447 |
- return job.Error(err) |
|
| 448 |
- } |
|
| 449 |
- c, err := archive.Generate("Dockerfile", string(dockerFile))
|
|
| 450 |
- if err != nil {
|
|
| 451 |
- return job.Error(err) |
|
| 452 |
- } |
|
| 453 |
- context = c |
|
| 454 |
- } |
|
| 455 |
- defer context.Close() |
|
| 456 |
- |
|
| 457 |
- sf := utils.NewStreamFormatter(job.GetenvBool("json"))
|
|
| 458 |
- b := NewBuildFile(srv, |
|
| 459 |
- &StdoutFormater{
|
|
| 460 |
- Writer: job.Stdout, |
|
| 461 |
- StreamFormatter: sf, |
|
| 462 |
- }, |
|
| 463 |
- &StderrFormater{
|
|
| 464 |
- Writer: job.Stdout, |
|
| 465 |
- StreamFormatter: sf, |
|
| 466 |
- }, |
|
| 467 |
- !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) |
|
| 468 |
- id, err := b.Build(context) |
|
| 469 |
- if err != nil {
|
|
| 470 |
- return job.Error(err) |
|
| 471 |
- } |
|
| 472 |
- if repoName != "" {
|
|
| 473 |
- srv.runtime.Repositories().Set(repoName, tag, id, false) |
|
| 474 |
- } |
|
| 475 |
- return engine.StatusOK |
|
| 476 |
-} |
|
| 477 |
- |
|
| 478 |
-// Loads a set of images into the repository. This is the complementary of ImageExport. |
|
| 479 |
-// The input stream is an uncompressed tar ball containing images and metadata. |
|
| 480 |
-func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
|
|
| 481 |
- tmpImageDir, err := ioutil.TempDir("", "docker-import-")
|
|
| 482 |
- if err != nil {
|
|
| 483 |
- return job.Error(err) |
|
| 484 |
- } |
|
| 485 |
- defer os.RemoveAll(tmpImageDir) |
|
| 486 |
- |
|
| 487 |
- var ( |
|
| 488 |
- repoTarFile = path.Join(tmpImageDir, "repo.tar") |
|
| 489 |
- repoDir = path.Join(tmpImageDir, "repo") |
|
| 490 |
- ) |
|
| 491 |
- |
|
| 492 |
- tarFile, err := os.Create(repoTarFile) |
|
| 493 |
- if err != nil {
|
|
| 494 |
- return job.Error(err) |
|
| 495 |
- } |
|
| 496 |
- if _, err := io.Copy(tarFile, job.Stdin); err != nil {
|
|
| 497 |
- return job.Error(err) |
|
| 498 |
- } |
|
| 499 |
- tarFile.Close() |
|
| 500 |
- |
|
| 501 |
- repoFile, err := os.Open(repoTarFile) |
|
| 502 |
- if err != nil {
|
|
| 503 |
- return job.Error(err) |
|
| 504 |
- } |
|
| 505 |
- if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
|
|
| 506 |
- return job.Error(err) |
|
| 507 |
- } |
|
| 508 |
- if err := archive.Untar(repoFile, repoDir, nil); err != nil {
|
|
| 509 |
- return job.Error(err) |
|
| 510 |
- } |
|
| 511 |
- |
|
| 512 |
- dirs, err := ioutil.ReadDir(repoDir) |
|
| 513 |
- if err != nil {
|
|
| 514 |
- return job.Error(err) |
|
| 515 |
- } |
|
| 516 |
- |
|
| 517 |
- for _, d := range dirs {
|
|
| 518 |
- if d.IsDir() {
|
|
| 519 |
- if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil {
|
|
| 520 |
- return job.Error(err) |
|
| 521 |
- } |
|
| 522 |
- } |
|
| 523 |
- } |
|
| 524 |
- |
|
| 525 |
- repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) |
|
| 526 |
- if err == nil {
|
|
| 527 |
- repositories := map[string]graph.Repository{}
|
|
| 528 |
- if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
|
|
| 529 |
- return job.Error(err) |
|
| 530 |
- } |
|
| 531 |
- |
|
| 532 |
- for imageName, tagMap := range repositories {
|
|
| 533 |
- for tag, address := range tagMap {
|
|
| 534 |
- if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil {
|
|
| 535 |
- return job.Error(err) |
|
| 536 |
- } |
|
| 537 |
- } |
|
| 538 |
- } |
|
| 539 |
- } else if !os.IsNotExist(err) {
|
|
| 540 |
- return job.Error(err) |
|
| 541 |
- } |
|
| 542 |
- |
|
| 543 |
- return engine.StatusOK |
|
| 544 |
-} |
|
| 545 |
- |
|
| 546 |
-func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
|
|
| 547 |
- if _, err := srv.ImageInspect(address); err != nil {
|
|
| 548 |
- utils.Debugf("Loading %s", address)
|
|
| 549 |
- |
|
| 550 |
- imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) |
|
| 551 |
- if err != nil {
|
|
| 552 |
- utils.Debugf("Error reading json", err)
|
|
| 553 |
- return err |
|
| 554 |
- } |
|
| 555 |
- |
|
| 556 |
- layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) |
|
| 557 |
- if err != nil {
|
|
| 558 |
- utils.Debugf("Error reading embedded tar", err)
|
|
| 559 |
- return err |
|
| 560 |
- } |
|
| 561 |
- img, err := image.NewImgJSON(imageJson) |
|
| 562 |
- if err != nil {
|
|
| 563 |
- utils.Debugf("Error unmarshalling json", err)
|
|
| 564 |
- return err |
|
| 565 |
- } |
|
| 566 |
- if img.Parent != "" {
|
|
| 567 |
- if !srv.runtime.Graph().Exists(img.Parent) {
|
|
| 568 |
- if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
|
|
| 569 |
- return err |
|
| 570 |
- } |
|
| 571 |
- } |
|
| 572 |
- } |
|
| 573 |
- if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil {
|
|
| 574 |
- return err |
|
| 575 |
- } |
|
| 576 |
- } |
|
| 577 |
- utils.Debugf("Completed processing %s", address)
|
|
| 578 |
- |
|
| 579 |
- return nil |
|
| 580 |
-} |
|
| 581 |
- |
|
| 582 |
-func (srv *Server) ImagesSearch(job *engine.Job) engine.Status {
|
|
| 583 |
- if n := len(job.Args); n != 1 {
|
|
| 584 |
- return job.Errorf("Usage: %s TERM", job.Name)
|
|
| 585 |
- } |
|
| 586 |
- var ( |
|
| 587 |
- term = job.Args[0] |
|
| 588 |
- metaHeaders = map[string][]string{}
|
|
| 589 |
- authConfig = ®istry.AuthConfig{}
|
|
| 590 |
- ) |
|
| 591 |
- job.GetenvJson("authConfig", authConfig)
|
|
| 592 |
- job.GetenvJson("metaHeaders", metaHeaders)
|
|
| 593 |
- |
|
| 594 |
- r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) |
|
| 595 |
- if err != nil {
|
|
| 596 |
- return job.Error(err) |
|
| 597 |
- } |
|
| 598 |
- results, err := r.SearchRepositories(term) |
|
| 599 |
- if err != nil {
|
|
| 600 |
- return job.Error(err) |
|
| 601 |
- } |
|
| 602 |
- outs := engine.NewTable("star_count", 0)
|
|
| 603 |
- for _, result := range results.Results {
|
|
| 604 |
- out := &engine.Env{}
|
|
| 605 |
- out.Import(result) |
|
| 606 |
- outs.Add(out) |
|
| 607 |
- } |
|
| 608 |
- outs.ReverseSort() |
|
| 609 |
- if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
|
| 610 |
- return job.Error(err) |
|
| 611 |
- } |
|
| 612 |
- return engine.StatusOK |
|
| 613 |
-} |

func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
    if len(job.Args) != 3 {
        return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name)
    }

    var (
        name = job.Args[0]
        url  = job.Args[1]
        path = job.Args[2]
    )

    sf := utils.NewStreamFormatter(job.GetenvBool("json"))

    out := utils.NewWriteFlusher(job.Stdout)
    img, err := srv.runtime.Repositories().LookupImage(name)
    if err != nil {
        return job.Error(err)
    }

    file, err := utils.Download(url)
    if err != nil {
        return job.Error(err)
    }
    defer file.Body.Close()

    config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig())
    if err != nil {
        return job.Error(err)
    }

    c, _, err := srv.runtime.Create(config, "")
    if err != nil {
        return job.Error(err)
    }

    if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil {
        return job.Error(err)
    }
    // FIXME: Handle custom repo, tag comment, author
    img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil)
    if err != nil {
        out.Write(sf.FormatError(err))
        return engine.StatusErr
    }
    out.Write(sf.FormatStatus("", img.ID))
    return engine.StatusOK
}

func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
    images, _ := srv.runtime.Graph().Map()
    if images == nil {
        return engine.StatusOK
    }
    job.Stdout.Write([]byte("digraph docker {\n"))

    var (
        parentImage *image.Image
        err         error
    )
    for _, image := range images {
        parentImage, err = image.GetParent()
        if err != nil {
            return job.Errorf("Error while getting parent image: %v", err)
        }
        if parentImage != nil {
            job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
        } else {
            job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
        }
    }

    reporefs := make(map[string][]string)

    for name, repository := range srv.runtime.Repositories().Repositories {
        for tag, id := range repository {
            reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
        }
    }

    for id, repos := range reporefs {
        job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
    }
    job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
    return engine.StatusOK
}
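
// NOTE: the handler above streams a Graphviz DOT document to stdout;
// rendering is left to the caller (for example by piping the output through
// `dot -Tsvg`). Schematic output for a single tagged image, reading the code
// literally: edge nodes carry the full image ID while the label node uses the
// 12-character truncated form, and the IDs below are placeholders:
//
//    digraph docker {
//     base -> "<full 64-char image ID>" [style=invis]
//     "<12-char ID>" [label="<12-char ID>\nubuntu:12.04",shape=box,fillcolor="paleturquoise",style="filled,rounded"];
//     base [style=invisible]
//    }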

func (srv *Server) Images(job *engine.Job) engine.Status {
    var (
        allImages map[string]*image.Image
        err       error
    )
    if job.GetenvBool("all") {
        allImages, err = srv.runtime.Graph().Map()
    } else {
        allImages, err = srv.runtime.Graph().Heads()
    }
    if err != nil {
        return job.Error(err)
    }
    lookup := make(map[string]*engine.Env)
    for name, repository := range srv.runtime.Repositories().Repositories {
        if job.Getenv("filter") != "" {
            if match, _ := path.Match(job.Getenv("filter"), name); !match {
                continue
            }
        }
        for tag, id := range repository {
            image, err := srv.runtime.Graph().Get(id)
            if err != nil {
                log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
                continue
            }

            if out, exists := lookup[id]; exists {
                out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
            } else {
                out := &engine.Env{}
                delete(allImages, id)
                out.Set("ParentId", image.Parent)
                out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
                out.Set("Id", image.ID)
                out.SetInt64("Created", image.Created.Unix())
                out.SetInt64("Size", image.Size)
                out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
                lookup[id] = out
            }
        }
    }

    outs := engine.NewTable("Created", len(lookup))
    for _, value := range lookup {
        outs.Add(value)
    }

    // Display images which aren't part of a repository/tag
    if job.Getenv("filter") == "" {
        for _, image := range allImages {
            out := &engine.Env{}
            out.Set("ParentId", image.Parent)
            out.SetList("RepoTags", []string{"<none>:<none>"})
            out.Set("Id", image.ID)
            out.SetInt64("Created", image.Created.Unix())
            out.SetInt64("Size", image.Size)
            out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
            outs.Add(out)
        }
    }

    outs.ReverseSort()
    if _, err := outs.WriteListTo(job.Stdout); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}
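
// NOTE: the lookup map above folds every repo:tag reference to the same image
// ID into a single row, appending to RepoTags instead of emitting duplicates.
// A standalone sketch of that grouping step (the row type is illustrative):
//
//    type row struct {
//        ID   string
//        Tags []string
//    }
//
//    func group(repos map[string]map[string]string) map[string]*row {
//        rows := map[string]*row{}
//        for name, repo := range repos {
//            for tag, id := range repo {
//                if r, ok := rows[id]; ok {
//                    r.Tags = append(r.Tags, name+":"+tag)
//                } else {
//                    rows[id] = &row{ID: id, Tags: []string{name + ":" + tag}}
//                }
//            }
//        }
//        return rows
//    }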

func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
    images, _ := srv.runtime.Graph().Map()
    var imgcount int
    if images == nil {
        imgcount = 0
    } else {
        imgcount = len(images)
    }
    kernelVersion := "<unknown>"
    if kv, err := utils.GetKernelVersion(); err == nil {
        kernelVersion = kv.String()
    }

    // If we still have the original dockerinit binary from before we copied
    // it locally, let's return the path to that, since that's more intuitive
    // (the copied path is trivial to derive by hand given VERSION).
    initPath := utils.DockerInitPath("")
    if initPath == "" {
        // If that fails, we'll just return the path from the runtime.
        initPath = srv.runtime.SystemInitPath()
    }

    v := &engine.Env{}
    v.SetInt("Containers", len(srv.runtime.List()))
    v.SetInt("Images", imgcount)
    v.Set("Driver", srv.runtime.GraphDriver().String())
    v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status())
    v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit)
    v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit)
    v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled)
    v.SetBool("Debug", os.Getenv("DEBUG") != "")
    v.SetInt("NFd", utils.GetTotalUsedFds())
    v.SetInt("NGoroutines", goruntime.NumGoroutine())
    v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name())
    v.SetInt("NEventsListener", len(srv.listeners))
    v.Set("KernelVersion", kernelVersion)
    v.Set("IndexServerAddress", registry.IndexServerAddress())
    v.Set("InitSha1", dockerversion.INITSHA1)
    v.Set("InitPath", initPath)
    if _, err := v.WriteTo(job.Stdout); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}

func (srv *Server) DockerVersion(job *engine.Job) engine.Status {
    v := &engine.Env{}
    v.Set("Version", dockerversion.VERSION)
    v.Set("GitCommit", dockerversion.GITCOMMIT)
    v.Set("GoVersion", goruntime.Version())
    v.Set("Os", goruntime.GOOS)
    v.Set("Arch", goruntime.GOARCH)
    if kernelVersion, err := utils.GetKernelVersion(); err == nil {
        v.Set("KernelVersion", kernelVersion.String())
    }
    if _, err := v.WriteTo(job.Stdout); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}

func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
    if n := len(job.Args); n != 1 {
        return job.Errorf("Usage: %s IMAGE", job.Name)
    }
    name := job.Args[0]
    foundImage, err := srv.runtime.Repositories().LookupImage(name)
    if err != nil {
        return job.Error(err)
    }

    lookupMap := make(map[string][]string)
    for name, repository := range srv.runtime.Repositories().Repositories {
        for tag, id := range repository {
            // If the ID already has a reverse lookup, do not update it unless for "latest"
            if _, exists := lookupMap[id]; !exists {
                lookupMap[id] = []string{}
            }
            lookupMap[id] = append(lookupMap[id], name+":"+tag)
        }
    }

    outs := engine.NewTable("Created", 0)
    err = foundImage.WalkHistory(func(img *image.Image) error {
        out := &engine.Env{}
        out.Set("Id", img.ID)
        out.SetInt64("Created", img.Created.Unix())
        out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
        out.SetList("Tags", lookupMap[img.ID])
        out.SetInt64("Size", img.Size)
        outs.Add(out)
        return nil
    })
    outs.ReverseSort()
    if _, err := outs.WriteListTo(job.Stdout); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}

func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
    if len(job.Args) != 1 && len(job.Args) != 2 {
        return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
    }
    var (
        name   = job.Args[0]
        psArgs = "-ef"
    )

    if len(job.Args) == 2 && job.Args[1] != "" {
        psArgs = job.Args[1]
    }

    if container := srv.runtime.Get(name); container != nil {
        if !container.State.IsRunning() {
            return job.Errorf("Container %s is not running", name)
        }
        pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID)
        if err != nil {
            return job.Error(err)
        }
        output, err := exec.Command("ps", psArgs).Output()
        if err != nil {
            return job.Errorf("Error running ps: %s", err)
        }

        lines := strings.Split(string(output), "\n")
        header := strings.Fields(lines[0])
        out := &engine.Env{}
        out.SetList("Titles", header)

        pidIndex := -1
        for i, name := range header {
            if name == "PID" {
                pidIndex = i
            }
        }
        if pidIndex == -1 {
            return job.Errorf("Couldn't find PID field in ps output")
        }

        processes := [][]string{}
        for _, line := range lines[1:] {
            if len(line) == 0 {
                continue
            }
            fields := strings.Fields(line)
            p, err := strconv.Atoi(fields[pidIndex])
            if err != nil {
                return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
            }

            for _, pid := range pids {
                if pid == p {
                    // Make sure the number of fields equals the number of
                    // header titles by merging "overhanging" fields.
                    process := fields[:len(header)-1]
                    process = append(process, strings.Join(fields[len(header)-1:], " "))
                    processes = append(processes, process)
                }
            }
        }
        out.SetJson("Processes", processes)
        out.WriteTo(job.Stdout)
        return engine.StatusOK
    }
    return job.Errorf("No such container: %s", name)
}
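
// NOTE: `ps -ef` prints the command in the last column, and the command itself
// may contain spaces, so a row can split into more fields than there are
// header titles. The merge above keeps the first len(header)-1 fields and
// joins the rest back into one column. A standalone sketch with made-up data:
//
//    header := strings.Fields("UID PID PPID C STIME TTY TIME CMD")               // 8 titles
//    fields := strings.Fields("root 42 1 0 10:00 ? 00:00:00 /bin/sh -c sleep 1") // 11 fields
//    cmd := strings.Join(fields[len(header)-1:], " ")                            // "/bin/sh -c sleep 1"
//    row := append(fields[:len(header)-1], cmd)                                  // exactly 8 columns again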

func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
    if n := len(job.Args); n != 1 {
        return job.Errorf("Usage: %s CONTAINER", job.Name)
    }
    name := job.Args[0]
    if container := srv.runtime.Get(name); container != nil {
        outs := engine.NewTable("", 0)
        changes, err := container.Changes()
        if err != nil {
            return job.Error(err)
        }
        for _, change := range changes {
            out := &engine.Env{}
            if err := out.Import(change); err != nil {
                return job.Error(err)
            }
            outs.Add(out)
        }
        if _, err := outs.WriteListTo(job.Stdout); err != nil {
            return job.Error(err)
        }
    } else {
        return job.Errorf("No such container: %s", name)
    }
    return engine.StatusOK
}

func (srv *Server) Containers(job *engine.Job) engine.Status {
    var (
        foundBefore bool
        displayed   int
        all         = job.GetenvBool("all")
        since       = job.Getenv("since")
        before      = job.Getenv("before")
        n           = job.GetenvInt("limit")
        size        = job.GetenvBool("size")
    )
    outs := engine.NewTable("Created", 0)

    names := map[string][]string{}
    srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
        names[e.ID()] = append(names[e.ID()], p)
        return nil
    }, -1)

    for _, container := range srv.runtime.List() {
        if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
            continue
        }
        if before != "" && !foundBefore {
            if container.ID == before || utils.TruncateID(container.ID) == before {
                foundBefore = true
            }
            continue
        }
        if n > 0 && displayed == n {
            break
        }
        if container.ID == since || utils.TruncateID(container.ID) == since {
            break
        }
        displayed++
        out := &engine.Env{}
        out.Set("Id", container.ID)
        out.SetList("Names", names[container.ID])
        out.Set("Image", srv.runtime.Repositories().ImageName(container.Image))
        if len(container.Args) > 0 {
            out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " ")))
        } else {
            out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
        }
        out.SetInt64("Created", container.Created.Unix())
        out.Set("Status", container.State.String())
        str, err := container.NetworkSettings.PortMappingAPI().ToListString()
        if err != nil {
            return job.Error(err)
        }
        out.Set("Ports", str)
        if size {
            sizeRw, sizeRootFs := container.GetSize()
            out.SetInt64("SizeRw", sizeRw)
            out.SetInt64("SizeRootFs", sizeRootFs)
        }
        outs.Add(out)
    }
    outs.ReverseSort()
    if _, err := outs.WriteListTo(job.Stdout); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}

func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
    }
    name := job.Args[0]

    container := srv.runtime.Get(name)
    if container == nil {
        return job.Errorf("No such container: %s", name)
    }
    var config = container.Config
    var newConfig runconfig.Config
    if err := job.GetenvJson("config", &newConfig); err != nil {
        return job.Error(err)
    }

    if err := runconfig.Merge(&newConfig, config); err != nil {
        return job.Error(err)
    }

    img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
    if err != nil {
        return job.Error(err)
    }
    job.Printf("%s\n", img.ID)
    return engine.StatusOK
}

func (srv *Server) ImageTag(job *engine.Job) engine.Status {
    if len(job.Args) != 2 && len(job.Args) != 3 {
        return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
    }
    var tag string
    if len(job.Args) == 3 {
        tag = job.Args[2]
    }
    if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}

func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
    history, err := r.GetRemoteHistory(imgID, endpoint, token)
    if err != nil {
        return err
    }
    out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
    // FIXME: Try to stream the images?
    // FIXME: Launch the getRemoteImage() in goroutines

    for i := len(history) - 1; i >= 0; i-- {
        id := history[i]

        // Ensure no two downloads of the same layer happen at the same time.
        if c, err := srv.poolAdd("pull", "layer:"+id); err != nil {
            utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err)
            <-c
        }
        defer srv.poolRemove("pull", "layer:"+id)

        if !srv.runtime.Graph().Exists(id) {
            out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
            var (
                imgJSON []byte
                imgSize int
                err     error
                img     *image.Image
            )
            retries := 5
            for j := 1; j <= retries; j++ {
                imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
                if err != nil && j == retries {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return err
                } else if err != nil {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                }
                img, err = image.NewImgJSON(imgJSON)
                if err != nil && j == retries {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return fmt.Errorf("Failed to parse json: %s", err)
                } else if err != nil {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else {
                    break
                }
            }

            // Get the layer
            out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil))
            layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token)
            if err != nil {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return err
            }
            defer layer.Close()
            if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
                return err
            }
        }
        out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
    }
    return nil
}
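
// NOTE: the metadata fetch above is retried up to five times with a linearly
// growing sleep (0.5s after the first failure, 1s after the second, and so
// on). A standalone sketch of that retry shape, assuming a hypothetical
// fetch() helper:
//
//    const retries = 5
//    for j := 1; j <= retries; j++ {
//        if err := fetch(); err != nil {
//            if j == retries {
//                return err // out of attempts
//            }
//            time.Sleep(time.Duration(j) * 500 * time.Millisecond)
//            continue
//        }
//        break // success
//    }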

func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
    out.Write(sf.FormatStatus("", "Pulling repository %s", localName))

    repoData, err := r.GetRepositoryData(remoteName)
    if err != nil {
        return err
    }

    utils.Debugf("Retrieving the tag list")
    tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
    if err != nil {
        utils.Errorf("%v", err)
        return err
    }

    for tag, id := range tagsList {
        repoData.ImgList[id] = &registry.ImgData{
            ID:       id,
            Tag:      tag,
            Checksum: "",
        }
    }

    utils.Debugf("Registering tags")
    // If no tag has been specified, pull them all
    if askedTag == "" {
        for tag, id := range tagsList {
            repoData.ImgList[id].Tag = tag
        }
    } else {
        // Otherwise, check that the tag exists and use only that one
        id, exists := tagsList[askedTag]
        if !exists {
            return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
        }
        repoData.ImgList[id].Tag = askedTag
    }

    errors := make(chan error)
    for _, image := range repoData.ImgList {
        downloadImage := func(img *registry.ImgData) {
            if askedTag != "" && img.Tag != askedTag {
                utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
                if parallel {
                    errors <- nil
                }
                return
            }

            if img.Tag == "" {
                utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
                if parallel {
                    errors <- nil
                }
                return
            }

            // Ensure no two downloads of the same image happen at the same time.
            if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
                if c != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
                    <-c
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
                } else {
                    utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
                }
                if parallel {
                    errors <- nil
                }
                return
            }
            defer srv.poolRemove("pull", "img:"+img.ID)

            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
            success := false
            var lastErr error
            for _, ep := range repoData.Endpoints {
                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
                if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
                    // It's not ideal that only the last error is returned; it
                    // would be better to concatenate the errors. As the error
                    // is also written to the output stream, the user will
                    // still see it.
                    lastErr = err
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
                    continue
                }
                success = true
                break
            }
            if !success {
                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil))
                if parallel {
                    errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
                    return
                }
            }
            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))

            if parallel {
                errors <- nil
            }
        }

        if parallel {
            go downloadImage(image)
        } else {
            downloadImage(image)
        }
    }
    if parallel {
        var lastError error
        for i := 0; i < len(repoData.ImgList); i++ {
            if err := <-errors; err != nil {
                lastError = err
            }
        }
        if lastError != nil {
            return lastError
        }
    }
    for tag, id := range tagsList {
        if askedTag != "" && tag != askedTag {
            continue
        }
        if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil {
            return err
        }
    }
    if err := srv.runtime.Repositories().Save(); err != nil {
        return err
    }

    return nil
}
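
// NOTE: in parallel mode every downloadImage goroutine sends exactly one
// value on the errors channel (nil on success), and the collection loop
// receives exactly len(repoData.ImgList) values, so no goroutine is leaked.
// A minimal runnable sketch of the same fan-out/collect shape:
//
//    package main
//
//    import "fmt"
//
//    func main() {
//        items := []string{"a", "b", "c"}
//        errs := make(chan error)
//        for _, it := range items {
//            go func(it string) {
//                // do the work; send nil on success, the error otherwise
//                errs <- nil
//            }(it)
//        }
//        var last error
//        for range items {
//            if err := <-errs; err != nil {
//                last = err // only the last error survives, as above
//            }
//        }
//        fmt.Println("done, last error:", last)
//    }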

func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) {
    srv.Lock()
    defer srv.Unlock()

    if c, exists := srv.pullingPool[key]; exists {
        return c, fmt.Errorf("pull %s is already in progress", key)
    }
    if c, exists := srv.pushingPool[key]; exists {
        return c, fmt.Errorf("push %s is already in progress", key)
    }

    c := make(chan struct{})
    switch kind {
    case "pull":
        srv.pullingPool[key] = c
    case "push":
        srv.pushingPool[key] = c
    default:
        return nil, fmt.Errorf("Unknown pool type")
    }
    return c, nil
}

func (srv *Server) poolRemove(kind, key string) error {
    srv.Lock()
    defer srv.Unlock()
    switch kind {
    case "pull":
        if c, exists := srv.pullingPool[key]; exists {
            close(c)
            delete(srv.pullingPool, key)
        }
    case "push":
        if c, exists := srv.pushingPool[key]; exists {
            close(c)
            delete(srv.pushingPool, key)
        }
    default:
        return fmt.Errorf("Unknown pool type")
    }
    return nil
}
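
// NOTE: poolAdd/poolRemove implement a small "single flight" pattern: the
// first caller for a key gets a fresh channel and does the work, later
// callers get the same channel plus an error and simply wait on it, and
// poolRemove closes the channel to wake every waiter at once. A minimal
// runnable sketch of the same pattern (names are illustrative, not from
// this file):
//
//    package main
//
//    import (
//        "fmt"
//        "sync"
//    )
//
//    type pool struct {
//        mu       sync.Mutex
//        inFlight map[string]chan struct{}
//    }
//
//    func (p *pool) begin(key string) (c chan struct{}, dup bool) {
//        p.mu.Lock()
//        defer p.mu.Unlock()
//        if c, ok := p.inFlight[key]; ok {
//            return c, true // someone is already working on key
//        }
//        c = make(chan struct{})
//        p.inFlight[key] = c
//        return c, false
//    }
//
//    func (p *pool) end(key string) {
//        p.mu.Lock()
//        defer p.mu.Unlock()
//        if c, ok := p.inFlight[key]; ok {
//            close(c) // a closed channel unblocks every receiver
//            delete(p.inFlight, key)
//        }
//    }
//
//    func main() {
//        p := &pool{inFlight: make(map[string]chan struct{})}
//        var wg sync.WaitGroup
//        for i := 0; i < 3; i++ {
//            wg.Add(1)
//            go func(n int) {
//                defer wg.Done()
//                if c, dup := p.begin("img:123"); dup {
//                    <-c // wait for the first caller instead of duplicating work
//                    fmt.Println("worker", n, "waited")
//                    return
//                }
//                fmt.Println("worker", n, "did the pull")
//                p.end("img:123")
//            }(i)
//        }
//        wg.Wait()
//    }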

func (srv *Server) ImagePull(job *engine.Job) engine.Status {
    if n := len(job.Args); n != 1 && n != 2 {
        return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
    }
    var (
        localName   = job.Args[0]
        tag         string
        sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
        authConfig  = &registry.AuthConfig{}
        metaHeaders map[string][]string
    )
    if len(job.Args) > 1 {
        tag = job.Args[1]
    }

    job.GetenvJson("authConfig", authConfig)
    job.GetenvJson("metaHeaders", &metaHeaders)

    c, err := srv.poolAdd("pull", localName+":"+tag)
    if err != nil {
        if c != nil {
            // Another pull of the same repository is already taking place; just wait for it to finish
            job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
            <-c
            return engine.StatusOK
        }
        return job.Error(err)
    }
    defer srv.poolRemove("pull", localName+":"+tag)

    // Resolve the Repository name from fqn to endpoint + name
    hostname, remoteName, err := registry.ResolveRepositoryName(localName)
    if err != nil {
        return job.Error(err)
    }

    endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
    if err != nil {
        return job.Error(err)
    }

    r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
    if err != nil {
        return job.Error(err)
    }

    if endpoint == registry.IndexServerAddress() {
        // If we pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
        localName = remoteName
    }

    if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
        return job.Error(err)
    }

    return engine.StatusOK
}

// Retrieve all the images to be uploaded in the correct order
func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) {
    var (
        imageList   []string
        imagesSeen  map[string]bool     = make(map[string]bool)
        tagsByImage map[string][]string = make(map[string][]string)
    )

    for tag, id := range localRepo {
        var imageListForThisTag []string

        tagsByImage[id] = append(tagsByImage[id], tag)

        for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
            if err != nil {
                return nil, nil, err
            }

            if imagesSeen[img.ID] {
                // This image is already on the list, we can ignore it and all its parents
                break
            }

            imagesSeen[img.ID] = true
            imageListForThisTag = append(imageListForThisTag, img.ID)
        }

        // Reverse the image list for this tag (so the "most"-parent image is first).
        for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
            imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
        }

        // Append to the main image list.
        imageList = append(imageList, imageListForThisTag...)
    }

    utils.Debugf("Image list: %v", imageList)
    utils.Debugf("Tags by image: %v", tagsByImage)

    return imageList, tagsByImage, nil
}
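
// NOTE: walking GetParent() yields child -> ... -> root order, so the in-place
// swap above reverses each per-tag list to root-first order before it is
// appended; the registry then receives ancestors before the layers that
// depend on them. The reversal idiom on its own, for any slice s:
//
//    for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
//        s[i], s[j] = s[j], s[i]
//    }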

func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error {
    out = utils.NewWriteFlusher(out)
    utils.Debugf("Local repo: %s", localRepo)
    imgList, tagsByImage, err := srv.getImageList(localRepo)
    if err != nil {
        return err
    }

    out.Write(sf.FormatStatus("", "Sending image list"))

    var repoData *registry.RepositoryData
    var imageIndex []*registry.ImgData

    for _, imgId := range imgList {
        if tags, exists := tagsByImage[imgId]; exists {
            // If an image has tags, add an entry in the image index for each tag.
            for _, tag := range tags {
                imageIndex = append(imageIndex, &registry.ImgData{
                    ID:  imgId,
                    Tag: tag,
                })
            }
        } else {
            // If the image does not have a tag, it still needs to be sent to the
            // registry with an empty tag so that it is associated with the repository.
            imageIndex = append(imageIndex, &registry.ImgData{
                ID:  imgId,
                Tag: "",
            })
        }
    }

    utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
    for _, data := range imageIndex {
        utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
    }

    // Register all the images in a repository with the registry.
    // If an image is not in this list it will not be associated with the repository.
    repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
    if err != nil {
        return err
    }

    for _, ep := range repoData.Endpoints {
        out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))

        for _, imgId := range imgList {
            if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
                out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
            } else {
                if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
                    // FIXME: Continue on error?
                    return err
                }
            }

            for _, tag := range tagsByImage[imgId] {
                out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))

                if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
                    return err
                }
            }
        }
    }

    if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
        return err
    }

    return nil
}

func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
    out = utils.NewWriteFlusher(out)
    jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json"))
    if err != nil {
        return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
    }
    out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))

    imgData := &registry.ImgData{
        ID: imgID,
    }

    // Send the json
    if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
        if err == registry.ErrAlreadyExists {
            out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
            return "", nil
        }
        return "", err
    }

    layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
    if err != nil {
        return "", fmt.Errorf("Failed to generate layer archive: %s", err)
    }
    defer os.RemoveAll(layerData.Name())

    // Send the layer
    checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
    if err != nil {
        return "", err
    }
    imgData.Checksum = checksum
    imgData.ChecksumPayload = checksumPayload
    // Send the checksum
    if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
        return "", err
    }

    out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
    return imgData.Checksum, nil
}

// FIXME: Allow to interrupt current push when new push of same image is done.
func (srv *Server) ImagePush(job *engine.Job) engine.Status {
    if n := len(job.Args); n != 1 {
        return job.Errorf("Usage: %s IMAGE", job.Name)
    }
    var (
        localName   = job.Args[0]
        sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
        authConfig  = &registry.AuthConfig{}
        metaHeaders map[string][]string
    )

    job.GetenvJson("authConfig", authConfig)
    job.GetenvJson("metaHeaders", &metaHeaders)
    if _, err := srv.poolAdd("push", localName); err != nil {
        return job.Error(err)
    }
    defer srv.poolRemove("push", localName)

    // Resolve the Repository name from fqn to endpoint + name
    hostname, remoteName, err := registry.ResolveRepositoryName(localName)
    if err != nil {
        return job.Error(err)
    }

    endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
    if err != nil {
        return job.Error(err)
    }

    img, err := srv.runtime.Graph().Get(localName)
    r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
    if err2 != nil {
        return job.Error(err2)
    }

    if err != nil {
        reposLen := len(srv.runtime.Repositories().Repositories[localName])
        job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
        // If it fails, try to get the repository
        if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
            if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil {
                return job.Error(err)
            }
            return engine.StatusOK
        }
        return job.Error(err)
    }

    var token []string
    job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
    if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}

func (srv *Server) ImageImport(job *engine.Job) engine.Status {
    if n := len(job.Args); n != 2 && n != 3 {
        return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
    }
    var (
        src     = job.Args[0]
        repo    = job.Args[1]
        tag     string
        sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
        archive archive.ArchiveReader
        resp    *http.Response
    )
    if len(job.Args) > 2 {
        tag = job.Args[2]
    }

    if src == "-" {
        archive = job.Stdin
    } else {
        u, err := url.Parse(src)
        if err != nil {
            return job.Error(err)
        }
        if u.Scheme == "" {
            u.Scheme = "http"
            u.Host = src
            u.Path = ""
        }
        job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
        // Download with curl (pretty progress bar).
        // If curl is not available, fall back to http.Get().
        resp, err = utils.Download(u.String())
        if err != nil {
            return job.Error(err)
        }
        progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
        defer progressReader.Close()
        archive = progressReader
    }
    img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
    if err != nil {
        return job.Error(err)
    }
    // Optionally register the image at REPO/TAG
    if repo != "" {
        if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
            return job.Error(err)
        }
    }
    job.Stdout.Write(sf.FormatStatus("", img.ID))
    return engine.StatusOK
}

func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
    var name string
    if len(job.Args) == 1 {
        name = job.Args[0]
    } else if len(job.Args) > 1 {
        return job.Errorf("Usage: %s", job.Name)
    }
    config := runconfig.ContainerConfigFromJob(job)
    if config.Memory != 0 && config.Memory < 524288 {
        return job.Errorf("Minimum memory limit allowed is 512k")
    }
    if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit {
        job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
        config.Memory = 0
    }
    if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit {
        job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
        config.MemorySwap = -1
    }
    resolvConf, err := utils.GetResolvConf()
    if err != nil {
        return job.Error(err)
    }
    if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Config().Dns) == 0 && utils.CheckLocalDns(resolvConf) {
        job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers: %v\n", runtime.DefaultDns)
        config.Dns = runtime.DefaultDns
    }

    container, buildWarnings, err := srv.runtime.Create(config, name)
    if err != nil {
        if srv.runtime.Graph().IsNotExist(err) {
            _, tag := utils.ParseRepositoryTag(config.Image)
            if tag == "" {
                tag = graph.DEFAULTTAG
            }
            return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
        }
        return job.Error(err)
    }
    if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled {
        job.Errorf("IPv4 forwarding is disabled.\n")
    }
    srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image))
    // FIXME: this is necessary because runtime.Create might return a nil
    // container with a non-nil error. This should not happen! Once it's
    // fixed we can remove this workaround.
    if container != nil {
        job.Printf("%s\n", container.ID)
    }
    for _, warning := range buildWarnings {
        job.Errorf("%s\n", warning)
    }
    return engine.StatusOK
}

func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("Usage: %s CONTAINER\n", job.Name)
    }
    var (
        name = job.Args[0]
        t    = 10
    )
    if job.EnvExists("t") {
        t = job.GetenvInt("t")
    }
    if container := srv.runtime.Get(name); container != nil {
        if err := container.Restart(int(t)); err != nil {
            return job.Errorf("Cannot restart container %s: %s\n", name, err)
        }
        srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image))
    } else {
        return job.Errorf("No such container: %s\n", name)
    }
    return engine.StatusOK
}

func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
    if len(job.Args) != 1 {
        return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
    }
    name := job.Args[0]
    removeVolume := job.GetenvBool("removeVolume")
    removeLink := job.GetenvBool("removeLink")
    forceRemove := job.GetenvBool("forceRemove")

    container := srv.runtime.Get(name)

    if removeLink {
        if container == nil {
            return job.Errorf("No such link: %s", name)
        }
        name, err := runtime.GetFullContainerName(name)
        if err != nil {
            return job.Error(err)
        }
        parent, n := path.Split(name)
        if parent == "/" {
            return job.Errorf("Conflict, cannot remove the default name of the container")
        }
        pe := srv.runtime.ContainerGraph().Get(parent)
        if pe == nil {
            return job.Errorf("Cannot get parent %s for name %s", parent, name)
        }
        parentContainer := srv.runtime.Get(pe.ID())

        if parentContainer != nil {
            parentContainer.DisableLink(n)
        }

        if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
            return job.Error(err)
        }
        return engine.StatusOK
    }

    if container != nil {
        if container.State.IsRunning() {
            if forceRemove {
                if err := container.Stop(5); err != nil {
                    return job.Errorf("Could not stop running container, cannot remove - %v", err)
                }
            } else {
                return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
            }
        }
        if err := srv.runtime.Destroy(container); err != nil {
            return job.Errorf("Cannot destroy container %s: %s", name, err)
        }
        srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))

        if removeVolume {
            var (
                volumes     = make(map[string]struct{})
                binds       = make(map[string]struct{})
                usedVolumes = make(map[string]*runtime.Container)
            )

            // The volume id is always the base of the path.
            getVolumeId := func(p string) string {
                return filepath.Base(strings.TrimSuffix(p, "/layer"))
            }

            // Populate the bind map so that binds can be skipped and not removed.
            for _, bind := range container.HostConfig().Binds {
                source := strings.Split(bind, ":")[0]
                // TODO: refactor all volume stuff, all of it.
                // It is very important that we eval the link, or comparing
                // the keys to container.Volumes will not work.
                p, err := filepath.EvalSymlinks(source)
                if err != nil {
                    return job.Error(err)
                }
                source = p
                binds[source] = struct{}{}
            }

            // Store all the deleted container's volumes.
            for _, volumeId := range container.Volumes {
                // Skip the volumes mounted from external bind mounts;
                // those were evaluated for symlinks above.
                if _, exists := binds[volumeId]; exists {
                    continue
                }

                volumeId = getVolumeId(volumeId)
                volumes[volumeId] = struct{}{}
            }

            // Retrieve all volumes from all remaining containers.
            for _, container := range srv.runtime.List() {
                for _, containerVolumeId := range container.Volumes {
                    containerVolumeId = getVolumeId(containerVolumeId)
                    usedVolumes[containerVolumeId] = container
                }
            }

            for volumeId := range volumes {
                // If the requested volume is still in use by another
                // container, keep it and move on.
                if c, exists := usedVolumes[volumeId]; exists {
                    log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
                    continue
                }
                if err := srv.runtime.Volumes().Delete(volumeId); err != nil {
                    return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
                }
            }
        }
    } else {
        return job.Errorf("No such container: %s", name)
    }
    return engine.StatusOK
}
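
// NOTE: the removeVolume branch above is effectively a small mark-and-sweep:
// it collects the dead container's volume ids, masks out host bind mounts,
// indexes every volume still referenced by a surviving container, and only
// then deletes what is left. The decision step in isolation (types here are
// illustrative, not from this file):
//
//    func removable(candidates []string, binds, inUse map[string]bool) []string {
//        var out []string
//        for _, v := range candidates {
//            if binds[v] || inUse[v] {
//                continue // host-owned or still referenced: keep it
//            }
//            out = append(out, v)
//        }
//        return out
//    }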

func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error {
    var (
        repoName, tag string
        tags          = []string{}
    )

    repoName, tag = utils.ParseRepositoryTag(name)
    if tag == "" {
        tag = graph.DEFAULTTAG
    }

    img, err := srv.runtime.Repositories().LookupImage(name)
    if err != nil {
        if r, _ := srv.runtime.Repositories().Get(repoName); r != nil {
            return fmt.Errorf("No such image: %s:%s", repoName, tag)
        }
        return fmt.Errorf("No such image: %s", name)
    }

    if strings.Contains(img.ID, name) {
        repoName = ""
        tag = ""
    }

    byParents, err := srv.runtime.Graph().ByParent()
    if err != nil {
        return err
    }

    // If deleting by id, see if the id belongs to only one repository.
    if repoName == "" {
        for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] {
            parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
            if repoName == "" || repoName == parsedRepo {
                repoName = parsedRepo
                if parsedTag != "" {
                    tags = append(tags, parsedTag)
                }
            } else if repoName != parsedRepo && !force {
                // The id belongs to multiple repos, like base:latest and
                // user:test; in that case return a conflict.
                return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
            }
        }
    } else {
        tags = append(tags, tag)
    }

    if !first && len(tags) > 0 {
        return nil
    }

    // Untag the current image
    for _, tag := range tags {
        tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag)
        if err != nil {
            return err
        }
        if tagDeleted {
            out := &engine.Env{}
            out.Set("Untagged", repoName+":"+tag)
            imgs.Add(out)
            srv.LogEvent("untag", img.ID, "")
        }
    }
    tags = srv.runtime.Repositories().ByID()[img.ID]
    if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
        if len(byParents[img.ID]) == 0 {
            if err := srv.canDeleteImage(img.ID); err != nil {
                return err
            }
            if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil {
                return err
            }
            if err := srv.runtime.Graph().Delete(img.ID); err != nil {
                return err
            }
            out := &engine.Env{}
            out.Set("Deleted", img.ID)
            imgs.Add(out)
            srv.LogEvent("delete", img.ID, "")
            if img.Parent != "" {
                err := srv.DeleteImage(img.Parent, imgs, false, force)
                if first {
                    return err
                }
            }
        }
    }
    return nil
}

func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
    if n := len(job.Args); n != 1 {
        return job.Errorf("Usage: %s IMAGE", job.Name)
    }
    imgs := engine.NewTable("", 0)
    if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil {
        return job.Error(err)
    }
    if len(imgs.Data) == 0 {
        return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
    }
    if _, err := imgs.WriteListTo(job.Stdout); err != nil {
        return job.Error(err)
    }
    return engine.StatusOK
}

func (srv *Server) canDeleteImage(imgID string) error {
    for _, container := range srv.runtime.List() {
        parent, err := srv.runtime.Repositories().LookupImage(container.Image)
        if err != nil {
            return err
        }

        if err := parent.WalkHistory(func(p *image.Image) error {
            if imgID == p.ID {
                return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), utils.TruncateID(container.ID))
            }
            return nil
        }); err != nil {
            return err
        }
    }
    return nil
}
|
| 1971 |
- |
|
| 1972 |
-func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
|
|
| 1973 |
- // Retrieve all images |
|
| 1974 |
- images, err := srv.runtime.Graph().Map() |
|
| 1975 |
- if err != nil {
|
|
| 1976 |
- return nil, err |
|
| 1977 |
- } |
|
| 1978 |
- |
|
| 1979 |
- // Store the tree in a map of map (map[parentId][childId]) |
|
| 1980 |
- imageMap := make(map[string]map[string]struct{})
|
|
| 1981 |
- for _, img := range images {
|
|
| 1982 |
- if _, exists := imageMap[img.Parent]; !exists {
|
|
| 1983 |
- imageMap[img.Parent] = make(map[string]struct{})
|
|
| 1984 |
- } |
|
| 1985 |
- imageMap[img.Parent][img.ID] = struct{}{}
|
|
| 1986 |
- } |
|
| 1987 |
- |
|
| 1988 |
- // Loop on the children of the given image and check the config |
|
| 1989 |
- var match *image.Image |
|
| 1990 |
- for elem := range imageMap[imgID] {
|
|
| 1991 |
- img, err := srv.runtime.Graph().Get(elem) |
|
| 1992 |
- if err != nil {
|
|
| 1993 |
- return nil, err |
|
| 1994 |
- } |
|
| 1995 |
- if runconfig.Compare(&img.ContainerConfig, config) {
|
|
| 1996 |
- if match == nil || match.Created.Before(img.Created) {
|
|
| 1997 |
- match = img |
|
| 1998 |
- } |
|
| 1999 |
- } |
|
| 2000 |
- } |
|
| 2001 |
- return match, nil |
|
| 2002 |
-} |
|
| 2003 |
- |
|
| 2004 |
-func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error {
|
|
| 2005 |
- runtime := srv.runtime |
|
| 2006 |
- |
|
| 2007 |
- if hostConfig != nil && hostConfig.Links != nil {
|
|
| 2008 |
- for _, l := range hostConfig.Links {
|
|
| 2009 |
- parts, err := utils.PartParser("name:alias", l)
|
|
| 2010 |
- if err != nil {
|
|
| 2011 |
- return err |
|
| 2012 |
- } |
|
| 2013 |
- child, err := srv.runtime.GetByName(parts["name"]) |
|
| 2014 |
- if err != nil {
|
|
| 2015 |
- return err |
|
| 2016 |
- } |
|
| 2017 |
- if child == nil {
|
|
| 2018 |
- return fmt.Errorf("Could not get container for %s", parts["name"])
|
|
| 2019 |
- } |
|
| 2020 |
- if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
|
|
| 2021 |
- return err |
|
| 2022 |
- } |
|
| 2023 |
- } |
|
| 2024 |
- |
|
| 2025 |
- // After we load all the links into the runtime |
|
| 2026 |
- // set them to nil on the hostconfig |
|
| 2027 |
- hostConfig.Links = nil |
|
| 2028 |
- if err := container.WriteHostConfig(); err != nil {
|
|
| 2029 |
- return err |
|
| 2030 |
- } |
|
| 2031 |
- } |
|
| 2032 |
- return nil |
|
| 2033 |
-} |
|
| 2034 |
- |
|
| 2035 |
-func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
|
|
| 2036 |
- if len(job.Args) < 1 {
|
|
| 2037 |
- return job.Errorf("Usage: %s container_id", job.Name)
|
|
| 2038 |
- } |
|
| 2039 |
- name := job.Args[0] |
|
| 2040 |
- runtime := srv.runtime |
|
| 2041 |
- container := runtime.Get(name) |
|
| 2042 |
- |
|
| 2043 |
- if container == nil {
|
|
| 2044 |
- return job.Errorf("No such container: %s", name)
|
|
| 2045 |
- } |
|
| 2046 |
- // If no environment was set, then no hostconfig was passed. |
|
| 2047 |
- if len(job.Environ()) > 0 {
|
|
| 2048 |
- hostConfig := runconfig.ContainerHostConfigFromJob(job) |
|
| 2049 |
- // Validate the HostConfig binds. Make sure that: |
|
| 2050 |
- // 1) the source of a bind mount isn't / |
|
| 2051 |
- // The bind mount "/:/foo" isn't allowed. |
|
| 2052 |
- // 2) Check that the source exists |
|
| 2053 |
- // The source to be bind mounted must exist. |
|
| 2054 |
- for _, bind := range hostConfig.Binds {
|
|
| 2055 |
- splitBind := strings.Split(bind, ":") |
|
| 2056 |
- source := splitBind[0] |
|
| 2057 |
- |
|
| 2058 |
- // refuse to bind mount "/" to the container |
|
| 2059 |
- if source == "/" {
|
|
| 2060 |
- return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
|
|
| 2061 |
- } |
|
| 2062 |
- |
|
| 2063 |
- // ensure the source exists on the host |
|
| 2064 |
- _, err := os.Stat(source) |
|
| 2065 |
- if err != nil && os.IsNotExist(err) {
|
|
| 2066 |
- err = os.MkdirAll(source, 0755) |
|
| 2067 |
- if err != nil {
|
|
| 2068 |
- return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
|
|
| 2069 |
- } |
|
| 2070 |
- } |
|
| 2071 |
- } |
|
| 2072 |
- // Register any links from the host config before starting the container |
|
| 2073 |
- if err := srv.RegisterLinks(container, hostConfig); err != nil {
|
|
| 2074 |
- return job.Error(err) |
|
| 2075 |
- } |
|
| 2076 |
- container.SetHostConfig(hostConfig) |
|
| 2077 |
- container.ToDisk() |
|
| 2078 |
- } |
|
| 2079 |
- if err := container.Start(); err != nil {
|
|
| 2080 |
- return job.Errorf("Cannot start container %s: %s", name, err)
|
|
| 2081 |
- } |
|
| 2082 |
- srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image))
|
|
| 2083 |
- |
|
| 2084 |
- return engine.StatusOK |
|
| 2085 |
-} |
|
| 2086 |
- |
|
| 2087 |
-func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
|
|
| 2088 |
- if len(job.Args) != 1 {
|
|
| 2089 |
- return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 2090 |
- } |
|
| 2091 |
- var ( |
|
| 2092 |
- name = job.Args[0] |
|
| 2093 |
- t = 10 |
|
| 2094 |
- ) |
|
| 2095 |
- if job.EnvExists("t") {
|
|
| 2096 |
- t = job.GetenvInt("t")
|
|
| 2097 |
- } |
|
| 2098 |
- if container := srv.runtime.Get(name); container != nil {
|
|
| 2099 |
- if err := container.Stop(int(t)); err != nil {
|
|
| 2100 |
- return job.Errorf("Cannot stop container %s: %s\n", name, err)
|
|
| 2101 |
- } |
|
| 2102 |
- srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 2103 |
- } else {
|
|
| 2104 |
- return job.Errorf("No such container: %s\n", name)
|
|
| 2105 |
- } |
|
| 2106 |
- return engine.StatusOK |
|
| 2107 |
-} |
|
| 2108 |
- |
|
| 2109 |
-func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
|
|
| 2110 |
- if len(job.Args) != 1 {
|
|
| 2111 |
- return job.Errorf("Usage: %s", job.Name)
|
|
| 2112 |
- } |
|
| 2113 |
- name := job.Args[0] |
|
| 2114 |
- if container := srv.runtime.Get(name); container != nil {
|
|
| 2115 |
- status := container.Wait() |
|
| 2116 |
- job.Printf("%d\n", status)
|
|
| 2117 |
- return engine.StatusOK |
|
| 2118 |
- } |
|
| 2119 |
- return job.Errorf("%s: no such container: %s", job.Name, name)
|
|
| 2120 |
-} |
|
| 2121 |
- |
|
| 2122 |
-func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
|
|
| 2123 |
- if len(job.Args) != 3 {
|
|
| 2124 |
- return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
|
|
| 2125 |
- } |
|
| 2126 |
- name := job.Args[0] |
|
| 2127 |
- height, err := strconv.Atoi(job.Args[1]) |
|
| 2128 |
- if err != nil {
|
|
| 2129 |
- return job.Error(err) |
|
| 2130 |
- } |
|
| 2131 |
- width, err := strconv.Atoi(job.Args[2]) |
|
| 2132 |
- if err != nil {
|
|
| 2133 |
- return job.Error(err) |
|
| 2134 |
- } |
|
| 2135 |
- if container := srv.runtime.Get(name); container != nil {
|
|
| 2136 |
- if err := container.Resize(height, width); err != nil {
|
|
| 2137 |
- return job.Error(err) |
|
| 2138 |
- } |
|
| 2139 |
- return engine.StatusOK |
|
| 2140 |
- } |
|
| 2141 |
- return job.Errorf("No such container: %s", name)
|
|
| 2142 |
-} |
|
| 2143 |
- |
|
| 2144 |
-func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
|
|
| 2145 |
- if len(job.Args) != 1 {
|
|
| 2146 |
- return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 2147 |
- } |
|
| 2148 |
- |
|
| 2149 |
- var ( |
|
| 2150 |
- name = job.Args[0] |
|
| 2151 |
- logs = job.GetenvBool("logs")
|
|
| 2152 |
- stream = job.GetenvBool("stream")
|
|
| 2153 |
- stdin = job.GetenvBool("stdin")
|
|
| 2154 |
- stdout = job.GetenvBool("stdout")
|
|
| 2155 |
- stderr = job.GetenvBool("stderr")
|
|
| 2156 |
- ) |
|
| 2157 |
- |
|
| 2158 |
- container := srv.runtime.Get(name) |
|
| 2159 |
- if container == nil {
|
|
| 2160 |
- return job.Errorf("No such container: %s", name)
|
|
| 2161 |
- } |
|
| 2162 |
- |
|
| 2163 |
- // logs |
|
| 2164 |
- if logs {
|
|
| 2165 |
- cLog, err := container.ReadLog("json")
|
|
| 2166 |
- if err != nil && os.IsNotExist(err) {
|
|
| 2167 |
- // Legacy logs |
|
| 2168 |
- utils.Debugf("Old logs format")
|
|
| 2169 |
- if stdout {
|
|
| 2170 |
- cLog, err := container.ReadLog("stdout")
|
|
| 2171 |
- if err != nil {
|
|
| 2172 |
- utils.Errorf("Error reading logs (stdout): %s", err)
|
|
| 2173 |
- } else if _, err := io.Copy(job.Stdout, cLog); err != nil {
|
|
| 2174 |
- utils.Errorf("Error streaming logs (stdout): %s", err)
|
|
| 2175 |
- } |
|
| 2176 |
- } |
|
| 2177 |
- if stderr {
|
|
| 2178 |
- cLog, err := container.ReadLog("stderr")
|
|
| 2179 |
- if err != nil {
|
|
| 2180 |
- utils.Errorf("Error reading logs (stderr): %s", err)
|
|
| 2181 |
- } else if _, err := io.Copy(job.Stderr, cLog); err != nil {
|
|
| 2182 |
- utils.Errorf("Error streaming logs (stderr): %s", err)
|
|
| 2183 |
- } |
|
| 2184 |
- } |
|
| 2185 |
- } else if err != nil {
|
|
| 2186 |
- utils.Errorf("Error reading logs (json): %s", err)
|
|
| 2187 |
- } else {
|
|
| 2188 |
- dec := json.NewDecoder(cLog) |
|
| 2189 |
- for {
|
|
| 2190 |
- l := &utils.JSONLog{}
|
|
| 2191 |
- |
|
| 2192 |
- if err := dec.Decode(l); err == io.EOF {
|
|
| 2193 |
- break |
|
| 2194 |
- } else if err != nil {
|
|
| 2195 |
- utils.Errorf("Error streaming logs: %s", err)
|
|
| 2196 |
- break |
|
| 2197 |
- } |
|
| 2198 |
- if l.Stream == "stdout" && stdout {
|
|
| 2199 |
- fmt.Fprintf(job.Stdout, "%s", l.Log) |
|
| 2200 |
- } |
|
| 2201 |
- if l.Stream == "stderr" && stderr {
|
|
| 2202 |
- fmt.Fprintf(job.Stderr, "%s", l.Log) |
|
| 2203 |
- } |
|
| 2204 |
- } |
|
| 2205 |
- } |
|
| 2206 |
- } |
|
| 2207 |
- |
|
| 2208 |
- // stream |
|
| 2209 |
- if stream {
|
|
| 2210 |
- if container.State.IsGhost() {
|
|
| 2211 |
- return job.Errorf("Impossible to attach to a ghost container")
|
|
| 2212 |
- } |
|
| 2213 |
- |
|
| 2214 |
- var ( |
|
| 2215 |
- cStdin io.ReadCloser |
|
| 2216 |
- cStdout, cStderr io.Writer |
|
| 2217 |
- cStdinCloser io.Closer |
|
| 2218 |
- ) |
|
| 2219 |
- |
|
| 2220 |
- if stdin {
|
|
| 2221 |
- r, w := io.Pipe() |
|
| 2222 |
- go func() {
|
|
| 2223 |
- defer w.Close() |
|
| 2224 |
- defer utils.Debugf("Closing buffered stdin pipe")
|
|
| 2225 |
- io.Copy(w, job.Stdin) |
|
| 2226 |
- }() |
|
| 2227 |
- cStdin = r |
|
| 2228 |
- cStdinCloser = job.Stdin |
|
| 2229 |
- } |
|
| 2230 |
- if stdout {
|
|
| 2231 |
- cStdout = job.Stdout |
|
| 2232 |
- } |
|
| 2233 |
- if stderr {
|
|
| 2234 |
- cStderr = job.Stderr |
|
| 2235 |
- } |
|
| 2236 |
- |
|
| 2237 |
- <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) |
|
| 2238 |
- |
|
| 2239 |
- // If we are in stdinonce mode, wait for the process to end |
|
| 2240 |
- // otherwise, simply return |
|
| 2241 |
- if container.Config.StdinOnce && !container.Config.Tty {
|
|
| 2242 |
- container.Wait() |
|
| 2243 |
- } |
|
| 2244 |
- } |
|
| 2245 |
- return engine.StatusOK |
|
| 2246 |
-} |
|
| 2247 |
- |
|
| 2248 |
-func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) {
|
|
| 2249 |
- if container := srv.runtime.Get(name); container != nil {
|
|
| 2250 |
- return container, nil |
|
| 2251 |
- } |
|
| 2252 |
- return nil, fmt.Errorf("No such container: %s", name)
|
|
| 2253 |
-} |
|
| 2254 |
- |
|
| 2255 |
-func (srv *Server) ImageInspect(name string) (*image.Image, error) {
|
|
| 2256 |
- if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil {
|
|
| 2257 |
- return image, nil |
|
| 2258 |
- } |
|
| 2259 |
- return nil, fmt.Errorf("No such image: %s", name)
|
|
| 2260 |
-} |
|
| 2261 |
- |
|
| 2262 |
-func (srv *Server) JobInspect(job *engine.Job) engine.Status {
|
|
| 2263 |
- // TODO: deprecate KIND/conflict |
|
| 2264 |
- if n := len(job.Args); n != 2 {
|
|
| 2265 |
- return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name)
|
|
| 2266 |
- } |
|
| 2267 |
- var ( |
|
| 2268 |
- name = job.Args[0] |
|
| 2269 |
- kind = job.Args[1] |
|
| 2270 |
- object interface{}
|
|
| 2271 |
- conflict = job.GetenvBool("conflict") // should the job detect conflicts between containers and images
|
|
| 2272 |
- image, errImage = srv.ImageInspect(name) |
|
| 2273 |
- container, errContainer = srv.ContainerInspect(name) |
|
| 2274 |
- ) |
|
| 2275 |
- |
|
| 2276 |
- if conflict && image != nil && container != nil {
|
|
| 2277 |
- return job.Errorf("Conflict between containers and images")
|
|
| 2278 |
- } |
|
| 2279 |
- |
|
| 2280 |
- switch kind {
|
|
| 2281 |
- case "image": |
|
| 2282 |
- if errImage != nil {
|
|
| 2283 |
- return job.Error(errImage) |
|
| 2284 |
- } |
|
| 2285 |
- object = image |
|
| 2286 |
- case "container": |
|
| 2287 |
- if errContainer != nil {
|
|
| 2288 |
- return job.Error(errContainer) |
|
| 2289 |
- } |
|
| 2290 |
- object = &struct {
|
|
| 2291 |
- *runtime.Container |
|
| 2292 |
- HostConfig *runconfig.HostConfig |
|
| 2293 |
- }{container, container.HostConfig()}
|
|
| 2294 |
- default: |
|
| 2295 |
- return job.Errorf("Unknown kind: %s", kind)
|
|
| 2296 |
- } |
|
| 2297 |
- |
|
| 2298 |
- b, err := json.Marshal(object) |
|
| 2299 |
- if err != nil {
|
|
| 2300 |
- return job.Error(err) |
|
| 2301 |
- } |
|
| 2302 |
- job.Stdout.Write(b) |
|
| 2303 |
- return engine.StatusOK |
|
| 2304 |
-} |
|
| 2305 |
- |
|
| 2306 |
-func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
|
|
| 2307 |
- if len(job.Args) != 2 {
|
|
| 2308 |
- return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
|
|
| 2309 |
- } |
|
| 2310 |
- |
|
| 2311 |
- var ( |
|
| 2312 |
- name = job.Args[0] |
|
| 2313 |
- resource = job.Args[1] |
|
| 2314 |
- ) |
|
| 2315 |
- |
|
| 2316 |
- if container := srv.runtime.Get(name); container != nil {
|
|
| 2317 |
- |
|
| 2318 |
- data, err := container.Copy(resource) |
|
| 2319 |
- if err != nil {
|
|
| 2320 |
- return job.Error(err) |
|
| 2321 |
- } |
|
| 2322 |
- defer data.Close() |
|
| 2323 |
- |
|
| 2324 |
- if _, err := io.Copy(job.Stdout, data); err != nil {
|
|
| 2325 |
- return job.Error(err) |
|
| 2326 |
- } |
|
| 2327 |
- return engine.StatusOK |
|
| 2328 |
- } |
|
| 2329 |
- return job.Errorf("No such container: %s", name)
|
|
| 2330 |
-} |
|
| 2331 |
- |
|
| 2332 |
-func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
|
|
| 2333 |
- runtime, err := runtime.NewRuntime(config, eng) |
|
| 2334 |
- if err != nil {
|
|
| 2335 |
- return nil, err |
|
| 2336 |
- } |
|
| 2337 |
- srv := &Server{
|
|
| 2338 |
- Eng: eng, |
|
| 2339 |
- runtime: runtime, |
|
| 2340 |
- pullingPool: make(map[string]chan struct{}),
|
|
| 2341 |
- pushingPool: make(map[string]chan struct{}),
|
|
| 2342 |
- events: make([]utils.JSONMessage, 0, 64), // only keeps the last 64 events |
|
| 2343 |
- listeners: make(map[string]chan utils.JSONMessage), |
|
| 2344 |
- running: true, |
|
| 2345 |
- } |
|
| 2346 |
- runtime.SetServer(srv) |
|
| 2347 |
- return srv, nil |
|
| 2348 |
-} |
|
| 2349 |
- |
|
| 2350 |
-func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
|
|
| 2351 |
- httpVersion := make([]utils.VersionInfo, 0, 4) |
|
| 2352 |
- httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION})
|
|
| 2353 |
- httpVersion = append(httpVersion, &simpleVersionInfo{"go", goruntime.Version()})
|
|
| 2354 |
- httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT})
|
|
| 2355 |
- if kernelVersion, err := utils.GetKernelVersion(); err == nil {
|
|
| 2356 |
- httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()})
|
|
| 2357 |
- } |
|
| 2358 |
- httpVersion = append(httpVersion, &simpleVersionInfo{"os", goruntime.GOOS})
|
|
| 2359 |
- httpVersion = append(httpVersion, &simpleVersionInfo{"arch", goruntime.GOARCH})
|
|
| 2360 |
- ud := utils.NewHTTPUserAgentDecorator(httpVersion...) |
|
| 2361 |
- md := &utils.HTTPMetaHeadersDecorator{
|
|
| 2362 |
- Headers: metaHeaders, |
|
| 2363 |
- } |
|
| 2364 |
- factory := utils.NewHTTPRequestFactory(ud, md) |
|
| 2365 |
- return factory |
|
| 2366 |
-} |
|
| 2367 |
- |
|
| 2368 |
-func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
|
|
| 2369 |
- now := time.Now().UTC().Unix() |
|
| 2370 |
- jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
|
|
| 2371 |
- srv.AddEvent(jm) |
|
| 2372 |
- for _, c := range srv.listeners {
|
|
| 2373 |
- select { // non-blocking send on the listener channel
|
|
| 2374 |
- case c <- jm: |
|
| 2375 |
- default: |
|
| 2376 |
- } |
|
| 2377 |
- } |
|
| 2378 |
- return &jm |
|
| 2379 |
-} |
|
| 2380 |
- |
|
| 2381 |
-func (srv *Server) AddEvent(jm utils.JSONMessage) {
|
|
| 2382 |
- srv.Lock() |
|
| 2383 |
- defer srv.Unlock() |
|
| 2384 |
- srv.events = append(srv.events, jm) |
|
| 2385 |
-} |
|
| 2386 |
- |
|
| 2387 |
-func (srv *Server) GetEvents() []utils.JSONMessage {
|
|
| 2388 |
- srv.RLock() |
|
| 2389 |
- defer srv.RUnlock() |
|
| 2390 |
- return srv.events |
|
| 2391 |
-} |
|
| 2392 |
- |
|
| 2393 |
-func (srv *Server) SetRunning(status bool) {
|
|
| 2394 |
- srv.Lock() |
|
| 2395 |
- defer srv.Unlock() |
|
| 2396 |
- |
|
| 2397 |
- srv.running = status |
|
| 2398 |
-} |
|
| 2399 |
- |
|
| 2400 |
-func (srv *Server) IsRunning() bool {
|
|
| 2401 |
- srv.RLock() |
|
| 2402 |
- defer srv.RUnlock() |
|
| 2403 |
- return srv.running |
|
| 2404 |
-} |
|
| 2405 |
- |
|
| 2406 |
-func (srv *Server) Close() error {
|
|
| 2407 |
- if srv == nil {
|
|
| 2408 |
- return nil |
|
| 2409 |
- } |
|
| 2410 |
- srv.SetRunning(false) |
|
| 2411 |
- if srv.runtime == nil {
|
|
| 2412 |
- return nil |
|
| 2413 |
- } |
|
| 2414 |
- return srv.runtime.Close() |
|
| 2415 |
-} |
|
| 2416 |
- |
|
| 2417 |
-type Server struct {
|
|
| 2418 |
- sync.RWMutex |
|
| 2419 |
- runtime *runtime.Runtime |
|
| 2420 |
- pullingPool map[string]chan struct{}
|
|
| 2421 |
- pushingPool map[string]chan struct{}
|
|
| 2422 |
- events []utils.JSONMessage |
|
| 2423 |
- listeners map[string]chan utils.JSONMessage |
|
| 2424 |
- Eng *engine.Engine |
|
| 2425 |
- running bool |
|
| 2426 |
-} |
| 2427 | 1 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,799 @@ |
| 0 |
+package server |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "crypto/sha256" |
|
| 4 |
+ "encoding/hex" |
|
| 5 |
+ "encoding/json" |
|
| 6 |
+ "errors" |
|
| 7 |
+ "fmt" |
|
| 8 |
+ "github.com/dotcloud/docker/archive" |
|
| 9 |
+ "github.com/dotcloud/docker/nat" |
|
| 10 |
+ "github.com/dotcloud/docker/registry" |
|
| 11 |
+ "github.com/dotcloud/docker/runconfig" |
|
| 12 |
+ "github.com/dotcloud/docker/runtime" |
|
| 13 |
+ "github.com/dotcloud/docker/utils" |
|
| 14 |
+ "io" |
|
| 15 |
+ "io/ioutil" |
|
| 16 |
+ "net/url" |
|
| 17 |
+ "os" |
|
| 18 |
+ "path" |
|
| 19 |
+ "path/filepath" |
|
| 20 |
+ "reflect" |
|
| 21 |
+ "regexp" |
|
| 22 |
+ "sort" |
|
| 23 |
+ "strings" |
|
| 24 |
+) |
|
| 25 |
+ |
|
| 26 |
+var ( |
|
| 27 |
+ ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty")
|
|
| 28 |
+) |
|
| 29 |
+ |
|
| 30 |
+type BuildFile interface {
|
|
| 31 |
+ Build(io.Reader) (string, error) |
|
| 32 |
+ CmdFrom(string) error |
|
| 33 |
+ CmdRun(string) error |
|
| 34 |
+} |
|
| 35 |
+ |
|
| 36 |
+type buildFile struct {
|
|
| 37 |
+ runtime *runtime.Runtime |
|
| 38 |
+ srv *Server |
|
| 39 |
+ |
|
| 40 |
+ image string |
|
| 41 |
+ maintainer string |
|
| 42 |
+ config *runconfig.Config |
|
| 43 |
+ |
|
| 44 |
+ contextPath string |
|
| 45 |
+ context *utils.TarSum |
|
| 46 |
+ |
|
| 47 |
+ verbose bool |
|
| 48 |
+ utilizeCache bool |
|
| 49 |
+ rm bool |
|
| 50 |
+ |
|
| 51 |
+ authConfig *registry.AuthConfig |
|
| 52 |
+ configFile *registry.ConfigFile |
|
| 53 |
+ |
|
| 54 |
+ tmpContainers map[string]struct{}
|
|
| 55 |
+ tmpImages map[string]struct{}
|
|
| 56 |
+ |
|
| 57 |
+ outStream io.Writer |
|
| 58 |
+ errStream io.Writer |
|
| 59 |
+ |
|
| 60 |
+ // Deprecated, original writer used for ImagePull. To be removed. |
|
| 61 |
+ outOld io.Writer |
|
| 62 |
+ sf *utils.StreamFormatter |
|
| 63 |
+} |
|
| 64 |
+ |
|
| 65 |
+func (b *buildFile) clearTmp(containers map[string]struct{}) {
|
|
| 66 |
+ for c := range containers {
|
|
| 67 |
+ tmp := b.runtime.Get(c) |
|
| 68 |
+ if err := b.runtime.Destroy(tmp); err != nil {
|
|
| 69 |
+ fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) |
|
| 70 |
+ } else {
|
|
| 71 |
+ fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) |
|
| 72 |
+ } |
|
| 73 |
+ } |
|
| 74 |
+} |
|
| 75 |
+ |
|
| 76 |
+func (b *buildFile) CmdFrom(name string) error {
|
|
| 77 |
+ image, err := b.runtime.Repositories().LookupImage(name) |
|
| 78 |
+ if err != nil {
|
|
| 79 |
+ if b.runtime.Graph().IsNotExist(err) {
|
|
| 80 |
+ remote, tag := utils.ParseRepositoryTag(name) |
|
| 81 |
+ pullRegistryAuth := b.authConfig |
|
| 82 |
+ if len(b.configFile.Configs) > 0 {
|
|
| 83 |
+ // The request came with a full auth config file, so we prefer to use it |
|
| 84 |
+ endpoint, _, err := registry.ResolveRepositoryName(remote) |
|
| 85 |
+ if err != nil {
|
|
| 86 |
+ return err |
|
| 87 |
+ } |
|
| 88 |
+ resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) |
|
| 89 |
+ pullRegistryAuth = &resolvedAuth |
|
| 90 |
+ } |
|
| 91 |
+ job := b.srv.Eng.Job("pull", remote, tag)
|
|
| 92 |
+ job.SetenvBool("json", b.sf.Json())
|
|
| 93 |
+ job.SetenvBool("parallel", true)
|
|
| 94 |
+ job.SetenvJson("authConfig", pullRegistryAuth)
|
|
| 95 |
+ job.Stdout.Add(b.outOld) |
|
| 96 |
+ if err := job.Run(); err != nil {
|
|
| 97 |
+ return err |
|
| 98 |
+ } |
|
| 99 |
+ image, err = b.runtime.Repositories().LookupImage(name) |
|
| 100 |
+ if err != nil {
|
|
| 101 |
+ return err |
|
| 102 |
+ } |
|
| 103 |
+ } else {
|
|
| 104 |
+ return err |
|
| 105 |
+ } |
|
| 106 |
+ } |
|
| 107 |
+ b.image = image.ID |
|
| 108 |
+ b.config = &runconfig.Config{}
|
|
| 109 |
+ if image.Config != nil {
|
|
| 110 |
+ b.config = image.Config |
|
| 111 |
+ } |
|
| 112 |
+ if b.config.Env == nil || len(b.config.Env) == 0 {
|
|
| 113 |
+ b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv) |
|
| 114 |
+ } |
|
| 115 |
+ // Process ONBUILD triggers if they exist |
|
| 116 |
+ if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
|
|
| 117 |
+ fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) |
|
| 118 |
+ } |
|
| 119 |
+ for n, step := range b.config.OnBuild {
|
|
| 120 |
+ splitStep := strings.Split(step, " ") |
|
| 121 |
+ stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) |
|
| 122 |
+ switch stepInstruction {
|
|
| 123 |
+ case "ONBUILD": |
|
| 124 |
+ return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step)
|
|
| 125 |
+ case "MAINTAINER", "FROM": |
|
| 126 |
+ return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step)
|
|
| 127 |
+ } |
|
| 128 |
+ if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil {
|
|
| 129 |
+ return err |
|
| 130 |
+ } |
|
| 131 |
+ } |
|
| 132 |
+ b.config.OnBuild = []string{}
|
|
| 133 |
+ return nil |
|
| 134 |
+} |
|
| 135 |
+ |
|
| 136 |
+// The ONBUILD command declares a build instruction to be executed in any future build |
|
| 137 |
+// using the current image as a base. |
|
| 138 |
+func (b *buildFile) CmdOnbuild(trigger string) error {
|
|
| 139 |
+ splitTrigger := strings.Split(trigger, " ") |
|
| 140 |
+ triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " ")) |
|
| 141 |
+ switch triggerInstruction {
|
|
| 142 |
+ case "ONBUILD": |
|
| 143 |
+ return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
|
|
| 144 |
+ case "MAINTAINER", "FROM": |
|
| 145 |
+ return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
|
|
| 146 |
+ } |
|
| 147 |
+ b.config.OnBuild = append(b.config.OnBuild, trigger) |
|
| 148 |
+ return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger))
|
|
| 149 |
+} |
|
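+// For illustration (hypothetical image name and paths): a base image built |
+// with "ONBUILD ADD . /app/src" causes any later build whose Dockerfile |
+// starts with "FROM that-base" to replay the ADD as its first steps, |
+// via the trigger loop in CmdFrom above. |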
| 150 |
+ |
|
| 151 |
+func (b *buildFile) CmdMaintainer(name string) error {
|
|
| 152 |
+ b.maintainer = name |
|
| 153 |
+ return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name))
|
|
| 154 |
+} |
|
| 155 |
+ |
|
| 156 |
+// probeCache checks to see if image-caching is enabled (`b.utilizeCache`) |
|
| 157 |
+// and if so attempts to look up the current `b.image` and `b.config` pair |
|
| 158 |
+// in the current server `b.srv`. If an image is found, probeCache returns |
|
| 159 |
+// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there |
|
| 160 |
+// is any error, it returns `(false, err)`. |
|
| 161 |
+func (b *buildFile) probeCache() (bool, error) {
|
|
| 162 |
+ if b.utilizeCache {
|
|
| 163 |
+ if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil {
|
|
| 164 |
+ return false, err |
|
| 165 |
+ } else if cache != nil {
|
|
| 166 |
+ fmt.Fprintf(b.outStream, " ---> Using cache\n") |
|
| 167 |
+ utils.Debugf("[BUILDER] Use cached version")
|
|
| 168 |
+ b.image = cache.ID |
|
| 169 |
+ return true, nil |
|
| 170 |
+ } else {
|
|
| 171 |
+ utils.Debugf("[BUILDER] Cache miss")
|
|
| 172 |
+ } |
|
| 173 |
+ } |
|
| 174 |
+ return false, nil |
|
| 175 |
+} |
|
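+// Callers follow the pattern visible in CmdRun below: probe first, and |
+// on a hit skip the step entirely, since b.image already points at the |
+// cached image: |
+// |
+//	if hit, err := b.probeCache(); err != nil { |
+//		return err |
+//	} else if hit { |
+//		return nil |
+//	} |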
| 176 |
+ |
|
| 177 |
+func (b *buildFile) CmdRun(args string) error {
|
|
| 178 |
+ if b.image == "" {
|
|
| 179 |
+ return fmt.Errorf("Please provide a source image with `from` prior to run")
|
|
| 180 |
+ } |
|
| 181 |
+ config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil)
|
|
| 182 |
+ if err != nil {
|
|
| 183 |
+ return err |
|
| 184 |
+ } |
|
| 185 |
+ |
|
| 186 |
+ cmd := b.config.Cmd |
|
| 187 |
+ b.config.Cmd = nil |
|
| 188 |
+ runconfig.Merge(b.config, config) |
|
| 189 |
+ |
|
| 190 |
+ defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
|
|
| 191 |
+ |
|
| 192 |
+ utils.Debugf("Command to be executed: %v", b.config.Cmd)
|
|
| 193 |
+ |
|
| 194 |
+ hit, err := b.probeCache() |
|
| 195 |
+ if err != nil {
|
|
| 196 |
+ return err |
|
| 197 |
+ } |
|
| 198 |
+ if hit {
|
|
| 199 |
+ return nil |
|
| 200 |
+ } |
|
| 201 |
+ |
|
| 202 |
+ c, err := b.create() |
|
| 203 |
+ if err != nil {
|
|
| 204 |
+ return err |
|
| 205 |
+ } |
|
| 206 |
+ // Ensure that we keep the container mounted until the commit |
|
| 207 |
+ // to avoid unmounting and then mounting directly again |
|
| 208 |
+ c.Mount() |
|
| 209 |
+ defer c.Unmount() |
|
| 210 |
+ |
|
| 211 |
+ err = b.run(c) |
|
| 212 |
+ if err != nil {
|
|
| 213 |
+ return err |
|
| 214 |
+ } |
|
| 215 |
+ if err := b.commit(c.ID, cmd, "run"); err != nil {
|
|
| 216 |
+ return err |
|
| 217 |
+ } |
|
| 218 |
+ |
|
| 219 |
+ return nil |
|
| 220 |
+} |
|
| 221 |
+ |
|
| 222 |
+func (b *buildFile) FindEnvKey(key string) int {
|
|
| 223 |
+ for k, envVar := range b.config.Env {
|
|
| 224 |
+ envParts := strings.SplitN(envVar, "=", 2) |
|
| 225 |
+ if key == envParts[0] {
|
|
| 226 |
+ return k |
|
| 227 |
+ } |
|
| 228 |
+ } |
|
| 229 |
+ return -1 |
|
| 230 |
+} |
|
| 231 |
+ |
|
| 232 |
+func (b *buildFile) ReplaceEnvMatches(value string) (string, error) {
|
|
| 233 |
+ exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)")
|
|
| 234 |
+ if err != nil {
|
|
| 235 |
+ return value, err |
|
| 236 |
+ } |
|
| 237 |
+ matches := exp.FindAllString(value, -1) |
|
| 238 |
+ for _, match := range matches {
|
|
| 239 |
+ match = match[strings.Index(match, "$"):] |
|
| 240 |
+ matchKey := strings.Trim(match, "${}")
|
|
| 241 |
+ |
|
| 242 |
+ for _, envVar := range b.config.Env {
|
|
| 243 |
+ envParts := strings.SplitN(envVar, "=", 2) |
|
| 244 |
+ envKey := envParts[0] |
|
| 245 |
+ envValue := envParts[1] |
|
| 246 |
+ |
|
| 247 |
+ if envKey == matchKey {
|
|
| 248 |
+ value = strings.Replace(value, match, envValue, -1) |
|
| 249 |
+ break |
|
| 250 |
+ } |
|
| 251 |
+ } |
|
| 252 |
+ } |
|
| 253 |
+ return value, nil |
|
| 254 |
+} |
|
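+// For illustration, assuming b.config.Env already holds "FOO=bar": both |
+// the "$FOO" and "${FOO}" forms match, so ReplaceEnvMatches("dest-$FOO/") |
+// would return "dest-bar/". |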
| 255 |
+ |
|
| 256 |
+func (b *buildFile) CmdEnv(args string) error {
|
|
| 257 |
+ tmp := strings.SplitN(args, " ", 2) |
|
| 258 |
+ if len(tmp) != 2 {
|
|
| 259 |
+ return fmt.Errorf("Invalid ENV format")
|
|
| 260 |
+ } |
|
| 261 |
+ key := strings.Trim(tmp[0], " \t") |
|
| 262 |
+ value := strings.Trim(tmp[1], " \t") |
|
| 263 |
+ |
|
| 264 |
+ envKey := b.FindEnvKey(key) |
|
| 265 |
+ replacedValue, err := b.ReplaceEnvMatches(value) |
|
| 266 |
+ if err != nil {
|
|
| 267 |
+ return err |
|
| 268 |
+ } |
|
| 269 |
+ replacedVar := fmt.Sprintf("%s=%s", key, replacedValue)
|
|
| 270 |
+ |
|
| 271 |
+ if envKey >= 0 {
|
|
| 272 |
+ b.config.Env[envKey] = replacedVar |
|
| 273 |
+ } else {
|
|
| 274 |
+ b.config.Env = append(b.config.Env, replacedVar) |
|
| 275 |
+ } |
|
| 276 |
+ return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar))
|
|
| 277 |
+} |
|
| 278 |
+ |
|
| 279 |
+func (b *buildFile) buildCmdFromJson(args string) []string {
|
|
| 280 |
+ var cmd []string |
|
| 281 |
+ if err := json.Unmarshal([]byte(args), &cmd); err != nil {
|
|
| 282 |
+ utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err)
|
|
| 283 |
+ cmd = []string{"/bin/sh", "-c", args}
|
|
| 284 |
+ } |
|
| 285 |
+ return cmd |
|
| 286 |
+} |
|
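+// For illustration: the JSON (exec) form `["echo", "hi"]` unmarshals to |
+// {"echo", "hi"} as-is, while the shell form `echo hi` fails to parse as |
+// JSON and falls back to {"/bin/sh", "-c", "echo hi"}. |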
| 287 |
+ |
|
| 288 |
+func (b *buildFile) CmdCmd(args string) error {
|
|
| 289 |
+ cmd := b.buildCmdFromJson(args) |
|
| 290 |
+ b.config.Cmd = cmd |
|
| 291 |
+ if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil {
|
|
| 292 |
+ return err |
|
| 293 |
+ } |
|
| 294 |
+ return nil |
|
| 295 |
+} |
|
| 296 |
+ |
|
| 297 |
+func (b *buildFile) CmdEntrypoint(args string) error {
|
|
| 298 |
+ entrypoint := b.buildCmdFromJson(args) |
|
| 299 |
+ b.config.Entrypoint = entrypoint |
|
| 300 |
+ if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil {
|
|
| 301 |
+ return err |
|
| 302 |
+ } |
|
| 303 |
+ return nil |
|
| 304 |
+} |
|
| 305 |
+ |
|
| 306 |
+func (b *buildFile) CmdExpose(args string) error {
|
|
| 307 |
+ portsTab := strings.Split(args, " ") |
|
| 308 |
+ |
|
| 309 |
+ if b.config.ExposedPorts == nil {
|
|
| 310 |
+ b.config.ExposedPorts = make(nat.PortSet) |
|
| 311 |
+ } |
|
| 312 |
+ ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) |
|
| 313 |
+ if err != nil {
|
|
| 314 |
+ return err |
|
| 315 |
+ } |
|
| 316 |
+ for port := range ports {
|
|
| 317 |
+ if _, exists := b.config.ExposedPorts[port]; !exists {
|
|
| 318 |
+ b.config.ExposedPorts[port] = struct{}{}
|
|
| 319 |
+ } |
|
| 320 |
+ } |
|
| 321 |
+ b.config.PortSpecs = nil |
|
| 322 |
+ |
|
| 323 |
+ return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
|
|
| 324 |
+} |
|
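+// For illustration: `EXPOSE 80 443` splits into {"80", "443"}, is merged |
+// with any legacy PortSpecs through nat.ParsePortSpecs, and each parsed |
+// port is added to ExposedPorts only when not already present. |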
| 325 |
+ |
|
| 326 |
+func (b *buildFile) CmdUser(args string) error {
|
|
| 327 |
+ b.config.User = args |
|
| 328 |
+ return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args))
|
|
| 329 |
+} |
|
| 330 |
+ |
|
| 331 |
+func (b *buildFile) CmdInsert(args string) error {
|
|
| 332 |
+ return fmt.Errorf("INSERT has been deprecated. Please use ADD instead")
|
|
| 333 |
+} |
|
| 334 |
+ |
|
| 335 |
+func (b *buildFile) CmdCopy(args string) error {
|
|
| 336 |
+ return fmt.Errorf("COPY has been deprecated. Please use ADD instead")
|
|
| 337 |
+} |
|
| 338 |
+ |
|
| 339 |
+func (b *buildFile) CmdWorkdir(workdir string) error {
|
|
| 340 |
+ b.config.WorkingDir = workdir |
|
| 341 |
+ return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
|
|
| 342 |
+} |
|
| 343 |
+ |
|
| 344 |
+func (b *buildFile) CmdVolume(args string) error {
|
|
| 345 |
+ if args == "" {
|
|
| 346 |
+ return fmt.Errorf("Volume cannot be empty")
|
|
| 347 |
+ } |
|
| 348 |
+ |
|
| 349 |
+ var volume []string |
|
| 350 |
+ if err := json.Unmarshal([]byte(args), &volume); err != nil {
|
|
| 351 |
+ volume = []string{args}
|
|
| 352 |
+ } |
|
| 353 |
+ if b.config.Volumes == nil {
|
|
| 354 |
+ b.config.Volumes = map[string]struct{}{}
|
|
| 355 |
+ } |
|
| 356 |
+ for _, v := range volume {
|
|
| 357 |
+ b.config.Volumes[v] = struct{}{}
|
|
| 358 |
+ } |
|
| 359 |
+ if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil {
|
|
| 360 |
+ return err |
|
| 361 |
+ } |
|
| 362 |
+ return nil |
|
| 363 |
+} |
|
| 364 |
+ |
|
| 365 |
+func (b *buildFile) checkPathForAddition(orig string) error {
|
|
| 366 |
+ origPath := path.Join(b.contextPath, orig) |
|
| 367 |
+ if p, err := filepath.EvalSymlinks(origPath); err != nil {
|
|
| 368 |
+ if os.IsNotExist(err) {
|
|
| 369 |
+ return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 370 |
+ } |
|
| 371 |
+ return err |
|
| 372 |
+ } else {
|
|
| 373 |
+ origPath = p |
|
| 374 |
+ } |
|
| 375 |
+ if !strings.HasPrefix(origPath, b.contextPath) {
|
|
| 376 |
+ return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath)
|
|
| 377 |
+ } |
|
| 378 |
+ _, err := os.Stat(origPath) |
|
| 379 |
+ if err != nil {
|
|
| 380 |
+ if os.IsNotExist(err) {
|
|
| 381 |
+ return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 382 |
+ } |
|
| 383 |
+ return err |
|
| 384 |
+ } |
|
| 385 |
+ return nil |
|
| 386 |
+} |
|
| 387 |
+ |
|
| 388 |
+func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
|
|
| 389 |
+ var ( |
|
| 390 |
+ origPath = path.Join(b.contextPath, orig) |
|
| 391 |
+ destPath = path.Join(container.RootfsPath(), dest) |
|
| 392 |
+ ) |
|
| 393 |
+ // Preserve the trailing '/' |
|
| 394 |
+ if strings.HasSuffix(dest, "/") {
|
|
| 395 |
+ destPath = destPath + "/" |
|
| 396 |
+ } |
|
| 397 |
+ fi, err := os.Stat(origPath) |
|
| 398 |
+ if err != nil {
|
|
| 399 |
+ if os.IsNotExist(err) {
|
|
| 400 |
+ return fmt.Errorf("%s: no such file or directory", orig)
|
|
| 401 |
+ } |
|
| 402 |
+ return err |
|
| 403 |
+ } |
|
| 404 |
+ |
|
| 405 |
+ if fi.IsDir() {
|
|
| 406 |
+ if err := archive.CopyWithTar(origPath, destPath); err != nil {
|
|
| 407 |
+ return err |
|
| 408 |
+ } |
|
| 409 |
+ return nil |
|
| 410 |
+ } |
|
| 411 |
+ |
|
| 412 |
+ // First, try to unpack the source as an archive. To support the untar |
|
| 413 |
+ // feature we need to clean up the path a little, because tar is very |
|
| 414 |
+ // forgiving: strip the archive's filename from the destination path; |
|
| 415 |
+ // the filename is only part of the path when it does not end in "/". |
|
| 416 |
+ tarDest := destPath |
|
| 417 |
+ if strings.HasSuffix(tarDest, "/") {
|
|
| 418 |
+ tarDest = filepath.Dir(destPath) |
|
| 419 |
+ } |
|
| 420 |
+ |
|
| 421 |
+ // If we are adding a remote file, do not try to untar it |
|
| 422 |
+ if !remote {
|
|
| 423 |
+ // try to successfully untar the orig |
|
| 424 |
+ if err := archive.UntarPath(origPath, tarDest); err == nil {
|
|
| 425 |
+ return nil |
|
| 426 |
+ } |
|
| 427 |
+ utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err)
|
|
| 428 |
+ } |
|
| 429 |
+ |
|
| 430 |
+ // If that fails, just copy it as a regular file |
|
| 431 |
+ // but do not use all the magic path handling for the tar path |
|
| 432 |
+ if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil {
|
|
| 433 |
+ return err |
|
| 434 |
+ } |
|
| 435 |
+ if err := archive.CopyWithTar(origPath, destPath); err != nil {
|
|
| 436 |
+ return err |
|
| 437 |
+ } |
|
| 438 |
+ return nil |
|
| 439 |
+} |
|
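+// Net effect of the rules above, sketched with hypothetical paths: |
+//	ADD src/ /app/         -> directory: recursive CopyWithTar |
+//	ADD rootfs.tar.gz /    -> local archive: unpacked via UntarPath |
+//	ADD http://x/f /dst/   -> remote: downloaded and copied, never unpacked |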
| 440 |
+ |
|
| 441 |
+func (b *buildFile) CmdAdd(args string) error {
|
|
| 442 |
+ if b.context == nil {
|
|
| 443 |
+ return fmt.Errorf("No context given. Impossible to use ADD")
|
|
| 444 |
+ } |
|
| 445 |
+ tmp := strings.SplitN(args, " ", 2) |
|
| 446 |
+ if len(tmp) != 2 {
|
|
| 447 |
+ return fmt.Errorf("Invalid ADD format")
|
|
| 448 |
+ } |
|
| 449 |
+ |
|
| 450 |
+ orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) |
|
| 451 |
+ if err != nil {
|
|
| 452 |
+ return err |
|
| 453 |
+ } |
|
| 454 |
+ |
|
| 455 |
+ dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) |
|
| 456 |
+ if err != nil {
|
|
| 457 |
+ return err |
|
| 458 |
+ } |
|
| 459 |
+ |
|
| 460 |
+ cmd := b.config.Cmd |
|
| 461 |
+ b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)}
|
|
| 462 |
+ b.config.Image = b.image |
|
| 463 |
+ |
|
| 464 |
+ var ( |
|
| 465 |
+ origPath = orig |
|
| 466 |
+ destPath = dest |
|
| 467 |
+ remoteHash string |
|
| 468 |
+ isRemote bool |
|
| 469 |
+ ) |
|
| 470 |
+ |
|
| 471 |
+ if utils.IsURL(orig) {
|
|
| 472 |
+ isRemote = true |
|
| 473 |
+ resp, err := utils.Download(orig) |
|
| 474 |
+ if err != nil {
|
|
| 475 |
+ return err |
|
| 476 |
+ } |
|
| 477 |
+ tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") |
|
| 478 |
+ if err != nil {
|
|
| 479 |
+ return err |
|
| 480 |
+ } |
|
| 481 |
+ tmpFileName := path.Join(tmpDirName, "tmp") |
|
| 482 |
+ tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) |
|
| 483 |
+ if err != nil {
|
|
| 484 |
+ return err |
|
| 485 |
+ } |
|
| 486 |
+ defer os.RemoveAll(tmpDirName) |
|
| 487 |
+ if _, err = io.Copy(tmpFile, resp.Body); err != nil {
|
|
| 488 |
+ tmpFile.Close() |
|
| 489 |
+ return err |
|
| 490 |
+ } |
|
| 491 |
+ origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) |
|
| 492 |
+ tmpFile.Close() |
|
| 493 |
+ |
|
| 494 |
+ // Process the checksum |
|
| 495 |
+ r, err := archive.Tar(tmpFileName, archive.Uncompressed) |
|
| 496 |
+ if err != nil {
|
|
| 497 |
+ return err |
|
| 498 |
+ } |
|
| 499 |
+ tarSum := utils.TarSum{Reader: r, DisableCompression: true}
|
|
| 500 |
+ remoteHash = tarSum.Sum(nil) |
|
| 501 |
+ r.Close() |
|
| 502 |
+ |
|
| 503 |
+ // If the destination is a directory, figure out the filename. |
|
| 504 |
+ if strings.HasSuffix(dest, "/") {
|
|
| 505 |
+ u, err := url.Parse(orig) |
|
| 506 |
+ if err != nil {
|
|
| 507 |
+ return err |
|
| 508 |
+ } |
|
| 509 |
+ path := u.Path |
|
| 510 |
+ if strings.HasSuffix(path, "/") {
|
|
| 511 |
+ path = path[:len(path)-1] |
|
| 512 |
+ } |
|
| 513 |
+ parts := strings.Split(path, "/") |
|
| 514 |
+ filename := parts[len(parts)-1] |
|
| 515 |
+ if filename == "" {
|
|
| 516 |
+ return fmt.Errorf("cannot determine filename from url: %s", u)
|
|
| 517 |
+ } |
|
| 518 |
+ destPath = dest + filename |
|
| 519 |
+ } |
|
| 520 |
+ } |
|
| 521 |
+ |
|
| 522 |
+ if err := b.checkPathForAddition(origPath); err != nil {
|
|
| 523 |
+ return err |
|
| 524 |
+ } |
|
| 525 |
+ |
|
| 526 |
+ // Hash path and check the cache |
|
| 527 |
+ if b.utilizeCache {
|
|
| 528 |
+ var ( |
|
| 529 |
+ hash string |
|
| 530 |
+ sums = b.context.GetSums() |
|
| 531 |
+ ) |
|
| 532 |
+ |
|
| 533 |
+ if remoteHash != "" {
|
|
| 534 |
+ hash = remoteHash |
|
| 535 |
+ } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
|
|
| 536 |
+ return err |
|
| 537 |
+ } else if fi.IsDir() {
|
|
| 538 |
+ var subfiles []string |
|
| 539 |
+ for file, sum := range sums {
|
|
| 540 |
+ absFile := path.Join(b.contextPath, file) |
|
| 541 |
+ absOrigPath := path.Join(b.contextPath, origPath) |
|
| 542 |
+ if strings.HasPrefix(absFile, absOrigPath) {
|
|
| 543 |
+ subfiles = append(subfiles, sum) |
|
| 544 |
+ } |
|
| 545 |
+ } |
|
| 546 |
+ sort.Strings(subfiles) |
|
| 547 |
+ hasher := sha256.New() |
|
| 548 |
+ hasher.Write([]byte(strings.Join(subfiles, ","))) |
|
| 549 |
+ hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) |
|
| 550 |
+ } else {
|
|
| 551 |
+ if origPath[0] == '/' && len(origPath) > 1 {
|
|
| 552 |
+ origPath = origPath[1:] |
|
| 553 |
+ } |
|
| 554 |
+ origPath = strings.TrimPrefix(origPath, "./") |
|
| 555 |
+ if h, ok := sums[origPath]; ok {
|
|
| 556 |
+ hash = "file:" + h |
|
| 557 |
+ } |
|
| 558 |
+ } |
|
| 559 |
+ b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
|
|
| 560 |
+ hit, err := b.probeCache() |
|
| 561 |
+ if err != nil {
|
|
| 562 |
+ return err |
|
| 563 |
+ } |
|
| 564 |
+ // If we do not have a hash, never use the cache |
|
| 565 |
+ if hit && hash != "" {
|
|
| 566 |
+ return nil |
|
| 567 |
+ } |
|
| 568 |
+ } |
|
| 569 |
+ |
|
| 570 |
+ // Create the container and start it |
|
| 571 |
+ container, _, err := b.runtime.Create(b.config, "") |
|
| 572 |
+ if err != nil {
|
|
| 573 |
+ return err |
|
| 574 |
+ } |
|
| 575 |
+ b.tmpContainers[container.ID] = struct{}{}
|
|
| 576 |
+ |
|
| 577 |
+ if err := container.Mount(); err != nil {
|
|
| 578 |
+ return err |
|
| 579 |
+ } |
|
| 580 |
+ defer container.Unmount() |
|
| 581 |
+ |
|
| 582 |
+ if err := b.addContext(container, origPath, destPath, isRemote); err != nil {
|
|
| 583 |
+ return err |
|
| 584 |
+ } |
|
| 585 |
+ |
|
| 586 |
+ if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil {
|
|
| 587 |
+ return err |
|
| 588 |
+ } |
|
| 589 |
+ b.config.Cmd = cmd |
|
| 590 |
+ return nil |
|
| 591 |
+} |
|
| 592 |
+ |
|
| 593 |
+func (b *buildFile) create() (*runtime.Container, error) {
|
|
| 594 |
+ if b.image == "" {
|
|
| 595 |
+ return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
|
|
| 596 |
+ } |
|
| 597 |
+ b.config.Image = b.image |
|
| 598 |
+ |
|
| 599 |
+ // Create the container and start it |
|
| 600 |
+ c, _, err := b.runtime.Create(b.config, "") |
|
| 601 |
+ if err != nil {
|
|
| 602 |
+ return nil, err |
|
| 603 |
+ } |
|
| 604 |
+ b.tmpContainers[c.ID] = struct{}{}
|
|
| 605 |
+ fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) |
|
| 606 |
+ |
|
| 607 |
+ // override the entry point that may have been picked up from the base image |
|
| 608 |
+ c.Path = b.config.Cmd[0] |
|
| 609 |
+ c.Args = b.config.Cmd[1:] |
|
| 610 |
+ |
|
| 611 |
+ return c, nil |
|
| 612 |
+} |
|
| 613 |
+ |
|
| 614 |
+func (b *buildFile) run(c *runtime.Container) error {
|
|
| 615 |
+ var errCh chan error |
|
| 616 |
+ |
|
| 617 |
+ if b.verbose {
|
|
| 618 |
+ errCh = utils.Go(func() error {
|
|
| 619 |
+ return <-c.Attach(nil, nil, b.outStream, b.errStream) |
|
| 620 |
+ }) |
|
| 621 |
+ } |
|
| 622 |
+ |
|
| 623 |
+ //start the container |
|
| 624 |
+ if err := c.Start(); err != nil {
|
|
| 625 |
+ return err |
|
| 626 |
+ } |
|
| 627 |
+ |
|
| 628 |
+ if errCh != nil {
|
|
| 629 |
+ if err := <-errCh; err != nil {
|
|
| 630 |
+ return err |
|
| 631 |
+ } |
|
| 632 |
+ } |
|
| 633 |
+ |
|
| 634 |
+ // Wait for it to finish |
|
| 635 |
+ if ret := c.Wait(); ret != 0 {
|
|
| 636 |
+ err := &utils.JSONError{
|
|
| 637 |
+ Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
|
|
| 638 |
+ Code: ret, |
|
| 639 |
+ } |
|
| 640 |
+ return err |
|
| 641 |
+ } |
|
| 642 |
+ |
|
| 643 |
+ return nil |
|
| 644 |
+} |
|
| 645 |
+ |
|
| 646 |
+// Commit the container <id> with the autorun command <autoCmd> |
|
| 647 |
+func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
|
|
| 648 |
+ if b.image == "" {
|
|
| 649 |
+ return fmt.Errorf("Please provide a source image with `from` prior to commit")
|
|
| 650 |
+ } |
|
| 651 |
+ b.config.Image = b.image |
|
| 652 |
+ if id == "" {
|
|
| 653 |
+ cmd := b.config.Cmd |
|
| 654 |
+ b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
|
|
| 655 |
+ defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
|
|
| 656 |
+ |
|
| 657 |
+ hit, err := b.probeCache() |
|
| 658 |
+ if err != nil {
|
|
| 659 |
+ return err |
|
| 660 |
+ } |
|
| 661 |
+ if hit {
|
|
| 662 |
+ return nil |
|
| 663 |
+ } |
|
| 664 |
+ |
|
| 665 |
+ container, warnings, err := b.runtime.Create(b.config, "") |
|
| 666 |
+ if err != nil {
|
|
| 667 |
+ return err |
|
| 668 |
+ } |
|
| 669 |
+ for _, warning := range warnings {
|
|
| 670 |
+ fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) |
|
| 671 |
+ } |
|
| 672 |
+ b.tmpContainers[container.ID] = struct{}{}
|
|
| 673 |
+ fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) |
|
| 674 |
+ id = container.ID |
|
| 675 |
+ |
|
| 676 |
+ if err := container.Mount(); err != nil {
|
|
| 677 |
+ return err |
|
| 678 |
+ } |
|
| 679 |
+ defer container.Unmount() |
|
| 680 |
+ } |
|
| 681 |
+ container := b.runtime.Get(id) |
|
| 682 |
+ if container == nil {
|
|
| 683 |
+ return fmt.Errorf("An error occured while creating the container")
|
|
| 684 |
+ } |
|
| 685 |
+ |
|
| 686 |
+ // Note: deliberately copy the struct by value, so edits to autoConfig do not mutate b.config |
|
| 687 |
+ autoConfig := *b.config |
|
| 688 |
+ autoConfig.Cmd = autoCmd |
|
| 689 |
+ // Commit the container |
|
| 690 |
+ image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig) |
|
| 691 |
+ if err != nil {
|
|
| 692 |
+ return err |
|
| 693 |
+ } |
|
| 694 |
+ b.tmpImages[image.ID] = struct{}{}
|
|
| 695 |
+ b.image = image.ID |
|
| 696 |
+ return nil |
|
| 697 |
+} |
|
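+// For illustration (hypothetical maintainer name): metadata-only steps |
+// commit with a synthetic command such as |
+// {"/bin/sh", "-c", "#(nop) MAINTAINER alice"}, so probeCache can match |
+// them without a process ever being started. |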
| 698 |
+ |
|
| 699 |
+// Long lines can be split with a backslash |
|
| 700 |
+var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`) |
|
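+// For illustration, the two physical lines |
+//	RUN apt-get update && \ |
+//	    apt-get install -y curl |
+// are joined into a single RUN instruction before parsing. |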
| 701 |
+ |
|
| 702 |
+func (b *buildFile) Build(context io.Reader) (string, error) {
|
|
| 703 |
+ tmpdirPath, err := ioutil.TempDir("", "docker-build")
|
|
| 704 |
+ if err != nil {
|
|
| 705 |
+ return "", err |
|
| 706 |
+ } |
|
| 707 |
+ |
|
| 708 |
+ decompressedStream, err := archive.DecompressStream(context) |
|
| 709 |
+ if err != nil {
|
|
| 710 |
+ return "", err |
|
| 711 |
+ } |
|
| 712 |
+ |
|
| 713 |
+ b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true}
|
|
| 714 |
+ if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
|
|
| 715 |
+ return "", err |
|
| 716 |
+ } |
|
| 717 |
+ defer os.RemoveAll(tmpdirPath) |
|
| 718 |
+ |
|
| 719 |
+ b.contextPath = tmpdirPath |
|
| 720 |
+ filename := path.Join(tmpdirPath, "Dockerfile") |
|
| 721 |
+ if _, err := os.Stat(filename); os.IsNotExist(err) {
|
|
| 722 |
+ return "", fmt.Errorf("Can't build a directory with no Dockerfile")
|
|
| 723 |
+ } |
|
| 724 |
+ fileBytes, err := ioutil.ReadFile(filename) |
|
| 725 |
+ if err != nil {
|
|
| 726 |
+ return "", err |
|
| 727 |
+ } |
|
| 728 |
+ if len(fileBytes) == 0 {
|
|
| 729 |
+ return "", ErrDockerfileEmpty |
|
| 730 |
+ } |
|
| 731 |
+ dockerfile := string(fileBytes) |
|
| 732 |
+ dockerfile = lineContinuation.ReplaceAllString(dockerfile, "") |
|
| 733 |
+ stepN := 0 |
|
| 734 |
+ for _, line := range strings.Split(dockerfile, "\n") {
|
|
| 735 |
+ line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") |
|
| 736 |
+ // Skip comments and empty lines |
|
| 737 |
+ if len(line) == 0 || line[0] == '#' {
|
|
| 738 |
+ continue |
|
| 739 |
+ } |
|
| 740 |
+ if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
|
|
| 741 |
+ return "", err |
|
| 742 |
+ } |
|
| 743 |
+ stepN += 1 |
|
| 744 |
+ |
|
| 745 |
+ } |
|
| 746 |
+ if b.image != "" {
|
|
| 747 |
+ fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) |
|
| 748 |
+ if b.rm {
|
|
| 749 |
+ b.clearTmp(b.tmpContainers) |
|
| 750 |
+ } |
|
| 751 |
+ return b.image, nil |
|
| 752 |
+ } |
|
| 753 |
+ return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n")
|
|
| 754 |
+} |
|
| 755 |
+ |
|
| 756 |
+// BuildStep parses a single build step from `instruction` and executes it in the current context. |
|
| 757 |
+func (b *buildFile) BuildStep(name, expression string) error {
|
|
| 758 |
+ fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) |
|
| 759 |
+ tmp := strings.SplitN(expression, " ", 2) |
|
| 760 |
+ if len(tmp) != 2 {
|
|
| 761 |
+ return fmt.Errorf("Invalid Dockerfile format")
|
|
| 762 |
+ } |
|
| 763 |
+ instruction := strings.ToLower(strings.Trim(tmp[0], " ")) |
|
| 764 |
+ arguments := strings.Trim(tmp[1], " ") |
|
| 765 |
+ |
|
| 766 |
+ method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:]))
|
|
| 767 |
+ if !exists {
|
|
| 768 |
+ fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) |
|
| 769 |
+ return nil |
|
| 770 |
+ } |
|
| 771 |
+ |
|
| 772 |
+ ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface()
|
|
| 773 |
+ if ret != nil {
|
|
| 774 |
+ return ret.(error) |
|
| 775 |
+ } |
|
| 776 |
+ |
|
| 777 |
+ fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) |
|
| 778 |
+ return nil |
|
| 779 |
+} |
|
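+// For illustration: the instruction word is normalized and resolved by |
+// reflection, so "RUN" dispatches to CmdRun and "expose" to CmdExpose; |
+// unknown instructions only produce a warning and are skipped. |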
| 780 |
+ |
|
| 781 |
+func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
|
|
| 782 |
+ return &buildFile{
|
|
| 783 |
+ runtime: srv.runtime, |
|
| 784 |
+ srv: srv, |
|
| 785 |
+ config: &runconfig.Config{},
|
|
| 786 |
+ outStream: outStream, |
|
| 787 |
+ errStream: errStream, |
|
| 788 |
+ tmpContainers: make(map[string]struct{}),
|
|
| 789 |
+ tmpImages: make(map[string]struct{}),
|
|
| 790 |
+ verbose: verbose, |
|
| 791 |
+ utilizeCache: utilizeCache, |
|
| 792 |
+ rm: rm, |
|
| 793 |
+ sf: sf, |
|
| 794 |
+ authConfig: auth, |
|
| 795 |
+ configFile: authConfigFile, |
|
| 796 |
+ outOld: outOld, |
|
| 797 |
+ } |
|
| 798 |
+} |
| 0 | 799 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,2426 @@ |
| 0 |
+package server |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "encoding/json" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "github.com/dotcloud/docker/archive" |
|
| 6 |
+ "github.com/dotcloud/docker/daemonconfig" |
|
| 7 |
+ "github.com/dotcloud/docker/dockerversion" |
|
| 8 |
+ "github.com/dotcloud/docker/engine" |
|
| 9 |
+ "github.com/dotcloud/docker/graph" |
|
| 10 |
+ "github.com/dotcloud/docker/image" |
|
| 11 |
+ "github.com/dotcloud/docker/pkg/graphdb" |
|
| 12 |
+ "github.com/dotcloud/docker/pkg/signal" |
|
| 13 |
+ "github.com/dotcloud/docker/registry" |
|
| 14 |
+ "github.com/dotcloud/docker/runconfig" |
|
| 15 |
+ "github.com/dotcloud/docker/runtime" |
|
| 16 |
+ "github.com/dotcloud/docker/utils" |
|
| 17 |
+ "io" |
|
| 18 |
+ "io/ioutil" |
|
| 19 |
+ "log" |
|
| 20 |
+ "net/http" |
|
| 21 |
+ "net/url" |
|
| 22 |
+ "os" |
|
| 23 |
+ "os/exec" |
|
| 24 |
+ gosignal "os/signal" |
|
| 25 |
+ "path" |
|
| 26 |
+ "path/filepath" |
|
| 27 |
+ goruntime "runtime" |
|
| 28 |
+ "strconv" |
|
| 29 |
+ "strings" |
|
| 30 |
+ "sync" |
|
| 31 |
+ "syscall" |
|
| 32 |
+ "time" |
|
| 33 |
+) |
|
| 34 |
+ |
|
| 35 |
+// InitServer runs the remote API server `srv` as a daemon. |
|
| 36 |
+// Only one API server can run at a time - this is enforced by a pidfile. |
|
| 37 |
+// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. |
|
| 38 |
+func InitServer(job *engine.Job) engine.Status {
|
|
| 39 |
+ job.Logf("Creating server")
|
|
| 40 |
+ srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job)) |
|
| 41 |
+ if err != nil {
|
|
| 42 |
+ return job.Error(err) |
|
| 43 |
+ } |
|
| 44 |
+ if srv.runtime.Config().Pidfile != "" {
|
|
| 45 |
+ job.Logf("Creating pidfile")
|
|
| 46 |
+ if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil {
|
|
| 47 |
+ // FIXME: do we need fatal here instead of returning a job error? |
|
| 48 |
+ log.Fatal(err) |
|
| 49 |
+ } |
|
| 50 |
+ } |
|
| 51 |
+ job.Logf("Setting up signal traps")
|
|
| 52 |
+ c := make(chan os.Signal, 1) |
|
| 53 |
+ gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) |
|
| 54 |
+ go func() {
|
|
| 55 |
+ sig := <-c |
|
| 56 |
+ log.Printf("Received signal '%v', exiting\n", sig)
|
|
| 57 |
+ utils.RemovePidFile(srv.runtime.Config().Pidfile) |
|
| 58 |
+ srv.Close() |
|
| 59 |
+ os.Exit(0) |
|
| 60 |
+ }() |
|
| 61 |
+ job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
|
|
| 62 |
+ job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
|
|
| 63 |
+ |
|
| 64 |
+ for name, handler := range map[string]engine.Handler{
|
|
| 65 |
+ "export": srv.ContainerExport, |
|
| 66 |
+ "create": srv.ContainerCreate, |
|
| 67 |
+ "stop": srv.ContainerStop, |
|
| 68 |
+ "restart": srv.ContainerRestart, |
|
| 69 |
+ "start": srv.ContainerStart, |
|
| 70 |
+ "kill": srv.ContainerKill, |
|
| 71 |
+ "wait": srv.ContainerWait, |
|
| 72 |
+ "tag": srv.ImageTag, |
|
| 73 |
+ "resize": srv.ContainerResize, |
|
| 74 |
+ "commit": srv.ContainerCommit, |
|
| 75 |
+ "info": srv.DockerInfo, |
|
| 76 |
+ "container_delete": srv.ContainerDestroy, |
|
| 77 |
+ "image_export": srv.ImageExport, |
|
| 78 |
+ "images": srv.Images, |
|
| 79 |
+ "history": srv.ImageHistory, |
|
| 80 |
+ "viz": srv.ImagesViz, |
|
| 81 |
+ "container_copy": srv.ContainerCopy, |
|
| 82 |
+ "insert": srv.ImageInsert, |
|
| 83 |
+ "attach": srv.ContainerAttach, |
|
| 84 |
+ "search": srv.ImagesSearch, |
|
| 85 |
+ "changes": srv.ContainerChanges, |
|
| 86 |
+ "top": srv.ContainerTop, |
|
| 87 |
+ "version": srv.DockerVersion, |
|
| 88 |
+ "load": srv.ImageLoad, |
|
| 89 |
+ "build": srv.Build, |
|
| 90 |
+ "pull": srv.ImagePull, |
|
| 91 |
+ "import": srv.ImageImport, |
|
| 92 |
+ "image_delete": srv.ImageDelete, |
|
| 93 |
+ "inspect": srv.JobInspect, |
|
| 94 |
+ "events": srv.Events, |
|
| 95 |
+ "push": srv.ImagePush, |
|
| 96 |
+ "containers": srv.Containers, |
|
| 97 |
+ "auth": srv.Auth, |
|
| 98 |
+ } {
|
|
| 99 |
+ if err := job.Eng.Register(name, handler); err != nil {
|
|
| 100 |
+ return job.Error(err) |
|
| 101 |
+ } |
|
| 102 |
+ } |
|
| 103 |
+ return engine.StatusOK |
|
| 104 |
+} |
|
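+// Once registered, these handlers are invoked through the engine by name, |
+// in the same style the builder uses for "pull"; the container ID below |
+// is hypothetical: |
+// |
+//	job := eng.Job("start", "abc123") |
+//	if err := job.Run(); err != nil { |
+//		// handle the error |
+//	} |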
| 105 |
+ |
|
| 106 |
+// simpleVersionInfo is a simple implementation of |
|
| 107 |
+// the interface VersionInfo, which is used |
|
| 108 |
+// to provide version information for some product, |
|
| 109 |
+// component, etc. It stores the product name and the version |
|
| 110 |
+// as strings and returns them on calls to Name() and Version(). |
|
| 111 |
+type simpleVersionInfo struct {
|
|
| 112 |
+ name string |
|
| 113 |
+ version string |
|
| 114 |
+} |
|
| 115 |
+ |
|
| 116 |
+func (v *simpleVersionInfo) Name() string {
|
|
| 117 |
+ return v.name |
|
| 118 |
+} |
|
| 119 |
+ |
|
| 120 |
+func (v *simpleVersionInfo) Version() string {
|
|
| 121 |
+ return v.version |
|
| 122 |
+} |
|
| 123 |
+ |
|
| 124 |
+// ContainerKill sends a signal to the container. |
|
| 125 |
+// If no signal is given (sig 0), then Kill with SIGKILL and wait |
|
| 126 |
+// for the container to exit. |
|
| 127 |
+// If a signal is given, then just send it to the container and return. |
|
| 128 |
+func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
|
|
| 129 |
+ if n := len(job.Args); n < 1 || n > 2 {
|
|
| 130 |
+ return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
|
|
| 131 |
+ } |
|
| 132 |
+ var ( |
|
| 133 |
+ name = job.Args[0] |
|
| 134 |
+ sig uint64 |
|
| 135 |
+ err error |
|
| 136 |
+ ) |
|
| 137 |
+ |
|
| 138 |
+ // If we have a signal, look at it. Otherwise, do nothing |
|
| 139 |
+ if len(job.Args) == 2 && job.Args[1] != "" {
|
|
| 140 |
+ // Check if we passed the signal as a number: |
|
| 141 |
+ // The largest legal signal is 31, so let's parse on 5 bits |
|
| 142 |
+ sig, err = strconv.ParseUint(job.Args[1], 10, 5) |
|
| 143 |
+ if err != nil {
|
|
| 144 |
+ // The signal is not a number, treat it as a string |
|
| 145 |
+ sig = uint64(signal.SignalMap[job.Args[1]]) |
|
| 146 |
+ if sig == 0 {
|
|
| 147 |
+ return job.Errorf("Invalid signal: %s", job.Args[1])
|
|
| 148 |
+ } |
|
| 149 |
+ |
|
| 150 |
+ } |
|
| 151 |
+ } |
|
| 152 |
+ |
|
| 153 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 154 |
+ // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) |
|
| 155 |
+ if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
|
|
| 156 |
+ if err := container.Kill(); err != nil {
|
|
| 157 |
+ return job.Errorf("Cannot kill container %s: %s", name, err)
|
|
| 158 |
+ } |
|
| 159 |
+ srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 160 |
+ } else {
|
|
| 161 |
+ // Otherwise, just send the requested signal |
|
| 162 |
+ if err := container.KillSig(int(sig)); err != nil {
|
|
| 163 |
+ return job.Errorf("Cannot kill container %s: %s", name, err)
|
|
| 164 |
+ } |
|
| 165 |
+ // FIXME: Add event for signals |
|
| 166 |
+ } |
|
| 167 |
+ } else {
|
|
| 168 |
+ return job.Errorf("No such container: %s", name)
|
|
| 169 |
+ } |
|
| 170 |
+ return engine.StatusOK |
|
| 171 |
+} |
|
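+// For illustration (hypothetical container ID): "kill abc123" with no |
+// signal argument performs a full Kill (SIGKILL plus wait), while |
+// "kill abc123 15" parses 15 as SIGTERM and only forwards it via KillSig. |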
| 172 |
+ |
|
| 173 |
+func (srv *Server) Auth(job *engine.Job) engine.Status {
|
|
| 174 |
+ var ( |
|
| 175 |
+ err error |
|
| 176 |
+ authConfig = ®istry.AuthConfig{}
|
|
| 177 |
+ ) |
|
| 178 |
+ |
|
| 179 |
+ job.GetenvJson("authConfig", authConfig)
|
|
| 180 |
+ // TODO: this is only done here because auth and registry need to be merged into one pkg |
|
| 181 |
+ if addr := authConfig.ServerAddress; addr != "" && addr != registry.IndexServerAddress() {
|
|
| 182 |
+ addr, err = registry.ExpandAndVerifyRegistryUrl(addr) |
|
| 183 |
+ if err != nil {
|
|
| 184 |
+ return job.Error(err) |
|
| 185 |
+ } |
|
| 186 |
+ authConfig.ServerAddress = addr |
|
| 187 |
+ } |
|
| 188 |
+ status, err := registry.Login(authConfig, srv.HTTPRequestFactory(nil)) |
|
| 189 |
+ if err != nil {
|
|
| 190 |
+ return job.Error(err) |
|
| 191 |
+ } |
|
| 192 |
+ job.Printf("%s\n", status)
|
|
| 193 |
+ return engine.StatusOK |
|
| 194 |
+} |
|
| 195 |
+ |
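+// Events streams engine events (e.g. "kill", "export") to the caller as a
+// sequence of JSON-encoded utils.JSONMessage values, optionally replaying
+// stored events newer than the "since" timestamp first.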
|
| 196 |
+func (srv *Server) Events(job *engine.Job) engine.Status {
|
|
| 197 |
+ if len(job.Args) != 1 {
|
|
| 198 |
+ return job.Errorf("Usage: %s FROM", job.Name)
|
|
| 199 |
+ } |
|
| 200 |
+ |
|
| 201 |
+ var ( |
|
| 202 |
+ from = job.Args[0] |
|
| 203 |
+ since = job.GetenvInt64("since")
|
|
| 204 |
+ ) |
|
| 205 |
+ sendEvent := func(event *utils.JSONMessage) error {
|
|
| 206 |
+ b, err := json.Marshal(event) |
|
| 207 |
+ if err != nil {
|
|
| 208 |
+ return fmt.Errorf("JSON error")
|
|
| 209 |
+ } |
|
| 210 |
+ _, err = job.Stdout.Write(b) |
|
| 211 |
+ if err != nil {
|
|
| 212 |
+ // On error, evict the listener |
|
| 213 |
+ utils.Errorf("%s", err)
|
|
| 214 |
+ srv.Lock() |
|
| 215 |
+ delete(srv.listeners, from) |
|
| 216 |
+ srv.Unlock() |
|
| 217 |
+ return err |
|
| 218 |
+ } |
|
| 219 |
+ return nil |
|
| 220 |
+ } |
|
| 221 |
+ |
|
| 222 |
+ listener := make(chan utils.JSONMessage) |
|
| 223 |
+ srv.Lock() |
|
| 224 |
+ srv.listeners[from] = listener |
|
| 225 |
+ srv.Unlock() |
|
| 226 |
+ job.Stdout.Write(nil) // flush |
|
| 227 |
+ if since != 0 {
|
|
| 228 |
+ // If a "since" timestamp was given, replay the stored events that happened after it |
|
| 229 |
+ for _, event := range srv.GetEvents() {
|
|
| 230 |
+ if event.Time >= since {
|
|
| 231 |
+ err := sendEvent(&event) |
|
| 232 |
+ if err != nil && err.Error() == "JSON error" {
|
|
| 233 |
+ continue |
|
| 234 |
+ } |
|
| 235 |
+ if err != nil {
|
|
| 236 |
+ job.Error(err) |
|
| 237 |
+ return engine.StatusErr |
|
| 238 |
+ } |
|
| 239 |
+ } |
|
| 240 |
+ } |
|
| 241 |
+ } |
|
| 242 |
+ for event := range listener {
|
|
| 243 |
+ err := sendEvent(&event) |
|
| 244 |
+ if err != nil && err.Error() == "JSON error" {
|
|
| 245 |
+ continue |
|
| 246 |
+ } |
|
| 247 |
+ if err != nil {
|
|
| 248 |
+ return job.Error(err) |
|
| 249 |
+ } |
|
| 250 |
+ } |
|
| 251 |
+ return engine.StatusOK |
|
| 252 |
+} |
|
| 253 |
+ |
|
| 254 |
+func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
|
|
| 255 |
+ if len(job.Args) != 1 {
|
|
| 256 |
+ return job.Errorf("Usage: %s container_id", job.Name)
|
|
| 257 |
+ } |
|
| 258 |
+ name := job.Args[0] |
|
| 259 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 260 |
+ data, err := container.Export() |
|
| 261 |
+ if err != nil {
|
|
| 262 |
+ return job.Errorf("%s: %s", name, err)
|
|
| 263 |
+ } |
|
| 264 |
+ defer data.Close() |
|
| 265 |
+ |
|
| 266 |
+ // Stream the entire contents of the container (basically a volatile snapshot) |
|
| 267 |
+ if _, err := io.Copy(job.Stdout, data); err != nil {
|
|
| 268 |
+ return job.Errorf("%s: %s", name, err)
|
|
| 269 |
+ } |
|
| 270 |
+ // FIXME: factor job-specific LogEvent to engine.Job.Run() |
|
| 271 |
+ srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 272 |
+ return engine.StatusOK |
|
| 273 |
+ } |
|
| 274 |
+ return job.Errorf("No such container: %s", name)
|
|
| 275 |
+} |
|
| 276 |
+ |
|
| 277 |
+// ImageExport exports all images with the given tag. All versions |
|
| 278 |
+// containing the same tag are exported. The resulting output is an |
|
| 279 |
+// uncompressed tarball. |
|
| 280 |
+// name is the set of tags to export. |
|
| 281 |
+// out is the writer where the images are written to. |
|
| 282 |
+func (srv *Server) ImageExport(job *engine.Job) engine.Status {
|
|
| 283 |
+ if len(job.Args) != 1 {
|
|
| 284 |
+ return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 285 |
+ } |
|
| 286 |
+ name := job.Args[0] |
|
| 287 |
+ // get image json |
|
| 288 |
+ tempdir, err := ioutil.TempDir("", "docker-export-")
|
|
| 289 |
+ if err != nil {
|
|
| 290 |
+ return job.Error(err) |
|
| 291 |
+ } |
|
| 292 |
+ defer os.RemoveAll(tempdir) |
|
| 293 |
+ |
|
| 294 |
+ utils.Debugf("Serializing %s", name)
|
|
| 295 |
+ |
|
| 296 |
+ rootRepo, err := srv.runtime.Repositories().Get(name) |
|
| 297 |
+ if err != nil {
|
|
| 298 |
+ return job.Error(err) |
|
| 299 |
+ } |
|
| 300 |
+ if rootRepo != nil {
|
|
| 301 |
+ for _, id := range rootRepo {
|
|
| 302 |
+ image, err := srv.ImageInspect(id) |
|
| 303 |
+ if err != nil {
|
|
| 304 |
+ return job.Error(err) |
|
| 305 |
+ } |
|
| 306 |
+ |
|
| 307 |
+ if err := srv.exportImage(image, tempdir); err != nil {
|
|
| 308 |
+ return job.Error(err) |
|
| 309 |
+ } |
|
| 310 |
+ } |
|
| 311 |
+ |
|
| 312 |
+ // write repositories |
|
| 313 |
+ rootRepoMap := map[string]graph.Repository{}
|
|
| 314 |
+ rootRepoMap[name] = rootRepo |
|
| 315 |
+ rootRepoJson, _ := json.Marshal(rootRepoMap) |
|
| 316 |
+ |
|
| 317 |
+ if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil {
|
|
| 318 |
+ return job.Error(err) |
|
| 319 |
+ } |
|
| 320 |
+ } else {
|
|
| 321 |
+ image, err := srv.ImageInspect(name) |
|
| 322 |
+ if err != nil {
|
|
| 323 |
+ return job.Error(err) |
|
| 324 |
+ } |
|
| 325 |
+ if err := srv.exportImage(image, tempdir); err != nil {
|
|
| 326 |
+ return job.Error(err) |
|
| 327 |
+ } |
|
| 328 |
+ } |
|
| 329 |
+ |
|
| 330 |
+ fs, err := archive.Tar(tempdir, archive.Uncompressed) |
|
| 331 |
+ if err != nil {
|
|
| 332 |
+ return job.Error(err) |
|
| 333 |
+ } |
|
| 334 |
+ defer fs.Close() |
|
| 335 |
+ |
|
| 336 |
+ if _, err := io.Copy(job.Stdout, fs); err != nil {
|
|
| 337 |
+ return job.Error(err) |
|
| 338 |
+ } |
|
| 339 |
+ return engine.StatusOK |
|
| 340 |
+} |
|
| 341 |
+ |
|
| 342 |
+func (srv *Server) exportImage(img *image.Image, tempdir string) error {
|
|
| 343 |
+ for i := img; i != nil; {
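+ // Walk up the parent chain, serializing one layer per iteration until the
+ // base image (which has no parent) has been written out.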
|
|
| 344 |
+ // temporary directory |
|
| 345 |
+ tmpImageDir := path.Join(tempdir, i.ID) |
|
| 346 |
+ if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil {
|
|
| 347 |
+ if os.IsExist(err) {
|
|
| 348 |
+ return nil |
|
| 349 |
+ } |
|
| 350 |
+ return err |
|
| 351 |
+ } |
|
| 352 |
+ |
|
| 353 |
+ var version = "1.0" |
|
| 354 |
+ var versionBuf = []byte(version) |
|
| 355 |
+ |
|
| 356 |
+ if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil {
|
|
| 357 |
+ return err |
|
| 358 |
+ } |
|
| 359 |
+ |
|
| 360 |
+ // serialize json |
|
| 361 |
+ b, err := json.Marshal(i) |
|
| 362 |
+ if err != nil {
|
|
| 363 |
+ return err |
|
| 364 |
+ } |
|
| 365 |
+ if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil {
|
|
| 366 |
+ return err |
|
| 367 |
+ } |
|
| 368 |
+ |
|
| 369 |
+ // serialize filesystem |
|
| 370 |
+ fs, err := i.TarLayer() |
|
| 371 |
+ if err != nil {
|
|
| 372 |
+ return err |
|
| 373 |
+ } |
|
| 374 |
+ defer fs.Close() |
|
| 375 |
+ |
|
| 376 |
+ fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) |
|
| 377 |
+ if err != nil {
|
|
| 378 |
+ return err |
|
| 379 |
+ } |
|
| 380 |
+ if _, err = io.Copy(fsTar, fs); err != nil {
|
|
| 381 |
+ return err |
|
| 382 |
+ } |
|
| 383 |
+ fsTar.Close() |
|
| 384 |
+ |
|
| 385 |
+ // find parent |
|
| 386 |
+ if i.Parent != "" {
|
|
| 387 |
+ i, err = srv.ImageInspect(i.Parent) |
|
| 388 |
+ if err != nil {
|
|
| 389 |
+ return err |
|
| 390 |
+ } |
|
| 391 |
+ } else {
|
|
| 392 |
+ i = nil |
|
| 393 |
+ } |
|
| 394 |
+ } |
|
| 395 |
+ return nil |
|
| 396 |
+} |
|
| 397 |
+ |
|
| 398 |
+func (srv *Server) Build(job *engine.Job) engine.Status {
|
|
| 399 |
+ if len(job.Args) != 0 {
|
|
| 400 |
+ return job.Errorf("Usage: %s\n", job.Name)
|
|
| 401 |
+ } |
|
| 402 |
+ var ( |
|
| 403 |
+ remoteURL = job.Getenv("remote")
|
|
| 404 |
+ repoName = job.Getenv("t")
|
|
| 405 |
+ suppressOutput = job.GetenvBool("q")
|
|
| 406 |
+ noCache = job.GetenvBool("nocache")
|
|
| 407 |
+ rm = job.GetenvBool("rm")
|
|
| 408 |
+ authConfig = ®istry.AuthConfig{}
|
|
| 409 |
+ configFile = ®istry.ConfigFile{}
|
|
| 410 |
+ tag string |
|
| 411 |
+ context io.ReadCloser |
|
| 412 |
+ ) |
|
| 413 |
+ job.GetenvJson("authConfig", authConfig)
|
|
| 414 |
+ job.GetenvJson("configFile", configFile)
|
|
| 415 |
+ repoName, tag = utils.ParseRepositoryTag(repoName) |
|
| 416 |
+ |
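+ // The build context can arrive three ways: as a tar stream on stdin, as a
+ // git repository to clone, or as a URL pointing at a single Dockerfile.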
|
| 417 |
+ if remoteURL == "" {
|
|
| 418 |
+ context = ioutil.NopCloser(job.Stdin) |
|
| 419 |
+ } else if utils.IsGIT(remoteURL) {
|
|
| 420 |
+ if !strings.HasPrefix(remoteURL, "git://") {
|
|
| 421 |
+ remoteURL = "https://" + remoteURL |
|
| 422 |
+ } |
|
| 423 |
+ root, err := ioutil.TempDir("", "docker-build-git")
|
|
| 424 |
+ if err != nil {
|
|
| 425 |
+ return job.Error(err) |
|
| 426 |
+ } |
|
| 427 |
+ defer os.RemoveAll(root) |
|
| 428 |
+ |
|
| 429 |
+ if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
|
|
| 430 |
+ return job.Errorf("Error trying to use git: %s (%s)", err, output)
|
|
| 431 |
+ } |
|
| 432 |
+ |
|
| 433 |
+ c, err := archive.Tar(root, archive.Uncompressed) |
|
| 434 |
+ if err != nil {
|
|
| 435 |
+ return job.Error(err) |
|
| 436 |
+ } |
|
| 437 |
+ context = c |
|
| 438 |
+ } else if utils.IsURL(remoteURL) {
|
|
| 439 |
+ f, err := utils.Download(remoteURL) |
|
| 440 |
+ if err != nil {
|
|
| 441 |
+ return job.Error(err) |
|
| 442 |
+ } |
|
| 443 |
+ defer f.Body.Close() |
|
| 444 |
+ dockerFile, err := ioutil.ReadAll(f.Body) |
|
| 445 |
+ if err != nil {
|
|
| 446 |
+ return job.Error(err) |
|
| 447 |
+ } |
|
| 448 |
+ c, err := archive.Generate("Dockerfile", string(dockerFile))
|
|
| 449 |
+ if err != nil {
|
|
| 450 |
+ return job.Error(err) |
|
| 451 |
+ } |
|
| 452 |
+ context = c |
|
| 453 |
+ } |
|
| 454 |
+ defer context.Close() |
|
| 455 |
+ |
|
| 456 |
+ sf := utils.NewStreamFormatter(job.GetenvBool("json"))
|
|
| 457 |
+ b := NewBuildFile(srv, |
|
| 458 |
+ &utils.StdoutFormater{
|
|
| 459 |
+ Writer: job.Stdout, |
|
| 460 |
+ StreamFormatter: sf, |
|
| 461 |
+ }, |
|
| 462 |
+ &utils.StderrFormater{
|
|
| 463 |
+ Writer: job.Stdout, |
|
| 464 |
+ StreamFormatter: sf, |
|
| 465 |
+ }, |
|
| 466 |
+ !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) |
|
| 467 |
+ id, err := b.Build(context) |
|
| 468 |
+ if err != nil {
|
|
| 469 |
+ return job.Error(err) |
|
| 470 |
+ } |
|
| 471 |
+ if repoName != "" {
|
|
| 472 |
+ srv.runtime.Repositories().Set(repoName, tag, id, false) |
|
| 473 |
+ } |
|
| 474 |
+ return engine.StatusOK |
|
| 475 |
+} |
|
| 476 |
+ |
|
| 477 |
+// ImageLoad loads a set of images into the repository. This is the complement of ImageExport. |
|
| 478 |
+// The input stream is an uncompressed tarball containing images and metadata. |
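+// The expected layout mirrors what ImageExport produces: one directory per
+// image ID (containing VERSION, json and layer.tar) plus an optional
+// top-level "repositories" file mapping names and tags to image IDs.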
|
| 479 |
+func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
|
|
| 480 |
+ tmpImageDir, err := ioutil.TempDir("", "docker-import-")
|
|
| 481 |
+ if err != nil {
|
|
| 482 |
+ return job.Error(err) |
|
| 483 |
+ } |
|
| 484 |
+ defer os.RemoveAll(tmpImageDir) |
|
| 485 |
+ |
|
| 486 |
+ var ( |
|
| 487 |
+ repoTarFile = path.Join(tmpImageDir, "repo.tar") |
|
| 488 |
+ repoDir = path.Join(tmpImageDir, "repo") |
|
| 489 |
+ ) |
|
| 490 |
+ |
|
| 491 |
+ tarFile, err := os.Create(repoTarFile) |
|
| 492 |
+ if err != nil {
|
|
| 493 |
+ return job.Error(err) |
|
| 494 |
+ } |
|
| 495 |
+ if _, err := io.Copy(tarFile, job.Stdin); err != nil {
|
|
| 496 |
+ return job.Error(err) |
|
| 497 |
+ } |
|
| 498 |
+ tarFile.Close() |
|
| 499 |
+ |
|
| 500 |
+ repoFile, err := os.Open(repoTarFile) |
|
| 501 |
+ if err != nil {
|
|
| 502 |
+ return job.Error(err) |
|
| 503 |
+ } |
|
| 504 |
+ if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
|
|
| 505 |
+ return job.Error(err) |
|
| 506 |
+ } |
|
| 507 |
+ if err := archive.Untar(repoFile, repoDir, nil); err != nil {
|
|
| 508 |
+ return job.Error(err) |
|
| 509 |
+ } |
|
| 510 |
+ |
|
| 511 |
+ dirs, err := ioutil.ReadDir(repoDir) |
|
| 512 |
+ if err != nil {
|
|
| 513 |
+ return job.Error(err) |
|
| 514 |
+ } |
|
| 515 |
+ |
|
| 516 |
+ for _, d := range dirs {
|
|
| 517 |
+ if d.IsDir() {
|
|
| 518 |
+ if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil {
|
|
| 519 |
+ return job.Error(err) |
|
| 520 |
+ } |
|
| 521 |
+ } |
|
| 522 |
+ } |
|
| 523 |
+ |
|
| 524 |
+ repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) |
|
| 525 |
+ if err == nil {
|
|
| 526 |
+ repositories := map[string]graph.Repository{}
|
|
| 527 |
+ if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
|
|
| 528 |
+ return job.Error(err) |
|
| 529 |
+ } |
|
| 530 |
+ |
|
| 531 |
+ for imageName, tagMap := range repositories {
|
|
| 532 |
+ for tag, address := range tagMap {
|
|
| 533 |
+ if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil {
|
|
| 534 |
+ return job.Error(err) |
|
| 535 |
+ } |
|
| 536 |
+ } |
|
| 537 |
+ } |
|
| 538 |
+ } else if !os.IsNotExist(err) {
|
|
| 539 |
+ return job.Error(err) |
|
| 540 |
+ } |
|
| 541 |
+ |
|
| 542 |
+ return engine.StatusOK |
|
| 543 |
+} |
|
| 544 |
+ |
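+// recursiveLoad registers the image stored under "address" in the unpacked
+// tarball, loading its parent chain first so that Graph().Register never
+// sees a missing parent.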
|
| 545 |
+func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
|
|
| 546 |
+ if _, err := srv.ImageInspect(address); err != nil {
|
|
| 547 |
+ utils.Debugf("Loading %s", address)
|
|
| 548 |
+ |
|
| 549 |
+ imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) |
|
| 550 |
+ if err != nil {
|
|
| 551 |
+ utils.Debugf("Error reading json", err)
|
|
| 552 |
+ return err |
|
| 553 |
+ } |
|
| 554 |
+ |
|
| 555 |
+ layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) |
|
| 556 |
+ if err != nil {
|
|
| 557 |
+ utils.Debugf("Error reading embedded tar", err)
|
|
| 558 |
+ return err |
|
| 559 |
+ } |
|
| 560 |
+ img, err := image.NewImgJSON(imageJson) |
|
| 561 |
+ if err != nil {
|
|
| 562 |
+ utils.Debugf("Error unmarshalling json", err)
|
|
| 563 |
+ return err |
|
| 564 |
+ } |
|
| 565 |
+ if img.Parent != "" {
|
|
| 566 |
+ if !srv.runtime.Graph().Exists(img.Parent) {
|
|
| 567 |
+ if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
|
|
| 568 |
+ return err |
|
| 569 |
+ } |
|
| 570 |
+ } |
|
| 571 |
+ } |
|
| 572 |
+ if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil {
|
|
| 573 |
+ return err |
|
| 574 |
+ } |
|
| 575 |
+ } |
|
| 576 |
+ utils.Debugf("Completed processing %s", address)
|
|
| 577 |
+ |
|
| 578 |
+ return nil |
|
| 579 |
+} |
|
| 580 |
+ |
|
| 581 |
+func (srv *Server) ImagesSearch(job *engine.Job) engine.Status {
|
|
| 582 |
+ if n := len(job.Args); n != 1 {
|
|
| 583 |
+ return job.Errorf("Usage: %s TERM", job.Name)
|
|
| 584 |
+ } |
|
| 585 |
+ var ( |
|
| 586 |
+ term = job.Args[0] |
|
| 587 |
+ metaHeaders = map[string][]string{}
|
|
| 588 |
+ authConfig = ®istry.AuthConfig{}
|
|
| 589 |
+ ) |
|
| 590 |
+ job.GetenvJson("authConfig", authConfig)
|
|
| 591 |
+ job.GetenvJson("metaHeaders", metaHeaders)
|
|
| 592 |
+ |
|
| 593 |
+ r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) |
|
| 594 |
+ if err != nil {
|
|
| 595 |
+ return job.Error(err) |
|
| 596 |
+ } |
|
| 597 |
+ results, err := r.SearchRepositories(term) |
|
| 598 |
+ if err != nil {
|
|
| 599 |
+ return job.Error(err) |
|
| 600 |
+ } |
|
| 601 |
+ outs := engine.NewTable("star_count", 0)
|
|
| 602 |
+ for _, result := range results.Results {
|
|
| 603 |
+ out := &engine.Env{}
|
|
| 604 |
+ out.Import(result) |
|
| 605 |
+ outs.Add(out) |
|
| 606 |
+ } |
|
| 607 |
+ outs.ReverseSort() |
|
| 608 |
+ if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
|
| 609 |
+ return job.Error(err) |
|
| 610 |
+ } |
|
| 611 |
+ return engine.StatusOK |
|
| 612 |
+} |
|
| 613 |
+ |
|
| 614 |
+func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
|
|
| 615 |
+ if len(job.Args) != 3 {
|
|
| 616 |
+ return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name)
|
|
| 617 |
+ } |
|
| 618 |
+ |
|
| 619 |
+ var ( |
|
| 620 |
+ name = job.Args[0] |
|
| 621 |
+ url = job.Args[1] |
|
| 622 |
+ path = job.Args[2] |
|
| 623 |
+ ) |
|
| 624 |
+ |
|
| 625 |
+ sf := utils.NewStreamFormatter(job.GetenvBool("json"))
|
|
| 626 |
+ |
|
| 627 |
+ out := utils.NewWriteFlusher(job.Stdout) |
|
| 628 |
+ img, err := srv.runtime.Repositories().LookupImage(name) |
|
| 629 |
+ if err != nil {
|
|
| 630 |
+ return job.Error(err) |
|
| 631 |
+ } |
|
| 632 |
+ |
|
| 633 |
+ file, err := utils.Download(url) |
|
| 634 |
+ if err != nil {
|
|
| 635 |
+ return job.Error(err) |
|
| 636 |
+ } |
|
| 637 |
+ defer file.Body.Close() |
|
| 638 |
+ |
|
| 639 |
+ config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig())
|
|
| 640 |
+ if err != nil {
|
|
| 641 |
+ return job.Error(err) |
|
| 642 |
+ } |
|
| 643 |
+ |
|
| 644 |
+ c, _, err := srv.runtime.Create(config, "") |
|
| 645 |
+ if err != nil {
|
|
| 646 |
+ return job.Error(err) |
|
| 647 |
+ } |
|
| 648 |
+ |
|
| 649 |
+ if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil {
|
|
| 650 |
+ return job.Error(err) |
|
| 651 |
+ } |
|
| 652 |
+ // FIXME: Handle custom repo, tag comment, author |
|
| 653 |
+ img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) |
|
| 654 |
+ if err != nil {
|
|
| 655 |
+ out.Write(sf.FormatError(err)) |
|
| 656 |
+ return engine.StatusErr |
|
| 657 |
+ } |
|
| 658 |
+ out.Write(sf.FormatStatus("", img.ID))
|
|
| 659 |
+ return engine.StatusOK |
|
| 660 |
+} |
|
| 661 |
+ |
|
| 662 |
+func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
|
|
| 663 |
+ images, _ := srv.runtime.Graph().Map() |
|
| 664 |
+ if images == nil {
|
|
| 665 |
+ return engine.StatusOK |
|
| 666 |
+ } |
|
| 667 |
+ job.Stdout.Write([]byte("digraph docker {\n"))
|
|
| 668 |
+ |
|
| 669 |
+ var ( |
|
| 670 |
+ parentImage *image.Image |
|
| 671 |
+ err error |
|
| 672 |
+ ) |
|
| 673 |
+ for _, image := range images {
|
|
| 674 |
+ parentImage, err = image.GetParent() |
|
| 675 |
+ if err != nil {
|
|
| 676 |
+ return job.Errorf("Error while getting parent image: %v", err)
|
|
| 677 |
+ } |
|
| 678 |
+ if parentImage != nil {
|
|
| 679 |
+ job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n"))
|
|
| 680 |
+ } else {
|
|
| 681 |
+ job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n"))
|
|
| 682 |
+ } |
|
| 683 |
+ } |
|
| 684 |
+ |
|
| 685 |
+ reporefs := make(map[string][]string) |
|
| 686 |
+ |
|
| 687 |
+ for name, repository := range srv.runtime.Repositories().Repositories {
|
|
| 688 |
+ for tag, id := range repository {
|
|
| 689 |
+ reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
|
|
| 690 |
+ } |
|
| 691 |
+ } |
|
| 692 |
+ |
|
| 693 |
+ for id, repos := range reporefs {
|
|
| 694 |
+ job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n"))
|
|
| 695 |
+ } |
|
| 696 |
+ job.Stdout.Write([]byte(" base [style=invisible]\n}\n"))
|
|
| 697 |
+ return engine.StatusOK |
|
| 698 |
+} |
|
| 699 |
+ |
|
| 700 |
+func (srv *Server) Images(job *engine.Job) engine.Status {
|
|
| 701 |
+ var ( |
|
| 702 |
+ allImages map[string]*image.Image |
|
| 703 |
+ err error |
|
| 704 |
+ ) |
|
| 705 |
+ if job.GetenvBool("all") {
|
|
| 706 |
+ allImages, err = srv.runtime.Graph().Map() |
|
| 707 |
+ } else {
|
|
| 708 |
+ allImages, err = srv.runtime.Graph().Heads() |
|
| 709 |
+ } |
|
| 710 |
+ if err != nil {
|
|
| 711 |
+ return job.Error(err) |
|
| 712 |
+ } |
|
| 713 |
+ lookup := make(map[string]*engine.Env) |
|
| 714 |
+ for name, repository := range srv.runtime.Repositories().Repositories {
|
|
| 715 |
+ if job.Getenv("filter") != "" {
|
|
| 716 |
+ if match, _ := path.Match(job.Getenv("filter"), name); !match {
|
|
| 717 |
+ continue |
|
| 718 |
+ } |
|
| 719 |
+ } |
|
| 720 |
+ for tag, id := range repository {
|
|
| 721 |
+ image, err := srv.runtime.Graph().Get(id) |
|
| 722 |
+ if err != nil {
|
|
| 723 |
+ log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
|
|
| 724 |
+ continue |
|
| 725 |
+ } |
|
| 726 |
+ |
|
| 727 |
+ if out, exists := lookup[id]; exists {
|
|
| 728 |
+ out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag)))
|
|
| 729 |
+ } else {
|
|
| 730 |
+ out := &engine.Env{}
|
|
| 731 |
+ delete(allImages, id) |
|
| 732 |
+ out.Set("ParentId", image.Parent)
|
|
| 733 |
+ out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)})
|
|
| 734 |
+ out.Set("Id", image.ID)
|
|
| 735 |
+ out.SetInt64("Created", image.Created.Unix())
|
|
| 736 |
+ out.SetInt64("Size", image.Size)
|
|
| 737 |
+ out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
|
|
| 738 |
+ lookup[id] = out |
|
| 739 |
+ } |
|
| 740 |
+ |
|
| 741 |
+ } |
|
| 742 |
+ } |
|
| 743 |
+ |
|
| 744 |
+ outs := engine.NewTable("Created", len(lookup))
|
|
| 745 |
+ for _, value := range lookup {
|
|
| 746 |
+ outs.Add(value) |
|
| 747 |
+ } |
|
| 748 |
+ |
|
| 749 |
+ // Display images which aren't part of a repository/tag |
|
| 750 |
+ if job.Getenv("filter") == "" {
|
|
| 751 |
+ for _, image := range allImages {
|
|
| 752 |
+ out := &engine.Env{}
|
|
| 753 |
+ out.Set("ParentId", image.Parent)
|
|
| 754 |
+ out.SetList("RepoTags", []string{"<none>:<none>"})
|
|
| 755 |
+ out.Set("Id", image.ID)
|
|
| 756 |
+ out.SetInt64("Created", image.Created.Unix())
|
|
| 757 |
+ out.SetInt64("Size", image.Size)
|
|
| 758 |
+ out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
|
|
| 759 |
+ outs.Add(out) |
|
| 760 |
+ } |
|
| 761 |
+ } |
|
| 762 |
+ |
|
| 763 |
+ outs.ReverseSort() |
|
| 764 |
+ if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
|
| 765 |
+ return job.Error(err) |
|
| 766 |
+ } |
|
| 767 |
+ return engine.StatusOK |
|
| 768 |
+} |
|
| 769 |
+ |
|
| 770 |
+func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
|
|
| 771 |
+ images, _ := srv.runtime.Graph().Map() |
|
| 772 |
+ var imgcount int |
|
| 773 |
+ if images == nil {
|
|
| 774 |
+ imgcount = 0 |
|
| 775 |
+ } else {
|
|
| 776 |
+ imgcount = len(images) |
|
| 777 |
+ } |
|
| 778 |
+ kernelVersion := "<unknown>" |
|
| 779 |
+ if kv, err := utils.GetKernelVersion(); err == nil {
|
|
| 780 |
+ kernelVersion = kv.String() |
|
| 781 |
+ } |
|
| 782 |
+ |
|
| 783 |
+ // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) |
|
| 784 |
+ initPath := utils.DockerInitPath("")
|
|
| 785 |
+ if initPath == "" {
|
|
| 786 |
+ // if that fails, we'll just return the path from the runtime |
|
| 787 |
+ initPath = srv.runtime.SystemInitPath() |
|
| 788 |
+ } |
|
| 789 |
+ |
|
| 790 |
+ v := &engine.Env{}
|
|
| 791 |
+ v.SetInt("Containers", len(srv.runtime.List()))
|
|
| 792 |
+ v.SetInt("Images", imgcount)
|
|
| 793 |
+ v.Set("Driver", srv.runtime.GraphDriver().String())
|
|
| 794 |
+ v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status())
|
|
| 795 |
+ v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit)
|
|
| 796 |
+ v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit)
|
|
| 797 |
+ v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled)
|
|
| 798 |
+ v.SetBool("Debug", os.Getenv("DEBUG") != "")
|
|
| 799 |
+ v.SetInt("NFd", utils.GetTotalUsedFds())
|
|
| 800 |
+ v.SetInt("NGoroutines", goruntime.NumGoroutine())
|
|
| 801 |
+ v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name())
|
|
| 802 |
+ v.SetInt("NEventsListener", len(srv.listeners))
|
|
| 803 |
+ v.Set("KernelVersion", kernelVersion)
|
|
| 804 |
+ v.Set("IndexServerAddress", registry.IndexServerAddress())
|
|
| 805 |
+ v.Set("InitSha1", dockerversion.INITSHA1)
|
|
| 806 |
+ v.Set("InitPath", initPath)
|
|
| 807 |
+ if _, err := v.WriteTo(job.Stdout); err != nil {
|
|
| 808 |
+ return job.Error(err) |
|
| 809 |
+ } |
|
| 810 |
+ return engine.StatusOK |
|
| 811 |
+} |
|
| 812 |
+ |
|
| 813 |
+func (srv *Server) DockerVersion(job *engine.Job) engine.Status {
|
|
| 814 |
+ v := &engine.Env{}
|
|
| 815 |
+ v.Set("Version", dockerversion.VERSION)
|
|
| 816 |
+ v.Set("GitCommit", dockerversion.GITCOMMIT)
|
|
| 817 |
+ v.Set("GoVersion", goruntime.Version())
|
|
| 818 |
+ v.Set("Os", goruntime.GOOS)
|
|
| 819 |
+ v.Set("Arch", goruntime.GOARCH)
|
|
| 820 |
+ if kernelVersion, err := utils.GetKernelVersion(); err == nil {
|
|
| 821 |
+ v.Set("KernelVersion", kernelVersion.String())
|
|
| 822 |
+ } |
|
| 823 |
+ if _, err := v.WriteTo(job.Stdout); err != nil {
|
|
| 824 |
+ return job.Error(err) |
|
| 825 |
+ } |
|
| 826 |
+ return engine.StatusOK |
|
| 827 |
+} |
|
| 828 |
+ |
|
| 829 |
+func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
|
|
| 830 |
+ if n := len(job.Args); n != 1 {
|
|
| 831 |
+ return job.Errorf("Usage: %s IMAGE", job.Name)
|
|
| 832 |
+ } |
|
| 833 |
+ name := job.Args[0] |
|
| 834 |
+ foundImage, err := srv.runtime.Repositories().LookupImage(name) |
|
| 835 |
+ if err != nil {
|
|
| 836 |
+ return job.Error(err) |
|
| 837 |
+ } |
|
| 838 |
+ |
|
| 839 |
+ lookupMap := make(map[string][]string) |
|
| 840 |
+ for name, repository := range srv.runtime.Repositories().Repositories {
|
|
| 841 |
+ for tag, id := range repository {
|
|
| 842 |
+ // Build a reverse lookup of every repo:tag pair that references each image ID |
|
| 843 |
+ if _, exists := lookupMap[id]; !exists {
|
|
| 844 |
+ lookupMap[id] = []string{}
|
|
| 845 |
+ } |
|
| 846 |
+ lookupMap[id] = append(lookupMap[id], name+":"+tag) |
|
| 847 |
+ } |
|
| 848 |
+ } |
|
| 849 |
+ |
|
| 850 |
+ outs := engine.NewTable("Created", 0)
|
|
| 851 |
+ err = foundImage.WalkHistory(func(img *image.Image) error {
|
|
| 852 |
+ out := &engine.Env{}
|
|
| 853 |
+ out.Set("Id", img.ID)
|
|
| 854 |
+ out.SetInt64("Created", img.Created.Unix())
|
|
| 855 |
+ out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " "))
|
|
| 856 |
+ out.SetList("Tags", lookupMap[img.ID])
|
|
| 857 |
+ out.SetInt64("Size", img.Size)
|
|
| 858 |
+ outs.Add(out) |
|
| 859 |
+ return nil |
|
| 860 |
+ }) |
|
| 861 |
+ outs.ReverseSort() |
|
| 862 |
+ if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
|
| 863 |
+ return job.Error(err) |
|
| 864 |
+ } |
|
| 865 |
+ return engine.StatusOK |
|
| 866 |
+} |
|
| 867 |
+ |
|
| 868 |
+func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
|
|
| 869 |
+ if len(job.Args) != 1 && len(job.Args) != 2 {
|
|
| 870 |
+ return job.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name)
|
|
| 871 |
+ } |
|
| 872 |
+ var ( |
|
| 873 |
+ name = job.Args[0] |
|
| 874 |
+ psArgs = "-ef" |
|
| 875 |
+ ) |
|
| 876 |
+ |
|
| 877 |
+ if len(job.Args) == 2 && job.Args[1] != "" {
|
|
| 878 |
+ psArgs = job.Args[1] |
|
| 879 |
+ } |
|
| 880 |
+ |
|
| 881 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 882 |
+ if !container.State.IsRunning() {
|
|
| 883 |
+ return job.Errorf("Container %s is not running", name)
|
|
| 884 |
+ } |
|
| 885 |
+ pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID) |
|
| 886 |
+ if err != nil {
|
|
| 887 |
+ return job.Error(err) |
|
| 888 |
+ } |
|
| 889 |
+ output, err := exec.Command("ps", psArgs).Output()
|
|
| 890 |
+ if err != nil {
|
|
| 891 |
+ return job.Errorf("Error running ps: %s", err)
|
|
| 892 |
+ } |
|
| 893 |
+ |
|
| 894 |
+ lines := strings.Split(string(output), "\n") |
|
| 895 |
+ header := strings.Fields(lines[0]) |
|
| 896 |
+ out := &engine.Env{}
|
|
| 897 |
+ out.SetList("Titles", header)
|
|
| 898 |
+ |
|
| 899 |
+ pidIndex := -1 |
|
| 900 |
+ for i, name := range header {
|
|
| 901 |
+ if name == "PID" {
|
|
| 902 |
+ pidIndex = i |
|
| 903 |
+ } |
|
| 904 |
+ } |
|
| 905 |
+ if pidIndex == -1 {
|
|
| 906 |
+ return job.Errorf("Couldn't find PID field in ps output")
|
|
| 907 |
+ } |
|
| 908 |
+ |
|
| 909 |
+ processes := [][]string{}
|
|
| 910 |
+ for _, line := range lines[1:] {
|
|
| 911 |
+ if len(line) == 0 {
|
|
| 912 |
+ continue |
|
| 913 |
+ } |
|
| 914 |
+ fields := strings.Fields(line) |
|
| 915 |
+ p, err := strconv.Atoi(fields[pidIndex]) |
|
| 916 |
+ if err != nil {
|
|
| 917 |
+ return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
|
|
| 918 |
+ } |
|
| 919 |
+ |
|
| 920 |
+ for _, pid := range pids {
|
|
| 921 |
+ if pid == p {
|
|
| 922 |
+ // Make sure number of fields equals number of header titles |
|
| 923 |
+ // by merging the "overhanging" fields into the final column |
|
| 924 |
+ process := fields[:len(header)-1] |
|
| 925 |
+ process = append(process, strings.Join(fields[len(header)-1:], " ")) |
|
| 926 |
+ processes = append(processes, process) |
|
| 927 |
+ } |
|
| 928 |
+ } |
|
| 929 |
+ } |
|
| 930 |
+ out.SetJson("Processes", processes)
|
|
| 931 |
+ out.WriteTo(job.Stdout) |
|
| 932 |
+ return engine.StatusOK |
|
| 933 |
+ |
|
| 934 |
+ } |
|
| 935 |
+ return job.Errorf("No such container: %s", name)
|
|
| 936 |
+} |
|
| 937 |
+ |
|
| 938 |
+func (srv *Server) ContainerChanges(job *engine.Job) engine.Status {
|
|
| 939 |
+ if n := len(job.Args); n != 1 {
|
|
| 940 |
+ return job.Errorf("Usage: %s CONTAINER", job.Name)
|
|
| 941 |
+ } |
|
| 942 |
+ name := job.Args[0] |
|
| 943 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 944 |
+ outs := engine.NewTable("", 0)
|
|
| 945 |
+ changes, err := container.Changes() |
|
| 946 |
+ if err != nil {
|
|
| 947 |
+ return job.Error(err) |
|
| 948 |
+ } |
|
| 949 |
+ for _, change := range changes {
|
|
| 950 |
+ out := &engine.Env{}
|
|
| 951 |
+ if err := out.Import(change); err != nil {
|
|
| 952 |
+ return job.Error(err) |
|
| 953 |
+ } |
|
| 954 |
+ outs.Add(out) |
|
| 955 |
+ } |
|
| 956 |
+ if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
|
| 957 |
+ return job.Error(err) |
|
| 958 |
+ } |
|
| 959 |
+ } else {
|
|
| 960 |
+ return job.Errorf("No such container: %s", name)
|
|
| 961 |
+ } |
|
| 962 |
+ return engine.StatusOK |
|
| 963 |
+} |
|
| 964 |
+ |
|
| 965 |
+func (srv *Server) Containers(job *engine.Job) engine.Status {
|
|
| 966 |
+ var ( |
|
| 967 |
+ foundBefore bool |
|
| 968 |
+ displayed int |
|
| 969 |
+ all = job.GetenvBool("all")
|
|
| 970 |
+ since = job.Getenv("since")
|
|
| 971 |
+ before = job.Getenv("before")
|
|
| 972 |
+ n = job.GetenvInt("limit")
|
|
| 973 |
+ size = job.GetenvBool("size")
|
|
| 974 |
+ ) |
|
| 975 |
+ outs := engine.NewTable("Created", 0)
|
|
| 976 |
+ |
|
| 977 |
+ names := map[string][]string{}
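+ // Walk the container graph to collect every name (path) registered for
+ // each container ID.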
|
|
| 978 |
+ srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
|
|
| 979 |
+ names[e.ID()] = append(names[e.ID()], p) |
|
| 980 |
+ return nil |
|
| 981 |
+ }, -1) |
|
| 982 |
+ |
|
| 983 |
+ for _, container := range srv.runtime.List() {
|
|
| 984 |
+ if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
|
|
| 985 |
+ continue |
|
| 986 |
+ } |
|
| 987 |
+ if before != "" && !foundBefore {
|
|
| 988 |
+ if container.ID == before || utils.TruncateID(container.ID) == before {
|
|
| 989 |
+ foundBefore = true |
|
| 990 |
+ } |
|
| 991 |
+ continue |
|
| 992 |
+ } |
|
| 993 |
+ if n > 0 && displayed == n {
|
|
| 994 |
+ break |
|
| 995 |
+ } |
|
| 996 |
+ if container.ID == since || utils.TruncateID(container.ID) == since {
|
|
| 997 |
+ break |
|
| 998 |
+ } |
|
| 999 |
+ displayed++ |
|
| 1000 |
+ out := &engine.Env{}
|
|
| 1001 |
+ out.Set("Id", container.ID)
|
|
| 1002 |
+ out.SetList("Names", names[container.ID])
|
|
| 1003 |
+ out.Set("Image", srv.runtime.Repositories().ImageName(container.Image))
|
|
| 1004 |
+ if len(container.Args) > 0 {
|
|
| 1005 |
+ out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " ")))
|
|
| 1006 |
+ } else {
|
|
| 1007 |
+ out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
|
|
| 1008 |
+ } |
|
| 1009 |
+ out.SetInt64("Created", container.Created.Unix())
|
|
| 1010 |
+ out.Set("Status", container.State.String())
|
|
| 1011 |
+ str, err := container.NetworkSettings.PortMappingAPI().ToListString() |
|
| 1012 |
+ if err != nil {
|
|
| 1013 |
+ return job.Error(err) |
|
| 1014 |
+ } |
|
| 1015 |
+ out.Set("Ports", str)
|
|
| 1016 |
+ if size {
|
|
| 1017 |
+ sizeRw, sizeRootFs := container.GetSize() |
|
| 1018 |
+ out.SetInt64("SizeRw", sizeRw)
|
|
| 1019 |
+ out.SetInt64("SizeRootFs", sizeRootFs)
|
|
| 1020 |
+ } |
|
| 1021 |
+ outs.Add(out) |
|
| 1022 |
+ } |
|
| 1023 |
+ outs.ReverseSort() |
|
| 1024 |
+ if _, err := outs.WriteListTo(job.Stdout); err != nil {
|
|
| 1025 |
+ return job.Error(err) |
|
| 1026 |
+ } |
|
| 1027 |
+ return engine.StatusOK |
|
| 1028 |
+} |
|
| 1029 |
+ |
|
| 1030 |
+func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
|
|
| 1031 |
+ if len(job.Args) != 1 {
|
|
| 1032 |
+ return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
|
|
| 1033 |
+ } |
|
| 1034 |
+ name := job.Args[0] |
|
| 1035 |
+ |
|
| 1036 |
+ container := srv.runtime.Get(name) |
|
| 1037 |
+ if container == nil {
|
|
| 1038 |
+ return job.Errorf("No such container: %s", name)
|
|
| 1039 |
+ } |
|
| 1040 |
+ var config = container.Config |
|
| 1041 |
+ var newConfig runconfig.Config |
|
| 1042 |
+ if err := job.GetenvJson("config", &newConfig); err != nil {
|
|
| 1043 |
+ return job.Error(err) |
|
| 1044 |
+ } |
|
| 1045 |
+ |
|
| 1046 |
+ if err := runconfig.Merge(&newConfig, config); err != nil {
|
|
| 1047 |
+ return job.Error(err) |
|
| 1048 |
+ } |
|
| 1049 |
+ |
|
| 1050 |
+ img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
|
|
| 1051 |
+ if err != nil {
|
|
| 1052 |
+ return job.Error(err) |
|
| 1053 |
+ } |
|
| 1054 |
+ job.Printf("%s\n", img.ID)
|
|
| 1055 |
+ return engine.StatusOK |
|
| 1056 |
+} |
|
| 1057 |
+ |
|
| 1058 |
+func (srv *Server) ImageTag(job *engine.Job) engine.Status {
|
|
| 1059 |
+ if len(job.Args) != 2 && len(job.Args) != 3 {
|
|
| 1060 |
+ return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name)
|
|
| 1061 |
+ } |
|
| 1062 |
+ var tag string |
|
| 1063 |
+ if len(job.Args) == 3 {
|
|
| 1064 |
+ tag = job.Args[2] |
|
| 1065 |
+ } |
|
| 1066 |
+ if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
|
|
| 1067 |
+ return job.Error(err) |
|
| 1068 |
+ } |
|
| 1069 |
+ return engine.StatusOK |
|
| 1070 |
+} |
|
| 1071 |
+ |
|
| 1072 |
+func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error {
|
|
| 1073 |
+ history, err := r.GetRemoteHistory(imgID, endpoint, token) |
|
| 1074 |
+ if err != nil {
|
|
| 1075 |
+ return err |
|
| 1076 |
+ } |
|
| 1077 |
+ out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) |
|
| 1078 |
+ // FIXME: Try to stream the images? |
|
| 1079 |
+ // FIXME: Launch the getRemoteImage() in goroutines |
|
| 1080 |
+ |
|
| 1081 |
+ for i := len(history) - 1; i >= 0; i-- {
|
|
| 1082 |
+ id := history[i] |
|
| 1083 |
+ |
|
| 1084 |
+ // ensure no two downloads of the same layer happen at the same time |
|
| 1085 |
+ if c, err := srv.poolAdd("pull", "layer:"+id); err != nil {
|
|
| 1086 |
+ utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err)
|
|
| 1087 |
+ <-c |
|
| 1088 |
+ } |
|
| 1089 |
+ defer srv.poolRemove("pull", "layer:"+id)
|
|
| 1090 |
+ |
|
| 1091 |
+ if !srv.runtime.Graph().Exists(id) {
|
|
| 1092 |
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) |
|
| 1093 |
+ var ( |
|
| 1094 |
+ imgJSON []byte |
|
| 1095 |
+ imgSize int |
|
| 1096 |
+ err error |
|
| 1097 |
+ img *image.Image |
|
| 1098 |
+ ) |
|
| 1099 |
+ retries := 5 |
|
| 1100 |
+ for j := 1; j <= retries; j++ {
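+ // Retry with a linear backoff: 500ms, 1s, 1.5s, 2s between attempts.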
|
|
| 1101 |
+ imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) |
|
| 1102 |
+ if err != nil && j == retries {
|
|
| 1103 |
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) |
|
| 1104 |
+ return err |
|
| 1105 |
+ } else if err != nil {
|
|
| 1106 |
+ time.Sleep(time.Duration(j) * 500 * time.Millisecond) |
|
| 1107 |
+ continue |
|
| 1108 |
+ } |
|
| 1109 |
+ img, err = image.NewImgJSON(imgJSON) |
|
| 1110 |
+ if err != nil && j == retries {
|
|
| 1111 |
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) |
|
| 1112 |
+ return fmt.Errorf("Failed to parse json: %s", err)
|
|
| 1113 |
+ } else if err != nil {
|
|
| 1114 |
+ time.Sleep(time.Duration(j) * 500 * time.Millisecond) |
|
| 1115 |
+ continue |
|
| 1116 |
+ } else {
|
|
| 1117 |
+ break |
|
| 1118 |
+ } |
|
| 1119 |
+ } |
|
| 1120 |
+ |
|
| 1121 |
+ // Get the layer |
|
| 1122 |
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil)) |
|
| 1123 |
+ layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token) |
|
| 1124 |
+ if err != nil {
|
|
| 1125 |
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) |
|
| 1126 |
+ return err |
|
| 1127 |
+ } |
|
| 1128 |
+ defer layer.Close() |
|
| 1129 |
+ if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
|
|
| 1130 |
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) |
|
| 1131 |
+ return err |
|
| 1132 |
+ } |
|
| 1133 |
+ } |
|
| 1134 |
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) |
|
| 1135 |
+ |
|
| 1136 |
+ } |
|
| 1137 |
+ return nil |
|
| 1138 |
+} |
|
| 1139 |
+ |
|
| 1140 |
+func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
|
|
| 1141 |
+ out.Write(sf.FormatStatus("", "Pulling repository %s", localName))
|
|
| 1142 |
+ |
|
| 1143 |
+ repoData, err := r.GetRepositoryData(remoteName) |
|
| 1144 |
+ if err != nil {
|
|
| 1145 |
+ return err |
|
| 1146 |
+ } |
|
| 1147 |
+ |
|
| 1148 |
+ utils.Debugf("Retrieving the tag list")
|
|
| 1149 |
+ tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) |
|
| 1150 |
+ if err != nil {
|
|
| 1151 |
+ utils.Errorf("%v", err)
|
|
| 1152 |
+ return err |
|
| 1153 |
+ } |
|
| 1154 |
+ |
|
| 1155 |
+ for tag, id := range tagsList {
|
|
| 1156 |
+ repoData.ImgList[id] = ®istry.ImgData{
|
|
| 1157 |
+ ID: id, |
|
| 1158 |
+ Tag: tag, |
|
| 1159 |
+ Checksum: "", |
|
| 1160 |
+ } |
|
| 1161 |
+ } |
|
| 1162 |
+ |
|
| 1163 |
+ utils.Debugf("Registering tags")
|
|
| 1164 |
+ // If no tag has been specified, pull them all |
|
| 1165 |
+ if askedTag == "" {
|
|
| 1166 |
+ for tag, id := range tagsList {
|
|
| 1167 |
+ repoData.ImgList[id].Tag = tag |
|
| 1168 |
+ } |
|
| 1169 |
+ } else {
|
|
| 1170 |
+ // Otherwise, check that the tag exists and use only that one |
|
| 1171 |
+ id, exists := tagsList[askedTag] |
|
| 1172 |
+ if !exists {
|
|
| 1173 |
+ return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
|
|
| 1174 |
+ } |
|
| 1175 |
+ repoData.ImgList[id].Tag = askedTag |
|
| 1176 |
+ } |
|
| 1177 |
+ |
|
| 1178 |
+ errors := make(chan error) |
|
| 1179 |
+ for _, image := range repoData.ImgList {
|
|
| 1180 |
+ downloadImage := func(img *registry.ImgData) {
|
|
| 1181 |
+ if askedTag != "" && img.Tag != askedTag {
|
|
| 1182 |
+ utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID)
|
|
| 1183 |
+ if parallel {
|
|
| 1184 |
+ errors <- nil |
|
| 1185 |
+ } |
|
| 1186 |
+ return |
|
| 1187 |
+ } |
|
| 1188 |
+ |
|
| 1189 |
+ if img.Tag == "" {
|
|
| 1190 |
+ utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
|
|
| 1191 |
+ if parallel {
|
|
| 1192 |
+ errors <- nil |
|
| 1193 |
+ } |
|
| 1194 |
+ return |
|
| 1195 |
+ } |
|
| 1196 |
+ |
|
| 1197 |
+ // ensure no two downloads of the same image happen at the same time |
|
| 1198 |
+ if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil {
|
|
| 1199 |
+ if c != nil {
|
|
| 1200 |
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) |
|
| 1201 |
+ <-c |
|
| 1202 |
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) |
|
| 1203 |
+ } else {
|
|
| 1204 |
+ utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
|
|
| 1205 |
+ } |
|
| 1206 |
+ if parallel {
|
|
| 1207 |
+ errors <- nil |
|
| 1208 |
+ } |
|
| 1209 |
+ return |
|
| 1210 |
+ } |
|
| 1211 |
+ defer srv.poolRemove("pull", "img:"+img.ID)
|
|
| 1212 |
+ |
|
| 1213 |
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil))
|
|
| 1214 |
+ success := false |
|
| 1215 |
+ var lastErr error |
|
| 1216 |
+ for _, ep := range repoData.Endpoints {
|
|
| 1217 |
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
|
|
| 1218 |
+ if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
|
|
| 1219 |
+ // It's not ideal that only the last error is returned; it would be better to concatenate the errors. |
|
| 1220 |
+ // Since the error is also written to the output stream, the user will still see it. |
|
| 1221 |
+ lastErr = err |
|
| 1222 |
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
|
|
| 1223 |
+ continue |
|
| 1224 |
+ } |
|
| 1225 |
+ success = true |
|
| 1226 |
+ break |
|
| 1227 |
+ } |
|
| 1228 |
+ if !success {
|
|
| 1229 |
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil))
|
|
| 1230 |
+ if parallel {
|
|
| 1231 |
+ errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
|
|
| 1232 |
+ return |
|
| 1233 |
+ } |
|
| 1234 |
+ } |
|
| 1235 |
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) |
|
| 1236 |
+ |
|
| 1237 |
+ if parallel {
|
|
| 1238 |
+ errors <- nil |
|
| 1239 |
+ } |
|
| 1240 |
+ } |
|
| 1241 |
+ |
|
| 1242 |
+ if parallel {
|
|
| 1243 |
+ go downloadImage(image) |
|
| 1244 |
+ } else {
|
|
| 1245 |
+ downloadImage(image) |
|
| 1246 |
+ } |
|
| 1247 |
+ } |
|
| 1248 |
+ if parallel {
|
|
| 1249 |
+ var lastError error |
|
| 1250 |
+ for i := 0; i < len(repoData.ImgList); i++ {
|
|
| 1251 |
+ if err := <-errors; err != nil {
|
|
| 1252 |
+ lastError = err |
|
| 1253 |
+ } |
|
| 1254 |
+ } |
|
| 1255 |
+ if lastError != nil {
|
|
| 1256 |
+ return lastError |
|
| 1257 |
+ } |
|
| 1258 |
+ |
|
| 1259 |
+ } |
|
| 1260 |
+ for tag, id := range tagsList {
|
|
| 1261 |
+ if askedTag != "" && tag != askedTag {
|
|
| 1262 |
+ continue |
|
| 1263 |
+ } |
|
| 1264 |
+ if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil {
|
|
| 1265 |
+ return err |
|
| 1266 |
+ } |
|
| 1267 |
+ } |
|
| 1268 |
+ if err := srv.runtime.Repositories().Save(); err != nil {
|
|
| 1269 |
+ return err |
|
| 1270 |
+ } |
|
| 1271 |
+ |
|
| 1272 |
+ return nil |
|
| 1273 |
+} |
|
| 1274 |
+ |
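+// poolAdd registers an in-flight pull or push under the given key. If a
+// transfer for that key is already running, it returns the existing channel
+// (closed by poolRemove when the transfer finishes) together with an error,
+// so callers can wait on it instead of starting a duplicate.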
|
| 1275 |
+func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) {
|
|
| 1276 |
+ srv.Lock() |
|
| 1277 |
+ defer srv.Unlock() |
|
| 1278 |
+ |
|
| 1279 |
+ if c, exists := srv.pullingPool[key]; exists {
|
|
| 1280 |
+ return c, fmt.Errorf("pull %s is already in progress", key)
|
|
| 1281 |
+ } |
|
| 1282 |
+ if c, exists := srv.pushingPool[key]; exists {
|
|
| 1283 |
+ return c, fmt.Errorf("push %s is already in progress", key)
|
|
| 1284 |
+ } |
|
| 1285 |
+ |
|
| 1286 |
+ c := make(chan struct{})
|
|
| 1287 |
+ switch kind {
|
|
| 1288 |
+ case "pull": |
|
| 1289 |
+ srv.pullingPool[key] = c |
|
| 1290 |
+ case "push": |
|
| 1291 |
+ srv.pushingPool[key] = c |
|
| 1292 |
+ default: |
|
| 1293 |
+ return nil, fmt.Errorf("Unknown pool type")
|
|
| 1294 |
+ } |
|
| 1295 |
+ return c, nil |
|
| 1296 |
+} |
|
| 1297 |
+ |
|
| 1298 |
+func (srv *Server) poolRemove(kind, key string) error {
|
|
| 1299 |
+ srv.Lock() |
|
| 1300 |
+ defer srv.Unlock() |
|
| 1301 |
+ switch kind {
|
|
| 1302 |
+ case "pull": |
|
| 1303 |
+ if c, exists := srv.pullingPool[key]; exists {
|
|
| 1304 |
+ close(c) |
|
| 1305 |
+ delete(srv.pullingPool, key) |
|
| 1306 |
+ } |
|
| 1307 |
+ case "push": |
|
| 1308 |
+ if c, exists := srv.pushingPool[key]; exists {
|
|
| 1309 |
+ close(c) |
|
| 1310 |
+ delete(srv.pushingPool, key) |
|
| 1311 |
+ } |
|
| 1312 |
+ default: |
|
| 1313 |
+ return fmt.Errorf("Unknown pool type")
|
|
| 1314 |
+ } |
|
| 1315 |
+ return nil |
|
| 1316 |
+} |
|
| 1317 |
+ |
|
| 1318 |
+func (srv *Server) ImagePull(job *engine.Job) engine.Status {
|
|
| 1319 |
+ if n := len(job.Args); n != 1 && n != 2 {
|
|
| 1320 |
+ return job.Errorf("Usage: %s IMAGE [TAG]", job.Name)
|
|
| 1321 |
+ } |
|
| 1322 |
+ var ( |
|
| 1323 |
+ localName = job.Args[0] |
|
| 1324 |
+ tag string |
|
| 1325 |
+ sf = utils.NewStreamFormatter(job.GetenvBool("json"))
|
|
| 1326 |
+ authConfig = ®istry.AuthConfig{}
|
|
| 1327 |
+ metaHeaders map[string][]string |
|
| 1328 |
+ ) |
|
| 1329 |
+ if len(job.Args) > 1 {
|
|
| 1330 |
+ tag = job.Args[1] |
|
| 1331 |
+ } |
|
| 1332 |
+ |
|
| 1333 |
+ job.GetenvJson("authConfig", authConfig)
|
|
| 1334 |
+ job.GetenvJson("metaHeaders", metaHeaders)
|
|
| 1335 |
+ |
|
| 1336 |
+ c, err := srv.poolAdd("pull", localName+":"+tag)
|
|
| 1337 |
+ if err != nil {
|
|
| 1338 |
+ if c != nil {
|
|
| 1339 |
+ // Another pull of the same repository is already taking place; just wait for it to finish |
|
| 1340 |
+ job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
|
|
| 1341 |
+ <-c |
|
| 1342 |
+ return engine.StatusOK |
|
| 1343 |
+ } |
|
| 1344 |
+ return job.Error(err) |
|
| 1345 |
+ } |
|
| 1346 |
+ defer srv.poolRemove("pull", localName+":"+tag)
|
|
| 1347 |
+ |
|
| 1348 |
+ // Resolve the Repository name from fqn to endpoint + name |
|
| 1349 |
+ hostname, remoteName, err := registry.ResolveRepositoryName(localName) |
|
| 1350 |
+ if err != nil {
|
|
| 1351 |
+ return job.Error(err) |
|
| 1352 |
+ } |
|
| 1353 |
+ |
|
| 1354 |
+ endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) |
|
| 1355 |
+ if err != nil {
|
|
| 1356 |
+ return job.Error(err) |
|
| 1357 |
+ } |
|
| 1358 |
+ |
|
| 1359 |
+ r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) |
|
| 1360 |
+ if err != nil {
|
|
| 1361 |
+ return job.Error(err) |
|
| 1362 |
+ } |
|
| 1363 |
+ |
|
| 1364 |
+ if endpoint == registry.IndexServerAddress() {
|
|
| 1365 |
+ // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar" |
|
| 1366 |
+ localName = remoteName |
|
| 1367 |
+ } |
|
| 1368 |
+ |
|
| 1369 |
+ if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
|
|
| 1370 |
+ return job.Error(err) |
|
| 1371 |
+ } |
|
| 1372 |
+ |
|
| 1373 |
+ return engine.StatusOK |
|
| 1374 |
+} |
|
| 1375 |
+ |
|
| 1376 |
+// Retrieve all the images to be uploaded, in the correct order |
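+// Parents always precede their children in the returned list, so layers can
+// be pushed bottom-up.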
|
| 1377 |
+func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) {
|
|
| 1378 |
+ var ( |
|
| 1379 |
+ imageList []string |
|
| 1380 |
+ imagesSeen map[string]bool = make(map[string]bool) |
|
| 1381 |
+ tagsByImage map[string][]string = make(map[string][]string) |
|
| 1382 |
+ ) |
|
| 1383 |
+ |
|
| 1384 |
+ for tag, id := range localRepo {
|
|
| 1385 |
+ var imageListForThisTag []string |
|
| 1386 |
+ |
|
| 1387 |
+ tagsByImage[id] = append(tagsByImage[id], tag) |
|
| 1388 |
+ |
|
| 1389 |
+ for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
|
|
| 1390 |
+ if err != nil {
|
|
| 1391 |
+ return nil, nil, err |
|
| 1392 |
+ } |
|
| 1393 |
+ |
|
| 1394 |
+ if imagesSeen[img.ID] {
|
|
| 1395 |
+ // This image is already on the list; we can ignore it and all its parents |
|
| 1396 |
+ break |
|
| 1397 |
+ } |
|
| 1398 |
+ |
|
| 1399 |
+ imagesSeen[img.ID] = true |
|
| 1400 |
+ imageListForThisTag = append(imageListForThisTag, img.ID) |
|
| 1401 |
+ } |
|
| 1402 |
+ |
|
| 1403 |
+ // reverse the image list for this tag (so the oldest ancestor image comes first) |
|
| 1404 |
+ for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
|
|
| 1405 |
+ imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] |
|
| 1406 |
+ } |
|
| 1407 |
+ |
|
| 1408 |
+ // append to main image list |
|
| 1409 |
+ imageList = append(imageList, imageListForThisTag...) |
|
| 1410 |
+ } |
|
| 1411 |
+ |
|
| 1412 |
+ utils.Debugf("Image list: %v", imageList)
|
|
| 1413 |
+ utils.Debugf("Tags by image: %v", tagsByImage)
|
|
| 1414 |
+ |
|
| 1415 |
+ return imageList, tagsByImage, nil |
|
| 1416 |
+} |
|
| 1417 |
+ |
|
| 1418 |
+func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error {
|
|
| 1419 |
+ out = utils.NewWriteFlusher(out) |
|
| 1420 |
+ utils.Debugf("Local repo: %s", localRepo)
|
|
| 1421 |
+ imgList, tagsByImage, err := srv.getImageList(localRepo) |
|
| 1422 |
+ if err != nil {
|
|
| 1423 |
+ return err |
|
| 1424 |
+ } |
|
| 1425 |
+ |
|
| 1426 |
+ out.Write(sf.FormatStatus("", "Sending image list"))
|
|
| 1427 |
+ |
|
| 1428 |
+ var repoData *registry.RepositoryData |
|
| 1429 |
+ var imageIndex []*registry.ImgData |
|
| 1430 |
+ |
|
| 1431 |
+ for _, imgId := range imgList {
|
|
| 1432 |
+ if tags, exists := tagsByImage[imgId]; exists {
|
|
| 1433 |
+ // If an image has tags you must add an entry in the image index |
|
| 1434 |
+ // for each tag |
|
| 1435 |
+ for _, tag := range tags {
|
|
| 1436 |
+ imageIndex = append(imageIndex, ®istry.ImgData{
|
|
| 1437 |
+ ID: imgId, |
|
| 1438 |
+ Tag: tag, |
|
| 1439 |
+ }) |
|
| 1440 |
+ } |
|
| 1441 |
+ } else {
|
|
| 1442 |
+ // If the image does not have a tag it still needs to be sent to the |
|
| 1443 |
+ // registry with an empty tag so that it is associated with the repository |
|
| 1444 |
+ imageIndex = append(imageIndex, ®istry.ImgData{
|
|
| 1445 |
+ ID: imgId, |
|
| 1446 |
+ Tag: "", |
|
| 1447 |
+ }) |
|
| 1448 |
+ |
|
| 1449 |
+ } |
|
| 1450 |
+ } |
|
| 1451 |
+ |
|
| 1452 |
+ utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
|
|
| 1453 |
+ for _, data := range imageIndex {
|
|
| 1454 |
+ utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
|
|
| 1455 |
+ } |
|
| 1456 |
+ |
|
| 1457 |
+ // Register all the images in a repository with the registry |
|
| 1458 |
+ // If an image is not in this list it will not be associated with the repository |
|
| 1459 |
+ repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) |
|
| 1460 |
+ if err != nil {
|
|
| 1461 |
+ return err |
|
| 1462 |
+ } |
|
| 1463 |
+ |
|
| 1464 |
+ for _, ep := range repoData.Endpoints {
|
|
| 1465 |
+ out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
|
|
| 1466 |
+ |
|
| 1467 |
+ for _, imgId := range imgList {
|
|
| 1468 |
+ if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
|
|
| 1469 |
+ out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
|
|
| 1470 |
+ } else {
|
|
| 1471 |
+ if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
|
|
| 1472 |
+ // FIXME: Continue on error? |
|
| 1473 |
+ return err |
|
| 1474 |
+ } |
|
| 1475 |
+ } |
|
| 1476 |
+ |
|
| 1477 |
+ for _, tag := range tagsByImage[imgId] {
|
|
| 1478 |
+ out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))
|
|
| 1479 |
+ |
|
| 1480 |
+ if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
|
|
| 1481 |
+ return err |
|
| 1482 |
+ } |
|
| 1483 |
+ } |
|
| 1484 |
+ } |
|
| 1485 |
+ } |
|
| 1486 |
+ |
|
| 1487 |
+ if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
|
|
| 1488 |
+ return err |
|
| 1489 |
+ } |
|
| 1490 |
+ |
|
| 1491 |
+ return nil |
|
| 1492 |
+} |
|
| 1493 |
+ |
|
| 1494 |
+func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
|
|
| 1495 |
+ out = utils.NewWriteFlusher(out) |
|
| 1496 |
+ jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json")) |
|
| 1497 |
+ if err != nil {
|
|
| 1498 |
+ return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
|
|
| 1499 |
+ } |
|
| 1500 |
+ out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) |
|
| 1501 |
+ |
|
| 1502 |
+ imgData := ®istry.ImgData{
|
|
| 1503 |
+ ID: imgID, |
|
| 1504 |
+ } |
|
| 1505 |
+ |
|
| 1506 |
+ // Send the json |
|
| 1507 |
+ if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
|
|
| 1508 |
+ if err == registry.ErrAlreadyExists {
|
|
| 1509 |
+ out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) |
|
| 1510 |
+ return "", nil |
|
| 1511 |
+ } |
|
| 1512 |
+ return "", err |
|
| 1513 |
+ } |
|
| 1514 |
+ |
|
| 1515 |
+ layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out) |
|
| 1516 |
+ if err != nil {
|
|
| 1517 |
+ return "", fmt.Errorf("Failed to generate layer archive: %s", err)
|
|
| 1518 |
+ } |
|
| 1519 |
+ defer os.RemoveAll(layerData.Name()) |
|
| 1520 |
+ |
|
| 1521 |
+ // Send the layer |
|
| 1522 |
+ checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) |
|
| 1523 |
+ if err != nil {
|
|
| 1524 |
+ return "", err |
|
| 1525 |
+ } |
|
| 1526 |
+ imgData.Checksum = checksum |
|
| 1527 |
+ imgData.ChecksumPayload = checksumPayload |
|
| 1528 |
+ // Send the checksum |
|
| 1529 |
+ if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
|
|
| 1530 |
+ return "", err |
|
| 1531 |
+ } |
|
| 1532 |
+ |
|
| 1533 |
+ out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) |
|
| 1534 |
+ return imgData.Checksum, nil |
|
| 1535 |
+} |
|
| 1536 |
+ |
|
| 1537 |
+// FIXME: Allow interrupting the current push when a new push of the same image starts. |
|
| 1538 |
+func (srv *Server) ImagePush(job *engine.Job) engine.Status {
|
|
| 1539 |
+ if n := len(job.Args); n != 1 {
|
|
| 1540 |
+ return job.Errorf("Usage: %s IMAGE", job.Name)
|
|
| 1541 |
+ } |
|
| 1542 |
+ var ( |
|
| 1543 |
+ localName = job.Args[0] |
|
| 1544 |
+ sf = utils.NewStreamFormatter(job.GetenvBool("json"))
|
|
| 1545 |
+ authConfig = ®istry.AuthConfig{}
|
|
| 1546 |
+ metaHeaders map[string][]string |
|
| 1547 |
+ ) |
|
| 1548 |
+ |
|
| 1549 |
+ job.GetenvJson("authConfig", authConfig)
|
|
| 1550 |
+ job.GetenvJson("metaHeaders", metaHeaders)
|
|
| 1551 |
+ if _, err := srv.poolAdd("push", localName); err != nil {
|
|
| 1552 |
+ return job.Error(err) |
|
| 1553 |
+ } |
|
| 1554 |
+ defer srv.poolRemove("push", localName)
|
|
| 1555 |
+ |
|
| 1556 |
+ // Resolve the Repository name from fqn to endpoint + name |
|
| 1557 |
+ hostname, remoteName, err := registry.ResolveRepositoryName(localName) |
|
| 1558 |
+ if err != nil {
|
|
| 1559 |
+ return job.Error(err) |
|
| 1560 |
+ } |
|
| 1561 |
+ |
|
| 1562 |
+ endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) |
|
| 1563 |
+ if err != nil {
|
|
| 1564 |
+ return job.Error(err) |
|
| 1565 |
+ } |
|
| 1566 |
+ |
|
| 1567 |
+ img, err := srv.runtime.Graph().Get(localName) |
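+ // Note: err is deliberately checked further down; if localName does not
+ // resolve to a single image, we fall back to pushing the whole repository.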
|
| 1568 |
+ r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) |
|
| 1569 |
+ if err2 != nil {
|
|
| 1570 |
+ return job.Error(err2) |
|
| 1571 |
+ } |
|
| 1572 |
+ |
|
| 1573 |
+ if err != nil {
|
|
| 1574 |
+ reposLen := len(srv.runtime.Repositories().Repositories[localName]) |
|
| 1575 |
+ job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
|
|
| 1576 |
+ // If it fails, try to get the repository |
|
| 1577 |
+ if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
|
|
| 1578 |
+ if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil {
|
|
| 1579 |
+ return job.Error(err) |
|
| 1580 |
+ } |
|
| 1581 |
+ return engine.StatusOK |
|
| 1582 |
+ } |
|
| 1583 |
+ return job.Error(err) |
|
| 1584 |
+ } |
|
| 1585 |
+ |
|
| 1586 |
+ var token []string |
|
| 1587 |
+ job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
|
|
| 1588 |
+ if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
|
|
| 1589 |
+ return job.Error(err) |
|
| 1590 |
+ } |
|
| 1591 |
+ return engine.StatusOK |
|
| 1592 |
+} |
|
| 1593 |
+ |
|
| 1594 |
+func (srv *Server) ImageImport(job *engine.Job) engine.Status {
|
|
| 1595 |
+ if n := len(job.Args); n != 2 && n != 3 {
|
|
| 1596 |
+ return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
|
|
| 1597 |
+ } |
|
| 1598 |
+ var ( |
|
| 1599 |
+ src = job.Args[0] |
|
| 1600 |
+ repo = job.Args[1] |
|
| 1601 |
+ tag string |
|
| 1602 |
+ sf = utils.NewStreamFormatter(job.GetenvBool("json"))
|
|
| 1603 |
+ archive archive.ArchiveReader |
|
| 1604 |
+ resp *http.Response |
|
| 1605 |
+ ) |
|
| 1606 |
+ if len(job.Args) > 2 {
|
|
| 1607 |
+ tag = job.Args[2] |
|
| 1608 |
+ } |
|
| 1609 |
+ |
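+ // A SRC of "-" reads the image tarball from the job's stdin; anything else is treated as a URL to download |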
|
| 1610 |
+ if src == "-" {
|
|
| 1611 |
+ archive = job.Stdin |
|
| 1612 |
+ } else {
|
|
| 1613 |
+ u, err := url.Parse(src) |
|
| 1614 |
+ if err != nil {
|
|
| 1615 |
+ return job.Error(err) |
|
| 1616 |
+ } |
|
| 1617 |
+ if u.Scheme == "" {
|
|
| 1618 |
+ u.Scheme = "http" |
|
| 1619 |
+ u.Host = src |
|
| 1620 |
+ u.Path = "" |
|
| 1621 |
+ } |
|
| 1622 |
+ job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
|
|
| 1623 |
+ // Download with curl (pretty progress bar) |
|
| 1624 |
+ // If curl is not available, fall back to http.Get() |
|
| 1625 |
+ resp, err = utils.Download(u.String()) |
|
| 1626 |
+ if err != nil {
|
|
| 1627 |
+ return job.Error(err) |
|
| 1628 |
+ } |
|
| 1629 |
+ progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") |
|
| 1630 |
+ defer progressReader.Close() |
|
| 1631 |
+ archive = progressReader |
|
| 1632 |
+ } |
|
| 1633 |
+ img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil) |
|
| 1634 |
+ if err != nil {
|
|
| 1635 |
+ return job.Error(err) |
|
| 1636 |
+ } |
|
| 1637 |
+ // Optionally register the image at REPO/TAG |
|
| 1638 |
+ if repo != "" {
|
|
| 1639 |
+ if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
|
|
| 1640 |
+ return job.Error(err) |
|
| 1641 |
+ } |
|
| 1642 |
+ } |
|
| 1643 |
+ job.Stdout.Write(sf.FormatStatus("", img.ID))
|
|
| 1644 |
+ return engine.StatusOK |
|
| 1645 |
+} |
|
| 1646 |
+ |
|
| 1647 |
+func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
|
|
| 1648 |
+ var name string |
|
| 1649 |
+ if len(job.Args) == 1 {
|
|
| 1650 |
+ name = job.Args[0] |
|
| 1651 |
+ } else if len(job.Args) > 1 {
|
|
| 1652 |
+ return job.Errorf("Usage: %s", job.Name)
|
|
| 1653 |
+ } |
|
| 1654 |
+ config := runconfig.ContainerConfigFromJob(job) |
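+ // 524288 bytes is the 512k minimum referenced in the error below |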
|
| 1655 |
+ if config.Memory != 0 && config.Memory < 524288 {
|
|
| 1656 |
+ return job.Errorf("Minimum memory limit allowed is 512k")
|
|
| 1657 |
+ } |
|
| 1658 |
+ if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit {
|
|
| 1659 |
+ job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
|
|
| 1660 |
+ config.Memory = 0 |
|
| 1661 |
+ } |
|
| 1662 |
+ if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit {
|
|
| 1663 |
+ job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
|
|
| 1664 |
+ config.MemorySwap = -1 |
|
| 1665 |
+ } |
|
| 1666 |
+ resolvConf, err := utils.GetResolvConf() |
|
| 1667 |
+ if err != nil {
|
|
| 1668 |
+ return job.Error(err) |
|
| 1669 |
+ } |
|
| 1670 |
+ if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Config().Dns) == 0 && utils.CheckLocalDns(resolvConf) {
|
|
| 1671 |
+ job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", runtime.DefaultDns)
|
|
| 1672 |
+ config.Dns = runtime.DefaultDns |
|
| 1673 |
+ } |
|
| 1674 |
+ |
|
| 1675 |
+ container, buildWarnings, err := srv.runtime.Create(config, name) |
|
| 1676 |
+ if err != nil {
|
|
| 1677 |
+ if srv.runtime.Graph().IsNotExist(err) {
|
|
| 1678 |
+ _, tag := utils.ParseRepositoryTag(config.Image) |
|
| 1679 |
+ if tag == "" {
|
|
| 1680 |
+ tag = graph.DEFAULTTAG |
|
| 1681 |
+ } |
|
| 1682 |
+ return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
|
|
| 1683 |
+ } |
|
| 1684 |
+ return job.Error(err) |
|
| 1685 |
+ } |
|
| 1686 |
+ if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled {
|
|
| 1687 |
+ job.Errorf("IPv4 forwarding is disabled.\n")
|
|
| 1688 |
+ } |
|
| 1689 |
+ srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 1690 |
+ // FIXME: this is necessary because runtime.Create might return a nil container |
|
| 1691 |
+ // with a non-nil error. This should not happen! Once it's fixed we |
|
| 1692 |
+ // can remove this workaround. |
|
| 1693 |
+ if container != nil {
|
|
| 1694 |
+ job.Printf("%s\n", container.ID)
|
|
| 1695 |
+ } |
|
| 1696 |
+ for _, warning := range buildWarnings {
|
|
| 1697 |
+ job.Errorf("%s\n", warning)
|
|
| 1698 |
+ } |
|
| 1699 |
+ return engine.StatusOK |
|
| 1700 |
+} |
|
| 1701 |
+ |
|
| 1702 |
+func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
|
|
| 1703 |
+ if len(job.Args) != 1 {
|
|
| 1704 |
+ return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 1705 |
+ } |
|
| 1706 |
+ var ( |
|
| 1707 |
+ name = job.Args[0] |
|
| 1708 |
+ t = 10 |
|
| 1709 |
+ ) |
|
| 1710 |
+ if job.EnvExists("t") {
|
|
| 1711 |
+ t = job.GetenvInt("t")
|
|
| 1712 |
+ } |
|
| 1713 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 1714 |
+ if err := container.Restart(int(t)); err != nil {
|
|
| 1715 |
+ return job.Errorf("Cannot restart container %s: %s\n", name, err)
|
|
| 1716 |
+ } |
|
| 1717 |
+ srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 1718 |
+ } else {
|
|
| 1719 |
+ return job.Errorf("No such container: %s\n", name)
|
|
| 1720 |
+ } |
|
| 1721 |
+ return engine.StatusOK |
|
| 1722 |
+} |
|
| 1723 |
+ |
|
| 1724 |
+func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
|
|
| 1725 |
+ if len(job.Args) != 1 {
|
|
| 1726 |
+ return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name)
|
|
| 1727 |
+ } |
|
| 1728 |
+ name := job.Args[0] |
|
| 1729 |
+ removeVolume := job.GetenvBool("removeVolume")
|
|
| 1730 |
+ removeLink := job.GetenvBool("removeLink")
|
|
| 1731 |
+ forceRemove := job.GetenvBool("forceRemove")
|
|
| 1732 |
+ |
|
| 1733 |
+ container := srv.runtime.Get(name) |
|
| 1734 |
+ |
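+ // removeLink only deletes the name from the container graph; the container itself is left intact |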
|
| 1735 |
+ if removeLink {
|
|
| 1736 |
+ if container == nil {
|
|
| 1737 |
+ return job.Errorf("No such link: %s", name)
|
|
| 1738 |
+ } |
|
| 1739 |
+ name, err := runtime.GetFullContainerName(name) |
|
| 1740 |
+ if err != nil {
|
|
| 1741 |
+ return job.Error(err) |
|
| 1742 |
+ } |
|
| 1743 |
+ parent, n := path.Split(name) |
|
| 1744 |
+ if parent == "/" {
|
|
| 1745 |
+ return job.Errorf("Conflict, cannot remove the default name of the container")
|
|
| 1746 |
+ } |
|
| 1747 |
+ pe := srv.runtime.ContainerGraph().Get(parent) |
|
| 1748 |
+ if pe == nil {
|
|
| 1749 |
+ return job.Errorf("Cannot get parent %s for name %s", parent, name)
|
|
| 1750 |
+ } |
|
| 1751 |
+ parentContainer := srv.runtime.Get(pe.ID()) |
|
| 1752 |
+ |
|
| 1753 |
+ if parentContainer != nil {
|
|
| 1754 |
+ parentContainer.DisableLink(n) |
|
| 1755 |
+ } |
|
| 1756 |
+ |
|
| 1757 |
+ if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
|
|
| 1758 |
+ return job.Error(err) |
|
| 1759 |
+ } |
|
| 1760 |
+ return engine.StatusOK |
|
| 1761 |
+ } |
|
| 1762 |
+ |
|
| 1763 |
+ if container != nil {
|
|
| 1764 |
+ if container.State.IsRunning() {
|
|
| 1765 |
+ if forceRemove {
|
|
| 1766 |
+ if err := container.Stop(5); err != nil {
|
|
| 1767 |
+ return job.Errorf("Could not stop running container, cannot remove - %v", err)
|
|
| 1768 |
+ } |
|
| 1769 |
+ } else {
|
|
| 1770 |
+ return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
|
|
| 1771 |
+ } |
|
| 1772 |
+ } |
|
| 1773 |
+ if err := srv.runtime.Destroy(container); err != nil {
|
|
| 1774 |
+ return job.Errorf("Cannot destroy container %s: %s", name, err)
|
|
| 1775 |
+ } |
|
| 1776 |
+ srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 1777 |
+ |
|
| 1778 |
+ if removeVolume {
|
|
| 1779 |
+ var ( |
|
| 1780 |
+ volumes = make(map[string]struct{})
|
|
| 1781 |
+ binds = make(map[string]struct{})
|
|
| 1782 |
+ usedVolumes = make(map[string]*runtime.Container) |
|
| 1783 |
+ ) |
|
| 1784 |
+ |
|
| 1785 |
+ // the volume id is always the base of the path |
|
| 1786 |
+ getVolumeId := func(p string) string {
|
|
| 1787 |
+ return filepath.Base(strings.TrimSuffix(p, "/layer")) |
|
| 1788 |
+ } |
|
| 1789 |
+ |
|
| 1790 |
+ // populate bind map so that they can be skipped and not removed |
|
| 1791 |
+ for _, bind := range container.HostConfig().Binds {
|
|
| 1792 |
+ source := strings.Split(bind, ":")[0] |
|
| 1793 |
+ // TODO: refactor all volume stuff, all of it |
|
| 1794 |
+ // it is very important that we eval the symlink here, |
|
| 1795 |
+ // otherwise comparing the keys to container.Volumes will not work |
|
| 1796 |
+ p, err := filepath.EvalSymlinks(source) |
|
| 1797 |
+ if err != nil {
|
|
| 1798 |
+ return job.Error(err) |
|
| 1799 |
+ } |
|
| 1800 |
+ source = p |
|
| 1801 |
+ binds[source] = struct{}{}
|
|
| 1802 |
+ } |
|
| 1803 |
+ |
|
| 1804 |
+ // Store all the deleted containers volumes |
|
| 1805 |
+ for _, volumeId := range container.Volumes {
|
|
| 1806 |
+ // Skip the volumes mounted from external |
|
| 1807 |
+ // bind mounts here will be evaluated for a symlink |
|
| 1808 |
+ if _, exists := binds[volumeId]; exists {
|
|
| 1809 |
+ continue |
|
| 1810 |
+ } |
|
| 1811 |
+ |
|
| 1812 |
+ volumeId = getVolumeId(volumeId) |
|
| 1813 |
+ volumes[volumeId] = struct{}{}
|
|
| 1814 |
+ } |
|
| 1815 |
+ |
|
| 1816 |
+ // Retrieve all volumes from all remaining containers |
|
| 1817 |
+ for _, container := range srv.runtime.List() {
|
|
| 1818 |
+ for _, containerVolumeId := range container.Volumes {
|
|
| 1819 |
+ containerVolumeId = getVolumeId(containerVolumeId) |
|
| 1820 |
+ usedVolumes[containerVolumeId] = container |
|
| 1821 |
+ } |
|
| 1822 |
+ } |
|
| 1823 |
+ |
|
| 1824 |
+ for volumeId := range volumes {
|
|
| 1825 |
+ // If the requested volume is still in use by another container, skip it |
|
| 1826 |
+ if c, exists := usedVolumes[volumeId]; exists {
|
|
| 1827 |
+ log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
|
|
| 1828 |
+ continue |
|
| 1829 |
+ } |
|
| 1830 |
+ if err := srv.runtime.Volumes().Delete(volumeId); err != nil {
|
|
| 1831 |
+ return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
|
|
| 1832 |
+ } |
|
| 1833 |
+ } |
|
| 1834 |
+ } |
|
| 1835 |
+ } else {
|
|
| 1836 |
+ return job.Errorf("No such container: %s", name)
|
|
| 1837 |
+ } |
|
| 1838 |
+ return engine.StatusOK |
|
| 1839 |
+} |
|
| 1840 |
+ |
|
| 1841 |
+func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error {
|
|
| 1842 |
+ var ( |
|
| 1843 |
+ repoName, tag string |
|
| 1844 |
+ tags = []string{}
|
|
| 1845 |
+ ) |
|
| 1846 |
+ |
|
| 1847 |
+ repoName, tag = utils.ParseRepositoryTag(name) |
|
| 1848 |
+ if tag == "" {
|
|
| 1849 |
+ tag = graph.DEFAULTTAG |
|
| 1850 |
+ } |
|
| 1851 |
+ |
|
| 1852 |
+ img, err := srv.runtime.Repositories().LookupImage(name) |
|
| 1853 |
+ if err != nil {
|
|
| 1854 |
+ if r, _ := srv.runtime.Repositories().Get(repoName); r != nil {
|
|
| 1855 |
+ return fmt.Errorf("No such image: %s:%s", repoName, tag)
|
|
| 1856 |
+ } |
|
| 1857 |
+ return fmt.Errorf("No such image: %s", name)
|
|
| 1858 |
+ } |
|
| 1859 |
+ |
|
| 1860 |
+ if strings.Contains(img.ID, name) {
|
|
| 1861 |
+ repoName = "" |
|
| 1862 |
+ tag = "" |
|
| 1863 |
+ } |
|
| 1864 |
+ |
|
| 1865 |
+ byParents, err := srv.runtime.Graph().ByParent() |
|
| 1866 |
+ if err != nil {
|
|
| 1867 |
+ return err |
|
| 1868 |
+ } |
|
| 1869 |
+ |
|
| 1870 |
+ // If deleting by ID, check whether the ID belongs to only one repository |
|
| 1871 |
+ if repoName == "" {
|
|
| 1872 |
+ for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] {
|
|
| 1873 |
+ parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag) |
|
| 1874 |
+ if repoName == "" || repoName == parsedRepo {
|
|
| 1875 |
+ repoName = parsedRepo |
|
| 1876 |
+ if parsedTag != "" {
|
|
| 1877 |
+ tags = append(tags, parsedTag) |
|
| 1878 |
+ } |
|
| 1879 |
+ } else if repoName != parsedRepo && !force {
|
|
| 1880 |
+ // the id belongs to multiple repos, like base:latest and user:test, |
|
| 1881 |
+ // in that case return conflict |
|
| 1882 |
+ return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name)
|
|
| 1883 |
+ } |
|
| 1884 |
+ } |
|
| 1885 |
+ } else {
|
|
| 1886 |
+ tags = append(tags, tag) |
|
| 1887 |
+ } |
|
| 1888 |
+ |
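+ // On recursive calls (first == false), stop as soon as the image is still tagged elsewhere |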
|
| 1889 |
+ if !first && len(tags) > 0 {
|
|
| 1890 |
+ return nil |
|
| 1891 |
+ } |
|
| 1892 |
+ |
|
| 1893 |
+ // Untag the current image |
|
| 1894 |
+ for _, tag := range tags {
|
|
| 1895 |
+ tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag) |
|
| 1896 |
+ if err != nil {
|
|
| 1897 |
+ return err |
|
| 1898 |
+ } |
|
| 1899 |
+ if tagDeleted {
|
|
| 1900 |
+ out := &engine.Env{}
|
|
| 1901 |
+ out.Set("Untagged", repoName+":"+tag)
|
|
| 1902 |
+ imgs.Add(out) |
|
| 1903 |
+ srv.LogEvent("untag", img.ID, "")
|
|
| 1904 |
+ } |
|
| 1905 |
+ } |
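+ // Re-read the remaining tags; the image is only removed from the graph once nothing references it |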
|
| 1906 |
+ tags = srv.runtime.Repositories().ByID()[img.ID] |
|
| 1907 |
+ if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
|
|
| 1908 |
+ if len(byParents[img.ID]) == 0 {
|
|
| 1909 |
+ if err := srv.canDeleteImage(img.ID); err != nil {
|
|
| 1910 |
+ return err |
|
| 1911 |
+ } |
|
| 1912 |
+ if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil {
|
|
| 1913 |
+ return err |
|
| 1914 |
+ } |
|
| 1915 |
+ if err := srv.runtime.Graph().Delete(img.ID); err != nil {
|
|
| 1916 |
+ return err |
|
| 1917 |
+ } |
|
| 1918 |
+ out := &engine.Env{}
|
|
| 1919 |
+ out.Set("Deleted", img.ID)
|
|
| 1920 |
+ imgs.Add(out) |
|
| 1921 |
+ srv.LogEvent("delete", img.ID, "")
|
|
| 1922 |
+ if img.Parent != "" {
|
|
| 1923 |
+ err := srv.DeleteImage(img.Parent, imgs, false, force) |
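+ // Recursively delete untagged parents; only the top-level call propagates the error |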
|
| 1924 |
+ if first {
|
|
| 1925 |
+ return err |
|
| 1926 |
+ } |
|
| 1927 |
+ |
|
| 1928 |
+ } |
|
| 1929 |
+ |
|
| 1930 |
+ } |
|
| 1931 |
+ } |
|
| 1932 |
+ return nil |
|
| 1933 |
+} |
|
| 1934 |
+ |
|
| 1935 |
+func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
|
|
| 1936 |
+ if n := len(job.Args); n != 1 {
|
|
| 1937 |
+ return job.Errorf("Usage: %s IMAGE", job.Name)
|
|
| 1938 |
+ } |
|
| 1939 |
+ imgs := engine.NewTable("", 0)
|
|
| 1940 |
+ if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil {
|
|
| 1941 |
+ return job.Error(err) |
|
| 1942 |
+ } |
|
| 1943 |
+ if len(imgs.Data) == 0 {
|
|
| 1944 |
+ return job.Errorf("Conflict, %s wasn't deleted", job.Args[0])
|
|
| 1945 |
+ } |
|
| 1946 |
+ if _, err := imgs.WriteListTo(job.Stdout); err != nil {
|
|
| 1947 |
+ return job.Error(err) |
|
| 1948 |
+ } |
|
| 1949 |
+ return engine.StatusOK |
|
| 1950 |
+} |
|
| 1951 |
+ |
|
| 1952 |
+func (srv *Server) canDeleteImage(imgID string) error {
|
|
| 1953 |
+ for _, container := range srv.runtime.List() {
|
|
| 1954 |
+ parent, err := srv.runtime.Repositories().LookupImage(container.Image) |
|
| 1955 |
+ if err != nil {
|
|
| 1956 |
+ return err |
|
| 1957 |
+ } |
|
| 1958 |
+ |
|
| 1959 |
+ if err := parent.WalkHistory(func(p *image.Image) error {
|
|
| 1960 |
+ if imgID == p.ID {
|
|
| 1961 |
+ return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), utils.TruncateID(container.ID))
|
|
| 1962 |
+ } |
|
| 1963 |
+ return nil |
|
| 1964 |
+ }); err != nil {
|
|
| 1965 |
+ return err |
|
| 1966 |
+ } |
|
| 1967 |
+ } |
|
| 1968 |
+ return nil |
|
| 1969 |
+} |
|
| 1970 |
+ |
|
| 1971 |
+func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
|
|
| 1972 |
+ // Retrieve all images |
|
| 1973 |
+ images, err := srv.runtime.Graph().Map() |
|
| 1974 |
+ if err != nil {
|
|
| 1975 |
+ return nil, err |
|
| 1976 |
+ } |
|
| 1977 |
+ |
|
| 1978 |
+ // Store the tree in a map of map (map[parentId][childId]) |
|
| 1979 |
+ imageMap := make(map[string]map[string]struct{})
|
|
| 1980 |
+ for _, img := range images {
|
|
| 1981 |
+ if _, exists := imageMap[img.Parent]; !exists {
|
|
| 1982 |
+ imageMap[img.Parent] = make(map[string]struct{})
|
|
| 1983 |
+ } |
|
| 1984 |
+ imageMap[img.Parent][img.ID] = struct{}{}
|
|
| 1985 |
+ } |
|
| 1986 |
+ |
|
| 1987 |
+ // Loop on the children of the given image and check the config |
|
| 1988 |
+ var match *image.Image |
|
| 1989 |
+ for elem := range imageMap[imgID] {
|
|
| 1990 |
+ img, err := srv.runtime.Graph().Get(elem) |
|
| 1991 |
+ if err != nil {
|
|
| 1992 |
+ return nil, err |
|
| 1993 |
+ } |
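+ // Keep the most recently created child whose config matches |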
|
| 1994 |
+ if runconfig.Compare(&img.ContainerConfig, config) {
|
|
| 1995 |
+ if match == nil || match.Created.Before(img.Created) {
|
|
| 1996 |
+ match = img |
|
| 1997 |
+ } |
|
| 1998 |
+ } |
|
| 1999 |
+ } |
|
| 2000 |
+ return match, nil |
|
| 2001 |
+} |
|
| 2002 |
+ |
|
| 2003 |
+func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error {
|
|
| 2004 |
+ runtime := srv.runtime |
|
| 2005 |
+ |
|
| 2006 |
+ if hostConfig != nil && hostConfig.Links != nil {
|
|
| 2007 |
+ for _, l := range hostConfig.Links {
|
|
| 2008 |
+ parts, err := utils.PartParser("name:alias", l)
|
|
| 2009 |
+ if err != nil {
|
|
| 2010 |
+ return err |
|
| 2011 |
+ } |
|
| 2012 |
+ child, err := srv.runtime.GetByName(parts["name"]) |
|
| 2013 |
+ if err != nil {
|
|
| 2014 |
+ return err |
|
| 2015 |
+ } |
|
| 2016 |
+ if child == nil {
|
|
| 2017 |
+ return fmt.Errorf("Could not get container for %s", parts["name"])
|
|
| 2018 |
+ } |
|
| 2019 |
+ if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil {
|
|
| 2020 |
+ return err |
|
| 2021 |
+ } |
|
| 2022 |
+ } |
|
| 2023 |
+ |
|
| 2024 |
+ // After we load all the links into the runtime |
|
| 2025 |
+ // set them to nil on the hostconfig |
|
| 2026 |
+ hostConfig.Links = nil |
|
| 2027 |
+ if err := container.WriteHostConfig(); err != nil {
|
|
| 2028 |
+ return err |
|
| 2029 |
+ } |
|
| 2030 |
+ } |
|
| 2031 |
+ return nil |
|
| 2032 |
+} |
|
| 2033 |
+ |
|
| 2034 |
+func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
|
|
| 2035 |
+ if len(job.Args) < 1 {
|
|
| 2036 |
+ return job.Errorf("Usage: %s container_id", job.Name)
|
|
| 2037 |
+ } |
|
| 2038 |
+ name := job.Args[0] |
|
| 2039 |
+ runtime := srv.runtime |
|
| 2040 |
+ container := runtime.Get(name) |
|
| 2041 |
+ |
|
| 2042 |
+ if container == nil {
|
|
| 2043 |
+ return job.Errorf("No such container: %s", name)
|
|
| 2044 |
+ } |
|
| 2045 |
+ // If no environment was set, then no hostconfig was passed. |
|
| 2046 |
+ if len(job.Environ()) > 0 {
|
|
| 2047 |
+ hostConfig := runconfig.ContainerHostConfigFromJob(job) |
|
| 2048 |
+ // Validate the HostConfig binds. Make sure that: |
|
| 2049 |
+ // 1) the source of a bind mount isn't / |
|
| 2050 |
+ // The bind mount "/:/foo" isn't allowed. |
|
| 2051 |
+ // 2) Ensure the source exists |
|
| 2052 |
+ // A missing source directory is created on the host. |
|
| 2053 |
+ for _, bind := range hostConfig.Binds {
|
|
| 2054 |
+ splitBind := strings.Split(bind, ":") |
|
| 2055 |
+ source := splitBind[0] |
|
| 2056 |
+ |
|
| 2057 |
+ // refuse to bind mount "/" to the container |
|
| 2058 |
+ if source == "/" {
|
|
| 2059 |
+ return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
|
|
| 2060 |
+ } |
|
| 2061 |
+ |
|
| 2062 |
+ // ensure the source exists on the host |
|
| 2063 |
+ _, err := os.Stat(source) |
|
| 2064 |
+ if err != nil && os.IsNotExist(err) {
|
|
| 2065 |
+ err = os.MkdirAll(source, 0755) |
|
| 2066 |
+ if err != nil {
|
|
| 2067 |
+ return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error())
|
|
| 2068 |
+ } |
|
| 2069 |
+ } |
|
| 2070 |
+ } |
|
| 2071 |
+ // Register any links from the host config before starting the container |
|
| 2072 |
+ if err := srv.RegisterLinks(container, hostConfig); err != nil {
|
|
| 2073 |
+ return job.Error(err) |
|
| 2074 |
+ } |
|
| 2075 |
+ container.SetHostConfig(hostConfig) |
|
| 2076 |
+ container.ToDisk() |
|
| 2077 |
+ } |
|
| 2078 |
+ if err := container.Start(); err != nil {
|
|
| 2079 |
+ return job.Errorf("Cannot start container %s: %s", name, err)
|
|
| 2080 |
+ } |
|
| 2081 |
+ srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image))
|
|
| 2082 |
+ |
|
| 2083 |
+ return engine.StatusOK |
|
| 2084 |
+} |
|
| 2085 |
+ |
|
| 2086 |
+func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
|
|
| 2087 |
+ if len(job.Args) != 1 {
|
|
| 2088 |
+ return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 2089 |
+ } |
|
| 2090 |
+ var ( |
|
| 2091 |
+ name = job.Args[0] |
|
| 2092 |
+ t = 10 |
|
| 2093 |
+ ) |
|
| 2094 |
+ if job.EnvExists("t") {
|
|
| 2095 |
+ t = job.GetenvInt("t")
|
|
| 2096 |
+ } |
|
| 2097 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 2098 |
+ if err := container.Stop(int(t)); err != nil {
|
|
| 2099 |
+ return job.Errorf("Cannot stop container %s: %s\n", name, err)
|
|
| 2100 |
+ } |
|
| 2101 |
+ srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image))
|
|
| 2102 |
+ } else {
|
|
| 2103 |
+ return job.Errorf("No such container: %s\n", name)
|
|
| 2104 |
+ } |
|
| 2105 |
+ return engine.StatusOK |
|
| 2106 |
+} |
|
| 2107 |
+ |
|
| 2108 |
+func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
|
|
| 2109 |
+ if len(job.Args) != 1 {
|
|
| 2110 |
+ return job.Errorf("Usage: %s", job.Name)
|
|
| 2111 |
+ } |
|
| 2112 |
+ name := job.Args[0] |
|
| 2113 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 2114 |
+ status := container.Wait() |
|
| 2115 |
+ job.Printf("%d\n", status)
|
|
| 2116 |
+ return engine.StatusOK |
|
| 2117 |
+ } |
|
| 2118 |
+ return job.Errorf("%s: no such container: %s", job.Name, name)
|
|
| 2119 |
+} |
|
| 2120 |
+ |
|
| 2121 |
+func (srv *Server) ContainerResize(job *engine.Job) engine.Status {
|
|
| 2122 |
+ if len(job.Args) != 3 {
|
|
| 2123 |
+ return job.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name)
|
|
| 2124 |
+ } |
|
| 2125 |
+ name := job.Args[0] |
|
| 2126 |
+ height, err := strconv.Atoi(job.Args[1]) |
|
| 2127 |
+ if err != nil {
|
|
| 2128 |
+ return job.Error(err) |
|
| 2129 |
+ } |
|
| 2130 |
+ width, err := strconv.Atoi(job.Args[2]) |
|
| 2131 |
+ if err != nil {
|
|
| 2132 |
+ return job.Error(err) |
|
| 2133 |
+ } |
|
| 2134 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 2135 |
+ if err := container.Resize(height, width); err != nil {
|
|
| 2136 |
+ return job.Error(err) |
|
| 2137 |
+ } |
|
| 2138 |
+ return engine.StatusOK |
|
| 2139 |
+ } |
|
| 2140 |
+ return job.Errorf("No such container: %s", name)
|
|
| 2141 |
+} |
|
| 2142 |
+ |
|
| 2143 |
+func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
|
|
| 2144 |
+ if len(job.Args) != 1 {
|
|
| 2145 |
+ return job.Errorf("Usage: %s CONTAINER\n", job.Name)
|
|
| 2146 |
+ } |
|
| 2147 |
+ |
|
| 2148 |
+ var ( |
|
| 2149 |
+ name = job.Args[0] |
|
| 2150 |
+ logs = job.GetenvBool("logs")
|
|
| 2151 |
+ stream = job.GetenvBool("stream")
|
|
| 2152 |
+ stdin = job.GetenvBool("stdin")
|
|
| 2153 |
+ stdout = job.GetenvBool("stdout")
|
|
| 2154 |
+ stderr = job.GetenvBool("stderr")
|
|
| 2155 |
+ ) |
|
| 2156 |
+ |
|
| 2157 |
+ container := srv.runtime.Get(name) |
|
| 2158 |
+ if container == nil {
|
|
| 2159 |
+ return job.Errorf("No such container: %s", name)
|
|
| 2160 |
+ } |
|
| 2161 |
+ |
|
| 2162 |
+ // logs |
|
| 2163 |
+ if logs {
|
|
| 2164 |
+ cLog, err := container.ReadLog("json")
|
|
| 2165 |
+ if err != nil && os.IsNotExist(err) {
|
|
| 2166 |
+ // Legacy logs |
|
| 2167 |
+ utils.Debugf("Old logs format")
|
|
| 2168 |
+ if stdout {
|
|
| 2169 |
+ cLog, err := container.ReadLog("stdout")
|
|
| 2170 |
+ if err != nil {
|
|
| 2171 |
+ utils.Errorf("Error reading logs (stdout): %s", err)
|
|
| 2172 |
+ } else if _, err := io.Copy(job.Stdout, cLog); err != nil {
|
|
| 2173 |
+ utils.Errorf("Error streaming logs (stdout): %s", err)
|
|
| 2174 |
+ } |
|
| 2175 |
+ } |
|
| 2176 |
+ if stderr {
|
|
| 2177 |
+ cLog, err := container.ReadLog("stderr")
|
|
| 2178 |
+ if err != nil {
|
|
| 2179 |
+ utils.Errorf("Error reading logs (stderr): %s", err)
|
|
| 2180 |
+ } else if _, err := io.Copy(job.Stderr, cLog); err != nil {
|
|
| 2181 |
+ utils.Errorf("Error streaming logs (stderr): %s", err)
|
|
| 2182 |
+ } |
|
| 2183 |
+ } |
|
| 2184 |
+ } else if err != nil {
|
|
| 2185 |
+ utils.Errorf("Error reading logs (json): %s", err)
|
|
| 2186 |
+ } else {
|
|
| 2187 |
+ dec := json.NewDecoder(cLog) |
|
| 2188 |
+ for {
|
|
| 2189 |
+ l := &utils.JSONLog{}
|
|
| 2190 |
+ |
|
| 2191 |
+ if err := dec.Decode(l); err == io.EOF {
|
|
| 2192 |
+ break |
|
| 2193 |
+ } else if err != nil {
|
|
| 2194 |
+ utils.Errorf("Error streaming logs: %s", err)
|
|
| 2195 |
+ break |
|
| 2196 |
+ } |
|
| 2197 |
+ if l.Stream == "stdout" && stdout {
|
|
| 2198 |
+ fmt.Fprintf(job.Stdout, "%s", l.Log) |
|
| 2199 |
+ } |
|
| 2200 |
+ if l.Stream == "stderr" && stderr {
|
|
| 2201 |
+ fmt.Fprintf(job.Stderr, "%s", l.Log) |
|
| 2202 |
+ } |
|
| 2203 |
+ } |
|
| 2204 |
+ } |
|
| 2205 |
+ } |
|
| 2206 |
+ |
|
| 2207 |
+ // stream |
|
| 2208 |
+ if stream {
|
|
| 2209 |
+ if container.State.IsGhost() {
|
|
| 2210 |
+ return job.Errorf("Impossible to attach to a ghost container")
|
|
| 2211 |
+ } |
|
| 2212 |
+ |
|
| 2213 |
+ var ( |
|
| 2214 |
+ cStdin io.ReadCloser |
|
| 2215 |
+ cStdout, cStderr io.Writer |
|
| 2216 |
+ cStdinCloser io.Closer |
|
| 2217 |
+ ) |
|
| 2218 |
+ |
|
| 2219 |
+ if stdin {
|
|
| 2220 |
+ r, w := io.Pipe() |
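+ // Copy job.Stdin through a pipe so Attach receives an io.ReadCloser it can close independently |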
|
| 2221 |
+ go func() {
|
|
| 2222 |
+ defer w.Close() |
|
| 2223 |
+ defer utils.Debugf("Closing buffered stdin pipe")
|
|
| 2224 |
+ io.Copy(w, job.Stdin) |
|
| 2225 |
+ }() |
|
| 2226 |
+ cStdin = r |
|
| 2227 |
+ cStdinCloser = job.Stdin |
|
| 2228 |
+ } |
|
| 2229 |
+ if stdout {
|
|
| 2230 |
+ cStdout = job.Stdout |
|
| 2231 |
+ } |
|
| 2232 |
+ if stderr {
|
|
| 2233 |
+ cStderr = job.Stderr |
|
| 2234 |
+ } |
|
| 2235 |
+ |
|
| 2236 |
+ <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) |
|
| 2237 |
+ |
|
| 2238 |
+ // If we are in stdinonce mode, wait for the process to end |
|
| 2239 |
+ // otherwise, simply return |
|
| 2240 |
+ if container.Config.StdinOnce && !container.Config.Tty {
|
|
| 2241 |
+ container.Wait() |
|
| 2242 |
+ } |
|
| 2243 |
+ } |
|
| 2244 |
+ return engine.StatusOK |
|
| 2245 |
+} |
|
| 2246 |
+ |
|
| 2247 |
+func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) {
|
|
| 2248 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 2249 |
+ return container, nil |
|
| 2250 |
+ } |
|
| 2251 |
+ return nil, fmt.Errorf("No such container: %s", name)
|
|
| 2252 |
+} |
|
| 2253 |
+ |
|
| 2254 |
+func (srv *Server) ImageInspect(name string) (*image.Image, error) {
|
|
| 2255 |
+ if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil {
|
|
| 2256 |
+ return image, nil |
|
| 2257 |
+ } |
|
| 2258 |
+ return nil, fmt.Errorf("No such image: %s", name)
|
|
| 2259 |
+} |
|
| 2260 |
+ |
|
| 2261 |
+func (srv *Server) JobInspect(job *engine.Job) engine.Status {
|
|
| 2262 |
+ // TODO: deprecate KIND/conflict |
|
| 2263 |
+ if n := len(job.Args); n != 2 {
|
|
| 2264 |
+ return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name)
|
|
| 2265 |
+ } |
|
| 2266 |
+ var ( |
|
| 2267 |
+ name = job.Args[0] |
|
| 2268 |
+ kind = job.Args[1] |
|
| 2269 |
+ object interface{}
|
|
| 2270 |
+ conflict = job.GetenvBool("conflict") // should the job detect conflicts between containers and images
|
|
| 2271 |
+ image, errImage = srv.ImageInspect(name) |
|
| 2272 |
+ container, errContainer = srv.ContainerInspect(name) |
|
| 2273 |
+ ) |
|
| 2274 |
+ |
|
| 2275 |
+ if conflict && image != nil && container != nil {
|
|
| 2276 |
+ return job.Errorf("Conflict between containers and images")
|
|
| 2277 |
+ } |
|
| 2278 |
+ |
|
| 2279 |
+ switch kind {
|
|
| 2280 |
+ case "image": |
|
| 2281 |
+ if errImage != nil {
|
|
| 2282 |
+ return job.Error(errImage) |
|
| 2283 |
+ } |
|
| 2284 |
+ object = image |
|
| 2285 |
+ case "container": |
|
| 2286 |
+ if errContainer != nil {
|
|
| 2287 |
+ return job.Error(errContainer) |
|
| 2288 |
+ } |
|
| 2289 |
+ object = &struct {
|
|
| 2290 |
+ *runtime.Container |
|
| 2291 |
+ HostConfig *runconfig.HostConfig |
|
| 2292 |
+ }{container, container.HostConfig()}
|
|
| 2293 |
+ default: |
|
| 2294 |
+ return job.Errorf("Unknown kind: %s", kind)
|
|
| 2295 |
+ } |
|
| 2296 |
+ |
|
| 2297 |
+ b, err := json.Marshal(object) |
|
| 2298 |
+ if err != nil {
|
|
| 2299 |
+ return job.Error(err) |
|
| 2300 |
+ } |
|
| 2301 |
+ job.Stdout.Write(b) |
|
| 2302 |
+ return engine.StatusOK |
|
| 2303 |
+} |
|
| 2304 |
+ |
|
| 2305 |
+func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
|
|
| 2306 |
+ if len(job.Args) != 2 {
|
|
| 2307 |
+ return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name)
|
|
| 2308 |
+ } |
|
| 2309 |
+ |
|
| 2310 |
+ var ( |
|
| 2311 |
+ name = job.Args[0] |
|
| 2312 |
+ resource = job.Args[1] |
|
| 2313 |
+ ) |
|
| 2314 |
+ |
|
| 2315 |
+ if container := srv.runtime.Get(name); container != nil {
|
|
| 2316 |
+ |
|
| 2317 |
+ data, err := container.Copy(resource) |
|
| 2318 |
+ if err != nil {
|
|
| 2319 |
+ return job.Error(err) |
|
| 2320 |
+ } |
|
| 2321 |
+ defer data.Close() |
|
| 2322 |
+ |
|
| 2323 |
+ if _, err := io.Copy(job.Stdout, data); err != nil {
|
|
| 2324 |
+ return job.Error(err) |
|
| 2325 |
+ } |
|
| 2326 |
+ return engine.StatusOK |
|
| 2327 |
+ } |
|
| 2328 |
+ return job.Errorf("No such container: %s", name)
|
|
| 2329 |
+} |
|
| 2330 |
+ |
|
| 2331 |
+func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
|
|
| 2332 |
+ runtime, err := runtime.NewRuntime(config, eng) |
|
| 2333 |
+ if err != nil {
|
|
| 2334 |
+ return nil, err |
|
| 2335 |
+ } |
|
| 2336 |
+ srv := &Server{
|
|
| 2337 |
+ Eng: eng, |
|
| 2338 |
+ runtime: runtime, |
|
| 2339 |
+ pullingPool: make(map[string]chan struct{}),
|
|
| 2340 |
+ pushingPool: make(map[string]chan struct{}),
|
|
| 2341 |
+ events: make([]utils.JSONMessage, 0, 64), // only keeps the last 64 events |
|
| 2342 |
+ listeners: make(map[string]chan utils.JSONMessage), |
|
| 2343 |
+ running: true, |
|
| 2344 |
+ } |
|
| 2345 |
+ runtime.SetServer(srv) |
|
| 2346 |
+ return srv, nil |
|
| 2347 |
+} |
|
| 2348 |
+ |
|
| 2349 |
+func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
|
|
| 2350 |
+ httpVersion := make([]utils.VersionInfo, 0, 4) |
|
| 2351 |
+ httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION})
|
|
| 2352 |
+ httpVersion = append(httpVersion, &simpleVersionInfo{"go", goruntime.Version()})
|
|
| 2353 |
+ httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT})
|
|
| 2354 |
+ if kernelVersion, err := utils.GetKernelVersion(); err == nil {
|
|
| 2355 |
+ httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()})
|
|
| 2356 |
+ } |
|
| 2357 |
+ httpVersion = append(httpVersion, &simpleVersionInfo{"os", goruntime.GOOS})
|
|
| 2358 |
+ httpVersion = append(httpVersion, &simpleVersionInfo{"arch", goruntime.GOARCH})
|
|
| 2359 |
+ ud := utils.NewHTTPUserAgentDecorator(httpVersion...) |
|
| 2360 |
+ md := &utils.HTTPMetaHeadersDecorator{
|
|
| 2361 |
+ Headers: metaHeaders, |
|
| 2362 |
+ } |
|
| 2363 |
+ factory := utils.NewHTTPRequestFactory(ud, md) |
|
| 2364 |
+ return factory |
|
| 2365 |
+} |
|
| 2366 |
+ |
|
| 2367 |
+func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage {
|
|
| 2368 |
+ now := time.Now().UTC().Unix() |
|
| 2369 |
+ jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now}
|
|
| 2370 |
+ srv.AddEvent(jm) |
|
| 2371 |
+ for _, c := range srv.listeners {
|
|
| 2372 |
+ select { // non-blocking send
|
|
| 2373 |
+ case c <- jm: |
|
| 2374 |
+ default: |
|
| 2375 |
+ } |
|
| 2376 |
+ } |
|
| 2377 |
+ return &jm |
|
| 2378 |
+} |
|
| 2379 |
+ |
|
| 2380 |
+func (srv *Server) AddEvent(jm utils.JSONMessage) {
|
|
| 2381 |
+ srv.Lock() |
|
| 2382 |
+ defer srv.Unlock() |
|
| 2383 |
+ srv.events = append(srv.events, jm) |
|
| 2384 |
+} |
|
| 2385 |
+ |
|
| 2386 |
+func (srv *Server) GetEvents() []utils.JSONMessage {
|
|
| 2387 |
+ srv.RLock() |
|
| 2388 |
+ defer srv.RUnlock() |
|
| 2389 |
+ return srv.events |
|
| 2390 |
+} |
|
| 2391 |
+ |
|
| 2392 |
+func (srv *Server) SetRunning(status bool) {
|
|
| 2393 |
+ srv.Lock() |
|
| 2394 |
+ defer srv.Unlock() |
|
| 2395 |
+ |
|
| 2396 |
+ srv.running = status |
|
| 2397 |
+} |
|
| 2398 |
+ |
|
| 2399 |
+func (srv *Server) IsRunning() bool {
|
|
| 2400 |
+ srv.RLock() |
|
| 2401 |
+ defer srv.RUnlock() |
|
| 2402 |
+ return srv.running |
|
| 2403 |
+} |
|
| 2404 |
+ |
|
| 2405 |
+func (srv *Server) Close() error {
|
|
| 2406 |
+ if srv == nil {
|
|
| 2407 |
+ return nil |
|
| 2408 |
+ } |
|
| 2409 |
+ srv.SetRunning(false) |
|
| 2410 |
+ if srv.runtime == nil {
|
|
| 2411 |
+ return nil |
|
| 2412 |
+ } |
|
| 2413 |
+ return srv.runtime.Close() |
|
| 2414 |
+} |
|
| 2415 |
+ |
|
| 2416 |
+type Server struct {
|
|
| 2417 |
+ sync.RWMutex |
|
| 2418 |
+ runtime *runtime.Runtime |
|
| 2419 |
+ pullingPool map[string]chan struct{}
|
|
| 2420 |
+ pushingPool map[string]chan struct{}
|
|
| 2421 |
+ events []utils.JSONMessage |
|
| 2422 |
+ listeners map[string]chan utils.JSONMessage |
|
| 2423 |
+ Eng *engine.Engine |
|
| 2424 |
+ running bool |
|
| 2425 |
+} |
| 0 | 2426 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,99 @@ |
| 0 |
+package server |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "github.com/dotcloud/docker/utils" |
|
| 4 |
+ "testing" |
|
| 5 |
+ "time" |
|
| 6 |
+) |
|
| 7 |
+ |
|
| 8 |
+func TestPools(t *testing.T) {
|
|
| 9 |
+ srv := &Server{
|
|
| 10 |
+ pullingPool: make(map[string]chan struct{}),
|
|
| 11 |
+ pushingPool: make(map[string]chan struct{}),
|
|
| 12 |
+ } |
|
| 13 |
+ |
|
| 14 |
+ if _, err := srv.poolAdd("pull", "test1"); err != nil {
|
|
| 15 |
+ t.Fatal(err) |
|
| 16 |
+ } |
|
| 17 |
+ if _, err := srv.poolAdd("pull", "test2"); err != nil {
|
|
| 18 |
+ t.Fatal(err) |
|
| 19 |
+ } |
|
| 20 |
+ if _, err := srv.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
|
|
| 21 |
+ t.Fatalf("Expected `pull test1 is already in progress`")
|
|
| 22 |
+ } |
|
| 23 |
+ if _, err := srv.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
|
|
| 24 |
+ t.Fatalf("Expected `pull test1 is already in progress`")
|
|
| 25 |
+ } |
|
| 26 |
+ if _, err := srv.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
|
|
| 27 |
+ t.Fatalf("Expected `Unknown pool type`")
|
|
| 28 |
+ } |
|
| 29 |
+ if err := srv.poolRemove("pull", "test2"); err != nil {
|
|
| 30 |
+ t.Fatal(err) |
|
| 31 |
+ } |
|
| 32 |
+ if err := srv.poolRemove("pull", "test2"); err != nil {
|
|
| 33 |
+ t.Fatal(err) |
|
| 34 |
+ } |
|
| 35 |
+ if err := srv.poolRemove("pull", "test1"); err != nil {
|
|
| 36 |
+ t.Fatal(err) |
|
| 37 |
+ } |
|
| 38 |
+ if err := srv.poolRemove("push", "test1"); err != nil {
|
|
| 39 |
+ t.Fatal(err) |
|
| 40 |
+ } |
|
| 41 |
+ if err := srv.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
|
|
| 42 |
+ t.Fatalf("Expected `Unknown pool type`")
|
|
| 43 |
+ } |
|
| 44 |
+} |
|
| 45 |
+ |
|
| 46 |
+func TestLogEvent(t *testing.T) {
|
|
| 47 |
+ srv := &Server{
|
|
| 48 |
+ events: make([]utils.JSONMessage, 0, 64), |
|
| 49 |
+ listeners: make(map[string]chan utils.JSONMessage), |
|
| 50 |
+ } |
|
| 51 |
+ |
|
| 52 |
+ srv.LogEvent("fakeaction", "fakeid", "fakeimage")
|
|
| 53 |
+ |
|
| 54 |
+ listener := make(chan utils.JSONMessage) |
|
| 55 |
+ srv.Lock() |
|
| 56 |
+ srv.listeners["test"] = listener |
|
| 57 |
+ srv.Unlock() |
|
| 58 |
+ |
|
| 59 |
+ srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
|
|
| 60 |
+ |
|
| 61 |
+ numEvents := len(srv.GetEvents()) |
|
| 62 |
+ if numEvents != 2 {
|
|
| 63 |
+ t.Fatalf("Expected 2 events, found %d", numEvents)
|
|
| 64 |
+ } |
|
| 65 |
+ go func() {
|
|
| 66 |
+ time.Sleep(200 * time.Millisecond) |
|
| 67 |
+ srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
|
|
| 68 |
+ time.Sleep(200 * time.Millisecond) |
|
| 69 |
+ srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
|
|
| 70 |
+ }() |
|
| 71 |
+ |
|
| 72 |
+ setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
|
|
| 73 |
+ for i := 2; i < 4; i++ {
|
|
| 74 |
+ event := <-listener |
|
| 75 |
+ if event != srv.GetEvents()[i] {
|
|
| 76 |
+ t.Fatalf("Event received it different than expected")
|
|
| 77 |
+ } |
|
| 78 |
+ } |
|
| 79 |
+ }) |
|
| 80 |
+} |
|
| 81 |
+ |
|
| 82 |
+// FIXME: this is duplicated from integration/commands_test.go |
|
| 83 |
+func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
|
|
| 84 |
+ c := make(chan bool) |
|
| 85 |
+ |
|
| 86 |
+ // Make sure f does not run for too long |
|
| 87 |
+ go func() {
|
|
| 88 |
+ time.Sleep(d) |
|
| 89 |
+ c <- true |
|
| 90 |
+ }() |
|
| 91 |
+ go func() {
|
|
| 92 |
+ f() |
|
| 93 |
+ c <- false |
|
| 94 |
+ }() |
|
| 95 |
+ if <-c && msg != "" {
|
|
| 96 |
+ t.Fatal(msg) |
|
| 97 |
+ } |
|
| 98 |
+} |
| 0 | 99 |
deleted file mode 100644 |
| ... | ... |
@@ -1,99 +0,0 @@ |
| 1 |
-package docker |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "github.com/dotcloud/docker/utils" |
|
| 5 |
- "testing" |
|
| 6 |
- "time" |
|
| 7 |
-) |
|
| 8 |
- |
|
| 9 |
-func TestPools(t *testing.T) {
|
|
| 10 |
- srv := &Server{
|
|
| 11 |
- pullingPool: make(map[string]chan struct{}),
|
|
| 12 |
- pushingPool: make(map[string]chan struct{}),
|
|
| 13 |
- } |
|
| 14 |
- |
|
| 15 |
- if _, err := srv.poolAdd("pull", "test1"); err != nil {
|
|
| 16 |
- t.Fatal(err) |
|
| 17 |
- } |
|
| 18 |
- if _, err := srv.poolAdd("pull", "test2"); err != nil {
|
|
| 19 |
- t.Fatal(err) |
|
| 20 |
- } |
|
| 21 |
- if _, err := srv.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
|
|
| 22 |
- t.Fatalf("Expected `pull test1 is already in progress`")
|
|
| 23 |
- } |
|
| 24 |
- if _, err := srv.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" {
|
|
| 25 |
- t.Fatalf("Expected `pull test1 is already in progress`")
|
|
| 26 |
- } |
|
| 27 |
- if _, err := srv.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
|
|
| 28 |
- t.Fatalf("Expected `Unknown pool type`")
|
|
| 29 |
- } |
|
| 30 |
- if err := srv.poolRemove("pull", "test2"); err != nil {
|
|
| 31 |
- t.Fatal(err) |
|
| 32 |
- } |
|
| 33 |
- if err := srv.poolRemove("pull", "test2"); err != nil {
|
|
| 34 |
- t.Fatal(err) |
|
| 35 |
- } |
|
| 36 |
- if err := srv.poolRemove("pull", "test1"); err != nil {
|
|
| 37 |
- t.Fatal(err) |
|
| 38 |
- } |
|
| 39 |
- if err := srv.poolRemove("push", "test1"); err != nil {
|
|
| 40 |
- t.Fatal(err) |
|
| 41 |
- } |
|
| 42 |
- if err := srv.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" {
|
|
| 43 |
- t.Fatalf("Expected `Unknown pool type`")
|
|
| 44 |
- } |
|
| 45 |
-} |
|
| 46 |
- |
|
| 47 |
-func TestLogEvent(t *testing.T) {
|
|
| 48 |
- srv := &Server{
|
|
| 49 |
- events: make([]utils.JSONMessage, 0, 64), |
|
| 50 |
- listeners: make(map[string]chan utils.JSONMessage), |
|
| 51 |
- } |
|
| 52 |
- |
|
| 53 |
- srv.LogEvent("fakeaction", "fakeid", "fakeimage")
|
|
| 54 |
- |
|
| 55 |
- listener := make(chan utils.JSONMessage) |
|
| 56 |
- srv.Lock() |
|
| 57 |
- srv.listeners["test"] = listener |
|
| 58 |
- srv.Unlock() |
|
| 59 |
- |
|
| 60 |
- srv.LogEvent("fakeaction2", "fakeid", "fakeimage")
|
|
| 61 |
- |
|
| 62 |
- numEvents := len(srv.GetEvents()) |
|
| 63 |
- if numEvents != 2 {
|
|
| 64 |
- t.Fatalf("Expected 2 events, found %d", numEvents)
|
|
| 65 |
- } |
|
| 66 |
- go func() {
|
|
| 67 |
- time.Sleep(200 * time.Millisecond) |
|
| 68 |
- srv.LogEvent("fakeaction3", "fakeid", "fakeimage")
|
|
| 69 |
- time.Sleep(200 * time.Millisecond) |
|
| 70 |
- srv.LogEvent("fakeaction4", "fakeid", "fakeimage")
|
|
| 71 |
- }() |
|
| 72 |
- |
|
| 73 |
- setTimeout(t, "Listening for events timed out", 2*time.Second, func() {
|
|
| 74 |
- for i := 2; i < 4; i++ {
|
|
| 75 |
- event := <-listener |
|
| 76 |
- if event != srv.GetEvents()[i] {
|
|
| 77 |
- t.Fatalf("Event received it different than expected")
|
|
| 78 |
- } |
|
| 79 |
- } |
|
| 80 |
- }) |
|
| 81 |
-} |
|
| 82 |
- |
|
| 83 |
-// FIXME: this is duplicated from integration/commands_test.go |
|
| 84 |
-func setTimeout(t *testing.T, msg string, d time.Duration, f func()) {
|
|
| 85 |
- c := make(chan bool) |
|
| 86 |
- |
|
| 87 |
- // Make sure we are not too long |
|
| 88 |
- go func() {
|
|
| 89 |
- time.Sleep(d) |
|
| 90 |
- c <- true |
|
| 91 |
- }() |
|
| 92 |
- go func() {
|
|
| 93 |
- f() |
|
| 94 |
- c <- false |
|
| 95 |
- }() |
|
| 96 |
- if <-c && msg != "" {
|
|
| 97 |
- t.Fatal(msg) |
|
| 98 |
- } |
|
| 99 |
-} |
| ... | ... |
@@ -3,6 +3,7 @@ package utils |
| 3 | 3 |
import ( |
| 4 | 4 |
"encoding/json" |
| 5 | 5 |
"fmt" |
| 6 |
+ "io" |
|
| 6 | 7 |
) |
| 7 | 8 |
|
| 8 | 9 |
type StreamFormatter struct {
|
| ... | ... |
@@ -90,3 +91,31 @@ func (sf *StreamFormatter) Used() bool {
|
| 90 | 90 |
func (sf *StreamFormatter) Json() bool {
|
| 91 | 91 |
return sf.json |
| 92 | 92 |
} |
| 93 |
+ |
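+// StdoutFormater wraps a writer and runs every chunk through the embedded StreamFormatter before forwarding it. |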
|
| 94 |
+type StdoutFormater struct {
|
|
| 95 |
+ io.Writer |
|
| 96 |
+ *StreamFormatter |
|
| 97 |
+} |
|
| 98 |
+ |
|
| 99 |
+func (sf *StdoutFormater) Write(buf []byte) (int, error) {
|
|
| 100 |
+ formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) |
|
| 101 |
+ n, err := sf.Writer.Write(formattedBuf) |
|
| 102 |
+ if n != len(formattedBuf) {
|
|
| 103 |
+ return n, io.ErrShortWrite |
|
| 104 |
+ } |
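+ // Report len(buf) so callers see the number of raw bytes consumed, not the formatted length |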
|
| 105 |
+ return len(buf), err |
|
| 106 |
+} |
|
| 107 |
+ |
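+// StderrFormater behaves like StdoutFormater but wraps output in ANSI bright-red escape codes first. |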
|
| 108 |
+type StderrFormater struct {
|
|
| 109 |
+ io.Writer |
|
| 110 |
+ *StreamFormatter |
|
| 111 |
+} |
|
| 112 |
+ |
|
| 113 |
+func (sf *StderrFormater) Write(buf []byte) (int, error) {
|
|
| 114 |
+ formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
|
|
| 115 |
+ n, err := sf.Writer.Write(formattedBuf) |
|
| 116 |
+ if n != len(formattedBuf) {
|
|
| 117 |
+ return n, io.ErrShortWrite |
|
| 118 |
+ } |
|
| 119 |
+ return len(buf), err |
|
| 120 |
+} |