| ... | ... |
@@ -4,24 +4,24 @@ |
| 4 | 4 |
# |
| 5 | 5 |
# # Assemble the full dev environment. This is slow the first time. |
| 6 | 6 |
# docker build -t docker . |
| 7 |
-# # Apparmor messes with privileged mode: disable it |
|
| 8 |
-# /etc/init.d/apparmor stop ; /etc/init.d/apparmor teardown |
|
| 9 | 7 |
# |
| 10 | 8 |
# # Mount your source in an interactive container for quick testing: |
| 11 |
-# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -lxc-conf=lxc.aa_profile=unconfined -i -t docker bash |
|
| 12 |
-# |
|
| 9 |
+# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash |
|
| 13 | 10 |
# |
| 14 | 11 |
# # Run the test suite: |
| 15 |
-# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh test |
|
| 12 |
+# docker run -privileged docker hack/make.sh test |
|
| 16 | 13 |
# |
| 17 | 14 |
# # Publish a release: |
| 18 |
-# docker run -privileged -lxc-conf=lxc.aa_profile=unconfined \ |
|
| 15 |
+# docker run -privileged \ |
|
| 19 | 16 |
# -e AWS_S3_BUCKET=baz \ |
| 20 | 17 |
# -e AWS_ACCESS_KEY=foo \ |
| 21 | 18 |
# -e AWS_SECRET_KEY=bar \ |
| 22 | 19 |
# -e GPG_PASSPHRASE=gloubiboulga \ |
| 23 | 20 |
# docker hack/release.sh |
| 24 | 21 |
# |
| 22 |
+# Note: AppArmor used to mess with privileged mode, but this is no longer |
|
| 23 |
+# the case. Therefore, you don't have to disable it anymore. |
|
| 24 |
+# |
|
| 25 | 25 |
|
| 26 | 26 |
docker-version 0.6.1 |
| 27 | 27 |
from ubuntu:12.04 |
| ... | ... |
@@ -499,8 +499,7 @@ func TestGetContainersTop(t *testing.T) {
|
| 499 | 499 |
container.WaitTimeout(2 * time.Second) |
| 500 | 500 |
}() |
| 501 | 501 |
|
| 502 |
- hostConfig := &HostConfig{}
|
|
| 503 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 502 |
+ if err := container.Start(); err != nil {
|
|
| 504 | 503 |
t.Fatal(err) |
| 505 | 504 |
} |
| 506 | 505 |
|
| ... | ... |
@@ -704,8 +703,7 @@ func TestPostContainersKill(t *testing.T) {
|
| 704 | 704 |
} |
| 705 | 705 |
defer runtime.Destroy(container) |
| 706 | 706 |
|
| 707 |
- hostConfig := &HostConfig{}
|
|
| 708 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 707 |
+ if err := container.Start(); err != nil {
|
|
| 709 | 708 |
t.Fatal(err) |
| 710 | 709 |
} |
| 711 | 710 |
|
| ... | ... |
@@ -747,8 +745,7 @@ func TestPostContainersRestart(t *testing.T) {
|
| 747 | 747 |
} |
| 748 | 748 |
defer runtime.Destroy(container) |
| 749 | 749 |
|
| 750 |
- hostConfig := &HostConfig{}
|
|
| 751 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 750 |
+ if err := container.Start(); err != nil {
|
|
| 752 | 751 |
t.Fatal(err) |
| 753 | 752 |
} |
| 754 | 753 |
|
| ... | ... |
@@ -855,8 +852,7 @@ func TestPostContainersStop(t *testing.T) {
|
| 855 | 855 |
} |
| 856 | 856 |
defer runtime.Destroy(container) |
| 857 | 857 |
|
| 858 |
- hostConfig := &HostConfig{}
|
|
| 859 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 858 |
+ if err := container.Start(); err != nil {
|
|
| 860 | 859 |
t.Fatal(err) |
| 861 | 860 |
} |
| 862 | 861 |
|
| ... | ... |
@@ -903,8 +899,7 @@ func TestPostContainersWait(t *testing.T) {
|
| 903 | 903 |
} |
| 904 | 904 |
defer runtime.Destroy(container) |
| 905 | 905 |
|
| 906 |
- hostConfig := &HostConfig{}
|
|
| 907 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 906 |
+ if err := container.Start(); err != nil {
|
|
| 908 | 907 |
t.Fatal(err) |
| 909 | 908 |
} |
| 910 | 909 |
|
| ... | ... |
@@ -947,8 +942,7 @@ func TestPostContainersAttach(t *testing.T) {
|
| 947 | 947 |
defer runtime.Destroy(container) |
| 948 | 948 |
|
| 949 | 949 |
// Start the process |
| 950 |
- hostConfig := &HostConfig{}
|
|
| 951 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 950 |
+ if err := container.Start(); err != nil {
|
|
| 952 | 951 |
t.Fatal(err) |
| 953 | 952 |
} |
| 954 | 953 |
|
| ... | ... |
@@ -1037,8 +1031,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
|
| 1037 | 1037 |
defer runtime.Destroy(container) |
| 1038 | 1038 |
|
| 1039 | 1039 |
// Start the process |
| 1040 |
- hostConfig := &HostConfig{}
|
|
| 1041 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 1040 |
+ if err := container.Start(); err != nil {
|
|
| 1042 | 1041 |
t.Fatal(err) |
| 1043 | 1042 |
} |
| 1044 | 1043 |
|
| ... | ... |
@@ -60,11 +60,15 @@ type Container struct {
|
| 60 | 60 |
Volumes map[string]string |
| 61 | 61 |
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. |
| 62 | 62 |
// Easier than migrating older container configs :) |
| 63 |
- VolumesRW map[string]bool |
|
| 63 |
+ VolumesRW map[string]bool |
|
| 64 |
+ hostConfig *HostConfig |
|
| 64 | 65 |
|
| 65 | 66 |
activeLinks map[string]*Link |
| 66 | 67 |
} |
| 67 | 68 |
|
| 69 |
+// Note: the Config structure should hold only portable information about the container. |
|
| 70 |
+// Here, "portable" means "independent from the host we are running on". |
|
| 71 |
+// Non-portable information *should* appear in HostConfig. |
|
| 68 | 72 |
type Config struct {
|
| 69 | 73 |
Hostname string |
| 70 | 74 |
Domainname string |
| ... | ... |
@@ -89,13 +93,13 @@ type Config struct {
|
| 89 | 89 |
WorkingDir string |
| 90 | 90 |
Entrypoint []string |
| 91 | 91 |
NetworkDisabled bool |
| 92 |
- Privileged bool |
|
| 93 | 92 |
} |
| 94 | 93 |
|
| 95 | 94 |
type HostConfig struct {
|
| 96 | 95 |
Binds []string |
| 97 | 96 |
ContainerIDFile string |
| 98 | 97 |
LxcConf []KeyValuePair |
| 98 |
+ Privileged bool |
|
| 99 | 99 |
PortBindings map[Port][]PortBinding |
| 100 | 100 |
Links []string |
| 101 | 101 |
PublishAllPorts bool |
| ... | ... |
@@ -320,7 +324,6 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, |
| 320 | 320 |
Volumes: flVolumes, |
| 321 | 321 |
VolumesFrom: strings.Join(flVolumesFrom, ","), |
| 322 | 322 |
Entrypoint: entrypoint, |
| 323 |
- Privileged: *flPrivileged, |
|
| 324 | 323 |
WorkingDir: *flWorkingDir, |
| 325 | 324 |
} |
| 326 | 325 |
|
| ... | ... |
@@ -328,6 +331,7 @@ func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, |
| 328 | 328 |
Binds: binds, |
| 329 | 329 |
ContainerIDFile: *flContainerIDFile, |
| 330 | 330 |
LxcConf: lxcConf, |
| 331 |
+ Privileged: *flPrivileged, |
|
| 331 | 332 |
PortBindings: portBindings, |
| 332 | 333 |
Links: flLinks, |
| 333 | 334 |
PublishAllPorts: *flPublishAll, |
| ... | ... |
@@ -416,7 +420,7 @@ func (container *Container) FromDisk() error {
|
| 416 | 416 |
if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
|
| 417 | 417 |
return err |
| 418 | 418 |
} |
| 419 |
- return nil |
|
| 419 |
+ return container.readHostConfig() |
|
| 420 | 420 |
} |
| 421 | 421 |
|
| 422 | 422 |
func (container *Container) ToDisk() (err error) {
|
| ... | ... |
@@ -424,23 +428,31 @@ func (container *Container) ToDisk() (err error) {
|
| 424 | 424 |
if err != nil {
|
| 425 | 425 |
return |
| 426 | 426 |
} |
| 427 |
- return ioutil.WriteFile(container.jsonPath(), data, 0666) |
|
| 427 |
+ err = ioutil.WriteFile(container.jsonPath(), data, 0666) |
|
| 428 |
+ if err != nil {
|
|
| 429 |
+ return |
|
| 430 |
+ } |
|
| 431 |
+ return container.writeHostConfig() |
|
| 428 | 432 |
} |
| 429 | 433 |
|
| 430 |
-func (container *Container) ReadHostConfig() (*HostConfig, error) {
|
|
| 434 |
+func (container *Container) readHostConfig() error {
|
|
| 435 |
+ container.hostConfig = &HostConfig{}
|
|
| 436 |
+ // If the hostconfig file does not exist, do not read it. |
|
| 437 |
+ // (We still have to initialize container.hostConfig, |
|
| 438 |
+ // but that's OK, since we just did that above.) |
|
| 439 |
+ _, err := os.Stat(container.hostConfigPath()) |
|
| 440 |
+ if os.IsNotExist(err) {
|
|
| 441 |
+ return nil |
|
| 442 |
+ } |
|
| 431 | 443 |
data, err := ioutil.ReadFile(container.hostConfigPath()) |
| 432 | 444 |
if err != nil {
|
| 433 |
- return &HostConfig{}, err
|
|
| 434 |
- } |
|
| 435 |
- hostConfig := &HostConfig{}
|
|
| 436 |
- if err := json.Unmarshal(data, hostConfig); err != nil {
|
|
| 437 |
- return &HostConfig{}, err
|
|
| 445 |
+ return err |
|
| 438 | 446 |
} |
| 439 |
- return hostConfig, nil |
|
| 447 |
+ return json.Unmarshal(data, container.hostConfig) |
|
| 440 | 448 |
} |
| 441 | 449 |
|
| 442 |
-func (container *Container) SaveHostConfig(hostConfig *HostConfig) (err error) {
|
|
| 443 |
- data, err := json.Marshal(hostConfig) |
|
| 450 |
+func (container *Container) writeHostConfig() (err error) {
|
|
| 451 |
+ data, err := json.Marshal(container.hostConfig) |
|
| 444 | 452 |
if err != nil {
|
| 445 | 453 |
return |
| 446 | 454 |
} |
| ... | ... |
@@ -456,21 +468,13 @@ func (container *Container) generateEnvConfig(env []string) error {
|
| 456 | 456 |
return nil |
| 457 | 457 |
} |
| 458 | 458 |
|
| 459 |
-func (container *Container) generateLXCConfig(hostConfig *HostConfig) error {
|
|
| 459 |
+func (container *Container) generateLXCConfig() error {
|
|
| 460 | 460 |
fo, err := os.Create(container.lxcConfigPath()) |
| 461 | 461 |
if err != nil {
|
| 462 | 462 |
return err |
| 463 | 463 |
} |
| 464 | 464 |
defer fo.Close() |
| 465 |
- if err := LxcTemplateCompiled.Execute(fo, container); err != nil {
|
|
| 466 |
- return err |
|
| 467 |
- } |
|
| 468 |
- if hostConfig != nil {
|
|
| 469 |
- if err := LxcHostConfigTemplateCompiled.Execute(fo, hostConfig); err != nil {
|
|
| 470 |
- return err |
|
| 471 |
- } |
|
| 472 |
- } |
|
| 473 |
- return nil |
|
| 465 |
+ return LxcTemplateCompiled.Execute(fo, container) |
|
| 474 | 466 |
} |
| 475 | 467 |
|
| 476 | 468 |
func (container *Container) startPty() error {
|
| ... | ... |
@@ -665,7 +669,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s |
| 665 | 665 |
}) |
| 666 | 666 |
} |
| 667 | 667 |
|
| 668 |
-func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
|
| 668 |
+func (container *Container) Start() (err error) {
|
|
| 669 | 669 |
container.State.Lock() |
| 670 | 670 |
defer container.State.Unlock() |
| 671 | 671 |
defer func() {
|
| ... | ... |
@@ -674,10 +678,6 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
| 674 | 674 |
} |
| 675 | 675 |
}() |
| 676 | 676 |
|
| 677 |
- if hostConfig == nil { // in docker start of docker restart we want to reuse previous HostConfigFile
|
|
| 678 |
- hostConfig, _ = container.ReadHostConfig() |
|
| 679 |
- } |
|
| 680 |
- |
|
| 681 | 677 |
if container.State.Running {
|
| 682 | 678 |
return fmt.Errorf("The container %s is already running.", container.ID)
|
| 683 | 679 |
} |
| ... | ... |
@@ -687,7 +687,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
| 687 | 687 |
if container.runtime.networkManager.disabled {
|
| 688 | 688 |
container.Config.NetworkDisabled = true |
| 689 | 689 |
} else {
|
| 690 |
- if err := container.allocateNetwork(hostConfig); err != nil {
|
|
| 690 |
+ if err := container.allocateNetwork(); err != nil {
|
|
| 691 | 691 |
return err |
| 692 | 692 |
} |
| 693 | 693 |
} |
| ... | ... |
@@ -711,7 +711,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
| 711 | 711 |
// Define illegal container destinations |
| 712 | 712 |
illegalDsts := []string{"/", "."}
|
| 713 | 713 |
|
| 714 |
- for _, bind := range hostConfig.Binds {
|
|
| 714 |
+ for _, bind := range container.hostConfig.Binds {
|
|
| 715 | 715 |
// FIXME: factorize bind parsing in parseBind |
| 716 | 716 |
var src, dst, mode string |
| 717 | 717 |
arr := strings.Split(bind, ":") |
| ... | ... |
@@ -845,7 +845,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
| 845 | 845 |
} |
| 846 | 846 |
} |
| 847 | 847 |
|
| 848 |
- if err := container.generateLXCConfig(hostConfig); err != nil {
|
|
| 848 |
+ if err := container.generateLXCConfig(); err != nil {
|
|
| 849 | 849 |
return err |
| 850 | 850 |
} |
| 851 | 851 |
|
| ... | ... |
@@ -942,8 +942,11 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
| 942 | 942 |
params = append(params, "--", container.Path) |
| 943 | 943 |
params = append(params, container.Args...) |
| 944 | 944 |
|
| 945 |
- container.cmd = exec.Command("lxc-start", params...)
|
|
| 946 |
- |
|
| 945 |
+ var lxcStart string = "lxc-start" |
|
| 946 |
+ if container.hostConfig.Privileged && container.runtime.capabilities.AppArmor {
|
|
| 947 |
+ lxcStart = path.Join(container.runtime.config.GraphPath, "lxc-start-unconfined") |
|
| 948 |
+ } |
|
| 949 |
+ container.cmd = exec.Command(lxcStart, params...) |
|
| 947 | 950 |
// Setup logging of stdout and stderr to disk |
| 948 | 951 |
if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
|
| 949 | 952 |
return err |
| ... | ... |
@@ -970,8 +973,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
| 970 | 970 |
container.waitLock = make(chan struct{})
|
| 971 | 971 |
|
| 972 | 972 |
container.ToDisk() |
| 973 |
- container.SaveHostConfig(hostConfig) |
|
| 974 |
- go container.monitor(hostConfig) |
|
| 973 |
+ go container.monitor() |
|
| 975 | 974 |
|
| 976 | 975 |
defer utils.Debugf("Container running: %v", container.State.Running)
|
| 977 | 976 |
// We wait for the container to be fully running. |
| ... | ... |
@@ -1008,7 +1010,7 @@ func (container *Container) Start(hostConfig *HostConfig) (err error) {
|
| 1008 | 1008 |
} |
| 1009 | 1009 |
|
| 1010 | 1010 |
func (container *Container) Run() error {
|
| 1011 |
- if err := container.Start(&HostConfig{}); err != nil {
|
|
| 1011 |
+ if err := container.Start(); err != nil {
|
|
| 1012 | 1012 |
return err |
| 1013 | 1013 |
} |
| 1014 | 1014 |
container.Wait() |
| ... | ... |
@@ -1021,8 +1023,7 @@ func (container *Container) Output() (output []byte, err error) {
|
| 1021 | 1021 |
return nil, err |
| 1022 | 1022 |
} |
| 1023 | 1023 |
defer pipe.Close() |
| 1024 |
- hostConfig := &HostConfig{}
|
|
| 1025 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 1024 |
+ if err := container.Start(); err != nil {
|
|
| 1026 | 1025 |
return nil, err |
| 1027 | 1026 |
} |
| 1028 | 1027 |
output, err = ioutil.ReadAll(pipe) |
| ... | ... |
@@ -1054,7 +1055,7 @@ func (container *Container) StderrPipe() (io.ReadCloser, error) {
|
| 1054 | 1054 |
return utils.NewBufReader(reader), nil |
| 1055 | 1055 |
} |
| 1056 | 1056 |
|
| 1057 |
-func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
|
|
| 1057 |
+func (container *Container) allocateNetwork() error {
|
|
| 1058 | 1058 |
if container.Config.NetworkDisabled {
|
| 1059 | 1059 |
return nil |
| 1060 | 1060 |
} |
| ... | ... |
@@ -1083,11 +1084,11 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
|
| 1083 | 1083 |
|
| 1084 | 1084 |
if container.Config.PortSpecs != nil {
|
| 1085 | 1085 |
utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", "))
|
| 1086 |
- if err := migratePortMappings(container.Config, hostConfig); err != nil {
|
|
| 1086 |
+ if err := migratePortMappings(container.Config, container.hostConfig); err != nil {
|
|
| 1087 | 1087 |
return err |
| 1088 | 1088 |
} |
| 1089 | 1089 |
container.Config.PortSpecs = nil |
| 1090 |
- if err := container.SaveHostConfig(hostConfig); err != nil {
|
|
| 1090 |
+ if err := container.writeHostConfig(); err != nil {
|
|
| 1091 | 1091 |
return err |
| 1092 | 1092 |
} |
| 1093 | 1093 |
} |
| ... | ... |
@@ -1099,8 +1100,8 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
|
| 1099 | 1099 |
if container.Config.ExposedPorts != nil {
|
| 1100 | 1100 |
portSpecs = container.Config.ExposedPorts |
| 1101 | 1101 |
} |
| 1102 |
- if hostConfig.PortBindings != nil {
|
|
| 1103 |
- bindings = hostConfig.PortBindings |
|
| 1102 |
+ if container.hostConfig.PortBindings != nil {
|
|
| 1103 |
+ bindings = container.hostConfig.PortBindings |
|
| 1104 | 1104 |
} |
| 1105 | 1105 |
} else {
|
| 1106 | 1106 |
if container.NetworkSettings.Ports != nil {
|
| ... | ... |
@@ -1130,7 +1131,7 @@ func (container *Container) allocateNetwork(hostConfig *HostConfig) error {
|
| 1130 | 1130 |
} |
| 1131 | 1131 |
bindings[port] = binding |
| 1132 | 1132 |
} |
| 1133 |
- container.SaveHostConfig(hostConfig) |
|
| 1133 |
+ container.writeHostConfig() |
|
| 1134 | 1134 |
|
| 1135 | 1135 |
container.NetworkSettings.Ports = bindings |
| 1136 | 1136 |
container.network = iface |
| ... | ... |
@@ -1166,7 +1167,7 @@ func (container *Container) waitLxc() error {
|
| 1166 | 1166 |
} |
| 1167 | 1167 |
} |
| 1168 | 1168 |
|
| 1169 |
-func (container *Container) monitor(hostConfig *HostConfig) {
|
|
| 1169 |
+func (container *Container) monitor() {
|
|
| 1170 | 1170 |
// Wait for the program to exit |
| 1171 | 1171 |
|
| 1172 | 1172 |
// If the command does not exist, try to wait via lxc |
| ... | ... |
@@ -1323,11 +1324,7 @@ func (container *Container) Restart(seconds int) error {
|
| 1323 | 1323 |
if err := container.Stop(seconds); err != nil {
|
| 1324 | 1324 |
return err |
| 1325 | 1325 |
} |
| 1326 |
- hostConfig := &HostConfig{}
|
|
| 1327 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 1328 |
- return err |
|
| 1329 |
- } |
|
| 1330 |
- return nil |
|
| 1326 |
+ return container.Start() |
|
| 1331 | 1327 |
} |
| 1332 | 1328 |
|
| 1333 | 1329 |
// Wait blocks until the container stops running, then returns its exit code. |
| ... | ... |
@@ -40,7 +40,7 @@ func TestIDFormat(t *testing.T) {
|
| 40 | 40 |
func TestMultipleAttachRestart(t *testing.T) {
|
| 41 | 41 |
runtime := mkRuntime(t) |
| 42 | 42 |
defer nuke(runtime) |
| 43 |
- container, hostConfig, _ := mkContainer( |
|
| 43 |
+ container, _ := mkContainer( |
|
| 44 | 44 |
runtime, |
| 45 | 45 |
[]string{"_", "/bin/sh", "-c", "i=1; while [ $i -le 5 ]; do i=`expr $i + 1`; echo hello; done"},
|
| 46 | 46 |
t, |
| ... | ... |
@@ -61,7 +61,7 @@ func TestMultipleAttachRestart(t *testing.T) {
|
| 61 | 61 |
if err != nil {
|
| 62 | 62 |
t.Fatal(err) |
| 63 | 63 |
} |
| 64 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 64 |
+ if err := container.Start(); err != nil {
|
|
| 65 | 65 |
t.Fatal(err) |
| 66 | 66 |
} |
| 67 | 67 |
l1, err := bufio.NewReader(stdout1).ReadString('\n')
|
| ... | ... |
@@ -102,7 +102,7 @@ func TestMultipleAttachRestart(t *testing.T) {
|
| 102 | 102 |
if err != nil {
|
| 103 | 103 |
t.Fatal(err) |
| 104 | 104 |
} |
| 105 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 105 |
+ if err := container.Start(); err != nil {
|
|
| 106 | 106 |
t.Fatal(err) |
| 107 | 107 |
} |
| 108 | 108 |
|
| ... | ... |
@@ -136,7 +136,7 @@ func TestDiff(t *testing.T) {
|
| 136 | 136 |
runtime := mkRuntime(t) |
| 137 | 137 |
defer nuke(runtime) |
| 138 | 138 |
// Create a container and remove a file |
| 139 |
- container1, _, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
|
|
| 139 |
+ container1, _ := mkContainer(runtime, []string{"_", "/bin/rm", "/etc/passwd"}, t)
|
|
| 140 | 140 |
defer runtime.Destroy(container1) |
| 141 | 141 |
|
| 142 | 142 |
// The changelog should be empty and not fail before run. See #1705 |
| ... | ... |
@@ -178,7 +178,7 @@ func TestDiff(t *testing.T) {
|
| 178 | 178 |
} |
| 179 | 179 |
|
| 180 | 180 |
// Create a new container from the committed image |
| 181 |
- container2, _, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
|
|
| 181 |
+ container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/etc/passwd"}, t)
|
|
| 182 | 182 |
defer runtime.Destroy(container2) |
| 183 | 183 |
|
| 184 | 184 |
if err := container2.Run(); err != nil {
|
| ... | ... |
@@ -197,7 +197,7 @@ func TestDiff(t *testing.T) {
|
| 197 | 197 |
} |
| 198 | 198 |
|
| 199 | 199 |
// Create a new container |
| 200 |
- container3, _, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
|
|
| 200 |
+ container3, _ := mkContainer(runtime, []string{"_", "rm", "/bin/httpd"}, t)
|
|
| 201 | 201 |
defer runtime.Destroy(container3) |
| 202 | 202 |
|
| 203 | 203 |
if err := container3.Run(); err != nil {
|
| ... | ... |
@@ -223,7 +223,7 @@ func TestDiff(t *testing.T) {
|
| 223 | 223 |
func TestCommitAutoRun(t *testing.T) {
|
| 224 | 224 |
runtime := mkRuntime(t) |
| 225 | 225 |
defer nuke(runtime) |
| 226 |
- container1, _, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
|
|
| 226 |
+ container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
|
|
| 227 | 227 |
defer runtime.Destroy(container1) |
| 228 | 228 |
|
| 229 | 229 |
if container1.State.Running {
|
| ... | ... |
@@ -246,7 +246,7 @@ func TestCommitAutoRun(t *testing.T) {
|
| 246 | 246 |
} |
| 247 | 247 |
|
| 248 | 248 |
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world |
| 249 |
- container2, hostConfig, _ := mkContainer(runtime, []string{img.ID}, t)
|
|
| 249 |
+ container2, _ := mkContainer(runtime, []string{img.ID}, t)
|
|
| 250 | 250 |
defer runtime.Destroy(container2) |
| 251 | 251 |
stdout, err := container2.StdoutPipe() |
| 252 | 252 |
if err != nil {
|
| ... | ... |
@@ -256,7 +256,7 @@ func TestCommitAutoRun(t *testing.T) {
|
| 256 | 256 |
if err != nil {
|
| 257 | 257 |
t.Fatal(err) |
| 258 | 258 |
} |
| 259 |
- if err := container2.Start(hostConfig); err != nil {
|
|
| 259 |
+ if err := container2.Start(); err != nil {
|
|
| 260 | 260 |
t.Fatal(err) |
| 261 | 261 |
} |
| 262 | 262 |
container2.Wait() |
| ... | ... |
@@ -283,7 +283,7 @@ func TestCommitRun(t *testing.T) {
|
| 283 | 283 |
runtime := mkRuntime(t) |
| 284 | 284 |
defer nuke(runtime) |
| 285 | 285 |
|
| 286 |
- container1, hostConfig, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
|
|
| 286 |
+ container1, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "echo hello > /world"}, t)
|
|
| 287 | 287 |
defer runtime.Destroy(container1) |
| 288 | 288 |
|
| 289 | 289 |
if container1.State.Running {
|
| ... | ... |
@@ -306,7 +306,7 @@ func TestCommitRun(t *testing.T) {
|
| 306 | 306 |
} |
| 307 | 307 |
|
| 308 | 308 |
// FIXME: Make a TestCommit that stops here and check docker.root/layers/img.id/world |
| 309 |
- container2, hostConfig, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
|
|
| 309 |
+ container2, _ := mkContainer(runtime, []string{img.ID, "cat", "/world"}, t)
|
|
| 310 | 310 |
defer runtime.Destroy(container2) |
| 311 | 311 |
stdout, err := container2.StdoutPipe() |
| 312 | 312 |
if err != nil {
|
| ... | ... |
@@ -316,7 +316,7 @@ func TestCommitRun(t *testing.T) {
|
| 316 | 316 |
if err != nil {
|
| 317 | 317 |
t.Fatal(err) |
| 318 | 318 |
} |
| 319 |
- if err := container2.Start(hostConfig); err != nil {
|
|
| 319 |
+ if err := container2.Start(); err != nil {
|
|
| 320 | 320 |
t.Fatal(err) |
| 321 | 321 |
} |
| 322 | 322 |
container2.Wait() |
| ... | ... |
@@ -342,7 +342,7 @@ func TestCommitRun(t *testing.T) {
|
| 342 | 342 |
func TestStart(t *testing.T) {
|
| 343 | 343 |
runtime := mkRuntime(t) |
| 344 | 344 |
defer nuke(runtime) |
| 345 |
- container, hostConfig, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
|
|
| 345 |
+ container, _ := mkContainer(runtime, []string{"-m", "33554432", "-c", "1000", "-i", "_", "/bin/cat"}, t)
|
|
| 346 | 346 |
defer runtime.Destroy(container) |
| 347 | 347 |
|
| 348 | 348 |
cStdin, err := container.StdinPipe() |
| ... | ... |
@@ -350,7 +350,7 @@ func TestStart(t *testing.T) {
|
| 350 | 350 |
t.Fatal(err) |
| 351 | 351 |
} |
| 352 | 352 |
|
| 353 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 353 |
+ if err := container.Start(); err != nil {
|
|
| 354 | 354 |
t.Fatal(err) |
| 355 | 355 |
} |
| 356 | 356 |
|
| ... | ... |
@@ -360,7 +360,7 @@ func TestStart(t *testing.T) {
|
| 360 | 360 |
if !container.State.Running {
|
| 361 | 361 |
t.Errorf("Container should be running")
|
| 362 | 362 |
} |
| 363 |
- if err := container.Start(hostConfig); err == nil {
|
|
| 363 |
+ if err := container.Start(); err == nil {
|
|
| 364 | 364 |
t.Fatalf("A running container should be able to be started")
|
| 365 | 365 |
} |
| 366 | 366 |
|
| ... | ... |
@@ -372,7 +372,7 @@ func TestStart(t *testing.T) {
|
| 372 | 372 |
func TestRun(t *testing.T) {
|
| 373 | 373 |
runtime := mkRuntime(t) |
| 374 | 374 |
defer nuke(runtime) |
| 375 |
- container, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 375 |
+ container, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 376 | 376 |
defer runtime.Destroy(container) |
| 377 | 377 |
|
| 378 | 378 |
if container.State.Running {
|
| ... | ... |
@@ -452,7 +452,7 @@ func TestKillDifferentUser(t *testing.T) {
|
| 452 | 452 |
if container.State.Running {
|
| 453 | 453 |
t.Errorf("Container shouldn't be running")
|
| 454 | 454 |
} |
| 455 |
- if err := container.Start(&HostConfig{}); err != nil {
|
|
| 455 |
+ if err := container.Start(); err != nil {
|
|
| 456 | 456 |
t.Fatal(err) |
| 457 | 457 |
} |
| 458 | 458 |
|
| ... | ... |
@@ -501,7 +501,8 @@ func TestCreateVolume(t *testing.T) {
|
| 501 | 501 |
t.Fatal(err) |
| 502 | 502 |
} |
| 503 | 503 |
defer runtime.Destroy(c) |
| 504 |
- if err := c.Start(hc); err != nil {
|
|
| 504 |
+ c.hostConfig = hc |
|
| 505 |
+ if err := c.Start(); err != nil {
|
|
| 505 | 506 |
t.Fatal(err) |
| 506 | 507 |
} |
| 507 | 508 |
c.WaitTimeout(500 * time.Millisecond) |
| ... | ... |
@@ -525,8 +526,7 @@ func TestKill(t *testing.T) {
|
| 525 | 525 |
if container.State.Running {
|
| 526 | 526 |
t.Errorf("Container shouldn't be running")
|
| 527 | 527 |
} |
| 528 |
- hostConfig := &HostConfig{}
|
|
| 529 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 528 |
+ if err := container.Start(); err != nil {
|
|
| 530 | 529 |
t.Fatal(err) |
| 531 | 530 |
} |
| 532 | 531 |
|
| ... | ... |
@@ -642,8 +642,7 @@ func TestRestartStdin(t *testing.T) {
|
| 642 | 642 |
if err != nil {
|
| 643 | 643 |
t.Fatal(err) |
| 644 | 644 |
} |
| 645 |
- hostConfig := &HostConfig{}
|
|
| 646 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 645 |
+ if err := container.Start(); err != nil {
|
|
| 647 | 646 |
t.Fatal(err) |
| 648 | 647 |
} |
| 649 | 648 |
if _, err := io.WriteString(stdin, "hello world"); err != nil {
|
| ... | ... |
@@ -673,7 +672,7 @@ func TestRestartStdin(t *testing.T) {
|
| 673 | 673 |
if err != nil {
|
| 674 | 674 |
t.Fatal(err) |
| 675 | 675 |
} |
| 676 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 676 |
+ if err := container.Start(); err != nil {
|
|
| 677 | 677 |
t.Fatal(err) |
| 678 | 678 |
} |
| 679 | 679 |
if _, err := io.WriteString(stdin, "hello world #2"); err != nil {
|
| ... | ... |
@@ -850,11 +849,10 @@ func TestMultipleContainers(t *testing.T) {
|
| 850 | 850 |
defer runtime.Destroy(container2) |
| 851 | 851 |
|
| 852 | 852 |
// Start both containers |
| 853 |
- hostConfig := &HostConfig{}
|
|
| 854 |
- if err := container1.Start(hostConfig); err != nil {
|
|
| 853 |
+ if err := container1.Start(); err != nil {
|
|
| 855 | 854 |
t.Fatal(err) |
| 856 | 855 |
} |
| 857 |
- if err := container2.Start(hostConfig); err != nil {
|
|
| 856 |
+ if err := container2.Start(); err != nil {
|
|
| 858 | 857 |
t.Fatal(err) |
| 859 | 858 |
} |
| 860 | 859 |
|
| ... | ... |
@@ -904,8 +902,7 @@ func TestStdin(t *testing.T) {
|
| 904 | 904 |
if err != nil {
|
| 905 | 905 |
t.Fatal(err) |
| 906 | 906 |
} |
| 907 |
- hostConfig := &HostConfig{}
|
|
| 908 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 907 |
+ if err := container.Start(); err != nil {
|
|
| 909 | 908 |
t.Fatal(err) |
| 910 | 909 |
} |
| 911 | 910 |
defer stdin.Close() |
| ... | ... |
@@ -950,8 +947,7 @@ func TestTty(t *testing.T) {
|
| 950 | 950 |
if err != nil {
|
| 951 | 951 |
t.Fatal(err) |
| 952 | 952 |
} |
| 953 |
- hostConfig := &HostConfig{}
|
|
| 954 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 953 |
+ if err := container.Start(); err != nil {
|
|
| 955 | 954 |
t.Fatal(err) |
| 956 | 955 |
} |
| 957 | 956 |
defer stdin.Close() |
| ... | ... |
@@ -992,8 +988,7 @@ func TestEnv(t *testing.T) {
|
| 992 | 992 |
t.Fatal(err) |
| 993 | 993 |
} |
| 994 | 994 |
defer stdout.Close() |
| 995 |
- hostConfig := &HostConfig{}
|
|
| 996 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 995 |
+ if err := container.Start(); err != nil {
|
|
| 997 | 996 |
t.Fatal(err) |
| 998 | 997 |
} |
| 999 | 998 |
container.Wait() |
| ... | ... |
@@ -1121,7 +1116,7 @@ func TestLXCConfig(t *testing.T) {
|
| 1121 | 1121 |
t.Fatal(err) |
| 1122 | 1122 |
} |
| 1123 | 1123 |
defer runtime.Destroy(container) |
| 1124 |
- container.generateLXCConfig(nil) |
|
| 1124 |
+ container.generateLXCConfig() |
|
| 1125 | 1125 |
grepFile(t, container.lxcConfigPath(), "lxc.utsname = foobar") |
| 1126 | 1126 |
grepFile(t, container.lxcConfigPath(), |
| 1127 | 1127 |
fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem))
|
| ... | ... |
@@ -1144,7 +1139,7 @@ func TestCustomLxcConfig(t *testing.T) {
|
| 1144 | 1144 |
t.Fatal(err) |
| 1145 | 1145 |
} |
| 1146 | 1146 |
defer runtime.Destroy(container) |
| 1147 |
- hostConfig := &HostConfig{LxcConf: []KeyValuePair{
|
|
| 1147 |
+ container.hostConfig = &HostConfig{LxcConf: []KeyValuePair{
|
|
| 1148 | 1148 |
{
|
| 1149 | 1149 |
Key: "lxc.utsname", |
| 1150 | 1150 |
Value: "docker", |
| ... | ... |
@@ -1155,7 +1150,7 @@ func TestCustomLxcConfig(t *testing.T) {
|
| 1155 | 1155 |
}, |
| 1156 | 1156 |
}} |
| 1157 | 1157 |
|
| 1158 |
- container.generateLXCConfig(hostConfig) |
|
| 1158 |
+ container.generateLXCConfig() |
|
| 1159 | 1159 |
grepFile(t, container.lxcConfigPath(), "lxc.utsname = docker") |
| 1160 | 1160 |
grepFile(t, container.lxcConfigPath(), "lxc.cgroup.cpuset.cpus = 0,1") |
| 1161 | 1161 |
} |
| ... | ... |
@@ -1208,8 +1203,7 @@ func BenchmarkRunParallel(b *testing.B) {
|
| 1208 | 1208 |
return |
| 1209 | 1209 |
} |
| 1210 | 1210 |
defer runtime.Destroy(container) |
| 1211 |
- hostConfig := &HostConfig{}
|
|
| 1212 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 1211 |
+ if err := container.Start(); err != nil {
|
|
| 1213 | 1212 |
complete <- err |
| 1214 | 1213 |
return |
| 1215 | 1214 |
} |
| ... | ... |
@@ -1253,7 +1247,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
|
| 1253 | 1253 |
defer nuke(r) |
| 1254 | 1254 |
|
| 1255 | 1255 |
// Add directory not owned by root |
| 1256 |
- container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
|
|
| 1256 |
+ container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
|
|
| 1257 | 1257 |
defer r.Destroy(container1) |
| 1258 | 1258 |
|
| 1259 | 1259 |
if container1.State.Running {
|
| ... | ... |
@@ -1290,7 +1284,7 @@ func TestCopyVolumeContent(t *testing.T) {
|
| 1290 | 1290 |
defer nuke(r) |
| 1291 | 1291 |
|
| 1292 | 1292 |
// Put some content in a directory of a container and commit it |
| 1293 |
- container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
|
|
| 1293 |
+ container1, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello/local && echo hello > /hello/local/world"}, t)
|
|
| 1294 | 1294 |
defer r.Destroy(container1) |
| 1295 | 1295 |
|
| 1296 | 1296 |
if container1.State.Running {
|
| ... | ... |
@@ -1527,9 +1521,9 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
|
| 1527 | 1527 |
if err != nil {
|
| 1528 | 1528 |
t.Fatal(err) |
| 1529 | 1529 |
} |
| 1530 |
- |
|
| 1531 | 1530 |
defer runtime.Destroy(c) |
| 1532 |
- if err := c.Start(hc); err != nil {
|
|
| 1531 |
+ c.hostConfig = hc |
|
| 1532 |
+ if err := c.Start(); err != nil {
|
|
| 1533 | 1533 |
t.Fatal(err) |
| 1534 | 1534 |
} |
| 1535 | 1535 |
c.WaitTimeout(500 * time.Millisecond) |
| ... | ... |
@@ -56,7 +56,7 @@ To create the Docker binary, run this command: |
| 56 | 56 |
|
| 57 | 57 |
.. code-block:: bash |
| 58 | 58 |
|
| 59 |
- sudo docker run -lxc-conf=lxc.aa_profile=unconfined -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary |
|
| 59 |
+ sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh binary |
|
| 60 | 60 |
|
| 61 | 61 |
This will create the Docker binary in ``./bundles/<version>-dev/binary/`` |
| 62 | 62 |
|
| ... | ... |
@@ -64,18 +64,11 @@ This will create the Docker binary in ``./bundles/<version>-dev/binary/`` |
| 64 | 64 |
Step 5: Run the Tests |
| 65 | 65 |
--------------------- |
| 66 | 66 |
|
| 67 |
-To run the Docker test cases you first need to disable `AppArmor <https://wiki.ubuntu.com/AppArmor>`_ using the following commands |
|
| 68 |
- |
|
| 69 |
-.. code-block:: bash |
|
| 70 |
- |
|
| 71 |
- sudo /etc/init.d/apparmor stop |
|
| 72 |
- sudo /etc/init.d/apparmor teardown |
|
| 73 |
- |
|
| 74 | 67 |
To execute the test cases, run this command: |
| 75 | 68 |
|
| 76 | 69 |
.. code-block:: bash |
| 77 | 70 |
|
| 78 |
- sudo docker run -lxc-conf=lxc.aa_profile=unconfined -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test |
|
| 71 |
+ sudo docker run -privileged -v `pwd`:/go/src/github.com/dotcloud/docker docker hack/make.sh test |
|
| 79 | 72 |
|
| 80 | 73 |
|
| 81 | 74 |
Note: if you're running the tests in vagrant, you need to specify a dns entry in the command: `-dns 8.8.8.8` |
| ... | ... |
@@ -21,6 +21,14 @@ mountpoint -q $CGROUP || |
| 21 | 21 |
exit 1 |
| 22 | 22 |
} |
| 23 | 23 |
|
| 24 |
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security |
|
| 25 |
+then |
|
| 26 |
+ mount -t securityfs none /sys/kernel/security || {
|
|
| 27 |
+ echo "Could not mount /sys/kernel/security." |
|
| 28 |
+ echo "AppArmor detection and -privileged mode might break." |
|
| 29 |
+ } |
|
| 30 |
+fi |
|
| 31 |
+ |
|
| 24 | 32 |
# Mount the cgroup hierarchies exactly as they are in the parent system. |
| 25 | 33 |
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup) |
| 26 | 34 |
do |
| ... | ... |
@@ -135,11 +135,6 @@ sudo('curl -s https://phantomjs.googlecode.com/files/'
|
| 135 | 135 |
'phantomjs-1.9.1-linux-x86_64.tar.bz2 | tar jx -C /usr/bin' |
| 136 | 136 |
' --strip-components=2 phantomjs-1.9.1-linux-x86_64/bin/phantomjs') |
| 137 | 137 |
|
| 138 |
-#### FIXME. Temporarily install docker with proper apparmor handling |
|
| 139 |
-sudo('stop docker')
|
|
| 140 |
-sudo('wget -q -O /usr/bin/docker http://test.docker.io/test/docker')
|
|
| 141 |
-sudo('start docker')
|
|
| 142 |
- |
|
| 143 | 138 |
# Preventively reboot docker-ci daily |
| 144 | 139 |
sudo('ln -s /sbin/reboot /etc/cron.daily')
|
| 145 | 140 |
|
| ... | ... |
@@ -11,7 +11,7 @@ |
| 11 | 11 |
# "GPG_PASSPHRASE='Test_docker_GPG_passphrase_signature' |
| 12 | 12 |
# "INDEX_AUTH='Encripted_index_authentication' } |
| 13 | 13 |
# TO_BUILD: docker build -t dockerbuilder . |
| 14 |
-# TO_RELEASE: docker run -i -t -privileged -lxc-conf="lxc.aa_profile = unconfined" -e AWS_S3_BUCKET="test.docker.io" dockerbuilder |
|
| 14 |
+# TO_RELEASE: docker run -i -t -privileged -e AWS_S3_BUCKET="test.docker.io" dockerbuilder |
|
| 15 | 15 |
|
| 16 | 16 |
from docker |
| 17 | 17 |
maintainer Daniel Mizyrycki <daniel@dotcloud.com> |
| ... | ... |
@@ -23,9 +23,6 @@ run apt-get update; apt-get install -y -q wget python2.7 |
| 23 | 23 |
# Add production docker binary |
| 24 | 24 |
run wget -q -O /usr/bin/docker http://get.docker.io/builds/Linux/x86_64/docker-latest; chmod +x /usr/bin/docker |
| 25 | 25 |
|
| 26 |
-#### FIXME. Temporarily install docker with proper apparmor handling |
|
| 27 |
-run wget -q -O /usr/bin/docker http://test.docker.io/test/docker; chmod +x /usr/bin/docker |
|
| 28 |
- |
|
| 29 | 26 |
# Add proto docker builder |
| 30 | 27 |
add ./dockerbuild /usr/bin/dockerbuild |
| 31 | 28 |
run chmod +x /usr/bin/dockerbuild |
| ... | ... |
@@ -13,9 +13,6 @@ cd / |
| 13 | 13 |
git clone -q http://github.com/dotcloud/docker /go/src/github.com/dotcloud/docker |
| 14 | 14 |
cd /go/src/github.com/dotcloud/docker |
| 15 | 15 |
|
| 16 |
-echo FIXME. Temporarily skip TestPrivilegedCanMount until DinD works reliable on AWS |
|
| 17 |
-git pull -q https://github.com/mzdaniel/docker.git dind-aws || exit 1 |
|
| 18 |
- |
|
| 19 | 16 |
# Launch docker daemon using dind inside the container |
| 20 | 17 |
./hack/dind /usr/bin/docker -d & |
| 21 | 18 |
sleep 5 |
| ... | ... |
@@ -27,7 +24,7 @@ date > timestamp |
| 27 | 27 |
docker build -t docker . |
| 28 | 28 |
|
| 29 | 29 |
# Run Docker unittests binary and Ubuntu package |
| 30 |
-docker run -privileged -lxc-conf=lxc.aa_profile=unconfined docker hack/make.sh |
|
| 30 |
+docker run -privileged docker hack/make.sh |
|
| 31 | 31 |
exit_status=$? |
| 32 | 32 |
|
| 33 | 33 |
# Display load if test fails |
| ... | ... |
@@ -11,7 +11,6 @@ lxc.utsname = {{.Config.Hostname}}
|
| 11 | 11 |
{{else}}
|
| 12 | 12 |
lxc.utsname = {{.Id}}
|
| 13 | 13 |
{{end}}
|
| 14 |
-#lxc.aa_profile = unconfined |
|
| 15 | 14 |
|
| 16 | 15 |
{{if .Config.NetworkDisabled}}
|
| 17 | 16 |
# network is disabled (-n=false) |
| ... | ... |
@@ -46,7 +45,7 @@ lxc.console = none |
| 46 | 46 |
# no controlling tty at all |
| 47 | 47 |
lxc.tty = 1 |
| 48 | 48 |
|
| 49 |
-{{if .Config.Privileged}}
|
|
| 49 |
+{{if (getHostConfig .).Privileged}}
|
|
| 50 | 50 |
lxc.cgroup.devices.allow = a |
| 51 | 51 |
{{else}}
|
| 52 | 52 |
# no implicit access to devices |
| ... | ... |
@@ -66,7 +65,7 @@ lxc.cgroup.devices.allow = c 4:1 rwm |
| 66 | 66 |
lxc.cgroup.devices.allow = c 1:9 rwm |
| 67 | 67 |
lxc.cgroup.devices.allow = c 1:8 rwm |
| 68 | 68 |
|
| 69 |
-# /dev/pts/* - pts namespaces are "coming soon" |
|
| 69 |
+# /dev/pts/ - pts namespaces are "coming soon" |
|
| 70 | 70 |
lxc.cgroup.devices.allow = c 136:* rwm |
| 71 | 71 |
lxc.cgroup.devices.allow = c 5:2 rwm |
| 72 | 72 |
|
| ... | ... |
@@ -109,8 +108,13 @@ lxc.mount.entry = {{$realPath}} {{$ROOTFS}}/{{$virtualPath}} none bind,{{ if ind
|
| 109 | 109 |
{{end}}
|
| 110 | 110 |
{{end}}
|
| 111 | 111 |
|
| 112 |
-{{if .Config.Privileged}}
|
|
| 112 |
+{{if (getHostConfig .).Privileged}}
|
|
| 113 | 113 |
# retain all capabilities; no lxc.cap.drop line |
| 114 |
+{{if (getCapabilities .).AppArmor}}
|
|
| 115 |
+lxc.aa_profile = unconfined |
|
| 116 |
+{{else}}
|
|
| 117 |
+#lxc.aa_profile = unconfined |
|
| 118 |
+{{end}}
|
|
| 114 | 119 |
{{else}}
|
| 115 | 120 |
# drop linux capabilities (apply mainly to the user root in the container) |
| 116 | 121 |
# (Note: 'lxc.cap.keep' is coming soon and should replace this under the |
| ... | ... |
@@ -130,18 +134,15 @@ lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}}
|
| 130 | 130 |
{{if .Config.CpuShares}}
|
| 131 | 131 |
lxc.cgroup.cpu.shares = {{.Config.CpuShares}}
|
| 132 | 132 |
{{end}}
|
| 133 |
-` |
|
| 134 | 133 |
|
| 135 |
-const LxcHostConfigTemplate = ` |
|
| 136 |
-{{if .LxcConf}}
|
|
| 137 |
-{{range $pair := .LxcConf}}
|
|
| 134 |
+{{if (getHostConfig .).LxcConf}}
|
|
| 135 |
+{{range $pair := (getHostConfig .).LxcConf}}
|
|
| 138 | 136 |
{{$pair.Key}} = {{$pair.Value}}
|
| 139 | 137 |
{{end}}
|
| 140 | 138 |
{{end}}
|
| 141 | 139 |
` |
| 142 | 140 |
|
| 143 | 141 |
var LxcTemplateCompiled *template.Template |
| 144 |
-var LxcHostConfigTemplateCompiled *template.Template |
|
| 145 | 142 |
|
| 146 | 143 |
func getMemorySwap(config *Config) int64 {
|
| 147 | 144 |
// By default, MemorySwap is set to twice the size of RAM. |
| ... | ... |
@@ -152,17 +153,23 @@ func getMemorySwap(config *Config) int64 {
|
| 152 | 152 |
return config.Memory * 2 |
| 153 | 153 |
} |
| 154 | 154 |
|
| 155 |
+func getHostConfig(container *Container) *HostConfig {
|
|
| 156 |
+ return container.hostConfig |
|
| 157 |
+} |
|
| 158 |
+ |
|
| 159 |
+func getCapabilities(container *Container) *Capabilities {
|
|
| 160 |
+ return container.runtime.capabilities |
|
| 161 |
+} |
|
| 162 |
+ |
|
| 155 | 163 |
func init() {
|
| 156 | 164 |
var err error |
| 157 | 165 |
funcMap := template.FuncMap{
|
| 158 |
- "getMemorySwap": getMemorySwap, |
|
| 166 |
+ "getMemorySwap": getMemorySwap, |
|
| 167 |
+ "getHostConfig": getHostConfig, |
|
| 168 |
+ "getCapabilities": getCapabilities, |
|
| 159 | 169 |
} |
| 160 | 170 |
LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
|
| 161 | 171 |
if err != nil {
|
| 162 | 172 |
panic(err) |
| 163 | 173 |
} |
| 164 |
- LxcHostConfigTemplateCompiled, err = template.New("lxc-hostconfig").Funcs(funcMap).Parse(LxcHostConfigTemplate)
|
|
| 165 |
- if err != nil {
|
|
| 166 |
- panic(err) |
|
| 167 |
- } |
|
| 168 | 174 |
} |
| ... | ... |
@@ -24,6 +24,7 @@ type Capabilities struct {
|
| 24 | 24 |
MemoryLimit bool |
| 25 | 25 |
SwapLimit bool |
| 26 | 26 |
IPv4ForwardingDisabled bool |
| 27 |
+ AppArmor bool |
|
| 27 | 28 |
} |
| 28 | 29 |
|
| 29 | 30 |
type Runtime struct {
|
| ... | ... |
@@ -149,8 +150,7 @@ func (runtime *Runtime) Register(container *Container) error {
|
| 149 | 149 |
utils.Debugf("Restarting")
|
| 150 | 150 |
container.State.Ghost = false |
| 151 | 151 |
container.State.setStopped(0) |
| 152 |
- hostConfig, _ := container.ReadHostConfig() |
|
| 153 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 152 |
+ if err := container.Start(); err != nil {
|
|
| 154 | 153 |
return err |
| 155 | 154 |
} |
| 156 | 155 |
nomonitor = true |
| ... | ... |
@@ -169,9 +169,7 @@ func (runtime *Runtime) Register(container *Container) error {
|
| 169 | 169 |
if !container.State.Running {
|
| 170 | 170 |
close(container.waitLock) |
| 171 | 171 |
} else if !nomonitor {
|
| 172 |
- hostConfig, _ := container.ReadHostConfig() |
|
| 173 |
- container.allocateNetwork(hostConfig) |
|
| 174 |
- go container.monitor(hostConfig) |
|
| 172 |
+ go container.monitor() |
|
| 175 | 173 |
} |
| 176 | 174 |
return nil |
| 177 | 175 |
} |
| ... | ... |
@@ -310,6 +308,15 @@ func (runtime *Runtime) UpdateCapabilities(quiet bool) {
|
| 310 | 310 |
if runtime.capabilities.IPv4ForwardingDisabled && !quiet {
|
| 311 | 311 |
log.Printf("WARNING: IPv4 forwarding is disabled.")
|
| 312 | 312 |
} |
| 313 |
+ |
|
| 314 |
+ // Check if AppArmor seems to be enabled on this system. |
|
| 315 |
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
|
|
| 316 |
+ utils.Debugf("/sys/kernel/security/apparmor not found; assuming AppArmor is not enabled.")
|
|
| 317 |
+ runtime.capabilities.AppArmor = false |
|
| 318 |
+ } else {
|
|
| 319 |
+ utils.Debugf("/sys/kernel/security/apparmor found; assuming AppArmor is enabled.")
|
|
| 320 |
+ runtime.capabilities.AppArmor = true |
|
| 321 |
+ } |
|
| 313 | 322 |
} |
| 314 | 323 |
|
| 315 | 324 |
// Create creates a new container from the given configuration with a given name. |
| ... | ... |
@@ -400,6 +407,7 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin |
| 400 | 400 |
Path: entrypoint, |
| 401 | 401 |
Args: args, //FIXME: de-duplicate from config |
| 402 | 402 |
Config: config, |
| 403 |
+ hostConfig: &HostConfig{},
|
|
| 403 | 404 |
Image: img.ID, // Always use the resolved image id |
| 404 | 405 |
NetworkSettings: &NetworkSettings{},
|
| 405 | 406 |
// FIXME: do we need to store this in the container? |
| ... | ... |
@@ -574,6 +582,9 @@ func NewRuntimeFromDirectory(config *DaemonConfig) (*Runtime, error) {
|
| 574 | 574 |
return nil, err |
| 575 | 575 |
} |
| 576 | 576 |
|
| 577 |
+ if err := copyLxcStart(config.Root); err != nil {
|
|
| 578 |
+ return nil, err |
|
| 579 |
+ } |
|
| 577 | 580 |
g, err := NewGraph(path.Join(config.Root, "graph")) |
| 578 | 581 |
if err != nil {
|
| 579 | 582 |
return nil, err |
| ... | ... |
@@ -636,6 +647,27 @@ func (runtime *Runtime) Close() error {
|
| 636 | 636 |
return runtime.containerGraph.Close() |
| 637 | 637 |
} |
| 638 | 638 |
|
| 639 |
+func copyLxcStart(root string) error {
|
|
| 640 |
+ sourcePath, err := exec.LookPath("lxc-start")
|
|
| 641 |
+ if err != nil {
|
|
| 642 |
+ return err |
|
| 643 |
+ } |
|
| 644 |
+ targetPath := path.Join(root, "lxc-start-unconfined") |
|
| 645 |
+ sourceFile, err := os.Open(sourcePath) |
|
| 646 |
+ if err != nil {
|
|
| 647 |
+ return err |
|
| 648 |
+ } |
|
| 649 |
+ defer sourceFile.Close() |
|
| 650 |
+ targetFile, err := os.Create(targetPath) |
|
| 651 |
+ if err != nil {
|
|
| 652 |
+ return err |
|
| 653 |
+ } |
|
| 654 |
+ defer targetFile.Close() |
|
| 655 |
+ os.Chmod(targetPath, 0755) |
|
| 656 |
+ _, err = io.Copy(targetFile, sourceFile) |
|
| 657 |
+ return err |
|
| 658 |
+} |
|
| 659 |
+ |
|
| 639 | 660 |
// History is a convenience type for storing a list of containers, |
| 640 | 661 |
// ordered by creation date. |
| 641 | 662 |
type History []*Container |
| ... | ... |
@@ -325,13 +325,13 @@ func TestGet(t *testing.T) {
|
| 325 | 325 |
runtime := mkRuntime(t) |
| 326 | 326 |
defer nuke(runtime) |
| 327 | 327 |
|
| 328 |
- container1, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 328 |
+ container1, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 329 | 329 |
defer runtime.Destroy(container1) |
| 330 | 330 |
|
| 331 |
- container2, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 331 |
+ container2, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 332 | 332 |
defer runtime.Destroy(container2) |
| 333 | 333 |
|
| 334 |
- container3, _, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 334 |
+ container3, _ := mkContainer(runtime, []string{"_", "ls", "-al"}, t)
|
|
| 335 | 335 |
defer runtime.Destroy(container3) |
| 336 | 336 |
|
| 337 | 337 |
if runtime.Get(container1.ID) != container1 {
|
| ... | ... |
@@ -390,13 +390,13 @@ func startEchoServerContainer(t *testing.T, proto string) (*Runtime, *Container, |
| 390 | 390 |
t.Logf("Port %v already in use, trying another one", strPort)
|
| 391 | 391 |
} |
| 392 | 392 |
|
| 393 |
- hostConfig := &HostConfig{
|
|
| 393 |
+ container.hostConfig = &HostConfig{
|
|
| 394 | 394 |
PortBindings: make(map[Port][]PortBinding), |
| 395 | 395 |
} |
| 396 |
- hostConfig.PortBindings[p] = []PortBinding{
|
|
| 396 |
+ container.hostConfig.PortBindings[p] = []PortBinding{
|
|
| 397 | 397 |
{},
|
| 398 | 398 |
} |
| 399 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 399 |
+ if err := container.Start(); err != nil {
|
|
| 400 | 400 |
nuke(runtime) |
| 401 | 401 |
t.Fatal(err) |
| 402 | 402 |
} |
| ... | ... |
@@ -503,16 +503,15 @@ func TestRestore(t *testing.T) {
|
| 503 | 503 |
runtime1 := mkRuntime(t) |
| 504 | 504 |
defer nuke(runtime1) |
| 505 | 505 |
// Create a container with one instance of docker |
| 506 |
- container1, _, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
|
|
| 506 |
+ container1, _ := mkContainer(runtime1, []string{"_", "ls", "-al"}, t)
|
|
| 507 | 507 |
defer runtime1.Destroy(container1) |
| 508 | 508 |
|
| 509 | 509 |
// Create a second container meant to be killed |
| 510 |
- container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
|
|
| 510 |
+ container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
|
|
| 511 | 511 |
defer runtime1.Destroy(container2) |
| 512 | 512 |
|
| 513 | 513 |
// Start the container non blocking |
| 514 |
- hostConfig := &HostConfig{}
|
|
| 515 |
- if err := container2.Start(hostConfig); err != nil {
|
|
| 514 |
+ if err := container2.Start(); err != nil {
|
|
| 516 | 515 |
t.Fatal(err) |
| 517 | 516 |
} |
| 518 | 517 |
|
| ... | ... |
@@ -575,25 +574,23 @@ func TestReloadContainerLinks(t *testing.T) {
|
| 575 | 575 |
runtime1 := mkRuntime(t) |
| 576 | 576 |
defer nuke(runtime1) |
| 577 | 577 |
// Create a container with one instance of docker |
| 578 |
- container1, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
|
|
| 578 |
+ container1, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/sh"}, t)
|
|
| 579 | 579 |
defer runtime1.Destroy(container1) |
| 580 | 580 |
|
| 581 | 581 |
// Create a second container meant to be killed |
| 582 |
- container2, _, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
|
|
| 582 |
+ container2, _ := mkContainer(runtime1, []string{"-i", "_", "/bin/cat"}, t)
|
|
| 583 | 583 |
defer runtime1.Destroy(container2) |
| 584 | 584 |
|
| 585 | 585 |
// Start the container non blocking |
| 586 |
- hostConfig := &HostConfig{}
|
|
| 587 |
- if err := container2.Start(hostConfig); err != nil {
|
|
| 586 |
+ if err := container2.Start(); err != nil {
|
|
| 588 | 587 |
t.Fatal(err) |
| 589 | 588 |
} |
| 590 |
- h1 := &HostConfig{}
|
|
| 591 | 589 |
// Add a link to container 2 |
| 592 |
- h1.Links = []string{"/" + container2.ID + ":first"}
|
|
| 590 |
+ container1.hostConfig.Links = []string{"/" + container2.ID + ":first"}
|
|
| 593 | 591 |
if err := runtime1.RegisterLink(container1, container2, "first"); err != nil {
|
| 594 | 592 |
t.Fatal(err) |
| 595 | 593 |
} |
| 596 |
- if err := container1.Start(h1); err != nil {
|
|
| 594 |
+ if err := container1.Start(); err != nil {
|
|
| 597 | 595 |
t.Fatal(err) |
| 598 | 596 |
} |
| 599 | 597 |
|
| ... | ... |
@@ -1304,7 +1304,7 @@ func (srv *Server) RegisterLinks(name string, hostConfig *HostConfig) error {
|
| 1304 | 1304 |
// After we load all the links into the runtime |
| 1305 | 1305 |
// set them to nil on the hostconfig |
| 1306 | 1306 |
hostConfig.Links = nil |
| 1307 |
- if err := container.SaveHostConfig(hostConfig); err != nil {
|
|
| 1307 |
+ if err := container.writeHostConfig(); err != nil {
|
|
| 1308 | 1308 |
return err |
| 1309 | 1309 |
} |
| 1310 | 1310 |
} |
| ... | ... |
@@ -1317,8 +1317,11 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
|
| 1317 | 1317 |
if container == nil {
|
| 1318 | 1318 |
return fmt.Errorf("No such container: %s", name)
|
| 1319 | 1319 |
} |
| 1320 |
- |
|
| 1321 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 1320 |
+ if hostConfig != nil {
|
|
| 1321 |
+ container.hostConfig = hostConfig |
|
| 1322 |
+ container.ToDisk() |
|
| 1323 |
+ } |
|
| 1324 |
+ if err := container.Start(); err != nil {
|
|
| 1322 | 1325 |
return fmt.Errorf("Cannot start container %s: %s", name, err)
|
| 1323 | 1326 |
} |
| 1324 | 1327 |
srv.LogEvent("start", container.ShortID(), runtime.repositories.ImageName(container.Image))
|
| ... | ... |
@@ -246,14 +246,14 @@ func TestContainerTop(t *testing.T) {
|
| 246 | 246 |
|
| 247 | 247 |
srv := &Server{runtime: runtime}
|
| 248 | 248 |
|
| 249 |
- c, hostConfig, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
|
|
| 250 |
- c, hostConfig, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
|
|
| 249 |
+ c, _ := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
|
|
| 250 |
+ c, err := mkContainer(runtime, []string{"_", "/bin/sh", "-c", "sleep 2"}, t)
|
|
| 251 | 251 |
if err != nil {
|
| 252 | 252 |
t.Fatal(err) |
| 253 | 253 |
} |
| 254 | 254 |
|
| 255 | 255 |
defer runtime.Destroy(c) |
| 256 |
- if err := c.Start(hostConfig); err != nil {
|
|
| 256 |
+ if err := c.Start(); err != nil {
|
|
| 257 | 257 |
t.Fatal(err) |
| 258 | 258 |
} |
| 259 | 259 |
|
| ... | ... |
@@ -116,7 +116,7 @@ func readFile(src string, t *testing.T) (content string) {
|
| 116 | 116 |
// dynamically replaced by the current test image. |
| 117 | 117 |
// The caller is responsible for destroying the container. |
| 118 | 118 |
// Call t.Fatal() at the first error. |
| 119 |
-func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConfig, error) {
|
|
| 119 |
+func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, error) {
|
|
| 120 | 120 |
config, hostConfig, _, err := ParseRun(args, nil) |
| 121 | 121 |
defer func() {
|
| 122 | 122 |
if err != nil && t != nil {
|
| ... | ... |
@@ -124,16 +124,17 @@ func mkContainer(r *Runtime, args []string, t *testing.T) (*Container, *HostConf |
| 124 | 124 |
} |
| 125 | 125 |
}() |
| 126 | 126 |
if err != nil {
|
| 127 |
- return nil, nil, err |
|
| 127 |
+ return nil, err |
|
| 128 | 128 |
} |
| 129 | 129 |
if config.Image == "_" {
|
| 130 | 130 |
config.Image = GetTestImage(r).ID |
| 131 | 131 |
} |
| 132 | 132 |
c, _, err := r.Create(config, "") |
| 133 | 133 |
if err != nil {
|
| 134 |
- return nil, nil, err |
|
| 134 |
+ return nil, err |
|
| 135 | 135 |
} |
| 136 |
- return c, hostConfig, nil |
|
| 136 |
+ c.hostConfig = hostConfig |
|
| 137 |
+ return c, nil |
|
| 137 | 138 |
} |
| 138 | 139 |
|
| 139 | 140 |
// Create a test container, start it, wait for it to complete, destroy it, |
| ... | ... |
@@ -146,7 +147,7 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e |
| 146 | 146 |
t.Fatal(err) |
| 147 | 147 |
} |
| 148 | 148 |
}() |
| 149 |
- container, hostConfig, err := mkContainer(r, args, t) |
|
| 149 |
+ container, err := mkContainer(r, args, t) |
|
| 150 | 150 |
if err != nil {
|
| 151 | 151 |
return "", err |
| 152 | 152 |
} |
| ... | ... |
@@ -156,7 +157,7 @@ func runContainer(r *Runtime, args []string, t *testing.T) (output string, err e |
| 156 | 156 |
return "", err |
| 157 | 157 |
} |
| 158 | 158 |
defer stdout.Close() |
| 159 |
- if err := container.Start(hostConfig); err != nil {
|
|
| 159 |
+ if err := container.Start(); err != nil {
|
|
| 160 | 160 |
return "", err |
| 161 | 161 |
} |
| 162 | 162 |
container.Wait() |