Signed-off-by: Vincent Demeester <vincent@sbr.pm>
@@ -11,6 +11,7 @@ import (
 
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/cliconfig"
+	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/go-check/check"
 )
@@ -39,7 +40,7 @@ type DockerSuite struct {
 
 func (s *DockerSuite) OnTimeout(c *check.C) {
 	if daemonPid > 0 && isLocalDaemon {
-		signalDaemonDump(daemonPid)
+		daemon.SignalDaemonDump(daemonPid)
 	}
 }
 
@@ -63,7 +64,7 @@ func init() {
 type DockerRegistrySuite struct {
 	ds  *DockerSuite
 	reg *testRegistryV2
-	d   *Daemon
+	d   *daemon.Daemon
 }
 
 func (s *DockerRegistrySuite) OnTimeout(c *check.C) {
@@ -73,7 +74,9 @@ func (s *DockerRegistrySuite) OnTimeout(c *check.C) {
 func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
 	testRequires(c, DaemonIsLinux, RegistryHosting)
 	s.reg = setupRegistry(c, false, "", "")
-	s.d = NewDaemon(c)
+	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: experimentalDaemon,
+	})
 }
 
 func (s *DockerRegistrySuite) TearDownTest(c *check.C) {
@@ -95,7 +98,7 @@ func init() {
 type DockerSchema1RegistrySuite struct {
 	ds  *DockerSuite
 	reg *testRegistryV2
-	d   *Daemon
+	d   *daemon.Daemon
 }
 
 func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) {
@@ -105,7 +108,9 @@ func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) {
 func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
 	testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64)
 	s.reg = setupRegistry(c, true, "", "")
-	s.d = NewDaemon(c)
+	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: experimentalDaemon,
+	})
 }
 
 func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
@@ -127,7 +132,7 @@ func init() {
 type DockerRegistryAuthHtpasswdSuite struct {
 	ds  *DockerSuite
 	reg *testRegistryV2
-	d   *Daemon
+	d   *daemon.Daemon
 }
 
 func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) {
@@ -137,7 +142,9 @@ func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) {
 func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) {
 	testRequires(c, DaemonIsLinux, RegistryHosting)
 	s.reg = setupRegistry(c, false, "htpasswd", "")
-	s.d = NewDaemon(c)
+	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: experimentalDaemon,
+	})
 }
 
 func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) {
@@ -161,7 +168,7 @@ func init() {
 type DockerRegistryAuthTokenSuite struct {
 	ds  *DockerSuite
 	reg *testRegistryV2
-	d   *Daemon
+	d   *daemon.Daemon
 }
 
 func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) {
@@ -170,7 +177,9 @@ func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) {
 
 func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) {
 	testRequires(c, DaemonIsLinux, RegistryHosting)
-	s.d = NewDaemon(c)
+	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: experimentalDaemon,
+	})
 }
 
 func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) {
@@ -200,7 +209,7 @@ func init() {
 
 type DockerDaemonSuite struct {
 	ds *DockerSuite
-	d  *Daemon
+	d  *daemon.Daemon
 }
 
 func (s *DockerDaemonSuite) OnTimeout(c *check.C) {
@@ -209,7 +218,9 @@ func (s *DockerDaemonSuite) OnTimeout(c *check.C) {
 
 func (s *DockerDaemonSuite) SetUpTest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
-	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+		Experimental: experimentalDaemon,
+	})
 }
 
 func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
@@ -221,7 +232,7 @@ func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
 }
 
 func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
-	filepath.Walk(daemonSockRoot, func(path string, fi os.FileInfo, err error) error {
+	filepath.Walk(daemon.SockRoot, func(path string, fi os.FileInfo, err error) error {
 		if err != nil {
 			// ignore errors here
 			// not cleaning up sockets is not really an error
@@ -232,7 +243,7 @@ func (s *DockerDaemonSuite) TearDownSuite(c *check.C) {
 		}
 		return nil
 	})
-	os.RemoveAll(daemonSockRoot)
+	os.RemoveAll(daemon.SockRoot)
 }
 
 const defaultSwarmPort = 2477
@@ -246,7 +257,7 @@ func init() {
 type DockerSwarmSuite struct {
 	server      *httptest.Server
 	ds          *DockerSuite
-	daemons     []*SwarmDaemon
+	daemons     []*daemon.Swarm
 	daemonsLock sync.Mutex // protect access to daemons
 	portIndex   int
 }
@@ -263,28 +274,27 @@ func (s *DockerSwarmSuite) SetUpTest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 }
 
-func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon {
-	d := &SwarmDaemon{
-		Daemon: NewDaemon(c),
-		port:   defaultSwarmPort + s.portIndex,
+func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Swarm {
+	d := &daemon.Swarm{
+		Daemon: daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+			Experimental: experimentalDaemon,
+		}),
+		Port: defaultSwarmPort + s.portIndex,
 	}
-	d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port)
+	d.ListenAddr = fmt.Sprintf("0.0.0.0:%d", d.Port)
 	args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts
-	if experimentalDaemon {
-		args = append(args, "--experimental")
-	}
 	err := d.StartWithBusybox(args...)
 	c.Assert(err, check.IsNil)
 
 	if joinSwarm == true {
 		if len(s.daemons) > 0 {
-			tokens := s.daemons[0].joinTokens(c)
+			tokens := s.daemons[0].JoinTokens(c)
 			token := tokens.Worker
 			if manager {
 				token = tokens.Manager
 			}
 			c.Assert(d.Join(swarm.JoinRequest{
-				RemoteAddrs: []string{s.daemons[0].listenAddr},
+				RemoteAddrs: []string{s.daemons[0].ListenAddr},
 				JoinToken:   token,
 			}), check.IsNil)
 		} else {
@@ -306,13 +316,14 @@ func (s *DockerSwarmSuite) TearDownTest(c *check.C) {
 	for _, d := range s.daemons {
 		if d != nil {
 			d.Stop()
+			// FIXME(vdemeester) should be handled by SwarmDaemon ?
 			// raft state file is quite big (64MB) so remove it after every test
-			walDir := filepath.Join(d.root, "swarm/raft/wal")
+			walDir := filepath.Join(d.Root, "swarm/raft/wal")
 			if err := os.RemoveAll(walDir); err != nil {
 				c.Logf("error removing %v: %v", walDir, err)
 			}
 
-			cleanupExecRoot(c, d.execRoot)
+			d.CleanupExecRoot(c)
 		}
 	}
 	s.daemons = nil
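
Reviewer note: the daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{...}) literal above is now repeated in every suite's SetUpTest. A small helper in the test package would keep each call site to one line; the sketch below is a possible follow-up, not part of this commit (newTestDaemon is a hypothetical name, the three globals are the existing ones from the test package):

func newTestDaemon(c *check.C) *daemon.Daemon {
	// dockerBinary, dockerdBinary and experimentalDaemon are the existing
	// package-level globals of the integration-cli test package.
	return daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
		Experimental: experimentalDaemon,
	})
}

Each SetUpTest would then read s.d = newTestDaemon(c).
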
deleted file mode 100644
@@ -1,608 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/api/types/events"
-	"github.com/docker/docker/opts"
-	"github.com/docker/docker/pkg/integration/checker"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/go-connections/sockets"
-	"github.com/docker/go-connections/tlsconfig"
-	"github.com/go-check/check"
-)
-
-var daemonSockRoot = filepath.Join(os.TempDir(), "docker-integration")
-
-// Daemon represents a Docker daemon for the testing framework.
-type Daemon struct {
-	GlobalFlags []string
-
-	id                string
-	c                 *check.C
-	logFile           *os.File
-	folder            string
-	root              string
-	stdin             io.WriteCloser
-	stdout, stderr    io.ReadCloser
-	cmd               *exec.Cmd
-	storageDriver     string
-	wait              chan error
-	userlandProxy     bool
-	useDefaultHost    bool
-	useDefaultTLSHost bool
-	execRoot          string
-}
-
-type clientConfig struct {
-	transport *http.Transport
-	scheme    string
-	addr      string
-}
-
-// NewDaemon returns a Daemon instance to be used for testing.
-// This will create a directory such as d123456789 in the folder specified by $DEST.
-// The daemon will not automatically start.
-func NewDaemon(c *check.C) *Daemon {
-	dest := os.Getenv("DEST")
-	c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable"))
-
-	err := os.MkdirAll(daemonSockRoot, 0700)
-	c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root"))
-
-	id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
-	dir := filepath.Join(dest, id)
-	daemonFolder, err := filepath.Abs(dir)
-	c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir))
-	daemonRoot := filepath.Join(daemonFolder, "root")
-
-	c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir))
-
-	userlandProxy := true
-	if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
-		if val, err := strconv.ParseBool(env); err != nil {
-			userlandProxy = val
-		}
-	}
-
-	return &Daemon{
-		id:            id,
-		c:             c,
-		folder:        daemonFolder,
-		root:          daemonRoot,
-		storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
-		userlandProxy: userlandProxy,
-		execRoot:      filepath.Join(os.TempDir(), "docker-execroot", id),
-	}
-}
-
-// RootDir returns the root directory of the daemon.
-func (d *Daemon) RootDir() string {
-	return d.root
-}
-
-func (d *Daemon) getClientConfig() (*clientConfig, error) {
-	var (
-		transport *http.Transport
-		scheme    string
-		addr      string
-		proto     string
-	)
-	if d.useDefaultTLSHost {
-		option := &tlsconfig.Options{
-			CAFile:   "fixtures/https/ca.pem",
-			CertFile: "fixtures/https/client-cert.pem",
-			KeyFile:  "fixtures/https/client-key.pem",
-		}
-		tlsConfig, err := tlsconfig.Client(*option)
-		if err != nil {
-			return nil, err
-		}
-		transport = &http.Transport{
-			TLSClientConfig: tlsConfig,
-		}
-		addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
-		scheme = "https"
-		proto = "tcp"
-	} else if d.useDefaultHost {
-		addr = opts.DefaultUnixSocket
-		proto = "unix"
-		scheme = "http"
-		transport = &http.Transport{}
-	} else {
-		addr = d.sockPath()
-		proto = "unix"
-		scheme = "http"
-		transport = &http.Transport{}
-	}
-
-	d.c.Assert(sockets.ConfigureTransport(transport, proto, addr), check.IsNil)
-
-	return &clientConfig{
-		transport: transport,
-		scheme:    scheme,
-		addr:      addr,
-	}, nil
-}
-
-// Start will start the daemon and return once it is ready to receive requests.
-// You can specify additional daemon flags.
-func (d *Daemon) Start(args ...string) error {
-	logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
-	d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder))
-
-	return d.StartWithLogFile(logFile, args...)
-}
-
-// StartWithLogFile will start the daemon and attach its streams to a given file.
-func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
-	dockerdBinary, err := exec.LookPath(dockerdBinary)
-	d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id))
-
-	args := append(d.GlobalFlags,
-		"--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock",
-		"--graph", d.root,
-		"--exec-root", d.execRoot,
-		"--pidfile", fmt.Sprintf("%s/docker.pid", d.folder),
-		fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
-	)
-	if experimentalDaemon {
-		args = append(args, "--experimental", "--init")
-	}
-	if !(d.useDefaultHost || d.useDefaultTLSHost) {
-		args = append(args, []string{"--host", d.sock()}...)
-	}
-	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
-		args = append(args, []string{"--userns-remap", root}...)
-	}
-
-	// If we don't explicitly set the log-level or debug flag(-D) then
-	// turn on debug mode
-	foundLog := false
-	foundSd := false
-	for _, a := range providedArgs {
-		if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
-			foundLog = true
-		}
-		if strings.Contains(a, "--storage-driver") {
-			foundSd = true
-		}
-	}
-	if !foundLog {
-		args = append(args, "--debug")
-	}
-	if d.storageDriver != "" && !foundSd {
-		args = append(args, "--storage-driver", d.storageDriver)
-	}
-
-	args = append(args, providedArgs...)
-	d.cmd = exec.Command(dockerdBinary, args...)
-	d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
-	d.cmd.Stdout = out
-	d.cmd.Stderr = out
-	d.logFile = out
-
-	if err := d.cmd.Start(); err != nil {
-		return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err)
-	}
-
-	wait := make(chan error)
-
-	go func() {
-		wait <- d.cmd.Wait()
-		d.c.Logf("[%s] exiting daemon", d.id)
-		close(wait)
-	}()
-
-	d.wait = wait
-
-	tick := time.Tick(500 * time.Millisecond)
-	// make sure daemon is ready to receive requests
-	startTime := time.Now().Unix()
-	for {
-		d.c.Logf("[%s] waiting for daemon to start", d.id)
-		if time.Now().Unix()-startTime > 5 {
-			// After 5 seconds, give up
-			return fmt.Errorf("[%s] Daemon exited and never started", d.id)
-		}
-		select {
-		case <-time.After(2 * time.Second):
-			return fmt.Errorf("[%s] timeout: daemon does not respond", d.id)
-		case <-tick:
-			clientConfig, err := d.getClientConfig()
-			if err != nil {
-				return err
-			}
-
-			client := &http.Client{
-				Transport: clientConfig.transport,
-			}
-
-			req, err := http.NewRequest("GET", "/_ping", nil)
-			d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id))
-			req.URL.Host = clientConfig.addr
-			req.URL.Scheme = clientConfig.scheme
-			resp, err := client.Do(req)
-			if err != nil {
-				continue
-			}
-			if resp.StatusCode != http.StatusOK {
-				d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status)
-			}
-			d.c.Logf("[%s] daemon started", d.id)
-			d.root, err = d.queryRootDir()
-			if err != nil {
-				return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err)
-			}
-			return nil
-		case <-d.wait:
-			return fmt.Errorf("[%s] Daemon exited during startup", d.id)
-		}
-	}
-}
-
-// StartWithBusybox will first start the daemon with Daemon.Start()
-// then save the busybox image from the main daemon and load it into this Daemon instance.
-func (d *Daemon) StartWithBusybox(arg ...string) error {
-	if err := d.Start(arg...); err != nil {
-		return err
-	}
-	return d.LoadBusybox()
-}
-
-// Kill will send a SIGKILL to the daemon
-func (d *Daemon) Kill() error {
-	if d.cmd == nil || d.wait == nil {
-		return errors.New("daemon not started")
-	}
-
-	defer func() {
-		d.logFile.Close()
-		d.cmd = nil
-	}()
-
-	if err := d.cmd.Process.Kill(); err != nil {
-		d.c.Logf("Could not kill daemon: %v", err)
-		return err
-	}
-
-	if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
-// stack to its log file and exit
-// This is used primarily for gathering debug information on test timeout
-func (d *Daemon) DumpStackAndQuit() {
-	if d.cmd == nil || d.cmd.Process == nil {
-		return
-	}
-	signalDaemonDump(d.cmd.Process.Pid)
-}
-
-// Stop will send a SIGINT every second and wait for the daemon to stop.
-// If it timeouts, a SIGKILL is sent.
-// Stop will not delete the daemon directory. If a purged daemon is needed,
-// instantiate a new one with NewDaemon.
-func (d *Daemon) Stop() error {
-	if d.cmd == nil || d.wait == nil {
-		return errors.New("daemon not started")
-	}
-
-	defer func() {
-		d.logFile.Close()
-		d.cmd = nil
-	}()
-
-	i := 1
-	tick := time.Tick(time.Second)
-
-	if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
-		return fmt.Errorf("could not send signal: %v", err)
-	}
-out1:
-	for {
-		select {
-		case err := <-d.wait:
-			return err
-		case <-time.After(20 * time.Second):
-			// time for stopping jobs and run onShutdown hooks
-			d.c.Logf("timeout: %v", d.id)
-			break out1
-		}
-	}
-
-out2:
-	for {
-		select {
-		case err := <-d.wait:
-			return err
-		case <-tick:
-			i++
-			if i > 5 {
-				d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
-				break out2
-			}
-			d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
-			if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
-				return fmt.Errorf("could not send signal: %v", err)
-			}
-		}
-	}
-
-	if err := d.cmd.Process.Kill(); err != nil {
-		d.c.Logf("Could not kill daemon: %v", err)
-		return err
-	}
-
-	if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Restart will restart the daemon by first stopping it and then starting it.
-func (d *Daemon) Restart(arg ...string) error {
-	d.Stop()
-	// in the case of tests running a user namespace-enabled daemon, we have resolved
-	// d.root to be the actual final path of the graph dir after the "uid.gid" of
-	// remapped root is added--we need to subtract it from the path before calling
-	// start or else we will continue making subdirectories rather than truly restarting
-	// with the same location/root:
-	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
-		d.root = filepath.Dir(d.root)
-	}
-	return d.Start(arg...)
-}
-
-// LoadBusybox will load the stored busybox into a newly started daemon
-func (d *Daemon) LoadBusybox() error {
-	bb := filepath.Join(d.folder, "busybox.tar")
-	if _, err := os.Stat(bb); err != nil {
-		if !os.IsNotExist(err) {
-			return fmt.Errorf("unexpected error on busybox.tar stat: %v", err)
-		}
-		// saving busybox image from main daemon
-		if out, err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil {
-			imagesOut, _ := exec.Command(dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput()
-			return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut)))
-		}
-	}
-	// loading busybox image to this daemon
-	if out, err := d.Cmd("load", "--input", bb); err != nil {
-		return fmt.Errorf("could not load busybox image: %s", out)
-	}
-	if err := os.Remove(bb); err != nil {
-		d.c.Logf("could not remove %s: %v", bb, err)
-	}
-	return nil
-}
-
-func (d *Daemon) queryRootDir() (string, error) {
-	// update daemon root by asking /info endpoint (to support user
-	// namespaced daemon with root remapped uid.gid directory)
-	clientConfig, err := d.getClientConfig()
-	if err != nil {
-		return "", err
-	}
-
-	client := &http.Client{
-		Transport: clientConfig.transport,
-	}
-
-	req, err := http.NewRequest("GET", "/info", nil)
-	if err != nil {
-		return "", err
-	}
-	req.Header.Set("Content-Type", "application/json")
-	req.URL.Host = clientConfig.addr
-	req.URL.Scheme = clientConfig.scheme
-
-	resp, err := client.Do(req)
-	if err != nil {
-		return "", err
-	}
-	body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
-		return resp.Body.Close()
-	})
-
-	type Info struct {
-		DockerRootDir string
-	}
-	var b []byte
-	var i Info
-	b, err = readBody(body)
-	if err == nil && resp.StatusCode == http.StatusOK {
-		// read the docker root dir
-		if err = json.Unmarshal(b, &i); err == nil {
-			return i.DockerRootDir, nil
-		}
-	}
-	return "", err
-}
-
-func (d *Daemon) sock() string {
-	return fmt.Sprintf("unix://" + d.sockPath())
-}
-
-func (d *Daemon) sockPath() string {
-	return filepath.Join(daemonSockRoot, d.id+".sock")
-}
-
-func (d *Daemon) waitRun(contID string) error {
-	args := []string{"--host", d.sock()}
-	return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...)
-}
-
-func (d *Daemon) getBaseDeviceSize(c *check.C) int64 {
-	infoCmdOutput, _, err := runCommandPipelineWithOutput(
-		exec.Command(dockerBinary, "-H", d.sock(), "info"),
-		exec.Command("grep", "Base Device Size"),
-	)
-	c.Assert(err, checker.IsNil)
-	basesizeSlice := strings.Split(infoCmdOutput, ":")
-	basesize := strings.Trim(basesizeSlice[1], " ")
-	basesize = strings.Trim(basesize, "\n")[:len(basesize)-3]
-	basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64)
-	c.Assert(err, checker.IsNil)
-	basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024)
-	return basesizeBytes
-}
-
-// Cmd will execute a docker CLI command against this Daemon.
-// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version
-func (d *Daemon) Cmd(args ...string) (string, error) {
-	b, err := d.command(args...).CombinedOutput()
-	return string(b), err
-}
-
-func (d *Daemon) command(args ...string) *exec.Cmd {
-	return exec.Command(dockerBinary, d.prependHostArg(args)...)
-}
-
-func (d *Daemon) prependHostArg(args []string) []string {
-	for _, arg := range args {
-		if arg == "--host" || arg == "-H" {
-			return args
-		}
-	}
-	return append([]string{"--host", d.sock()}, args...)
-}
-
-// SockRequest executes a socket request on a daemon and returns statuscode and output.
-func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) {
-	jsonData := bytes.NewBuffer(nil)
-	if err := json.NewEncoder(jsonData).Encode(data); err != nil {
-		return -1, nil, err
-	}
-
-	res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json")
-	if err != nil {
-		return -1, nil, err
-	}
-	b, err := readBody(body)
-	return res.StatusCode, b, err
-}
-
-// SockRequestRaw executes a socket request on a daemon and returns an http
-// response and a reader for the output data.
-func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
-	return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock())
-}
-
-// LogFileName returns the path the the daemon's log file
-func (d *Daemon) LogFileName() string {
-	return d.logFile.Name()
-}
-
-func (d *Daemon) getIDByName(name string) (string, error) {
-	return d.inspectFieldWithError(name, "Id")
-}
-
-func (d *Daemon) activeContainers() (ids []string) {
-	out, _ := d.Cmd("ps", "-q")
-	for _, id := range strings.Split(out, "\n") {
-		if id = strings.TrimSpace(id); id != "" {
-			ids = append(ids, id)
-		}
-	}
-	return
-}
-
-func (d *Daemon) inspectFilter(name, filter string) (string, error) {
-	format := fmt.Sprintf("{{%s}}", filter)
-	out, err := d.Cmd("inspect", "-f", format, name)
-	if err != nil {
-		return "", fmt.Errorf("failed to inspect %s: %s", name, out)
-	}
-	return strings.TrimSpace(out), nil
-}
-
-func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
-	return d.inspectFilter(name, fmt.Sprintf(".%s", field))
-}
-
-func (d *Daemon) findContainerIP(id string) string {
-	out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id)
-	if err != nil {
-		d.c.Log(err)
-	}
-	return strings.Trim(out, " \r\n'")
-}
-
-func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) {
-	buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...)
-	return runCommandWithOutput(buildCmd)
-}
-
-func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
-	out, err := d.Cmd("ps", "-q")
-	c.Assert(err, checker.IsNil)
-	if len(strings.TrimSpace(out)) == 0 {
-		return 0, nil
-	}
-	return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
-}
-
-func (d *Daemon) reloadConfig() error {
-	if d.cmd == nil || d.cmd.Process == nil {
-		return fmt.Errorf("daemon is not running")
-	}
-
-	errCh := make(chan error)
-	started := make(chan struct{})
-	go func() {
-		_, body, err := sockRequestRawToDaemon("GET", "/events", nil, "", d.sock())
-		close(started)
-		if err != nil {
-			errCh <- err
-		}
-		defer body.Close()
-		dec := json.NewDecoder(body)
-		for {
-			var e events.Message
-			if err := dec.Decode(&e); err != nil {
-				errCh <- err
-				return
-			}
-			if e.Type != events.DaemonEventType {
-				continue
-			}
-			if e.Action != "reload" {
-				continue
-			}
-			close(errCh) // notify that we are done
-			return
-		}
-	}()
-
-	<-started
-	if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
-		return fmt.Errorf("error signaling daemon reload: %v", err)
-	}
-	select {
-	case err := <-errCh:
-		if err != nil {
-			return fmt.Errorf("error waiting for daemon reload event: %v", err)
-		}
-	case <-time.After(30 * time.Second):
-		return fmt.Errorf("timeout waiting for daemon reload event")
-	}
-	return nil
-}
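
Note: the new file below replaces the deleted one; the helper moves from package main into the importable github.com/docker/docker/integration-cli/daemon package. The visible API shift is the constructor: the old NewDaemon read the test package's globals implicitly, while daemon.New takes the binaries and the experimental flag explicitly (both calls are taken from this diff):

// Before (package main, implicit globals):
d := NewDaemon(c)

// After (explicit dependencies on an importable package):
d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
	Experimental: experimentalDaemon,
})
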
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,834 @@ |
| 0 |
+package daemon |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "bytes" |
|
| 4 |
+ "crypto/tls" |
|
| 5 |
+ "encoding/json" |
|
| 6 |
+ "errors" |
|
| 7 |
+ "fmt" |
|
| 8 |
+ "io" |
|
| 9 |
+ "io/ioutil" |
|
| 10 |
+ "net" |
|
| 11 |
+ "net/http" |
|
| 12 |
+ "net/http/httputil" |
|
| 13 |
+ "net/url" |
|
| 14 |
+ "os" |
|
| 15 |
+ "os/exec" |
|
| 16 |
+ "path/filepath" |
|
| 17 |
+ "strconv" |
|
| 18 |
+ "strings" |
|
| 19 |
+ "time" |
|
| 20 |
+ |
|
| 21 |
+ "github.com/docker/docker/api/types/events" |
|
| 22 |
+ "github.com/docker/docker/opts" |
|
| 23 |
+ "github.com/docker/docker/pkg/integration" |
|
| 24 |
+ "github.com/docker/docker/pkg/integration/checker" |
|
| 25 |
+ icmd "github.com/docker/docker/pkg/integration/cmd" |
|
| 26 |
+ "github.com/docker/docker/pkg/ioutils" |
|
| 27 |
+ "github.com/docker/docker/pkg/stringid" |
|
| 28 |
+ "github.com/docker/go-connections/sockets" |
|
| 29 |
+ "github.com/docker/go-connections/tlsconfig" |
|
| 30 |
+ "github.com/go-check/check" |
|
| 31 |
+) |
|
| 32 |
+ |
|
| 33 |
+// SockRoot holds the path of the default docker integration daemon socket |
|
| 34 |
+var SockRoot = filepath.Join(os.TempDir(), "docker-integration") |
|
| 35 |
+ |
|
| 36 |
+// Daemon represents a Docker daemon for the testing framework. |
|
| 37 |
+type Daemon struct {
|
|
| 38 |
+ GlobalFlags []string |
|
| 39 |
+ Root string |
|
| 40 |
+ Folder string |
|
| 41 |
+ Wait chan error |
|
| 42 |
+ UseDefaultHost bool |
|
| 43 |
+ UseDefaultTLSHost bool |
|
| 44 |
+ |
|
| 45 |
+ // FIXME(vdemeester) either should be used everywhere (do not return error) or nowhere, |
|
| 46 |
+ // so I think we should remove it or use it for everything |
|
| 47 |
+ c *check.C |
|
| 48 |
+ id string |
|
| 49 |
+ logFile *os.File |
|
| 50 |
+ stdin io.WriteCloser |
|
| 51 |
+ stdout, stderr io.ReadCloser |
|
| 52 |
+ cmd *exec.Cmd |
|
| 53 |
+ storageDriver string |
|
| 54 |
+ userlandProxy bool |
|
| 55 |
+ execRoot string |
|
| 56 |
+ experimental bool |
|
| 57 |
+ dockerBinary string |
|
| 58 |
+ dockerdBinary string |
|
| 59 |
+} |
|
| 60 |
+ |
|
| 61 |
+// Config holds docker daemon integration configuration |
|
| 62 |
+type Config struct {
|
|
| 63 |
+ Experimental bool |
|
| 64 |
+} |
|
| 65 |
+ |
|
| 66 |
+type clientConfig struct {
|
|
| 67 |
+ transport *http.Transport |
|
| 68 |
+ scheme string |
|
| 69 |
+ addr string |
|
| 70 |
+} |
|
| 71 |
+ |
|
| 72 |
+// New returns a Daemon instance to be used for testing. |
|
| 73 |
+// This will create a directory such as d123456789 in the folder specified by $DEST. |
|
| 74 |
+// The daemon will not automatically start. |
|
| 75 |
+func New(c *check.C, dockerBinary string, dockerdBinary string, config Config) *Daemon {
|
|
| 76 |
+ dest := os.Getenv("DEST")
|
|
| 77 |
+ c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable"))
|
|
| 78 |
+ |
|
| 79 |
+ err := os.MkdirAll(SockRoot, 0700) |
|
| 80 |
+ c.Assert(err, checker.IsNil, check.Commentf("could not create daemon socket root"))
|
|
| 81 |
+ |
|
| 82 |
+ id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
|
|
| 83 |
+ dir := filepath.Join(dest, id) |
|
| 84 |
+ daemonFolder, err := filepath.Abs(dir) |
|
| 85 |
+ c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir))
|
|
| 86 |
+ daemonRoot := filepath.Join(daemonFolder, "root") |
|
| 87 |
+ |
|
| 88 |
+ c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir))
|
|
| 89 |
+ |
|
| 90 |
+ userlandProxy := true |
|
| 91 |
+ if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
|
|
| 92 |
+ if val, err := strconv.ParseBool(env); err != nil {
|
|
| 93 |
+ userlandProxy = val |
|
| 94 |
+ } |
|
| 95 |
+ } |
|
| 96 |
+ |
|
| 97 |
+ return &Daemon{
|
|
| 98 |
+ id: id, |
|
| 99 |
+ c: c, |
|
| 100 |
+ Folder: daemonFolder, |
|
| 101 |
+ Root: daemonRoot, |
|
| 102 |
+ storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
|
|
| 103 |
+ userlandProxy: userlandProxy, |
|
| 104 |
+ execRoot: filepath.Join(os.TempDir(), "docker-execroot", id), |
|
| 105 |
+ dockerBinary: dockerBinary, |
|
| 106 |
+ dockerdBinary: dockerdBinary, |
|
| 107 |
+ experimental: config.Experimental, |
|
| 108 |
+ } |
|
| 109 |
+} |
|
| 110 |
+ |
|
| 111 |
+// RootDir returns the root directory of the daemon. |
|
| 112 |
+func (d *Daemon) RootDir() string {
|
|
| 113 |
+ return d.Root |
|
| 114 |
+} |
|
| 115 |
+ |
|
| 116 |
+// ID returns the generated id of the daemon |
|
| 117 |
+func (d *Daemon) ID() string {
|
|
| 118 |
+ return d.id |
|
| 119 |
+} |
|
| 120 |
+ |
|
| 121 |
+// StorageDriver returns the configured storage driver of the daemon |
|
| 122 |
+func (d *Daemon) StorageDriver() string {
|
|
| 123 |
+ return d.storageDriver |
|
| 124 |
+} |
|
| 125 |
+ |
|
| 126 |
+// CleanupExecRoot cleans the daemon exec root (network namespaces, ...) |
|
| 127 |
+func (d *Daemon) CleanupExecRoot(c *check.C) {
|
|
| 128 |
+ cleanupExecRoot(c, d.execRoot) |
|
| 129 |
+} |
|
| 130 |
+ |
|
| 131 |
+func (d *Daemon) getClientConfig() (*clientConfig, error) {
|
|
| 132 |
+ var ( |
|
| 133 |
+ transport *http.Transport |
|
| 134 |
+ scheme string |
|
| 135 |
+ addr string |
|
| 136 |
+ proto string |
|
| 137 |
+ ) |
|
| 138 |
+ if d.UseDefaultTLSHost {
|
|
| 139 |
+ option := &tlsconfig.Options{
|
|
| 140 |
+ CAFile: "fixtures/https/ca.pem", |
|
| 141 |
+ CertFile: "fixtures/https/client-cert.pem", |
|
| 142 |
+ KeyFile: "fixtures/https/client-key.pem", |
|
| 143 |
+ } |
|
| 144 |
+ tlsConfig, err := tlsconfig.Client(*option) |
|
| 145 |
+ if err != nil {
|
|
| 146 |
+ return nil, err |
|
| 147 |
+ } |
|
| 148 |
+ transport = &http.Transport{
|
|
| 149 |
+ TLSClientConfig: tlsConfig, |
|
| 150 |
+ } |
|
| 151 |
+ addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
|
|
| 152 |
+ scheme = "https" |
|
| 153 |
+ proto = "tcp" |
|
| 154 |
+ } else if d.UseDefaultHost {
|
|
| 155 |
+ addr = opts.DefaultUnixSocket |
|
| 156 |
+ proto = "unix" |
|
| 157 |
+ scheme = "http" |
|
| 158 |
+ transport = &http.Transport{}
|
|
| 159 |
+ } else {
|
|
| 160 |
+ addr = d.sockPath() |
|
| 161 |
+ proto = "unix" |
|
| 162 |
+ scheme = "http" |
|
| 163 |
+ transport = &http.Transport{}
|
|
| 164 |
+ } |
|
| 165 |
+ |
|
| 166 |
+ d.c.Assert(sockets.ConfigureTransport(transport, proto, addr), check.IsNil) |
|
| 167 |
+ |
|
| 168 |
+ return &clientConfig{
|
|
| 169 |
+ transport: transport, |
|
| 170 |
+ scheme: scheme, |
|
| 171 |
+ addr: addr, |
|
| 172 |
+ }, nil |
|
| 173 |
+} |
|
| 174 |
+ |
|
| 175 |
+// Start will start the daemon and return once it is ready to receive requests. |
|
| 176 |
+// You can specify additional daemon flags. |
|
| 177 |
+func (d *Daemon) Start(args ...string) error {
|
|
| 178 |
+ logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) |
|
| 179 |
+ d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.Folder))
|
|
| 180 |
+ |
|
| 181 |
+ return d.StartWithLogFile(logFile, args...) |
|
| 182 |
+} |
|
| 183 |
+ |
|
| 184 |
+// StartWithLogFile will start the daemon and attach its streams to a given file. |
|
| 185 |
+func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
|
|
| 186 |
+ dockerdBinary, err := exec.LookPath(d.dockerdBinary) |
|
| 187 |
+ d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id))
|
|
| 188 |
+ |
|
| 189 |
+ args := append(d.GlobalFlags, |
|
| 190 |
+ "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", |
|
| 191 |
+ "--graph", d.Root, |
|
| 192 |
+ "--exec-root", d.execRoot, |
|
| 193 |
+ "--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder),
|
|
| 194 |
+ fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
|
|
| 195 |
+ ) |
|
| 196 |
+ if d.experimental {
|
|
| 197 |
+ args = append(args, "--experimental", "--init") |
|
| 198 |
+ } |
|
| 199 |
+ if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
|
|
| 200 |
+ args = append(args, []string{"--host", d.Sock()}...)
|
|
| 201 |
+ } |
|
| 202 |
+ if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
|
|
| 203 |
+ args = append(args, []string{"--userns-remap", root}...)
|
|
| 204 |
+ } |
|
| 205 |
+ |
|
| 206 |
+ // If we don't explicitly set the log-level or debug flag(-D) then |
|
| 207 |
+ // turn on debug mode |
|
| 208 |
+ foundLog := false |
|
| 209 |
+ foundSd := false |
|
| 210 |
+ for _, a := range providedArgs {
|
|
| 211 |
+ if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
|
|
| 212 |
+ foundLog = true |
|
| 213 |
+ } |
|
| 214 |
+ if strings.Contains(a, "--storage-driver") {
|
|
| 215 |
+ foundSd = true |
|
| 216 |
+ } |
|
| 217 |
+ } |
|
| 218 |
+ if !foundLog {
|
|
| 219 |
+ args = append(args, "--debug") |
|
| 220 |
+ } |
|
| 221 |
+ if d.storageDriver != "" && !foundSd {
|
|
| 222 |
+ args = append(args, "--storage-driver", d.storageDriver) |
|
| 223 |
+ } |
|
| 224 |
+ |
|
| 225 |
+ args = append(args, providedArgs...) |
|
| 226 |
+ d.cmd = exec.Command(dockerdBinary, args...) |
|
| 227 |
+ d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") |
|
| 228 |
+ d.cmd.Stdout = out |
|
| 229 |
+ d.cmd.Stderr = out |
|
| 230 |
+ d.logFile = out |
|
| 231 |
+ |
|
| 232 |
+ if err := d.cmd.Start(); err != nil {
|
|
| 233 |
+ return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err)
|
|
| 234 |
+ } |
|
| 235 |
+ |
|
| 236 |
+ wait := make(chan error) |
|
| 237 |
+ |
|
| 238 |
+ go func() {
|
|
| 239 |
+ wait <- d.cmd.Wait() |
|
| 240 |
+ d.c.Logf("[%s] exiting daemon", d.id)
|
|
| 241 |
+ close(wait) |
|
| 242 |
+ }() |
|
| 243 |
+ |
|
| 244 |
+ d.Wait = wait |
|
| 245 |
+ |
|
| 246 |
+ tick := time.Tick(500 * time.Millisecond) |
|
| 247 |
+ // make sure daemon is ready to receive requests |
|
| 248 |
+ startTime := time.Now().Unix() |
|
| 249 |
+ for {
|
|
| 250 |
+ d.c.Logf("[%s] waiting for daemon to start", d.id)
|
|
| 251 |
+ if time.Now().Unix()-startTime > 5 {
|
|
| 252 |
+ // After 5 seconds, give up |
|
| 253 |
+ return fmt.Errorf("[%s] Daemon exited and never started", d.id)
|
|
| 254 |
+ } |
|
| 255 |
+ select {
|
|
| 256 |
+ case <-time.After(2 * time.Second): |
|
| 257 |
+ return fmt.Errorf("[%s] timeout: daemon does not respond", d.id)
|
|
| 258 |
+ case <-tick: |
|
| 259 |
+ clientConfig, err := d.getClientConfig() |
|
| 260 |
+ if err != nil {
|
|
| 261 |
+ return err |
|
| 262 |
+ } |
|
| 263 |
+ |
|
| 264 |
+ client := &http.Client{
|
|
| 265 |
+ Transport: clientConfig.transport, |
|
| 266 |
+ } |
|
| 267 |
+ |
|
| 268 |
+ req, err := http.NewRequest("GET", "/_ping", nil)
|
|
| 269 |
+ d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id))
|
|
| 270 |
+ req.URL.Host = clientConfig.addr |
|
| 271 |
+ req.URL.Scheme = clientConfig.scheme |
|
| 272 |
+ resp, err := client.Do(req) |
|
| 273 |
+ if err != nil {
|
|
| 274 |
+ continue |
|
| 275 |
+ } |
|
| 276 |
+ if resp.StatusCode != http.StatusOK {
|
|
| 277 |
+ d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status)
|
|
| 278 |
+ } |
|
| 279 |
+ d.c.Logf("[%s] daemon started", d.id)
|
|
| 280 |
+ d.Root, err = d.queryRootDir() |
|
| 281 |
+ if err != nil {
|
|
| 282 |
+ return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err)
|
|
| 283 |
+ } |
|
| 284 |
+ return nil |
|
| 285 |
+ case <-d.Wait: |
|
| 286 |
+ return fmt.Errorf("[%s] Daemon exited during startup", d.id)
|
|
| 287 |
+ } |
|
| 288 |
+ } |
|
| 289 |
+} |
|
| 290 |
+ |
|
| 291 |
+// StartWithBusybox will first start the daemon with Daemon.Start() |
|
| 292 |
+// then save the busybox image from the main daemon and load it into this Daemon instance. |
|
| 293 |
+func (d *Daemon) StartWithBusybox(arg ...string) error {
|
|
| 294 |
+ if err := d.Start(arg...); err != nil {
|
|
| 295 |
+ return err |
|
| 296 |
+ } |
|
| 297 |
+ return d.LoadBusybox() |
|
| 298 |
+} |
|
| 299 |
+ |
|
| 300 |
+// Kill will send a SIGKILL to the daemon |
|
| 301 |
+func (d *Daemon) Kill() error {
|
|
| 302 |
+ if d.cmd == nil || d.Wait == nil {
|
|
| 303 |
+ return errors.New("daemon not started")
|
|
| 304 |
+ } |
|
| 305 |
+ |
|
| 306 |
+ defer func() {
|
|
| 307 |
+ d.logFile.Close() |
|
| 308 |
+ d.cmd = nil |
|
| 309 |
+ }() |
|
| 310 |
+ |
|
| 311 |
+ if err := d.cmd.Process.Kill(); err != nil {
|
|
| 312 |
+ d.c.Logf("Could not kill daemon: %v", err)
|
|
| 313 |
+ return err |
|
| 314 |
+ } |
|
| 315 |
+ |
|
| 316 |
+ if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
|
|
| 317 |
+ return err |
|
| 318 |
+ } |
|
| 319 |
+ |
|
| 320 |
+ return nil |
|
| 321 |
+} |
|
| 322 |
+ |
|
| 323 |
+// Pid returns the pid of the daemon |
|
| 324 |
+func (d *Daemon) Pid() int {
|
|
| 325 |
+ return d.cmd.Process.Pid |
|
| 326 |
+} |
|
| 327 |
+ |
|
| 328 |
+// Interrupt stops the daemon by sending it an Interrupt signal |
|
| 329 |
+func (d *Daemon) Interrupt() error {
|
|
| 330 |
+ return d.Signal(os.Interrupt) |
|
| 331 |
+} |
|
| 332 |
+ |
|
| 333 |
+// Signal sends the specified signal to the daemon if running |
|
| 334 |
+func (d *Daemon) Signal(signal os.Signal) error {
|
|
| 335 |
+ if d.cmd == nil || d.Wait == nil {
|
|
| 336 |
+ return errors.New("daemon not started")
|
|
| 337 |
+ } |
|
| 338 |
+ return d.cmd.Process.Signal(signal) |
|
| 339 |
+} |
|
| 340 |
+ |
|
| 341 |
+// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its |
|
| 342 |
+// stack to its log file and exit |
|
| 343 |
+// This is used primarily for gathering debug information on test timeout |
|
| 344 |
+func (d *Daemon) DumpStackAndQuit() {
|
|
| 345 |
+ if d.cmd == nil || d.cmd.Process == nil {
|
|
| 346 |
+ return |
|
| 347 |
+ } |
|
| 348 |
+ SignalDaemonDump(d.cmd.Process.Pid) |
|
| 349 |
+} |
|
| 350 |
+ |
|
| 351 |
+// Stop will send a SIGINT every second and wait for the daemon to stop. |
|
| 352 |
+// If it timeouts, a SIGKILL is sent. |
|
| 353 |
+// Stop will not delete the daemon directory. If a purged daemon is needed, |
|
| 354 |
+// instantiate a new one with NewDaemon. |
|
| 355 |
+func (d *Daemon) Stop() error {
|
|
| 356 |
+ if d.cmd == nil || d.Wait == nil {
|
|
| 357 |
+ return errors.New("daemon not started")
|
|
| 358 |
+ } |
|
| 359 |
+ |
|
| 360 |
+ defer func() {
|
|
| 361 |
+ d.logFile.Close() |
|
| 362 |
+ d.cmd = nil |
|
| 363 |
+ }() |
|
| 364 |
+ |
|
| 365 |
+ i := 1 |
|
| 366 |
+ tick := time.Tick(time.Second) |
|
| 367 |
+ |
|
| 368 |
+ if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
|
| 369 |
+ return fmt.Errorf("could not send signal: %v", err)
|
|
| 370 |
+ } |
|
| 371 |
+out1: |
|
| 372 |
+ for {
|
|
| 373 |
+ select {
|
|
| 374 |
+ case err := <-d.Wait: |
|
| 375 |
+ return err |
|
| 376 |
+ case <-time.After(20 * time.Second): |
|
| 377 |
+ // time for stopping jobs and run onShutdown hooks |
|
| 378 |
+ d.c.Logf("timeout: %v", d.id)
|
|
| 379 |
+ break out1 |
|
| 380 |
+ } |
|
| 381 |
+ } |
|
| 382 |
+ |
|
| 383 |
+out2: |
|
| 384 |
+ for {
|
|
| 385 |
+ select {
|
|
| 386 |
+ case err := <-d.Wait: |
|
| 387 |
+ return err |
|
| 388 |
+ case <-tick: |
|
| 389 |
+ i++ |
|
| 390 |
+ if i > 5 {
|
|
| 391 |
+ d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i)
|
|
| 392 |
+ break out2 |
|
| 393 |
+ } |
|
| 394 |
+ d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
|
|
| 395 |
+ if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
|
|
| 396 |
+ return fmt.Errorf("could not send signal: %v", err)
|
|
| 397 |
+ } |
|
| 398 |
+ } |
|
| 399 |
+ } |
|
| 400 |
+ |
|
| 401 |
+ if err := d.cmd.Process.Kill(); err != nil {
|
|
| 402 |
+ d.c.Logf("Could not kill daemon: %v", err)
|
|
| 403 |
+ return err |
|
| 404 |
+ } |
|
| 405 |
+ |
|
| 406 |
+ if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
|
|
| 407 |
+ return err |
|
| 408 |
+ } |
|
| 409 |
+ |
|
| 410 |
+ return nil |
|
| 411 |
+} |
|
| 412 |
+ |
|
| 413 |
+// Restart will restart the daemon by first stopping it and then starting it. |
|
| 414 |
+func (d *Daemon) Restart(arg ...string) error {
|
|
| 415 |
+ d.Stop() |
|
| 416 |
+ // in the case of tests running a user namespace-enabled daemon, we have resolved |
|
| 417 |
+ // d.Root to be the actual final path of the graph dir after the "uid.gid" of |
|
| 418 |
+ // remapped root is added--we need to subtract it from the path before calling |
|
| 419 |
+ // start or else we will continue making subdirectories rather than truly restarting |
|
| 420 |
+ // with the same location/root: |
|
| 421 |
+ if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
|
|
| 422 |
+ d.Root = filepath.Dir(d.Root) |
|
| 423 |
+ } |
|
| 424 |
+ return d.Start(arg...) |
|
| 425 |
+} |
|
| 426 |
+ |
|
| 427 |
+// LoadBusybox will load the stored busybox into a newly started daemon |
|
| 428 |
+func (d *Daemon) LoadBusybox() error {
|
|
| 429 |
+ bb := filepath.Join(d.Folder, "busybox.tar") |
|
| 430 |
+ if _, err := os.Stat(bb); err != nil {
|
|
| 431 |
+ if !os.IsNotExist(err) {
|
|
| 432 |
+ return fmt.Errorf("unexpected error on busybox.tar stat: %v", err)
|
|
| 433 |
+ } |
|
| 434 |
+ // saving busybox image from main daemon |
|
| 435 |
+ if out, err := exec.Command(d.dockerBinary, "save", "--output", bb, "busybox:latest").CombinedOutput(); err != nil {
|
|
| 436 |
+ imagesOut, _ := exec.Command(d.dockerBinary, "images", "--format", "{{ .Repository }}:{{ .Tag }}").CombinedOutput()
|
|
| 437 |
+ return fmt.Errorf("could not save busybox image: %s\n%s", string(out), strings.TrimSpace(string(imagesOut)))
|
|
| 438 |
+ } |
|
| 439 |
+ } |
|
| 440 |
+ // loading busybox image to this daemon |
|
| 441 |
+ if out, err := d.Cmd("load", "--input", bb); err != nil {
|
|
| 442 |
+ return fmt.Errorf("could not load busybox image: %s", out)
|
|
| 443 |
+ } |
|
| 444 |
+ if err := os.Remove(bb); err != nil {
|
|
| 445 |
+ d.c.Logf("could not remove %s: %v", bb, err)
|
|
| 446 |
+ } |
|
| 447 |
+ return nil |
|
| 448 |
+} |
|
| 449 |
+ |
|
| 450 |
+func (d *Daemon) queryRootDir() (string, error) {
|
|
| 451 |
+ // update daemon root by asking /info endpoint (to support user |
|
| 452 |
+ // namespaced daemon with root remapped uid.gid directory) |
|
| 453 |
+ clientConfig, err := d.getClientConfig() |
|
| 454 |
+ if err != nil {
|
|
| 455 |
+ return "", err |
|
| 456 |
+ } |
|
| 457 |
+ |
|
| 458 |
+ client := &http.Client{
|
|
| 459 |
+ Transport: clientConfig.transport, |
|
| 460 |
+ } |
|
| 461 |
+ |
|
| 462 |
+ req, err := http.NewRequest("GET", "/info", nil)
|
|
| 463 |
+ if err != nil {
|
|
| 464 |
+ return "", err |
|
| 465 |
+ } |
|
| 466 |
+ req.Header.Set("Content-Type", "application/json")
|
|
| 467 |
+ req.URL.Host = clientConfig.addr |
|
| 468 |
+ req.URL.Scheme = clientConfig.scheme |
|
| 469 |
+ |
|
| 470 |
+ resp, err := client.Do(req) |
|
| 471 |
+ if err != nil {
|
|
| 472 |
+ return "", err |
|
| 473 |
+ } |
|
| 474 |
+ body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
|
|
| 475 |
+ return resp.Body.Close() |
|
| 476 |
+ }) |
|
| 477 |
+ |
|
| 478 |
+ type Info struct {
|
|
| 479 |
+ DockerRootDir string |
|
| 480 |
+ } |
|
| 481 |
+ var b []byte |
|
| 482 |
+ var i Info |
|
| 483 |
+ b, err = integration.ReadBody(body) |
|
| 484 |
+ if err == nil && resp.StatusCode == http.StatusOK {
|
|
| 485 |
+ // read the docker root dir |
|
| 486 |
+ if err = json.Unmarshal(b, &i); err == nil {
|
|
| 487 |
+ return i.DockerRootDir, nil |
|
| 488 |
+ } |
|
| 489 |
+ } |
|
| 490 |
+ return "", err |
|
| 491 |
+} |
|
| 492 |
+ |
|
| 493 |
+// Sock returns the socket path of the daemon |
|
| 494 |
+func (d *Daemon) Sock() string {
|
|
| 495 |
+ return fmt.Sprintf("unix://" + d.sockPath())
|
|
| 496 |
+} |
|
| 497 |
+ |
|
| 498 |
+func (d *Daemon) sockPath() string {
|
|
| 499 |
+ return filepath.Join(SockRoot, d.id+".sock") |
|
| 500 |
+} |
|
| 501 |
+ |
|
| 502 |
+// WaitRun waits for a container to be running for 10s |
|
| 503 |
+func (d *Daemon) WaitRun(contID string) error {
|
|
| 504 |
+ args := []string{"--host", d.Sock()}
|
|
| 505 |
+ return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...)
|
|
| 506 |
+} |
|
| 507 |
+ |
|
| 508 |
+// GetBaseDeviceSize returns the base device size of the daemon |
|
| 509 |
+func (d *Daemon) GetBaseDeviceSize(c *check.C) int64 {
|
|
| 510 |
+ infoCmdOutput, _, err := integration.RunCommandPipelineWithOutput( |
|
| 511 |
+ exec.Command(d.dockerBinary, "-H", d.Sock(), "info"), |
|
| 512 |
+ exec.Command("grep", "Base Device Size"),
|
|
| 513 |
+ ) |
|
| 514 |
+ c.Assert(err, checker.IsNil) |
|
| 515 |
+ basesizeSlice := strings.Split(infoCmdOutput, ":") |
|
| 516 |
+ basesize := strings.Trim(basesizeSlice[1], " ") |
|
| 517 |
+ basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] |
|
| 518 |
+ basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) |
|
| 519 |
+ c.Assert(err, checker.IsNil) |
|
| 520 |
+ basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) |
|
| 521 |
+ return basesizeBytes |
|
| 522 |
+} |
|
| 523 |
+ |
|
| 524 |
+// Cmd will execute a docker CLI command against this Daemon. |
|
| 525 |
+// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version
|
|
| 526 |
+func (d *Daemon) Cmd(args ...string) (string, error) {
|
|
| 527 |
+ b, err := d.Command(args...).CombinedOutput() |
|
| 528 |
+ return string(b), err |
|
| 529 |
+} |
|
| 530 |
+ |
|
| 531 |
+// Command will create a docker CLI command against this Daeomn. |
|
| 532 |
+func (d *Daemon) Command(args ...string) *exec.Cmd {
|
|
| 533 |
+ return exec.Command(d.dockerBinary, d.PrependHostArg(args)...) |
|
| 534 |
+} |
|
| 535 |
+ |
|
| 536 |
+// PrependHostArg prepend the specified arguments by the daemon host flags |
|
| 537 |
+func (d *Daemon) PrependHostArg(args []string) []string {
|
|
| 538 |
+ for _, arg := range args {
|
|
| 539 |
+ if arg == "--host" || arg == "-H" {
|
|
| 540 |
+ return args |
|
| 541 |
+ } |
|
| 542 |
+ } |
|
| 543 |
+ return append([]string{"--host", d.Sock()}, args...)
|
|
| 544 |
+} |
|
| 545 |
+ |
|
| 546 |
+// SockRequest executes a socket request on a daemon and returns statuscode and output. |
|
| 547 |
+func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) {
|
|
| 548 |
+ jsonData := bytes.NewBuffer(nil) |
|
| 549 |
+ if err := json.NewEncoder(jsonData).Encode(data); err != nil {
|
|
| 550 |
+ return -1, nil, err |
|
| 551 |
+ } |
|
| 552 |
+ |
|
| 553 |
+ res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json") |
|
| 554 |
+ if err != nil {
|
|
| 555 |
+ return -1, nil, err |
|
| 556 |
+ } |
|
| 557 |
+ b, err := integration.ReadBody(body) |
|
| 558 |
+ return res.StatusCode, b, err |
|
| 559 |
+} |
|
| 560 |
+ |
|
| 561 |
+// SockRequestRaw executes a socket request on a daemon and returns an http |
|
| 562 |
+// response and a reader for the output data. |
|
| 563 |
+func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
|
|
| 564 |
+ return SockRequestRawToDaemon(method, endpoint, data, ct, d.Sock()) |
|
| 565 |
+} |
|
| 566 |
+ |
|
| 567 |
+// LogFileName returns the path to the daemon's log file |
|
| 568 |
+func (d *Daemon) LogFileName() string {
|
|
| 569 |
+ return d.logFile.Name() |
|
| 570 |
+} |
|
| 571 |
+ |
|
| 572 |
+// GetIDByName returns the ID of an object (container, volume, …) given its name |
|
| 573 |
+func (d *Daemon) GetIDByName(name string) (string, error) {
|
|
| 574 |
+ return d.inspectFieldWithError(name, "Id") |
|
| 575 |
+} |
|
| 576 |
+ |
|
| 577 |
+// ActiveContainers returns the list of ids of the currently running containers |
|
| 578 |
+func (d *Daemon) ActiveContainers() (ids []string) {
|
|
| 579 |
+ // FIXME(vdemeester) shouldn't ignore the error |
|
| 580 |
+ out, _ := d.Cmd("ps", "-q")
|
|
| 581 |
+ for _, id := range strings.Split(out, "\n") {
|
|
| 582 |
+ if id = strings.TrimSpace(id); id != "" {
|
|
| 583 |
+ ids = append(ids, id) |
|
| 584 |
+ } |
|
| 585 |
+ } |
|
| 586 |
+ return |
|
| 587 |
+} |
|
| 588 |
+ |
|
| 589 |
+// ReadLogFile returns the content of the daemon log file |
|
| 590 |
+func (d *Daemon) ReadLogFile() ([]byte, error) {
|
|
| 591 |
+ return ioutil.ReadFile(d.logFile.Name()) |
|
| 592 |
+} |
|
| 593 |
+ |
|
| 594 |
+func (d *Daemon) inspectFilter(name, filter string) (string, error) {
|
|
| 595 |
+ format := fmt.Sprintf("{{%s}}", filter)
|
|
| 596 |
+ out, err := d.Cmd("inspect", "-f", format, name)
|
|
| 597 |
+ if err != nil {
|
|
| 598 |
+ return "", fmt.Errorf("failed to inspect %s: %s", name, out)
|
|
| 599 |
+ } |
|
| 600 |
+ return strings.TrimSpace(out), nil |
|
| 601 |
+} |
|
| 602 |
+ |
|
| 603 |
+func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
|
|
| 604 |
+ return d.inspectFilter(name, fmt.Sprintf(".%s", field))
|
|
| 605 |
+} |
|
| 606 |
+ |
|
| 607 |
+// FindContainerIP returns the ip of the specified container |
|
| 608 |
+// FIXME(vdemeester) should probably error out instead of just logging |
|
| 609 |
+func (d *Daemon) FindContainerIP(id string) string {
|
|
| 610 |
+ out, err := d.Cmd("inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'"), id)
|
|
| 611 |
+ if err != nil {
|
|
| 612 |
+ d.c.Log(err) |
|
| 613 |
+ } |
|
| 614 |
+ return strings.Trim(out, " \r\n'") |
|
| 615 |
+} |
|
| 616 |
+ |
|
| 617 |
+// BuildImageWithOut builds an image from the specified Dockerfile and build options and returns the output |
|
| 618 |
+func (d *Daemon) BuildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) {
|
|
| 619 |
+ buildCmd := BuildImageCmdWithHost(d.dockerBinary, name, dockerfile, d.Sock(), useCache, buildFlags...) |
|
| 620 |
+ result := icmd.RunCmd(icmd.Cmd{
|
|
| 621 |
+ Command: buildCmd.Args, |
|
| 622 |
+ Env: buildCmd.Env, |
|
| 623 |
+ Dir: buildCmd.Dir, |
|
| 624 |
+ Stdin: buildCmd.Stdin, |
|
| 625 |
+ Stdout: buildCmd.Stdout, |
|
| 626 |
+ }) |
|
| 627 |
+ return result.Combined(), result.ExitCode, result.Error |
|
| 628 |
+} |
|
| 629 |
+ |
|
| 630 |
+// CheckActiveContainerCount returns the number of active containers |
|
| 631 |
+// FIXME(vdemeester) should reuse ActiveContainers in some way |
|
| 632 |
+func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 633 |
+ out, err := d.Cmd("ps", "-q")
|
|
| 634 |
+ c.Assert(err, checker.IsNil) |
|
| 635 |
+ if len(strings.TrimSpace(out)) == 0 {
|
|
| 636 |
+ return 0, nil |
|
| 637 |
+ } |
|
| 638 |
+ return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
|
|
| 639 |
+} |
|
| 640 |
+ |
|
| 641 |
+// ReloadConfig asks the daemon to reload its configuration |
|
| 642 |
+func (d *Daemon) ReloadConfig() error {
|
|
| 643 |
+ if d.cmd == nil || d.cmd.Process == nil {
|
|
| 644 |
+ return fmt.Errorf("daemon is not running")
|
|
| 645 |
+ } |
|
| 646 |
+ |
|
| 647 |
+ errCh := make(chan error) |
|
| 648 |
+ started := make(chan struct{})
|
|
| 649 |
+ go func() {
|
|
| 650 |
+ _, body, err := SockRequestRawToDaemon("GET", "/events", nil, "", d.Sock())
|
|
| 651 |
+ close(started) |
|
| 652 |
+ if err != nil {
|
|
| 653 |
+ errCh <- err |
|
| 654 |
+ } |
|
| 655 |
+ defer body.Close() |
|
| 656 |
+ dec := json.NewDecoder(body) |
|
| 657 |
+ for {
|
|
| 658 |
+ var e events.Message |
|
| 659 |
+ if err := dec.Decode(&e); err != nil {
|
|
| 660 |
+ errCh <- err |
|
| 661 |
+ return |
|
| 662 |
+ } |
|
| 663 |
+ if e.Type != events.DaemonEventType {
|
|
| 664 |
+ continue |
|
| 665 |
+ } |
|
| 666 |
+ if e.Action != "reload" {
|
|
| 667 |
+ continue |
|
| 668 |
+ } |
|
| 669 |
+ close(errCh) // notify that we are done |
|
| 670 |
+ return |
|
| 671 |
+ } |
|
| 672 |
+ }() |
|
| 673 |
+ |
|
| 674 |
+ <-started |
|
| 675 |
+ if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
|
|
| 676 |
+ return fmt.Errorf("error signaling daemon reload: %v", err)
|
|
| 677 |
+ } |
|
| 678 |
+ select {
|
|
| 679 |
+ case err := <-errCh: |
|
| 680 |
+ if err != nil {
|
|
| 681 |
+ return fmt.Errorf("error waiting for daemon reload event: %v", err)
|
|
| 682 |
+ } |
|
| 683 |
+ case <-time.After(30 * time.Second): |
|
| 684 |
+ return fmt.Errorf("timeout waiting for daemon reload event")
|
|
| 685 |
+ } |
|
| 686 |
+ return nil |
|
| 687 |
+} |
|
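
A hypothetical fragment: rewrite the config file the daemon was started with, then let ReloadConfig send SIGHUP and block on the "reload" event (configFilePath is an assumption):

    err := ioutil.WriteFile(configFilePath, []byte(`{"labels": ["foo=bar"]}`), 0644)
    c.Assert(err, checker.IsNil)
    c.Assert(d.ReloadConfig(), checker.IsNil, check.Commentf("daemon never processed the reload"))
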
| 688 |
+ |
|
| 689 |
+// WaitInspectWithArgs waits, within the given timeout, for the specified inspect expression on the named object to equal the expected string. |
|
| 690 |
+// FIXME(vdemeester) Attach this to the Daemon struct |
|
| 691 |
+func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error {
|
|
| 692 |
+ after := time.After(timeout) |
|
| 693 |
+ |
|
| 694 |
+ args := append(arg, "inspect", "-f", expr, name) |
|
| 695 |
+ for {
|
|
| 696 |
+ result := icmd.RunCommand(dockerBinary, args...) |
|
| 697 |
+ if result.Error != nil {
|
|
| 698 |
+ if !strings.Contains(result.Stderr(), "No such") {
|
|
| 699 |
+ return fmt.Errorf("error executing docker inspect: %v\n%s",
|
|
| 700 |
+ result.Stderr(), result.Stdout()) |
|
| 701 |
+ } |
|
| 702 |
+ select {
|
|
| 703 |
+ case <-after: |
|
| 704 |
+ return result.Error |
|
| 705 |
+ default: |
|
| 706 |
+ time.Sleep(10 * time.Millisecond) |
|
| 707 |
+ continue |
|
| 708 |
+ } |
|
| 709 |
+ } |
|
| 710 |
+ |
|
| 711 |
+ out := strings.TrimSpace(result.Stdout()) |
|
| 712 |
+ if out == expected {
|
|
| 713 |
+ break |
|
| 714 |
+ } |
|
| 715 |
+ |
|
| 716 |
+ select {
|
|
| 717 |
+ case <-after: |
|
| 718 |
+ return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected)
|
|
| 719 |
+ default: |
|
| 720 |
+ } |
|
| 721 |
+ |
|
| 722 |
+ time.Sleep(100 * time.Millisecond) |
|
| 723 |
+ } |
|
| 724 |
+ return nil |
|
| 725 |
+} |
|
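
The retry/timeout shape above generalizes to any polled condition; a self-contained sketch:

    package main

    import (
    	"fmt"
    	"time"
    )

    // waitFor polls cond every 100ms until it returns true or the timeout
    // expires, mirroring the loop in WaitInspectWithArgs.
    func waitFor(timeout time.Duration, cond func() bool) error {
    	after := time.After(timeout)
    	for {
    		if cond() {
    			return nil
    		}
    		select {
    		case <-after:
    			return fmt.Errorf("condition not true in time")
    		default:
    			time.Sleep(100 * time.Millisecond)
    		}
    	}
    }

    func main() {
    	start := time.Now()
    	fmt.Println(waitFor(2*time.Second, func() bool {
    		return time.Since(start) > 300*time.Millisecond
    	})) // <nil>
    }
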
| 726 |
+ |
|
| 727 |
+// SockRequestRawToDaemon creates an http request against the specified daemon socket |
|
| 728 |
+// FIXME(vdemeester) attach this to the Daemon struct? |
|
| 729 |
+func SockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) {
|
|
| 730 |
+ req, client, err := newRequestClient(method, endpoint, data, ct, daemon) |
|
| 731 |
+ if err != nil {
|
|
| 732 |
+ return nil, nil, err |
|
| 733 |
+ } |
|
| 734 |
+ |
|
| 735 |
+ resp, err := client.Do(req) |
|
| 736 |
+ if err != nil {
|
|
| 737 |
+ client.Close() |
|
| 738 |
+ return nil, nil, err |
|
| 739 |
+ } |
|
| 740 |
+ body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
|
|
| 741 |
+ defer resp.Body.Close() |
|
| 742 |
+ return client.Close() |
|
| 743 |
+ }) |
|
| 744 |
+ |
|
| 745 |
+ return resp, body, nil |
|
| 746 |
+} |
|
| 747 |
+ |
|
| 748 |
+func getTLSConfig() (*tls.Config, error) {
|
|
| 749 |
+ dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
|
|
| 750 |
+ |
|
| 751 |
+ if dockerCertPath == "" {
|
|
| 752 |
+ return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable")
|
|
| 753 |
+ } |
|
| 754 |
+ |
|
| 755 |
+ option := &tlsconfig.Options{
|
|
| 756 |
+ CAFile: filepath.Join(dockerCertPath, "ca.pem"), |
|
| 757 |
+ CertFile: filepath.Join(dockerCertPath, "cert.pem"), |
|
| 758 |
+ KeyFile: filepath.Join(dockerCertPath, "key.pem"), |
|
| 759 |
+ } |
|
| 760 |
+ tlsConfig, err := tlsconfig.Client(*option) |
|
| 761 |
+ if err != nil {
|
|
| 762 |
+ return nil, err |
|
| 763 |
+ } |
|
| 764 |
+ |
|
| 765 |
+ return tlsConfig, nil |
|
| 766 |
+} |
|
| 767 |
+ |
|
| 768 |
+// SockConn opens a connection on the specified socket |
|
| 769 |
+func SockConn(timeout time.Duration, daemon string) (net.Conn, error) {
|
|
| 770 |
+ daemonURL, err := url.Parse(daemon) |
|
| 771 |
+ if err != nil {
|
|
| 772 |
+ return nil, fmt.Errorf("could not parse url %q: %v", daemon, err)
|
|
| 773 |
+ } |
|
| 774 |
+ |
|
| 775 |
+ var c net.Conn |
|
| 776 |
+ switch daemonURL.Scheme {
|
|
| 777 |
+ case "npipe": |
|
| 778 |
+ return npipeDial(daemonURL.Path, timeout) |
|
| 779 |
+ case "unix": |
|
| 780 |
+ return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) |
|
| 781 |
+ case "tcp": |
|
| 782 |
+ if os.Getenv("DOCKER_TLS_VERIFY") != "" {
|
|
| 783 |
+ // Setup the socket TLS configuration. |
|
| 784 |
+ tlsConfig, err := getTLSConfig() |
|
| 785 |
+ if err != nil {
|
|
| 786 |
+ return nil, err |
|
| 787 |
+ } |
|
| 788 |
+ dialer := &net.Dialer{Timeout: timeout}
|
|
| 789 |
+ return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) |
|
| 790 |
+ } |
|
| 791 |
+ return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) |
|
| 792 |
+ default: |
|
| 793 |
+ return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon)
|
|
| 794 |
+ } |
|
| 795 |
+} |
|
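
The scheme dispatch relies on url.Parse splitting host and path; a self-contained illustration:

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	for _, s := range []string{"unix:///var/run/docker.sock", "tcp://127.0.0.1:2376"} {
    		u, _ := url.Parse(s)
    		// SockConn dials u.Path for unix/npipe and u.Host for tcp.
    		fmt.Printf("scheme=%s host=%q path=%q\n", u.Scheme, u.Host, u.Path)
    	}
    }
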
| 796 |
+ |
|
| 797 |
+func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) {
|
|
| 798 |
+ c, err := SockConn(time.Duration(10*time.Second), daemon) |
|
| 799 |
+ if err != nil {
|
|
| 800 |
+ return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err)
|
|
| 801 |
+ } |
|
| 802 |
+ |
|
| 803 |
+ client := httputil.NewClientConn(c, nil) |
|
| 804 |
+ |
|
| 805 |
+ req, err := http.NewRequest(method, endpoint, data) |
|
| 806 |
+ if err != nil {
|
|
| 807 |
+ client.Close() |
|
| 808 |
+ return nil, nil, fmt.Errorf("could not create new request: %v", err)
|
|
| 809 |
+ } |
|
| 810 |
+ |
|
| 811 |
+ if ct != "" {
|
|
| 812 |
+ req.Header.Set("Content-Type", ct)
|
|
| 813 |
+ } |
|
| 814 |
+ return req, client, nil |
|
| 815 |
+} |
|
| 816 |
+ |
|
| 817 |
+// BuildImageCmdWithHost creates a build command with the specified arguments. |
|
| 818 |
+// FIXME(vdemeester) move this away |
|
| 819 |
+func BuildImageCmdWithHost(dockerBinary, name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd {
|
|
| 820 |
+ args := []string{}
|
|
| 821 |
+ if host != "" {
|
|
| 822 |
+ args = append(args, "--host", host) |
|
| 823 |
+ } |
|
| 824 |
+ args = append(args, "build", "-t", name) |
|
| 825 |
+ if !useCache {
|
|
| 826 |
+ args = append(args, "--no-cache") |
|
| 827 |
+ } |
|
| 828 |
+ args = append(args, buildFlags...) |
|
| 829 |
+ args = append(args, "-") |
|
| 830 |
+ buildCmd := exec.Command(dockerBinary, args...) |
|
| 831 |
+ buildCmd.Stdin = strings.NewReader(dockerfile) |
|
| 832 |
+ return buildCmd |
|
| 833 |
+} |
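
A self-contained sketch of the argument assembly and the command it produces (the standalone helper name is ours):

    package main

    import (
    	"fmt"
    	"os/exec"
    	"strings"
    )

    // buildArgs mirrors BuildImageCmdWithHost's argument construction.
    func buildArgs(host, name string, useCache bool, buildFlags ...string) []string {
    	args := []string{}
    	if host != "" {
    		args = append(args, "--host", host)
    	}
    	args = append(args, "build", "-t", name)
    	if !useCache {
    		args = append(args, "--no-cache")
    	}
    	args = append(args, buildFlags...)
    	return append(args, "-") // read the Dockerfile from stdin
    }

    func main() {
    	cmd := exec.Command("docker", buildArgs("unix:///tmp/d1.sock", "testimg", false)...)
    	cmd.Stdin = strings.NewReader("FROM busybox\nRUN echo hi\n")
    	fmt.Println(cmd.Args)
    	// [docker --host unix:///tmp/d1.sock build -t testimg --no-cache -]
    }
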
| 0 | 834 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,450 @@ |
| 0 |
+package daemon |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "encoding/json" |
|
| 4 |
+ "fmt" |
|
| 5 |
+ "net/http" |
|
| 6 |
+ "strings" |
|
| 7 |
+ "time" |
|
| 8 |
+ |
|
| 9 |
+ "github.com/docker/docker/api/types" |
|
| 10 |
+ "github.com/docker/docker/api/types/filters" |
|
| 11 |
+ "github.com/docker/docker/api/types/swarm" |
|
| 12 |
+ "github.com/docker/docker/pkg/integration/checker" |
|
| 13 |
+ "github.com/go-check/check" |
|
| 14 |
+) |
|
| 15 |
+ |
|
| 16 |
+// Swarm is a test daemon with helpers for participating in a swarm. |
|
| 17 |
+type Swarm struct {
|
|
| 18 |
+ *Daemon |
|
| 19 |
+ swarm.Info |
|
| 20 |
+ Port int |
|
| 21 |
+ ListenAddr string |
|
| 22 |
+} |
|
| 23 |
+ |
|
| 24 |
+// Init initializes a new swarm cluster. |
|
| 25 |
+func (d *Swarm) Init(req swarm.InitRequest) error {
|
|
| 26 |
+ if req.ListenAddr == "" {
|
|
| 27 |
+ req.ListenAddr = d.ListenAddr |
|
| 28 |
+ } |
|
| 29 |
+ status, out, err := d.SockRequest("POST", "/swarm/init", req)
|
|
| 30 |
+ if status != http.StatusOK {
|
|
| 31 |
+ return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out)
|
|
| 32 |
+ } |
|
| 33 |
+ if err != nil {
|
|
| 34 |
+ return fmt.Errorf("initializing swarm: %v", err)
|
|
| 35 |
+ } |
|
| 36 |
+ info, err := d.SwarmInfo() |
|
| 37 |
+ if err != nil {
|
|
| 38 |
+ return err |
|
| 39 |
+ } |
|
| 40 |
+ d.Info = info |
|
| 41 |
+ return nil |
|
| 42 |
+} |
|
| 43 |
+ |
|
| 44 |
+// Join joins a daemon to an existing cluster. |
|
| 45 |
+func (d *Swarm) Join(req swarm.JoinRequest) error {
|
|
| 46 |
+ if req.ListenAddr == "" {
|
|
| 47 |
+ req.ListenAddr = d.ListenAddr |
|
| 48 |
+ } |
|
| 49 |
+ status, out, err := d.SockRequest("POST", "/swarm/join", req)
|
|
| 50 |
+ if status != http.StatusOK {
|
|
| 51 |
+ return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out)
|
|
| 52 |
+ } |
|
| 53 |
+ if err != nil {
|
|
| 54 |
+ return fmt.Errorf("joining swarm: %v", err)
|
|
| 55 |
+ } |
|
| 56 |
+ info, err := d.SwarmInfo() |
|
| 57 |
+ if err != nil {
|
|
| 58 |
+ return err |
|
| 59 |
+ } |
|
| 60 |
+ d.Info = info |
|
| 61 |
+ return nil |
|
| 62 |
+} |
|
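
A hypothetical two-node fragment (d1 and d2 are *daemon.Swarm values the suite has already started; request fields from api/types/swarm):

    c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
    tokens := d1.JoinTokens(c)
    c.Assert(d2.Join(swarm.JoinRequest{
    	JoinToken:   tokens.Worker,
    	RemoteAddrs: []string{d1.ListenAddr},
    }), checker.IsNil)
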
| 63 |
+ |
|
| 64 |
+// Leave makes the daemon leave the current cluster, optionally forcing it. |
|
| 65 |
+func (d *Swarm) Leave(force bool) error {
|
|
| 66 |
+ url := "/swarm/leave" |
|
| 67 |
+ if force {
|
|
| 68 |
+ url += "?force=1" |
|
| 69 |
+ } |
|
| 70 |
+ status, out, err := d.SockRequest("POST", url, nil)
|
|
| 71 |
+ if status != http.StatusOK {
|
|
| 72 |
+ return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out)
|
|
| 73 |
+ } |
|
| 74 |
+ if err != nil {
|
|
| 75 |
+ err = fmt.Errorf("leaving swarm: %v", err)
|
|
| 76 |
+ } |
|
| 77 |
+ return err |
|
| 78 |
+} |
|
| 79 |
+ |
|
| 80 |
+// SwarmInfo returns the swarm information of the daemon |
|
| 81 |
+func (d *Swarm) SwarmInfo() (swarm.Info, error) {
|
|
| 82 |
+ var info struct {
|
|
| 83 |
+ Swarm swarm.Info |
|
| 84 |
+ } |
|
| 85 |
+ status, dt, err := d.SockRequest("GET", "/info", nil)
|
|
| 86 |
+ if status != http.StatusOK {
|
|
| 87 |
+ return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status)
|
|
| 88 |
+ } |
|
| 89 |
+ if err != nil {
|
|
| 90 |
+ return info.Swarm, fmt.Errorf("get swarm info: %v", err)
|
|
| 91 |
+ } |
|
| 92 |
+ if err := json.Unmarshal(dt, &info); err != nil {
|
|
| 93 |
+ return info.Swarm, err |
|
| 94 |
+ } |
|
| 95 |
+ return info.Swarm, nil |
|
| 96 |
+} |
|
| 97 |
+ |
|
| 98 |
+// ServiceConstructor defines a swarm service constructor function |
|
| 99 |
+type ServiceConstructor func(*swarm.Service) |
|
| 100 |
+ |
|
| 101 |
+// NodeConstructor defines a swarm node constructor |
|
| 102 |
+type NodeConstructor func(*swarm.Node) |
|
| 103 |
+ |
|
| 104 |
+// SpecConstructor defines a swarm spec constructor |
|
| 105 |
+type SpecConstructor func(*swarm.Spec) |
|
| 106 |
+ |
|
| 107 |
+// CreateService creates a swarm service given the specified service constructor |
|
| 108 |
+func (d *Swarm) CreateService(c *check.C, f ...ServiceConstructor) string {
|
|
| 109 |
+ var service swarm.Service |
|
| 110 |
+ for _, fn := range f {
|
|
| 111 |
+ fn(&service) |
|
| 112 |
+ } |
|
| 113 |
+ status, out, err := d.SockRequest("POST", "/services/create", service.Spec)
|
|
| 114 |
+ |
|
| 115 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 116 |
+ c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out)))
|
|
| 117 |
+ |
|
| 118 |
+ var scr types.ServiceCreateResponse |
|
| 119 |
+ c.Assert(json.Unmarshal(out, &scr), checker.IsNil) |
|
| 120 |
+ return scr.ID |
|
| 121 |
+} |
|
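
Constructors are plain mutators over the service object; a hypothetical one creating a three-replica busybox service (spec field names as in this era of api/types/swarm):

    replicas := uint64(3)
    id := d.CreateService(c, func(s *swarm.Service) {
    	s.Spec = swarm.ServiceSpec{
    		TaskTemplate: swarm.TaskSpec{
    			ContainerSpec: swarm.ContainerSpec{Image: "busybox:latest"},
    		},
    		Mode: swarm.ServiceMode{
    			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
    		},
    	}
    })
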
| 122 |
+ |
|
| 123 |
+// GetService returns the swarm service corresponding to the specified id |
|
| 124 |
+func (d *Swarm) GetService(c *check.C, id string) *swarm.Service {
|
|
| 125 |
+ var service swarm.Service |
|
| 126 |
+ status, out, err := d.SockRequest("GET", "/services/"+id, nil)
|
|
| 127 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 128 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 129 |
+ c.Assert(json.Unmarshal(out, &service), checker.IsNil) |
|
| 130 |
+ return &service |
|
| 131 |
+} |
|
| 132 |
+ |
|
| 133 |
+// GetServiceTasks returns the swarm tasks for the specified service |
|
| 134 |
+func (d *Swarm) GetServiceTasks(c *check.C, service string) []swarm.Task {
|
|
| 135 |
+ var tasks []swarm.Task |
|
| 136 |
+ |
|
| 137 |
+ filterArgs := filters.NewArgs() |
|
| 138 |
+ filterArgs.Add("desired-state", "running")
|
|
| 139 |
+ filterArgs.Add("service", service)
|
|
| 140 |
+ filters, err := filters.ToParam(filterArgs) |
|
| 141 |
+ c.Assert(err, checker.IsNil) |
|
| 142 |
+ |
|
| 143 |
+ status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil)
|
|
| 144 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 145 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 146 |
+ c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) |
|
| 147 |
+ return tasks |
|
| 148 |
+} |
|
| 149 |
+ |
|
| 150 |
+// CheckServiceRunningTasks returns the number of running tasks for the specified service |
|
| 151 |
+func (d *Swarm) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
|
| 152 |
+ return func(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 153 |
+ tasks := d.GetServiceTasks(c, service) |
|
| 154 |
+ var runningCount int |
|
| 155 |
+ for _, task := range tasks {
|
|
| 156 |
+ if task.Status.State == swarm.TaskStateRunning {
|
|
| 157 |
+ runningCount++ |
|
| 158 |
+ } |
|
| 159 |
+ } |
|
| 160 |
+ return runningCount, nil |
|
| 161 |
+ } |
|
| 162 |
+} |
|
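
These Check* helpers return pollable functions; the suite pairs them with its polling assertion helper (waitAndAssert and defaultReconciliationTimeout are the suite's names, assumed here):

    id := d.CreateService(c, threeReplicaService) // hypothetical constructor
    waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceRunningTasks(id), checker.Equals, 3)
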
| 163 |
+ |
|
| 164 |
+// CheckServiceUpdateState returns the current update state for the specified service |
|
| 165 |
+func (d *Swarm) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
|
| 166 |
+ return func(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 167 |
+ service := d.GetService(c, service) |
|
| 168 |
+ if service.UpdateStatus == nil {
|
|
| 169 |
+ return "", nil |
|
| 170 |
+ } |
|
| 171 |
+ return service.UpdateStatus.State, nil |
|
| 172 |
+ } |
|
| 173 |
+} |
|
| 174 |
+ |
|
| 175 |
+// CheckServiceTasks returns the number of tasks for the specified service |
|
| 176 |
+func (d *Swarm) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
|
| 177 |
+ return func(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 178 |
+ tasks := d.GetServiceTasks(c, service) |
|
| 179 |
+ return len(tasks), nil |
|
| 180 |
+ } |
|
| 181 |
+} |
|
| 182 |
+ |
|
| 183 |
+// CheckRunningTaskImages returns, per image, the number of tasks currently running |
|
| 184 |
+func (d *Swarm) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 185 |
+ var tasks []swarm.Task |
|
| 186 |
+ |
|
| 187 |
+ filterArgs := filters.NewArgs() |
|
| 188 |
+ filterArgs.Add("desired-state", "running")
|
|
| 189 |
+ filters, err := filters.ToParam(filterArgs) |
|
| 190 |
+ c.Assert(err, checker.IsNil) |
|
| 191 |
+ |
|
| 192 |
+ status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil)
|
|
| 193 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 194 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 195 |
+ c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) |
|
| 196 |
+ |
|
| 197 |
+ result := make(map[string]int) |
|
| 198 |
+ for _, task := range tasks {
|
|
| 199 |
+ if task.Status.State == swarm.TaskStateRunning {
|
|
| 200 |
+ result[task.Spec.ContainerSpec.Image]++ |
|
| 201 |
+ } |
|
| 202 |
+ } |
|
| 203 |
+ return result, nil |
|
| 204 |
+} |
|
| 205 |
+ |
|
| 206 |
+// CheckNodeReadyCount returns the number of ready nodes in the swarm |
|
| 207 |
+func (d *Swarm) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 208 |
+ nodes := d.ListNodes(c) |
|
| 209 |
+ var readyCount int |
|
| 210 |
+ for _, node := range nodes {
|
|
| 211 |
+ if node.Status.State == swarm.NodeStateReady {
|
|
| 212 |
+ readyCount++ |
|
| 213 |
+ } |
|
| 214 |
+ } |
|
| 215 |
+ return readyCount, nil |
|
| 216 |
+} |
|
| 217 |
+ |
|
| 218 |
+// GetTask returns the swarm task identified by the specified id |
|
| 219 |
+func (d *Swarm) GetTask(c *check.C, id string) swarm.Task {
|
|
| 220 |
+ var task swarm.Task |
|
| 221 |
+ |
|
| 222 |
+ status, out, err := d.SockRequest("GET", "/tasks/"+id, nil)
|
|
| 223 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 224 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 225 |
+ c.Assert(json.Unmarshal(out, &task), checker.IsNil) |
|
| 226 |
+ return task |
|
| 227 |
+} |
|
| 228 |
+ |
|
| 229 |
+// UpdateService updates a swarm service with the specified service constructor |
|
| 230 |
+func (d *Swarm) UpdateService(c *check.C, service *swarm.Service, f ...ServiceConstructor) {
|
|
| 231 |
+ for _, fn := range f {
|
|
| 232 |
+ fn(service) |
|
| 233 |
+ } |
|
| 234 |
+ url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index)
|
|
| 235 |
+ status, out, err := d.SockRequest("POST", url, service.Spec)
|
|
| 236 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 237 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 238 |
+} |
|
| 239 |
+ |
|
| 240 |
+// RemoveService removes the specified service |
|
| 241 |
+func (d *Swarm) RemoveService(c *check.C, id string) {
|
|
| 242 |
+ status, out, err := d.SockRequest("DELETE", "/services/"+id, nil)
|
|
| 243 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 244 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 245 |
+} |
|
| 246 |
+ |
|
| 247 |
+// GetNode returns a swarm node identified by the specified id |
|
| 248 |
+func (d *Swarm) GetNode(c *check.C, id string) *swarm.Node {
|
|
| 249 |
+ var node swarm.Node |
|
| 250 |
+ status, out, err := d.SockRequest("GET", "/nodes/"+id, nil)
|
|
| 251 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 252 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 253 |
+ c.Assert(json.Unmarshal(out, &node), checker.IsNil) |
|
| 254 |
+ c.Assert(node.ID, checker.Equals, id) |
|
| 255 |
+ return &node |
|
| 256 |
+} |
|
| 257 |
+ |
|
| 258 |
+// RemoveNode removes the specified node |
|
| 259 |
+func (d *Swarm) RemoveNode(c *check.C, id string, force bool) {
|
|
| 260 |
+ url := "/nodes/" + id |
|
| 261 |
+ if force {
|
|
| 262 |
+ url += "?force=1" |
|
| 263 |
+ } |
|
| 264 |
+ |
|
| 265 |
+ status, out, err := d.SockRequest("DELETE", url, nil)
|
|
| 266 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 267 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 268 |
+} |
|
| 269 |
+ |
|
| 270 |
+// UpdateNode updates a swarm node with the specified node constructor |
|
| 271 |
+func (d *Swarm) UpdateNode(c *check.C, id string, f ...NodeConstructor) {
|
|
| 272 |
+ for i := 0; ; i++ {
|
|
| 273 |
+ node := d.GetNode(c, id) |
|
| 274 |
+ for _, fn := range f {
|
|
| 275 |
+ fn(node) |
|
| 276 |
+ } |
|
| 277 |
+ url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
|
|
| 278 |
+ status, out, err := d.SockRequest("POST", url, node.Spec)
|
|
| 279 |
+ if i < 10 && strings.Contains(string(out), "update out of sequence") {
|
|
| 280 |
+ time.Sleep(100 * time.Millisecond) |
|
| 281 |
+ continue |
|
| 282 |
+ } |
|
| 283 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 284 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 285 |
+ return |
|
| 286 |
+ } |
|
| 287 |
+} |
|
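
A hypothetical fragment draining a node; the retry loop above absorbs "update out of sequence" races:

    d.UpdateNode(c, nodeID, func(n *swarm.Node) {
    	n.Spec.Availability = swarm.NodeAvailabilityDrain
    })
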
| 288 |
+ |
|
| 289 |
+// ListNodes returns the list of the current swarm nodes |
|
| 290 |
+func (d *Swarm) ListNodes(c *check.C) []swarm.Node {
|
|
| 291 |
+ status, out, err := d.SockRequest("GET", "/nodes", nil)
|
|
| 292 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 293 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 294 |
+ |
|
| 295 |
+ nodes := []swarm.Node{}
|
|
| 296 |
+ c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) |
|
| 297 |
+ return nodes |
|
| 298 |
+} |
|
| 299 |
+ |
|
| 300 |
+// ListServices returns the list of the current swarm services |
|
| 301 |
+func (d *Swarm) ListServices(c *check.C) []swarm.Service {
|
|
| 302 |
+ status, out, err := d.SockRequest("GET", "/services", nil)
|
|
| 303 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 304 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 305 |
+ |
|
| 306 |
+ services := []swarm.Service{}
|
|
| 307 |
+ c.Assert(json.Unmarshal(out, &services), checker.IsNil) |
|
| 308 |
+ return services |
|
| 309 |
+} |
|
| 310 |
+ |
|
| 311 |
+// CreateSecret creates a secret given the specified spec |
|
| 312 |
+func (d *Swarm) CreateSecret(c *check.C, secretSpec swarm.SecretSpec) string {
|
|
| 313 |
+ status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec)
|
|
| 314 |
+ |
|
| 315 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 316 |
+ c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out)))
|
|
| 317 |
+ |
|
| 318 |
+ var scr types.SecretCreateResponse |
|
| 319 |
+ c.Assert(json.Unmarshal(out, &scr), checker.IsNil) |
|
| 320 |
+ return scr.ID |
|
| 321 |
+} |
|
| 322 |
+ |
|
| 323 |
+// ListSecrets returns the list of the current swarm secrets |
|
| 324 |
+func (d *Swarm) ListSecrets(c *check.C) []swarm.Secret {
|
|
| 325 |
+ status, out, err := d.SockRequest("GET", "/secrets", nil)
|
|
| 326 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 327 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 328 |
+ |
|
| 329 |
+ secrets := []swarm.Secret{}
|
|
| 330 |
+ c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) |
|
| 331 |
+ return secrets |
|
| 332 |
+} |
|
| 333 |
+ |
|
| 334 |
+// GetSecret returns a swarm secret identified by the specified id |
|
| 335 |
+func (d *Swarm) GetSecret(c *check.C, id string) *swarm.Secret {
|
|
| 336 |
+ var secret swarm.Secret |
|
| 337 |
+ status, out, err := d.SockRequest("GET", "/secrets/"+id, nil)
|
|
| 338 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 339 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 340 |
+ c.Assert(json.Unmarshal(out, &secret), checker.IsNil) |
|
| 341 |
+ return &secret |
|
| 342 |
+} |
|
| 343 |
+ |
|
| 344 |
+// DeleteSecret removes the swarm secret identified by the specified id |
|
| 345 |
+func (d *Swarm) DeleteSecret(c *check.C, id string) {
|
|
| 346 |
+ status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil)
|
|
| 347 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 348 |
+ c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out)))
|
|
| 349 |
+} |
|
| 350 |
+ |
|
| 351 |
+// GetSwarm returns the current swarm object |
|
| 352 |
+func (d *Swarm) GetSwarm(c *check.C) swarm.Swarm {
|
|
| 353 |
+ var sw swarm.Swarm |
|
| 354 |
+ status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 355 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 356 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 357 |
+ c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 358 |
+ return sw |
|
| 359 |
+} |
|
| 360 |
+ |
|
| 361 |
+// UpdateSwarm updates the current swarm object with the specified spec constructors |
|
| 362 |
+func (d *Swarm) UpdateSwarm(c *check.C, f ...SpecConstructor) {
|
|
| 363 |
+ sw := d.GetSwarm(c) |
|
| 364 |
+ for _, fn := range f {
|
|
| 365 |
+ fn(&sw.Spec) |
|
| 366 |
+ } |
|
| 367 |
+ url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index)
|
|
| 368 |
+ status, out, err := d.SockRequest("POST", url, sw.Spec)
|
|
| 369 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 370 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 371 |
+} |
|
| 372 |
+ |
|
| 373 |
+// RotateTokens updates the swarm to rotate its worker and manager join tokens |
|
| 374 |
+func (d *Swarm) RotateTokens(c *check.C) {
|
|
| 375 |
+ var sw swarm.Swarm |
|
| 376 |
+ status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 377 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 378 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 379 |
+ c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 380 |
+ |
|
| 381 |
+ url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index)
|
|
| 382 |
+ status, out, err = d.SockRequest("POST", url, sw.Spec)
|
|
| 383 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 384 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 385 |
+} |
|
| 386 |
+ |
|
| 387 |
+// JoinTokens returns the current swarm join tokens |
|
| 388 |
+func (d *Swarm) JoinTokens(c *check.C) swarm.JoinTokens {
|
|
| 389 |
+ var sw swarm.Swarm |
|
| 390 |
+ status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 391 |
+ c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 392 |
+ c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 393 |
+ c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 394 |
+ return sw.JoinTokens |
|
| 395 |
+} |
|
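
A hypothetical fragment: after rotation, both join tokens should differ from the previously issued ones:

    before := d.JoinTokens(c)
    d.RotateTokens(c)
    after := d.JoinTokens(c)
    c.Assert(after.Worker, checker.Not(checker.Equals), before.Worker)
    c.Assert(after.Manager, checker.Not(checker.Equals), before.Manager)
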
| 396 |
+ |
|
| 397 |
+// CheckLocalNodeState returns the current swarm node state |
|
| 398 |
+func (d *Swarm) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 399 |
+ info, err := d.SwarmInfo() |
|
| 400 |
+ c.Assert(err, checker.IsNil) |
|
| 401 |
+ return info.LocalNodeState, nil |
|
| 402 |
+} |
|
| 403 |
+ |
|
| 404 |
+// CheckControlAvailable asserts the node is an active swarm member and returns whether it is a manager (control available) |
|
| 405 |
+func (d *Swarm) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 406 |
+ info, err := d.SwarmInfo() |
|
| 407 |
+ c.Assert(err, checker.IsNil) |
|
| 408 |
+ c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
|
| 409 |
+ return info.ControlAvailable, nil |
|
| 410 |
+} |
|
| 411 |
+ |
|
| 412 |
+// CheckLeader returns whether there is a leader on the swarm or not |
|
| 413 |
+func (d *Swarm) CheckLeader(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 414 |
+ errList := check.Commentf("could not get node list")
|
|
| 415 |
+ status, out, err := d.SockRequest("GET", "/nodes", nil)
|
|
| 416 |
+ if err != nil {
|
|
| 417 |
+ return err, errList |
|
| 418 |
+ } |
|
| 419 |
+ if status != http.StatusOK {
|
|
| 420 |
+ return fmt.Errorf("expected http status OK, got: %d", status), errList
|
|
| 421 |
+ } |
|
| 422 |
+ |
|
| 423 |
+ var ls []swarm.Node |
|
| 424 |
+ if err := json.Unmarshal(out, &ls); err != nil {
|
|
| 425 |
+ return err, errList |
|
| 426 |
+ } |
|
| 427 |
+ |
|
| 428 |
+ for _, node := range ls {
|
|
| 429 |
+ if node.ManagerStatus != nil && node.ManagerStatus.Leader {
|
|
| 430 |
+ return nil, nil |
|
| 431 |
+ } |
|
| 432 |
+ } |
|
| 433 |
+ return fmt.Errorf("no leader"), check.Commentf("could not find leader")
|
|
| 434 |
+} |
|
| 435 |
+ |
|
| 436 |
+// CmdRetryOutOfSequence runs the specified command against the current daemon, retrying up to 10 times on "update out of sequence" errors |
|
| 437 |
+func (d *Swarm) CmdRetryOutOfSequence(args ...string) (string, error) {
|
|
| 438 |
+ for i := 0; ; i++ {
|
|
| 439 |
+ out, err := d.Cmd(args...) |
|
| 440 |
+ if err != nil {
|
|
| 441 |
+ if strings.Contains(out, "update out of sequence") {
|
|
| 442 |
+ if i < 10 {
|
|
| 443 |
+ continue |
|
| 444 |
+ } |
|
| 445 |
+ } |
|
| 446 |
+ } |
|
| 447 |
+ return out, err |
|
| 448 |
+ } |
|
| 449 |
+} |
| 0 | 450 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,36 @@ |
| 0 |
+// +build !windows |
|
| 1 |
+ |
|
| 2 |
+package daemon |
|
| 3 |
+ |
|
| 4 |
+import ( |
|
| 5 |
+ "os" |
|
| 6 |
+ "path/filepath" |
|
| 7 |
+ "syscall" |
|
| 8 |
+ |
|
| 9 |
+ "github.com/go-check/check" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+func cleanupExecRoot(c *check.C, execRoot string) {
|
|
| 13 |
+ // Cleanup network namespaces in the exec root of this |
|
| 14 |
+ // daemon because this exec root is specific to this |
|
| 15 |
+ // daemon instance and has no chance of getting |
|
| 16 |
+ // cleaned up when a new daemon is instantiated with a |
|
| 17 |
+ // new exec root. |
|
| 18 |
+ netnsPath := filepath.Join(execRoot, "netns") |
|
| 19 |
+ filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error {
|
|
| 20 |
+ if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil {
|
|
| 21 |
+ c.Logf("unmount of %s failed: %v", path, err)
|
|
| 22 |
+ } |
|
| 23 |
+ os.Remove(path) |
|
| 24 |
+ return nil |
|
| 25 |
+ }) |
|
| 26 |
+} |
|
| 27 |
+ |
|
| 28 |
+// SignalDaemonDump sends a signal to the daemon to write a dump file |
|
| 29 |
+func SignalDaemonDump(pid int) {
|
|
| 30 |
+ syscall.Kill(pid, syscall.SIGQUIT) |
|
| 31 |
+} |
|
| 32 |
+ |
|
| 33 |
+func signalDaemonReload(pid int) error {
|
|
| 34 |
+ return syscall.Kill(pid, syscall.SIGHUP) |
|
| 35 |
+} |
| 0 | 36 |
new file mode 100644 |
| ... | ... |
@@ -0,0 +1,54 @@ |
| 0 |
+package daemon |
|
| 1 |
+ |
|
| 2 |
+import ( |
|
| 3 |
+ "fmt" |
|
| 4 |
+ "strconv" |
|
| 5 |
+ "syscall" |
|
| 6 |
+ "unsafe" |
|
| 7 |
+ |
|
| 8 |
+ "github.com/go-check/check" |
|
| 9 |
+ "golang.org/x/sys/windows" |
|
| 10 |
+) |
|
| 11 |
+ |
|
| 12 |
+func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle syscall.Handle, err error) {
|
|
| 13 |
+ namep, _ := syscall.UTF16PtrFromString(name) |
|
| 14 |
+ var _p2 uint32 |
|
| 15 |
+ if inheritHandle {
|
|
| 16 |
+ _p2 = 1 |
|
| 17 |
+ } |
|
| 18 |
+ r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep))) |
|
| 19 |
+ handle = syscall.Handle(r0) |
|
| 20 |
+ if handle == syscall.InvalidHandle {
|
|
| 21 |
+ err = e1 |
|
| 22 |
+ } |
|
| 23 |
+ return |
|
| 24 |
+} |
|
| 25 |
+ |
|
| 26 |
+func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) {
|
|
| 27 |
+ r0, _, _ := proc.Call(uintptr(handle)) |
|
| 28 |
+ if r0 != 0 {
|
|
| 29 |
+ err = syscall.Errno(r0) |
|
| 30 |
+ } |
|
| 31 |
+ return |
|
| 32 |
+} |
|
| 33 |
+ |
|
| 34 |
+// SignalDaemonDump sends a signal to the daemon to write a dump file |
|
| 35 |
+func SignalDaemonDump(pid int) {
|
|
| 36 |
+ modkernel32 := windows.NewLazySystemDLL("kernel32.dll")
|
|
| 37 |
+ procOpenEvent := modkernel32.NewProc("OpenEventW")
|
|
| 38 |
+ procPulseEvent := modkernel32.NewProc("PulseEvent")
|
|
| 39 |
+ |
|
| 40 |
+ ev := "Global\\docker-daemon-" + strconv.Itoa(pid) |
|
| 41 |
+ h2, _ := openEvent(0x0002, false, ev, procOpenEvent) |
|
| 42 |
+ if h2 == 0 {
|
|
| 43 |
+ return |
|
| 44 |
+ } |
|
| 45 |
+ pulseEvent(h2, procPulseEvent) |
|
| 46 |
+} |
|
| 47 |
+ |
|
| 48 |
+func signalDaemonReload(pid int) error {
|
|
| 49 |
+ return fmt.Errorf("daemon reload not supported")
|
|
| 50 |
+} |
|
| 51 |
+ |
|
| 52 |
+func cleanupExecRoot(c *check.C, execRoot string) {
|
|
| 53 |
+} |
| 0 | 12 |
deleted file mode 100644 |
| ... | ... |
@@ -1,416 +0,0 @@ |
| 1 |
-package main |
|
| 2 |
- |
|
| 3 |
-import ( |
|
| 4 |
- "encoding/json" |
|
| 5 |
- "fmt" |
|
| 6 |
- "net/http" |
|
| 7 |
- "strings" |
|
| 8 |
- "time" |
|
| 9 |
- |
|
| 10 |
- "github.com/docker/docker/api/types" |
|
| 11 |
- "github.com/docker/docker/api/types/filters" |
|
| 12 |
- "github.com/docker/docker/api/types/swarm" |
|
| 13 |
- "github.com/docker/docker/pkg/integration/checker" |
|
| 14 |
- "github.com/go-check/check" |
|
| 15 |
-) |
|
| 16 |
- |
|
| 17 |
-// SwarmDaemon is a test daemon with helpers for participating in a swarm. |
|
| 18 |
-type SwarmDaemon struct {
|
|
| 19 |
- *Daemon |
|
| 20 |
- swarm.Info |
|
| 21 |
- port int |
|
| 22 |
- listenAddr string |
|
| 23 |
-} |
|
| 24 |
- |
|
| 25 |
-// Init initializes a new swarm cluster. |
|
| 26 |
-func (d *SwarmDaemon) Init(req swarm.InitRequest) error {
|
|
| 27 |
- if req.ListenAddr == "" {
|
|
| 28 |
- req.ListenAddr = d.listenAddr |
|
| 29 |
- } |
|
| 30 |
- status, out, err := d.SockRequest("POST", "/swarm/init", req)
|
|
| 31 |
- if status != http.StatusOK {
|
|
| 32 |
- return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out)
|
|
| 33 |
- } |
|
| 34 |
- if err != nil {
|
|
| 35 |
- return fmt.Errorf("initializing swarm: %v", err)
|
|
| 36 |
- } |
|
| 37 |
- info, err := d.info() |
|
| 38 |
- if err != nil {
|
|
| 39 |
- return err |
|
| 40 |
- } |
|
| 41 |
- d.Info = info |
|
| 42 |
- return nil |
|
| 43 |
-} |
|
| 44 |
- |
|
| 45 |
-// Join joins a daemon to an existing cluster. |
|
| 46 |
-func (d *SwarmDaemon) Join(req swarm.JoinRequest) error {
|
|
| 47 |
- if req.ListenAddr == "" {
|
|
| 48 |
- req.ListenAddr = d.listenAddr |
|
| 49 |
- } |
|
| 50 |
- status, out, err := d.SockRequest("POST", "/swarm/join", req)
|
|
| 51 |
- if status != http.StatusOK {
|
|
| 52 |
- return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out)
|
|
| 53 |
- } |
|
| 54 |
- if err != nil {
|
|
| 55 |
- return fmt.Errorf("joining swarm: %v", err)
|
|
| 56 |
- } |
|
| 57 |
- info, err := d.info() |
|
| 58 |
- if err != nil {
|
|
| 59 |
- return err |
|
| 60 |
- } |
|
| 61 |
- d.Info = info |
|
| 62 |
- return nil |
|
| 63 |
-} |
|
| 64 |
- |
|
| 65 |
-// Leave forces daemon to leave current cluster. |
|
| 66 |
-func (d *SwarmDaemon) Leave(force bool) error {
|
|
| 67 |
- url := "/swarm/leave" |
|
| 68 |
- if force {
|
|
| 69 |
- url += "?force=1" |
|
| 70 |
- } |
|
| 71 |
- status, out, err := d.SockRequest("POST", url, nil)
|
|
| 72 |
- if status != http.StatusOK {
|
|
| 73 |
- return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out)
|
|
| 74 |
- } |
|
| 75 |
- if err != nil {
|
|
| 76 |
- err = fmt.Errorf("leaving swarm: %v", err)
|
|
| 77 |
- } |
|
| 78 |
- return err |
|
| 79 |
-} |
|
| 80 |
- |
|
| 81 |
-func (d *SwarmDaemon) info() (swarm.Info, error) {
|
|
| 82 |
- var info struct {
|
|
| 83 |
- Swarm swarm.Info |
|
| 84 |
- } |
|
| 85 |
- status, dt, err := d.SockRequest("GET", "/info", nil)
|
|
| 86 |
- if status != http.StatusOK {
|
|
| 87 |
- return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status)
|
|
| 88 |
- } |
|
| 89 |
- if err != nil {
|
|
| 90 |
- return info.Swarm, fmt.Errorf("get swarm info: %v", err)
|
|
| 91 |
- } |
|
| 92 |
- if err := json.Unmarshal(dt, &info); err != nil {
|
|
| 93 |
- return info.Swarm, err |
|
| 94 |
- } |
|
| 95 |
- return info.Swarm, nil |
|
| 96 |
-} |
|
| 97 |
- |
|
| 98 |
-type serviceConstructor func(*swarm.Service) |
|
| 99 |
-type nodeConstructor func(*swarm.Node) |
|
| 100 |
-type specConstructor func(*swarm.Spec) |
|
| 101 |
- |
|
| 102 |
-func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string {
|
|
| 103 |
- var service swarm.Service |
|
| 104 |
- for _, fn := range f {
|
|
| 105 |
- fn(&service) |
|
| 106 |
- } |
|
| 107 |
- status, out, err := d.SockRequest("POST", "/services/create", service.Spec)
|
|
| 108 |
- |
|
| 109 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 110 |
- c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out)))
|
|
| 111 |
- |
|
| 112 |
- var scr types.ServiceCreateResponse |
|
| 113 |
- c.Assert(json.Unmarshal(out, &scr), checker.IsNil) |
|
| 114 |
- return scr.ID |
|
| 115 |
-} |
|
| 116 |
- |
|
| 117 |
-func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service {
|
|
| 118 |
- var service swarm.Service |
|
| 119 |
- status, out, err := d.SockRequest("GET", "/services/"+id, nil)
|
|
| 120 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 121 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 122 |
- c.Assert(json.Unmarshal(out, &service), checker.IsNil) |
|
| 123 |
- return &service |
|
| 124 |
-} |
|
| 125 |
- |
|
| 126 |
-func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task {
|
|
| 127 |
- var tasks []swarm.Task |
|
| 128 |
- |
|
| 129 |
- filterArgs := filters.NewArgs() |
|
| 130 |
- filterArgs.Add("desired-state", "running")
|
|
| 131 |
- filterArgs.Add("service", service)
|
|
| 132 |
- filters, err := filters.ToParam(filterArgs) |
|
| 133 |
- c.Assert(err, checker.IsNil) |
|
| 134 |
- |
|
| 135 |
- status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil)
|
|
| 136 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 137 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 138 |
- c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) |
|
| 139 |
- return tasks |
|
| 140 |
-} |
|
| 141 |
- |
|
| 142 |
-func (d *SwarmDaemon) checkServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
|
| 143 |
- return func(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 144 |
- tasks := d.getServiceTasks(c, service) |
|
| 145 |
- var runningCount int |
|
| 146 |
- for _, task := range tasks {
|
|
| 147 |
- if task.Status.State == swarm.TaskStateRunning {
|
|
| 148 |
- runningCount++ |
|
| 149 |
- } |
|
| 150 |
- } |
|
| 151 |
- return runningCount, nil |
|
| 152 |
- } |
|
| 153 |
-} |
|
| 154 |
- |
|
| 155 |
-func (d *SwarmDaemon) checkServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
|
| 156 |
- return func(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 157 |
- service := d.getService(c, service) |
|
| 158 |
- if service.UpdateStatus == nil {
|
|
| 159 |
- return "", nil |
|
| 160 |
- } |
|
| 161 |
- return service.UpdateStatus.State, nil |
|
| 162 |
- } |
|
| 163 |
-} |
|
| 164 |
- |
|
| 165 |
-func (d *SwarmDaemon) checkServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
|
|
| 166 |
- return func(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 167 |
- tasks := d.getServiceTasks(c, service) |
|
| 168 |
- return len(tasks), nil |
|
| 169 |
- } |
|
| 170 |
-} |
|
| 171 |
- |
|
| 172 |
-func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 173 |
- var tasks []swarm.Task |
|
| 174 |
- |
|
| 175 |
- filterArgs := filters.NewArgs() |
|
| 176 |
- filterArgs.Add("desired-state", "running")
|
|
| 177 |
- filters, err := filters.ToParam(filterArgs) |
|
| 178 |
- c.Assert(err, checker.IsNil) |
|
| 179 |
- |
|
| 180 |
- status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil)
|
|
| 181 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 182 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 183 |
- c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) |
|
| 184 |
- |
|
| 185 |
- result := make(map[string]int) |
|
| 186 |
- for _, task := range tasks {
|
|
| 187 |
- if task.Status.State == swarm.TaskStateRunning {
|
|
| 188 |
- result[task.Spec.ContainerSpec.Image]++ |
|
| 189 |
- } |
|
| 190 |
- } |
|
| 191 |
- return result, nil |
|
| 192 |
-} |
|
| 193 |
- |
|
| 194 |
-func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 195 |
- nodes := d.listNodes(c) |
|
| 196 |
- var readyCount int |
|
| 197 |
- for _, node := range nodes {
|
|
| 198 |
- if node.Status.State == swarm.NodeStateReady {
|
|
| 199 |
- readyCount++ |
|
| 200 |
- } |
|
| 201 |
- } |
|
| 202 |
- return readyCount, nil |
|
| 203 |
-} |
|
| 204 |
- |
|
| 205 |
-func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task {
|
|
| 206 |
- var task swarm.Task |
|
| 207 |
- |
|
| 208 |
- status, out, err := d.SockRequest("GET", "/tasks/"+id, nil)
|
|
| 209 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 210 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 211 |
- c.Assert(json.Unmarshal(out, &task), checker.IsNil) |
|
| 212 |
- return task |
|
| 213 |
-} |
|
| 214 |
- |
|
| 215 |
-func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) {
|
|
| 216 |
- for _, fn := range f {
|
|
| 217 |
- fn(service) |
|
| 218 |
- } |
|
| 219 |
- url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index)
|
|
| 220 |
- status, out, err := d.SockRequest("POST", url, service.Spec)
|
|
| 221 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 222 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 223 |
-} |
|
| 224 |
- |
|
| 225 |
-func (d *SwarmDaemon) removeService(c *check.C, id string) {
|
|
| 226 |
- status, out, err := d.SockRequest("DELETE", "/services/"+id, nil)
|
|
| 227 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 228 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 229 |
-} |
|
| 230 |
- |
|
| 231 |
-func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node {
|
|
| 232 |
- var node swarm.Node |
|
| 233 |
- status, out, err := d.SockRequest("GET", "/nodes/"+id, nil)
|
|
| 234 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 235 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 236 |
- c.Assert(json.Unmarshal(out, &node), checker.IsNil) |
|
| 237 |
- c.Assert(node.ID, checker.Equals, id) |
|
| 238 |
- return &node |
|
| 239 |
-} |
|
| 240 |
- |
|
| 241 |
-func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) {
|
|
| 242 |
- url := "/nodes/" + id |
|
| 243 |
- if force {
|
|
| 244 |
- url += "?force=1" |
|
| 245 |
- } |
|
| 246 |
- |
|
| 247 |
- status, out, err := d.SockRequest("DELETE", url, nil)
|
|
| 248 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 249 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 250 |
-} |
|
| 251 |
- |
|
| 252 |
-func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) {
|
|
| 253 |
- for i := 0; ; i++ {
|
|
| 254 |
- node := d.getNode(c, id) |
|
| 255 |
- for _, fn := range f {
|
|
| 256 |
- fn(node) |
|
| 257 |
- } |
|
| 258 |
- url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
|
|
| 259 |
- status, out, err := d.SockRequest("POST", url, node.Spec)
|
|
| 260 |
- if i < 10 && strings.Contains(string(out), "update out of sequence") {
|
|
| 261 |
- time.Sleep(100 * time.Millisecond) |
|
| 262 |
- continue |
|
| 263 |
- } |
|
| 264 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 265 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 266 |
- return |
|
| 267 |
- } |
|
| 268 |
-} |
|
| 269 |
- |
|
| 270 |
-func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node {
|
|
| 271 |
- status, out, err := d.SockRequest("GET", "/nodes", nil)
|
|
| 272 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 273 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 274 |
- |
|
| 275 |
- nodes := []swarm.Node{}
|
|
| 276 |
- c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) |
|
| 277 |
- return nodes |
|
| 278 |
-} |
|
| 279 |
- |
|
| 280 |
-func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service {
|
|
| 281 |
- status, out, err := d.SockRequest("GET", "/services", nil)
|
|
| 282 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 283 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 284 |
- |
|
| 285 |
- services := []swarm.Service{}
|
|
| 286 |
- c.Assert(json.Unmarshal(out, &services), checker.IsNil) |
|
| 287 |
- return services |
|
| 288 |
-} |
|
| 289 |
- |
|
| 290 |
-func (d *SwarmDaemon) createSecret(c *check.C, secretSpec swarm.SecretSpec) string {
|
|
| 291 |
- status, out, err := d.SockRequest("POST", "/secrets/create", secretSpec)
|
|
| 292 |
- |
|
| 293 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 294 |
- c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out)))
|
|
| 295 |
- |
|
| 296 |
- var scr types.SecretCreateResponse |
|
| 297 |
- c.Assert(json.Unmarshal(out, &scr), checker.IsNil) |
|
| 298 |
- return scr.ID |
|
| 299 |
-} |
|
| 300 |
- |
|
| 301 |
-func (d *SwarmDaemon) listSecrets(c *check.C) []swarm.Secret {
|
|
| 302 |
- status, out, err := d.SockRequest("GET", "/secrets", nil)
|
|
| 303 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 304 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 305 |
- |
|
| 306 |
- secrets := []swarm.Secret{}
|
|
| 307 |
- c.Assert(json.Unmarshal(out, &secrets), checker.IsNil) |
|
| 308 |
- return secrets |
|
| 309 |
-} |
|
| 310 |
- |
|
| 311 |
-func (d *SwarmDaemon) getSecret(c *check.C, id string) *swarm.Secret {
|
|
| 312 |
- var secret swarm.Secret |
|
| 313 |
- status, out, err := d.SockRequest("GET", "/secrets/"+id, nil)
|
|
| 314 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 315 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 316 |
- c.Assert(json.Unmarshal(out, &secret), checker.IsNil) |
|
| 317 |
- return &secret |
|
| 318 |
-} |
|
| 319 |
- |
|
| 320 |
-func (d *SwarmDaemon) deleteSecret(c *check.C, id string) {
|
|
| 321 |
- status, out, err := d.SockRequest("DELETE", "/secrets/"+id, nil)
|
|
| 322 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 323 |
- c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf("output: %q", string(out)))
|
|
| 324 |
-} |
|
| 325 |
- |
|
| 326 |
-func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm {
|
|
| 327 |
- var sw swarm.Swarm |
|
| 328 |
- status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 329 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 330 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 331 |
- c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 332 |
- return sw |
|
| 333 |
-} |
|
| 334 |
- |
|
| 335 |
-func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
|
|
| 336 |
- sw := d.getSwarm(c) |
|
| 337 |
- for _, fn := range f {
|
|
| 338 |
- fn(&sw.Spec) |
|
| 339 |
- } |
|
| 340 |
- url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index)
|
|
| 341 |
- status, out, err := d.SockRequest("POST", url, sw.Spec)
|
|
| 342 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 343 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 344 |
-} |
|
| 345 |
- |
|
| 346 |
-func (d *SwarmDaemon) rotateTokens(c *check.C) {
|
|
| 347 |
- var sw swarm.Swarm |
|
| 348 |
- status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 349 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 350 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 351 |
- c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 352 |
- |
|
| 353 |
- url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index)
|
|
| 354 |
- status, out, err = d.SockRequest("POST", url, sw.Spec)
|
|
| 355 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 356 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 357 |
-} |
|
| 358 |
- |
|
| 359 |
-func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens {
|
|
| 360 |
- var sw swarm.Swarm |
|
| 361 |
- status, out, err := d.SockRequest("GET", "/swarm", nil)
|
|
| 362 |
- c.Assert(err, checker.IsNil, check.Commentf(string(out))) |
|
| 363 |
- c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
|
|
| 364 |
- c.Assert(json.Unmarshal(out, &sw), checker.IsNil) |
|
| 365 |
- return sw.JoinTokens |
|
| 366 |
-} |
|
| 367 |
- |
|
| 368 |
-func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 369 |
- info, err := d.info() |
|
| 370 |
- c.Assert(err, checker.IsNil) |
|
| 371 |
- return info.LocalNodeState, nil |
|
| 372 |
-} |
|
| 373 |
- |
|
| 374 |
-func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 375 |
- info, err := d.info() |
|
| 376 |
- c.Assert(err, checker.IsNil) |
|
| 377 |
- c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
|
| 378 |
- return info.ControlAvailable, nil |
|
| 379 |
-} |
|
| 380 |
- |
|
| 381 |
-func (d *SwarmDaemon) checkLeader(c *check.C) (interface{}, check.CommentInterface) {
|
|
| 382 |
- errList := check.Commentf("could not get node list")
|
|
| 383 |
- status, out, err := d.SockRequest("GET", "/nodes", nil)
|
|
| 384 |
- if err != nil {
|
|
| 385 |
- return err, errList |
|
| 386 |
- } |
|
| 387 |
- if status != http.StatusOK {
|
|
| 388 |
- return fmt.Errorf("expected http status OK, got: %d", status), errList
|
|
| 389 |
- } |
|
| 390 |
- |
|
| 391 |
- var ls []swarm.Node |
|
| 392 |
- if err := json.Unmarshal(out, &ls); err != nil {
|
|
| 393 |
- return err, errList |
|
| 394 |
- } |
|
| 395 |
- |
|
| 396 |
- for _, node := range ls {
|
|
| 397 |
- if node.ManagerStatus != nil && node.ManagerStatus.Leader {
|
|
| 398 |
- return nil, nil |
|
| 399 |
- } |
|
| 400 |
- } |
|
| 401 |
- return fmt.Errorf("no leader"), check.Commentf("could not find leader")
|
|
| 402 |
-} |
|
| 403 |
- |
|
| 404 |
-func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) {
|
|
| 405 |
- for i := 0; ; i++ {
|
|
| 406 |
- out, err := d.Cmd(args...) |
|
| 407 |
- if err != nil {
|
|
| 408 |
- if strings.Contains(out, "update out of sequence") {
|
|
| 409 |
- if i < 10 {
|
|
| 410 |
- continue |
|
| 411 |
- } |
|
| 412 |
- } |
|
| 413 |
- } |
|
| 414 |
- return out, err |
|
| 415 |
- } |
|
| 416 |
-} |
| ... | ... |
@@ -1,8 +1,11 @@ |
| 1 | 1 |
package main |
| 2 | 2 |
|
| 3 |
-import "github.com/go-check/check" |
|
| 3 |
+import ( |
|
| 4 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 5 |
+ "github.com/go-check/check" |
|
| 6 |
+) |
|
| 4 | 7 |
|
| 5 |
-func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon {
|
|
| 8 |
+func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *daemon.Swarm {
|
|
| 6 | 9 |
s.daemonsLock.Lock() |
| 7 | 10 |
defer s.daemonsLock.Unlock() |
| 8 | 11 |
for _, d := range s.daemons {
|
| 9 | 12 |
deleted file mode 100644 |
| ... | ... |
@@ -1,35 +0,0 @@ |
| 1 |
-// +build !windows |
|
| 2 |
- |
|
| 3 |
-package main |
|
| 4 |
- |
|
| 5 |
-import ( |
|
| 6 |
- "os" |
|
| 7 |
- "path/filepath" |
|
| 8 |
- "syscall" |
|
| 9 |
- |
|
| 10 |
- "github.com/go-check/check" |
|
| 11 |
-) |
|
| 12 |
- |
|
| 13 |
-func cleanupExecRoot(c *check.C, execRoot string) {
|
|
| 14 |
- // Cleanup network namespaces in the exec root of this |
|
| 15 |
- // daemon because this exec root is specific to this |
|
| 16 |
- // daemon instance and has no chance of getting |
|
| 17 |
- // cleaned up when a new daemon is instantiated with a |
|
| 18 |
- // new exec root. |
|
| 19 |
- netnsPath := filepath.Join(execRoot, "netns") |
|
| 20 |
- filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error {
|
|
| 21 |
- if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil {
|
|
| 22 |
- c.Logf("unmount of %s failed: %v", path, err)
|
|
| 23 |
- } |
|
| 24 |
- os.Remove(path) |
|
| 25 |
- return nil |
|
| 26 |
- }) |
|
| 27 |
-} |
|
| 28 |
- |
|
| 29 |
-func signalDaemonDump(pid int) {
|
|
| 30 |
- syscall.Kill(pid, syscall.SIGQUIT) |
|
| 31 |
-} |
|
| 32 |
- |
|
| 33 |
-func signalDaemonReload(pid int) error {
|
|
| 34 |
- return syscall.Kill(pid, syscall.SIGHUP) |
|
| 35 |
-} |
| 36 | 1 |
deleted file mode 100644
@@ -1,53 +0,0 @@
-package main
-
-import (
-    "fmt"
-    "strconv"
-    "syscall"
-    "unsafe"
-
-    "github.com/go-check/check"
-    "golang.org/x/sys/windows"
-)
-
-func openEvent(desiredAccess uint32, inheritHandle bool, name string, proc *windows.LazyProc) (handle syscall.Handle, err error) {
-    namep, _ := syscall.UTF16PtrFromString(name)
-    var _p2 uint32
-    if inheritHandle {
-        _p2 = 1
-    }
-    r0, _, e1 := proc.Call(uintptr(desiredAccess), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
-    handle = syscall.Handle(r0)
-    if handle == syscall.InvalidHandle {
-        err = e1
-    }
-    return
-}
-
-func pulseEvent(handle syscall.Handle, proc *windows.LazyProc) (err error) {
-    r0, _, _ := proc.Call(uintptr(handle))
-    if r0 != 0 {
-        err = syscall.Errno(r0)
-    }
-    return
-}
-
-func signalDaemonDump(pid int) {
-    modkernel32 := windows.NewLazySystemDLL("kernel32.dll")
-    procOpenEvent := modkernel32.NewProc("OpenEventW")
-    procPulseEvent := modkernel32.NewProc("PulseEvent")
-
-    ev := "Global\\docker-daemon-" + strconv.Itoa(pid)
-    h2, _ := openEvent(0x0002, false, ev, procOpenEvent)
-    if h2 == 0 {
-        return
-    }
-    pulseEvent(h2, procPulseEvent)
-}
-
-func signalDaemonReload(pid int) error {
-    return fmt.Errorf("daemon reload not supported")
-}
-
-func cleanupExecRoot(c *check.C, execRoot string) {
-}
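Reviewer note: there is no SIGQUIT on Windows, so the helper above pulses the named event `Global\docker-daemon-<pid>` that the daemon registers for stack dumps; the magic `0x0002` is the `EVENT_MODIFY_STATE` access right, the minimum needed to call `PulseEvent`. Daemon reload is likewise unsupported there, hence the `fmt.Errorf` stub, and `cleanupExecRoot` is a no-op because there are no network namespaces to unmount.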
@@ -12,6 +12,7 @@ import (
 
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/client"
+    "github.com/docker/docker/pkg/integration"
     "github.com/docker/docker/pkg/integration/checker"
     "github.com/docker/docker/pkg/stdcopy"
     "github.com/go-check/check"
@@ -79,7 +80,7 @@ func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) {
     // connection will shutdown, err should be "persistent connection closed"
     c.Assert(err, checker.NotNil) // Server shutdown connection
 
-    body, err := readBody(resp.Body)
+    body, err := integration.ReadBody(resp.Body)
     c.Assert(err, checker.IsNil)
     c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
     expected := "No such container: doesnotexist\r\n"
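The remaining hunks in this file, and in the build, container, and exec API test files below, are the same mechanical substitution. For context, `integration.ReadBody` presumably keeps the old helper's contract, along the lines of this sketch (the actual implementation in `pkg/integration` may differ):

```go
package integration

import (
	"io"
	"io/ioutil"
)

// ReadBody drains an HTTP response body and closes it, returning the bytes.
func ReadBody(b io.ReadCloser) ([]byte, error) {
	defer b.Close()
	return ioutil.ReadAll(b)
}
```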
@@ -7,6 +7,7 @@ import (
     "regexp"
     "strings"
 
+    "github.com/docker/docker/pkg/integration"
     "github.com/docker/docker/pkg/integration/checker"
     "github.com/go-check/check"
 )
@@ -34,7 +35,7 @@ RUN find /tmp/`
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
 
-    buf, err := readBody(body)
+    buf, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
 
     // Make sure Dockerfile exists.
@@ -125,7 +126,7 @@ RUN echo 'right'
     c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
 
     defer body.Close()
-    content, err := readBody(body)
+    content, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
 
     // Build used the wrong dockerfile.
@@ -144,7 +145,7 @@ RUN echo from dockerfile`,
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
 
-    buf, err := readBody(body)
+    buf, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
 
     out := string(buf)
@@ -166,7 +167,7 @@ RUN echo from Dockerfile`,
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
 
-    buf, err := readBody(body)
+    buf, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
 
     out := string(buf)
@@ -189,7 +190,7 @@ RUN echo from dockerfile`,
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
 
-    buf, err := readBody(body)
+    buf, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
 
     out := string(buf)
@@ -236,7 +237,7 @@ func (s *DockerSuite) TestBuildAPIUnnormalizedTarPaths(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
 
-    out, err := readBody(body)
+    out, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     lines := strings.Split(string(out), "\n")
     c.Assert(len(lines), checker.GreaterThan, 1)
@@ -723,7 +723,7 @@ func (s *DockerSuite) TestContainerAPIInvalidPortSyntax(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(string(b[:]), checker.Contains, "invalid port")
 }
@@ -743,7 +743,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyInvalidPolicyName(c *check.C)
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(string(b[:]), checker.Contains, "invalid restart policy")
 }
@@ -763,7 +763,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyRetryMismatch(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be used with restart policy")
 }
@@ -783,7 +783,7 @@ func (s *DockerSuite) TestContainerAPIRestartPolicyNegativeRetryCount(c *check.C)
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(string(b[:]), checker.Contains, "maximum retry count cannot be negative")
 }
@@ -834,7 +834,7 @@ func (s *DockerSuite) TestContainerAPIPostCreateNull(c *check.C) {
     c.Assert(err, checker.IsNil)
    c.Assert(res.StatusCode, checker.Equals, http.StatusCreated)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     type createResp struct {
         ID string
@@ -863,7 +863,7 @@ func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) {
 
     res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json")
     c.Assert(err, checker.IsNil)
-    b, err2 := readBody(body)
+    b, err2 := integration.ReadBody(body)
     c.Assert(err2, checker.IsNil)
 
     c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
@@ -10,6 +10,7 @@ import (
     "strings"
     "time"
 
+    "github.com/docker/docker/pkg/integration"
     "github.com/docker/docker/pkg/integration/checker"
     "github.com/go-check/check"
 )
@@ -40,7 +41,7 @@ func (s *DockerSuite) TestExecAPICreateNoValidContentType(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
 
     comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified")
@@ -107,7 +108,7 @@ func (s *DockerSuite) TestExecAPIStartBackwardsCompatible(c *check.C) {
     resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain")
     c.Assert(err, checker.IsNil)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     comment := check.Commentf("response body: %s", b)
     c.Assert(err, checker.IsNil, comment)
     c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment)
@@ -156,7 +157,7 @@ func (s *DockerSuite) TestExecAPIStartWithDetach(c *check.C) {
     _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json")
     c.Assert(err, checker.IsNil)
 
-    b, err = readBody(body)
+    b, err = integration.ReadBody(body)
     comment := check.Commentf("response body: %s", b)
     c.Assert(err, checker.IsNil, comment)
 
@@ -182,7 +183,7 @@ func startExec(c *check.C, id string, code int) {
     resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json")
     c.Assert(err, checker.IsNil)
 
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     comment := check.Commentf("response body: %s", b)
     c.Assert(err, checker.IsNil, comment)
     c.Assert(resp.StatusCode, checker.Equals, code, comment)
@@ -4,11 +4,12 @@ package main
 
 import (
     "github.com/docker/docker/api/types/swarm"
+    "github.com/docker/docker/integration-cli/daemon"
     "github.com/docker/docker/pkg/integration/checker"
     "github.com/go-check/check"
 )
 
-func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor {
+func setPortConfig(portConfig []swarm.PortConfig) daemon.ServiceConstructor {
     return func(s *swarm.Service) {
         if s.Spec.EndpointSpec == nil {
             s.Spec.EndpointSpec = &swarm.EndpointSpec{}
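`daemon.ServiceConstructor` is the exported form of the functional-option type these tests build on: each option mutates the `swarm.Service` spec, and `CreateService`/`UpdateService` apply the options in order before submitting the spec. A hedged sketch of the shape (the concrete definition lives in `integration-cli/daemon`; `setInstances` here mirrors the test helper of the same name):

```go
package helpers

import "github.com/docker/docker/api/types/swarm"

// ServiceConstructor mutates a service spec in place before it is submitted.
type ServiceConstructor func(*swarm.Service)

// setInstances returns an option that makes the service replicated with the
// given replica count.
func setInstances(replicas uint64) ServiceConstructor {
	return func(s *swarm.Service) {
		s.Spec.Mode = swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		}
	}
}
```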
@@ -22,16 +23,16 @@ func (s *DockerSwarmSuite) TestAPIServiceUpdatePort(c *check.C) {
 
     // Create a service with a port mapping of 8080:8081.
     portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}}
-    serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig))
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1)
+    serviceID := d.CreateService(c, simpleTestService, setInstances(1), setPortConfig(portConfig))
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
 
     // Update the service: change the port mapping from 8080:8081 to 8082:8083.
     updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}}
-    remoteService := d.getService(c, serviceID)
-    d.updateService(c, remoteService, setPortConfig(updatedPortConfig))
+    remoteService := d.GetService(c, serviceID)
+    d.UpdateService(c, remoteService, setPortConfig(updatedPortConfig))
 
     // Inspect the service and verify port mapping.
-    updatedService := d.getService(c, serviceID)
+    updatedService := d.GetService(c, serviceID)
     c.Assert(updatedService.Spec.EndpointSpec, check.NotNil)
     c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1)
     c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083))
@@ -14,6 +14,7 @@ import (
     "time"
 
     "github.com/docker/docker/api/types/swarm"
+    "github.com/docker/docker/integration-cli/daemon"
     "github.com/docker/docker/pkg/integration/checker"
     "github.com/go-check/check"
 )
@@ -23,13 +24,13 @@ var defaultReconciliationTimeout = 30 * time.Second
 func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
     // todo: should find a better way to verify that components are running than /info
     d1 := s.AddDaemon(c, true, true)
-    info, err := d1.info()
+    info, err := d1.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.True)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
     d2 := s.AddDaemon(c, true, false)
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.False)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -37,14 +38,14 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
     // Leaving cluster
     c.Assert(d2.Leave(false), checker.IsNil)
 
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.False)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
-    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
+    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
 
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.False)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -60,12 +61,12 @@ func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
     err = d2.Start()
     c.Assert(err, checker.IsNil)
 
-    info, err = d1.info()
+    info, err = d1.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.True)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.False)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -78,68 +79,68 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
     // todo: error message differs depending if some components of token are valid
 
     d2 := s.AddDaemon(c, false, false)
-    err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
+    err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "join token is necessary")
-    info, err := d2.info()
+    info, err := d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
-    err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}})
+    err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "invalid join token")
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
-    workerToken := d1.joinTokens(c).Worker
+    workerToken := d1.JoinTokens(c).Worker
 
-    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
-    info, err = d2.info()
+    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
     c.Assert(d2.Leave(false), checker.IsNil)
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
     // change tokens
-    d1.rotateTokens(c)
+    d1.RotateTokens(c)
 
-    err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}})
+    err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "join token is necessary")
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
-    workerToken = d1.joinTokens(c).Worker
+    workerToken = d1.JoinTokens(c).Worker
 
-    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
-    info, err = d2.info()
+    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
     c.Assert(d2.Leave(false), checker.IsNil)
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
     // change spec, don't change tokens
-    d1.updateSwarm(c, func(s *swarm.Spec) {})
+    d1.UpdateSwarm(c, func(s *swarm.Spec) {})
 
-    err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}})
+    err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "join token is necessary")
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
-    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil)
-    info, err = d2.info()
+    c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
     c.Assert(d2.Leave(false), checker.IsNil)
-    info, err = d2.info()
+    info, err = d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 }
@@ -147,10 +148,10 @@ func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
 func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
     d1 := s.AddDaemon(c, true, true)
     d2 := s.AddDaemon(c, false, false)
-    splitToken := strings.Split(d1.joinTokens(c).Worker, "-")
+    splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
     splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
     replacementToken := strings.Join(splitToken, "-")
-    err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}})
+    err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}})
     c.Assert(err, checker.NotNil)
     c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
 }
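Reviewer note: this test works because a swarm join token has the shape `SWMTKN-1-<CA certificate digest>-<secret>`, so after splitting on `-`, index 2 is the CA fingerprint component; swapping it in place yields a token that still parses but fails the remote-CA check, which is the error asserted above.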
@@ -160,48 +161,48 @@ func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
     c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
     d2 := s.AddDaemon(c, true, false)
 
-    info, err := d2.info()
+    info, err := d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.False)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
-    d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
+    d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
         n.Spec.Role = swarm.NodeRoleManager
     })
 
-    waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True)
+    waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
 
-    d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
+    d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
         n.Spec.Role = swarm.NodeRoleWorker
     })
 
-    waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False)
+    waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)
 
     // Demoting last node should fail
-    node := d1.getNode(c, d1.NodeID)
+    node := d1.GetNode(c, d1.NodeID)
     node.Spec.Role = swarm.NodeRoleWorker
     url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
     status, out, err := d1.SockRequest("POST", url, node.Spec)
     c.Assert(err, checker.IsNil)
     c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out)))
     c.Assert(string(out), checker.Contains, "last manager of the swarm")
-    info, err = d1.info()
+    info, err = d1.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
     c.Assert(info.ControlAvailable, checker.True)
 
     // Promote already demoted node
-    d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
+    d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
         n.Spec.Role = swarm.NodeRoleManager
     })
 
-    waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True)
+    waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesEmptyList(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
-    services := d.listServices(c)
+    services := d.ListServices(c)
     c.Assert(services, checker.NotNil)
     c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services))
 }
@@ -210,16 +211,16 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesCreate(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
     instances := 2
-    id := d.createService(c, simpleTestService, setInstances(instances))
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+    id := d.CreateService(c, simpleTestService, setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 
-    service := d.getService(c, id)
+    service := d.GetService(c, id)
     instances = 5
-    d.updateService(c, service, setInstances(instances))
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+    d.UpdateService(c, service, setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 
-    d.removeService(c, service.ID)
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0)
+    d.RemoveService(c, service.ID)
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) {
@@ -230,23 +231,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesMultipleAgents(c *check.C) {
     time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks
 
     instances := 9
-    id := d1.createService(c, simpleTestService, setInstances(instances))
+    id := d1.CreateService(c, simpleTestService, setInstances(instances))
 
-    waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0)
-    waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0)
-    waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0)
+    waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0)
+    waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0)
+    waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.GreaterThan, 0)
 
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 
     // reconciliation on d2 node down
     c.Assert(d2.Stop(), checker.IsNil)
 
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 
     // test downscaling
     instances = 5
-    d1.updateService(c, d1.getService(c, id), setInstances(instances))
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+    d1.UpdateService(c, d1.GetService(c, id), setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 
 }
 
@@ -255,27 +256,27 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesCreateGlobal(c *check.C) {
     d2 := s.AddDaemon(c, true, false)
     d3 := s.AddDaemon(c, true, false)
 
-    d1.createService(c, simpleTestService, setGlobalMode)
+    d1.CreateService(c, simpleTestService, setGlobalMode)
 
-    waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1)
-    waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1)
-    waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1)
+    waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, 1)
+    waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
+    waitAndAssert(c, defaultReconciliationTimeout, d3.CheckActiveContainerCount, checker.Equals, 1)
 
     d4 := s.AddDaemon(c, true, false)
     d5 := s.AddDaemon(c, true, false)
 
-    waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1)
-    waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1)
+    waitAndAssert(c, defaultReconciliationTimeout, d4.CheckActiveContainerCount, checker.Equals, 1)
+    waitAndAssert(c, defaultReconciliationTimeout, d5.CheckActiveContainerCount, checker.Equals, 1)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) {
     const nodeCount = 3
-    var daemons [nodeCount]*SwarmDaemon
+    var daemons [nodeCount]*daemon.Swarm
     for i := 0; i < nodeCount; i++ {
         daemons[i] = s.AddDaemon(c, true, i == 0)
     }
     // wait for nodes ready
-    waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount)
+    waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
 
     // service image at start
     image1 := "busybox:latest"
@@ -291,26 +292,26 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) {
     // create service
     instances := 5
     parallelism := 2
-    id := daemons[0].createService(c, serviceForUpdate, setInstances(instances))
+    id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
 
     // wait for tasks ready
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
         map[string]int{image1: instances})
 
     // issue service update
-    service := daemons[0].getService(c, id)
-    daemons[0].updateService(c, service, setImage(image2))
+    service := daemons[0].GetService(c, id)
+    daemons[0].UpdateService(c, service, setImage(image2))
 
     // first batch
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
         map[string]int{image1: instances - parallelism, image2: parallelism})
 
     // 2nd batch
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
         map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism})
 
     // 3rd batch
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
         map[string]int{image2: instances})
 
     // Roll back to the previous version. This uses the CLI because
@@ -319,26 +320,26 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdate(c *check.C) {
     c.Assert(err, checker.IsNil, check.Commentf(out))
 
     // first batch
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
        map[string]int{image2: instances - parallelism, image1: parallelism})
 
     // 2nd batch
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
        map[string]int{image2: instances - 2*parallelism, image1: 2 * parallelism})
 
     // 3rd batch
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
        map[string]int{image1: instances})
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) {
     const nodeCount = 3
-    var daemons [nodeCount]*SwarmDaemon
+    var daemons [nodeCount]*daemon.Swarm
     for i := 0; i < nodeCount; i++ {
         daemons[i] = s.AddDaemon(c, true, i == 0)
     }
     // wait for nodes ready
-    waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount)
+    waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
 
     // service image at start
     image1 := "busybox:latest"
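For reference, the batch assertions in the two hunks above follow directly from `instances = 5` and `parallelism = 2`: each update round converts `min(parallelism, remaining)` tasks, so the counts progress 2, 2, 1 and the third batch leaves all five tasks on the target image. A one-liner to convince yourself:

```go
package main

import "fmt"

func main() {
	remaining, parallelism := 5, 2
	for remaining > 0 {
		batch := parallelism
		if remaining < batch {
			batch = remaining
		}
		fmt.Println(batch) // prints 2, 2, 1
		remaining -= batch
	}
}
```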
@@ -347,19 +348,19 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) {
 
     // create service
     instances := 5
-    id := daemons[0].createService(c, serviceForUpdate, setInstances(instances))
+    id := daemons[0].CreateService(c, serviceForUpdate, setInstances(instances))
 
     // wait for tasks ready
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
        map[string]int{image1: instances})
 
     // issue service update
-    service := daemons[0].getService(c, id)
-    daemons[0].updateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1))
+    service := daemons[0].GetService(c, id)
+    daemons[0].UpdateService(c, service, setImage(image2), setFailureAction(swarm.UpdateFailureActionPause), setMaxFailureRatio(0.25), setParallelism(1))
 
     // should update 2 tasks and then pause
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused)
-    v, _ := daemons[0].checkServiceRunningTasks(id)(c)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceUpdateState(id), checker.Equals, swarm.UpdateStatePaused)
+    v, _ := daemons[0].CheckServiceRunningTasks(id)(c)
     c.Assert(v, checker.Equals, instances-2)
 
     // Roll back to the previous version. This uses the CLI because
@@ -367,57 +368,57 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesFailedUpdate(c *check.C) {
     out, err := daemons[0].Cmd("service", "update", "--rollback", id)
     c.Assert(err, checker.IsNil, check.Commentf(out))
 
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals,
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
        map[string]int{image1: instances})
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) {
     const nodeCount = 3
-    var daemons [nodeCount]*SwarmDaemon
+    var daemons [nodeCount]*daemon.Swarm
     for i := 0; i < nodeCount; i++ {
         daemons[i] = s.AddDaemon(c, true, i == 0)
     }
     // wait for nodes ready
-    waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount)
+    waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
 
     // create service
     constraints := []string{"node.role==worker"}
     instances := 3
-    id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+    id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
     // wait for tasks ready
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
     // validate tasks are running on worker nodes
-    tasks := daemons[0].getServiceTasks(c, id)
+    tasks := daemons[0].GetServiceTasks(c, id)
     for _, task := range tasks {
-        node := daemons[0].getNode(c, task.NodeID)
+        node := daemons[0].GetNode(c, task.NodeID)
         c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker)
     }
     // remove service
-    daemons[0].removeService(c, id)
+    daemons[0].RemoveService(c, id)
 
     // create service
     constraints = []string{"node.role!=worker"}
-    id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+    id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
     // wait for tasks ready
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances)
-    tasks = daemons[0].getServiceTasks(c, id)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
+    tasks = daemons[0].GetServiceTasks(c, id)
     // validate tasks are running on manager nodes
     for _, task := range tasks {
-        node := daemons[0].getNode(c, task.NodeID)
+        node := daemons[0].GetNode(c, task.NodeID)
         c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager)
     }
     // remove service
-    daemons[0].removeService(c, id)
+    daemons[0].RemoveService(c, id)
 
     // create service
     constraints = []string{"node.role==nosuchrole"}
-    id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+    id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
     // wait for tasks created
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances)
     // let scheduler try
     time.Sleep(250 * time.Millisecond)
     // validate tasks are not assigned to any node
-    tasks = daemons[0].getServiceTasks(c, id)
+    tasks = daemons[0].GetServiceTasks(c, id)
     for _, task := range tasks {
         c.Assert(task.NodeID, checker.Equals, "")
     }
@@ -425,23 +426,23 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintRole(c *check.C) {
 
 func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
     const nodeCount = 3
-    var daemons [nodeCount]*SwarmDaemon
+    var daemons [nodeCount]*daemon.Swarm
     for i := 0; i < nodeCount; i++ {
         daemons[i] = s.AddDaemon(c, true, i == 0)
     }
     // wait for nodes ready
-    waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount)
-    nodes := daemons[0].listNodes(c)
+    waitAndAssert(c, 5*time.Second, daemons[0].CheckNodeReadyCount, checker.Equals, nodeCount)
+    nodes := daemons[0].ListNodes(c)
     c.Assert(len(nodes), checker.Equals, nodeCount)
 
     // add labels to nodes
-    daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) {
+    daemons[0].UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
         n.Spec.Annotations.Labels = map[string]string{
             "security": "high",
         }
     })
     for i := 1; i < nodeCount; i++ {
-        daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) {
+        daemons[0].UpdateNode(c, nodes[i].ID, func(n *swarm.Node) {
             n.Spec.Annotations.Labels = map[string]string{
                 "security": "low",
             }
@@ -451,68 +452,68 @@ func (s *DockerSwarmSuite) TestAPISwarmServiceConstraintLabel(c *check.C) {
     // create service
     instances := 3
     constraints := []string{"node.labels.security==high"}
-    id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+    id := daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
     // wait for tasks ready
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances)
-    tasks := daemons[0].getServiceTasks(c, id)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
+    tasks := daemons[0].GetServiceTasks(c, id)
     // validate all tasks are running on nodes[0]
     for _, task := range tasks {
         c.Assert(task.NodeID, checker.Equals, nodes[0].ID)
     }
     // remove service
-    daemons[0].removeService(c, id)
+    daemons[0].RemoveService(c, id)
 
     // create service
     constraints = []string{"node.labels.security!=high"}
-    id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+    id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
     // wait for tasks ready
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances)
-    tasks = daemons[0].getServiceTasks(c, id)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
+    tasks = daemons[0].GetServiceTasks(c, id)
     // validate all tasks are NOT running on nodes[0]
     for _, task := range tasks {
         c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID)
     }
     // remove service
-    daemons[0].removeService(c, id)
+    daemons[0].RemoveService(c, id)
 
     constraints = []string{"node.labels.security==medium"}
-    id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+    id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
     // wait for tasks created
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances)
     // let scheduler try
     time.Sleep(250 * time.Millisecond)
-    tasks = daemons[0].getServiceTasks(c, id)
+    tasks = daemons[0].GetServiceTasks(c, id)
     // validate tasks are not assigned
     for _, task := range tasks {
         c.Assert(task.NodeID, checker.Equals, "")
     }
     // remove service
-    daemons[0].removeService(c, id)
+    daemons[0].RemoveService(c, id)
 
     // multiple constraints
     constraints = []string{
         "node.labels.security==high",
         fmt.Sprintf("node.id==%s", nodes[1].ID),
     }
-    id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+    id = daemons[0].CreateService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
     // wait for tasks created
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(id), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceTasks(id), checker.Equals, instances)
     // let scheduler try
     time.Sleep(250 * time.Millisecond)
-    tasks = daemons[0].getServiceTasks(c, id)
+    tasks = daemons[0].GetServiceTasks(c, id)
     // validate tasks are not assigned
     for _, task := range tasks {
         c.Assert(task.NodeID, checker.Equals, "")
     }
     // make nodes[1] fulfill the constraints
-    daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) {
+    daemons[0].UpdateNode(c, nodes[1].ID, func(n *swarm.Node) {
         n.Spec.Annotations.Labels = map[string]string{
             "security": "high",
         }
     })
     // wait for tasks ready
-    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(id), checker.Equals, instances)
-    tasks = daemons[0].getServiceTasks(c, id)
+    waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckServiceRunningTasks(id), checker.Equals, instances)
+    tasks = daemons[0].GetServiceTasks(c, id)
     for _, task := range tasks {
         c.Assert(task.NodeID, checker.Equals, nodes[1].ID)
     }
@@ -529,14 +530,14 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) {
     time.Sleep(1 * time.Second) // make sure all daemons are ready to accept
 
     instances := 9
-    d1.createService(c, simpleTestService, setInstances(instances))
+    d1.CreateService(c, simpleTestService, setInstances(instances))
 
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 
-    getContainers := func() map[string]*SwarmDaemon {
-        m := make(map[string]*SwarmDaemon)
-        for _, d := range []*SwarmDaemon{d1, d2, d3} {
-            for _, id := range d.activeContainers() {
+    getContainers := func() map[string]*daemon.Swarm {
+        m := make(map[string]*daemon.Swarm)
+        for _, d := range []*daemon.Swarm{d1, d2, d3} {
+            for _, id := range d.ActiveContainers() {
                 m[id] = d
             }
         }
@@ -553,7 +554,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) {
     _, err := containers[toRemove].Cmd("stop", toRemove)
     c.Assert(err, checker.IsNil)
 
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 
     containers2 := getContainers()
     c.Assert(containers2, checker.HasLen, instances)
@@ -579,7 +580,7 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesStateReporting(c *check.C) {
 
     time.Sleep(time.Second) // give some time to handle the signal
 
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 
     containers2 = getContainers()
     c.Assert(containers2, checker.HasLen, instances)
@@ -599,20 +600,20 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
     d3 := s.AddDaemon(c, true, true)
 
     // start a service by hitting each of the 3 managers
-    d1.createService(c, simpleTestService, func(s *swarm.Service) {
+    d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
         s.Spec.Name = "test1"
     })
-    d2.createService(c, simpleTestService, func(s *swarm.Service) {
+    d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
         s.Spec.Name = "test2"
     })
-    d3.createService(c, simpleTestService, func(s *swarm.Service) {
+    d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
         s.Spec.Name = "test3"
     })
 
     // 3 services should be started now, because the requests were proxied to leader
     // query each node and make sure it returns 3 services
-    for _, d := range []*SwarmDaemon{d1, d2, d3} {
-        services := d.listServices(c)
+    for _, d := range []*daemon.Swarm{d1, d2, d3} {
+        services := d.ListServices(c)
         c.Assert(services, checker.HasLen, 3)
     }
 }
@@ -624,23 +625,23 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
     d3 := s.AddDaemon(c, true, true)
 
     // assert that the first node we made is the leader, and the other two are followers
-    c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True)
-    c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
-    c.Assert(d1.getNode(c, d3.NodeID).ManagerStatus.Leader, checker.False)
+    c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True)
+    c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
+    c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False)
 
     d1.Stop() // stop the leader
 
     var (
-        leader    *SwarmDaemon   // keep track of leader
-        followers []*SwarmDaemon // keep track of followers
+        leader    *daemon.Swarm   // keep track of leader
+        followers []*daemon.Swarm // keep track of followers
     )
-    checkLeader := func(nodes ...*SwarmDaemon) checkF {
+    checkLeader := func(nodes ...*daemon.Swarm) checkF {
         return func(c *check.C) (interface{}, check.CommentInterface) {
             // clear these out before each run
             leader = nil
             followers = nil
             for _, d := range nodes {
-                if d.getNode(c, d.NodeID).ManagerStatus.Leader {
+                if d.GetNode(c, d.NodeID).ManagerStatus.Leader {
                    leader = d
                 } else {
                    followers = append(followers, d)
@@ -651,7 +652,7 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
                 return false, check.Commentf("no leader elected")
             }
 
-            return true, check.Commentf("elected %v", leader.id)
+            return true, check.Commentf("elected %v", leader.ID())
         }
     }
 
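The `checkLeader` closure above is the suite's standard `checkF`/`waitAndAssert` idiom: a function returning a value plus an optional comment, polled until the checker passes or the timeout elapses. Stripped of the go-check plumbing, the underlying pattern is just (names invented for illustration):

```go
package helpers

import "time"

// pollUntil retries cond at a fixed interval until it reports success or the
// timeout elapses, returning whether it ever succeeded.
func pollUntil(timeout, interval time.Duration, cond func() bool) bool {
	deadline := time.Now().Add(timeout)
	for {
		if cond() {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(interval)
	}
}
```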
@@ -685,21 +686,21 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
     d2 := s.AddDaemon(c, true, true)
     d3 := s.AddDaemon(c, true, true)
 
-    d1.createService(c, simpleTestService)
+    d1.CreateService(c, simpleTestService)
 
     c.Assert(d2.Stop(), checker.IsNil)
 
     // make sure there is a leader
-    waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil)
+    waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
 
-    d1.createService(c, simpleTestService, func(s *swarm.Service) {
+    d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
         s.Spec.Name = "top1"
     })
 
     c.Assert(d3.Stop(), checker.IsNil)
 
     // make sure there is a leader
-    waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil)
+    waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
 
     var service swarm.Service
     simpleTestService(&service)
@@ -711,9 +712,9 @@ func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
|
| 711 | 711 |
c.Assert(d2.Start(), checker.IsNil) |
| 712 | 712 |
|
| 713 | 713 |
// make sure there is a leader |
| 714 |
- waitAndAssert(c, defaultReconciliationTimeout, d1.checkLeader, checker.IsNil) |
|
| 714 |
+ waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil) |
|
| 715 | 715 |
|
| 716 |
- d1.createService(c, simpleTestService, func(s *swarm.Service) {
|
|
| 716 |
+ d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
|
|
| 717 | 717 |
s.Spec.Name = "top3" |
| 718 | 718 |
}) |
| 719 | 719 |
} |
| ... | ... |
@@ -723,12 +724,12 @@ func (s *DockerSwarmSuite) TestAPISwarmListNodes(c *check.C) {
|
| 723 | 723 |
d2 := s.AddDaemon(c, true, false) |
| 724 | 724 |
d3 := s.AddDaemon(c, true, false) |
| 725 | 725 |
|
| 726 |
- nodes := d1.listNodes(c) |
|
| 726 |
+ nodes := d1.ListNodes(c) |
|
| 727 | 727 |
c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes))
|
| 728 | 728 |
|
| 729 | 729 |
loop0: |
| 730 | 730 |
for _, n := range nodes {
|
| 731 |
- for _, d := range []*SwarmDaemon{d1, d2, d3} {
|
|
| 731 |
+ for _, d := range []*daemon.Swarm{d1, d2, d3} {
|
|
| 732 | 732 |
if n.ID == d.NodeID {
|
| 733 | 733 |
continue loop0 |
| 734 | 734 |
} |
| ... | ... |
@@ -740,13 +741,13 @@ loop0: |
| 740 | 740 |
func (s *DockerSwarmSuite) TestAPISwarmNodeUpdate(c *check.C) {
|
| 741 | 741 |
d := s.AddDaemon(c, true, true) |
| 742 | 742 |
|
| 743 |
- nodes := d.listNodes(c) |
|
| 743 |
+ nodes := d.ListNodes(c) |
|
| 744 | 744 |
|
| 745 |
- d.updateNode(c, nodes[0].ID, func(n *swarm.Node) {
|
|
| 745 |
+ d.UpdateNode(c, nodes[0].ID, func(n *swarm.Node) {
|
|
| 746 | 746 |
n.Spec.Availability = swarm.NodeAvailabilityPause |
| 747 | 747 |
}) |
| 748 | 748 |
|
| 749 |
- n := d.getNode(c, nodes[0].ID) |
|
| 749 |
+ n := d.GetNode(c, nodes[0].ID) |
|
| 750 | 750 |
c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) |
| 751 | 751 |
} |
| 752 | 752 |
|
| ... | ... |
@@ -756,17 +757,17 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) {
|
| 756 | 756 |
d2 := s.AddDaemon(c, true, false) |
| 757 | 757 |
_ = s.AddDaemon(c, true, false) |
| 758 | 758 |
|
| 759 |
- nodes := d1.listNodes(c) |
|
| 759 |
+ nodes := d1.ListNodes(c) |
|
| 760 | 760 |
c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes))
|
| 761 | 761 |
|
| 762 | 762 |
// Getting the info so we can take the NodeID |
| 763 |
- d2Info, err := d2.info() |
|
| 763 |
+ d2Info, err := d2.SwarmInfo() |
|
| 764 | 764 |
c.Assert(err, checker.IsNil) |
| 765 | 765 |
|
| 766 | 766 |
// forceful removal of d2 should work |
| 767 |
- d1.removeNode(c, d2Info.NodeID, true) |
|
| 767 |
+ d1.RemoveNode(c, d2Info.NodeID, true) |
|
| 768 | 768 |
|
| 769 |
- nodes = d1.listNodes(c) |
|
| 769 |
+ nodes = d1.ListNodes(c) |
|
| 770 | 770 |
c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes))
|
| 771 | 771 |
|
| 772 | 772 |
// Restart the node that was removed |
| ... | ... |
@@ -777,7 +778,7 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeRemove(c *check.C) {
|
| 777 | 777 |
time.Sleep(1 * time.Second) |
| 778 | 778 |
|
| 779 | 779 |
// Make sure the node didn't rejoin |
| 780 |
- nodes = d1.listNodes(c) |
|
| 780 |
+ nodes = d1.ListNodes(c) |
|
| 781 | 781 |
c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes))
|
| 782 | 782 |
} |
| 783 | 783 |
|
| ... | ... |
@@ -789,49 +790,49 @@ func (s *DockerSwarmSuite) TestAPISwarmNodeDrainPause(c *check.C) {
|
| 789 | 789 |
|
| 790 | 790 |
// start a service, expect balanced distribution |
| 791 | 791 |
instances := 8 |
| 792 |
- id := d1.createService(c, simpleTestService, setInstances(instances)) |
|
| 792 |
+ id := d1.CreateService(c, simpleTestService, setInstances(instances)) |
|
| 793 | 793 |
|
| 794 |
- waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) |
|
| 795 |
- waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) |
|
| 796 |
- waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) |
|
| 794 |
+ waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) |
|
| 795 |
+ waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) |
|
| 796 |
+ waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) |
|
| 797 | 797 |
|
| 798 | 798 |
// drain d2, all containers should move to d1 |
| 799 |
- d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
|
|
| 799 |
+ d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
|
|
| 800 | 800 |
n.Spec.Availability = swarm.NodeAvailabilityDrain |
| 801 | 801 |
}) |
| 802 |
- waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) |
|
| 803 |
- waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) |
|
| 802 |
+ waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances) |
|
| 803 |
+ waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0) |
|
| 804 | 804 |
|
| 805 | 805 |
// set d2 back to active |
| 806 |
- d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
|
|
| 806 |
+ d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
|
|
| 807 | 807 |
n.Spec.Availability = swarm.NodeAvailabilityActive |
| 808 | 808 |
}) |
| 809 | 809 |
|
| 810 | 810 |
instances = 1 |
| 811 |
- d1.updateService(c, d1.getService(c, id), setInstances(instances)) |
|
| 811 |
+ d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) |
|
| 812 | 812 |
|
| 813 |
- waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) |
|
| 813 |
+ waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) |
|
| 814 | 814 |
|
| 815 | 815 |
instances = 8 |
| 816 |
- d1.updateService(c, d1.getService(c, id), setInstances(instances)) |
|
| 816 |
+ d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) |
|
| 817 | 817 |
|
| 818 | 818 |
// drained node first so we don't get any old containers |
| 819 |
- waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) |
|
| 820 |
- waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) |
|
| 821 |
- waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) |
|
| 819 |
+ waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.GreaterThan, 0) |
|
| 820 |
+ waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.GreaterThan, 0) |
|
| 821 |
+ waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances) |
|
| 822 | 822 |
|
| 823 |
- d2ContainerCount := len(d2.activeContainers()) |
|
| 823 |
+ d2ContainerCount := len(d2.ActiveContainers()) |
|
| 824 | 824 |
|
| 825 | 825 |
// set d2 to paused, scale service up, only d1 gets new tasks |
| 826 |
- d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
|
|
| 826 |
+ d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
|
|
| 827 | 827 |
n.Spec.Availability = swarm.NodeAvailabilityPause |
| 828 | 828 |
}) |
| 829 | 829 |
|
| 830 | 830 |
instances = 14 |
| 831 |
- d1.updateService(c, d1.getService(c, id), setInstances(instances)) |
|
| 831 |
+ d1.UpdateService(c, d1.GetService(c, id), setInstances(instances)) |
|
| 832 | 832 |
|
| 833 |
- waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount) |
|
| 834 |
- waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) |
|
| 833 |
+ waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances-d2ContainerCount) |
|
| 834 |
+ waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, d2ContainerCount) |
|
| 835 | 835 |
|
| 836 | 836 |
} |
| 837 | 837 |
|
| ... | ... |
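
The drain/pause test above repeatedly uses reducedCheck(sumAsIntegers, ...) to fold several per-daemon counters into one value to assert on, for example total containers across the cluster. A standalone sketch of that reduction, with simplified signatures assumed for illustration:

package main

import "fmt"

type checkF func() int

// reducedCheck returns a checkF whose value is reduce() applied to the
// values produced by all the individual checks.
func reducedCheck(reduce func(...int) int, checks ...checkF) checkF {
	return func() int {
		vals := make([]int, 0, len(checks))
		for _, check := range checks {
			vals = append(vals, check())
		}
		return reduce(vals...)
	}
}

func sumAsIntegers(vals ...int) int {
	total := 0
	for _, v := range vals {
		total += v
	}
	return total
}

func main() {
	d1Count := func() int { return 5 }
	d2Count := func() int { return 3 }
	fmt.Println(reducedCheck(sumAsIntegers, d1Count, d2Count)()) // 8
}
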
@@ -839,18 +840,18 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
     instances := 2
-    d.createService(c, simpleTestService, setInstances(instances))
+    d.CreateService(c, simpleTestService, setInstances(instances))
 
     id, err := d.Cmd("run", "-d", "busybox", "top")
     c.Assert(err, checker.IsNil)
     id = strings.TrimSpace(id)
 
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1)
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)
 
     c.Assert(d.Leave(false), checker.NotNil)
     c.Assert(d.Leave(true), checker.IsNil)
 
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1)
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
 
     id2, err := d.Cmd("ps", "-q")
     c.Assert(err, checker.IsNil)
@@ -873,13 +874,13 @@ func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
     c.Assert(err, check.NotNil)
     c.Assert(err.Error(), checker.Contains, "Timeout was reached")
 
-    info, err := d2.info()
+    info, err := d2.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
 
     c.Assert(d2.Leave(true), checker.IsNil)
 
-    waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1)
+    waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
 
     id2, err := d2.Cmd("ps", "-q")
     c.Assert(err, checker.IsNil)
@@ -896,12 +897,12 @@ func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
     c.Assert(err, check.NotNil)
     c.Assert(err.Error(), checker.Contains, "Timeout was reached")
 
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
 
     c.Assert(d.Stop(), checker.IsNil)
     c.Assert(d.Start(), checker.IsNil)
 
-    info, err := d.info()
+    info, err := d.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 }
@@ -910,43 +911,43 @@ func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
     d1 := s.AddDaemon(c, true, true)
 
     instances := 2
-    id := d1.createService(c, simpleTestService, setInstances(instances))
+    id := d1.CreateService(c, simpleTestService, setInstances(instances))
 
-    d1.getService(c, id)
+    d1.GetService(c, id)
     d1.Stop()
     d1.Start()
-    d1.getService(c, id)
+    d1.GetService(c, id)
 
     d2 := s.AddDaemon(c, true, true)
-    d2.getService(c, id)
+    d2.GetService(c, id)
     d2.Stop()
     d2.Start()
-    d2.getService(c, id)
+    d2.GetService(c, id)
 
     d3 := s.AddDaemon(c, true, true)
-    d3.getService(c, id)
+    d3.GetService(c, id)
     d3.Stop()
     d3.Start()
-    d3.getService(c, id)
+    d3.GetService(c, id)
 
     d3.Kill()
     time.Sleep(1 * time.Second) // time to handle signal
     d3.Start()
-    d3.getService(c, id)
+    d3.GetService(c, id)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
     instances := 2
-    id := d.createService(c, simpleTestService, setInstances(instances))
+    id := d.CreateService(c, simpleTestService, setInstances(instances))
 
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
-    containers := d.activeContainers()
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
+    containers := d.ActiveContainers()
     instances = 4
-    d.updateService(c, d.getService(c, id), setInstances(instances))
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
-    containers2 := d.activeContainers()
+    d.UpdateService(c, d.GetService(c, id), setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
+    containers2 := d.ActiveContainers()
 
 loop0:
     for _, c1 := range containers {
@@ -982,15 +983,15 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
     d2 := s.AddDaemon(c, true, true)
 
     instances := 2
-    id := d1.createService(c, simpleTestService, setInstances(instances))
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)
+    id := d1.CreateService(c, simpleTestService, setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)
 
     // drain d2, all containers should move to d1
-    d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
+    d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
         n.Spec.Availability = swarm.NodeAvailabilityDrain
     })
-    waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances)
-    waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0)
+    waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)
 
     c.Assert(d2.Stop(), checker.IsNil)
 
@@ -999,18 +1000,18 @@ func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
         Spec: swarm.Spec{},
     }), checker.IsNil)
 
-    waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
 
     d3 := s.AddDaemon(c, true, true)
-    info, err := d3.info()
+    info, err := d3.SwarmInfo()
     c.Assert(err, checker.IsNil)
     c.Assert(info.ControlAvailable, checker.True)
     c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
     instances = 4
-    d3.updateService(c, d3.getService(c, id), setInstances(instances))
+    d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))
 
-    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 }
 
 func simpleTestService(s *swarm.Service) {
@@ -1064,7 +1065,7 @@ func serviceForUpdate(s *swarm.Service) {
     s.Spec.Name = "updatetest"
 }
 
-func setInstances(replicas int) serviceConstructor {
+func setInstances(replicas int) daemon.ServiceConstructor {
     ureplicas := uint64(replicas)
     return func(s *swarm.Service) {
         s.Spec.Mode = swarm.ServiceMode{
@@ -1075,31 +1076,31 @@ func setInstances(replicas int) serviceConstructor {
     }
 }
 
-func setImage(image string) serviceConstructor {
+func setImage(image string) daemon.ServiceConstructor {
     return func(s *swarm.Service) {
         s.Spec.TaskTemplate.ContainerSpec.Image = image
     }
 }
 
-func setFailureAction(failureAction string) serviceConstructor {
+func setFailureAction(failureAction string) daemon.ServiceConstructor {
     return func(s *swarm.Service) {
         s.Spec.UpdateConfig.FailureAction = failureAction
     }
 }
 
-func setMaxFailureRatio(maxFailureRatio float32) serviceConstructor {
+func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor {
     return func(s *swarm.Service) {
         s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
     }
 }
 
-func setParallelism(parallelism uint64) serviceConstructor {
+func setParallelism(parallelism uint64) daemon.ServiceConstructor {
    return func(s *swarm.Service) {
         s.Spec.UpdateConfig.Parallelism = parallelism
     }
 }
 
-func setConstraints(constraints []string) serviceConstructor {
+func setConstraints(constraints []string) daemon.ServiceConstructor {
     return func(s *swarm.Service) {
         if s.Spec.TaskTemplate.Placement == nil {
             s.Spec.TaskTemplate.Placement = &swarm.Placement{}
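
These setters now return daemon.ServiceConstructor, the functional-options pattern: each option mutates a service spec, and CreateService applies them in order on top of a default. A standalone sketch of the mechanism (the ServiceConstructor name matches the diff; everything else is simplified for illustration):

package main

import "fmt"

type Service struct {
	Name     string
	Replicas uint64
}

// ServiceConstructor mutates a service spec before creation.
type ServiceConstructor func(*Service)

func setInstances(replicas int) ServiceConstructor {
	return func(s *Service) { s.Replicas = uint64(replicas) }
}

// createService builds a default spec, then applies every constructor.
func createService(constructors ...ServiceConstructor) Service {
	s := Service{Name: "top"}
	for _, construct := range constructors {
		construct(&s)
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", createService(setInstances(8))) // {Name:top Replicas:8}
}

Exporting the option type is what makes the move to a separate package work: callers in the test package can keep defining their own local options (like setInstances) as long as they satisfy the exported function type.
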
@@ -1114,7 +1115,7 @@ func setGlobalMode(s *swarm.Service) {
     }
 }
 
-func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) {
+func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) {
     var totalMCount, totalWCount int
 
     for _, d := range cl {
@@ -1125,7 +1126,7 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) {
 
         // check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
         checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
-            info, err = d.info()
+            info, err = d.SwarmInfo()
             return err, check.Commentf("cluster not ready in time")
         }
         waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil)
@@ -1138,12 +1139,12 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) {
         totalMCount++
         var mCount, wCount int
 
-        for _, n := range d.listNodes(c) {
+        for _, n := range d.ListNodes(c) {
            waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
                 if n.Status.State == swarm.NodeStateReady {
                     return true, nil
                 }
-                nn := d.getNode(c, n.ID)
+                nn := d.GetNode(c, n.ID)
                 n = *nn
                 return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID)
             }
@@ -1153,7 +1154,7 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) {
                 if n.Spec.Availability == swarm.NodeAvailabilityActive {
                     return true, nil
                 }
-                nn := d.getNode(c, n.ID)
+                nn := d.GetNode(c, n.ID)
                 n = *nn
                 return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID)
             }
@@ -1181,10 +1182,10 @@ func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) {
 func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
     mCount, wCount := 5, 1
 
-    var nodes []*SwarmDaemon
+    var nodes []*daemon.Swarm
     for i := 0; i < mCount; i++ {
         manager := s.AddDaemon(c, true, true)
-        info, err := manager.info()
+        info, err := manager.SwarmInfo()
         c.Assert(err, checker.IsNil)
         c.Assert(info.ControlAvailable, checker.True)
         c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -1193,7 +1194,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
 
     for i := 0; i < wCount; i++ {
         worker := s.AddDaemon(c, true, false)
-        info, err := worker.info()
+        info, err := worker.SwarmInfo()
         c.Assert(err, checker.IsNil)
         c.Assert(info.ControlAvailable, checker.False)
         c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
@@ -1207,13 +1208,14 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
     errs := make(chan error, len(nodes))
 
     for _, d := range nodes {
-        go func(daemon *SwarmDaemon) {
+        go func(daemon *daemon.Swarm) {
             defer wg.Done()
             if err := daemon.Stop(); err != nil {
                 errs <- err
             }
+            // FIXME(vdemeester) This is duplicated…
             if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
-                daemon.root = filepath.Dir(daemon.root)
+                daemon.Root = filepath.Dir(daemon.Root)
             }
         }(d)
     }
@@ -1231,7 +1233,7 @@ func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
     errs := make(chan error, len(nodes))
 
     for _, d := range nodes {
-        go func(daemon *daemon.Swarm) {
+        go func(daemon *daemon.Swarm) {
             defer wg.Done()
             if err := daemon.Start("--iptables=false"); err != nil {
                 errs <- err
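
A scoping aside on go func(daemon *daemon.Swarm) above: this compiles because the type *daemon.Swarm in the signature is resolved in the enclosing scope before the parameter name binds, but inside the closure the identifier daemon now names the value, so the daemon package itself is unreachable in that body. A small demonstration of the same rule (self-contained; no daemon package imported here):

package main

import "fmt"

type Swarm struct{ ID string }

// stop mirrors the goroutine parameter above: naming a parameter after an
// imported package is legal, but inside the body that package identifier
// would be shadowed by the value.
func stop(daemon *Swarm) {
	// `daemon` is the value here; daemon.New(...) would no longer mean a
	// package call in this scope. fmt is unaffected.
	fmt.Println("stopping daemon on node", daemon.ID)
}

func main() {
	stop(&Swarm{ID: "node-1"})
}
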
@@ -1252,10 +1254,10 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
     instances := 2
-    id := d.createService(c, simpleTestService, setInstances(instances))
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+    id := d.CreateService(c, simpleTestService, setInstances(instances))
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 
-    service := d.getService(c, id)
+    service := d.GetService(c, id)
     instances = 5
 
     setInstances(instances)(service)
@@ -1263,13 +1265,13 @@ func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
     status, out, err := d.SockRequest("POST", url, service.Spec)
     c.Assert(err, checker.IsNil)
     c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
-    waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances)
+    waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmSecretsEmptyList(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
-    secrets := d.listSecrets(c)
+    secrets := d.ListSecrets(c)
     c.Assert(secrets, checker.NotNil)
     c.Assert(len(secrets), checker.Equals, 0, check.Commentf("secrets: %#v", secrets))
 }
@@ -1278,7 +1280,7 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
     testName := "test_secret"
-    id := d.createSecret(c, swarm.SecretSpec{
+    id := d.CreateSecret(c, swarm.SecretSpec{
         swarm.Annotations{
             Name: testName,
         },
@@ -1286,7 +1288,7 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsCreate(c *check.C) {
     })
     c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
 
-    secrets := d.listSecrets(c)
+    secrets := d.ListSecrets(c)
     c.Assert(len(secrets), checker.Equals, 1, check.Commentf("secrets: %#v", secrets))
     name := secrets[0].Spec.Annotations.Name
     c.Assert(name, checker.Equals, testName, check.Commentf("secret: %s", name))
@@ -1296,7 +1298,7 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) {
     d := s.AddDaemon(c, true, true)
 
     testName := "test_secret"
-    id := d.createSecret(c, swarm.SecretSpec{
+    id := d.CreateSecret(c, swarm.SecretSpec{
         swarm.Annotations{
             Name: testName,
         },
@@ -1304,10 +1306,10 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) {
     })
     c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
 
-    secret := d.getSecret(c, id)
+    secret := d.GetSecret(c, id)
     c.Assert(secret.ID, checker.Equals, id, check.Commentf("secret: %v", secret))
 
-    d.deleteSecret(c, secret.ID)
+    d.DeleteSecret(c, secret.ID)
     status, out, err := d.SockRequest("GET", "/secrets/"+id, nil)
     c.Assert(err, checker.IsNil)
     c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf("secret delete: %s", string(out)))
@@ -9,6 +9,7 @@ import (
     "strings"
 
     "github.com/docker/docker/api"
+    "github.com/docker/docker/pkg/integration"
     "github.com/docker/docker/pkg/integration/checker"
     icmd "github.com/docker/docker/pkg/integration/cmd"
     "github.com/go-check/check"
@@ -78,7 +79,7 @@ func (s *DockerSuite) TestAPIErrorJSON(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError)
     c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json")
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container")
 }
@@ -91,7 +92,7 @@ func (s *DockerSuite) TestAPIErrorPlainText(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError)
     c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain")
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container")
 }
@@ -102,7 +103,7 @@ func (s *DockerSuite) TestAPIErrorNotFoundJSON(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound)
     c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json")
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(getErrorMessage(c, b), checker.Equals, "page not found")
 }
@@ -112,7 +113,7 @@ func (s *DockerSuite) TestAPIErrorNotFoundPlainText(c *check.C) {
     c.Assert(err, checker.IsNil)
     c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound)
    c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain")
-    b, err := readBody(body)
+    b, err := integration.ReadBody(body)
     c.Assert(err, checker.IsNil)
     c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found")
 }
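
The package-local readBody helper moves to pkg/integration as the exported ReadBody. Judging only from the call sites above, it presumably drains and closes the response body in one step; a sketch of such a helper under that assumption:

package integration

import (
	"io"
	"io/ioutil"
)

// ReadBody reads all of b and closes it, returning the bytes.
// ioutil matches the era of this codebase; io.ReadAll on modern Go.
func ReadBody(b io.ReadCloser) ([]byte, error) {
	defer b.Close()
	return ioutil.ReadAll(b)
}
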
@@ -6,6 +6,7 @@ import (
     "fmt"
     "strings"
 
+    "github.com/docker/docker/integration-cli/daemon"
     "github.com/docker/docker/pkg/integration/checker"
     "github.com/go-check/check"
 )
@@ -26,12 +27,14 @@ func init() {
 
 type DockerAuthzV2Suite struct {
     ds *DockerSuite
-    d  *Daemon
+    d  *daemon.Daemon
 }
 
 func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) {
     testRequires(c, DaemonIsLinux, Network)
-    s.d = NewDaemon(c)
+    s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+        Experimental: experimentalDaemon,
+    })
     c.Assert(s.d.Start(), check.IsNil)
 }
 
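
NewDaemon(c) becomes daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{...}) throughout: the extracted package can no longer read the test suite's globals, so the binaries and options are passed in explicitly. A sketch of the assumed constructor shape (the real one takes *check.C; Config fields beyond Experimental are guesses):

package daemon

import "testing"

// Config holds the options the tests previously reached via package globals.
type Config struct {
	Experimental bool
}

// Daemon drives one dockerd process under test.
type Daemon struct {
	dockerBinary  string
	dockerdBinary string
	experimental  bool
}

// New wires binaries and config in explicitly instead of relying on
// package-level state in the calling test package.
func New(t *testing.T, dockerBinary, dockerdBinary string, config Config) *Daemon {
	return &Daemon{
		dockerBinary:  dockerBinary,
		dockerdBinary: dockerdBinary,
		experimental:  config.Experimental,
	}
}
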
@@ -22,6 +22,7 @@ import (
     "net/http/httputil"
     "net/url"
 
+    "github.com/docker/docker/integration-cli/daemon"
     "github.com/docker/docker/pkg/authorization"
     "github.com/docker/docker/pkg/integration/checker"
     "github.com/docker/docker/pkg/plugins"
@@ -48,7 +49,7 @@ func init() {
 type DockerAuthzSuite struct {
     server *httptest.Server
     ds     *DockerSuite
-    d      *Daemon
+    d      *daemon.Daemon
     ctrl   *authorizationController
 }
 
@@ -63,7 +64,9 @@ type authorizationController struct {
 }
 
 func (s *DockerAuthzSuite) SetUpTest(c *check.C) {
-    s.d = NewDaemon(c)
+    s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
+        Experimental: experimentalDaemon,
+    })
     s.ctrl = &authorizationController{}
 }
 
@@ -285,7 +288,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) {
     s.ctrl.reqRes.Allow = false
     s.ctrl.resRes.Msg = unauthorizedMessage
 
-    daemonURL, err := url.Parse(s.d.sock())
+    daemonURL, err := url.Parse(s.d.Sock())
 
     conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
     c.Assert(err, check.IsNil)
@@ -328,7 +331,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
 
     startTime := strconv.FormatInt(daemonTime(c).Unix(), 10)
     // Add another command to to enable event pipelining
-    eventsCmd := exec.Command(dockerBinary, "--host", s.d.sock(), "events", "--since", startTime)
+    eventsCmd := exec.Command(dockerBinary, "--host", s.d.Sock(), "events", "--since", startTime)
     stdout, err := eventsCmd.StdoutPipe()
     if err != nil {
         c.Assert(err, check.IsNil)
@@ -349,7 +352,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
     out, err := s.d.Cmd("run", "-d", "busybox", "top")
     c.Assert(err, check.IsNil, check.Commentf(out))
     containerID := strings.TrimSpace(out)
-    c.Assert(s.d.waitRun(containerID), checker.IsNil)
+    c.Assert(s.d.WaitRun(containerID), checker.IsNil)
 
     events := map[string]chan bool{
         "create": make(chan bool, 1),
@@ -451,7 +454,7 @@ func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) {
     s.ctrl.resRes.Allow = true
     c.Assert(s.d.LoadBusybox(), check.IsNil)
 
-    daemonURL, err := url.Parse(s.d.sock())
+    daemonURL, err := url.Parse(s.d.Sock())
 
     conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
     c.Assert(err, check.IsNil)
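
s.d.sock() becomes the exported s.d.Sock(). Presumably it returns the daemon's client-facing address, a unix:// URL, given the url.Parse and net.DialTimeout(scheme, path) calls above. A sketch under that assumption (the path layout is invented for illustration):

package daemon

import "fmt"

type Daemon struct {
	id string
}

// Sock returns the address clients should dial to reach this test daemon.
// The exact socket path is an assumption, not taken from the diff.
func (d *Daemon) Sock() string {
	return fmt.Sprintf("unix:///tmp/docker-integration/%s.sock", d.id)
}
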
@@ -636,7 +636,7 @@ func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
     // digest verification for the target layer digest.
 
     // Remove distribution cache to force a re-pull of the blobs
-    if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil {
+    if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.StorageDriver(), "distribution")); err != nil {
         c.Fatalf("error clearing distribution cache: %v", err)
     }
 
@@ -679,7 +679,7 @@ func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
     // digest verification for the target layer digest.
 
     // Remove distribution cache to force a re-pull of the blobs
-    if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil {
+    if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.StorageDriver(), "distribution")); err != nil {
         c.Fatalf("error clearing distribution cache: %v", err)
     }
 
@@ -133,7 +133,7 @@ func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) {
         }
     }()
 
-    if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil {
+    if err := s.d.Interrupt(); err != nil {
         c.Fatalf("Could not kill daemon: %v", err)
     }
 
@@ -166,12 +166,12 @@ func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) {
         }
     }()
 
-    if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil {
+    if err := s.d.Interrupt(); err != nil {
         c.Fatalf("Could not kill daemon: %v", err)
     }
 
     for {
-        if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH {
+        if err := syscall.Kill(s.d.Pid(), 0); err == syscall.ESRCH {
             break
         }
     }
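
Direct pokes at s.d.cmd.Process give way to Interrupt(), Signal() and Pid(): the exec.Cmd handle stays unexported and the package exposes only the process-control surface the tests need. The diff confirms these method names; the bodies below are the obvious thin wrappers, written as a sketch:

package daemon

import (
	"os"
	"os/exec"
)

type Daemon struct {
	cmd *exec.Cmd // stays unexported after the extraction
}

// Signal forwards sig to the daemon process.
func (d *Daemon) Signal(sig os.Signal) error {
	return d.cmd.Process.Signal(sig)
}

// Interrupt is shorthand for the common SIGINT case.
func (d *Daemon) Interrupt() error {
	return d.Signal(os.Interrupt)
}

// Pid exposes the process id for liveness checks like syscall.Kill(pid, 0).
func (d *Daemon) Pid() int {
	return d.cmd.Process.Pid
}
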
@@ -20,6 +20,7 @@ import (
     "syscall"
     "time"
 
+    "github.com/docker/docker/integration-cli/daemon"
     "github.com/docker/docker/pkg/integration/checker"
     icmd "github.com/docker/docker/pkg/integration/cmd"
     "github.com/docker/docker/pkg/mount"
@@ -171,7 +172,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) {
     c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out))
 
     // wait test1 to stop
-    hostArgs := []string{"--host", s.d.sock()}
+    hostArgs := []string{"--host", s.d.Sock()}
     err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...)
     c.Assert(err, checker.IsNil, check.Commentf("test1 should exit but not"))
 
@@ -205,7 +206,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) {
     testRequires(c, Devicemapper)
     c.Assert(s.d.Start(), check.IsNil)
 
-    oldBasesizeBytes := s.d.getBaseDeviceSize(c)
+    oldBasesizeBytes := s.d.GetBaseDeviceSize(c)
     var newBasesizeBytes int64 = 1073741824 //1GB in bytes
 
     if newBasesizeBytes < oldBasesizeBytes {
@@ -220,7 +221,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) {
     testRequires(c, Devicemapper)
     c.Assert(s.d.Start(), check.IsNil)
 
-    oldBasesizeBytes := s.d.getBaseDeviceSize(c)
+    oldBasesizeBytes := s.d.GetBaseDeviceSize(c)
 
     var newBasesizeBytes int64 = 53687091200 //50GB in bytes
 
@@ -231,7 +232,7 @@ func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) {
     err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes))
     c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err))
 
-    basesizeAfterRestart := s.d.getBaseDeviceSize(c)
+    basesizeAfterRestart := s.d.GetBaseDeviceSize(c)
     newBasesize, err := convertBasesize(newBasesizeBytes)
     c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err))
     c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set"))
@@ -466,7 +467,8 @@ func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) {
     if err := s.d.Start("--log-level=debug"); err != nil {
         c.Fatal(err)
     }
-    content, _ := ioutil.ReadFile(s.d.logFile.Name())
+    content, err := s.d.ReadLogFile()
+    c.Assert(err, checker.IsNil)
     if !strings.Contains(string(content), `level=debug`) {
         c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content))
     }
@@ -477,7 +479,8 @@ func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) {
     if err := s.d.Start("--log-level=fatal"); err != nil {
         c.Fatal(err)
     }
-    content, _ := ioutil.ReadFile(s.d.logFile.Name())
+    content, err := s.d.ReadLogFile()
+    c.Assert(err, checker.IsNil)
     if strings.Contains(string(content), `level=debug`) {
         c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content))
     }
@@ -487,7 +490,8 @@ func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) {
     if err := s.d.Start("-D"); err != nil {
         c.Fatal(err)
     }
-    content, _ := ioutil.ReadFile(s.d.logFile.Name())
+    content, err := s.d.ReadLogFile()
+    c.Assert(err, checker.IsNil)
     if !strings.Contains(string(content), `level=debug`) {
         c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content))
     }
@@ -497,7 +501,8 @@ func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) {
     if err := s.d.Start("--debug"); err != nil {
         c.Fatal(err)
     }
-    content, _ := ioutil.ReadFile(s.d.logFile.Name())
+    content, err := s.d.ReadLogFile()
+    c.Assert(err, checker.IsNil)
     if !strings.Contains(string(content), `level=debug`) {
         c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content))
     }
@@ -507,7 +512,8 @@ func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) {
     if err := s.d.Start("--debug", "--log-level=fatal"); err != nil {
         c.Fatal(err)
     }
-    content, _ := ioutil.ReadFile(s.d.logFile.Name())
+    content, err := s.d.ReadLogFile()
+    c.Assert(err, checker.IsNil)
     if !strings.Contains(string(content), `level=debug`) {
         c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content))
     }
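
ioutil.ReadFile(s.d.logFile.Name()) becomes s.d.ReadLogFile(), which also lets the tests assert the error instead of discarding it with the blank identifier. Presumably the method is a thin wrapper along these lines (sketch; the field name is assumed):

package daemon

import (
	"io/ioutil"
	"os"
)

type Daemon struct {
	logFile *os.File
}

// ReadLogFile returns the whole daemon log captured so far.
func (d *Daemon) ReadLogFile() ([]byte, error) {
	return ioutil.ReadFile(d.logFile.Name())
}
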
@@ -636,7 +642,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) {
     _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top")
     c.Assert(err, check.IsNil)
 
-    containerIP := d.findContainerIP("ExtContainer")
+    containerIP := d.FindContainerIP("ExtContainer")
     ip := net.ParseIP(containerIP)
     c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
         check.Commentf("Container IP-Address must be in the same subnet range : %s",
@@ -731,7 +737,7 @@ func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) {
     out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top")
     c.Assert(err, check.IsNil)
 
-    containerIP := d.findContainerIP("test")
+    containerIP := d.FindContainerIP("test")
     ip = net.ParseIP(containerIP)
     c.Assert(bridgeIPNet.Contains(ip), check.Equals, true,
         check.Commentf("Container IP-Address must be in the same subnet range : %s",
@@ -1041,8 +1047,8 @@ func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) {
     _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top")
     c.Assert(err, check.IsNil)
 
-    childIP := s.d.findContainerIP("child")
-    parentIP := s.d.findContainerIP("parent")
+    childIP := s.d.FindContainerIP("child")
+    parentIP := s.d.FindContainerIP("parent")
 
     sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"}
     destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"}
@@ -1140,10 +1146,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) {
 
     out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline")
     c.Assert(err, check.IsNil, check.Commentf(out))
-    id, err := s.d.getIDByName("test")
+    id, err := s.d.GetIDByName("test")
     c.Assert(err, check.IsNil)
 
-    logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log")
+    logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log")
 
     if _, err := os.Stat(logPath); err != nil {
         c.Fatal(err)
@@ -1182,10 +1188,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) {
     if err != nil {
         c.Fatal(out, err)
     }
-    id, err := s.d.getIDByName("test")
+    id, err := s.d.GetIDByName("test")
     c.Assert(err, check.IsNil)
 
-    logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log")
+    logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log")
 
     if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
         c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
@@ -1201,10 +1207,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) {
     if err != nil {
         c.Fatal(out, err)
     }
-    id, err := s.d.getIDByName("test")
+    id, err := s.d.GetIDByName("test")
     c.Assert(err, check.IsNil)
 
-    logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log")
+    logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log")
 
     if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) {
         c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err)
@@ -1220,10 +1226,10 @@ func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) {
     if err != nil {
         c.Fatal(out, err)
     }
-    id, err := s.d.getIDByName("test")
+    id, err := s.d.GetIDByName("test")
     c.Assert(err, check.IsNil)
 
-    logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log")
+    logPath := filepath.Join(s.d.Root, "containers", id, id+"-json.log")
 
     if _, err := os.Stat(logPath); err != nil {
         c.Fatal(err)
@@ -1340,7 +1346,8 @@ func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) {
         c.Fatalf("It should not be successful to start daemon with wrong key: %v", err)
     }
 
-    content, _ := ioutil.ReadFile(s.d.logFile.Name())
+    content, err := s.d.ReadLogFile()
+    c.Assert(err, checker.IsNil)
 
     if !strings.Contains(string(content), "Public Key ID does not match") {
         c.Fatal("Missing KeyID message from daemon logs")
@@ -1496,10 +1503,10 @@ func (s *DockerDaemonSuite) TestHTTPSInfoRogueServerCert(c *check.C) {
     }
 }
 
-func pingContainers(c *check.C, d *Daemon, expectFailure bool) {
+func pingContainers(c *check.C, d *daemon.Daemon, expectFailure bool) {
     var dargs []string
     if d != nil {
-        dargs = []string{"--host", d.sock()}
+        dargs = []string{"--host", d.Sock()}
     }
 
     args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top")
@@ -1523,7 +1530,8 @@ func pingContainers(c *check.C, d *Daemon, expectFailure bool) {
 func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) {
     c.Assert(s.d.StartWithBusybox(), check.IsNil)
 
-    socket := filepath.Join(s.d.folder, "docker.sock")
+    // socket := filepath.Join(s.d.folder, "docker.sock")
+    socket := s.d.Sock()
 
     out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox")
     c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
@@ -1538,12 +1546,12 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) {
     out, err := s.d.Cmd("run", "-d", "busybox", "top")
     c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
     id := strings.TrimSpace(out)
-    c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil)
+    c.Assert(s.d.Signal(os.Kill), check.IsNil)
     mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
     c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
 
     // container mounts should exist even after daemon has crashed.
-    comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+    comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
     c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
 
     // kill the container
@@ -1560,7 +1568,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) {
     // Now, container mounts should be gone.
     mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
     c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
-    comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+    comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
     c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
 }
 
@@ -1573,14 +1581,14 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) {
     id := strings.TrimSpace(out)
 
     // Send SIGINT and daemon should clean up
-    c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil)
+    c.Assert(s.d.Signal(os.Interrupt), check.IsNil)
     // Wait for the daemon to stop.
-    c.Assert(<-s.d.wait, checker.IsNil)
+    c.Assert(<-s.d.Wait, checker.IsNil)
 
     mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
     c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
 
-    comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
+    comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
     c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment)
 }
 
@@ -1813,18 +1821,20 @@ func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) {
     }
 }
 
+// FIXME(vdemeester) Use a new daemon instance instead of the Suite one
 func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) {
-    s.d.useDefaultHost = true
+    s.d.UseDefaultHost = true
     defer func() {
-        s.d.useDefaultHost = false
+        s.d.UseDefaultHost = false
     }()
     c.Assert(s.d.Start(), check.IsNil)
 }
 
+// FIXME(vdemeester) Use a new daemon instance instead of the Suite one
 func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTLSHost(c *check.C) {
-    s.d.useDefaultTLSHost = true
+    s.d.UseDefaultTLSHost = true
     defer func() {
-        s.d.useDefaultTLSHost = false
+        s.d.UseDefaultTLSHost = false
    }()
     if err := s.d.Start(
         "--tlsverify",
@@ -2144,12 +2154,12 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) {
|
| 2144 | 2144 |
c.Assert(err, check.IsNil, check.Commentf("Output: %s", out))
|
| 2145 | 2145 |
id := strings.TrimSpace(out) |
| 2146 | 2146 |
|
| 2147 |
- c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) |
|
| 2147 |
+ c.Assert(s.d.Signal(os.Kill), check.IsNil) |
|
| 2148 | 2148 |
mountOut, err := ioutil.ReadFile("/proc/self/mountinfo")
|
| 2149 | 2149 |
c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
|
| 2150 | 2150 |
|
| 2151 | 2151 |
// container mounts should exist even after daemon has crashed. |
| 2152 |
- comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
|
|
| 2152 |
+ comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
|
|
| 2153 | 2153 |
c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) |
| 2154 | 2154 |
|
| 2155 | 2155 |
// restart daemon. |
| ... | ... |
@@ -2172,7 +2182,7 @@ func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) {
|
| 2172 | 2172 |
// Now, container mounts should be gone. |
| 2173 | 2173 |
mountOut, err = ioutil.ReadFile("/proc/self/mountinfo")
|
| 2174 | 2174 |
c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut))
|
| 2175 |
- comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut)
|
|
| 2175 |
+ comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.Root, mountOut)
|
|
| 2176 | 2176 |
c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) |
| 2177 | 2177 |
} |
| 2178 | 2178 |
|
| ... | ... |
@@ -2350,7 +2360,7 @@ func (s *DockerDaemonSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) {
|
| 2350 | 2350 |
_, err = configFile.Write([]byte(daemonConfig)) |
| 2351 | 2351 |
c.Assert(err, checker.IsNil) |
| 2352 | 2352 |
|
| 2353 |
- err = s.d.reloadConfig() |
|
| 2353 |
+ err = s.d.ReloadConfig() |
|
| 2354 | 2354 |
c.Assert(err, checker.IsNil, check.Commentf("error reloading daemon config"))
|
| 2355 | 2355 |
|
| 2356 | 2356 |
out, err := s.d.Cmd("info")
|
| ... | ... |
@@ -2380,7 +2390,8 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) {
|
| 2380 | 2380 |
|
| 2381 | 2381 |
expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` |
| 2382 | 2382 |
expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` |
| 2383 |
- content, _ := ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2383 |
+ content, err := s.d.ReadLogFile() |
|
| 2384 |
+ c.Assert(err, checker.IsNil) |
|
| 2384 | 2385 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) |
| 2385 | 2386 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) |
| 2386 | 2387 |
} |
| ... | ... |
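Replacing `ioutil.ReadFile(s.d.logFile.Name())` with `s.d.ReadLogFile()` hides the log handle and gives the tests an error to assert on instead of a silently ignored one. The old call sites suggest a one-liner is enough, assuming the daemon still holds the `logFile` handle internally:

```go
// Minimal sketch, assuming d.logFile is still the *os.File that
// dockerd's output is copied to.
func (d *Daemon) ReadLogFile() ([]byte, error) {
	return ioutil.ReadFile(d.logFile.Name())
}
```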
@@ -2402,7 +2413,8 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) {
|
| 2402 | 2402 |
|
| 2403 | 2403 |
expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` |
| 2404 | 2404 |
expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` |
| 2405 |
- content, _ := ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2405 |
+ content, err := s.d.ReadLogFile() |
|
| 2406 |
+ c.Assert(err, checker.IsNil) |
|
| 2406 | 2407 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) |
| 2407 | 2408 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) |
| 2408 | 2409 |
|
| ... | ... |
@@ -2412,13 +2424,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) {
|
| 2412 | 2412 |
fmt.Fprintf(configFile, "%s", daemonConfig) |
| 2413 | 2413 |
configFile.Close() |
| 2414 | 2414 |
|
| 2415 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 2415 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|

| 2416 | 2417 |
|
| 2417 | 2418 |
time.Sleep(3 * time.Second) |
| 2418 | 2419 |
|
| 2419 | 2420 |
expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` |
| 2420 | 2421 |
expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` |
| 2421 |
- content, _ = ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2422 |
+ content, err = s.d.ReadLogFile() |
|
| 2423 |
+ c.Assert(err, checker.IsNil) |
|
| 2422 | 2424 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) |
| 2423 | 2425 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) |
| 2424 | 2426 |
} |
| ... | ... |
@@ -2440,7 +2454,8 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec |
| 2440 | 2440 |
|
| 2441 | 2441 |
expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` |
| 2442 | 2442 |
expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` |
| 2443 |
- content, _ := ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2443 |
+ content, err := s.d.ReadLogFile() |
|
| 2444 |
+ c.Assert(err, checker.IsNil) |
|
| 2444 | 2445 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) |
| 2445 | 2446 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) |
| 2446 | 2447 |
|
| ... | ... |
@@ -2450,13 +2465,15 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec |
| 2450 | 2450 |
fmt.Fprintf(configFile, "%s", daemonConfig) |
| 2451 | 2451 |
configFile.Close() |
| 2452 | 2452 |
|
| 2453 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 2453 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 2454 | 2455 |
|
| 2455 | 2456 |
time.Sleep(3 * time.Second) |
| 2456 | 2457 |
|
| 2457 | 2458 |
expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` |
| 2458 | 2459 |
expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` |
| 2459 |
- content, _ = ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2460 |
+ content, err = s.d.ReadLogFile() |
|
| 2461 |
+ c.Assert(err, checker.IsNil) |
|
| 2460 | 2462 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) |
| 2461 | 2463 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) |
| 2462 | 2464 |
|
| ... | ... |
@@ -2466,13 +2483,14 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec |
| 2466 | 2466 |
fmt.Fprintf(configFile, "%s", daemonConfig) |
| 2467 | 2467 |
configFile.Close() |
| 2468 | 2468 |
|
| 2469 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 2469 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 2470 | 2470 |
|
| 2471 | 2471 |
time.Sleep(3 * time.Second) |
| 2472 | 2472 |
|
| 2473 | 2473 |
expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` |
| 2474 | 2474 |
expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` |
| 2475 |
- content, _ = ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2475 |
+ content, err = s.d.ReadLogFile() |
|
| 2476 |
+ c.Assert(err, checker.IsNil) |
|
| 2476 | 2477 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) |
| 2477 | 2478 |
c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) |
| 2478 | 2479 |
} |
| ... | ... |
@@ -2480,8 +2498,9 @@ func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *chec |
| 2480 | 2480 |
func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) {
|
| 2481 | 2481 |
err := s.d.StartWithBusybox("-b=none", "--iptables=false")
|
| 2482 | 2482 |
c.Assert(err, check.IsNil) |
| 2483 |
- s.d.c.Logf("dockerBinary %s", dockerBinary)
|
|
| 2484 |
- out, code, err := s.d.buildImageWithOut("busyboxs",
|
|
| 2484 |
+ c.Logf("dockerBinary %s", dockerBinary)
|
|
| 2485 |
+ out, code, err := s.d.BuildImageWithOut("busyboxs",
|
|
| 2485 | 2486 |
`FROM busybox |
| 2486 | 2487 |
RUN cat /etc/hosts`, false) |
| 2487 | 2488 |
comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err)
|
| ... | ... |
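`buildImageWithOut` is exported as `BuildImageWithOut`. The call sites in this diff pin down the convention: image name, Dockerfile contents, a cache flag, then optional build flags. A usage sketch under those assumptions (the image name and Dockerfile are illustrative):

```go
// Calling convention as seen at the call sites in this diff:
// name, Dockerfile contents, useCache, then optional build flags.
out, exitCode, err := s.d.BuildImageWithOut("example-image",
	`FROM busybox
RUN echo built-on-private-daemon`, false, "-q")
c.Assert(err, check.IsNil,
	check.Commentf("build failed: output %s, exit %d", out, exitCode))
```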
@@ -2576,7 +2595,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) {
|
| 2576 | 2576 |
} |
| 2577 | 2577 |
` |
| 2578 | 2578 |
ioutil.WriteFile(configName, []byte(config), 0644) |
| 2579 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 2579 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 2580 | 2580 |
// Give daemon time to reload config |
| 2581 | 2581 |
<-time.After(1 * time.Second) |
| 2582 | 2582 |
|
| ... | ... |
@@ -2605,11 +2624,12 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) {
|
| 2605 | 2605 |
} |
| 2606 | 2606 |
` |
| 2607 | 2607 |
ioutil.WriteFile(configName, []byte(config), 0644) |
| 2608 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 2608 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 2609 | 2609 |
// Give daemon time to reload config |
| 2610 | 2610 |
<-time.After(1 * time.Second) |
| 2611 | 2611 |
|
| 2612 |
- content, _ := ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2612 |
+ content, err := s.d.ReadLogFile() |
|
| 2613 |
+ c.Assert(err, checker.IsNil) |
|
| 2613 | 2614 |
c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`) |
| 2614 | 2615 |
|
| 2615 | 2616 |
// Check that we can select a default runtime |
| ... | ... |
@@ -2630,7 +2650,7 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) {
|
| 2630 | 2630 |
} |
| 2631 | 2631 |
` |
| 2632 | 2632 |
ioutil.WriteFile(configName, []byte(config), 0644) |
| 2633 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 2633 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 2634 | 2634 |
// Give daemon time to reload config |
| 2635 | 2635 |
<-time.After(1 * time.Second) |
| 2636 | 2636 |
|
| ... | ... |
@@ -2688,7 +2708,8 @@ func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) {
|
| 2688 | 2688 |
err = s.d.Start("--add-runtime", "runc=my-runc")
|
| 2689 | 2689 |
c.Assert(err, check.NotNil) |
| 2690 | 2690 |
|
| 2691 |
- content, _ := ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2691 |
+ content, err := s.d.ReadLogFile() |
|
| 2692 |
+ c.Assert(err, checker.IsNil) |
|
| 2692 | 2693 |
c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`) |
| 2693 | 2694 |
|
| 2694 | 2695 |
// Check that we can select a default runtime |
| ... | ... |
@@ -2778,18 +2799,18 @@ func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) {
|
| 2778 | 2778 |
out, err = d.Cmd("inspect", "--type=image", "--format={{.ID}}", "busybox:latest")
|
| 2779 | 2779 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 2780 | 2780 |
c.Assert(d.Stop(), checker.IsNil) |
| 2781 |
- <-d.wait |
|
| 2781 |
+ <-d.Wait |
|
| 2782 | 2782 |
|
| 2783 | 2783 |
imageID := strings.TrimSpace(out) |
| 2784 | 2784 |
volumeID := stringid.GenerateNonCryptoID() |
| 2785 |
- vfsPath := filepath.Join(d.root, "vfs", "dir", volumeID) |
|
| 2785 |
+ vfsPath := filepath.Join(d.Root, "vfs", "dir", volumeID) |
|
| 2786 | 2786 |
c.Assert(os.MkdirAll(vfsPath, 0755), checker.IsNil) |
| 2787 | 2787 |
|
| 2788 | 2788 |
config := []byte(` |
| 2789 | 2789 |
{
|
| 2790 | 2790 |
"ID": "` + id + `", |
| 2791 | 2791 |
"Name": "hello", |
| 2792 |
- "Driver": "` + d.storageDriver + `", |
|
| 2792 |
+ "Driver": "` + d.StorageDriver() + `", |
|
| 2793 | 2793 |
"Image": "` + imageID + `", |
| 2794 | 2794 |
"Config": {"Image": "busybox:latest"},
|
| 2795 | 2795 |
"NetworkSettings": {},
|
| ... | ... |
@@ -2806,7 +2827,7 @@ func (s *DockerDaemonSuite) TestDaemonBackcompatPre17Volumes(c *check.C) {
|
| 2806 | 2806 |
} |
| 2807 | 2807 |
`) |
| 2808 | 2808 |
|
| 2809 |
- configPath := filepath.Join(d.root, "containers", id, "config.v2.json") |
|
| 2809 |
+ configPath := filepath.Join(d.Root, "containers", id, "config.v2.json") |
|
| 2810 | 2810 |
err = ioutil.WriteFile(configPath, config, 600) |
| 2811 | 2811 |
err = d.Start() |
| 2812 | 2812 |
c.Assert(err, checker.IsNil) |
| ... | ... |
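Here `d.folder`, `d.root`, and `d.storageDriver` give way to `Folder`, `Root`, and a `StorageDriver()` accessor. The accessor is presumably a trivial getter over a field that stays unexported in the new package; a sketch:

```go
// Assumed getter; the field itself presumably remains private inside
// integration-cli/daemon.
func (d *Daemon) StorageDriver() string {
	return d.storageDriver
}
```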
@@ -2883,15 +2904,16 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeout(c *check.C) {
|
| 2883 | 2883 |
_, err := s.d.Cmd("run", "-d", "busybox", "top")
|
| 2884 | 2884 |
c.Assert(err, check.IsNil) |
| 2885 | 2885 |
|
| 2886 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGINT) |
|
| 2886 |
+ c.Assert(s.d.Signal(syscall.SIGINT), checker.IsNil) |
|
| 2887 | 2887 |
|
| 2888 | 2888 |
select {
|
| 2889 |
- case <-s.d.wait: |
|
| 2889 |
+ case <-s.d.Wait: |
|
| 2890 | 2890 |
case <-time.After(5 * time.Second): |
| 2891 | 2891 |
} |
| 2892 | 2892 |
|
| 2893 | 2893 |
expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."` |
| 2894 |
- content, _ := ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2894 |
+ content, err := s.d.ReadLogFile() |
|
| 2895 |
+ c.Assert(err, checker.IsNil) |
|
| 2895 | 2896 |
c.Assert(string(content), checker.Contains, expectedMessage) |
| 2896 | 2897 |
} |
| 2897 | 2898 |
|
| ... | ... |
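The shutdown tests select on `s.d.Wait`, so the exported surface plausibly includes a public channel that fires when dockerd exits. A type sketch under that assumption, with everything irrelevant elided:

```go
// Assumed shape: only Wait matters for these tests. The channel would be
// fed by whatever goroutine reaps the dockerd process.
type Daemon struct {
	Wait chan error // receives the process exit error (nil on clean exit)
	// unexported: cmd, logFile, ...
}
```

The select in the hunk above then reads naturally: either the daemon exits within the timeout, or the test proceeds and asserts on the log contents.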
@@ -2916,14 +2938,15 @@ func (s *DockerDaemonSuite) TestDaemonShutdownTimeoutWithConfigFile(c *check.C) |
| 2916 | 2916 |
fmt.Fprintf(configFile, "%s", daemonConfig) |
| 2917 | 2917 |
configFile.Close() |
| 2918 | 2918 |
|
| 2919 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 2919 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 2920 | 2920 |
|
| 2921 | 2921 |
select {
|
| 2922 |
- case <-s.d.wait: |
|
| 2922 |
+ case <-s.d.Wait: |
|
| 2923 | 2923 |
case <-time.After(3 * time.Second): |
| 2924 | 2924 |
} |
| 2925 | 2925 |
|
| 2926 | 2926 |
expectedMessage := `level=debug msg="Reset Shutdown Timeout: 5"` |
| 2927 |
- content, _ := ioutil.ReadFile(s.d.logFile.Name()) |
|
| 2927 |
+ content, err := s.d.ReadLogFile() |
|
| 2928 |
+ c.Assert(err, checker.IsNil) |
|
| 2928 | 2929 |
c.Assert(string(content), checker.Contains, expectedMessage) |
| 2929 | 2930 |
} |
| ... | ... |
@@ -422,7 +422,7 @@ func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) {
|
| 422 | 422 |
fmt.Fprintf(configFile, "%s", daemonConfig) |
| 423 | 423 |
configFile.Close() |
| 424 | 424 |
|
| 425 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 425 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 426 | 426 |
|
| 427 | 427 |
time.Sleep(3 * time.Second) |
| 428 | 428 |
|
| ... | ... |
@@ -460,7 +460,7 @@ func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) {
|
| 460 | 460 |
} |
| 461 | 461 |
c.Assert(daemonID, checker.Not(checker.Equals), "") |
| 462 | 462 |
|
| 463 |
- syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) |
|
| 463 |
+ c.Assert(s.d.Signal(syscall.SIGHUP), checker.IsNil) |
|
| 464 | 464 |
|
| 465 | 465 |
time.Sleep(3 * time.Second) |
| 466 | 466 |
|
| ... | ... |
@@ -14,6 +14,7 @@ import ( |
| 14 | 14 |
|
| 15 | 15 |
"github.com/docker/docker/daemon/graphdriver" |
| 16 | 16 |
"github.com/docker/docker/daemon/graphdriver/vfs" |
| 17 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 17 | 18 |
"github.com/docker/docker/pkg/archive" |
| 18 | 19 |
"github.com/docker/docker/pkg/plugins" |
| 19 | 20 |
"github.com/go-check/check" |
| ... | ... |
@@ -29,7 +30,7 @@ type DockerExternalGraphdriverSuite struct {
|
| 29 | 29 |
server *httptest.Server |
| 30 | 30 |
jserver *httptest.Server |
| 31 | 31 |
ds *DockerSuite |
| 32 |
- d *Daemon |
|
| 32 |
+ d *daemon.Daemon |
|
| 33 | 33 |
ec map[string]*graphEventsCounter |
| 34 | 34 |
} |
| 35 | 35 |
|
| ... | ... |
@@ -51,7 +52,9 @@ type graphEventsCounter struct {
|
| 51 | 51 |
} |
| 52 | 52 |
|
| 53 | 53 |
func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) {
|
| 54 |
- s.d = NewDaemon(c) |
|
| 54 |
+ s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 55 |
+ Experimental: experimentalDaemon, |
|
| 56 |
+ }) |
|
| 55 | 57 |
} |
| 56 | 58 |
|
| 57 | 59 |
func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) {
|
| ... | ... |
@@ -16,6 +16,7 @@ import ( |
| 16 | 16 |
"time" |
| 17 | 17 |
|
| 18 | 18 |
"github.com/docker/docker/api/types" |
| 19 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 19 | 20 |
"github.com/docker/docker/pkg/integration/checker" |
| 20 | 21 |
"github.com/docker/docker/pkg/stringid" |
| 21 | 22 |
"github.com/docker/docker/volume" |
| ... | ... |
@@ -44,12 +45,14 @@ type eventCounter struct {
|
| 44 | 44 |
|
| 45 | 45 |
type DockerExternalVolumeSuite struct {
|
| 46 | 46 |
ds *DockerSuite |
| 47 |
- d *Daemon |
|
| 47 |
+ d *daemon.Daemon |
|
| 48 | 48 |
*volumePlugin |
| 49 | 49 |
} |
| 50 | 50 |
|
| 51 | 51 |
func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) {
|
| 52 |
- s.d = NewDaemon(c) |
|
| 52 |
+ s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 53 |
+ Experimental: experimentalDaemon, |
|
| 54 |
+ }) |
|
| 53 | 55 |
s.ec = &eventCounter{}
|
| 54 | 56 |
} |
| 55 | 57 |
|
| ... | ... |
@@ -6,6 +6,7 @@ import ( |
| 6 | 6 |
"net" |
| 7 | 7 |
"strings" |
| 8 | 8 |
|
| 9 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 9 | 10 |
"github.com/docker/docker/pkg/integration/checker" |
| 10 | 11 |
"github.com/go-check/check" |
| 11 | 12 |
) |
| ... | ... |
@@ -70,7 +71,9 @@ func (s *DockerSuite) TestInfoFormat(c *check.C) {
|
| 70 | 70 |
func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) {
|
| 71 | 71 |
testRequires(c, SameHostDaemon, DaemonIsLinux) |
| 72 | 72 |
|
| 73 |
- d := NewDaemon(c) |
|
| 73 |
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 74 |
+ Experimental: experimentalDaemon, |
|
| 75 |
+ }) |
|
| 74 | 76 |
discoveryBackend := "consul://consuladdr:consulport/some/path" |
| 75 | 77 |
discoveryAdvertise := "1.1.1.1:2375" |
| 76 | 78 |
err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise))
|
| ... | ... |
@@ -88,7 +91,9 @@ func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) {
|
| 88 | 88 |
func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) {
|
| 89 | 89 |
testRequires(c, SameHostDaemon, DaemonIsLinux) |
| 90 | 90 |
|
| 91 |
- d := NewDaemon(c) |
|
| 91 |
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 92 |
+ Experimental: experimentalDaemon, |
|
| 93 |
+ }) |
|
| 92 | 94 |
discoveryBackend := "consul://consuladdr:consulport/some/path" |
| 93 | 95 |
|
| 94 | 96 |
// --cluster-advertise with an invalid string is an error |
| ... | ... |
@@ -105,7 +110,9 @@ func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) {
|
| 105 | 105 |
func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) {
|
| 106 | 106 |
testRequires(c, SameHostDaemon, Network, DaemonIsLinux) |
| 107 | 107 |
|
| 108 |
- d := NewDaemon(c) |
|
| 108 |
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 109 |
+ Experimental: experimentalDaemon, |
|
| 110 |
+ }) |
|
| 109 | 111 |
discoveryBackend := "consul://consuladdr:consulport/some/path" |
| 110 | 112 |
discoveryAdvertise := "eth0" |
| 111 | 113 |
|
| ... | ... |
@@ -171,7 +178,9 @@ func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) {
|
| 171 | 171 |
func (s *DockerSuite) TestInfoDebug(c *check.C) {
|
| 172 | 172 |
testRequires(c, SameHostDaemon, DaemonIsLinux) |
| 173 | 173 |
|
| 174 |
- d := NewDaemon(c) |
|
| 174 |
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 175 |
+ Experimental: experimentalDaemon, |
|
| 176 |
+ }) |
|
| 175 | 177 |
err := d.Start("--debug")
|
| 176 | 178 |
c.Assert(err, checker.IsNil) |
| 177 | 179 |
defer d.Stop() |
| ... | ... |
@@ -193,7 +202,9 @@ func (s *DockerSuite) TestInsecureRegistries(c *check.C) {
|
| 193 | 193 |
registryCIDR := "192.168.1.0/24" |
| 194 | 194 |
registryHost := "insecurehost.com:5000" |
| 195 | 195 |
|
| 196 |
- d := NewDaemon(c) |
|
| 196 |
+ d := daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 197 |
+ Experimental: experimentalDaemon, |
|
| 198 |
+ }) |
|
| 197 | 199 |
err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost)
|
| 198 | 200 |
c.Assert(err, checker.IsNil) |
| 199 | 201 |
defer d.Stop() |
| ... | ... |
@@ -16,6 +16,7 @@ import ( |
| 16 | 16 |
|
| 17 | 17 |
"github.com/docker/docker/api/types" |
| 18 | 18 |
"github.com/docker/docker/api/types/versions/v1p20" |
| 19 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 19 | 20 |
"github.com/docker/docker/pkg/integration/checker" |
| 20 | 21 |
icmd "github.com/docker/docker/pkg/integration/cmd" |
| 21 | 22 |
"github.com/docker/docker/pkg/stringid" |
| ... | ... |
@@ -43,11 +44,13 @@ func init() {
|
| 43 | 43 |
type DockerNetworkSuite struct {
|
| 44 | 44 |
server *httptest.Server |
| 45 | 45 |
ds *DockerSuite |
| 46 |
- d *Daemon |
|
| 46 |
+ d *daemon.Daemon |
|
| 47 | 47 |
} |
| 48 | 48 |
|
| 49 | 49 |
func (s *DockerNetworkSuite) SetUpTest(c *check.C) {
|
| 50 |
- s.d = NewDaemon(c) |
|
| 50 |
+ s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 51 |
+ Experimental: experimentalDaemon, |
|
| 52 |
+ }) |
|
| 51 | 53 |
} |
| 52 | 54 |
|
| 53 | 55 |
func (s *DockerNetworkSuite) TearDownTest(c *check.C) {
|
| ... | ... |
@@ -994,9 +997,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C |
| 994 | 994 |
c.Assert(err, checker.IsNil) |
| 995 | 995 |
|
| 996 | 996 |
// Kill daemon and restart |
| 997 |
- if err = s.d.cmd.Process.Kill(); err != nil {
|
|
| 998 |
- c.Fatal(err) |
|
| 999 |
- } |
|
| 997 |
+ c.Assert(s.d.Kill(), checker.IsNil) |
|
| 1000 | 998 |
|
| 1001 | 999 |
server.Close() |
| 1002 | 1000 |
|
| ... | ... |
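Several call sites collapse `if err := s.d.cmd.Process.Kill(); err != nil { c.Fatal(err) }` into a single `c.Assert(s.d.Kill(), checker.IsNil)`. A plausible `Kill`, mirroring the `Signal` sketch earlier; the real helper may also close the log file or clean up state:

```go
// Hypothetical Kill; the nil guards and error text are assumptions.
func (d *Daemon) Kill() error {
	if d.cmd == nil || d.cmd.Process == nil {
		return errors.New("daemon not started")
	}
	return d.cmd.Process.Kill()
}
```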
@@ -1064,7 +1065,7 @@ func (s *DockerSuite) TestInspectAPIMultipleNetworks(c *check.C) {
|
| 1064 | 1064 |
c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) |
| 1065 | 1065 |
} |
| 1066 | 1066 |
|
| 1067 |
-func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) {
|
|
| 1067 |
+func connectContainerToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) {
|
|
| 1068 | 1068 |
// Run a container on the default network |
| 1069 | 1069 |
out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top")
|
| 1070 | 1070 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| ... | ... |
@@ -1078,7 +1079,7 @@ func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []strin |
| 1078 | 1078 |
} |
| 1079 | 1079 |
} |
| 1080 | 1080 |
|
| 1081 |
-func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) {
|
|
| 1081 |
+func verifyContainerIsConnectedToNetworks(c *check.C, d *daemon.Daemon, cName string, nws []string) {
|
|
| 1082 | 1082 |
// Verify container is connected to all the networks |
| 1083 | 1083 |
for _, nw := range nws {
|
| 1084 | 1084 |
out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName)
|
| ... | ... |
@@ -1115,10 +1116,8 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRe |
| 1115 | 1115 |
verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) |
| 1116 | 1116 |
|
| 1117 | 1117 |
// Kill daemon and restart |
| 1118 |
- if err := s.d.cmd.Process.Kill(); err != nil {
|
|
| 1119 |
- c.Fatal(err) |
|
| 1120 |
- } |
|
| 1121 |
- s.d.Restart() |
|
| 1118 |
+ c.Assert(s.d.Kill(), checker.IsNil) |
|
| 1119 |
+ c.Assert(s.d.Restart(), checker.IsNil) |
|
| 1122 | 1120 |
|
| 1123 | 1121 |
// Restart container |
| 1124 | 1122 |
_, err := s.d.Cmd("start", cName)
|
| ... | ... |
@@ -1144,21 +1143,17 @@ func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c |
| 1144 | 1144 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 1145 | 1145 |
|
| 1146 | 1146 |
// verify container has finished starting before killing daemon |
| 1147 |
- err = s.d.waitRun(cName) |
|
| 1147 |
+ err = s.d.WaitRun(cName) |
|
| 1148 | 1148 |
c.Assert(err, checker.IsNil) |
| 1149 | 1149 |
} |
| 1150 | 1150 |
|
| 1151 | 1151 |
// Kill daemon ungracefully and restart |
| 1152 |
- if err := s.d.cmd.Process.Kill(); err != nil {
|
|
| 1153 |
- c.Fatal(err) |
|
| 1154 |
- } |
|
| 1155 |
- if err := s.d.Restart(); err != nil {
|
|
| 1156 |
- c.Fatal(err) |
|
| 1157 |
- } |
|
| 1152 |
+ c.Assert(s.d.Kill(), checker.IsNil) |
|
| 1153 |
+ c.Assert(s.d.Restart(), checker.IsNil) |
|
| 1158 | 1154 |
|
| 1159 | 1155 |
// make sure all the containers are up and running |
| 1160 | 1156 |
for i := 0; i < 10; i++ {
|
| 1161 |
- err := s.d.waitRun(fmt.Sprintf("hostc-%d", i))
|
|
| 1157 |
+ err := s.d.WaitRun(fmt.Sprintf("hostc-%d", i))
|
|
| 1162 | 1158 |
c.Assert(err, checker.IsNil) |
| 1163 | 1159 |
} |
| 1164 | 1160 |
} |
| ... | ... |
@@ -6,11 +6,12 @@ import ( |
| 6 | 6 |
"strconv" |
| 7 | 7 |
"strings" |
| 8 | 8 |
|
| 9 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 9 | 10 |
"github.com/docker/docker/pkg/integration/checker" |
| 10 | 11 |
"github.com/go-check/check" |
| 11 | 12 |
) |
| 12 | 13 |
|
| 13 |
-func pruneNetworkAndVerify(c *check.C, d *SwarmDaemon, kept, pruned []string) {
|
|
| 14 |
+func pruneNetworkAndVerify(c *check.C, d *daemon.Swarm, kept, pruned []string) {
|
|
| 14 | 15 |
_, err := d.Cmd("network", "prune", "--force")
|
| 15 | 16 |
c.Assert(err, checker.IsNil) |
| 16 | 17 |
out, err := d.Cmd("network", "ls", "--format", "{{.Name}}")
|
| ... | ... |
@@ -46,7 +47,7 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) {
|
| 46 | 46 |
"busybox", "top") |
| 47 | 47 |
c.Assert(err, checker.IsNil) |
| 48 | 48 |
c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") |
| 49 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, replicas+1) |
|
| 49 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, replicas+1) |
|
| 50 | 50 |
|
| 51 | 51 |
// prune and verify |
| 52 | 52 |
pruneNetworkAndVerify(c, d, []string{"n1", "n3"}, []string{"n2", "n4"})
|
| ... | ... |
@@ -56,14 +57,14 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) {
|
| 56 | 56 |
c.Assert(err, checker.IsNil) |
| 57 | 57 |
_, err = d.Cmd("service", "rm", serviceName)
|
| 58 | 58 |
c.Assert(err, checker.IsNil) |
| 59 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) |
|
| 59 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) |
|
| 60 | 60 |
pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"})
|
| 61 | 61 |
} |
| 62 | 62 |
|
| 63 | 63 |
func (s *DockerDaemonSuite) TestPruneImageDangling(c *check.C) {
|
| 64 | 64 |
c.Assert(s.d.StartWithBusybox(), checker.IsNil) |
| 65 | 65 |
|
| 66 |
- out, _, err := s.d.buildImageWithOut("test",
|
|
| 66 |
+ out, _, err := s.d.BuildImageWithOut("test",
|
|
| 67 | 67 |
`FROM busybox |
| 68 | 68 |
LABEL foo=bar`, true, "-q") |
| 69 | 69 |
c.Assert(err, checker.IsNil) |
| ... | ... |
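The prune hunks above pass `d.CheckActiveContainerCount` straight to `waitAndAssert`, so its signature must match the inline closures used elsewhere in this diff: `func(*check.C) (interface{}, check.CommentInterface)`. A sketch that counts `ps -q` output (the counting detail is an assumption):

```go
// Assumed implementation: one container ID per line from `ps -q`.
func (d *Swarm) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
	out, err := d.Cmd("ps", "-q")
	c.Assert(err, checker.IsNil)
	return len(strings.Fields(out)), check.Commentf("output: %q", out)
}
```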
@@ -4439,7 +4439,7 @@ func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) {
|
| 4439 | 4439 |
name := "test-A" |
| 4440 | 4440 |
_, err := s.d.Cmd("run", "--name", name, "-d", "busybox", "top")
|
| 4441 | 4441 |
c.Assert(err, checker.IsNil) |
| 4442 |
- c.Assert(s.d.waitRun(name), check.IsNil) |
|
| 4442 |
+ c.Assert(s.d.WaitRun(name), check.IsNil) |
|
| 4443 | 4443 |
|
| 4444 | 4444 |
out, err := s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
|
| 4445 | 4445 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -4448,7 +4448,7 @@ func (s *DockerDaemonSuite) TestRunWithUlimitAndDaemonDefault(c *check.C) {
|
| 4448 | 4448 |
name = "test-B" |
| 4449 | 4449 |
_, err = s.d.Cmd("run", "--name", name, "--ulimit=nofile=42", "-d", "busybox", "top")
|
| 4450 | 4450 |
c.Assert(err, checker.IsNil) |
| 4451 |
- c.Assert(s.d.waitRun(name), check.IsNil) |
|
| 4451 |
+ c.Assert(s.d.WaitRun(name), check.IsNil) |
|
| 4452 | 4452 |
|
| 4453 | 4453 |
out, err = s.d.Cmd("inspect", "--format", "{{.HostConfig.Ulimits}}", name)
|
| 4454 | 4454 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -12,7 +12,7 @@ func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) {
|
| 12 | 12 |
d := s.AddDaemon(c, true, true) |
| 13 | 13 |
|
| 14 | 14 |
testName := "test_secret" |
| 15 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 15 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 16 | 16 |
swarm.Annotations{
|
| 17 | 17 |
Name: testName, |
| 18 | 18 |
}, |
| ... | ... |
@@ -20,7 +20,7 @@ func (s *DockerSwarmSuite) TestSecretCreate(c *check.C) {
|
| 20 | 20 |
}) |
| 21 | 21 |
c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
|
| 22 | 22 |
|
| 23 |
- secret := d.getSecret(c, id) |
|
| 23 |
+ secret := d.GetSecret(c, id) |
|
| 24 | 24 |
c.Assert(secret.Spec.Name, checker.Equals, testName) |
| 25 | 25 |
} |
| 26 | 26 |
|
| ... | ... |
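With `createSecret`/`getSecret` exported, a caller outside the package can round-trip a secret as below. The `Data` field name follows the `api/types/swarm` SecretSpec and the secret name is illustrative; treat this as a sketch, not the suite's actual test:

```go
// Usage sketch mirroring the hunk above.
func (s *DockerSwarmSuite) TestSecretRoundTrip(c *check.C) {
	d := s.AddDaemon(c, true, true)
	id := d.CreateSecret(c, swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "sketch_secret"},
		Data:        []byte("payload"), // field name assumed from api/types/swarm
	})
	c.Assert(id, checker.Not(checker.Equals), "")
	c.Assert(d.GetSecret(c, id).Spec.Name, checker.Equals, "sketch_secret")
}
```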
@@ -28,7 +28,7 @@ func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) {
|
| 28 | 28 |
d := s.AddDaemon(c, true, true) |
| 29 | 29 |
|
| 30 | 30 |
testName := "test_secret" |
| 31 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 31 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 32 | 32 |
swarm.Annotations{
|
| 33 | 33 |
Name: testName, |
| 34 | 34 |
Labels: map[string]string{
|
| ... | ... |
@@ -40,7 +40,7 @@ func (s *DockerSwarmSuite) TestSecretCreateWithLabels(c *check.C) {
|
| 40 | 40 |
}) |
| 41 | 41 |
c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
|
| 42 | 42 |
|
| 43 |
- secret := d.getSecret(c, id) |
|
| 43 |
+ secret := d.GetSecret(c, id) |
|
| 44 | 44 |
c.Assert(secret.Spec.Name, checker.Equals, testName) |
| 45 | 45 |
c.Assert(len(secret.Spec.Labels), checker.Equals, 2) |
| 46 | 46 |
c.Assert(secret.Spec.Labels["key1"], checker.Equals, "value1") |
| ... | ... |
@@ -52,7 +52,7 @@ func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) {
|
| 52 | 52 |
d := s.AddDaemon(c, true, true) |
| 53 | 53 |
|
| 54 | 54 |
name := "foo" |
| 55 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 55 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 56 | 56 |
swarm.Annotations{
|
| 57 | 57 |
Name: name, |
| 58 | 58 |
}, |
| ... | ... |
@@ -60,7 +60,7 @@ func (s *DockerSwarmSuite) TestSecretCreateResolve(c *check.C) {
|
| 60 | 60 |
}) |
| 61 | 61 |
c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
|
| 62 | 62 |
|
| 63 |
- fake := d.createSecret(c, swarm.SecretSpec{
|
|
| 63 |
+ fake := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 64 | 64 |
swarm.Annotations{
|
| 65 | 65 |
Name: id, |
| 66 | 66 |
}, |
| ... | ... |
@@ -14,7 +14,7 @@ func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) {
|
| 14 | 14 |
d := s.AddDaemon(c, true, true) |
| 15 | 15 |
|
| 16 | 16 |
testName := "test_secret" |
| 17 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 17 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 18 | 18 |
swarm.Annotations{
|
| 19 | 19 |
Name: testName, |
| 20 | 20 |
}, |
| ... | ... |
@@ -22,7 +22,7 @@ func (s *DockerSwarmSuite) TestSecretInspect(c *check.C) {
|
| 22 | 22 |
}) |
| 23 | 23 |
c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
|
| 24 | 24 |
|
| 25 |
- secret := d.getSecret(c, id) |
|
| 25 |
+ secret := d.GetSecret(c, id) |
|
| 26 | 26 |
c.Assert(secret.Spec.Name, checker.Equals, testName) |
| 27 | 27 |
|
| 28 | 28 |
out, err := d.Cmd("secret", "inspect", testName)
|
| ... | ... |
@@ -41,7 +41,7 @@ func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) {
|
| 41 | 41 |
"test1", |
| 42 | 42 |
} |
| 43 | 43 |
for _, n := range testNames {
|
| 44 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 44 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 45 | 45 |
swarm.Annotations{
|
| 46 | 46 |
Name: n, |
| 47 | 47 |
}, |
| ... | ... |
@@ -49,7 +49,7 @@ func (s *DockerSwarmSuite) TestSecretInspectMultiple(c *check.C) {
|
| 49 | 49 |
}) |
| 50 | 50 |
c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
|
| 51 | 51 |
|
| 52 |
- secret := d.getSecret(c, id) |
|
| 52 |
+ secret := d.GetSecret(c, id) |
|
| 53 | 53 |
c.Assert(secret.Spec.Name, checker.Equals, n) |
| 54 | 54 |
|
| 55 | 55 |
} |
| ... | ... |
@@ -22,14 +22,14 @@ func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) {
|
| 22 | 22 |
|
| 23 | 23 |
var tasks []swarm.Task |
| 24 | 24 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 25 |
- tasks = d.getServiceTasks(c, id) |
|
| 25 |
+ tasks = d.GetServiceTasks(c, id) |
|
| 26 | 26 |
return len(tasks) > 0, nil |
| 27 | 27 |
}, checker.Equals, true) |
| 28 | 28 |
|
| 29 | 29 |
task := tasks[0] |
| 30 | 30 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 31 | 31 |
if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" {
|
| 32 |
- task = d.getTask(c, task.ID) |
|
| 32 |
+ task = d.GetTask(c, task.ID) |
|
| 33 | 33 |
} |
| 34 | 34 |
return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil |
| 35 | 35 |
}, checker.Equals, true) |
| ... | ... |
@@ -67,7 +67,7 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSimple(c *check.C) {
|
| 67 | 67 |
|
| 68 | 68 |
serviceName := "test-service-secret" |
| 69 | 69 |
testName := "test_secret" |
| 70 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 70 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 71 | 71 |
swarm.Annotations{
|
| 72 | 72 |
Name: testName, |
| 73 | 73 |
}, |
| ... | ... |
@@ -97,7 +97,7 @@ func (s *DockerSwarmSuite) TestServiceCreateWithSecretSourceTarget(c *check.C) {
|
| 97 | 97 |
|
| 98 | 98 |
serviceName := "test-service-secret" |
| 99 | 99 |
testName := "test_secret" |
| 100 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 100 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 101 | 101 |
swarm.Annotations{
|
| 102 | 102 |
Name: testName, |
| 103 | 103 |
}, |
| ... | ... |
@@ -129,14 +129,14 @@ func (s *DockerSwarmSuite) TestServiceCreateMountTmpfs(c *check.C) {
|
| 129 | 129 |
|
| 130 | 130 |
var tasks []swarm.Task |
| 131 | 131 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 132 |
- tasks = d.getServiceTasks(c, id) |
|
| 132 |
+ tasks = d.GetServiceTasks(c, id) |
|
| 133 | 133 |
return len(tasks) > 0, nil |
| 134 | 134 |
}, checker.Equals, true) |
| 135 | 135 |
|
| 136 | 136 |
task := tasks[0] |
| 137 | 137 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 138 | 138 |
if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" {
|
| 139 |
- task = d.getTask(c, task.ID) |
|
| 139 |
+ task = d.GetTask(c, task.ID) |
|
| 140 | 140 |
} |
| 141 | 141 |
return task.NodeID != "" && task.Status.ContainerStatus.ContainerID != "", nil |
| 142 | 142 |
}, checker.Equals, true) |
| ... | ... |
@@ -22,7 +22,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) {
|
| 22 | 22 |
// build image with health-check |
| 23 | 23 |
// note: build via `daemon.BuildImageWithOut`; do not use `buildImage` |
| 24 | 24 |
imageName := "testhealth" |
| 25 |
- _, _, err := d.buildImageWithOut(imageName, |
|
| 25 |
+ _, _, err := d.BuildImageWithOut(imageName, |
|
| 26 | 26 |
`FROM busybox |
| 27 | 27 |
RUN touch /status |
| 28 | 28 |
HEALTHCHECK --interval=1s --timeout=1s --retries=1\ |
| ... | ... |
@@ -37,7 +37,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) {
|
| 37 | 37 |
|
| 38 | 38 |
var tasks []swarm.Task |
| 39 | 39 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 40 |
- tasks = d.getServiceTasks(c, id) |
|
| 40 |
+ tasks = d.GetServiceTasks(c, id) |
|
| 41 | 41 |
return tasks, nil |
| 42 | 42 |
}, checker.HasLen, 1) |
| 43 | 43 |
|
| ... | ... |
@@ -45,7 +45,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) {
|
| 45 | 45 |
|
| 46 | 46 |
// wait for task to start |
| 47 | 47 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 48 |
- task = d.getTask(c, task.ID) |
|
| 48 |
+ task = d.GetTask(c, task.ID) |
|
| 49 | 49 |
return task.Status.State, nil |
| 50 | 50 |
}, checker.Equals, swarm.TaskStateRunning) |
| 51 | 51 |
containerID := task.Status.ContainerStatus.ContainerID |
| ... | ... |
@@ -66,7 +66,7 @@ func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) {
|
| 66 | 66 |
|
| 67 | 67 |
// Task should be terminated |
| 68 | 68 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 69 |
- task = d.getTask(c, task.ID) |
|
| 69 |
+ task = d.GetTask(c, task.ID) |
|
| 70 | 70 |
return task.Status.State, nil |
| 71 | 71 |
}, checker.Equals, swarm.TaskStateFailed) |
| 72 | 72 |
|
| ... | ... |
@@ -84,7 +84,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) {
|
| 84 | 84 |
|
| 85 | 85 |
// service started from this image won't pass health check |
| 86 | 86 |
imageName := "testhealth" |
| 87 |
- _, _, err := d.buildImageWithOut(imageName, |
|
| 87 |
+ _, _, err := d.BuildImageWithOut(imageName, |
|
| 88 | 88 |
`FROM busybox |
| 89 | 89 |
HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ |
| 90 | 90 |
CMD cat /status`, |
| ... | ... |
@@ -98,7 +98,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) {
|
| 98 | 98 |
|
| 99 | 99 |
var tasks []swarm.Task |
| 100 | 100 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 101 |
- tasks = d.getServiceTasks(c, id) |
|
| 101 |
+ tasks = d.GetServiceTasks(c, id) |
|
| 102 | 102 |
return tasks, nil |
| 103 | 103 |
}, checker.HasLen, 1) |
| 104 | 104 |
|
| ... | ... |
@@ -106,7 +106,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) {
|
| 106 | 106 |
|
| 107 | 107 |
// wait for task to start |
| 108 | 108 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 109 |
- task = d.getTask(c, task.ID) |
|
| 109 |
+ task = d.GetTask(c, task.ID) |
|
| 110 | 110 |
return task.Status.State, nil |
| 111 | 111 |
}, checker.Equals, swarm.TaskStateStarting) |
| 112 | 112 |
|
| ... | ... |
@@ -120,7 +120,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) {
|
| 120 | 120 |
}, checker.GreaterThan, 0) |
| 121 | 121 |
|
| 122 | 122 |
// task should be blocked at starting status |
| 123 |
- task = d.getTask(c, task.ID) |
|
| 123 |
+ task = d.GetTask(c, task.ID) |
|
| 124 | 124 |
c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) |
| 125 | 125 |
|
| 126 | 126 |
// make it healthy |
| ... | ... |
@@ -128,7 +128,7 @@ func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) {
|
| 128 | 128 |
|
| 129 | 129 |
// Task should be at running status |
| 130 | 130 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 131 |
- task = d.getTask(c, task.ID) |
|
| 131 |
+ task = d.GetTask(c, task.ID) |
|
| 132 | 132 |
return task.Status.State, nil |
| 133 | 133 |
}, checker.Equals, swarm.TaskStateRunning) |
| 134 | 134 |
} |
| ... | ... |
@@ -142,7 +142,7 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) {
|
| 142 | 142 |
|
| 143 | 143 |
// service started from this image won't pass health check |
| 144 | 144 |
imageName := "testhealth" |
| 145 |
- _, _, err := d.buildImageWithOut(imageName, |
|
| 145 |
+ _, _, err := d.BuildImageWithOut(imageName, |
|
| 146 | 146 |
`FROM busybox |
| 147 | 147 |
HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ |
| 148 | 148 |
CMD cat /status`, |
| ... | ... |
@@ -156,7 +156,7 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) {
|
| 156 | 156 |
|
| 157 | 157 |
var tasks []swarm.Task |
| 158 | 158 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 159 |
- tasks = d.getServiceTasks(c, id) |
|
| 159 |
+ tasks = d.GetServiceTasks(c, id) |
|
| 160 | 160 |
return tasks, nil |
| 161 | 161 |
}, checker.HasLen, 1) |
| 162 | 162 |
|
| ... | ... |
@@ -164,7 +164,7 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) {
|
| 164 | 164 |
|
| 165 | 165 |
// wait for task to start |
| 166 | 166 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 167 |
- task = d.getTask(c, task.ID) |
|
| 167 |
+ task = d.GetTask(c, task.ID) |
|
| 168 | 168 |
return task.Status.State, nil |
| 169 | 169 |
}, checker.Equals, swarm.TaskStateStarting) |
| 170 | 170 |
|
| ... | ... |
@@ -178,14 +178,14 @@ func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) {
|
| 178 | 178 |
}, checker.GreaterThan, 0) |
| 179 | 179 |
|
| 180 | 180 |
// task should be blocked at starting status |
| 181 |
- task = d.getTask(c, task.ID) |
|
| 181 |
+ task = d.GetTask(c, task.ID) |
|
| 182 | 182 |
c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) |
| 183 | 183 |
|
| 184 | 184 |
// make it healthy |
| 185 | 185 |
d.Cmd("exec", containerID, "touch", "/status")
|
| 186 | 186 |
// Task should be at running status |
| 187 | 187 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 188 |
- task = d.getTask(c, task.ID) |
|
| 188 |
+ task = d.GetTask(c, task.ID) |
|
| 189 | 189 |
return task.Status.State, nil |
| 190 | 190 |
}, checker.Equals, swarm.TaskStateRunning) |
| 191 | 191 |
} |
| ... | ... |
@@ -38,7 +38,7 @@ func (s *DockerSwarmSuite) TestServiceLogs(c *check.C) {
|
| 38 | 38 |
|
| 39 | 39 |
// make sure task has been deployed. |
| 40 | 40 |
waitAndAssert(c, defaultReconciliationTimeout, |
| 41 |
- d.checkActiveContainerCount, checker.Equals, len(services)) |
|
| 41 |
+ d.CheckActiveContainerCount, checker.Equals, len(services)) |
|
| 42 | 42 |
|
| 43 | 43 |
for name, message := range services {
|
| 44 | 44 |
out, err := d.Cmd("service", "logs", name)
|
| ... | ... |
@@ -60,10 +60,10 @@ func (s *DockerSwarmSuite) TestServiceLogsFollow(c *check.C) {
|
| 60 | 60 |
c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") |
| 61 | 61 |
|
| 62 | 62 |
// make sure task has been deployed. |
| 63 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 63 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 64 | 64 |
|
| 65 | 65 |
args := []string{"service", "logs", "-f", name}
|
| 66 |
- cmd := exec.Command(dockerBinary, d.prependHostArg(args)...) |
|
| 66 |
+ cmd := exec.Command(dockerBinary, d.PrependHostArg(args)...) |
|
| 67 | 67 |
r, w := io.Pipe() |
| 68 | 68 |
cmd.Stdout = w |
| 69 | 69 |
cmd.Stderr = w |
| ... | ... |
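`prependHostArg` becomes `PrependHostArg`, letting callers outside the package build raw `exec.Cmd`s against a specific daemon, as this hunk does. A plausible shape, assuming a `Sock()` accessor for the daemon's socket address (not shown in this diff):

```go
// Assumed behavior: leave explicit --host/-H flags alone, otherwise
// target this daemon's socket.
func (d *Daemon) PrependHostArg(args []string) []string {
	for _, arg := range args {
		if arg == "--host" || arg == "-H" {
			return args
		}
	}
	return append([]string{"--host", d.Sock()}, args...) // d.Sock() assumed
}
```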
@@ -20,7 +20,7 @@ func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) {
|
| 20 | 20 |
// Create a service with a port mapping of 8080:8081. |
| 21 | 21 |
out, err := d.Cmd(serviceArgs...) |
| 22 | 22 |
c.Assert(err, checker.IsNil) |
| 23 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 23 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 24 | 24 |
|
| 25 | 25 |
// Update the service: changed the port mapping from 8080:8081 to 8082:8083. |
| 26 | 26 |
_, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName)
|
| ... | ... |
@@ -50,39 +50,39 @@ func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) {
|
| 50 | 50 |
d := s.AddDaemon(c, true, true) |
| 51 | 51 |
out, err := d.Cmd("service", "create", "--name=test", "busybox", "top")
|
| 52 | 52 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 53 |
- service := d.getService(c, "test") |
|
| 53 |
+ service := d.GetService(c, "test") |
|
| 54 | 54 |
c.Assert(service.Spec.Labels, checker.HasLen, 0) |
| 55 | 55 |
|
| 56 | 56 |
// add label to empty set |
| 57 | 57 |
out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar")
|
| 58 | 58 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 59 |
- service = d.getService(c, "test") |
|
| 59 |
+ service = d.GetService(c, "test") |
|
| 60 | 60 |
c.Assert(service.Spec.Labels, checker.HasLen, 1) |
| 61 | 61 |
c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") |
| 62 | 62 |
|
| 63 | 63 |
// add label to non-empty set |
| 64 | 64 |
out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar")
|
| 65 | 65 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 66 |
- service = d.getService(c, "test") |
|
| 66 |
+ service = d.GetService(c, "test") |
|
| 67 | 67 |
c.Assert(service.Spec.Labels, checker.HasLen, 2) |
| 68 | 68 |
c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") |
| 69 | 69 |
|
| 70 | 70 |
out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2")
|
| 71 | 71 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 72 |
- service = d.getService(c, "test") |
|
| 72 |
+ service = d.GetService(c, "test") |
|
| 73 | 73 |
c.Assert(service.Spec.Labels, checker.HasLen, 1) |
| 74 | 74 |
c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") |
| 75 | 75 |
|
| 76 | 76 |
out, err = d.Cmd("service", "update", "test", "--label-rm", "foo")
|
| 77 | 77 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 78 |
- service = d.getService(c, "test") |
|
| 78 |
+ service = d.GetService(c, "test") |
|
| 79 | 79 |
c.Assert(service.Spec.Labels, checker.HasLen, 0) |
| 80 | 80 |
c.Assert(service.Spec.Labels["foo"], checker.Equals, "") |
| 81 | 81 |
|
| 82 | 82 |
// now make sure we can add again |
| 83 | 83 |
out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar")
|
| 84 | 84 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 85 |
- service = d.getService(c, "test") |
|
| 85 |
+ service = d.GetService(c, "test") |
|
| 86 | 86 |
c.Assert(service.Spec.Labels, checker.HasLen, 1) |
| 87 | 87 |
c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") |
| 88 | 88 |
} |
| ... | ... |
@@ -90,7 +90,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) {
|
| 90 | 90 |
func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) {
|
| 91 | 91 |
d := s.AddDaemon(c, true, true) |
| 92 | 92 |
testName := "test_secret" |
| 93 |
- id := d.createSecret(c, swarm.SecretSpec{
|
|
| 93 |
+ id := d.CreateSecret(c, swarm.SecretSpec{
|
|
| 94 | 94 |
swarm.Annotations{
|
| 95 | 95 |
Name: testName, |
| 96 | 96 |
}, |
| ... | ... |
@@ -104,7 +104,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) {
|
| 104 | 104 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 105 | 105 |
|
| 106 | 106 |
// add secret |
| 107 |
- out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
|
|
| 107 |
+ out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
|
|
| 108 | 108 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 109 | 109 |
|
| 110 | 110 |
out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
|
| ... | ... |
@@ -119,7 +119,7 @@ func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) {
|
| 119 | 119 |
c.Assert(refs[0].File.Name, checker.Equals, testTarget) |
| 120 | 120 |
|
| 121 | 121 |
// remove |
| 122 |
- out, err = d.cmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName)
|
|
| 122 |
+ out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName)
|
|
| 123 | 123 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 124 | 124 |
|
| 125 | 125 |
out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
|
| ... | ... |
@@ -15,6 +15,7 @@ import ( |
| 15 | 15 |
"time" |
| 16 | 16 |
|
| 17 | 17 |
"github.com/docker/docker/api/types/swarm" |
| 18 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 18 | 19 |
"github.com/docker/docker/pkg/integration/checker" |
| 19 | 20 |
"github.com/docker/libnetwork/driverapi" |
| 20 | 21 |
"github.com/docker/libnetwork/ipamapi" |
| ... | ... |
@@ -27,7 +28,7 @@ func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) {
|
| 27 | 27 |
d := s.AddDaemon(c, true, true) |
| 28 | 28 |
|
| 29 | 29 |
getSpec := func() swarm.Spec {
|
| 30 |
- sw := d.getSwarm(c) |
|
| 30 |
+ sw := d.GetSwarm(c) |
|
| 31 | 31 |
return sw.Spec |
| 32 | 32 |
} |
| 33 | 33 |
|
| ... | ... |
@@ -50,7 +51,7 @@ func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) {
|
| 50 | 50 |
d := s.AddDaemon(c, false, false) |
| 51 | 51 |
|
| 52 | 52 |
getSpec := func() swarm.Spec {
|
| 53 |
- sw := d.getSwarm(c) |
|
| 53 |
+ sw := d.GetSwarm(c) |
|
| 54 | 54 |
return sw.Spec |
| 55 | 55 |
} |
| 56 | 56 |
|
| ... | ... |
@@ -96,7 +97,7 @@ func (s *DockerSwarmSuite) TestSwarmInitUnspecifiedAdvertiseAddr(c *check.C) {
|
| 96 | 96 |
func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) {
|
| 97 | 97 |
// init swarm mode and stop a daemon |
| 98 | 98 |
d := s.AddDaemon(c, true, true) |
| 99 |
- info, err := d.info() |
|
| 99 |
+ info, err := d.SwarmInfo() |
|
| 100 | 100 |
c.Assert(err, checker.IsNil) |
| 101 | 101 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
| 102 | 102 |
c.Assert(d.Stop(), checker.IsNil) |
| ... | ... |
@@ -104,13 +105,15 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) {
|
| 104 | 104 |
// start a daemon with --cluster-store and --cluster-advertise |
| 105 | 105 |
err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375")
|
| 106 | 106 |
c.Assert(err, checker.NotNil) |
| 107 |
- content, _ := ioutil.ReadFile(d.logFile.Name()) |
|
| 107 |
+ content, err := d.ReadLogFile() |
|
| 108 |
+ c.Assert(err, checker.IsNil) |
|
| 108 | 109 |
c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") |
| 109 | 110 |
|
| 110 | 111 |
// start a daemon with --live-restore |
| 111 | 112 |
err = d.Start("--live-restore")
|
| 112 | 113 |
c.Assert(err, checker.NotNil) |
| 113 |
- content, _ = ioutil.ReadFile(d.logFile.Name()) |
|
| 114 |
+ content, err = d.ReadLogFile() |
|
| 115 |
+ c.Assert(err, checker.IsNil) |
|
| 114 | 116 |
c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") |
| 115 | 117 |
// restart for teardown |
| 116 | 118 |
c.Assert(d.Start(), checker.IsNil) |
| ... | ... |
@@ -133,9 +136,9 @@ func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) {
|
| 133 | 133 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 134 | 134 |
|
| 135 | 135 |
// make sure task has been deployed. |
| 136 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 136 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 137 | 137 |
|
| 138 |
- containers := d.activeContainers() |
|
| 138 |
+ containers := d.ActiveContainers() |
|
| 139 | 139 |
out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0])
|
| 140 | 140 |
c.Assert(err, checker.IsNil, check.Commentf(out)) |
| 141 | 141 |
c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid"))
|
| ... | ... |
@@ -211,7 +214,7 @@ func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) {
|
| 211 | 211 |
c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") |
| 212 | 212 |
|
| 213 | 213 |
// make sure task has been deployed. |
| 214 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) |
|
| 214 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 3) |
|
| 215 | 215 |
|
| 216 | 216 |
filter := "name=redis-cluster" |
| 217 | 217 |
|
| ... | ... |
@@ -240,10 +243,10 @@ func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) {
|
| 240 | 240 |
out, err = d.Cmd("service", "update", "--publish-add", "80:80", name)
|
| 241 | 241 |
c.Assert(err, checker.IsNil) |
| 242 | 242 |
|
| 243 |
- out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name)
|
|
| 243 |
+ out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name)
|
|
| 244 | 244 |
c.Assert(err, checker.IsNil) |
| 245 | 245 |
|
| 246 |
- out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name)
|
|
| 246 |
+ out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name)
|
|
| 247 | 247 |
c.Assert(err, checker.NotNil) |
| 248 | 248 |
|
| 249 | 249 |
out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name)
|
| ... | ... |
@@ -260,7 +263,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceWithGroup(c *check.C) {
|
| 260 | 260 |
c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") |
| 261 | 261 |
|
| 262 | 262 |
// make sure task has been deployed. |
| 263 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 263 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 264 | 264 |
|
| 265 | 265 |
out, err = d.Cmd("ps", "-q")
|
| 266 | 266 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -326,7 +329,7 @@ func (s *DockerSwarmSuite) TestSwarmContainerAttachByNetworkId(c *check.C) {
|
| 326 | 326 |
out, err = d.Cmd("run", "-d", "--net", networkID, "busybox", "top")
|
| 327 | 327 |
c.Assert(err, checker.IsNil) |
| 328 | 328 |
cID := strings.TrimSpace(out) |
| 329 |
- d.waitRun(cID) |
|
| 329 |
+ d.WaitRun(cID) |
|
| 330 | 330 |
|
| 331 | 331 |
_, err = d.Cmd("rm", "-f", cID)
|
| 332 | 332 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -449,7 +452,7 @@ func (s *DockerSwarmSuite) TestPsListContainersFilterIsTask(c *check.C) {
|
| 449 | 449 |
c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") |
| 450 | 450 |
|
| 451 | 451 |
// make sure task has been deployed. |
| 452 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkServiceRunningTasks(name), checker.Equals, 1) |
|
| 452 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceRunningTasks(name), checker.Equals, 1) |
|
| 453 | 453 |
|
| 454 | 454 |
// Filter non-tasks |
| 455 | 455 |
out, err = d.Cmd("ps", "-a", "-q", "--filter=is-task=false")
|
| ... | ... |
@@ -664,7 +667,7 @@ func (s *DockerSwarmSuite) TestSwarmNetworkPlugin(c *check.C) {
|
| 664 | 664 |
func (s *DockerSwarmSuite) TestSwarmServiceEnvFile(c *check.C) {
|
| 665 | 665 |
d := s.AddDaemon(c, true, true) |
| 666 | 666 |
|
| 667 |
- path := filepath.Join(d.folder, "env.txt") |
|
| 667 |
+ path := filepath.Join(d.Folder, "env.txt") |
|
| 668 | 668 |
err := ioutil.WriteFile(path, []byte("VAR1=A\nVAR2=A\n"), 0644)
|
| 669 | 669 |
c.Assert(err, checker.IsNil) |
| 670 | 670 |
|
| ... | ... |
@@ -692,7 +695,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) {
|
| 692 | 692 |
c.Assert(err, checker.IsNil) |
| 693 | 693 |
|
| 694 | 694 |
// Make sure task has been deployed. |
| 695 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 695 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 696 | 696 |
|
| 697 | 697 |
// We need to get the container id. |
| 698 | 698 |
out, err = d.Cmd("ps", "-a", "-q", "--no-trunc")
|
| ... | ... |
@@ -707,7 +710,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) {
|
| 707 | 707 |
out, err = d.Cmd("service", "rm", name)
|
| 708 | 708 |
c.Assert(err, checker.IsNil) |
| 709 | 709 |
// Make sure container has been destroyed. |
| 710 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) |
|
| 710 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) |
|
| 711 | 711 |
|
| 712 | 712 |
// With --tty |
| 713 | 713 |
expectedOutput = "TTY" |
| ... | ... |
@@ -715,7 +718,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTY(c *check.C) {
|
| 715 | 715 |
c.Assert(err, checker.IsNil) |
| 716 | 716 |
|
| 717 | 717 |
// Make sure task has been deployed. |
| 718 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 718 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 719 | 719 |
|
| 720 | 720 |
// We need to get the container id. |
| 721 | 721 |
out, err = d.Cmd("ps", "-a", "-q", "--no-trunc")
|
| ... | ... |
@@ -736,7 +739,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTTYUpdate(c *check.C) {
|
| 736 | 736 |
c.Assert(err, checker.IsNil) |
| 737 | 737 |
|
| 738 | 738 |
// Make sure task has been deployed. |
| 739 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 739 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 740 | 740 |
|
| 741 | 741 |
out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name)
|
| 742 | 742 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -759,7 +762,7 @@ func (s *DockerSwarmSuite) TestDNSConfig(c *check.C) {
|
| 759 | 759 |
c.Assert(err, checker.IsNil) |
| 760 | 760 |
|
| 761 | 761 |
// Make sure task has been deployed. |
| 762 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 762 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 763 | 763 |
|
| 764 | 764 |
// We need to get the container id. |
| 765 | 765 |
out, err := d.Cmd("ps", "-a", "-q", "--no-trunc")
|
| ... | ... |
@@ -786,7 +789,7 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) {
|
| 786 | 786 |
c.Assert(err, checker.IsNil) |
| 787 | 787 |
|
| 788 | 788 |
// Make sure task has been deployed. |
| 789 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 789 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 790 | 790 |
|
| 791 | 791 |
_, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name)
|
| 792 | 792 |
c.Assert(err, checker.IsNil) |
| ... | ... |
@@ -796,18 +799,18 @@ func (s *DockerSwarmSuite) TestDNSConfigUpdate(c *check.C) {
|
| 796 | 796 |
c.Assert(strings.TrimSpace(out), checker.Equals, "{[1.2.3.4] [example.com] [timeout:3]}")
|
| 797 | 797 |
} |
| 798 | 798 |
|
| 799 |
-func getNodeStatus(c *check.C, d *SwarmDaemon) swarm.LocalNodeState {
|
|
| 800 |
- info, err := d.info() |
|
| 799 |
+func getNodeStatus(c *check.C, d *daemon.Swarm) swarm.LocalNodeState {
|
|
| 800 |
+ info, err := d.SwarmInfo() |
|
| 801 | 801 |
c.Assert(err, checker.IsNil) |
| 802 | 802 |
return info.LocalNodeState |
| 803 | 803 |
} |
| 804 | 804 |
|
| 805 |
-func checkSwarmLockedToUnlocked(c *check.C, d *SwarmDaemon, unlockKey string) {
|
|
| 805 |
+func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Swarm, unlockKey string) {
|
|
| 806 | 806 |
c.Assert(d.Restart(), checker.IsNil) |
| 807 | 807 |
status := getNodeStatus(c, d) |
| 808 | 808 |
if status == swarm.LocalNodeStateLocked {
|
| 809 | 809 |
// it must not have had time to update to the unlocked state - unlock, wait 3 seconds, and try again |
| 810 |
- cmd := d.command("swarm", "unlock")
|
|
| 810 |
+ cmd := d.Command("swarm", "unlock")
|
|
| 811 | 811 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 812 | 812 |
out, err := cmd.CombinedOutput() |
| 813 | 813 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
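The unlock-via-stdin sequence in this hunk recurs in nearly every locking test below. A hypothetical helper, built only from calls visible in this diff (d.Command, bytes.NewBufferString, CombinedOutput), could collapse the repetition; a sketch, not part of this change:

    // unlockSwarm feeds the key to `docker swarm unlock` on stdin and
    // asserts that the command succeeded.
    func unlockSwarm(c *check.C, d *daemon.Swarm, unlockKey string) {
        cmd := d.Command("swarm", "unlock")
        cmd.Stdin = bytes.NewBufferString(unlockKey)
        out, err := cmd.CombinedOutput()
        c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
    }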
| ... | ... |
@@ -821,7 +824,7 @@ func checkSwarmLockedToUnlocked(c *check.C, d *SwarmDaemon, unlockKey string) {
|
| 821 | 821 |
c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive) |
| 822 | 822 |
} |
| 823 | 823 |
|
| 824 |
-func checkSwarmUnlockedToLocked(c *check.C, d *SwarmDaemon) {
|
|
| 824 |
+func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Swarm) {
|
|
| 825 | 825 |
c.Assert(d.Restart(), checker.IsNil) |
| 826 | 826 |
status := getNodeStatus(c, d) |
| 827 | 827 |
if status == swarm.LocalNodeStateActive {
|
| ... | ... |
@@ -859,7 +862,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) {
|
| 859 | 859 |
c.Assert(d.Restart(), checker.IsNil) |
| 860 | 860 |
c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) |
| 861 | 861 |
|
| 862 |
- cmd := d.command("swarm", "unlock")
|
|
| 862 |
+ cmd := d.Command("swarm", "unlock")
|
|
| 863 | 863 |
cmd.Stdin = bytes.NewBufferString("wrong-secret-key")
|
| 864 | 864 |
out, err := cmd.CombinedOutput() |
| 865 | 865 |
c.Assert(err, checker.NotNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -867,7 +870,7 @@ func (s *DockerSwarmSuite) TestSwarmInitLocked(c *check.C) {
|
| 867 | 867 |
|
| 868 | 868 |
c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) |
| 869 | 869 |
|
| 870 |
- cmd = d.command("swarm", "unlock")
|
|
| 870 |
+ cmd = d.Command("swarm", "unlock")
|
|
| 871 | 871 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 872 | 872 |
out, err = cmd.CombinedOutput() |
| 873 | 873 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -897,7 +900,7 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) {
|
| 897 | 897 |
// It starts off locked |
| 898 | 898 |
c.Assert(d.Restart("--swarm-default-advertise-addr=lo"), checker.IsNil)
|
| 899 | 899 |
|
| 900 |
- info, err := d.info() |
|
| 900 |
+ info, err := d.SwarmInfo() |
|
| 901 | 901 |
c.Assert(err, checker.IsNil) |
| 902 | 902 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateLocked) |
| 903 | 903 |
|
| ... | ... |
@@ -912,14 +915,14 @@ func (s *DockerSwarmSuite) TestSwarmLeaveLocked(c *check.C) {
|
| 912 | 912 |
outs, err = d.Cmd("swarm", "leave", "--force")
|
| 913 | 913 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
|
| 914 | 914 |
|
| 915 |
- info, err = d.info() |
|
| 915 |
+ info, err = d.SwarmInfo() |
|
| 916 | 916 |
c.Assert(err, checker.IsNil) |
| 917 | 917 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) |
| 918 | 918 |
|
| 919 | 919 |
outs, err = d.Cmd("swarm", "init")
|
| 920 | 920 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
|
| 921 | 921 |
|
| 922 |
- info, err = d.info() |
|
| 922 |
+ info, err = d.SwarmInfo() |
|
| 923 | 923 |
c.Assert(err, checker.IsNil) |
| 924 | 924 |
c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) |
| 925 | 925 |
} |
| ... | ... |
@@ -956,10 +959,10 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
|
| 956 | 956 |
c.Assert(outs, checker.Equals, unlockKey+"\n") |
| 957 | 957 |
|
| 958 | 958 |
// The ones that got the cluster update should be set to locked |
| 959 |
- for _, d := range []*SwarmDaemon{d1, d3} {
|
|
| 959 |
+ for _, d := range []*daemon.Swarm{d1, d3} {
|
|
| 960 | 960 |
checkSwarmUnlockedToLocked(c, d) |
| 961 | 961 |
|
| 962 |
- cmd := d.command("swarm", "unlock")
|
|
| 962 |
+ cmd := d.Command("swarm", "unlock")
|
|
| 963 | 963 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 964 | 964 |
out, err := cmd.CombinedOutput() |
| 965 | 965 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -978,7 +981,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
|
| 978 | 978 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", outs))
|
| 979 | 979 |
|
| 980 | 980 |
// The ones that got the update are now set to unlocked |
| 981 |
- for _, d := range []*SwarmDaemon{d1, d3} {
|
|
| 981 |
+ for _, d := range []*daemon.Swarm{d1, d3} {
|
|
| 982 | 982 |
checkSwarmLockedToUnlocked(c, d, unlockKey) |
| 983 | 983 |
} |
| 984 | 984 |
|
| ... | ... |
@@ -986,7 +989,7 @@ func (s *DockerSwarmSuite) TestSwarmLockUnlockCluster(c *check.C) {
|
| 986 | 986 |
c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateLocked) |
| 987 | 987 |
|
| 988 | 988 |
// unlock it |
| 989 |
- cmd := d2.command("swarm", "unlock")
|
|
| 989 |
+ cmd := d2.Command("swarm", "unlock")
|
|
| 990 | 990 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 991 | 991 |
out, err := cmd.CombinedOutput() |
| 992 | 992 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -1037,10 +1040,10 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
|
| 1037 | 1037 |
d3 := s.AddDaemon(c, true, true) |
| 1038 | 1038 |
|
| 1039 | 1039 |
// both new nodes are locked |
| 1040 |
- for _, d := range []*SwarmDaemon{d2, d3} {
|
|
| 1040 |
+ for _, d := range []*daemon.Swarm{d2, d3} {
|
|
| 1041 | 1041 |
checkSwarmUnlockedToLocked(c, d) |
| 1042 | 1042 |
|
| 1043 |
- cmd := d.command("swarm", "unlock")
|
|
| 1043 |
+ cmd := d.Command("swarm", "unlock")
|
|
| 1044 | 1044 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 1045 | 1045 |
out, err := cmd.CombinedOutput() |
| 1046 | 1046 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -1048,7 +1051,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
|
| 1048 | 1048 |
} |
| 1049 | 1049 |
|
| 1050 | 1050 |
// get d3's cert |
| 1051 |
- d3cert, err := ioutil.ReadFile(filepath.Join(d3.folder, "root", "swarm", "certificates", "swarm-node.crt")) |
|
| 1051 |
+ d3cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) |
|
| 1052 | 1052 |
c.Assert(err, checker.IsNil) |
| 1053 | 1053 |
|
| 1054 | 1054 |
// demote manager back to worker - workers are not locked |
| ... | ... |
@@ -1061,9 +1064,9 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
|
| 1061 | 1061 |
// to be replaced, then the node still has the manager TLS key which is still locked |
| 1062 | 1062 |
// (because we never want a manager TLS key to be on disk unencrypted if the cluster |
| 1063 | 1063 |
// is set to autolock) |
| 1064 |
- waitAndAssert(c, defaultReconciliationTimeout, d3.checkControlAvailable, checker.False) |
|
| 1064 |
+ waitAndAssert(c, defaultReconciliationTimeout, d3.CheckControlAvailable, checker.False) |
|
| 1065 | 1065 |
waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
|
| 1066 |
- cert, err := ioutil.ReadFile(filepath.Join(d3.folder, "root", "swarm", "certificates", "swarm-node.crt")) |
|
| 1066 |
+ cert, err := ioutil.ReadFile(filepath.Join(d3.Folder, "root", "swarm", "certificates", "swarm-node.crt")) |
|
| 1067 | 1067 |
if err != nil {
|
| 1068 | 1068 |
return "", check.Commentf("error: %v", err)
|
| 1069 | 1069 |
} |
| ... | ... |
@@ -1111,7 +1114,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {
|
| 1111 | 1111 |
outs, _ = d.Cmd("node", "ls")
|
| 1112 | 1112 |
c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") |
| 1113 | 1113 |
|
| 1114 |
- cmd := d.command("swarm", "unlock")
|
|
| 1114 |
+ cmd := d.Command("swarm", "unlock")
|
|
| 1115 | 1115 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 1116 | 1116 |
out, err := cmd.CombinedOutput() |
| 1117 | 1117 |
|
| ... | ... |
@@ -1128,7 +1131,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {
|
| 1128 | 1128 |
|
| 1129 | 1129 |
c.Assert(d.Restart(), checker.IsNil) |
| 1130 | 1130 |
|
| 1131 |
- cmd = d.command("swarm", "unlock")
|
|
| 1131 |
+ cmd = d.Command("swarm", "unlock")
|
|
| 1132 | 1132 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 1133 | 1133 |
out, err = cmd.CombinedOutput() |
| 1134 | 1134 |
} |
| ... | ... |
@@ -1138,7 +1141,7 @@ func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {
|
| 1138 | 1138 |
outs, _ = d.Cmd("node", "ls")
|
| 1139 | 1139 |
c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") |
| 1140 | 1140 |
|
| 1141 |
- cmd = d.command("swarm", "unlock")
|
|
| 1141 |
+ cmd = d.Command("swarm", "unlock")
|
|
| 1142 | 1142 |
cmd.Stdin = bytes.NewBufferString(newUnlockKey) |
| 1143 | 1143 |
out, err = cmd.CombinedOutput() |
| 1144 | 1144 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -1191,13 +1194,13 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
|
| 1191 | 1191 |
c.Assert(d2.Restart(), checker.IsNil) |
| 1192 | 1192 |
c.Assert(d3.Restart(), checker.IsNil) |
| 1193 | 1193 |
|
| 1194 |
- for _, d := range []*SwarmDaemon{d2, d3} {
|
|
| 1194 |
+ for _, d := range []*daemon.Swarm{d2, d3} {
|
|
| 1195 | 1195 |
c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked) |
| 1196 | 1196 |
|
| 1197 | 1197 |
outs, _ := d.Cmd("node", "ls")
|
| 1198 | 1198 |
c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") |
| 1199 | 1199 |
|
| 1200 |
- cmd := d.command("swarm", "unlock")
|
|
| 1200 |
+ cmd := d.Command("swarm", "unlock")
|
|
| 1201 | 1201 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 1202 | 1202 |
out, err := cmd.CombinedOutput() |
| 1203 | 1203 |
|
| ... | ... |
@@ -1214,7 +1217,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
|
| 1214 | 1214 |
|
| 1215 | 1215 |
c.Assert(d.Restart(), checker.IsNil) |
| 1216 | 1216 |
|
| 1217 |
- cmd = d.command("swarm", "unlock")
|
|
| 1217 |
+ cmd = d.Command("swarm", "unlock")
|
|
| 1218 | 1218 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 1219 | 1219 |
out, err = cmd.CombinedOutput() |
| 1220 | 1220 |
} |
| ... | ... |
@@ -1224,7 +1227,7 @@ func (s *DockerSwarmSuite) TestSwarmClusterRotateUnlockKey(c *check.C) {
|
| 1224 | 1224 |
outs, _ = d.Cmd("node", "ls")
|
| 1225 | 1225 |
c.Assert(outs, checker.Contains, "Swarm is encrypted and needs to be unlocked") |
| 1226 | 1226 |
|
| 1227 |
- cmd = d.command("swarm", "unlock")
|
|
| 1227 |
+ cmd = d.Command("swarm", "unlock")
|
|
| 1228 | 1228 |
cmd.Stdin = bytes.NewBufferString(newUnlockKey) |
| 1229 | 1229 |
out, err = cmd.CombinedOutput() |
| 1230 | 1230 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -1260,7 +1263,7 @@ func (s *DockerSwarmSuite) TestSwarmAlternateLockUnlock(c *check.C) {
|
| 1260 | 1260 |
c.Assert(unlockKey, checker.Not(checker.Equals), "") |
| 1261 | 1261 |
checkSwarmUnlockedToLocked(c, d) |
| 1262 | 1262 |
|
| 1263 |
- cmd := d.command("swarm", "unlock")
|
|
| 1263 |
+ cmd := d.Command("swarm", "unlock")
|
|
| 1264 | 1264 |
cmd.Stdin = bytes.NewBufferString(unlockKey) |
| 1265 | 1265 |
out, err := cmd.CombinedOutput() |
| 1266 | 1266 |
c.Assert(err, checker.IsNil, check.Commentf("out: %v", string(out)))
|
| ... | ... |
@@ -1283,7 +1286,7 @@ func (s *DockerSwarmSuite) TestExtraHosts(c *check.C) {
|
| 1283 | 1283 |
c.Assert(err, checker.IsNil) |
| 1284 | 1284 |
|
| 1285 | 1285 |
// Make sure task has been deployed. |
| 1286 |
- waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) |
|
| 1286 |
+ waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1) |
|
| 1287 | 1287 |
|
| 1288 | 1288 |
// We need to get the container id. |
| 1289 | 1289 |
out, err := d.Cmd("ps", "-a", "-q", "--no-trunc")
|
| ... | ... |
@@ -1303,7 +1306,7 @@ func (s *DockerSwarmSuite) TestSwarmManagerAddress(c *check.C) {
|
| 1303 | 1303 |
d3 := s.AddDaemon(c, true, false) |
| 1304 | 1304 |
|
| 1305 | 1305 |
// Manager Addresses will always show Node 1's address |
| 1306 |
- expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.port)
|
|
| 1306 |
+ expectedOutput := fmt.Sprintf("Manager Addresses:\n 127.0.0.1:%d\n", d1.Port)
|
|
| 1307 | 1307 |
|
| 1308 | 1308 |
out, err := d1.Cmd("info")
|
| 1309 | 1309 |
c.Assert(err, checker.IsNil) |
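The same visibility rule explains every rename in this diff: Go exports only identifiers that start with an upper-case letter, and these helpers are now consumed across a package boundary. A compilable sketch of the exported surface as exercised above, assuming the file's existing imports:

    func exampleExportedAPI(c *check.C, d *daemon.Swarm) {
        _ = d.Port                 // was d.port
        _ = d.Folder               // was d.folder
        _ = d.Sock()               // was d.sock()
        info, err := d.SwarmInfo() // was d.info()
        c.Assert(err, checker.IsNil)
        _ = info.LocalNodeState    // swarm.LocalNodeState, as in getNodeStatus
    }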
| ... | ... |
@@ -36,8 +36,8 @@ func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) {
|
| 36 | 36 |
defer os.RemoveAll(tmpDirNotExists) |
| 37 | 37 |
|
| 38 | 38 |
// we need to find the uid and gid of the remapped root from the daemon's root dir info |
| 39 |
- uidgid := strings.Split(filepath.Base(s.d.root), ".") |
|
| 40 |
- c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root)))
|
|
| 39 |
+ uidgid := strings.Split(filepath.Base(s.d.Root), ".") |
|
| 40 |
+ c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.Root)))
|
|
| 41 | 41 |
uid, err := strconv.Atoi(uidgid[0]) |
| 42 | 42 |
c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid"))
|
| 43 | 43 |
gid, err := strconv.Atoi(uidgid[1]) |
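For readers unfamiliar with user-namespace remapping: the daemon's root directory for a remapped root is named `<uid>.<gid>`, which is what the split-and-parse above relies on. A standalone sketch with an illustrative value:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        base := "100000.100000" // e.g. filepath.Base(s.d.Root) for a remapped daemon
        uidgid := strings.Split(base, ".")
        uid, _ := strconv.Atoi(uidgid[0])
        gid, _ := strconv.Atoi(uidgid[1])
        fmt.Println(uid, gid) // 100000 100000
    }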
| ... | ... |
@@ -7,6 +7,7 @@ import ( |
| 7 | 7 |
"net/http" |
| 8 | 8 |
"strings" |
| 9 | 9 |
|
| 10 |
+ "github.com/docker/docker/pkg/integration" |
|
| 10 | 11 |
"github.com/docker/docker/pkg/integration/checker" |
| 11 | 12 |
"github.com/go-check/check" |
| 12 | 13 |
) |
| ... | ... |
@@ -150,7 +151,7 @@ func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) {
|
| 150 | 150 |
|
| 151 | 151 |
res, body, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json")
|
| 152 | 152 |
c.Assert(err, checker.IsNil) |
| 153 |
- b, err2 := readBody(body) |
|
| 153 |
+ b, err2 := integration.ReadBody(body) |
|
| 154 | 154 |
c.Assert(err2, checker.IsNil) |
| 155 | 155 |
c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) |
| 156 | 156 |
c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") |
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"runtime" |
| 6 | 6 |
"strings" |
| 7 | 7 |
|
| 8 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 8 | 9 |
"github.com/docker/docker/pkg/integration/checker" |
| 9 | 10 |
"github.com/go-check/check" |
| 10 | 11 |
) |
| ... | ... |
@@ -25,7 +26,7 @@ func init() {
|
| 25 | 25 |
// relative impact of each individual operation. As part of this suite, all |
| 26 | 26 |
// images are removed after each test. |
| 27 | 27 |
type DockerHubPullSuite struct {
|
| 28 |
- d *Daemon |
|
| 28 |
+ d *daemon.Daemon |
|
| 29 | 29 |
ds *DockerSuite |
| 30 | 30 |
} |
| 31 | 31 |
|
| ... | ... |
@@ -39,7 +40,9 @@ func newDockerHubPullSuite() *DockerHubPullSuite {
|
| 39 | 39 |
// SetUpSuite starts the suite daemon. |
| 40 | 40 |
func (s *DockerHubPullSuite) SetUpSuite(c *check.C) {
|
| 41 | 41 |
testRequires(c, DaemonIsLinux) |
| 42 |
- s.d = NewDaemon(c) |
|
| 42 |
+ s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
|
|
| 43 |
+ Experimental: experimentalDaemon, |
|
| 44 |
+ }) |
|
| 43 | 45 |
err := s.d.Start() |
| 44 | 46 |
c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err))
|
| 45 | 47 |
} |
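Every suite's setup now funnels through the same constructor, and the Config struct means future daemon options can be added without touching each call site. The full pattern as it appears in this diff (Experimental is the only field set here):

    s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
        Experimental: experimentalDaemon,
    })
    err := s.d.Start()
    c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err))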
| ... | ... |
@@ -84,7 +87,7 @@ func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, e |
| 84 | 84 |
|
| 85 | 85 |
// MakeCmd returns an exec.Cmd command to run against the suite daemon. |
| 86 | 86 |
func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd {
|
| 87 |
- args := []string{"--host", s.d.sock(), name}
|
|
| 87 |
+ args := []string{"--host", s.d.Sock(), name}
|
|
| 88 | 88 |
args = append(args, arg...) |
| 89 | 89 |
return exec.Command(dockerBinary, args...) |
| 90 | 90 |
} |
| ... | ... |
@@ -3,7 +3,6 @@ package main |
| 3 | 3 |
import ( |
| 4 | 4 |
"bufio" |
| 5 | 5 |
"bytes" |
| 6 |
- "crypto/tls" |
|
| 7 | 6 |
"encoding/json" |
| 8 | 7 |
"errors" |
| 9 | 8 |
"fmt" |
| ... | ... |
@@ -24,13 +23,14 @@ import ( |
| 24 | 24 |
|
| 25 | 25 |
"github.com/docker/docker/api/types" |
| 26 | 26 |
volumetypes "github.com/docker/docker/api/types/volume" |
| 27 |
+ "github.com/docker/docker/integration-cli/daemon" |
|
| 27 | 28 |
"github.com/docker/docker/opts" |
| 28 | 29 |
"github.com/docker/docker/pkg/httputils" |
| 30 |
+ "github.com/docker/docker/pkg/integration" |
|
| 29 | 31 |
"github.com/docker/docker/pkg/integration/checker" |
| 30 | 32 |
icmd "github.com/docker/docker/pkg/integration/cmd" |
| 31 | 33 |
"github.com/docker/docker/pkg/ioutils" |
| 32 | 34 |
"github.com/docker/docker/pkg/stringutils" |
| 33 |
- "github.com/docker/go-connections/tlsconfig" |
|
| 34 | 35 |
"github.com/docker/go-units" |
| 35 | 36 |
"github.com/go-check/check" |
| 36 | 37 |
) |
| ... | ... |
@@ -107,55 +107,12 @@ func daemonHost() string {
|
| 107 | 107 |
return daemonURLStr |
| 108 | 108 |
} |
| 109 | 109 |
|
| 110 |
-func getTLSConfig() (*tls.Config, error) {
|
|
| 111 |
- dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
|
|
| 112 |
- |
|
| 113 |
- if dockerCertPath == "" {
|
|
| 114 |
- return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable")
|
|
| 115 |
- } |
|
| 116 |
- |
|
| 117 |
- option := &tlsconfig.Options{
|
|
| 118 |
- CAFile: filepath.Join(dockerCertPath, "ca.pem"), |
|
| 119 |
- CertFile: filepath.Join(dockerCertPath, "cert.pem"), |
|
| 120 |
- KeyFile: filepath.Join(dockerCertPath, "key.pem"), |
|
| 121 |
- } |
|
| 122 |
- tlsConfig, err := tlsconfig.Client(*option) |
|
| 123 |
- if err != nil {
|
|
| 124 |
- return nil, err |
|
| 125 |
- } |
|
| 126 |
- |
|
| 127 |
- return tlsConfig, nil |
|
| 128 |
-} |
|
| 129 |
- |
|
| 130 |
-func sockConn(timeout time.Duration, daemon string) (net.Conn, error) {
|
|
| 131 |
- if daemon == "" {
|
|
| 132 |
- daemon = daemonHost() |
|
| 133 |
- } |
|
| 134 |
- daemonURL, err := url.Parse(daemon) |
|
| 135 |
- if err != nil {
|
|
| 136 |
- return nil, fmt.Errorf("could not parse url %q: %v", daemon, err)
|
|
| 137 |
- } |
|
| 138 |
- |
|
| 139 |
- var c net.Conn |
|
| 140 |
- switch daemonURL.Scheme {
|
|
| 141 |
- case "npipe": |
|
| 142 |
- return npipeDial(daemonURL.Path, timeout) |
|
| 143 |
- case "unix": |
|
| 144 |
- return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) |
|
| 145 |
- case "tcp": |
|
| 146 |
- if os.Getenv("DOCKER_TLS_VERIFY") != "" {
|
|
| 147 |
- // Setup the socket TLS configuration. |
|
| 148 |
- tlsConfig, err := getTLSConfig() |
|
| 149 |
- if err != nil {
|
|
| 150 |
- return nil, err |
|
| 151 |
- } |
|
| 152 |
- dialer := &net.Dialer{Timeout: timeout}
|
|
| 153 |
- return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) |
|
| 154 |
- } |
|
| 155 |
- return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) |
|
| 156 |
- default: |
|
| 157 |
- return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon)
|
|
| 110 |
+// FIXME(vdemeester) this should probably move entirely to the daemon struct/methods |
|
| 111 |
+func sockConn(timeout time.Duration, daemonStr string) (net.Conn, error) {
|
|
| 112 |
+ if daemonStr == "" {
|
|
| 113 |
+ daemonStr = daemonHost() |
|
| 158 | 114 |
} |
| 115 |
+ return daemon.SockConn(timeout, daemonStr) |
|
| 159 | 116 |
} |
| 160 | 117 |
|
| 161 | 118 |
func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) {
|
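The dial logic deleted above presumably moved near-verbatim into daemon.SockConn; the thin wrapper keeps the old convenience of defaulting to daemonHost(). Reconstructed from the removed lines (npipeDial is Windows-only and getTLSConfig reads DOCKER_CERT_PATH, both assumed to move along with it):

    // SockConn opens a connection to the daemon at the given host URL,
    // handling the npipe, unix and tcp (optionally TLS) schemes.
    func SockConn(timeout time.Duration, daemon string) (net.Conn, error) {
        daemonURL, err := url.Parse(daemon)
        if err != nil {
            return nil, fmt.Errorf("could not parse url %q: %v", daemon, err)
        }
        switch daemonURL.Scheme {
        case "npipe":
            return npipeDial(daemonURL.Path, timeout)
        case "unix":
            return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout)
        case "tcp":
            if os.Getenv("DOCKER_TLS_VERIFY") != "" {
                tlsConfig, err := getTLSConfig() // ca.pem/cert.pem/key.pem under DOCKER_CERT_PATH
                if err != nil {
                    return nil, err
                }
                dialer := &net.Dialer{Timeout: timeout}
                return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig)
            }
            return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout)
        default:
            return nil, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon)
        }
    }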
| ... | ... |
@@ -168,7 +125,7 @@ func sockRequest(method, endpoint string, data interface{}) (int, []byte, error)
|
| 168 | 168 |
if err != nil {
|
| 169 | 169 |
return -1, nil, err |
| 170 | 170 |
} |
| 171 |
- b, err := readBody(body) |
|
| 171 |
+ b, err := integration.ReadBody(body) |
|
| 172 | 172 |
return res.StatusCode, b, err |
| 173 | 173 |
} |
| 174 | 174 |
|
| ... | ... |
@@ -226,11 +183,6 @@ func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string |
| 226 | 226 |
return req, client, nil |
| 227 | 227 |
} |
| 228 | 228 |
|
| 229 |
-func readBody(b io.ReadCloser) ([]byte, error) {
|
|
| 230 |
- defer b.Close() |
|
| 231 |
- return ioutil.ReadAll(b) |
|
| 232 |
-} |
|
| 233 |
- |
|
| 234 | 229 |
func deleteContainer(container ...string) error {
|
| 235 | 230 |
result := icmd.RunCommand(dockerBinary, append([]string{"rm", "-fv"}, container...)...)
|
| 236 | 231 |
return result.Compare(icmd.Success) |
| ... | ... |
@@ -950,23 +902,7 @@ func getContainerState(c *check.C, id string) (int, bool, error) {
|
| 950 | 950 |
} |
| 951 | 951 |
|
| 952 | 952 |
func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd {
|
| 953 |
- return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) |
|
| 954 |
-} |
|
| 955 |
- |
|
| 956 |
-func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd {
|
|
| 957 |
- args := []string{}
|
|
| 958 |
- if host != "" {
|
|
| 959 |
- args = append(args, "--host", host) |
|
| 960 |
- } |
|
| 961 |
- args = append(args, "build", "-t", name) |
|
| 962 |
- if !useCache {
|
|
| 963 |
- args = append(args, "--no-cache") |
|
| 964 |
- } |
|
| 965 |
- args = append(args, buildFlags...) |
|
| 966 |
- args = append(args, "-") |
|
| 967 |
- buildCmd := exec.Command(dockerBinary, args...) |
|
| 968 |
- buildCmd.Stdin = strings.NewReader(dockerfile) |
|
| 969 |
- return buildCmd |
|
| 953 |
+ return daemon.BuildImageCmdWithHost(dockerBinary, name, dockerfile, "", useCache, buildFlags...) |
|
| 970 | 954 |
} |
| 971 | 955 |
|
| 972 | 956 |
func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) {
|
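buildImageCmdWithHost moves to the daemon package with the docker binary passed explicitly instead of read from the package-level variable. Based on the removed body, the relocated helper presumably assembles the command like this:

    // BuildImageCmdWithHost builds `docker [--host H] build -t name
    // [--no-cache] [flags...] -`, feeding the Dockerfile on stdin
    // (a reconstruction of the deleted helper, not the package's source).
    func BuildImageCmdWithHost(dockerBinary, name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd {
        args := []string{}
        if host != "" {
            args = append(args, "--host", host)
        }
        args = append(args, "build", "-t", name)
        if !useCache {
            args = append(args, "--no-cache")
        }
        args = append(args, buildFlags...)
        args = append(args, "-")
        buildCmd := exec.Command(dockerBinary, args...)
        buildCmd.Stdin = strings.NewReader(dockerfile)
        return buildCmd
    }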
| ... | ... |
@@ -1401,39 +1337,7 @@ func waitInspect(name, expr, expected string, timeout time.Duration) error {
|
| 1401 | 1401 |
} |
| 1402 | 1402 |
|
| 1403 | 1403 |
func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error {
|
| 1404 |
- after := time.After(timeout) |
|
| 1405 |
- |
|
| 1406 |
- args := append(arg, "inspect", "-f", expr, name) |
|
| 1407 |
- for {
|
|
| 1408 |
- result := icmd.RunCommand(dockerBinary, args...) |
|
| 1409 |
- if result.Error != nil {
|
|
| 1410 |
- if !strings.Contains(result.Stderr(), "No such") {
|
|
| 1411 |
- return fmt.Errorf("error executing docker inspect: %v\n%s",
|
|
| 1412 |
- result.Stderr(), result.Stdout()) |
|
| 1413 |
- } |
|
| 1414 |
- select {
|
|
| 1415 |
- case <-after: |
|
| 1416 |
- return result.Error |
|
| 1417 |
- default: |
|
| 1418 |
- time.Sleep(10 * time.Millisecond) |
|
| 1419 |
- continue |
|
| 1420 |
- } |
|
| 1421 |
- } |
|
| 1422 |
- |
|
| 1423 |
- out := strings.TrimSpace(result.Stdout()) |
|
| 1424 |
- if out == expected {
|
|
| 1425 |
- break |
|
| 1426 |
- } |
|
| 1427 |
- |
|
| 1428 |
- select {
|
|
| 1429 |
- case <-after: |
|
| 1430 |
- return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected)
|
|
| 1431 |
- default: |
|
| 1432 |
- } |
|
| 1433 |
- |
|
| 1434 |
- time.Sleep(100 * time.Millisecond) |
|
| 1435 |
- } |
|
| 1436 |
- return nil |
|
| 1404 |
+ return daemon.WaitInspectWithArgs(dockerBinary, name, expr, expected, timeout, arg...) |
|
| 1437 | 1405 |
} |
| 1438 | 1406 |
|
| 1439 | 1407 |
func getInspectBody(c *check.C, version, id string) []byte {
|
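Likewise, the inspect-polling loop is assumed to reappear as daemon.WaitInspectWithArgs with the binary as its first parameter. For reference, the deleted body retries every 100ms (10ms while the container does not yet exist) until the formatted inspect output equals expected or the timeout fires; a reconstruction, with the original break-then-return simplified to a direct return:

    func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error {
        after := time.After(timeout)
        args := append(arg, "inspect", "-f", expr, name)
        for {
            result := icmd.RunCommand(dockerBinary, args...)
            if result.Error != nil {
                if !strings.Contains(result.Stderr(), "No such") {
                    return fmt.Errorf("error executing docker inspect: %v\n%s",
                        result.Stderr(), result.Stdout())
                }
                select {
                case <-after:
                    return result.Error
                default:
                    time.Sleep(10 * time.Millisecond)
                    continue
                }
            }
            out := strings.TrimSpace(result.Stdout())
            if out == expected {
                return nil
            }
            select {
            case <-after:
                return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected)
            default:
            }
            time.Sleep(100 * time.Millisecond)
        }
    }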
| ... | ... |
@@ -5,6 +5,7 @@ import ( |
| 5 | 5 |
"errors" |
| 6 | 6 |
"fmt" |
| 7 | 7 |
"io" |
| 8 |
+ "io/ioutil" |
|
| 8 | 9 |
"os" |
| 9 | 10 |
"os/exec" |
| 10 | 11 |
"path/filepath" |
| ... | ... |
@@ -225,3 +226,9 @@ func RunAtDifferentDate(date time.Time, block func()) {
|
| 225 | 225 |
block() |
| 226 | 226 |
return |
| 227 | 227 |
} |
| 228 |
+ |
|
| 229 |
+// ReadBody reads the content of the specified ReadCloser and returns it |
|
| 230 |
+func ReadBody(b io.ReadCloser) ([]byte, error) {
|
|
| 231 |
+ defer b.Close() |
|
| 232 |
+ return ioutil.ReadAll(b) |
|
| 233 |
+} |
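With the helper exported from pkg/integration, call sites such as sockRequest read and close a response body in one step. A minimal usage sketch (the endpoint is illustrative; sockRequestRaw is the existing helper seen earlier in this diff):

    res, body, err := sockRequestRaw("GET", "/info", nil, "application/json")
    c.Assert(err, checker.IsNil)
    b, err := integration.ReadBody(body) // ReadBody closes the body for the caller
    c.Assert(err, checker.IsNil)
    c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
    c.Assert(len(b), checker.Not(checker.Equals), 0)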