integration-cli/docker_api_swarm_test.go
 // +build !windows
 
 package main
 
 import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"net"
 	"net/http"
 	"net/url"
 	"os"
 	"path/filepath"
 	"strings"
 	"sync"
 	"time"
 
 	"github.com/cloudflare/cfssl/csr"
 	"github.com/cloudflare/cfssl/helpers"
 	"github.com/cloudflare/cfssl/initca"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/swarmkit/ca"
 	"github.com/go-check/check"
 )
 
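 // defaultReconciliationTimeout is how long the tests below wait for the cluster to converge on an expected state.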
 var defaultReconciliationTimeout = 30 * time.Second
 
 func (s *DockerSwarmSuite) TestAPISwarmInit(c *check.C) {
 	// todo: should find a better way to verify that components are running than /info
 	d1 := s.AddDaemon(c, true, true)
 	info, err := d1.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.True)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 	c.Assert(info.Cluster.RootRotationInProgress, checker.False)
 
 	d2 := s.AddDaemon(c, true, false)
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.False)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
 	// Leaving cluster
 	c.Assert(d2.Leave(false), checker.IsNil)
 
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.False)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
 	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.JoinTokens(c).Worker, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
 
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.False)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
 	// Current state should be restored after restarts
 	d1.Stop(c)
 	d2.Stop(c)
 
 	d1.Start(c)
 	d2.Start(c)
 
 	info, err = d1.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.True)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.False)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *check.C) {
 	d1 := s.AddDaemon(c, false, false)
 	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
 
 	// todo: the error message differs depending on whether some components of the token are valid
 
 	d2 := s.AddDaemon(c, false, false)
 	err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
 	c.Assert(err, checker.NotNil)
 	c.Assert(err.Error(), checker.Contains, "join token is necessary")
 	info, err := d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
 	err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.ListenAddr}})
 	c.Assert(err, checker.NotNil)
 	c.Assert(err.Error(), checker.Contains, "invalid join token")
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
 	workerToken := d1.JoinTokens(c).Worker
 
 	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 	c.Assert(d2.Leave(false), checker.IsNil)
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
 	// change tokens
 	d1.RotateTokens(c)
 
 	err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}})
 	c.Assert(err, checker.NotNil)
 	c.Assert(err.Error(), checker.Contains, "join token is necessary")
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
 	workerToken = d1.JoinTokens(c).Worker
 
 	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 	c.Assert(d2.Leave(false), checker.IsNil)
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
 	// change spec, don't change tokens
 	d1.UpdateSwarm(c, func(s *swarm.Spec) {})
 
 	err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.ListenAddr}})
 	c.Assert(err, checker.NotNil)
 	c.Assert(err.Error(), checker.Contains, "join token is necessary")
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 
 	c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.ListenAddr}}), checker.IsNil)
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 	c.Assert(d2.Leave(false), checker.IsNil)
 	info, err = d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 }
 
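 // TestUpdateSwarmAddExternalCA checks that external CA entries can be added via a spec update and that an optional CA certificate is kept on the stored spec.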
 func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *check.C) {
 	d1 := s.AddDaemon(c, false, false)
 	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
 	d1.UpdateSwarm(c, func(s *swarm.Spec) {
 		s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
 			{
 				Protocol: swarm.ExternalCAProtocolCFSSL,
 				URL:      "https://thishasnoca.org",
 			},
 			{
 				Protocol: swarm.ExternalCAProtocolCFSSL,
 				URL:      "https://thishasacacert.org",
 				CACert:   "cacert",
 			},
 		}
 	})
 	info, err := d1.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs, checker.HasLen, 2)
 	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, checker.Equals, "")
 	c.Assert(info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, checker.Equals, "cacert")
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *check.C) {
 	d1 := s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, false, false)
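 	// Replace the CA digest segment of the join token so it no longer matches the cluster's CA fingerprint.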
 	splitToken := strings.Split(d1.JoinTokens(c).Worker, "-")
 	splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
 	replacementToken := strings.Join(splitToken, "-")
 	err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.ListenAddr}})
 	c.Assert(err, checker.NotNil)
 	c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint")
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *check.C) {
 	d1 := s.AddDaemon(c, false, false)
 	c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil)
 	d2 := s.AddDaemon(c, true, false)
 
 	info, err := d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.False)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
 	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleManager
 	})
 
 	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
 
 	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleWorker
 	})
 
 	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.False)
 
 	// Wait for the role to change to worker in the cert. This is partially
 	// done because it's something worth testing in its own right, and
 	// partially because changing the role from manager to worker and then
 	// back to manager quickly might cause the node to pause for a while
 	// while waiting for the role to change to worker, and the test can
 	// time out during this interval.
 	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
 		certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
 		if err != nil {
 			return "", check.Commentf("error: %v", err)
 		}
 		certs, err := helpers.ParseCertificatesPEM(certBytes)
 		if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 {
 			return certs[0].Subject.OrganizationalUnit[0], nil
 		}
 		return "", check.Commentf("could not get organizational unit from certificate")
 	}, checker.Equals, "swarm-worker")
 
 	// Demoting the last manager should fail
 	node := d1.GetNode(c, d1.NodeID)
 	node.Spec.Role = swarm.NodeRoleWorker
 	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
 	status, out, err := d1.SockRequest("POST", url, node.Spec)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusBadRequest, check.Commentf("output: %q", string(out)))
 	// The warning specific to demoting the last manager is best-effort and
 	// won't appear until the Role field of the demoted manager has been
 	// updated.
 	// Yes, I know this looks silly, but checker.Matches is broken, since
 	// it anchors the regexp contrary to the documentation, and this makes
 	// it impossible to match something that includes a line break.
 	if !strings.Contains(string(out), "last manager of the swarm") {
 		c.Assert(string(out), checker.Contains, "this would result in a loss of quorum")
 	}
 	info, err = d1.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 	c.Assert(info.ControlAvailable, checker.True)
 
 	// Promote already demoted node
 	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleManager
 	})
 
 	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckControlAvailable, checker.True)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *check.C) {
 	// add three managers, one of which is the leader
 	d1 := s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, true, true)
 	d3 := s.AddDaemon(c, true, true)
 
 	// start a service by hitting each of the 3 managers
 	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "test1"
 	})
 	d2.CreateService(c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "test2"
 	})
 	d3.CreateService(c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "test3"
 	})
 
 	// 3 services should be started now, because the requests were proxied to the leader
 	// query each node and make sure it returns 3 services
 	for _, d := range []*daemon.Swarm{d1, d2, d3} {
 		services := d.ListServices(c)
 		c.Assert(services, checker.HasLen, 3)
 	}
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *check.C) {
 	// Create 3 nodes
 	d1 := s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, true, true)
 	d3 := s.AddDaemon(c, true, true)
 
 	// assert that the first node we made is the leader, and the other two are followers
 	c.Assert(d1.GetNode(c, d1.NodeID).ManagerStatus.Leader, checker.True)
 	c.Assert(d1.GetNode(c, d2.NodeID).ManagerStatus.Leader, checker.False)
 	c.Assert(d1.GetNode(c, d3.NodeID).ManagerStatus.Leader, checker.False)
 
 	d1.Stop(c)
 
 	var (
 		leader    *daemon.Swarm   // keep track of leader
 		followers []*daemon.Swarm // keep track of followers
 	)
 	checkLeader := func(nodes ...*daemon.Swarm) checkF {
 		return func(c *check.C) (interface{}, check.CommentInterface) {
 			// clear these out before each run
 			leader = nil
 			followers = nil
 			for _, d := range nodes {
 				if d.GetNode(c, d.NodeID).ManagerStatus.Leader {
 					leader = d
 				} else {
 					followers = append(followers, d)
 				}
 			}
 
 			if leader == nil {
 				return false, check.Commentf("no leader elected")
 			}
 
 			return true, check.Commentf("elected %v", leader.ID())
 		}
 	}
 
 	// wait for an election to occur
 	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d2, d3), checker.True)
 
 	// assert that we have a new leader
 	c.Assert(leader, checker.NotNil)
 
 	// Keep track of the current leader, since we expect it to remain the leader after d1 rejoins.
 	stableleader := leader
 
 	// add d1, the initial leader, back
 	d1.Start(c)
 
 	// TODO(stevvooe): may need to wait for rejoin here
 
 	// wait for possible election
 	waitAndAssert(c, defaultReconciliationTimeout, checkLeader(d1, d2, d3), checker.True)
 	// pick out the leader and the followers again
 
 	// verify that we still only have 1 leader and 2 followers
 	c.Assert(leader, checker.NotNil)
 	c.Assert(followers, checker.HasLen, 2)
 	// and that after we added d1 back, the leader hasn't changed
 	c.Assert(leader.NodeID, checker.Equals, stableleader.NodeID)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *check.C) {
 	d1 := s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, true, true)
 	d3 := s.AddDaemon(c, true, true)
 
 	d1.CreateService(c, simpleTestService)
 
 	d2.Stop(c)
 
 	// make sure there is a leader
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
 
 	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "top1"
 	})
 
 	d3.Stop(c)
 
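 	// With two of the three managers down there is no quorum, so creating a service through the API must fail.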
 	var service swarm.Service
 	simpleTestService(&service)
 	service.Spec.Name = "top2"
 	status, out, err := d1.SockRequest("POST", "/services/create", service.Spec)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded: %s", string(out)))
 
 	d2.Start(c)
 
 	// make sure there is a leader
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckLeader, checker.IsNil)
 
 	d1.CreateService(c, simpleTestService, func(s *swarm.Service) {
 		s.Spec.Name = "top3"
 	})
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	instances := 2
 	d.CreateService(c, simpleTestService, setInstances(instances))
 
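 	// Run an unmanaged container next to the service tasks; it should be the only container left after leaving the swarm.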
 	id, err := d.Cmd("run", "-d", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	id = strings.TrimSpace(id)
 
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances+1)
 
 	c.Assert(d.Leave(false), checker.NotNil)
 	c.Assert(d.Leave(true), checker.IsNil)
 
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
 
 	id2, err := d.Cmd("ps", "-q")
 	c.Assert(err, checker.IsNil)
 	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
 }
 
 // #23629
 func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *check.C) {
 	testRequires(c, Network)
 	s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, false, false)
 
 	id, err := d2.Cmd("run", "-d", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	id = strings.TrimSpace(id)
 
 	err = d2.Join(swarm.JoinRequest{
 		RemoteAddrs: []string{"123.123.123.123:1234"},
 	})
 	c.Assert(err, check.NotNil)
 	c.Assert(err.Error(), checker.Contains, "Timeout was reached")
 
 	info, err := d2.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
 
 	c.Assert(d2.Leave(true), checker.IsNil)
 
 	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 1)
 
 	id2, err := d2.Cmd("ps", "-q")
 	c.Assert(err, checker.IsNil)
 	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
 }
 
 // #23705
 func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *check.C) {
 	testRequires(c, Network)
 	d := s.AddDaemon(c, false, false)
 	err := d.Join(swarm.JoinRequest{
 		RemoteAddrs: []string{"123.123.123.123:1234"},
 	})
 	c.Assert(err, check.NotNil)
 	c.Assert(err.Error(), checker.Contains, "Timeout was reached")
 
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
 
 	d.Stop(c)
 	d.Start(c)
 
 	info, err := d.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 }
 
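 // TestAPISwarmManagerRestore checks that a service created on the first manager is still known to every manager after each of them has been stopped, restarted, or killed.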
 func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *check.C) {
 	d1 := s.AddDaemon(c, true, true)
 
 	instances := 2
 	id := d1.CreateService(c, simpleTestService, setInstances(instances))
 
 	d1.GetService(c, id)
 	d1.Stop(c)
 	d1.Start(c)
 	d1.GetService(c, id)
 
 	d2 := s.AddDaemon(c, true, true)
 	d2.GetService(c, id)
 	d2.Stop(c)
 	d2.Start(c)
 	d2.GetService(c, id)
 
 	d3 := s.AddDaemon(c, true, true)
 	d3.GetService(c, id)
 	d3.Stop(c)
 	d3.Start(c)
 	d3.GetService(c, id)
 
 	d3.Kill()
 	time.Sleep(1 * time.Second) // time to handle signal
 	d3.Start(c)
 	d3.GetService(c, id)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	instances := 2
 	id := d.CreateService(c, simpleTestService, setInstances(instances))
 
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 	containers := d.ActiveContainers()
 	instances = 4
 	d.UpdateService(c, d.GetService(c, id), setInstances(instances))
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 	containers2 := d.ActiveContainers()
 
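 	// Scaling up must not replace the original tasks: every container from the first set should still be in the new set.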
 loop0:
 	for _, c1 := range containers {
 		for _, c2 := range containers2 {
 			if c1 == c2 {
 				continue loop0
 			}
 		}
 		c.Errorf("container %v not found in new set %#v", c1, containers2)
 	}
 }
 
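 // TestAPISwarmInvalidAddress checks that an init request with an empty listen address and a join request with an empty remote address are rejected with 400 Bad Request.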
 func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *check.C) {
 	d := s.AddDaemon(c, false, false)
 	req := swarm.InitRequest{
 		ListenAddr: "",
 	}
 	status, _, err := d.SockRequest("POST", "/swarm/init", req)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusBadRequest)
 
 	req2 := swarm.JoinRequest{
 		ListenAddr:  "0.0.0.0:2377",
 		RemoteAddrs: []string{""},
 	}
 	status, _, err = d.SockRequest("POST", "/swarm/join", req2)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusBadRequest)
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *check.C) {
 	d1 := s.AddDaemon(c, true, true)
 	d2 := s.AddDaemon(c, true, true)
 
 	instances := 2
 	id := d1.CreateService(c, simpleTestService, setInstances(instances))
 	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals, instances)
 
 	// drain d2, all containers should move to d1
 	d1.UpdateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityDrain
 	})
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
 	waitAndAssert(c, defaultReconciliationTimeout, d2.CheckActiveContainerCount, checker.Equals, 0)
 
 	d2.Stop(c)
 
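 	// With d2 gone there is no quorum; force a new single-manager cluster on d1 to recover.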
 	c.Assert(d1.Init(swarm.InitRequest{
 		ForceNewCluster: true,
 		Spec:            swarm.Spec{},
 	}), checker.IsNil)
 
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckActiveContainerCount, checker.Equals, instances)
 
 	d3 := s.AddDaemon(c, true, true)
 	info, err := d3.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.ControlAvailable, checker.True)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
 	instances = 4
 	d3.UpdateService(c, d3.GetService(c, id), setInstances(instances))
 
 	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals, instances)
 }
 
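 // simpleTestService fills in a minimal replicated busybox service spec named "top".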
 func simpleTestService(s *swarm.Service) {
 	ureplicas := uint64(1)
 	restartDelay := time.Duration(100 * time.Millisecond)
 
 	s.Spec = swarm.ServiceSpec{
 		TaskTemplate: swarm.TaskSpec{
 			ContainerSpec: swarm.ContainerSpec{
 				Image:   "busybox:latest",
 				Command: []string{"/bin/top"},
 			},
 			RestartPolicy: &swarm.RestartPolicy{
 				Delay: &restartDelay,
 			},
 		},
 		Mode: swarm.ServiceMode{
 			Replicated: &swarm.ReplicatedService{
 				Replicas: &ureplicas,
 			},
 		},
 	}
 	s.Spec.Name = "top"
 }
 
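 // serviceForUpdate is like simpleTestService but also sets update and rollback configs; the service is named "updatetest".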
 func serviceForUpdate(s *swarm.Service) {
 	ureplicas := uint64(1)
 	restartDelay := time.Duration(100 * time.Millisecond)
 
 	s.Spec = swarm.ServiceSpec{
 		TaskTemplate: swarm.TaskSpec{
 			ContainerSpec: swarm.ContainerSpec{
 				Image:   "busybox:latest",
 				Command: []string{"/bin/top"},
 			},
 			RestartPolicy: &swarm.RestartPolicy{
 				Delay: &restartDelay,
 			},
 		},
 		Mode: swarm.ServiceMode{
 			Replicated: &swarm.ReplicatedService{
 				Replicas: &ureplicas,
 			},
 		},
 		UpdateConfig: &swarm.UpdateConfig{
 			Parallelism:   2,
 			Delay:         4 * time.Second,
 			FailureAction: swarm.UpdateFailureActionContinue,
 		},
 		RollbackConfig: &swarm.UpdateConfig{
 			Parallelism:   3,
 			Delay:         4 * time.Second,
 			FailureAction: swarm.UpdateFailureActionContinue,
 		},
 	}
 	s.Spec.Name = "updatetest"
 }
 
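 // setInstances returns a service constructor that sets the replica count.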
 func setInstances(replicas int) daemon.ServiceConstructor {
 	ureplicas := uint64(replicas)
 	return func(s *swarm.Service) {
 		s.Spec.Mode = swarm.ServiceMode{
 			Replicated: &swarm.ReplicatedService{
 				Replicas: &ureplicas,
 			},
 		}
 	}
 }
 
 func setUpdateOrder(order string) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		if s.Spec.UpdateConfig == nil {
 			s.Spec.UpdateConfig = &swarm.UpdateConfig{}
 		}
 		s.Spec.UpdateConfig.Order = order
 	}
 }
 
 func setRollbackOrder(order string) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		if s.Spec.RollbackConfig == nil {
 			s.Spec.RollbackConfig = &swarm.UpdateConfig{}
 		}
 		s.Spec.RollbackConfig.Order = order
 	}
 }
 
 func setImage(image string) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		s.Spec.TaskTemplate.ContainerSpec.Image = image
 	}
 }
 
 func setFailureAction(failureAction string) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		s.Spec.UpdateConfig.FailureAction = failureAction
 	}
 }
 
 func setMaxFailureRatio(maxFailureRatio float32) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
 	}
 }
 
 func setParallelism(parallelism uint64) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		s.Spec.UpdateConfig.Parallelism = parallelism
 	}
 }
 
 func setConstraints(constraints []string) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		if s.Spec.TaskTemplate.Placement == nil {
 			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
 		}
 		s.Spec.TaskTemplate.Placement.Constraints = constraints
 	}
 }
 
 func setPlacementPrefs(prefs []swarm.PlacementPreference) daemon.ServiceConstructor {
 	return func(s *swarm.Service) {
 		if s.Spec.TaskTemplate.Placement == nil {
 			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
 		}
 		s.Spec.TaskTemplate.Placement.Preferences = prefs
 	}
 }
 
 func setGlobalMode(s *swarm.Service) {
 	s.Spec.Mode = swarm.ServiceMode{
 		Global: &swarm.GlobalService{},
 	}
 }
 
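 // checkClusterHealth verifies that every manager sees all nodes as ready and active, that exactly one leader is present, and that the manager and worker counts match the expected values.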
 func checkClusterHealth(c *check.C, cl []*daemon.Swarm, managerCount, workerCount int) {
 	var totalMCount, totalWCount int
 
 	for _, d := range cl {
 		var (
 			info swarm.Info
 			err  error
 		)
 
 		// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error
 		checkInfo := func(c *check.C) (interface{}, check.CommentInterface) {
 			info, err = d.SwarmInfo()
 			return err, check.Commentf("cluster not ready in time")
 		}
 		waitAndAssert(c, defaultReconciliationTimeout, checkInfo, checker.IsNil)
 		if !info.ControlAvailable {
 			totalWCount++
 			continue
 		}
 
 		var leaderFound bool
 		totalMCount++
 		var mCount, wCount int
 
 		for _, n := range d.ListNodes(c) {
 			waitReady := func(c *check.C) (interface{}, check.CommentInterface) {
 				if n.Status.State == swarm.NodeStateReady {
 					return true, nil
 				}
 				nn := d.GetNode(c, n.ID)
 				n = *nn
 				return n.Status.State == swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID)
 			}
 			waitAndAssert(c, defaultReconciliationTimeout, waitReady, checker.True)
 
 			waitActive := func(c *check.C) (interface{}, check.CommentInterface) {
 				if n.Spec.Availability == swarm.NodeAvailabilityActive {
 					return true, nil
 				}
 				nn := d.GetNode(c, n.ID)
 				n = *nn
 				return n.Spec.Availability == swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID)
 			}
 			waitAndAssert(c, defaultReconciliationTimeout, waitActive, checker.True)
 
 			if n.Spec.Role == swarm.NodeRoleManager {
 				c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID))
 				if n.ManagerStatus.Leader {
 					leaderFound = true
 				}
 				mCount++
 			} else {
 				c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID))
 				wCount++
 			}
 		}
 		c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID))
 		c.Assert(mCount, checker.Equals, managerCount, check.Commentf("managers count reported by node %s", info.NodeID))
 		c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID))
 	}
 	c.Assert(totalMCount, checker.Equals, managerCount)
 	c.Assert(totalWCount, checker.Equals, workerCount)
 }
 
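 // TestAPISwarmRestartCluster stops every daemon in the cluster concurrently, starts them all again, and verifies that the cluster comes back healthy.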
 func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *check.C) {
 	mCount, wCount := 5, 1
 
 	var nodes []*daemon.Swarm
 	for i := 0; i < mCount; i++ {
 		manager := s.AddDaemon(c, true, true)
 		info, err := manager.SwarmInfo()
 		c.Assert(err, checker.IsNil)
 		c.Assert(info.ControlAvailable, checker.True)
 		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 		nodes = append(nodes, manager)
 	}
 
 	for i := 0; i < wCount; i++ {
 		worker := s.AddDaemon(c, true, false)
 		info, err := worker.SwarmInfo()
 		c.Assert(err, checker.IsNil)
 		c.Assert(info.ControlAvailable, checker.False)
 		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 		nodes = append(nodes, worker)
 	}
 
 	// stop whole cluster
 	{
 		var wg sync.WaitGroup
 		wg.Add(len(nodes))
 		errs := make(chan error, len(nodes))
 
 		for _, d := range nodes {
 			go func(daemon *daemon.Swarm) {
 				defer wg.Done()
 				if err := daemon.StopWithError(); err != nil {
 					errs <- err
 				}
 				// FIXME(vdemeester) This is duplicated…
 				if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
 					daemon.Root = filepath.Dir(daemon.Root)
 				}
 			}(d)
 		}
 		wg.Wait()
 		close(errs)
 		for err := range errs {
 			c.Assert(err, check.IsNil)
 		}
 	}
 
 	// start whole cluster
 	{
 		var wg sync.WaitGroup
 		wg.Add(len(nodes))
 		errs := make(chan error, len(nodes))
 
 		for _, d := range nodes {
 			go func(daemon *daemon.Swarm) {
 				defer wg.Done()
 				if err := daemon.StartWithError("--iptables=false"); err != nil {
 					errs <- err
 				}
 			}(d)
 		}
 		wg.Wait()
 		close(errs)
 		for err := range errs {
 			c.Assert(err, check.IsNil)
 		}
 	}
 
 	checkClusterHealth(c, nodes, mCount, wCount)
 }
 
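 // TestAPISwarmServicesUpdateWithName checks that a service can be updated by name rather than by ID.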
 func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	instances := 2
 	id := d.CreateService(c, simpleTestService, setInstances(instances))
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 
 	service := d.GetService(c, id)
 	instances = 5
 
 	setInstances(instances)(service)
 	url := fmt.Sprintf("/services/%s/update?version=%d", service.Spec.Name, service.Version.Index)
 	status, out, err := d.SockRequest("POST", url, service.Spec)
 	c.Assert(err, checker.IsNil)
 	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 }
 
 // Unlocking an unlocked swarm results in an error
 func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 	err := d.Unlock(swarm.UnlockRequest{UnlockKey: "wrong-key"})
 	c.Assert(err, checker.NotNil)
 	c.Assert(err.Error(), checker.Contains, "swarm is not locked")
 }
 
 // #29885
 func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *check.C) {
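 	// Occupy the default swarm listen port so that the init below cannot bind to it.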
 	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort))
 	c.Assert(err, checker.IsNil)
 	defer ln.Close()
 	d := s.AddDaemon(c, false, false)
 	err = d.Init(swarm.InitRequest{})
 	c.Assert(err, checker.NotNil)
 	c.Assert(err.Error(), checker.Contains, "address already in use")
 }
 
 // Test case for 30242, where duplicate networks with different drivers, `bridge` and `overlay`,
 // were both reported with scope `swarm` by `docker network inspect` and `docker network ls`.
 // This test makes sure each network now reports its correct scope.
 func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	name := "foo"
 	networkCreateRequest := types.NetworkCreateRequest{
 		Name: name,
 		NetworkCreate: types.NetworkCreate{
 			CheckDuplicate: false,
 		},
 	}
 
 	var n1 types.NetworkCreateResponse
 	networkCreateRequest.NetworkCreate.Driver = "bridge"
 
 	status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest)
 	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
 	c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
 
 	c.Assert(json.Unmarshal(out, &n1), checker.IsNil)
 
 	var n2 types.NetworkCreateResponse
 	networkCreateRequest.NetworkCreate.Driver = "overlay"
 
 	status, out, err = d.SockRequest("POST", "/networks/create", networkCreateRequest)
 	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
 	c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
 
 	c.Assert(json.Unmarshal(out, &n2), checker.IsNil)
 
 	var r1 types.NetworkResource
 
 	status, out, err = d.SockRequest("GET", "/networks/"+n1.ID, nil)
 	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
 	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
 
 	c.Assert(json.Unmarshal(out, &r1), checker.IsNil)
 
 	c.Assert(r1.Scope, checker.Equals, "local")
 
 	var r2 types.NetworkResource
 
 	status, out, err = d.SockRequest("GET", "/networks/"+n2.ID, nil)
 	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
 	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(out)))
 
 	c.Assert(json.Unmarshal(out, &r2), checker.IsNil)
 
 	c.Assert(r2.Scope, checker.Equals, "swarm")
 }
 
 // Test case for 30178
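 // A service whose container spec carries an empty HealthConfig should still start and be reachable by service name over the attached overlay network.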
 func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	out, err := d.Cmd("network", "create", "-d", "overlay", "lb")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	instances := 1
 	d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) {
 		s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
 		s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
 			{Target: "lb"},
 		}
 	})
 
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, instances)
 
 	containers := d.ActiveContainers()
 
 	out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 }
 
 func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *check.C) {
 	m := s.AddDaemon(c, true, true)
 	w := s.AddDaemon(c, true, false)
 
 	info, err := m.SwarmInfo()
 	c.Assert(err, checker.IsNil)
 
 	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot
 
 	// rotate multiple times
 	for i := 0; i < 4; i++ {
 		var cert, key []byte
 		if i%2 != 0 {
 			cert, _, key, err = initca.New(&csr.CertificateRequest{
 				CN:         "newRoot",
 				KeyRequest: csr.NewBasicKeyRequest(),
 				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
 			})
 			c.Assert(err, checker.IsNil)
 		}
 		expectedCert := string(cert)
 		m.UpdateSwarm(c, func(s *swarm.Spec) {
 			s.CAConfig.SigningCACert = expectedCert
 			s.CAConfig.SigningCAKey = string(key)
 			s.CAConfig.ForceRotate++
 		})
 
 		// poll to make sure update succeeds
 		var clusterTLSInfo swarm.TLSInfo
 		for j := 0; j < 18; j++ {
 			info, err := m.SwarmInfo()
 			c.Assert(err, checker.IsNil)
 
 			// the desired CA cert and key are always redacted
 			c.Assert(info.Cluster.Spec.CAConfig.SigningCAKey, checker.Equals, "")
 			c.Assert(info.Cluster.Spec.CAConfig.SigningCACert, checker.Equals, "")
 
 			clusterTLSInfo = info.Cluster.TLSInfo
 
 			// if root rotation is done and the trust root has changed, we don't have to poll anymore
 			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
 				break
 			}
 
 			// root rotation not done
 			time.Sleep(250 * time.Millisecond)
 		}
 		if cert != nil {
 			c.Assert(clusterTLSInfo.TrustRoot, checker.Equals, expectedCert)
 		}
 		// could take another second or two for the nodes to trust the new roots after they've all gotten
 		// new TLS certificates
 		for j := 0; j < 18; j++ {
 			mInfo := m.GetNode(c, m.NodeID).Description.TLSInfo
 			wInfo := m.GetNode(c, w.NodeID).Description.TLSInfo
 
 			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
 				break
 			}
 
 			// nodes don't trust root certs yet
 			time.Sleep(250 * time.Millisecond)
 		}
 
 		c.Assert(m.GetNode(c, m.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
 		c.Assert(m.GetNode(c, w.NodeID).Description.TLSInfo, checker.DeepEquals, clusterTLSInfo)
 		currentTrustRoot = clusterTLSInfo.TrustRoot
 	}
 }
 
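 // TestAPINetworkInspectWithScope checks that a swarm-scoped network can be inspected by name and that requesting the "local" scope for that name returns 404.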
 func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
 	name := "foo"
 	networkCreateRequest := types.NetworkCreateRequest{
 		Name: name,
 	}
 
 	var n types.NetworkCreateResponse
 	networkCreateRequest.NetworkCreate.Driver = "overlay"
 
 	status, out, err := d.SockRequest("POST", "/networks/create", networkCreateRequest)
 	c.Assert(err, checker.IsNil, check.Commentf(string(out)))
 	c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf(string(out)))
 	c.Assert(json.Unmarshal(out, &n), checker.IsNil)
 
 	var r types.NetworkResource
 
 	status, body, err := d.SockRequest("GET", "/networks/"+name, nil)
 	c.Assert(err, checker.IsNil, check.Commentf(string(body)))
 	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(body)))
 	c.Assert(json.Unmarshal(body, &r), checker.IsNil)
 	c.Assert(r.Scope, checker.Equals, "swarm")
 	c.Assert(r.ID, checker.Equals, n.ID)
 
 	v := url.Values{}
 	v.Set("scope", "local")
 
 	status, body, err = d.SockRequest("GET", "/networks/"+name+"?"+v.Encode(), nil)
 	c.Assert(err, checker.IsNil, check.Commentf(string(body)))
 	c.Assert(status, checker.Equals, http.StatusNotFound, check.Commentf(string(body)))
 }