// +build !windows

package main
import ( |
7d62e40f |
"context" |
a3f15773 |
"fmt" |
39433318 |
"io/ioutil" |
d377b074 |
"net" |
0d88d5b6 |
"net/http" |
ae4137ae |
"path/filepath" |
02157c63 |
"runtime" |
0d88d5b6 |
"strings" |
ae4137ae |
"sync" |
e25352a4 |
"testing" |
0d88d5b6 |
"time"
|
376c75d1 |
"github.com/cloudflare/cfssl/csr" |
39433318 |
"github.com/cloudflare/cfssl/helpers" |
376c75d1 |
"github.com/cloudflare/cfssl/initca" |
05a831a7 |
"github.com/docker/docker/api/types" |
8feb5c5a |
"github.com/docker/docker/api/types/container" |
91e197d6 |
"github.com/docker/docker/api/types/swarm" |
0a91ba2d |
"github.com/docker/docker/client" |
33968e6c |
"github.com/docker/docker/integration-cli/checker" |
48de91a3 |
"github.com/docker/docker/integration-cli/daemon" |
83d18cf4 |
testdaemon "github.com/docker/docker/internal/test/daemon" |
42f6fdf0 |
"github.com/docker/docker/internal/test/request" |
376c75d1 |
"github.com/docker/swarmkit/ca" |
c502db49 |
"github.com/pkg/errors" |
38457285 |
"gotest.tools/assert"
is "gotest.tools/assert/cmp" |
9266ff78 |
"gotest.tools/poll" |
0d88d5b6 |
)
// defaultReconciliationTimeout is the upper bound the tests in this file
// wait for swarm cluster state (leadership, task counts, node roles) to
// converge.
var defaultReconciliationTimeout = 30 * time.Second
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmInit(c *testing.T) { |
0d88d5b6 |
// todo: should find a better way to verify that components are running than /info
d1 := s.AddDaemon(c, true, true) |
83d18cf4 |
info := d1.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, true)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
assert.Equal(c, info.Cluster.RootRotationInProgress, false) |
0d88d5b6 |
d2 := s.AddDaemon(c, true, false) |
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, false)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
0d88d5b6 |
// Leaving cluster |
6345208b |
assert.NilError(c, d2.SwarmLeave(c, false)) |
0d88d5b6 |
|
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, false)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
0d88d5b6 |
|
83d18cf4 |
d2.SwarmJoin(c, swarm.JoinRequest{
ListenAddr: d1.SwarmListenAddr(),
JoinToken: d1.JoinTokens(c).Worker,
RemoteAddrs: []string{d1.SwarmListenAddr()},
}) |
0d88d5b6 |
|
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, false)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
0d88d5b6 |
// Current state restoring after restarts |
c502fb49 |
d1.Stop(c)
d2.Stop(c) |
0d88d5b6 |
|
2ed512c7 |
d1.StartNode(c)
d2.StartNode(c) |
0d88d5b6 |
|
83d18cf4 |
info = d1.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, true)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
0d88d5b6 |
|
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, false)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
0d88d5b6 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmJoinToken(c *testing.T) { |
0d88d5b6 |
d1 := s.AddDaemon(c, false, false) |
83d18cf4 |
d1.SwarmInit(c, swarm.InitRequest{}) |
0d88d5b6 |
|
b7ea1bdb |
// todo: error message differs depending if some components of token are valid
|
0d88d5b6 |
d2 := s.AddDaemon(c, false, false) |
83d18cf4 |
c2 := d2.NewClientT(c)
err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
ListenAddr: d2.SwarmListenAddr(),
RemoteAddrs: []string{d1.SwarmListenAddr()},
}) |
6345208b |
assert.ErrorContains(c, err, "join token is necessary") |
83d18cf4 |
info := d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
0d88d5b6 |
|
83d18cf4 |
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
ListenAddr: d2.SwarmListenAddr(),
JoinToken: "foobaz",
RemoteAddrs: []string{d1.SwarmListenAddr()},
}) |
6345208b |
assert.ErrorContains(c, err, "invalid join token") |
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
0d88d5b6 |
|
48de91a3 |
workerToken := d1.JoinTokens(c).Worker |
2cc5bd33 |
|
83d18cf4 |
d2.SwarmJoin(c, swarm.JoinRequest{
ListenAddr: d2.SwarmListenAddr(),
JoinToken: workerToken,
RemoteAddrs: []string{d1.SwarmListenAddr()},
})
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
assert.NilError(c, d2.SwarmLeave(c, false)) |
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
aed7667b |
|
2cc5bd33 |
// change tokens |
48de91a3 |
d1.RotateTokens(c) |
aed7667b |
|
83d18cf4 |
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
ListenAddr: d2.SwarmListenAddr(),
JoinToken: workerToken,
RemoteAddrs: []string{d1.SwarmListenAddr()},
}) |
6345208b |
assert.ErrorContains(c, err, "join token is necessary") |
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
aed7667b |
|
48de91a3 |
workerToken = d1.JoinTokens(c).Worker |
2cc5bd33 |
|
83d18cf4 |
d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
assert.NilError(c, d2.SwarmLeave(c, false)) |
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
aed7667b |
|
2cc5bd33 |
// change spec, don't change tokens |
48de91a3 |
d1.UpdateSwarm(c, func(s *swarm.Spec) {}) |
aed7667b |
|
83d18cf4 |
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
ListenAddr: d2.SwarmListenAddr(),
RemoteAddrs: []string{d1.SwarmListenAddr()},
}) |
6345208b |
assert.ErrorContains(c, err, "join token is necessary") |
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
aed7667b |
|
83d18cf4 |
d2.SwarmJoin(c, swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.SwarmListenAddr()}})
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
assert.NilError(c, d2.SwarmLeave(c, false)) |
83d18cf4 |
info = d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
0d88d5b6 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestUpdateSwarmAddExternalCA(c *testing.T) { |
9b96b2d2 |
d1 := s.AddDaemon(c, false, false) |
83d18cf4 |
d1.SwarmInit(c, swarm.InitRequest{}) |
9b96b2d2 |
d1.UpdateSwarm(c, func(s *swarm.Spec) {
s.CAConfig.ExternalCAs = []*swarm.ExternalCA{
{
Protocol: swarm.ExternalCAProtocolCFSSL,
URL: "https://thishasnoca.org",
}, |
b0401a71 |
{
Protocol: swarm.ExternalCAProtocolCFSSL,
URL: "https://thishasacacert.org",
CACert: "cacert",
}, |
9b96b2d2 |
}
}) |
83d18cf4 |
info := d1.SwarmInfo(c) |
6345208b |
assert.Equal(c, len(info.Cluster.Spec.CAConfig.ExternalCAs), 2)
assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[0].CACert, "")
assert.Equal(c, info.Cluster.Spec.CAConfig.ExternalCAs[1].CACert, "cacert") |
9b96b2d2 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmCAHash(c *testing.T) { |
0d88d5b6 |
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, false, false) |
48de91a3 |
splitToken := strings.Split(d1.JoinTokens(c).Worker, "-") |
2cc5bd33 |
splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e"
replacementToken := strings.Join(splitToken, "-") |
83d18cf4 |
c2 := d2.NewClientT(c)
err := c2.SwarmJoin(context.Background(), swarm.JoinRequest{
ListenAddr: d2.SwarmListenAddr(),
JoinToken: replacementToken,
RemoteAddrs: []string{d1.SwarmListenAddr()},
}) |
6345208b |
assert.ErrorContains(c, err, "remote CA does not match fingerprint") |
0d88d5b6 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmPromoteDemote(c *testing.T) { |
0d88d5b6 |
d1 := s.AddDaemon(c, false, false) |
83d18cf4 |
d1.SwarmInit(c, swarm.InitRequest{}) |
0d88d5b6 |
d2 := s.AddDaemon(c, true, false)
|
83d18cf4 |
info := d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, false)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
0d88d5b6 |
|
83d18cf4 |
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { |
0d88d5b6 |
n.Spec.Role = swarm.NodeRoleManager
})
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout)) |
0d88d5b6 |
|
83d18cf4 |
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { |
0d88d5b6 |
n.Spec.Role = swarm.NodeRoleWorker
})
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.False()), poll.WithTimeout(defaultReconciliationTimeout)) |
0d88d5b6 |
|
39433318 |
// Wait for the role to change to worker in the cert. This is partially
// done because it's something worth testing in its own right, and
// partially because changing the role from manager to worker and then
// back to manager quickly might cause the node to pause for awhile
// while waiting for the role to change to worker, and the test can
// time out during this interval. |
9266ff78 |
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) { |
39433318 |
certBytes, err := ioutil.ReadFile(filepath.Join(d2.Folder, "root", "swarm", "certificates", "swarm-node.crt"))
if err != nil { |
673cf751 |
return "", fmt.Sprintf("error: %v", err) |
39433318 |
}
certs, err := helpers.ParseCertificatesPEM(certBytes)
if err == nil && len(certs) > 0 && len(certs[0].Subject.OrganizationalUnit) > 0 { |
be66788e |
return certs[0].Subject.OrganizationalUnit[0], "" |
39433318 |
} |
2f069fa3 |
return "", "could not get organizational unit from certificate" |
9266ff78 |
}, checker.Equals("swarm-worker")), poll.WithTimeout(defaultReconciliationTimeout)) |
39433318 |
|
a3f15773 |
// Demoting last node should fail |
83d18cf4 |
node := d1.GetNode(c, d1.NodeID()) |
a3f15773 |
node.Spec.Role = swarm.NodeRoleWorker
url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) |
42f6fdf0 |
res, body, err := request.Post(url, request.Host(d1.Sock()), request.JSONBody(node.Spec)) |
6345208b |
assert.NilError(c, err) |
0fd5a654 |
b, err := request.ReadBody(body) |
6345208b |
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusBadRequest, "output: %q", string(b)) |
0fd5a654 |
|
99119fca |
// The warning specific to demoting the last manager is best-effort and
// won't appear until the Role field of the demoted manager has been
// updated.
// Yes, I know this looks silly, but checker.Matches is broken, since
// it anchors the regexp contrary to the documentation, and this makes
// it impossible to match something that includes a line break. |
0fd5a654 |
if !strings.Contains(string(b), "last manager of the swarm") { |
6345208b |
assert.Assert(c, strings.Contains(string(b), "this would result in a loss of quorum")) |
99119fca |
} |
83d18cf4 |
info = d1.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive)
assert.Equal(c, info.ControlAvailable, true) |
a3f15773 |
// Promote already demoted node |
83d18cf4 |
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { |
a3f15773 |
n.Spec.Role = swarm.NodeRoleManager
})
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d2.CheckControlAvailable, checker.True()), poll.WithTimeout(defaultReconciliationTimeout)) |
0d88d5b6 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmLeaderProxy(c *testing.T) { |
d305aa48 |
// add three managers, one of these is leader
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, true)
// start a service by hitting each of the 3 managers |
48de91a3 |
d1.CreateService(c, simpleTestService, func(s *swarm.Service) { |
d305aa48 |
s.Spec.Name = "test1"
}) |
48de91a3 |
d2.CreateService(c, simpleTestService, func(s *swarm.Service) { |
d305aa48 |
s.Spec.Name = "test2"
}) |
48de91a3 |
d3.CreateService(c, simpleTestService, func(s *swarm.Service) { |
d305aa48 |
s.Spec.Name = "test3"
})
// 3 services should be started now, because the requests were proxied to leader
// query each node and make sure it returns 3 services |
83d18cf4 |
for _, d := range []*daemon.Daemon{d1, d2, d3} { |
48de91a3 |
services := d.ListServices(c) |
6345208b |
assert.Equal(c, len(services), 3) |
d305aa48 |
}
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmLeaderElection(c *testing.T) { |
02157c63 |
if runtime.GOARCH == "s390x" {
c.Skip("Disabled on s390x")
}
if runtime.GOARCH == "ppc64le" {
c.Skip("Disabled on ppc64le")
}
|
3489e765 |
// Create 3 nodes
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, true)
// assert that the first node we made is the leader, and the other two are followers |
6345208b |
assert.Equal(c, d1.GetNode(c, d1.NodeID()).ManagerStatus.Leader, true)
assert.Equal(c, d1.GetNode(c, d2.NodeID()).ManagerStatus.Leader, false)
assert.Equal(c, d1.GetNode(c, d3.NodeID()).ManagerStatus.Leader, false) |
3489e765 |
|
c502fb49 |
d1.Stop(c) |
946e2377 |
var ( |
83d18cf4 |
leader *daemon.Daemon // keep track of leader
followers []*daemon.Daemon // keep track of followers |
946e2377 |
) |
c502db49 |
var lastErr error |
37555cde |
checkLeader := func(nodes ...*daemon.Daemon) checkF { |
1b1fe4cc |
return func(c *testing.T) (interface{}, string) { |
946e2377 |
// clear these out before each run
leader = nil
followers = nil
for _, d := range nodes { |
c502db49 |
n := d.GetNode(c, d.NodeID(), func(err error) bool {
if strings.Contains(errors.Cause(err).Error(), context.DeadlineExceeded.Error()) || strings.Contains(err.Error(), "swarm does not have a leader") {
lastErr = err
return true
}
return false
})
if n == nil { |
673cf751 |
return false, fmt.Sprintf("failed to get node: %v", lastErr) |
c502db49 |
}
if n.ManagerStatus.Leader { |
946e2377 |
leader = d
} else {
followers = append(followers, d)
}
} |
3489e765 |
|
946e2377 |
if leader == nil { |
2f069fa3 |
return false, "no leader elected" |
946e2377 |
} |
3489e765 |
|
673cf751 |
return true, fmt.Sprintf("elected %v", leader.ID()) |
3489e765 |
}
}
|
946e2377 |
// wait for an election to occur |
06afc2d1 |
c.Logf("Waiting for election to occur...") |
9266ff78 |
poll.WaitOn(c, pollCheck(c, checkLeader(d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout)) |
946e2377 |
|
3489e765 |
// assert that we have a new leader |
6345208b |
assert.Assert(c, leader != nil) |
946e2377 |
// Keep track of the current leader, since we want that to be chosen.
stableleader := leader |
3489e765 |
|
946e2377 |
// add the d1, the initial leader, back |
2ed512c7 |
d1.StartNode(c) |
3489e765 |
|
946e2377 |
// wait for possible election |
06afc2d1 |
c.Logf("Waiting for possible election...") |
9266ff78 |
poll.WaitOn(c, pollCheck(c, checkLeader(d1, d2, d3), checker.True()), poll.WithTimeout(defaultReconciliationTimeout)) |
3489e765 |
// pick out the leader and the followers again
// verify that we still only have 1 leader and 2 followers |
6345208b |
assert.Assert(c, leader != nil)
assert.Equal(c, len(followers), 2) |
3489e765 |
// and that after we added d1 back, the leader hasn't changed |
6345208b |
assert.Equal(c, leader.NodeID(), stableleader.NodeID()) |
3489e765 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmRaftQuorum(c *testing.T) { |
02157c63 |
if runtime.GOARCH == "s390x" {
c.Skip("Disabled on s390x")
}
if runtime.GOARCH == "ppc64le" {
c.Skip("Disabled on ppc64le")
}
|
0d88d5b6 |
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
d3 := s.AddDaemon(c, true, true)
|
48de91a3 |
d1.CreateService(c, simpleTestService) |
0d88d5b6 |
|
c502fb49 |
d2.Stop(c) |
0d88d5b6 |
|
4a856d7a |
// make sure there is a leader |
9266ff78 |
poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout)) |
4a856d7a |
|
48de91a3 |
d1.CreateService(c, simpleTestService, func(s *swarm.Service) { |
0d88d5b6 |
s.Spec.Name = "top1"
})
|
c502fb49 |
d3.Stop(c) |
0d88d5b6 |
var service swarm.Service
simpleTestService(&service)
service.Spec.Name = "top2" |
2cb7b73a |
cli := d1.NewClientT(c) |
0fd5a654 |
defer cli.Close() |
fd32d49c |
// d1 will eventually step down from leader because there is no longer an active quorum, wait for that to happen |
9266ff78 |
poll.WaitOn(c, pollCheck(c, func(c *testing.T) (interface{}, string) { |
2cb7b73a |
_, err := cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{}) |
be66788e |
return err.Error(), "" |
9266ff78 |
}, checker.Contains("Make sure more than half of the managers are online.")), poll.WithTimeout(defaultReconciliationTimeout*2)) |
0d88d5b6 |
|
2ed512c7 |
d2.StartNode(c) |
0d88d5b6 |
|
4a856d7a |
// make sure there is a leader |
9266ff78 |
poll.WaitOn(c, pollCheck(c, d1.CheckLeader, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout)) |
4a856d7a |
|
48de91a3 |
d1.CreateService(c, simpleTestService, func(s *swarm.Service) { |
0d88d5b6 |
s.Spec.Name = "top3"
})
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmLeaveRemovesContainer(c *testing.T) { |
0d88d5b6 |
d := s.AddDaemon(c, true, true)
instances := 2 |
48de91a3 |
d.CreateService(c, simpleTestService, setInstances(instances)) |
0d88d5b6 |
id, err := d.Cmd("run", "-d", "busybox", "top") |
6345208b |
assert.NilError(c, err, id) |
0d88d5b6 |
id = strings.TrimSpace(id)
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances+1)), poll.WithTimeout(defaultReconciliationTimeout)) |
0d88d5b6 |
|
6345208b |
assert.ErrorContains(c, d.SwarmLeave(c, false), "")
assert.NilError(c, d.SwarmLeave(c, true)) |
0d88d5b6 |
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout)) |
0d88d5b6 |
id2, err := d.Cmd("ps", "-q") |
6345208b |
assert.NilError(c, err, id2)
assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2))) |
0d88d5b6 |
}
|
826f6f07 |
// #23629 |
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmLeaveOnPendingJoin(c *testing.T) { |
da9ef68f |
testRequires(c, Network) |
826f6f07 |
s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, false, false)
id, err := d2.Cmd("run", "-d", "busybox", "top") |
6345208b |
assert.NilError(c, err, id) |
826f6f07 |
id = strings.TrimSpace(id)
|
83d18cf4 |
c2 := d2.NewClientT(c)
err = c2.SwarmJoin(context.Background(), swarm.JoinRequest{
ListenAddr: d2.SwarmListenAddr(), |
fa3b5964 |
RemoteAddrs: []string{"123.123.123.123:1234"}, |
a0ccd0d4 |
}) |
6345208b |
assert.ErrorContains(c, err, "Timeout was reached") |
826f6f07 |
|
83d18cf4 |
info := d2.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStatePending) |
fa3b5964 |
|
6345208b |
assert.NilError(c, d2.SwarmLeave(c, true)) |
826f6f07 |
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(1)), poll.WithTimeout(defaultReconciliationTimeout)) |
826f6f07 |
id2, err := d2.Cmd("ps", "-q") |
6345208b |
assert.NilError(c, err, id2)
assert.Assert(c, strings.HasPrefix(id, strings.TrimSpace(id2))) |
826f6f07 |
}
|
ded1d9af |
// #23705 |
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmRestoreOnPendingJoin(c *testing.T) { |
da9ef68f |
testRequires(c, Network) |
ded1d9af |
d := s.AddDaemon(c, false, false) |
83d18cf4 |
client := d.NewClientT(c)
err := client.SwarmJoin(context.Background(), swarm.JoinRequest{
ListenAddr: d.SwarmListenAddr(), |
fa3b5964 |
RemoteAddrs: []string{"123.123.123.123:1234"}, |
a0ccd0d4 |
}) |
6345208b |
assert.ErrorContains(c, err, "Timeout was reached") |
ded1d9af |
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckLocalNodeState, checker.Equals(swarm.LocalNodeStatePending)), poll.WithTimeout(defaultReconciliationTimeout)) |
ded1d9af |
|
2ed512c7 |
d.RestartNode(c) |
ded1d9af |
|
83d18cf4 |
info := d.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateInactive) |
ded1d9af |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmManagerRestore(c *testing.T) { |
0d88d5b6 |
d1 := s.AddDaemon(c, true, true)
instances := 2 |
48de91a3 |
id := d1.CreateService(c, simpleTestService, setInstances(instances)) |
0d88d5b6 |
|
48de91a3 |
d1.GetService(c, id) |
2ed512c7 |
d1.RestartNode(c) |
48de91a3 |
d1.GetService(c, id) |
0d88d5b6 |
d2 := s.AddDaemon(c, true, true) |
48de91a3 |
d2.GetService(c, id) |
2ed512c7 |
d2.RestartNode(c) |
48de91a3 |
d2.GetService(c, id) |
0d88d5b6 |
d3 := s.AddDaemon(c, true, true) |
48de91a3 |
d3.GetService(c, id) |
2ed512c7 |
d3.RestartNode(c) |
48de91a3 |
d3.GetService(c, id) |
0d88d5b6 |
|
2e326eba |
err := d3.Kill()
assert.NilError(c, err) |
0d88d5b6 |
time.Sleep(1 * time.Second) // time to handle signal |
2ed512c7 |
d3.StartNode(c) |
48de91a3 |
d3.GetService(c, id) |
0d88d5b6 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmScaleNoRollingUpdate(c *testing.T) { |
0d88d5b6 |
d := s.AddDaemon(c, true, true)
instances := 2 |
48de91a3 |
id := d.CreateService(c, simpleTestService, setInstances(instances)) |
0d88d5b6 |
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
9722214c |
containers := d.ActiveContainers(c) |
0d88d5b6 |
instances = 4 |
48de91a3 |
d.UpdateService(c, d.GetService(c, id), setInstances(instances)) |
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
9722214c |
containers2 := d.ActiveContainers(c) |
0d88d5b6 |
loop0:
for _, c1 := range containers {
for _, c2 := range containers2 {
if c1 == c2 {
continue loop0
}
}
c.Errorf("container %v not found in new set %#v", c1, containers2)
}
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmInvalidAddress(c *testing.T) { |
fb3eb1c2 |
d := s.AddDaemon(c, false, false)
req := swarm.InitRequest{
ListenAddr: "",
} |
42f6fdf0 |
res, _, err := request.Post("/swarm/init", request.Host(d.Sock()), request.JSONBody(req)) |
6345208b |
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusBadRequest) |
fb3eb1c2 |
req2 := swarm.JoinRequest{
ListenAddr: "0.0.0.0:2377",
RemoteAddrs: []string{""},
} |
42f6fdf0 |
res, _, err = request.Post("/swarm/join", request.Host(d.Sock()), request.JSONBody(req2)) |
6345208b |
assert.NilError(c, err)
assert.Equal(c, res.StatusCode, http.StatusBadRequest) |
fb3eb1c2 |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmForceNewCluster(c *testing.T) { |
1acb8ef8 |
d1 := s.AddDaemon(c, true, true)
d2 := s.AddDaemon(c, true, true)
instances := 2 |
48de91a3 |
id := d1.CreateService(c, simpleTestService, setInstances(instances)) |
9266ff78 |
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d2.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
1acb8ef8 |
|
307b7b0d |
// drain d2, all containers should move to d1 |
83d18cf4 |
d1.UpdateNode(c, d2.NodeID(), func(n *swarm.Node) { |
307b7b0d |
n.Spec.Availability = swarm.NodeAvailabilityDrain
}) |
9266ff78 |
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout))
poll.WaitOn(c, pollCheck(c, d2.CheckActiveContainerCount, checker.Equals(0)), poll.WithTimeout(defaultReconciliationTimeout)) |
1acb8ef8 |
|
c502fb49 |
d2.Stop(c) |
1acb8ef8 |
|
83d18cf4 |
d1.SwarmInit(c, swarm.InitRequest{ |
1acb8ef8 |
ForceNewCluster: true, |
2cc5bd33 |
Spec: swarm.Spec{}, |
83d18cf4 |
}) |
1acb8ef8 |
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d1.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
1acb8ef8 |
d3 := s.AddDaemon(c, true, true) |
83d18cf4 |
info := d3.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, true)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
1acb8ef8 |
instances = 4 |
48de91a3 |
d3.UpdateService(c, d3.GetService(c, id), setInstances(instances)) |
1acb8ef8 |
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, reducedCheck(sumAsIntegers, d1.CheckActiveContainerCount, d3.CheckActiveContainerCount), checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
1acb8ef8 |
}
|
0d88d5b6 |
func simpleTestService(s *swarm.Service) { |
c93c6492 |
ureplicas := uint64(1)
restartDelay := time.Duration(100 * time.Millisecond)
|
0d88d5b6 |
s.Spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{ |
72c3bcf2 |
ContainerSpec: &swarm.ContainerSpec{ |
0d88d5b6 |
Image: "busybox:latest",
Command: []string{"/bin/top"},
}, |
c93c6492 |
RestartPolicy: &swarm.RestartPolicy{
Delay: &restartDelay,
}, |
0d88d5b6 |
},
Mode: swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &ureplicas,
},
},
}
s.Spec.Name = "top"
}
|
d327765a |
func serviceForUpdate(s *swarm.Service) { |
c93c6492 |
ureplicas := uint64(1)
restartDelay := time.Duration(100 * time.Millisecond)
|
d327765a |
s.Spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{ |
72c3bcf2 |
ContainerSpec: &swarm.ContainerSpec{ |
d327765a |
Image: "busybox:latest",
Command: []string{"/bin/top"},
}, |
c93c6492 |
RestartPolicy: &swarm.RestartPolicy{
Delay: &restartDelay,
}, |
d327765a |
},
Mode: swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &ureplicas,
},
},
UpdateConfig: &swarm.UpdateConfig{ |
57ae29aa |
Parallelism: 2, |
c93c6492 |
Delay: 4 * time.Second, |
57ae29aa |
FailureAction: swarm.UpdateFailureActionContinue, |
d327765a |
}, |
f9bd8ec8 |
RollbackConfig: &swarm.UpdateConfig{
Parallelism: 3,
Delay: 4 * time.Second,
FailureAction: swarm.UpdateFailureActionContinue,
}, |
d327765a |
}
s.Spec.Name = "updatetest"
}
|
83d18cf4 |
func setInstances(replicas int) testdaemon.ServiceConstructor { |
0d88d5b6 |
ureplicas := uint64(replicas)
return func(s *swarm.Service) {
s.Spec.Mode = swarm.ServiceMode{
Replicated: &swarm.ReplicatedService{
Replicas: &ureplicas,
},
}
}
}
|
83d18cf4 |
func setUpdateOrder(order string) testdaemon.ServiceConstructor { |
6763641d |
return func(s *swarm.Service) {
if s.Spec.UpdateConfig == nil {
s.Spec.UpdateConfig = &swarm.UpdateConfig{}
}
s.Spec.UpdateConfig.Order = order
}
}
|
83d18cf4 |
func setRollbackOrder(order string) testdaemon.ServiceConstructor { |
6763641d |
return func(s *swarm.Service) {
if s.Spec.RollbackConfig == nil {
s.Spec.RollbackConfig = &swarm.UpdateConfig{}
}
s.Spec.RollbackConfig.Order = order
}
}
|
83d18cf4 |
func setImage(image string) testdaemon.ServiceConstructor { |
d327765a |
return func(s *swarm.Service) { |
72c3bcf2 |
if s.Spec.TaskTemplate.ContainerSpec == nil {
s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
} |
d327765a |
s.Spec.TaskTemplate.ContainerSpec.Image = image
}
}
|
83d18cf4 |
func setFailureAction(failureAction string) testdaemon.ServiceConstructor { |
6d4b5276 |
return func(s *swarm.Service) {
s.Spec.UpdateConfig.FailureAction = failureAction
}
}
|
83d18cf4 |
func setMaxFailureRatio(maxFailureRatio float32) testdaemon.ServiceConstructor { |
6d4b5276 |
return func(s *swarm.Service) {
s.Spec.UpdateConfig.MaxFailureRatio = maxFailureRatio
}
}
|
83d18cf4 |
func setParallelism(parallelism uint64) testdaemon.ServiceConstructor { |
6d4b5276 |
return func(s *swarm.Service) {
s.Spec.UpdateConfig.Parallelism = parallelism
}
}
|
83d18cf4 |
func setConstraints(constraints []string) testdaemon.ServiceConstructor { |
1b1a7f29 |
return func(s *swarm.Service) {
if s.Spec.TaskTemplate.Placement == nil {
s.Spec.TaskTemplate.Placement = &swarm.Placement{}
}
s.Spec.TaskTemplate.Placement.Constraints = constraints
}
}
|
83d18cf4 |
func setPlacementPrefs(prefs []swarm.PlacementPreference) testdaemon.ServiceConstructor { |
17288c61 |
return func(s *swarm.Service) {
if s.Spec.TaskTemplate.Placement == nil {
s.Spec.TaskTemplate.Placement = &swarm.Placement{}
}
s.Spec.TaskTemplate.Placement.Preferences = prefs
}
}
|
0d88d5b6 |
func setGlobalMode(s *swarm.Service) {
s.Spec.Mode = swarm.ServiceMode{
Global: &swarm.GlobalService{},
}
} |
ae4137ae |
|
64a928a3 |
func checkClusterHealth(c *testing.T, cl []*daemon.Daemon, managerCount, workerCount int) { |
ae4137ae |
var totalMCount, totalWCount int |
fdcde8bb |
|
ae4137ae |
for _, d := range cl { |
fdcde8bb |
var (
info swarm.Info
)
// check info in a waitAndAssert, because if the cluster doesn't have a leader, `info` will return an error |
1b1fe4cc |
checkInfo := func(c *testing.T) (interface{}, string) { |
83d18cf4 |
client := d.NewClientT(c)
daemonInfo, err := client.Info(context.Background())
info = daemonInfo.Swarm |
2f069fa3 |
return err, "cluster not ready in time" |
fdcde8bb |
} |
9266ff78 |
poll.WaitOn(c, pollCheck(c, checkInfo, checker.IsNil()), poll.WithTimeout(defaultReconciliationTimeout)) |
ae4137ae |
if !info.ControlAvailable {
totalWCount++
continue
} |
fdcde8bb |
|
ae4137ae |
var leaderFound bool
totalMCount++
var mCount, wCount int |
fdcde8bb |
|
48de91a3 |
for _, n := range d.ListNodes(c) { |
1b1fe4cc |
waitReady := func(c *testing.T) (interface{}, string) { |
fdcde8bb |
if n.Status.State == swarm.NodeStateReady { |
be66788e |
return true, "" |
fdcde8bb |
} |
48de91a3 |
nn := d.GetNode(c, n.ID) |
fdcde8bb |
n = *nn |
673cf751 |
return n.Status.State == swarm.NodeStateReady, fmt.Sprintf("state of node %s, reported by %s", n.ID, d.NodeID()) |
fdcde8bb |
} |
9266ff78 |
poll.WaitOn(c, pollCheck(c, waitReady, checker.True()), poll.WithTimeout(defaultReconciliationTimeout)) |
fdcde8bb |
|
1b1fe4cc |
waitActive := func(c *testing.T) (interface{}, string) { |
fdcde8bb |
if n.Spec.Availability == swarm.NodeAvailabilityActive { |
be66788e |
return true, "" |
fdcde8bb |
} |
48de91a3 |
nn := d.GetNode(c, n.ID) |
fdcde8bb |
n = *nn |
673cf751 |
return n.Spec.Availability == swarm.NodeAvailabilityActive, fmt.Sprintf("availability of node %s, reported by %s", n.ID, d.NodeID()) |
fdcde8bb |
} |
9266ff78 |
poll.WaitOn(c, pollCheck(c, waitActive, checker.True()), poll.WithTimeout(defaultReconciliationTimeout)) |
fdcde8bb |
|
ae4137ae |
if n.Spec.Role == swarm.NodeRoleManager { |
6345208b |
assert.Assert(c, n.ManagerStatus != nil, "manager status of node %s (manager), reported by %s", n.ID, d.NodeID()) |
ae4137ae |
if n.ManagerStatus.Leader {
leaderFound = true
}
mCount++
} else { |
6345208b |
assert.Assert(c, n.ManagerStatus == nil, "manager status of node %s (worker), reported by %s", n.ID, d.NodeID()) |
ae4137ae |
wCount++
}
} |
6345208b |
assert.Equal(c, leaderFound, true, "lack of leader reported by node %s", info.NodeID)
assert.Equal(c, mCount, managerCount, "managers count reported by node %s", info.NodeID)
assert.Equal(c, wCount, workerCount, "workers count reported by node %s", info.NodeID) |
ae4137ae |
} |
6345208b |
assert.Equal(c, totalMCount, managerCount)
assert.Equal(c, totalWCount, workerCount) |
ae4137ae |
}
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmRestartCluster(c *testing.T) { |
ae4137ae |
mCount, wCount := 5, 1
|
83d18cf4 |
var nodes []*daemon.Daemon |
ae4137ae |
for i := 0; i < mCount; i++ {
manager := s.AddDaemon(c, true, true) |
83d18cf4 |
info := manager.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, true)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
ae4137ae |
nodes = append(nodes, manager)
}
for i := 0; i < wCount; i++ {
worker := s.AddDaemon(c, true, false) |
83d18cf4 |
info := worker.SwarmInfo(c) |
6345208b |
assert.Equal(c, info.ControlAvailable, false)
assert.Equal(c, info.LocalNodeState, swarm.LocalNodeStateActive) |
ae4137ae |
nodes = append(nodes, worker)
}
// stop whole cluster
{
var wg sync.WaitGroup
wg.Add(len(nodes))
errs := make(chan error, len(nodes))
for _, d := range nodes { |
83d18cf4 |
go func(daemon *daemon.Daemon) { |
ae4137ae |
defer wg.Done() |
c502fb49 |
if err := daemon.StopWithError(); err != nil { |
ae4137ae |
errs <- err
}
}(d)
}
wg.Wait()
close(errs)
for err := range errs { |
6345208b |
assert.NilError(c, err) |
ae4137ae |
}
}
// start whole cluster
{
var wg sync.WaitGroup
wg.Add(len(nodes))
errs := make(chan error, len(nodes))
for _, d := range nodes { |
83d18cf4 |
go func(daemon *daemon.Daemon) { |
ae4137ae |
defer wg.Done() |
c502fb49 |
if err := daemon.StartWithError("--iptables=false"); err != nil { |
ae4137ae |
errs <- err
}
}(d)
}
wg.Wait()
close(errs)
for err := range errs { |
6345208b |
assert.NilError(c, err) |
ae4137ae |
}
}
checkClusterHealth(c, nodes, mCount, wCount)
} |
80e39751 |
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmServicesUpdateWithName(c *testing.T) { |
80e39751 |
d := s.AddDaemon(c, true, true)
instances := 2 |
48de91a3 |
id := d.CreateService(c, simpleTestService, setInstances(instances)) |
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
80e39751 |
|
48de91a3 |
service := d.GetService(c, id) |
80e39751 |
instances = 5
setInstances(instances)(service) |
2cb7b73a |
cli := d.NewClientT(c) |
0fd5a654 |
defer cli.Close() |
2cb7b73a |
_, err := cli.ServiceUpdate(context.Background(), service.Spec.Name, service.Version, service.Spec, types.ServiceUpdateOptions{}) |
6345208b |
assert.NilError(c, err) |
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
80e39751 |
} |
857e60c2 |
|
a6a0880a |
// Unlocking an unlocked swarm results in an error |
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmUnlockNotLocked(c *testing.T) { |
a6a0880a |
d := s.AddDaemon(c, true, true) |
e063099f |
err := d.SwarmUnlock(c, swarm.UnlockRequest{UnlockKey: "wrong-key"}) |
6345208b |
assert.ErrorContains(c, err, "swarm is not locked") |
a6a0880a |
} |
d377b074 |
// #29885 |
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmErrorHandling(c *testing.T) { |
d377b074 |
ln, err := net.Listen("tcp", fmt.Sprintf(":%d", defaultSwarmPort)) |
6345208b |
assert.NilError(c, err) |
d377b074 |
defer ln.Close()
d := s.AddDaemon(c, false, false) |
83d18cf4 |
client := d.NewClientT(c)
_, err = client.SwarmInit(context.Background(), swarm.InitRequest{
ListenAddr: d.SwarmListenAddr(),
}) |
6345208b |
assert.ErrorContains(c, err, "address already in use") |
d377b074 |
} |
05a831a7 |
// Test case for 30242, where duplicate networks, with different drivers `bridge` and `overlay`,
// caused both scopes to be `swarm` for `docker network inspect` and `docker network ls`.
// This test makes sure the fixes correctly output scopes instead. |
64a928a3 |
func (s *DockerSwarmSuite) TestAPIDuplicateNetworks(c *testing.T) { |
05a831a7 |
d := s.AddDaemon(c, true, true) |
2cb7b73a |
cli := d.NewClientT(c) |
0fd5a654 |
defer cli.Close() |
05a831a7 |
name := "foo" |
0fd5a654 |
networkCreate := types.NetworkCreate{
CheckDuplicate: false, |
05a831a7 |
}
|
0fd5a654 |
networkCreate.Driver = "bridge" |
05a831a7 |
|
0fd5a654 |
n1, err := cli.NetworkCreate(context.Background(), name, networkCreate) |
6345208b |
assert.NilError(c, err) |
05a831a7 |
|
0fd5a654 |
networkCreate.Driver = "overlay" |
05a831a7 |
|
0fd5a654 |
n2, err := cli.NetworkCreate(context.Background(), name, networkCreate) |
6345208b |
assert.NilError(c, err) |
05a831a7 |
|
0fd5a654 |
r1, err := cli.NetworkInspect(context.Background(), n1.ID, types.NetworkInspectOptions{}) |
6345208b |
assert.NilError(c, err)
assert.Equal(c, r1.Scope, "local") |
05a831a7 |
|
0fd5a654 |
r2, err := cli.NetworkInspect(context.Background(), n2.ID, types.NetworkInspectOptions{}) |
6345208b |
assert.NilError(c, err)
assert.Equal(c, r2.Scope, "swarm") |
05a831a7 |
} |
8feb5c5a |
// Test case for 30178 |
64a928a3 |
func (s *DockerSwarmSuite) TestAPISwarmHealthcheckNone(c *testing.T) { |
0d31dee5 |
// Issue #36386 can be a independent one, which is worth further investigation.
c.Skip("Root cause of Issue #36386 is needed") |
8feb5c5a |
d := s.AddDaemon(c, true, true)
out, err := d.Cmd("network", "create", "-d", "overlay", "lb") |
6345208b |
assert.NilError(c, err, out) |
8feb5c5a |
instances := 1
d.CreateService(c, simpleTestService, setInstances(instances), func(s *swarm.Service) { |
72c3bcf2 |
if s.Spec.TaskTemplate.ContainerSpec == nil {
s.Spec.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
} |
8feb5c5a |
s.Spec.TaskTemplate.ContainerSpec.Healthcheck = &container.HealthConfig{}
s.Spec.TaskTemplate.Networks = []swarm.NetworkAttachmentConfig{
{Target: "lb"},
}
})
|
9266ff78 |
poll.WaitOn(c, pollCheck(c, d.CheckActiveContainerCount, checker.Equals(instances)), poll.WithTimeout(defaultReconciliationTimeout)) |
8feb5c5a |
|
9722214c |
containers := d.ActiveContainers(c) |
8feb5c5a |
out, err = d.Cmd("exec", containers[0], "ping", "-c1", "-W3", "top") |
6345208b |
assert.NilError(c, err, out) |
8feb5c5a |
} |
376c75d1 |
|
64a928a3 |
// TestSwarmRepeatedRootRotation rotates the cluster root CA four times in a
// row, alternating between an externally generated root certificate and an
// internally generated one, and verifies after each rotation that the
// manager and the worker both converge to the new trust root.
func (s *DockerSwarmSuite) TestSwarmRepeatedRootRotation(c *testing.T) {
	m := s.AddDaemon(c, true, true)
	w := s.AddDaemon(c, true, false)

	info := m.SwarmInfo(c)
	currentTrustRoot := info.Cluster.TLSInfo.TrustRoot
	// rotate multiple times
	for i := 0; i < 4; i++ {
		var err error
		var cert, key []byte
		// On odd iterations, generate an external root CA to rotate to; on
		// even iterations cert/key stay nil, so the empty SigningCACert
		// below asks swarm to generate a new internal root instead.
		if i%2 != 0 {
			cert, _, key, err = initca.New(&csr.CertificateRequest{
				CN:         "newRoot",
				KeyRequest: csr.NewBasicKeyRequest(),
				CA:         &csr.CAConfig{Expiry: ca.RootCAExpiration},
			})
			assert.NilError(c, err)
		}
		// expectedCert is "" when cert is nil (internal-rotation iterations).
		expectedCert := string(cert)
		m.UpdateSwarm(c, func(s *swarm.Spec) {
			s.CAConfig.SigningCACert = expectedCert
			s.CAConfig.SigningCAKey = string(key)
			s.CAConfig.ForceRotate++
		})
		// poll to make sure update succeeds
		var clusterTLSInfo swarm.TLSInfo
		// Up to 18 * 250ms = 4.5s for the rotation to complete.
		for j := 0; j < 18; j++ {
			info := m.SwarmInfo(c)
			// the desired CA cert and key is always redacted
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCAKey, "")
			assert.Equal(c, info.Cluster.Spec.CAConfig.SigningCACert, "")

			clusterTLSInfo = info.Cluster.TLSInfo

			// if root rotation is done and the trust root has changed, we don't have to poll anymore
			if !info.Cluster.RootRotationInProgress && clusterTLSInfo.TrustRoot != currentTrustRoot {
				break
			}
			// root rotation not done
			time.Sleep(250 * time.Millisecond)
		}
		// Only when we supplied the cert ourselves can we predict the
		// resulting trust root exactly.
		if cert != nil {
			assert.Equal(c, clusterTLSInfo.TrustRoot, expectedCert)
		}
		// could take another second or two for the nodes to trust the new roots after they've all gotten
		// new TLS certificates
		for j := 0; j < 18; j++ {
			mInfo := m.GetNode(c, m.NodeID()).Description.TLSInfo
			wInfo := m.GetNode(c, w.NodeID()).Description.TLSInfo
			if mInfo.TrustRoot == clusterTLSInfo.TrustRoot && wInfo.TrustRoot == clusterTLSInfo.TrustRoot {
				break
			}
			// nodes don't trust root certs yet
			time.Sleep(250 * time.Millisecond)
		}

		// Final hard assertions: both nodes must now report the cluster's TLS info.
		assert.DeepEqual(c, m.GetNode(c, m.NodeID()).Description.TLSInfo, clusterTLSInfo)
		assert.DeepEqual(c, m.GetNode(c, w.NodeID()).Description.TLSInfo, clusterTLSInfo)
		currentTrustRoot = clusterTLSInfo.TrustRoot
	}
}
158b2a18 |
|
64a928a3 |
func (s *DockerSwarmSuite) TestAPINetworkInspectWithScope(c *testing.T) { |
158b2a18 |
d := s.AddDaemon(c, true, true)
|
0a91ba2d |
name := "test-scoped-network"
ctx := context.Background() |
2cb7b73a |
apiclient := d.NewClientT(c) |
158b2a18 |
|
0a91ba2d |
resp, err := apiclient.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "overlay"}) |
6be0f709 |
assert.NilError(c, err) |
158b2a18 |
|
0a91ba2d |
network, err := apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{}) |
6be0f709 |
assert.NilError(c, err)
assert.Check(c, is.Equal("swarm", network.Scope))
assert.Check(c, is.Equal(resp.ID, network.ID)) |
158b2a18 |
|
0a91ba2d |
_, err = apiclient.NetworkInspect(ctx, name, types.NetworkInspectOptions{Scope: "local"}) |
6be0f709 |
assert.Check(c, client.IsErrNotFound(err)) |
158b2a18 |
} |