I noticed that this test failed because the node was in status "pending".
The test checks the node's status immediately after the node was restarted,
so the node possibly needs some time to unlock before it reports "active".
14:07:10 FAIL: docker_cli_swarm_test.go:1128: DockerSwarmSuite.TestSwarmLockUnlockCluster
...
14:07:10 docker_cli_swarm_test.go:1168:
14:07:10 checkSwarmLockedToUnlocked(c, d)
14:07:10 docker_cli_swarm_test.go:1017:
14:07:10 c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
14:07:10 ... obtained swarm.LocalNodeState = "pending"
14:07:10 ... expected swarm.LocalNodeState = "active"
This patch adds a `waitAndAssert` for the node's status, with a 1 second timeout.
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
@@ -1014,7 +1014,7 @@ func checkSwarmLockedToUnlocked(c *check.C, d *daemon.Daemon) {
 	waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, false)
 
 	d.RestartNode(c)
-	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateActive)
+	waitAndAssert(c, time.Second, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 }
 
 func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Daemon) {
@@ -1022,7 +1022,7 @@ func checkSwarmUnlockedToLocked(c *check.C, d *daemon.Daemon) {
 	waitAndAssert(c, defaultReconciliationTimeout, checkKeyIsEncrypted(d), checker.Equals, true)
 
 	d.RestartNode(c)
-	c.Assert(getNodeStatus(c, d), checker.Equals, swarm.LocalNodeStateLocked)
+	waitAndAssert(c, time.Second, d.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStateLocked)
 }
 
 func (s *DockerSwarmSuite) TestUnlockEngineAndUnlockedSwarm(c *check.C) {
@@ -1197,7 +1197,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 	// joined workers start off unlocked
 	d2 := s.AddDaemon(c, true, false)
 	d2.RestartNode(c)
-	c.Assert(getNodeStatus(c, d2), checker.Equals, swarm.LocalNodeStateActive)
+	waitAndAssert(c, time.Second, d2.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
 	// promote worker
 	outs, err = d1.Cmd("node", "promote", d2.NodeID())
@@ -1242,7 +1242,7 @@ func (s *DockerSwarmSuite) TestSwarmJoinPromoteLocked(c *check.C) {
 
 	// by now, it should *never* be locked on restart
 	d3.RestartNode(c)
-	c.Assert(getNodeStatus(c, d3), checker.Equals, swarm.LocalNodeStateActive)
+	waitAndAssert(c, time.Second, d3.CheckLocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 }
 
 func (s *DockerSwarmSuite) TestSwarmRotateUnlockKey(c *check.C) {