Signed-off-by: Dong Chen <dongluo.chen@docker.com>
@@ -140,6 +140,26 @@ func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task {
 	return tasks
 }
 
+func (d *SwarmDaemon) checkServiceRunningTasks(c *check.C, service string) func(*check.C) (interface{}, check.CommentInterface) {
+	return func(*check.C) (interface{}, check.CommentInterface) {
+		tasks := d.getServiceTasks(c, service)
+		var runningCount int
+		for _, task := range tasks {
+			if task.Status.State == swarm.TaskStateRunning {
+				runningCount++
+			}
+		}
+		return runningCount, nil
+	}
+}
+
+func (d *SwarmDaemon) checkServiceTasks(c *check.C, service string) func(*check.C) (interface{}, check.CommentInterface) {
+	return func(*check.C) (interface{}, check.CommentInterface) {
+		tasks := d.getServiceTasks(c, service)
+		return len(tasks), nil
+	}
+}
+
 func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) {
 	var tasks []swarm.Task
 
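Note on the helper shape: checkRunningTaskImages is passed to waitAndAssert directly, but these two new helpers also need a service ID, so they return a closure with the polling signature func(*check.C) (interface{}, check.CommentInterface) instead. A minimal standalone sketch of how a waitAndAssert-style loop consumes such a closure (types simplified here; the real suite uses gopkg.in/check.v1 and its checker matchers):

	package main

	import (
		"fmt"
		"time"
	)

	// pollFn mirrors func(*check.C) (interface{}, check.CommentInterface).
	type pollFn func() (interface{}, error)

	// waitFor polls fn until its value equals want or the timeout elapses.
	func waitFor(timeout time.Duration, fn pollFn, want interface{}) error {
		deadline := time.Now().Add(timeout)
		for {
			got, _ := fn()
			if got == want {
				return nil
			}
			if time.Now().After(deadline) {
				return fmt.Errorf("timed out: want %v, last got %v", want, got)
			}
			time.Sleep(100 * time.Millisecond)
		}
	}

	func main() {
		count := 0
		err := waitFor(time.Second, func() (interface{}, error) {
			count++ // stand-in for counting running tasks
			return count, nil
		}, 3)
		fmt.Println(err) // <nil> once the counter reaches 3
	}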
@@ -320,6 +320,153 @@ func (s *DockerSwarmSuite) TestApiSwarmServicesUpdate(c *check.C) {
 		map[string]int{image2: instances})
 }
 
+func (s *DockerSwarmSuite) TestApiSwarmServiceConstraintRole(c *check.C) {
+	const nodeCount = 3
+	var daemons [nodeCount]*SwarmDaemon
+	for i := 0; i < nodeCount; i++ {
+		daemons[i] = s.AddDaemon(c, true, i == 0)
+	}
+	// wait for nodes ready
+	waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount)
+
+	// create service
+	constraints := []string{"node.role==worker"}
+	instances := 3
+	id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	// wait for tasks ready
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(c, id), checker.Equals, instances)
+	// validate tasks are running on worker nodes
+	tasks := daemons[0].getServiceTasks(c, id)
+	for _, task := range tasks {
+		node := daemons[0].getNode(c, task.NodeID)
+		c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleWorker)
+	}
+	// remove service
+	daemons[0].removeService(c, id)
+
+	// create service
+	constraints = []string{"node.role!=worker"}
+	id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	// wait for tasks ready
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(c, id), checker.Equals, instances)
+	tasks = daemons[0].getServiceTasks(c, id)
+	// validate tasks are running on manager nodes
+	for _, task := range tasks {
+		node := daemons[0].getNode(c, task.NodeID)
+		c.Assert(node.Spec.Role, checker.Equals, swarm.NodeRoleManager)
+	}
+	// remove service
+	daemons[0].removeService(c, id)
+
+	// create service
+	constraints = []string{"node.role==nosuchrole"}
+	id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	// wait for tasks created
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(c, id), checker.Equals, instances)
+	// let scheduler try
+	time.Sleep(250 * time.Millisecond)
+	// validate tasks are not assigned to any node
+	tasks = daemons[0].getServiceTasks(c, id)
+	for _, task := range tasks {
+		c.Assert(task.NodeID, checker.Equals, "")
+	}
+}
+
+func (s *DockerSwarmSuite) TestApiSwarmServiceConstraintLabel(c *check.C) {
+	const nodeCount = 3
+	var daemons [nodeCount]*SwarmDaemon
+	for i := 0; i < nodeCount; i++ {
+		daemons[i] = s.AddDaemon(c, true, i == 0)
+	}
+	// wait for nodes ready
+	waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount)
+	nodes := daemons[0].listNodes(c)
+	c.Assert(len(nodes), checker.Equals, nodeCount)
+
+	// add labels to nodes
+	daemons[0].updateNode(c, nodes[0].ID, func(n *swarm.Node) {
+		n.Spec.Annotations.Labels = map[string]string{
+			"security": "high",
+		}
+	})
+	for i := 1; i < nodeCount; i++ {
+		daemons[0].updateNode(c, nodes[i].ID, func(n *swarm.Node) {
+			n.Spec.Annotations.Labels = map[string]string{
+				"security": "low",
+			}
+		})
+	}
+
+	// create service
+	instances := 3
+	constraints := []string{"node.labels.security==high"}
+	id := daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	// wait for tasks ready
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(c, id), checker.Equals, instances)
+	tasks := daemons[0].getServiceTasks(c, id)
+	// validate all tasks are running on nodes[0]
+	for _, task := range tasks {
+		c.Assert(task.NodeID, checker.Equals, nodes[0].ID)
+	}
+	// remove service
+	daemons[0].removeService(c, id)
+
+	// create service
+	constraints = []string{"node.labels.security!=high"}
+	id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	// wait for tasks ready
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(c, id), checker.Equals, instances)
+	tasks = daemons[0].getServiceTasks(c, id)
+	// validate all tasks are NOT running on nodes[0]
+	for _, task := range tasks {
+		c.Assert(task.NodeID, checker.Not(checker.Equals), nodes[0].ID)
+	}
+	// remove service
+	daemons[0].removeService(c, id)
+
+	constraints = []string{"node.labels.security==medium"}
+	id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	// wait for tasks created
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(c, id), checker.Equals, instances)
+	// let scheduler try
+	time.Sleep(250 * time.Millisecond)
+	tasks = daemons[0].getServiceTasks(c, id)
+	// validate tasks are not assigned
+	for _, task := range tasks {
+		c.Assert(task.NodeID, checker.Equals, "")
+	}
+	// remove service
+	daemons[0].removeService(c, id)
+
+	// multiple constraints
+	constraints = []string{
+		"node.labels.security==high",
+		fmt.Sprintf("node.id==%s", nodes[1].ID),
+	}
+	id = daemons[0].createService(c, simpleTestService, setConstraints(constraints), setInstances(instances))
+	// wait for tasks created
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceTasks(c, id), checker.Equals, instances)
+	// let scheduler try
+	time.Sleep(250 * time.Millisecond)
+	tasks = daemons[0].getServiceTasks(c, id)
+	// validate tasks are not assigned
+	for _, task := range tasks {
+		c.Assert(task.NodeID, checker.Equals, "")
+	}
+	// make nodes[1] fulfill the constraints
+	daemons[0].updateNode(c, nodes[1].ID, func(n *swarm.Node) {
+		n.Spec.Annotations.Labels = map[string]string{
+			"security": "high",
+		}
+	})
+	// wait for tasks ready
+	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkServiceRunningTasks(c, id), checker.Equals, instances)
+	tasks = daemons[0].getServiceTasks(c, id)
+	for _, task := range tasks {
+		c.Assert(task.NodeID, checker.Equals, nodes[1].ID)
+	}
+}
+
 func (s *DockerSwarmSuite) TestApiSwarmServicesStateReporting(c *check.C) {
 	testRequires(c, Network)
 	testRequires(c, SameHostDaemon)
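For context on the expressions exercised above: each constraint is a single "key==value" or "key!=value" test, where the key is a node attribute such as node.role, node.id, or node.labels.<name>, and a task is only schedulable on nodes that satisfy every constraint. A toy evaluator, illustrative only (the real parsing and filtering happen in SwarmKit's scheduler, not in this suite):

	package main

	import (
		"fmt"
		"strings"
	)

	// match evaluates one "key==value" or "key!=value" expression
	// against a flat map of node attributes.
	func match(expr string, attrs map[string]string) bool {
		if parts := strings.SplitN(expr, "!=", 2); len(parts) == 2 {
			return attrs[parts[0]] != parts[1]
		}
		parts := strings.SplitN(expr, "==", 2)
		return len(parts) == 2 && attrs[parts[0]] == parts[1]
	}

	func main() {
		node := map[string]string{
			"node.role":            "worker",
			"node.labels.security": "high",
		}
		fmt.Println(match("node.role==worker", node))            // true
		fmt.Println(match("node.labels.security!=high", node))   // false
		fmt.Println(match("node.labels.security==medium", node)) // false: no node can ever match
	}

This is also why the unsatisfiable cases (node.role==nosuchrole, node.labels.security==medium, and the contradictory multi-constraint case) wait with checkServiceTasks rather than checkServiceRunningTasks: the tasks are created but never assigned, so the tests poll for task count, give the scheduler a moment, and then assert that every task still has an empty NodeID.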
@@ -817,6 +964,15 @@ func setImage(image string) serviceConstructor {
 	}
 }
 
+func setConstraints(constraints []string) serviceConstructor {
+	return func(s *swarm.Service) {
+		if s.Spec.TaskTemplate.Placement == nil {
+			s.Spec.TaskTemplate.Placement = &swarm.Placement{}
+		}
+		s.Spec.TaskTemplate.Placement.Constraints = constraints
+	}
+}
+
 func setGlobalMode(s *swarm.Service) {
 	s.Spec.Mode = swarm.ServiceMode{
 		Global: &swarm.GlobalService{},