github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/swarmkit/manager/orchestrator/replicated/replicated_test.go

package replicated

import (
	"context"
	"testing"
	"time"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/orchestrator/testutils"
	"github.com/docker/swarmkit/manager/state"
	"github.com/docker/swarmkit/manager/state/store"
	"github.com/docker/swarmkit/protobuf/ptypes"
	gogotypes "github.com/gogo/protobuf/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestReplicatedOrchestrator(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue() /*api.EventCreateTask{}, api.EventUpdateTask{}*/)
	defer cancel()

	// Create a service with two instances specified before the orchestrator
	// is started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		s1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, s1))
		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	observedTask1 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")

	observedTask2 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")

	// Create a second service.
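	// The running orchestrator should react by creating a single task
	// for it.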
	err = s.Update(func(tx store.Tx) error {
		s2 := &api.Service{
			ID: "id2",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name2",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 1,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, s2))
		return nil
	})
	assert.NoError(t, err)

	observedTask3 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name2")

	// Update a service to scale it out to 3 instances.
	err = s.Update(func(tx store.Tx) error {
		s2 := &api.Service{
			ID: "id2",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name2",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 3,
					},
				},
			},
		}
		assert.NoError(t, store.UpdateService(tx, s2))
		return nil
	})
	assert.NoError(t, err)

	observedTask4 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name2")

	observedTask5 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask5.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask5.ServiceAnnotations.Name, "name2")

	// Now scale it back down to 1 instance.
	err = s.Update(func(tx store.Tx) error {
		s2 := &api.Service{
			ID: "id2",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name2",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 1,
					},
				},
			},
		}
		assert.NoError(t, store.UpdateService(tx, s2))
		return nil
	})
	assert.NoError(t, err)

	observedUpdateRemove1 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, observedUpdateRemove1.DesiredState, api.TaskStateRemove)
	assert.Equal(t, observedUpdateRemove1.ServiceAnnotations.Name, "name2")

	observedUpdateRemove2 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, observedUpdateRemove2.DesiredState, api.TaskStateRemove)
	assert.Equal(t, observedUpdateRemove2.ServiceAnnotations.Name, "name2")

	// There should be one remaining task attached to service id2/name2.
	var liveTasks []*api.Task
	s.View(func(readTx store.ReadTx) {
		var tasks []*api.Task
		tasks, err = store.FindTasks(readTx, store.ByServiceID("id2"))
		for _, t := range tasks {
			if t.DesiredState == api.TaskStateRunning {
				liveTasks = append(liveTasks, t)
			}
		}
	})
	assert.NoError(t, err)
	assert.Len(t, liveTasks, 1)

	// Delete the remaining task directly. It should be recreated by the
	// orchestrator.
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.DeleteTask(tx, liveTasks[0].ID))
		return nil
	})
	assert.NoError(t, err)

	observedTask6 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask6.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask6.ServiceAnnotations.Name, "name2")

	// Delete the service. Its remaining task should go away.
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.DeleteService(tx, "id2"))
		return nil
	})
	assert.NoError(t, err)

	deletedTask := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, deletedTask.Status.State, api.TaskStateNew)
	assert.Equal(t, deletedTask.ServiceAnnotations.Name, "name2")
}

func TestReplicatedScaleDown(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	watch, cancel := state.Watch(s.WatchQueue(), api.EventUpdateTask{}, api.EventDeleteTask{})
	defer cancel()

	s1 := &api.Service{
		ID: "id1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: 6,
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, s1))

		nodes := []*api.Node{
			{
				ID: "node1",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name1",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
			{
				ID: "node2",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name2",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
			{
				ID: "node3",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name3",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
		}
		for _, node := range nodes {
			assert.NoError(t, store.CreateNode(tx, node))
		}

		// task1 is assigned to node1.
		// task2 - task3 are assigned to node2.
		// task4 - task6 are assigned to node3.
		// task7 is unassigned.
		tasks := []*api.Task{
			{
				ID:           "task1",
				Slot:         1,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateStarting,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task1",
				},
				ServiceID: "id1",
				NodeID:    "node1",
			},
			{
				ID:           "task2",
				Slot:         2,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task2",
				},
				ServiceID: "id1",
				NodeID:    "node2",
			},
			{
				ID:           "task3",
				Slot:         3,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task3",
				},
				ServiceID: "id1",
				NodeID:    "node2",
			},
			{
				ID:           "task4",
				Slot:         4,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task4",
				},
				ServiceID: "id1",
				NodeID:    "node3",
			},
			{
				ID:           "task5",
				Slot:         5,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task5",
				},
				ServiceID: "id1",
				NodeID:    "node3",
			},
			{
				ID:           "task6",
				Slot:         6,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task6",
				},
				ServiceID: "id1",
				NodeID:    "node3",
			},
			{
				ID:           "task7",
				Slot:         7,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateNew,
				},
				ServiceAnnotations: api.Annotations{
					Name: "task7",
				},
				ServiceID: "id1",
			},
		}
		for _, task := range tasks {
			assert.NoError(t, store.CreateTask(tx, task))
		}

		return nil
	})
	assert.NoError(t, err)

	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	// Replicas was set to 6, but we started with 7 tasks. task7 should be
	// the one the orchestrator chooses to shut down, because it has not
	// been assigned to a node yet. The desired state of task7 will be set
	// to REMOVE.
	observedUpdateRemove := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, api.TaskStateRemove, observedUpdateRemove.DesiredState)
	assert.Equal(t, "task7", observedUpdateRemove.ID)

	// Now scale down to 4 instances.
	err = s.Update(func(tx store.Tx) error {
		s1.Spec.Mode = &api.ServiceSpec_Replicated{
			Replicated: &api.ReplicatedService{
				Replicas: 4,
			},
		}
		assert.NoError(t, store.UpdateService(tx, s1))
		return nil
	})
	assert.NoError(t, err)

	// Tasks should be shut down in a way that balances the remaining tasks.
	// node2 should be preferred over node3 because node2's tasks have
	// lower Slot numbers than node3's tasks.
	shutdowns := make(map[string]int)
	for i := 0; i != 2; i++ {
		observedUpdateDesiredRemove := testutils.WatchTaskUpdate(t, watch)
		assert.Equal(t, api.TaskStateRemove, observedUpdateDesiredRemove.DesiredState)
		shutdowns[observedUpdateDesiredRemove.NodeID]++
	}

	assert.Equal(t, 0, shutdowns["node1"])
	assert.Equal(t, 0, shutdowns["node2"])
	assert.Equal(t, 2, shutdowns["node3"])

	// task4 should be preferred over task5 and task6.
	s.View(func(readTx store.ReadTx) {
		tasks, err := store.FindTasks(readTx, store.ByNodeID("node3"))
		require.NoError(t, err)
		for _, task := range tasks {
			if task.DesiredState == api.TaskStateRunning {
				assert.Equal(t, "task4", task.ID)
			}
		}
	})

	// Now scale down to 2 instances.
	err = s.Update(func(tx store.Tx) error {
		s1.Spec.Mode = &api.ServiceSpec_Replicated{
			Replicated: &api.ReplicatedService{
				Replicas: 2,
			},
		}
		assert.NoError(t, store.UpdateService(tx, s1))
		return nil
	})
	assert.NoError(t, err)

	// Tasks should be shut down in a way that balances the remaining tasks.
	// node2 and node3 should be preferred over node1 because node1's task
	// is not running yet.
	shutdowns = make(map[string]int)
	for i := 0; i != 2; i++ {
		observedUpdateDesiredRemove := testutils.WatchTaskUpdate(t, watch)
		assert.Equal(t, api.TaskStateRemove, observedUpdateDesiredRemove.DesiredState)
		shutdowns[observedUpdateDesiredRemove.NodeID]++
	}

	assert.Equal(t, 1, shutdowns["node1"])
	assert.Equal(t, 1, shutdowns["node2"])
	assert.Equal(t, 0, shutdowns["node3"])

	// There should be remaining tasks on node2 and node3. task2 should be
	// preferred over task3 on node2.
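	// (As with task4 above, the task with the lower Slot number on node2
	// is the one kept.)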
	s.View(func(readTx store.ReadTx) {
		tasks, err := store.FindTasks(readTx, store.ByDesiredState(api.TaskStateRunning))
		require.NoError(t, err)
		require.Len(t, tasks, 2)
		if tasks[0].NodeID == "node2" {
			assert.Equal(t, "task2", tasks[0].ID)
			assert.Equal(t, "node3", tasks[1].NodeID)
		} else {
			assert.Equal(t, "node3", tasks[0].NodeID)
			assert.Equal(t, "node2", tasks[1].NodeID)
			assert.Equal(t, "task2", tasks[1].ID)
		}
	})
}

func TestInitializationRejectedTasks(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	service1 := &api.Service{
		ID: "serviceid1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{},
				},
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: 1,
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, service1))

		nodes := []*api.Node{
			{
				ID: "node1",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name1",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
		}
		for _, node := range nodes {
			assert.NoError(t, store.CreateNode(tx, node))
		}

		// 1 rejected task is in store before orchestrator starts
		tasks := []*api.Task{
			{
				ID:           "task1",
				Slot:         1,
				DesiredState: api.TaskStateReady,
				Status: api.TaskStatus{
					State: api.TaskStateRejected,
				},
				Spec: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				ServiceAnnotations: api.Annotations{
					Name: "task1",
				},
				ServiceID: "serviceid1",
				NodeID:    "node1",
			},
		}
		for _, task := range tasks {
			assert.NoError(t, store.CreateTask(tx, task))
		}

		return nil
	})
	assert.NoError(t, err)

	// watch orchestration events
	watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{})
	defer cancel()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	// initTask triggers an update event
	observedTask1 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, observedTask1.ID, "task1")
	assert.Equal(t, observedTask1.Status.State, api.TaskStateRejected)
	assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown)

	// a new task is created
	observedTask2 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.ServiceID, "serviceid1")
	// it has not been scheduled
	assert.Equal(t, observedTask2.NodeID, "")
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady)

	var deadCnt, liveCnt int
	s.View(func(readTx store.ReadTx) {
		var tasks []*api.Task
		tasks, err = store.FindTasks(readTx, store.ByServiceID("serviceid1"))
		for _, task := range tasks {
			if task.DesiredState == api.TaskStateShutdown {
				assert.Equal(t, task.ID, "task1")
				deadCnt++
			} else {
				liveCnt++
			}
		}
	})
	assert.NoError(t, err)
	assert.Equal(t, deadCnt, 1)
	assert.Equal(t, liveCnt, 1)
}

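// TestInitializationFailedTasks checks that a task already in the FAILED
// state when the orchestrator starts is marked for shutdown and replaced
// by a newly created task.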
func TestInitializationFailedTasks(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	service1 := &api.Service{
		ID: "serviceid1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{},
				},
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: 2,
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, service1))

		nodes := []*api.Node{
			{
				ID: "node1",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name1",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
		}
		for _, node := range nodes {
			assert.NoError(t, store.CreateNode(tx, node))
		}

		// 1 failed task is in store before orchestrator starts
		tasks := []*api.Task{
			{
				ID:           "task1",
				Slot:         1,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateFailed,
				},
				Spec: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				ServiceAnnotations: api.Annotations{
					Name: "task1",
				},
				ServiceID: "serviceid1",
				NodeID:    "node1",
			},
			{
				ID:           "task2",
				Slot:         2,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateStarting,
				},
				Spec: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				ServiceAnnotations: api.Annotations{
					Name: "task2",
				},
				ServiceID: "serviceid1",
				NodeID:    "node1",
			},
		}
		for _, task := range tasks {
			assert.NoError(t, store.CreateTask(tx, task))
		}

		return nil
	})
	assert.NoError(t, err)

	// watch orchestration events
	watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{})
	defer cancel()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	// initTask triggers an update
	observedTask1 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, observedTask1.ID, "task1")
	assert.Equal(t, observedTask1.Status.State, api.TaskStateFailed)
	assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown)

	// a new task is created
	observedTask2 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.ServiceID, "serviceid1")
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady)

	var deadCnt, liveCnt int
	s.View(func(readTx store.ReadTx) {
		var tasks []*api.Task
		tasks, err = store.FindTasks(readTx, store.ByServiceID("serviceid1"))
		for _, task := range tasks {
			if task.DesiredState == api.TaskStateShutdown {
				assert.Equal(t, task.ID, "task1")
				deadCnt++
			} else {
				liveCnt++
			}
		}
	})
	assert.NoError(t, err)
	assert.Equal(t, deadCnt, 1)
	assert.Equal(t, liveCnt, 2)
}

func TestInitializationNodeDown(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	service1 := &api.Service{
		ID: "serviceid1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{},
				},
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: 1,
				},
			},
		},
	}

	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, service1))

		nodes := []*api.Node{
			{
				ID: "node1",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name1",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_DOWN,
				},
			},
		}
		for _, node := range nodes {
			assert.NoError(t, store.CreateNode(tx, node))
		}

		// 1 task on the down node is in store before orchestrator starts
		tasks := []*api.Task{
			{
				ID:           "task1",
				Slot:         1,
				DesiredState: api.TaskStateRunning,
				Status: api.TaskStatus{
					State: api.TaskStateRunning,
				},
				Spec: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
				},
				ServiceAnnotations: api.Annotations{
					Name: "task1",
				},
				ServiceID: "serviceid1",
				NodeID:    "node1",
			},
		}
		for _, task := range tasks {
			assert.NoError(t, store.CreateTask(tx, task))
		}

		return nil
	})
	assert.NoError(t, err)

	// watch orchestration events
	watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{})
	defer cancel()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	// initTask triggers an update
	observedTask1 := testutils.WatchTaskUpdate(t, watch)
	assert.Equal(t, observedTask1.ID, "task1")
	assert.Equal(t, observedTask1.Status.State, api.TaskStateRunning)
	assert.Equal(t, observedTask1.DesiredState, api.TaskStateShutdown)

	// a new task is created
	observedTask2 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.ServiceID, "serviceid1")
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.DesiredState, api.TaskStateReady)
}

func TestInitializationDelayStart(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	service1 := &api.Service{
		ID: "serviceid1",
		Spec: api.ServiceSpec{
			Annotations: api.Annotations{
				Name: "name1",
			},
			Task: api.TaskSpec{
				Runtime: &api.TaskSpec_Container{
					Container: &api.ContainerSpec{},
				},
				Restart: &api.RestartPolicy{
					Condition: api.RestartOnAny,
					Delay:     gogotypes.DurationProto(100 * time.Millisecond),
				},
			},
			Mode: &api.ServiceSpec_Replicated{
				Replicated: &api.ReplicatedService{
					Replicas: 1,
				},
			},
		},
	}

	before := time.Now()
	err := s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.CreateService(tx, service1))

		nodes := []*api.Node{
			{
				ID: "node1",
				Spec: api.NodeSpec{
					Annotations: api.Annotations{
						Name: "name1",
					},
					Availability: api.NodeAvailabilityActive,
				},
				Status: api.NodeStatus{
					State: api.NodeStatus_READY,
				},
			},
		}
		for _, node := range nodes {
			assert.NoError(t, store.CreateNode(tx, node))
		}

		// 1 task with a restart delay is in store before orchestrator starts
		tasks := []*api.Task{
			{
				ID:           "task1",
				Slot:         1,
				DesiredState: api.TaskStateReady,
				Status: api.TaskStatus{
					State:     api.TaskStateReady,
					Timestamp: ptypes.MustTimestampProto(before),
				},
				Spec: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnAny,
						Delay:     gogotypes.DurationProto(100 * time.Millisecond),
					},
				},
				ServiceAnnotations: api.Annotations{
					Name: "task1",
				},
				ServiceID: "serviceid1",
				NodeID:    "node1",
			},
		}
		for _, task := range tasks {
			assert.NoError(t, store.CreateTask(tx, task))
		}

		return nil
	})
	assert.NoError(t, err)

	// watch orchestration events
	watch, cancel := state.Watch(s.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventDeleteTask{})
	defer cancel()

	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()

	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()

	// initTask triggers an update
	observedTask1 := testutils.WatchTaskUpdate(t, watch)
	after := time.Now()
	assert.Equal(t, observedTask1.ID, "task1")
	assert.Equal(t, observedTask1.Status.State, api.TaskStateReady)
	assert.Equal(t, observedTask1.DesiredState, api.TaskStateRunning)

	// At least 100 ms should have elapsed
	if after.Sub(before) < 100*time.Millisecond {
		t.Fatalf("restart delay should have elapsed. Got: %v", after.Sub(before))
	}
}