github.com/anuvu/nomad@v0.8.7-atom1/api/tasks_test.go

package api

import (
    "reflect"
    "testing"
    "time"

    "github.com/hashicorp/nomad/helper"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/stretchr/testify/assert"
)

func TestTaskGroup_NewTaskGroup(t *testing.T) {
    t.Parallel()
    grp := NewTaskGroup("grp1", 2)
    expect := &TaskGroup{
        Name: helper.StringToPtr("grp1"),
        Count: helper.IntToPtr(2),
    }
    if !reflect.DeepEqual(grp, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, grp)
    }
}

func TestTaskGroup_Constrain(t *testing.T) {
    t.Parallel()
    grp := NewTaskGroup("grp1", 1)

    // Add a constraint to the group
    out := grp.Constrain(NewConstraint("kernel.name", "=", "darwin"))
    if n := len(grp.Constraints); n != 1 {
        t.Fatalf("expected 1 constraint, got: %d", n)
    }

    // Check that the group was returned
    if out != grp {
        t.Fatalf("expected: %#v, got: %#v", grp, out)
    }

    // Add a second constraint
    grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
    expect := []*Constraint{
        {
            LTarget: "kernel.name",
            RTarget: "darwin",
            Operand: "=",
        },
        {
            LTarget: "memory.totalbytes",
            RTarget: "128000000",
            Operand: ">=",
        },
    }
    if !reflect.DeepEqual(grp.Constraints, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, grp.Constraints)
    }
}

func TestTaskGroup_SetMeta(t *testing.T) {
    t.Parallel()
    grp := NewTaskGroup("grp1", 1)

    // Initializes an empty map
    out := grp.SetMeta("foo", "bar")
    if grp.Meta == nil {
        t.Fatalf("should be initialized")
    }

    // Check that we returned the group
    if out != grp {
        t.Fatalf("expect: %#v, got: %#v", grp, out)
    }

    // Add a second meta k/v
    grp.SetMeta("baz", "zip")
    expect := map[string]string{"foo": "bar", "baz": "zip"}
    if !reflect.DeepEqual(grp.Meta, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, grp.Meta)
    }
}

func TestTaskGroup_AddTask(t *testing.T) {
    t.Parallel()
    grp := NewTaskGroup("grp1", 1)

    // Add the task to the task group
    out := grp.AddTask(NewTask("task1", "java"))
    if n := len(grp.Tasks); n != 1 {
        t.Fatalf("expected 1 task, got: %d", n)
    }

    // Check that we returned the group
    if out != grp {
        t.Fatalf("expect: %#v, got: %#v", grp, out)
    }

    // Add a second task
    grp.AddTask(NewTask("task2", "exec"))
    expect := []*Task{
        {
            Name: "task1",
            Driver: "java",
        },
        {
            Name: "task2",
            Driver: "exec",
        },
    }
    if !reflect.DeepEqual(grp.Tasks, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, grp.Tasks)
    }
}

func TestTask_NewTask(t *testing.T) {
    t.Parallel()
    task := NewTask("task1", "exec")
    expect := &Task{
        Name: "task1",
        Driver: "exec",
    }
    if !reflect.DeepEqual(task, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, task)
    }
}

func TestTask_SetConfig(t *testing.T) {
    t.Parallel()
    task := NewTask("task1", "exec")

    // Initializes an empty map
    out := task.SetConfig("foo", "bar")
    if task.Config == nil {
        t.Fatalf("should be initialized")
    }

    // Check that we returned the task
    if out != task {
        t.Fatalf("expect: %#v, got: %#v", task, out)
    }

    // Set another config value
    task.SetConfig("baz", "zip")
    expect := map[string]interface{}{"foo": "bar", "baz": "zip"}
    if !reflect.DeepEqual(task.Config, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, task.Config)
    }
}
func TestTask_SetMeta(t *testing.T) {
    t.Parallel()
    task := NewTask("task1", "exec")

    // Initializes an empty map
    out := task.SetMeta("foo", "bar")
    if task.Meta == nil {
        t.Fatalf("should be initialized")
    }

    // Check that we returned the task
    if out != task {
        t.Fatalf("expect: %#v, got: %#v", task, out)
    }

    // Set another meta k/v
    task.SetMeta("baz", "zip")
    expect := map[string]string{"foo": "bar", "baz": "zip"}
    if !reflect.DeepEqual(task.Meta, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, task.Meta)
    }
}

func TestTask_Require(t *testing.T) {
    t.Parallel()
    task := NewTask("task1", "exec")

    // Create some require resources
    resources := &Resources{
        CPU: helper.IntToPtr(1250),
        MemoryMB: helper.IntToPtr(128),
        DiskMB: helper.IntToPtr(2048),
        IOPS: helper.IntToPtr(500),
        Networks: []*NetworkResource{
            {
                CIDR: "0.0.0.0/0",
                MBits: helper.IntToPtr(100),
                ReservedPorts: []Port{{"", 80}, {"", 443}},
            },
        },
    }
    out := task.Require(resources)
    if !reflect.DeepEqual(task.Resources, resources) {
        t.Fatalf("expect: %#v, got: %#v", resources, task.Resources)
    }

    // Check that we returned the task
    if out != task {
        t.Fatalf("expect: %#v, got: %#v", task, out)
    }
}

func TestTask_Constrain(t *testing.T) {
    t.Parallel()
    task := NewTask("task1", "exec")

    // Add a constraint to the task
    out := task.Constrain(NewConstraint("kernel.name", "=", "darwin"))
    if n := len(task.Constraints); n != 1 {
        t.Fatalf("expected 1 constraint, got: %d", n)
    }

    // Check that the task was returned
    if out != task {
        t.Fatalf("expected: %#v, got: %#v", task, out)
    }

    // Add a second constraint
    task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
    expect := []*Constraint{
        {
            LTarget: "kernel.name",
            RTarget: "darwin",
            Operand: "=",
        },
        {
            LTarget: "memory.totalbytes",
            RTarget: "128000000",
            Operand: ">=",
        },
    }
    if !reflect.DeepEqual(task.Constraints, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, task.Constraints)
    }
}

func TestTask_Artifact(t *testing.T) {
    t.Parallel()
    a := TaskArtifact{
        GetterSource: helper.StringToPtr("http://localhost/foo.txt"),
        GetterMode: helper.StringToPtr("file"),
    }
    a.Canonicalize()
    if *a.GetterMode != "file" {
        t.Errorf("expected file but found %q", *a.GetterMode)
    }
    if *a.RelativeDest != "local/foo.txt" {
        t.Errorf("expected local/foo.txt but found %q", *a.RelativeDest)
    }
}

// Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
func TestTaskGroup_Canonicalize_Update(t *testing.T) {
    job := &Job{
        ID: helper.StringToPtr("test"),
        Update: &UpdateStrategy{
            AutoRevert: helper.BoolToPtr(false),
            Canary: helper.IntToPtr(0),
            HealthCheck: helper.StringToPtr(""),
            HealthyDeadline: helper.TimeToPtr(0),
            ProgressDeadline: helper.TimeToPtr(0),
            MaxParallel: helper.IntToPtr(0),
            MinHealthyTime: helper.TimeToPtr(0),
            Stagger: helper.TimeToPtr(0),
        },
    }
    job.Canonicalize()
    tg := &TaskGroup{
        Name: helper.StringToPtr("foo"),
    }
    tg.Canonicalize(job)
    assert.Nil(t, tg.Update)
}

// Verifies that reschedule policy is merged correctly
func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
    type testCase struct {
        desc string
        jobReschedulePolicy *ReschedulePolicy
        taskReschedulePolicy *ReschedulePolicy
        expected *ReschedulePolicy
    }

    testCases := []testCase{
        {
            desc: "Default",
            jobReschedulePolicy: nil,
            taskReschedulePolicy: nil,
            expected: &ReschedulePolicy{
                Attempts: helper.IntToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
                Interval: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
                Delay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
                DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
                MaxDelay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
                Unlimited: helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
            },
        },
        {
            desc: "Empty job reschedule policy",
            jobReschedulePolicy: &ReschedulePolicy{
                Attempts: helper.IntToPtr(0),
                Interval: helper.TimeToPtr(0),
                Delay: helper.TimeToPtr(0),
                MaxDelay: helper.TimeToPtr(0),
                DelayFunction: helper.StringToPtr(""),
                Unlimited: helper.BoolToPtr(false),
            },
            taskReschedulePolicy: nil,
            expected: &ReschedulePolicy{
                Attempts: helper.IntToPtr(0),
                Interval: helper.TimeToPtr(0),
                Delay: helper.TimeToPtr(0),
                MaxDelay: helper.TimeToPtr(0),
                DelayFunction: helper.StringToPtr(""),
                Unlimited: helper.BoolToPtr(false),
            },
        },
        {
            desc: "Inherit from job",
            jobReschedulePolicy: &ReschedulePolicy{
                Attempts: helper.IntToPtr(1),
                Interval: helper.TimeToPtr(20 * time.Second),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(10 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
            taskReschedulePolicy: nil,
            expected: &ReschedulePolicy{
                Attempts: helper.IntToPtr(1),
                Interval: helper.TimeToPtr(20 * time.Second),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(10 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
        },
        {
            desc: "Set in task",
            jobReschedulePolicy: nil,
            taskReschedulePolicy: &ReschedulePolicy{
                Attempts: helper.IntToPtr(5),
                Interval: helper.TimeToPtr(2 * time.Minute),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(10 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
            expected: &ReschedulePolicy{
                Attempts: helper.IntToPtr(5),
                Interval: helper.TimeToPtr(2 * time.Minute),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(10 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
        },
        {
            desc: "Merge from job",
            jobReschedulePolicy: &ReschedulePolicy{
                Attempts: helper.IntToPtr(1),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(10 * time.Minute),
            },
            taskReschedulePolicy: &ReschedulePolicy{
                Interval: helper.TimeToPtr(5 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
            expected: &ReschedulePolicy{
                Attempts: helper.IntToPtr(1),
                Interval: helper.TimeToPtr(5 * time.Minute),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(10 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
        },
        {
            desc: "Override from group",
            jobReschedulePolicy: &ReschedulePolicy{
                Attempts: helper.IntToPtr(1),
                MaxDelay: helper.TimeToPtr(10 * time.Second),
            },
            taskReschedulePolicy: &ReschedulePolicy{
                Attempts: helper.IntToPtr(5),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(20 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
            expected: &ReschedulePolicy{
                Attempts: helper.IntToPtr(5),
                Interval: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
                Delay: helper.TimeToPtr(20 * time.Second),
                MaxDelay: helper.TimeToPtr(20 * time.Minute),
                DelayFunction: helper.StringToPtr("constant"),
                Unlimited: helper.BoolToPtr(false),
            },
        },
        {
            desc: "Attempts from job, default interval",
            jobReschedulePolicy: &ReschedulePolicy{
                Attempts: helper.IntToPtr(1),
            },
            taskReschedulePolicy: nil,
            expected: &ReschedulePolicy{
                Attempts: helper.IntToPtr(1),
                Interval: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
                Delay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
                DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
                MaxDelay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
                Unlimited: helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.desc, func(t *testing.T) {
            job := &Job{
                ID: helper.StringToPtr("test"),
                Reschedule: tc.jobReschedulePolicy,
                Type: helper.StringToPtr(JobTypeBatch),
            }
            job.Canonicalize()
            tg := &TaskGroup{
                Name: helper.StringToPtr("foo"),
                ReschedulePolicy: tc.taskReschedulePolicy,
            }
            tg.Canonicalize(job)
            assert.Equal(t, tc.expected, tg.ReschedulePolicy)
        })
    }
}

// Verifies that migrate strategy is merged correctly
func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
    type testCase struct {
        desc string
        jobType string
        jobMigrate *MigrateStrategy
        taskMigrate *MigrateStrategy
        expected *MigrateStrategy
    }

    testCases := []testCase{
        {
            desc: "Default batch",
            jobType: "batch",
            jobMigrate: nil,
            taskMigrate: nil,
            expected: nil,
        },
        {
            desc: "Default service",
            jobType: "service",
            jobMigrate: nil,
            taskMigrate: nil,
            expected: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(1),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(10 * time.Second),
                HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
            },
        },
        {
            desc: "Empty job migrate strategy",
            jobType: "service",
            jobMigrate: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(0),
                HealthCheck: helper.StringToPtr(""),
                MinHealthyTime: helper.TimeToPtr(0),
                HealthyDeadline: helper.TimeToPtr(0),
            },
            taskMigrate: nil,
            expected: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(0),
                HealthCheck: helper.StringToPtr(""),
                MinHealthyTime: helper.TimeToPtr(0),
                HealthyDeadline: helper.TimeToPtr(0),
            },
        },
        {
            desc: "Inherit from job",
            jobType: "service",
            jobMigrate: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(3),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
            taskMigrate: nil,
            expected: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(3),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
        },
        {
            desc: "Set in task",
            jobType: "service",
            jobMigrate: nil,
            taskMigrate: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(3),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
            expected: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(3),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
        },
        {
            desc: "Merge from job",
            jobType: "service",
            jobMigrate: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(11),
            },
            taskMigrate: &MigrateStrategy{
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
            expected: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(11),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
        },
        {
            desc: "Override from group",
            jobType: "service",
            jobMigrate: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(11),
            },
            taskMigrate: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(5),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
            expected: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(5),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(2),
                HealthyDeadline: helper.TimeToPtr(2),
            },
        },
        {
            desc: "Parallel from job, defaulting",
            jobType: "service",
            jobMigrate: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(5),
            },
            taskMigrate: nil,
            expected: &MigrateStrategy{
                MaxParallel: helper.IntToPtr(5),
                HealthCheck: helper.StringToPtr("checks"),
                MinHealthyTime: helper.TimeToPtr(10 * time.Second),
                HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.desc, func(t *testing.T) {
            job := &Job{
                ID: helper.StringToPtr("test"),
                Migrate: tc.jobMigrate,
                Type: helper.StringToPtr(tc.jobType),
            }
            job.Canonicalize()
            tg := &TaskGroup{
                Name: helper.StringToPtr("foo"),
                Migrate: tc.taskMigrate,
            }
            tg.Canonicalize(job)
            assert.Equal(t, tc.expected, tg.Migrate)
        })
    }
}

// TestService_CheckRestart asserts Service.CheckRestart settings are properly
// inherited by Checks.
func TestService_CheckRestart(t *testing.T) {
    job := &Job{Name: helper.StringToPtr("job")}
    tg := &TaskGroup{Name: helper.StringToPtr("group")}
    task := &Task{Name: "task"}
    service := &Service{
        CheckRestart: &CheckRestart{
            Limit: 11,
            Grace: helper.TimeToPtr(11 * time.Second),
            IgnoreWarnings: true,
        },
        Checks: []ServiceCheck{
            {
                Name: "all-set",
                CheckRestart: &CheckRestart{
                    Limit: 22,
                    Grace: helper.TimeToPtr(22 * time.Second),
                    IgnoreWarnings: true,
                },
            },
            {
                Name: "some-set",
                CheckRestart: &CheckRestart{
                    Limit: 33,
                    Grace: helper.TimeToPtr(33 * time.Second),
                },
            },
            {
                Name: "unset",
            },
        },
    }

    service.Canonicalize(task, tg, job)
    assert.Equal(t, service.Checks[0].CheckRestart.Limit, 22)
    assert.Equal(t, *service.Checks[0].CheckRestart.Grace, 22*time.Second)
    assert.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)

    assert.Equal(t, service.Checks[1].CheckRestart.Limit, 33)
    assert.Equal(t, *service.Checks[1].CheckRestart.Grace, 33*time.Second)
    assert.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)

    assert.Equal(t, service.Checks[2].CheckRestart.Limit, 11)
    assert.Equal(t, *service.Checks[2].CheckRestart.Grace, 11*time.Second)
    assert.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
}