github.com/prestonp/nomad@v0.10.4/command/agent/job_endpoint_test.go 1 package agent 2 3 import ( 4 "net/http" 5 "net/http/httptest" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/golang/snappy" 12 "github.com/hashicorp/nomad/api" 13 "github.com/hashicorp/nomad/helper" 14 "github.com/hashicorp/nomad/nomad/mock" 15 "github.com/hashicorp/nomad/nomad/structs" 16 "github.com/kr/pretty" 17 "github.com/stretchr/testify/assert" 18 "github.com/stretchr/testify/require" 19 ) 20 21 func TestHTTP_JobsList(t *testing.T) { 22 t.Parallel() 23 httpTest(t, nil, func(s *TestAgent) { 24 for i := 0; i < 3; i++ { 25 // Create the job 26 job := mock.Job() 27 args := structs.JobRegisterRequest{ 28 Job: job, 29 WriteRequest: structs.WriteRequest{ 30 Region: "global", 31 Namespace: structs.DefaultNamespace, 32 }, 33 } 34 var resp structs.JobRegisterResponse 35 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 36 t.Fatalf("err: %v", err) 37 } 38 } 39 40 // Make the HTTP request 41 req, err := http.NewRequest("GET", "/v1/jobs", nil) 42 if err != nil { 43 t.Fatalf("err: %v", err) 44 } 45 respW := httptest.NewRecorder() 46 47 // Make the request 48 obj, err := s.Server.JobsRequest(respW, req) 49 if err != nil { 50 t.Fatalf("err: %v", err) 51 } 52 53 // Check for the index 54 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 55 t.Fatalf("missing index") 56 } 57 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 58 t.Fatalf("missing known leader") 59 } 60 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 61 t.Fatalf("missing last contact") 62 } 63 64 // Check the job 65 j := obj.([]*structs.JobListStub) 66 if len(j) != 3 { 67 t.Fatalf("bad: %#v", j) 68 } 69 }) 70 } 71 72 func TestHTTP_PrefixJobsList(t *testing.T) { 73 ids := []string{ 74 "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706", 75 "aabbbbbb-e8f7-fd38-c855-ab94ceb89706", 76 "aabbcccc-e8f7-fd38-c855-ab94ceb89706", 77 } 78 t.Parallel() 79 httpTest(t, nil, func(s *TestAgent) { 80 for i := 0; i < 3; i++ { 81 // Create the job 82 job := mock.Job() 83 job.ID = ids[i] 84 job.TaskGroups[0].Count = 1 85 args := structs.JobRegisterRequest{ 86 Job: job, 87 WriteRequest: structs.WriteRequest{ 88 Region: "global", 89 Namespace: structs.DefaultNamespace, 90 }, 91 } 92 var resp structs.JobRegisterResponse 93 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 94 t.Fatalf("err: %v", err) 95 } 96 } 97 98 // Make the HTTP request 99 req, err := http.NewRequest("GET", "/v1/jobs?prefix=aabb", nil) 100 if err != nil { 101 t.Fatalf("err: %v", err) 102 } 103 respW := httptest.NewRecorder() 104 105 // Make the request 106 obj, err := s.Server.JobsRequest(respW, req) 107 if err != nil { 108 t.Fatalf("err: %v", err) 109 } 110 111 // Check for the index 112 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 113 t.Fatalf("missing index") 114 } 115 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 116 t.Fatalf("missing known leader") 117 } 118 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 119 t.Fatalf("missing last contact") 120 } 121 122 // Check the job 123 j := obj.([]*structs.JobListStub) 124 if len(j) != 2 { 125 t.Fatalf("bad: %#v", j) 126 } 127 }) 128 } 129 130 func TestHTTP_JobsRegister(t *testing.T) { 131 t.Parallel() 132 httpTest(t, nil, func(s *TestAgent) { 133 // Create the job 134 job := MockJob() 135 args := api.JobRegisterRequest{ 136 Job: job, 137 WriteRequest: api.WriteRequest{Region: "global"}, 138 } 139 buf := encodeReq(args) 140 141 // Make the HTTP request 142 req, err := http.NewRequest("PUT",
"/v1/jobs", buf) 143 if err != nil { 144 t.Fatalf("err: %v", err) 145 } 146 respW := httptest.NewRecorder() 147 148 // Make the request 149 obj, err := s.Server.JobsRequest(respW, req) 150 if err != nil { 151 t.Fatalf("err: %v", err) 152 } 153 154 // Check the response 155 dereg := obj.(structs.JobRegisterResponse) 156 if dereg.EvalID == "" { 157 t.Fatalf("bad: %v", dereg) 158 } 159 160 // Check for the index 161 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 162 t.Fatalf("missing index") 163 } 164 165 // Check the job is registered 166 getReq := structs.JobSpecificRequest{ 167 JobID: *job.ID, 168 QueryOptions: structs.QueryOptions{ 169 Region: "global", 170 Namespace: structs.DefaultNamespace, 171 }, 172 } 173 var getResp structs.SingleJobResponse 174 if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil { 175 t.Fatalf("err: %v", err) 176 } 177 178 if getResp.Job == nil { 179 t.Fatalf("job does not exist") 180 } 181 }) 182 } 183 184 // Test that ACL token is properly threaded through to the RPC endpoint 185 func TestHTTP_JobsRegister_ACL(t *testing.T) { 186 t.Parallel() 187 httpACLTest(t, nil, func(s *TestAgent) { 188 // Create the job 189 job := MockJob() 190 args := api.JobRegisterRequest{ 191 Job: job, 192 WriteRequest: api.WriteRequest{ 193 Region: "global", 194 }, 195 } 196 buf := encodeReq(args) 197 198 // Make the HTTP request 199 req, err := http.NewRequest("PUT", "/v1/jobs", buf) 200 if err != nil { 201 t.Fatalf("err: %v", err) 202 } 203 respW := httptest.NewRecorder() 204 setToken(req, s.RootToken) 205 206 // Make the request 207 obj, err := s.Server.JobsRequest(respW, req) 208 if err != nil { 209 t.Fatalf("err: %v", err) 210 } 211 assert.NotNil(t, obj) 212 }) 213 } 214 215 func TestHTTP_JobsRegister_Defaulting(t *testing.T) { 216 t.Parallel() 217 httpTest(t, nil, func(s *TestAgent) { 218 // Create the job 219 job := MockJob() 220 221 // Do not set its priority 222 job.Priority = nil 223 224 args := api.JobRegisterRequest{ 225 Job: job, 226 WriteRequest: api.WriteRequest{Region: "global"}, 227 } 228 buf := encodeReq(args) 229 230 // Make the HTTP request 231 req, err := http.NewRequest("PUT", "/v1/jobs", buf) 232 if err != nil { 233 t.Fatalf("err: %v", err) 234 } 235 respW := httptest.NewRecorder() 236 237 // Make the request 238 obj, err := s.Server.JobsRequest(respW, req) 239 if err != nil { 240 t.Fatalf("err: %v", err) 241 } 242 243 // Check the response 244 dereg := obj.(structs.JobRegisterResponse) 245 if dereg.EvalID == "" { 246 t.Fatalf("bad: %v", dereg) 247 } 248 249 // Check for the index 250 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 251 t.Fatalf("missing index") 252 } 253 254 // Check the job is registered 255 getReq := structs.JobSpecificRequest{ 256 JobID: *job.ID, 257 QueryOptions: structs.QueryOptions{ 258 Region: "global", 259 Namespace: structs.DefaultNamespace, 260 }, 261 } 262 var getResp structs.SingleJobResponse 263 if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil { 264 t.Fatalf("err: %v", err) 265 } 266 267 if getResp.Job == nil { 268 t.Fatalf("job does not exist") 269 } 270 if getResp.Job.Priority != 50 { 271 t.Fatalf("job didn't get defaulted") 272 } 273 }) 274 } 275 276 func TestHTTP_JobsParse(t *testing.T) { 277 t.Parallel() 278 httpTest(t, nil, func(s *TestAgent) { 279 buf := encodeReq(api.JobsParseRequest{JobHCL: mock.HCL()}) 280 req, err := http.NewRequest("POST", "/v1/jobs/parse", buf) 281 if err != nil { 282 t.Fatalf("err: %v", err) 283 } 284 285 respW := httptest.NewRecorder() 286 287 obj, err := 
s.Server.JobsParseRequest(respW, req) 288 if err != nil { 289 t.Fatalf("err: %v", err) 290 } 291 if obj == nil { 292 t.Fatal("response should not be nil") 293 } 294 295 job := obj.(*api.Job) 296 expected := mock.Job() 297 if job.Name == nil || *job.Name != expected.Name { 298 t.Fatalf("job name is '%s', expected '%s'", *job.Name, expected.Name) 299 } 300 301 if job.Datacenters == nil || 302 job.Datacenters[0] != expected.Datacenters[0] { 303 t.Fatalf("job datacenters is '%s', expected '%s'", 304 job.Datacenters[0], expected.Datacenters[0]) 305 } 306 }) 307 } 308 func TestHTTP_JobQuery(t *testing.T) { 309 t.Parallel() 310 httpTest(t, nil, func(s *TestAgent) { 311 // Create the job 312 job := mock.Job() 313 args := structs.JobRegisterRequest{ 314 Job: job, 315 WriteRequest: structs.WriteRequest{ 316 Region: "global", 317 Namespace: structs.DefaultNamespace, 318 }, 319 } 320 var resp structs.JobRegisterResponse 321 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 322 t.Fatalf("err: %v", err) 323 } 324 325 // Make the HTTP request 326 req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil) 327 if err != nil { 328 t.Fatalf("err: %v", err) 329 } 330 respW := httptest.NewRecorder() 331 332 // Make the request 333 obj, err := s.Server.JobSpecificRequest(respW, req) 334 if err != nil { 335 t.Fatalf("err: %v", err) 336 } 337 338 // Check for the index 339 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 340 t.Fatalf("missing index") 341 } 342 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 343 t.Fatalf("missing known leader") 344 } 345 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 346 t.Fatalf("missing last contact") 347 } 348 349 // Check the job 350 j := obj.(*structs.Job) 351 if j.ID != job.ID { 352 t.Fatalf("bad: %#v", j) 353 } 354 }) 355 } 356 357 func TestHTTP_JobQuery_Payload(t *testing.T) { 358 t.Parallel() 359 httpTest(t, nil, func(s *TestAgent) { 360 // Create the job 361 job := mock.Job() 362 363 // Insert Payload compressed 364 expected := []byte("hello world") 365 compressed := snappy.Encode(nil, expected) 366 job.Payload = compressed 367 368 // Directly manipulate the state 369 state := s.Agent.server.State() 370 if err := state.UpsertJob(1000, job); err != nil { 371 t.Fatalf("Failed to upsert job: %v", err) 372 } 373 374 // Make the HTTP request 375 req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil) 376 if err != nil { 377 t.Fatalf("err: %v", err) 378 } 379 respW := httptest.NewRecorder() 380 381 // Make the request 382 obj, err := s.Server.JobSpecificRequest(respW, req) 383 if err != nil { 384 t.Fatalf("err: %v", err) 385 } 386 387 // Check for the index 388 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 389 t.Fatalf("missing index") 390 } 391 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 392 t.Fatalf("missing known leader") 393 } 394 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 395 t.Fatalf("missing last contact") 396 } 397 398 // Check the job 399 j := obj.(*structs.Job) 400 if j.ID != job.ID { 401 t.Fatalf("bad: %#v", j) 402 } 403 404 // Check the payload is decompressed 405 if !reflect.DeepEqual(j.Payload, expected) { 406 t.Fatalf("Payload not decompressed properly; got %#v; want %#v", j.Payload, expected) 407 } 408 }) 409 } 410 411 func TestHTTP_JobUpdate(t *testing.T) { 412 t.Parallel() 413 httpTest(t, nil, func(s *TestAgent) { 414 // Create the job 415 job := MockJob() 416 args := api.JobRegisterRequest{ 417 Job: job, 418 WriteRequest: api.WriteRequest{ 419 Region: "global", 420 Namespace: api.DefaultNamespace, 
421 }, 422 } 423 buf := encodeReq(args) 424 425 // Make the HTTP request 426 req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf) 427 if err != nil { 428 t.Fatalf("err: %v", err) 429 } 430 respW := httptest.NewRecorder() 431 432 // Make the request 433 obj, err := s.Server.JobSpecificRequest(respW, req) 434 if err != nil { 435 t.Fatalf("err: %v", err) 436 } 437 438 // Check the response 439 dereg := obj.(structs.JobRegisterResponse) 440 if dereg.EvalID == "" { 441 t.Fatalf("bad: %v", dereg) 442 } 443 444 // Check for the index 445 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 446 t.Fatalf("missing index") 447 } 448 449 // Check the job is registered 450 getReq := structs.JobSpecificRequest{ 451 JobID: *job.ID, 452 QueryOptions: structs.QueryOptions{ 453 Region: "global", 454 Namespace: structs.DefaultNamespace, 455 }, 456 } 457 var getResp structs.SingleJobResponse 458 if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil { 459 t.Fatalf("err: %v", err) 460 } 461 462 if getResp.Job == nil { 463 t.Fatalf("job does not exist") 464 } 465 }) 466 } 467 468 func TestHTTP_JobUpdateRegion(t *testing.T) { 469 t.Parallel() 470 471 cases := []struct { 472 Name string 473 ConfigRegion string 474 APIRegion string 475 ExpectedRegion string 476 }{ 477 { 478 Name: "api region takes precedence", 479 ConfigRegion: "not-global", 480 APIRegion: "north-america", 481 ExpectedRegion: "north-america", 482 }, 483 { 484 Name: "config region is set", 485 ConfigRegion: "north-america", 486 APIRegion: "", 487 ExpectedRegion: "north-america", 488 }, 489 { 490 Name: "api region is set", 491 ConfigRegion: "", 492 APIRegion: "north-america", 493 ExpectedRegion: "north-america", 494 }, 495 { 496 Name: "defaults to node region global if no region is provided", 497 ConfigRegion: "", 498 APIRegion: "", 499 ExpectedRegion: "global", 500 }, 501 { 502 Name: "defaults to node region not-global if no region is provided", 503 ConfigRegion: "", 504 APIRegion: "", 505 ExpectedRegion: "not-global", 506 }, 507 } 508 509 for _, tc := range cases { 510 t.Run(tc.Name, func(t *testing.T) { 511 httpTest(t, func(c *Config) { c.Region = tc.ExpectedRegion }, func(s *TestAgent) { 512 // Create the job 513 job := MockRegionalJob() 514 515 if tc.ConfigRegion == "" { 516 job.Region = nil 517 } else { 518 job.Region = &tc.ConfigRegion 519 } 520 521 args := api.JobRegisterRequest{ 522 Job: job, 523 WriteRequest: api.WriteRequest{ 524 Namespace: api.DefaultNamespace, 525 Region: tc.APIRegion, 526 }, 527 } 528 529 buf := encodeReq(args) 530 531 // Make the HTTP request 532 url := "/v1/job/" + *job.ID 533 534 req, err := http.NewRequest("PUT", url, buf) 535 require.NoError(t, err) 536 respW := httptest.NewRecorder() 537 538 // Make the request 539 obj, err := s.Server.JobSpecificRequest(respW, req) 540 require.NoError(t, err) 541 542 // Check the response 543 dereg := obj.(structs.JobRegisterResponse) 544 require.NotEmpty(t, dereg.EvalID) 545 546 // Check for the index 547 require.NotEmpty(t, respW.HeaderMap.Get("X-Nomad-Index"), "missing index") 548 549 // Check the job is registered 550 getReq := structs.JobSpecificRequest{ 551 JobID: *job.ID, 552 QueryOptions: structs.QueryOptions{ 553 Region: tc.ExpectedRegion, 554 Namespace: structs.DefaultNamespace, 555 }, 556 } 557 var getResp structs.SingleJobResponse 558 err = s.Agent.RPC("Job.GetJob", &getReq, &getResp) 559 require.NoError(t, err) 560 require.NotNil(t, getResp.Job, "job does not exist") 561 require.Equal(t, tc.ExpectedRegion, getResp.Job.Region) 562 }) 563 }) 564 } 565 } 
566 567 func TestHTTP_JobDelete(t *testing.T) { 568 t.Parallel() 569 httpTest(t, nil, func(s *TestAgent) { 570 // Create the job 571 job := mock.Job() 572 args := structs.JobRegisterRequest{ 573 Job: job, 574 WriteRequest: structs.WriteRequest{ 575 Region: "global", 576 Namespace: structs.DefaultNamespace, 577 }, 578 } 579 var resp structs.JobRegisterResponse 580 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 581 t.Fatalf("err: %v", err) 582 } 583 584 // Make the HTTP request to do a soft delete 585 req, err := http.NewRequest("DELETE", "/v1/job/"+job.ID, nil) 586 if err != nil { 587 t.Fatalf("err: %v", err) 588 } 589 respW := httptest.NewRecorder() 590 591 // Make the request 592 obj, err := s.Server.JobSpecificRequest(respW, req) 593 if err != nil { 594 t.Fatalf("err: %v", err) 595 } 596 597 // Check the response 598 dereg := obj.(structs.JobDeregisterResponse) 599 if dereg.EvalID == "" { 600 t.Fatalf("bad: %v", dereg) 601 } 602 603 // Check for the index 604 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 605 t.Fatalf("missing index") 606 } 607 608 // Check the job is still queryable 609 getReq1 := structs.JobSpecificRequest{ 610 JobID: job.ID, 611 QueryOptions: structs.QueryOptions{ 612 Region: "global", 613 Namespace: structs.DefaultNamespace, 614 }, 615 } 616 var getResp1 structs.SingleJobResponse 617 if err := s.Agent.RPC("Job.GetJob", &getReq1, &getResp1); err != nil { 618 t.Fatalf("err: %v", err) 619 } 620 if getResp1.Job == nil { 621 t.Fatalf("job doesn't exists") 622 } 623 if !getResp1.Job.Stop { 624 t.Fatalf("job should be marked as stop") 625 } 626 627 // Make the HTTP request to do a purge delete 628 req2, err := http.NewRequest("DELETE", "/v1/job/"+job.ID+"?purge=true", nil) 629 if err != nil { 630 t.Fatalf("err: %v", err) 631 } 632 respW.Flush() 633 634 // Make the request 635 obj, err = s.Server.JobSpecificRequest(respW, req2) 636 if err != nil { 637 t.Fatalf("err: %v", err) 638 } 639 640 // Check the response 641 dereg = obj.(structs.JobDeregisterResponse) 642 if dereg.EvalID == "" { 643 t.Fatalf("bad: %v", dereg) 644 } 645 646 // Check for the index 647 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 648 t.Fatalf("missing index") 649 } 650 651 // Check the job is gone 652 getReq2 := structs.JobSpecificRequest{ 653 JobID: job.ID, 654 QueryOptions: structs.QueryOptions{ 655 Region: "global", 656 Namespace: structs.DefaultNamespace, 657 }, 658 } 659 var getResp2 structs.SingleJobResponse 660 if err := s.Agent.RPC("Job.GetJob", &getReq2, &getResp2); err != nil { 661 t.Fatalf("err: %v", err) 662 } 663 if getResp2.Job != nil { 664 t.Fatalf("job still exists") 665 } 666 }) 667 } 668 669 func TestHTTP_JobForceEvaluate(t *testing.T) { 670 t.Parallel() 671 httpTest(t, nil, func(s *TestAgent) { 672 // Create the job 673 job := mock.Job() 674 args := structs.JobRegisterRequest{ 675 Job: job, 676 WriteRequest: structs.WriteRequest{ 677 Region: "global", 678 Namespace: structs.DefaultNamespace, 679 }, 680 } 681 var resp structs.JobRegisterResponse 682 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 683 t.Fatalf("err: %v", err) 684 } 685 686 // Make the HTTP request 687 req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", nil) 688 if err != nil { 689 t.Fatalf("err: %v", err) 690 } 691 respW := httptest.NewRecorder() 692 693 // Make the request 694 obj, err := s.Server.JobSpecificRequest(respW, req) 695 if err != nil { 696 t.Fatalf("err: %v", err) 697 } 698 699 // Check the response 700 reg := obj.(structs.JobRegisterResponse) 701 if 
reg.EvalID == "" { 702 t.Fatalf("bad: %v", reg) 703 } 704 705 // Check for the index 706 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 707 t.Fatalf("missing index") 708 } 709 }) 710 } 711 712 func TestHTTP_JobEvaluate_ForceReschedule(t *testing.T) { 713 t.Parallel() 714 httpTest(t, nil, func(s *TestAgent) { 715 // Create the job 716 job := mock.Job() 717 args := structs.JobRegisterRequest{ 718 Job: job, 719 WriteRequest: structs.WriteRequest{ 720 Region: "global", 721 Namespace: structs.DefaultNamespace, 722 }, 723 } 724 var resp structs.JobRegisterResponse 725 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 726 t.Fatalf("err: %v", err) 727 } 728 jobEvalReq := api.JobEvaluateRequest{ 729 JobID: job.ID, 730 EvalOptions: api.EvalOptions{ 731 ForceReschedule: true, 732 }, 733 } 734 735 buf := encodeReq(jobEvalReq) 736 737 // Make the HTTP request 738 req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", buf) 739 if err != nil { 740 t.Fatalf("err: %v", err) 741 } 742 respW := httptest.NewRecorder() 743 744 // Make the request 745 obj, err := s.Server.JobSpecificRequest(respW, req) 746 if err != nil { 747 t.Fatalf("err: %v", err) 748 } 749 750 // Check the response 751 reg := obj.(structs.JobRegisterResponse) 752 if reg.EvalID == "" { 753 t.Fatalf("bad: %v", reg) 754 } 755 756 // Check for the index 757 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 758 t.Fatalf("missing index") 759 } 760 }) 761 } 762 763 func TestHTTP_JobEvaluations(t *testing.T) { 764 t.Parallel() 765 httpTest(t, nil, func(s *TestAgent) { 766 // Create the job 767 job := mock.Job() 768 args := structs.JobRegisterRequest{ 769 Job: job, 770 WriteRequest: structs.WriteRequest{ 771 Region: "global", 772 Namespace: structs.DefaultNamespace, 773 }, 774 } 775 var resp structs.JobRegisterResponse 776 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 777 t.Fatalf("err: %v", err) 778 } 779 780 // Make the HTTP request 781 req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/evaluations", nil) 782 if err != nil { 783 t.Fatalf("err: %v", err) 784 } 785 respW := httptest.NewRecorder() 786 787 // Make the request 788 obj, err := s.Server.JobSpecificRequest(respW, req) 789 if err != nil { 790 t.Fatalf("err: %v", err) 791 } 792 793 // Check the response 794 evals := obj.([]*structs.Evaluation) 795 // Can be multiple evals, use the last one, since they are in order 796 idx := len(evals) - 1 797 if len(evals) == 0 || evals[idx].ID != resp.EvalID { 798 t.Fatalf("bad: %v", evals) 799 } 800 801 // Check for the index 802 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 803 t.Fatalf("missing index") 804 } 805 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 806 t.Fatalf("missing known leader") 807 } 808 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 809 t.Fatalf("missing last contact") 810 } 811 }) 812 } 813 814 func TestHTTP_JobAllocations(t *testing.T) { 815 t.Parallel() 816 httpTest(t, nil, func(s *TestAgent) { 817 // Create the job 818 alloc1 := mock.Alloc() 819 args := structs.JobRegisterRequest{ 820 Job: alloc1.Job, 821 WriteRequest: structs.WriteRequest{ 822 Region: "global", 823 Namespace: structs.DefaultNamespace, 824 }, 825 } 826 var resp structs.JobRegisterResponse 827 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 828 t.Fatalf("err: %v", err) 829 } 830 831 // Directly manipulate the state 832 expectedDisplayMsg := "test message" 833 testEvent := structs.NewTaskEvent("test event").SetMessage(expectedDisplayMsg) 834 var events []*structs.TaskEvent 835
events = append(events, testEvent) 836 taskState := &structs.TaskState{Events: events} 837 alloc1.TaskStates = make(map[string]*structs.TaskState) 838 alloc1.TaskStates["test"] = taskState 839 state := s.Agent.server.State() 840 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1}) 841 if err != nil { 842 t.Fatalf("err: %v", err) 843 } 844 845 // Make the HTTP request 846 req, err := http.NewRequest("GET", "/v1/job/"+alloc1.Job.ID+"/allocations?all=true", nil) 847 if err != nil { 848 t.Fatalf("err: %v", err) 849 } 850 respW := httptest.NewRecorder() 851 852 // Make the request 853 obj, err := s.Server.JobSpecificRequest(respW, req) 854 if err != nil { 855 t.Fatalf("err: %v", err) 856 } 857 858 // Check the response 859 allocs := obj.([]*structs.AllocListStub) 860 if len(allocs) != 1 || allocs[0].ID != alloc1.ID { 861 t.Fatalf("bad: %v", allocs) 862 } 863 displayMsg := allocs[0].TaskStates["test"].Events[0].DisplayMessage 864 assert.Equal(t, expectedDisplayMsg, displayMsg) 865 866 // Check for the index 867 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 868 t.Fatalf("missing index") 869 } 870 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 871 t.Fatalf("missing known leader") 872 } 873 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 874 t.Fatalf("missing last contact") 875 } 876 }) 877 } 878 879 func TestHTTP_JobDeployments(t *testing.T) { 880 assert := assert.New(t) 881 t.Parallel() 882 httpTest(t, nil, func(s *TestAgent) { 883 // Create the job 884 j := mock.Job() 885 args := structs.JobRegisterRequest{ 886 Job: j, 887 WriteRequest: structs.WriteRequest{ 888 Region: "global", 889 Namespace: structs.DefaultNamespace, 890 }, 891 } 892 var resp structs.JobRegisterResponse 893 assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister") 894 895 // Directly manipulate the state 896 state := s.Agent.server.State() 897 d := mock.Deployment() 898 d.JobID = j.ID 899 d.JobCreateIndex = resp.JobModifyIndex 900 901 assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") 902 903 // Make the HTTP request 904 req, err := http.NewRequest("GET", "/v1/job/"+j.ID+"/deployments", nil) 905 assert.Nil(err, "HTTP") 906 respW := httptest.NewRecorder() 907 908 // Make the request 909 obj, err := s.Server.JobSpecificRequest(respW, req) 910 assert.Nil(err, "JobSpecificRequest") 911 912 // Check the response 913 deploys := obj.([]*structs.Deployment) 914 assert.Len(deploys, 1, "deployments") 915 assert.Equal(d.ID, deploys[0].ID, "deployment id") 916 917 assert.NotZero(respW.HeaderMap.Get("X-Nomad-Index"), "missing index") 918 assert.Equal("true", respW.HeaderMap.Get("X-Nomad-KnownLeader"), "missing known leader") 919 assert.NotZero(respW.HeaderMap.Get("X-Nomad-LastContact"), "missing last contact") 920 }) 921 } 922 923 func TestHTTP_JobDeployment(t *testing.T) { 924 assert := assert.New(t) 925 t.Parallel() 926 httpTest(t, nil, func(s *TestAgent) { 927 // Create the job 928 j := mock.Job() 929 args := structs.JobRegisterRequest{ 930 Job: j, 931 WriteRequest: structs.WriteRequest{ 932 Region: "global", 933 Namespace: structs.DefaultNamespace, 934 }, 935 } 936 var resp structs.JobRegisterResponse 937 assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister") 938 939 // Directly manipulate the state 940 state := s.Agent.server.State() 941 d := mock.Deployment() 942 d.JobID = j.ID 943 d.JobCreateIndex = resp.JobModifyIndex 944 assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") 945 946 // Make the HTTP request 947 req, err := http.NewRequest("GET",
"/v1/job/"+j.ID+"/deployment", nil) 948 assert.Nil(err, "HTTP") 949 respW := httptest.NewRecorder() 950 951 // Make the request 952 obj, err := s.Server.JobSpecificRequest(respW, req) 953 assert.Nil(err, "JobSpecificRequest") 954 955 // Check the response 956 out := obj.(*structs.Deployment) 957 assert.NotNil(out, "deployment") 958 assert.Equal(d.ID, out.ID, "deployment id") 959 960 assert.NotZero(respW.HeaderMap.Get("X-Nomad-Index"), "missing index") 961 assert.Equal("true", respW.HeaderMap.Get("X-Nomad-KnownLeader"), "missing known leader") 962 assert.NotZero(respW.HeaderMap.Get("X-Nomad-LastContact"), "missing last contact") 963 }) 964 } 965 966 func TestHTTP_JobVersions(t *testing.T) { 967 t.Parallel() 968 httpTest(t, nil, func(s *TestAgent) { 969 // Create the job 970 job := mock.Job() 971 args := structs.JobRegisterRequest{ 972 Job: job, 973 WriteRequest: structs.WriteRequest{ 974 Region: "global", 975 Namespace: structs.DefaultNamespace, 976 }, 977 } 978 var resp structs.JobRegisterResponse 979 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 980 t.Fatalf("err: %v", err) 981 } 982 983 job2 := mock.Job() 984 job2.ID = job.ID 985 job2.Priority = 100 986 987 args2 := structs.JobRegisterRequest{ 988 Job: job2, 989 WriteRequest: structs.WriteRequest{ 990 Region: "global", 991 Namespace: structs.DefaultNamespace, 992 }, 993 } 994 var resp2 structs.JobRegisterResponse 995 if err := s.Agent.RPC("Job.Register", &args2, &resp2); err != nil { 996 t.Fatalf("err: %v", err) 997 } 998 999 // Make the HTTP request 1000 req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/versions?diffs=true", nil) 1001 if err != nil { 1002 t.Fatalf("err: %v", err) 1003 } 1004 respW := httptest.NewRecorder() 1005 1006 // Make the request 1007 obj, err := s.Server.JobSpecificRequest(respW, req) 1008 if err != nil { 1009 t.Fatalf("err: %v", err) 1010 } 1011 1012 // Check the response 1013 vResp := obj.(structs.JobVersionsResponse) 1014 versions := vResp.Versions 1015 if len(versions) != 2 { 1016 t.Fatalf("got %d versions; want 2", len(versions)) 1017 } 1018 1019 if v := versions[0]; v.Version != 1 || v.Priority != 100 { 1020 t.Fatalf("bad %v", v) 1021 } 1022 1023 if v := versions[1]; v.Version != 0 { 1024 t.Fatalf("bad %v", v) 1025 } 1026 1027 if len(vResp.Diffs) != 1 { 1028 t.Fatalf("bad %v", vResp) 1029 } 1030 1031 // Check for the index 1032 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 1033 t.Fatalf("missing index") 1034 } 1035 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 1036 t.Fatalf("missing known leader") 1037 } 1038 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 1039 t.Fatalf("missing last contact") 1040 } 1041 }) 1042 } 1043 1044 func TestHTTP_PeriodicForce(t *testing.T) { 1045 t.Parallel() 1046 httpTest(t, nil, func(s *TestAgent) { 1047 // Create and register a periodic job. 
1048 job := mock.PeriodicJob() 1049 args := structs.JobRegisterRequest{ 1050 Job: job, 1051 WriteRequest: structs.WriteRequest{ 1052 Region: "global", 1053 Namespace: structs.DefaultNamespace, 1054 }, 1055 } 1056 var resp structs.JobRegisterResponse 1057 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 1058 t.Fatalf("err: %v", err) 1059 } 1060 1061 // Make the HTTP request 1062 req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/periodic/force", nil) 1063 if err != nil { 1064 t.Fatalf("err: %v", err) 1065 } 1066 respW := httptest.NewRecorder() 1067 1068 // Make the request 1069 obj, err := s.Server.JobSpecificRequest(respW, req) 1070 if err != nil { 1071 t.Fatalf("err: %v", err) 1072 } 1073 1074 // Check for the index 1075 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 1076 t.Fatalf("missing index") 1077 } 1078 1079 // Check the response 1080 r := obj.(structs.PeriodicForceResponse) 1081 if r.EvalID == "" { 1082 t.Fatalf("bad: %#v", r) 1083 } 1084 }) 1085 } 1086 1087 func TestHTTP_JobPlan(t *testing.T) { 1088 t.Parallel() 1089 httpTest(t, nil, func(s *TestAgent) { 1090 // Create the job 1091 job := MockJob() 1092 args := api.JobPlanRequest{ 1093 Job: job, 1094 Diff: true, 1095 WriteRequest: api.WriteRequest{ 1096 Region: "global", 1097 Namespace: api.DefaultNamespace, 1098 }, 1099 } 1100 buf := encodeReq(args) 1101 1102 // Make the HTTP request 1103 req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID+"/plan", buf) 1104 if err != nil { 1105 t.Fatalf("err: %v", err) 1106 } 1107 respW := httptest.NewRecorder() 1108 1109 // Make the request 1110 obj, err := s.Server.JobSpecificRequest(respW, req) 1111 if err != nil { 1112 t.Fatalf("err: %v", err) 1113 } 1114 1115 // Check the response 1116 plan := obj.(structs.JobPlanResponse) 1117 if plan.Annotations == nil { 1118 t.Fatalf("bad: %v", plan) 1119 } 1120 1121 if plan.Diff == nil { 1122 t.Fatalf("bad: %v", plan) 1123 } 1124 }) 1125 } 1126 1127 func TestHTTP_JobPlanRegion(t *testing.T) { 1128 t.Parallel() 1129 1130 cases := []struct { 1131 Name string 1132 ConfigRegion string 1133 APIRegion string 1134 ExpectedRegion string 1135 }{ 1136 { 1137 Name: "api region takes precedence", 1138 ConfigRegion: "not-global", 1139 APIRegion: "north-america", 1140 ExpectedRegion: "north-america", 1141 }, 1142 { 1143 Name: "config region is set", 1144 ConfigRegion: "north-america", 1145 APIRegion: "", 1146 ExpectedRegion: "north-america", 1147 }, 1148 { 1149 Name: "api region is set", 1150 ConfigRegion: "", 1151 APIRegion: "north-america", 1152 ExpectedRegion: "north-america", 1153 }, 1154 { 1155 Name: "falls back to default if no region is provided", 1156 ConfigRegion: "", 1157 APIRegion: "", 1158 ExpectedRegion: "global", 1159 }, 1160 } 1161 1162 for _, tc := range cases { 1163 t.Run(tc.Name, func(t *testing.T) { 1164 httpTest(t, func(c *Config) { c.Region = tc.ExpectedRegion }, func(s *TestAgent) { 1165 // Create the job 1166 job := MockRegionalJob() 1167 1168 if tc.ConfigRegion == "" { 1169 job.Region = nil 1170 } else { 1171 job.Region = &tc.ConfigRegion 1172 } 1173 1174 args := api.JobPlanRequest{ 1175 Job: job, 1176 Diff: true, 1177 WriteRequest: api.WriteRequest{ 1178 Region: tc.APIRegion, 1179 Namespace: api.DefaultNamespace, 1180 }, 1181 } 1182 buf := encodeReq(args) 1183 1184 // Make the HTTP request 1185 req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID+"/plan", buf) 1186 require.NoError(t, err) 1187 respW := httptest.NewRecorder() 1188 1189 // Make the request 1190 obj, err := s.Server.JobSpecificRequest(respW, req) 1191 
require.NoError(t, err) 1192 1193 // Check the response 1194 plan := obj.(structs.JobPlanResponse) 1195 require.NotNil(t, plan.Annotations) 1196 require.NotNil(t, plan.Diff) 1197 }) 1198 }) 1199 } 1200 } 1201 1202 func TestHTTP_JobDispatch(t *testing.T) { 1203 t.Parallel() 1204 httpTest(t, nil, func(s *TestAgent) { 1205 // Create the parameterized job 1206 job := mock.BatchJob() 1207 job.ParameterizedJob = &structs.ParameterizedJobConfig{} 1208 1209 args := structs.JobRegisterRequest{ 1210 Job: job, 1211 WriteRequest: structs.WriteRequest{ 1212 Region: "global", 1213 Namespace: structs.DefaultNamespace, 1214 }, 1215 } 1216 var resp structs.JobRegisterResponse 1217 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 1218 t.Fatalf("err: %v", err) 1219 } 1220 1221 // Make the request 1222 respW := httptest.NewRecorder() 1223 args2 := structs.JobDispatchRequest{ 1224 WriteRequest: structs.WriteRequest{ 1225 Region: "global", 1226 Namespace: structs.DefaultNamespace, 1227 }, 1228 } 1229 buf := encodeReq(args2) 1230 1231 // Make the HTTP request 1232 req2, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/dispatch", buf) 1233 if err != nil { 1234 t.Fatalf("err: %v", err) 1235 } 1236 respW.Flush() 1237 1238 // Make the request 1239 obj, err := s.Server.JobSpecificRequest(respW, req2) 1240 if err != nil { 1241 t.Fatalf("err: %v", err) 1242 } 1243 1244 // Check the response 1245 dispatch := obj.(structs.JobDispatchResponse) 1246 if dispatch.EvalID == "" { 1247 t.Fatalf("bad: %v", dispatch) 1248 } 1249 1250 if dispatch.DispatchedJobID == "" { 1251 t.Fatalf("bad: %v", dispatch) 1252 } 1253 }) 1254 } 1255 1256 func TestHTTP_JobRevert(t *testing.T) { 1257 t.Parallel() 1258 httpTest(t, nil, func(s *TestAgent) { 1259 // Create the job and register it twice 1260 job := mock.Job() 1261 regReq := structs.JobRegisterRequest{ 1262 Job: job, 1263 WriteRequest: structs.WriteRequest{ 1264 Region: "global", 1265 Namespace: structs.DefaultNamespace, 1266 }, 1267 } 1268 var regResp structs.JobRegisterResponse 1269 if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil { 1270 t.Fatalf("err: %v", err) 1271 } 1272 1273 // Change the job to get a new version 1274 job.Datacenters = append(job.Datacenters, "foo") 1275 if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil { 1276 t.Fatalf("err: %v", err) 1277 } 1278 1279 args := structs.JobRevertRequest{ 1280 JobID: job.ID, 1281 JobVersion: 0, 1282 WriteRequest: structs.WriteRequest{ 1283 Region: "global", 1284 Namespace: structs.DefaultNamespace, 1285 }, 1286 } 1287 buf := encodeReq(args) 1288 1289 // Make the HTTP request 1290 req, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/revert", buf) 1291 if err != nil { 1292 t.Fatalf("err: %v", err) 1293 } 1294 respW := httptest.NewRecorder() 1295 1296 // Make the request 1297 obj, err := s.Server.JobSpecificRequest(respW, req) 1298 if err != nil { 1299 t.Fatalf("err: %v", err) 1300 } 1301 1302 // Check the response 1303 revertResp := obj.(structs.JobRegisterResponse) 1304 if revertResp.EvalID == "" { 1305 t.Fatalf("bad: %v", revertResp) 1306 } 1307 1308 // Check for the index 1309 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 1310 t.Fatalf("missing index") 1311 } 1312 }) 1313 } 1314 1315 func TestHTTP_JobStable(t *testing.T) { 1316 t.Parallel() 1317 httpTest(t, nil, func(s *TestAgent) { 1318 // Create the job and register it twice 1319 job := mock.Job() 1320 regReq := structs.JobRegisterRequest{ 1321 Job: job, 1322 WriteRequest: structs.WriteRequest{ 1323 Region: "global", 1324 Namespace:
structs.DefaultNamespace, 1325 }, 1326 } 1327 var regResp structs.JobRegisterResponse 1328 if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil { 1329 t.Fatalf("err: %v", err) 1330 } 1331 1332 if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil { 1333 t.Fatalf("err: %v", err) 1334 } 1335 1336 args := structs.JobStabilityRequest{ 1337 JobID: job.ID, 1338 JobVersion: 0, 1339 Stable: true, 1340 WriteRequest: structs.WriteRequest{ 1341 Region: "global", 1342 Namespace: structs.DefaultNamespace, 1343 }, 1344 } 1345 buf := encodeReq(args) 1346 1347 // Make the HTTP request 1348 req, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/stable", buf) 1349 if err != nil { 1350 t.Fatalf("err: %v", err) 1351 } 1352 respW := httptest.NewRecorder() 1353 1354 // Make the request 1355 obj, err := s.Server.JobSpecificRequest(respW, req) 1356 if err != nil { 1357 t.Fatalf("err: %v", err) 1358 } 1359 1360 // Check the response 1361 stableResp := obj.(structs.JobStabilityResponse) 1362 if stableResp.Index == 0 { 1363 t.Fatalf("bad: %v", stableResp) 1364 } 1365 1366 // Check for the index 1367 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 1368 t.Fatalf("missing index") 1369 } 1370 }) 1371 } 1372 1373 func TestJobs_ApiJobToStructsJob(t *testing.T) { 1374 apiJob := &api.Job{ 1375 Stop: helper.BoolToPtr(true), 1376 Region: helper.StringToPtr("global"), 1377 Namespace: helper.StringToPtr("foo"), 1378 ID: helper.StringToPtr("foo"), 1379 ParentID: helper.StringToPtr("lol"), 1380 Name: helper.StringToPtr("name"), 1381 Type: helper.StringToPtr("service"), 1382 Priority: helper.IntToPtr(50), 1383 AllAtOnce: helper.BoolToPtr(true), 1384 Datacenters: []string{"dc1", "dc2"}, 1385 Constraints: []*api.Constraint{ 1386 { 1387 LTarget: "a", 1388 RTarget: "b", 1389 Operand: "c", 1390 }, 1391 }, 1392 Affinities: []*api.Affinity{ 1393 { 1394 LTarget: "a", 1395 RTarget: "b", 1396 Operand: "c", 1397 Weight: helper.Int8ToPtr(50), 1398 }, 1399 }, 1400 Update: &api.UpdateStrategy{ 1401 Stagger: helper.TimeToPtr(1 * time.Second), 1402 MaxParallel: helper.IntToPtr(5), 1403 HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual), 1404 MinHealthyTime: helper.TimeToPtr(1 * time.Minute), 1405 HealthyDeadline: helper.TimeToPtr(3 * time.Minute), 1406 ProgressDeadline: helper.TimeToPtr(3 * time.Minute), 1407 AutoRevert: helper.BoolToPtr(false), 1408 Canary: helper.IntToPtr(1), 1409 }, 1410 Spreads: []*api.Spread{ 1411 { 1412 Attribute: "${meta.rack}", 1413 Weight: helper.Int8ToPtr(100), 1414 SpreadTarget: []*api.SpreadTarget{ 1415 { 1416 Value: "r1", 1417 Percent: 50, 1418 }, 1419 }, 1420 }, 1421 }, 1422 Periodic: &api.PeriodicConfig{ 1423 Enabled: helper.BoolToPtr(true), 1424 Spec: helper.StringToPtr("spec"), 1425 SpecType: helper.StringToPtr("cron"), 1426 ProhibitOverlap: helper.BoolToPtr(true), 1427 TimeZone: helper.StringToPtr("test zone"), 1428 }, 1429 ParameterizedJob: &api.ParameterizedJobConfig{ 1430 Payload: "payload", 1431 MetaRequired: []string{"a", "b"}, 1432 MetaOptional: []string{"c", "d"}, 1433 }, 1434 Payload: []byte("payload"), 1435 Meta: map[string]string{ 1436 "foo": "bar", 1437 }, 1438 TaskGroups: []*api.TaskGroup{ 1439 { 1440 Name: helper.StringToPtr("group1"), 1441 Count: helper.IntToPtr(5), 1442 Constraints: []*api.Constraint{ 1443 { 1444 LTarget: "x", 1445 RTarget: "y", 1446 Operand: "z", 1447 }, 1448 }, 1449 Affinities: []*api.Affinity{ 1450 { 1451 LTarget: "x", 1452 RTarget: "y", 1453 Operand: "z", 1454 Weight: helper.Int8ToPtr(100), 1455 }, 1456 }, 1457 RestartPolicy:
&api.RestartPolicy{ 1458 Interval: helper.TimeToPtr(1 * time.Second), 1459 Attempts: helper.IntToPtr(5), 1460 Delay: helper.TimeToPtr(10 * time.Second), 1461 Mode: helper.StringToPtr("delay"), 1462 }, 1463 ReschedulePolicy: &api.ReschedulePolicy{ 1464 Interval: helper.TimeToPtr(12 * time.Hour), 1465 Attempts: helper.IntToPtr(5), 1466 DelayFunction: helper.StringToPtr("constant"), 1467 Delay: helper.TimeToPtr(30 * time.Second), 1468 Unlimited: helper.BoolToPtr(true), 1469 MaxDelay: helper.TimeToPtr(20 * time.Minute), 1470 }, 1471 Migrate: &api.MigrateStrategy{ 1472 MaxParallel: helper.IntToPtr(12), 1473 HealthCheck: helper.StringToPtr("task_events"), 1474 MinHealthyTime: helper.TimeToPtr(12 * time.Hour), 1475 HealthyDeadline: helper.TimeToPtr(12 * time.Hour), 1476 }, 1477 Spreads: []*api.Spread{ 1478 { 1479 Attribute: "${node.datacenter}", 1480 Weight: helper.Int8ToPtr(100), 1481 SpreadTarget: []*api.SpreadTarget{ 1482 { 1483 Value: "dc1", 1484 Percent: 100, 1485 }, 1486 }, 1487 }, 1488 }, 1489 EphemeralDisk: &api.EphemeralDisk{ 1490 SizeMB: helper.IntToPtr(100), 1491 Sticky: helper.BoolToPtr(true), 1492 Migrate: helper.BoolToPtr(true), 1493 }, 1494 Update: &api.UpdateStrategy{ 1495 HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Checks), 1496 MinHealthyTime: helper.TimeToPtr(2 * time.Minute), 1497 HealthyDeadline: helper.TimeToPtr(5 * time.Minute), 1498 ProgressDeadline: helper.TimeToPtr(5 * time.Minute), 1499 AutoRevert: helper.BoolToPtr(true), 1500 }, 1501 Meta: map[string]string{ 1502 "key": "value", 1503 }, 1504 Services: []*api.Service{ 1505 { 1506 Name: "groupserviceA", 1507 Tags: []string{"a", "b"}, 1508 CanaryTags: []string{"d", "e"}, 1509 PortLabel: "1234", 1510 Meta: map[string]string{ 1511 "servicemeta": "foobar", 1512 }, 1513 CheckRestart: &api.CheckRestart{ 1514 Limit: 4, 1515 Grace: helper.TimeToPtr(11 * time.Second), 1516 }, 1517 Checks: []api.ServiceCheck{ 1518 { 1519 Id: "hello", 1520 Name: "bar", 1521 Type: "http", 1522 Command: "foo", 1523 Args: []string{"a", "b"}, 1524 Path: "/check", 1525 Protocol: "http", 1526 PortLabel: "foo", 1527 AddressMode: "driver", 1528 GRPCService: "foo.Bar", 1529 GRPCUseTLS: true, 1530 Interval: 4 * time.Second, 1531 Timeout: 2 * time.Second, 1532 InitialStatus: "ok", 1533 CheckRestart: &api.CheckRestart{ 1534 Limit: 3, 1535 IgnoreWarnings: true, 1536 }, 1537 TaskName: "task1", 1538 }, 1539 }, 1540 Connect: &api.ConsulConnect{ 1541 Native: false, 1542 SidecarService: &api.ConsulSidecarService{ 1543 Tags: []string{"f", "g"}, 1544 Port: "9000", 1545 }, 1546 }, 1547 }, 1548 }, 1549 Tasks: []*api.Task{ 1550 { 1551 Name: "task1", 1552 Leader: true, 1553 Driver: "docker", 1554 User: "mary", 1555 Config: map[string]interface{}{ 1556 "lol": "code", 1557 }, 1558 Env: map[string]string{ 1559 "hello": "world", 1560 }, 1561 Constraints: []*api.Constraint{ 1562 { 1563 LTarget: "x", 1564 RTarget: "y", 1565 Operand: "z", 1566 }, 1567 }, 1568 Affinities: []*api.Affinity{ 1569 { 1570 LTarget: "a", 1571 RTarget: "b", 1572 Operand: "c", 1573 Weight: helper.Int8ToPtr(50), 1574 }, 1575 }, 1576 1577 Services: []*api.Service{ 1578 { 1579 Id: "id", 1580 Name: "serviceA", 1581 Tags: []string{"1", "2"}, 1582 CanaryTags: []string{"3", "4"}, 1583 PortLabel: "foo", 1584 Meta: map[string]string{ 1585 "servicemeta": "foobar", 1586 }, 1587 CheckRestart: &api.CheckRestart{ 1588 Limit: 4, 1589 Grace: helper.TimeToPtr(11 * time.Second), 1590 }, 1591 Checks: []api.ServiceCheck{ 1592 { 1593 Id: "hello", 1594 Name: "bar", 1595 Type: "http", 1596 Command: "foo", 
1597 Args: []string{"a", "b"}, 1598 Path: "/check", 1599 Protocol: "http", 1600 PortLabel: "foo", 1601 AddressMode: "driver", 1602 GRPCService: "foo.Bar", 1603 GRPCUseTLS: true, 1604 Interval: 4 * time.Second, 1605 Timeout: 2 * time.Second, 1606 InitialStatus: "ok", 1607 CheckRestart: &api.CheckRestart{ 1608 Limit: 3, 1609 IgnoreWarnings: true, 1610 }, 1611 }, 1612 { 1613 Id: "check2id", 1614 Name: "check2", 1615 Type: "tcp", 1616 PortLabel: "foo", 1617 Interval: 4 * time.Second, 1618 Timeout: 2 * time.Second, 1619 }, 1620 }, 1621 }, 1622 }, 1623 Resources: &api.Resources{ 1624 CPU: helper.IntToPtr(100), 1625 MemoryMB: helper.IntToPtr(10), 1626 Networks: []*api.NetworkResource{ 1627 { 1628 IP: "10.10.11.1", 1629 MBits: helper.IntToPtr(10), 1630 ReservedPorts: []api.Port{ 1631 { 1632 Label: "http", 1633 Value: 80, 1634 }, 1635 }, 1636 DynamicPorts: []api.Port{ 1637 { 1638 Label: "ssh", 1639 Value: 2000, 1640 }, 1641 }, 1642 }, 1643 }, 1644 Devices: []*api.RequestedDevice{ 1645 { 1646 Name: "nvidia/gpu", 1647 Count: helper.Uint64ToPtr(4), 1648 Constraints: []*api.Constraint{ 1649 { 1650 LTarget: "x", 1651 RTarget: "y", 1652 Operand: "z", 1653 }, 1654 }, 1655 Affinities: []*api.Affinity{ 1656 { 1657 LTarget: "a", 1658 RTarget: "b", 1659 Operand: "c", 1660 Weight: helper.Int8ToPtr(50), 1661 }, 1662 }, 1663 }, 1664 { 1665 Name: "gpu", 1666 Count: nil, 1667 }, 1668 }, 1669 }, 1670 Meta: map[string]string{ 1671 "lol": "code", 1672 }, 1673 KillTimeout: helper.TimeToPtr(10 * time.Second), 1674 KillSignal: "SIGQUIT", 1675 LogConfig: &api.LogConfig{ 1676 MaxFiles: helper.IntToPtr(10), 1677 MaxFileSizeMB: helper.IntToPtr(100), 1678 }, 1679 Artifacts: []*api.TaskArtifact{ 1680 { 1681 GetterSource: helper.StringToPtr("source"), 1682 GetterOptions: map[string]string{ 1683 "a": "b", 1684 }, 1685 GetterMode: helper.StringToPtr("dir"), 1686 RelativeDest: helper.StringToPtr("dest"), 1687 }, 1688 }, 1689 Vault: &api.Vault{ 1690 Policies: []string{"a", "b", "c"}, 1691 Env: helper.BoolToPtr(true), 1692 ChangeMode: helper.StringToPtr("c"), 1693 ChangeSignal: helper.StringToPtr("sighup"), 1694 }, 1695 Templates: []*api.Template{ 1696 { 1697 SourcePath: helper.StringToPtr("source"), 1698 DestPath: helper.StringToPtr("dest"), 1699 EmbeddedTmpl: helper.StringToPtr("embedded"), 1700 ChangeMode: helper.StringToPtr("change"), 1701 ChangeSignal: helper.StringToPtr("signal"), 1702 Splay: helper.TimeToPtr(1 * time.Minute), 1703 Perms: helper.StringToPtr("666"), 1704 LeftDelim: helper.StringToPtr("abc"), 1705 RightDelim: helper.StringToPtr("def"), 1706 Envvars: helper.BoolToPtr(true), 1707 VaultGrace: helper.TimeToPtr(3 * time.Second), 1708 }, 1709 }, 1710 DispatchPayload: &api.DispatchPayloadConfig{ 1711 File: "fileA", 1712 }, 1713 }, 1714 }, 1715 }, 1716 }, 1717 ConsulToken: helper.StringToPtr("abc123"), 1718 VaultToken: helper.StringToPtr("def456"), 1719 Status: helper.StringToPtr("status"), 1720 StatusDescription: helper.StringToPtr("status_desc"), 1721 Version: helper.Uint64ToPtr(10), 1722 CreateIndex: helper.Uint64ToPtr(1), 1723 ModifyIndex: helper.Uint64ToPtr(3), 1724 JobModifyIndex: helper.Uint64ToPtr(5), 1725 } 1726 1727 expected := &structs.Job{ 1728 Stop: true, 1729 Region: "global", 1730 Namespace: "foo", 1731 ID: "foo", 1732 ParentID: "lol", 1733 Name: "name", 1734 Type: "service", 1735 Priority: 50, 1736 AllAtOnce: true, 1737 Datacenters: []string{"dc1", "dc2"}, 1738 Constraints: []*structs.Constraint{ 1739 { 1740 LTarget: "a", 1741 RTarget: "b", 1742 Operand: "c", 1743 }, 1744 }, 1745 Affinities: 
[]*structs.Affinity{ 1746 { 1747 LTarget: "a", 1748 RTarget: "b", 1749 Operand: "c", 1750 Weight: 50, 1751 }, 1752 }, 1753 Spreads: []*structs.Spread{ 1754 { 1755 Attribute: "${meta.rack}", 1756 Weight: 100, 1757 SpreadTarget: []*structs.SpreadTarget{ 1758 { 1759 Value: "r1", 1760 Percent: 50, 1761 }, 1762 }, 1763 }, 1764 }, 1765 Update: structs.UpdateStrategy{ 1766 Stagger: 1 * time.Second, 1767 MaxParallel: 5, 1768 }, 1769 Periodic: &structs.PeriodicConfig{ 1770 Enabled: true, 1771 Spec: "spec", 1772 SpecType: "cron", 1773 ProhibitOverlap: true, 1774 TimeZone: "test zone", 1775 }, 1776 ParameterizedJob: &structs.ParameterizedJobConfig{ 1777 Payload: "payload", 1778 MetaRequired: []string{"a", "b"}, 1779 MetaOptional: []string{"c", "d"}, 1780 }, 1781 Payload: []byte("payload"), 1782 Meta: map[string]string{ 1783 "foo": "bar", 1784 }, 1785 TaskGroups: []*structs.TaskGroup{ 1786 { 1787 Name: "group1", 1788 Count: 5, 1789 Constraints: []*structs.Constraint{ 1790 { 1791 LTarget: "x", 1792 RTarget: "y", 1793 Operand: "z", 1794 }, 1795 }, 1796 Affinities: []*structs.Affinity{ 1797 { 1798 LTarget: "x", 1799 RTarget: "y", 1800 Operand: "z", 1801 Weight: 100, 1802 }, 1803 }, 1804 RestartPolicy: &structs.RestartPolicy{ 1805 Interval: 1 * time.Second, 1806 Attempts: 5, 1807 Delay: 10 * time.Second, 1808 Mode: "delay", 1809 }, 1810 Spreads: []*structs.Spread{ 1811 { 1812 Attribute: "${node.datacenter}", 1813 Weight: 100, 1814 SpreadTarget: []*structs.SpreadTarget{ 1815 { 1816 Value: "dc1", 1817 Percent: 100, 1818 }, 1819 }, 1820 }, 1821 }, 1822 ReschedulePolicy: &structs.ReschedulePolicy{ 1823 Interval: 12 * time.Hour, 1824 Attempts: 5, 1825 DelayFunction: "constant", 1826 Delay: 30 * time.Second, 1827 Unlimited: true, 1828 MaxDelay: 20 * time.Minute, 1829 }, 1830 Migrate: &structs.MigrateStrategy{ 1831 MaxParallel: 12, 1832 HealthCheck: "task_events", 1833 MinHealthyTime: 12 * time.Hour, 1834 HealthyDeadline: 12 * time.Hour, 1835 }, 1836 EphemeralDisk: &structs.EphemeralDisk{ 1837 SizeMB: 100, 1838 Sticky: true, 1839 Migrate: true, 1840 }, 1841 Update: &structs.UpdateStrategy{ 1842 Stagger: 1 * time.Second, 1843 MaxParallel: 5, 1844 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1845 MinHealthyTime: 2 * time.Minute, 1846 HealthyDeadline: 5 * time.Minute, 1847 ProgressDeadline: 5 * time.Minute, 1848 AutoRevert: true, 1849 AutoPromote: false, 1850 Canary: 1, 1851 }, 1852 Meta: map[string]string{ 1853 "key": "value", 1854 }, 1855 Services: []*structs.Service{ 1856 { 1857 Name: "groupserviceA", 1858 Tags: []string{"a", "b"}, 1859 CanaryTags: []string{"d", "e"}, 1860 PortLabel: "1234", 1861 AddressMode: "auto", 1862 Meta: map[string]string{ 1863 "servicemeta": "foobar", 1864 }, 1865 Checks: []*structs.ServiceCheck{ 1866 { 1867 Name: "bar", 1868 Type: "http", 1869 Command: "foo", 1870 Args: []string{"a", "b"}, 1871 Path: "/check", 1872 Protocol: "http", 1873 PortLabel: "foo", 1874 AddressMode: "driver", 1875 GRPCService: "foo.Bar", 1876 GRPCUseTLS: true, 1877 Interval: 4 * time.Second, 1878 Timeout: 2 * time.Second, 1879 InitialStatus: "ok", 1880 CheckRestart: &structs.CheckRestart{ 1881 Grace: 11 * time.Second, 1882 Limit: 3, 1883 IgnoreWarnings: true, 1884 }, 1885 TaskName: "task1", 1886 }, 1887 }, 1888 Connect: &structs.ConsulConnect{ 1889 Native: false, 1890 SidecarService: &structs.ConsulSidecarService{ 1891 Tags: []string{"f", "g"}, 1892 Port: "9000", 1893 }, 1894 }, 1895 }, 1896 }, 1897 Tasks: []*structs.Task{ 1898 { 1899 Name: "task1", 1900 Driver: "docker", 1901 Leader: true, 1902 User: 
"mary", 1903 Config: map[string]interface{}{ 1904 "lol": "code", 1905 }, 1906 Constraints: []*structs.Constraint{ 1907 { 1908 LTarget: "x", 1909 RTarget: "y", 1910 Operand: "z", 1911 }, 1912 }, 1913 Affinities: []*structs.Affinity{ 1914 { 1915 LTarget: "a", 1916 RTarget: "b", 1917 Operand: "c", 1918 Weight: 50, 1919 }, 1920 }, 1921 Env: map[string]string{ 1922 "hello": "world", 1923 }, 1924 Services: []*structs.Service{ 1925 { 1926 Name: "serviceA", 1927 Tags: []string{"1", "2"}, 1928 CanaryTags: []string{"3", "4"}, 1929 PortLabel: "foo", 1930 AddressMode: "auto", 1931 Meta: map[string]string{ 1932 "servicemeta": "foobar", 1933 }, 1934 Checks: []*structs.ServiceCheck{ 1935 { 1936 Name: "bar", 1937 Type: "http", 1938 Command: "foo", 1939 Args: []string{"a", "b"}, 1940 Path: "/check", 1941 Protocol: "http", 1942 PortLabel: "foo", 1943 AddressMode: "driver", 1944 Interval: 4 * time.Second, 1945 Timeout: 2 * time.Second, 1946 InitialStatus: "ok", 1947 GRPCService: "foo.Bar", 1948 GRPCUseTLS: true, 1949 CheckRestart: &structs.CheckRestart{ 1950 Limit: 3, 1951 Grace: 11 * time.Second, 1952 IgnoreWarnings: true, 1953 }, 1954 }, 1955 { 1956 Name: "check2", 1957 Type: "tcp", 1958 PortLabel: "foo", 1959 Interval: 4 * time.Second, 1960 Timeout: 2 * time.Second, 1961 CheckRestart: &structs.CheckRestart{ 1962 Limit: 4, 1963 Grace: 11 * time.Second, 1964 }, 1965 }, 1966 }, 1967 }, 1968 }, 1969 Resources: &structs.Resources{ 1970 CPU: 100, 1971 MemoryMB: 10, 1972 Networks: []*structs.NetworkResource{ 1973 { 1974 IP: "10.10.11.1", 1975 MBits: 10, 1976 ReservedPorts: []structs.Port{ 1977 { 1978 Label: "http", 1979 Value: 80, 1980 }, 1981 }, 1982 DynamicPorts: []structs.Port{ 1983 { 1984 Label: "ssh", 1985 Value: 2000, 1986 }, 1987 }, 1988 }, 1989 }, 1990 Devices: []*structs.RequestedDevice{ 1991 { 1992 Name: "nvidia/gpu", 1993 Count: 4, 1994 Constraints: []*structs.Constraint{ 1995 { 1996 LTarget: "x", 1997 RTarget: "y", 1998 Operand: "z", 1999 }, 2000 }, 2001 Affinities: []*structs.Affinity{ 2002 { 2003 LTarget: "a", 2004 RTarget: "b", 2005 Operand: "c", 2006 Weight: 50, 2007 }, 2008 }, 2009 }, 2010 { 2011 Name: "gpu", 2012 Count: 1, 2013 }, 2014 }, 2015 }, 2016 Meta: map[string]string{ 2017 "lol": "code", 2018 }, 2019 KillTimeout: 10 * time.Second, 2020 KillSignal: "SIGQUIT", 2021 LogConfig: &structs.LogConfig{ 2022 MaxFiles: 10, 2023 MaxFileSizeMB: 100, 2024 }, 2025 Artifacts: []*structs.TaskArtifact{ 2026 { 2027 GetterSource: "source", 2028 GetterOptions: map[string]string{ 2029 "a": "b", 2030 }, 2031 GetterMode: "dir", 2032 RelativeDest: "dest", 2033 }, 2034 }, 2035 Vault: &structs.Vault{ 2036 Policies: []string{"a", "b", "c"}, 2037 Env: true, 2038 ChangeMode: "c", 2039 ChangeSignal: "sighup", 2040 }, 2041 Templates: []*structs.Template{ 2042 { 2043 SourcePath: "source", 2044 DestPath: "dest", 2045 EmbeddedTmpl: "embedded", 2046 ChangeMode: "change", 2047 ChangeSignal: "SIGNAL", 2048 Splay: 1 * time.Minute, 2049 Perms: "666", 2050 LeftDelim: "abc", 2051 RightDelim: "def", 2052 Envvars: true, 2053 VaultGrace: 3 * time.Second, 2054 }, 2055 }, 2056 DispatchPayload: &structs.DispatchPayloadConfig{ 2057 File: "fileA", 2058 }, 2059 }, 2060 }, 2061 }, 2062 }, 2063 2064 ConsulToken: "abc123", 2065 VaultToken: "def456", 2066 } 2067 2068 structsJob := ApiJobToStructJob(apiJob) 2069 2070 if diff := pretty.Diff(expected, structsJob); len(diff) > 0 { 2071 t.Fatalf("bad:\n%s", strings.Join(diff, "\n")) 2072 } 2073 2074 systemAPIJob := &api.Job{ 2075 Stop: helper.BoolToPtr(true), 2076 Region: 
helper.StringToPtr("global"), 2077 Namespace: helper.StringToPtr("foo"), 2078 ID: helper.StringToPtr("foo"), 2079 ParentID: helper.StringToPtr("lol"), 2080 Name: helper.StringToPtr("name"), 2081 Type: helper.StringToPtr("system"), 2082 Priority: helper.IntToPtr(50), 2083 AllAtOnce: helper.BoolToPtr(true), 2084 Datacenters: []string{"dc1", "dc2"}, 2085 Constraints: []*api.Constraint{ 2086 { 2087 LTarget: "a", 2088 RTarget: "b", 2089 Operand: "c", 2090 }, 2091 }, 2092 TaskGroups: []*api.TaskGroup{ 2093 { 2094 Name: helper.StringToPtr("group1"), 2095 Count: helper.IntToPtr(5), 2096 Constraints: []*api.Constraint{ 2097 { 2098 LTarget: "x", 2099 RTarget: "y", 2100 Operand: "z", 2101 }, 2102 }, 2103 RestartPolicy: &api.RestartPolicy{ 2104 Interval: helper.TimeToPtr(1 * time.Second), 2105 Attempts: helper.IntToPtr(5), 2106 Delay: helper.TimeToPtr(10 * time.Second), 2107 Mode: helper.StringToPtr("delay"), 2108 }, 2109 EphemeralDisk: &api.EphemeralDisk{ 2110 SizeMB: helper.IntToPtr(100), 2111 Sticky: helper.BoolToPtr(true), 2112 Migrate: helper.BoolToPtr(true), 2113 }, 2114 Meta: map[string]string{ 2115 "key": "value", 2116 }, 2117 Tasks: []*api.Task{ 2118 { 2119 Name: "task1", 2120 Leader: true, 2121 Driver: "docker", 2122 User: "mary", 2123 Config: map[string]interface{}{ 2124 "lol": "code", 2125 }, 2126 Env: map[string]string{ 2127 "hello": "world", 2128 }, 2129 Constraints: []*api.Constraint{ 2130 { 2131 LTarget: "x", 2132 RTarget: "y", 2133 Operand: "z", 2134 }, 2135 }, 2136 Resources: &api.Resources{ 2137 CPU: helper.IntToPtr(100), 2138 MemoryMB: helper.IntToPtr(10), 2139 Networks: []*api.NetworkResource{ 2140 { 2141 IP: "10.10.11.1", 2142 MBits: helper.IntToPtr(10), 2143 ReservedPorts: []api.Port{ 2144 { 2145 Label: "http", 2146 Value: 80, 2147 }, 2148 }, 2149 DynamicPorts: []api.Port{ 2150 { 2151 Label: "ssh", 2152 Value: 2000, 2153 }, 2154 }, 2155 }, 2156 }, 2157 }, 2158 Meta: map[string]string{ 2159 "lol": "code", 2160 }, 2161 KillTimeout: helper.TimeToPtr(10 * time.Second), 2162 KillSignal: "SIGQUIT", 2163 LogConfig: &api.LogConfig{ 2164 MaxFiles: helper.IntToPtr(10), 2165 MaxFileSizeMB: helper.IntToPtr(100), 2166 }, 2167 Artifacts: []*api.TaskArtifact{ 2168 { 2169 GetterSource: helper.StringToPtr("source"), 2170 GetterOptions: map[string]string{ 2171 "a": "b", 2172 }, 2173 GetterMode: helper.StringToPtr("dir"), 2174 RelativeDest: helper.StringToPtr("dest"), 2175 }, 2176 }, 2177 DispatchPayload: &api.DispatchPayloadConfig{ 2178 File: "fileA", 2179 }, 2180 }, 2181 }, 2182 }, 2183 }, 2184 Status: helper.StringToPtr("status"), 2185 StatusDescription: helper.StringToPtr("status_desc"), 2186 Version: helper.Uint64ToPtr(10), 2187 CreateIndex: helper.Uint64ToPtr(1), 2188 ModifyIndex: helper.Uint64ToPtr(3), 2189 JobModifyIndex: helper.Uint64ToPtr(5), 2190 } 2191 2192 expectedSystemJob := &structs.Job{ 2193 Stop: true, 2194 Region: "global", 2195 Namespace: "foo", 2196 ID: "foo", 2197 ParentID: "lol", 2198 Name: "name", 2199 Type: "system", 2200 Priority: 50, 2201 AllAtOnce: true, 2202 Datacenters: []string{"dc1", "dc2"}, 2203 Constraints: []*structs.Constraint{ 2204 { 2205 LTarget: "a", 2206 RTarget: "b", 2207 Operand: "c", 2208 }, 2209 }, 2210 TaskGroups: []*structs.TaskGroup{ 2211 { 2212 Name: "group1", 2213 Count: 5, 2214 Constraints: []*structs.Constraint{ 2215 { 2216 LTarget: "x", 2217 RTarget: "y", 2218 Operand: "z", 2219 }, 2220 }, 2221 RestartPolicy: &structs.RestartPolicy{ 2222 Interval: 1 * time.Second, 2223 Attempts: 5, 2224 Delay: 10 * time.Second, 2225 Mode: "delay", 2226 }, 2227 
EphemeralDisk: &structs.EphemeralDisk{ 2228 SizeMB: 100, 2229 Sticky: true, 2230 Migrate: true, 2231 }, 2232 Meta: map[string]string{ 2233 "key": "value", 2234 }, 2235 Tasks: []*structs.Task{ 2236 { 2237 Name: "task1", 2238 Driver: "docker", 2239 Leader: true, 2240 User: "mary", 2241 Config: map[string]interface{}{ 2242 "lol": "code", 2243 }, 2244 Constraints: []*structs.Constraint{ 2245 { 2246 LTarget: "x", 2247 RTarget: "y", 2248 Operand: "z", 2249 }, 2250 }, 2251 Env: map[string]string{ 2252 "hello": "world", 2253 }, 2254 Resources: &structs.Resources{ 2255 CPU: 100, 2256 MemoryMB: 10, 2257 Networks: []*structs.NetworkResource{ 2258 { 2259 IP: "10.10.11.1", 2260 MBits: 10, 2261 ReservedPorts: []structs.Port{ 2262 { 2263 Label: "http", 2264 Value: 80, 2265 }, 2266 }, 2267 DynamicPorts: []structs.Port{ 2268 { 2269 Label: "ssh", 2270 Value: 2000, 2271 }, 2272 }, 2273 }, 2274 }, 2275 }, 2276 Meta: map[string]string{ 2277 "lol": "code", 2278 }, 2279 KillTimeout: 10 * time.Second, 2280 KillSignal: "SIGQUIT", 2281 LogConfig: &structs.LogConfig{ 2282 MaxFiles: 10, 2283 MaxFileSizeMB: 100, 2284 }, 2285 Artifacts: []*structs.TaskArtifact{ 2286 { 2287 GetterSource: "source", 2288 GetterOptions: map[string]string{ 2289 "a": "b", 2290 }, 2291 GetterMode: "dir", 2292 RelativeDest: "dest", 2293 }, 2294 }, 2295 DispatchPayload: &structs.DispatchPayloadConfig{ 2296 File: "fileA", 2297 }, 2298 }, 2299 }, 2300 }, 2301 }, 2302 } 2303 2304 systemStructsJob := ApiJobToStructJob(systemAPIJob) 2305 2306 if diff := pretty.Diff(expectedSystemJob, systemStructsJob); len(diff) > 0 { 2307 t.Fatalf("bad:\n%s", strings.Join(diff, "\n")) 2308 } 2309 } 2310 2311 func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) { 2312 apiJob := &api.Job{ 2313 Update: &api.UpdateStrategy{ 2314 Stagger: helper.TimeToPtr(1 * time.Second), 2315 MaxParallel: helper.IntToPtr(5), 2316 HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual), 2317 MinHealthyTime: helper.TimeToPtr(1 * time.Minute), 2318 HealthyDeadline: helper.TimeToPtr(3 * time.Minute), 2319 ProgressDeadline: helper.TimeToPtr(3 * time.Minute), 2320 AutoRevert: helper.BoolToPtr(false), 2321 AutoPromote: nil, 2322 Canary: helper.IntToPtr(1), 2323 }, 2324 TaskGroups: []*api.TaskGroup{ 2325 { 2326 Update: &api.UpdateStrategy{ 2327 Canary: helper.IntToPtr(2), 2328 AutoRevert: helper.BoolToPtr(true), 2329 }, 2330 }, { 2331 Update: &api.UpdateStrategy{ 2332 Canary: helper.IntToPtr(3), 2333 AutoPromote: helper.BoolToPtr(true), 2334 }, 2335 }, 2336 }, 2337 } 2338 2339 structsJob := ApiJobToStructJob(apiJob) 2340 2341 // Update has been moved from job down to the groups 2342 jobUpdate := structs.UpdateStrategy{ 2343 Stagger: 1000000000, 2344 MaxParallel: 5, 2345 HealthCheck: "", 2346 MinHealthyTime: 0, 2347 HealthyDeadline: 0, 2348 ProgressDeadline: 0, 2349 AutoRevert: false, 2350 AutoPromote: false, 2351 Canary: 0, 2352 } 2353 2354 // But the groups inherit settings from the job update 2355 group1 := structs.UpdateStrategy{ 2356 Stagger: 1000000000, 2357 MaxParallel: 5, 2358 HealthCheck: "manual", 2359 MinHealthyTime: 60000000000, 2360 HealthyDeadline: 180000000000, 2361 ProgressDeadline: 180000000000, 2362 AutoRevert: true, 2363 AutoPromote: false, 2364 Canary: 2, 2365 } 2366 2367 group2 := structs.UpdateStrategy{ 2368 Stagger: 1000000000, 2369 MaxParallel: 5, 2370 HealthCheck: "manual", 2371 MinHealthyTime: 60000000000, 2372 HealthyDeadline: 180000000000, 2373 ProgressDeadline: 180000000000, 2374 AutoRevert: false, 2375 AutoPromote: true, 2376 Canary: 3, 2377 } 
2378 2379 require.Equal(t, jobUpdate, structsJob.Update) 2380 require.Equal(t, group1, *structsJob.TaskGroups[0].Update) 2381 require.Equal(t, group2, *structsJob.TaskGroups[1].Update) 2382 } 2383 2384 // TestHTTP_JobValidate_SystemMigrate asserts that a system job with a migrate 2385 // stanza fails to validate but does not panic (see #5477). 2386 func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { 2387 t.Parallel() 2388 httpTest(t, nil, func(s *TestAgent) { 2389 // Create the job 2390 job := &api.Job{ 2391 Region: helper.StringToPtr("global"), 2392 Datacenters: []string{"dc1"}, 2393 ID: helper.StringToPtr("systemmigrate"), 2394 Name: helper.StringToPtr("systemmigrate"), 2395 TaskGroups: []*api.TaskGroup{ 2396 {Name: helper.StringToPtr("web")}, 2397 }, 2398 2399 // System job... 2400 Type: helper.StringToPtr("system"), 2401 2402 // ...with an empty migrate stanza 2403 Migrate: &api.MigrateStrategy{}, 2404 } 2405 2406 args := api.JobValidateRequest{ 2407 Job: job, 2408 WriteRequest: api.WriteRequest{Region: "global"}, 2409 } 2410 buf := encodeReq(args) 2411 2412 // Make the HTTP request 2413 req, err := http.NewRequest("PUT", "/v1/validate/job", buf) 2414 require.NoError(t, err) 2415 respW := httptest.NewRecorder() 2416 2417 // Make the request 2418 obj, err := s.Server.ValidateJobRequest(respW, req) 2419 require.NoError(t, err) 2420 2421 // Check the response 2422 resp := obj.(structs.JobValidateResponse) 2423 require.Contains(t, resp.Error, `Job type "system" does not allow migrate block`) 2424 }) 2425 }