github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/command/agent/job_endpoint_test.go (about) 1 package agent 2 3 import ( 4 "net/http" 5 "net/http/httptest" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/golang/snappy" 12 "github.com/hashicorp/nomad/api" 13 "github.com/hashicorp/nomad/helper" 14 "github.com/hashicorp/nomad/nomad/mock" 15 "github.com/hashicorp/nomad/nomad/structs" 16 "github.com/kr/pretty" 17 "github.com/stretchr/testify/assert" 18 "github.com/stretchr/testify/require" 19 ) 20 21 func TestHTTP_JobsList(t *testing.T) { 22 t.Parallel() 23 httpTest(t, nil, func(s *TestAgent) { 24 for i := 0; i < 3; i++ { 25 // Create the job 26 job := mock.Job() 27 args := structs.JobRegisterRequest{ 28 Job: job, 29 WriteRequest: structs.WriteRequest{ 30 Region: "global", 31 Namespace: structs.DefaultNamespace, 32 }, 33 } 34 var resp structs.JobRegisterResponse 35 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 36 t.Fatalf("err: %v", err) 37 } 38 } 39 40 // Make the HTTP request 41 req, err := http.NewRequest("GET", "/v1/jobs", nil) 42 if err != nil { 43 t.Fatalf("err: %v", err) 44 } 45 respW := httptest.NewRecorder() 46 47 // Make the request 48 obj, err := s.Server.JobsRequest(respW, req) 49 if err != nil { 50 t.Fatalf("err: %v", err) 51 } 52 53 // Check for the index 54 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 55 t.Fatalf("missing index") 56 } 57 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 58 t.Fatalf("missing known leader") 59 } 60 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 61 t.Fatalf("missing last contact") 62 } 63 64 // Check the job 65 j := obj.([]*structs.JobListStub) 66 if len(j) != 3 { 67 t.Fatalf("bad: %#v", j) 68 } 69 }) 70 } 71 72 func TestHTTP_PrefixJobsList(t *testing.T) { 73 ids := []string{ 74 "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706", 75 "aabbbbbb-e8f7-fd38-c855-ab94ceb89706", 76 "aabbcccc-e8f7-fd38-c855-ab94ceb89706", 77 } 78 t.Parallel() 79 httpTest(t, nil, func(s 
*TestAgent) { 80 for i := 0; i < 3; i++ { 81 // Create the job 82 job := mock.Job() 83 job.ID = ids[i] 84 job.TaskGroups[0].Count = 1 85 args := structs.JobRegisterRequest{ 86 Job: job, 87 WriteRequest: structs.WriteRequest{ 88 Region: "global", 89 Namespace: structs.DefaultNamespace, 90 }, 91 } 92 var resp structs.JobRegisterResponse 93 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 94 t.Fatalf("err: %v", err) 95 } 96 } 97 98 // Make the HTTP request 99 req, err := http.NewRequest("GET", "/v1/jobs?prefix=aabb", nil) 100 if err != nil { 101 t.Fatalf("err: %v", err) 102 } 103 respW := httptest.NewRecorder() 104 105 // Make the request 106 obj, err := s.Server.JobsRequest(respW, req) 107 if err != nil { 108 t.Fatalf("err: %v", err) 109 } 110 111 // Check for the index 112 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 113 t.Fatalf("missing index") 114 } 115 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 116 t.Fatalf("missing known leader") 117 } 118 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 119 t.Fatalf("missing last contact") 120 } 121 122 // Check the job 123 j := obj.([]*structs.JobListStub) 124 if len(j) != 2 { 125 t.Fatalf("bad: %#v", j) 126 } 127 }) 128 } 129 130 func TestHTTP_JobsRegister(t *testing.T) { 131 t.Parallel() 132 httpTest(t, nil, func(s *TestAgent) { 133 // Create the job 134 job := MockJob() 135 args := api.JobRegisterRequest{ 136 Job: job, 137 WriteRequest: api.WriteRequest{Region: "global"}, 138 } 139 buf := encodeReq(args) 140 141 // Make the HTTP request 142 req, err := http.NewRequest("PUT", "/v1/jobs", buf) 143 if err != nil { 144 t.Fatalf("err: %v", err) 145 } 146 respW := httptest.NewRecorder() 147 148 // Make the request 149 obj, err := s.Server.JobsRequest(respW, req) 150 if err != nil { 151 t.Fatalf("err: %v", err) 152 } 153 154 // Check the response 155 dereg := obj.(structs.JobRegisterResponse) 156 if dereg.EvalID == "" { 157 t.Fatalf("bad: %v", dereg) 158 } 159 160 // Check for the index 161 
if respW.HeaderMap.Get("X-Nomad-Index") == "" { 162 t.Fatalf("missing index") 163 } 164 165 // Check the job is registered 166 getReq := structs.JobSpecificRequest{ 167 JobID: *job.ID, 168 QueryOptions: structs.QueryOptions{ 169 Region: "global", 170 Namespace: structs.DefaultNamespace, 171 }, 172 } 173 var getResp structs.SingleJobResponse 174 if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil { 175 t.Fatalf("err: %v", err) 176 } 177 178 if getResp.Job == nil { 179 t.Fatalf("job does not exist") 180 } 181 }) 182 } 183 184 // Test that ACL token is properly threaded through to the RPC endpoint 185 func TestHTTP_JobsRegister_ACL(t *testing.T) { 186 t.Parallel() 187 httpACLTest(t, nil, func(s *TestAgent) { 188 // Create the job 189 job := MockJob() 190 args := api.JobRegisterRequest{ 191 Job: job, 192 WriteRequest: api.WriteRequest{ 193 Region: "global", 194 }, 195 } 196 buf := encodeReq(args) 197 198 // Make the HTTP request 199 req, err := http.NewRequest("PUT", "/v1/jobs", buf) 200 if err != nil { 201 t.Fatalf("err: %v", err) 202 } 203 respW := httptest.NewRecorder() 204 setToken(req, s.RootToken) 205 206 // Make the request 207 obj, err := s.Server.JobsRequest(respW, req) 208 if err != nil { 209 t.Fatalf("err: %v", err) 210 } 211 assert.NotNil(t, obj) 212 }) 213 } 214 215 func TestHTTP_JobsRegister_Defaulting(t *testing.T) { 216 t.Parallel() 217 httpTest(t, nil, func(s *TestAgent) { 218 // Create the job 219 job := MockJob() 220 221 // Do not set its priority 222 job.Priority = nil 223 224 args := api.JobRegisterRequest{ 225 Job: job, 226 WriteRequest: api.WriteRequest{Region: "global"}, 227 } 228 buf := encodeReq(args) 229 230 // Make the HTTP request 231 req, err := http.NewRequest("PUT", "/v1/jobs", buf) 232 if err != nil { 233 t.Fatalf("err: %v", err) 234 } 235 respW := httptest.NewRecorder() 236 237 // Make the request 238 obj, err := s.Server.JobsRequest(respW, req) 239 if err != nil { 240 t.Fatalf("err: %v", err) 241 } 242 243 // Check the 
response 244 dereg := obj.(structs.JobRegisterResponse) 245 if dereg.EvalID == "" { 246 t.Fatalf("bad: %v", dereg) 247 } 248 249 // Check for the index 250 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 251 t.Fatalf("missing index") 252 } 253 254 // Check the job is registered 255 getReq := structs.JobSpecificRequest{ 256 JobID: *job.ID, 257 QueryOptions: structs.QueryOptions{ 258 Region: "global", 259 Namespace: structs.DefaultNamespace, 260 }, 261 } 262 var getResp structs.SingleJobResponse 263 if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil { 264 t.Fatalf("err: %v", err) 265 } 266 267 if getResp.Job == nil { 268 t.Fatalf("job does not exist") 269 } 270 if getResp.Job.Priority != 50 { 271 t.Fatalf("job didn't get defaulted") 272 } 273 }) 274 } 275 276 func TestHTTP_JobsParse(t *testing.T) { 277 t.Parallel() 278 httpTest(t, nil, func(s *TestAgent) { 279 buf := encodeReq(api.JobsParseRequest{JobHCL: mock.HCL()}) 280 req, err := http.NewRequest("POST", "/v1/jobs/parse", buf) 281 if err != nil { 282 t.Fatalf("err: %v", err) 283 } 284 285 respW := httptest.NewRecorder() 286 287 obj, err := s.Server.JobsParseRequest(respW, req) 288 if err != nil { 289 t.Fatalf("err: %v", err) 290 } 291 if obj == nil { 292 t.Fatal("response should not be nil") 293 } 294 295 job := obj.(*api.Job) 296 expected := mock.Job() 297 if job.Name == nil || *job.Name != expected.Name { 298 t.Fatalf("job name is '%s', expected '%s'", *job.Name, expected.Name) 299 } 300 301 if job.Datacenters == nil || 302 job.Datacenters[0] != expected.Datacenters[0] { 303 t.Fatalf("job datacenters is '%s', expected '%s'", 304 job.Datacenters[0], expected.Datacenters[0]) 305 } 306 }) 307 } 308 func TestHTTP_JobQuery(t *testing.T) { 309 t.Parallel() 310 httpTest(t, nil, func(s *TestAgent) { 311 // Create the job 312 job := mock.Job() 313 args := structs.JobRegisterRequest{ 314 Job: job, 315 WriteRequest: structs.WriteRequest{ 316 Region: "global", 317 Namespace: structs.DefaultNamespace, 318 }, 
319 } 320 var resp structs.JobRegisterResponse 321 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 322 t.Fatalf("err: %v", err) 323 } 324 325 // Make the HTTP request 326 req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil) 327 if err != nil { 328 t.Fatalf("err: %v", err) 329 } 330 respW := httptest.NewRecorder() 331 332 // Make the request 333 obj, err := s.Server.JobSpecificRequest(respW, req) 334 if err != nil { 335 t.Fatalf("err: %v", err) 336 } 337 338 // Check for the index 339 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 340 t.Fatalf("missing index") 341 } 342 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 343 t.Fatalf("missing known leader") 344 } 345 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 346 t.Fatalf("missing last contact") 347 } 348 349 // Check the job 350 j := obj.(*structs.Job) 351 if j.ID != job.ID { 352 t.Fatalf("bad: %#v", j) 353 } 354 }) 355 } 356 357 func TestHTTP_JobQuery_Payload(t *testing.T) { 358 t.Parallel() 359 httpTest(t, nil, func(s *TestAgent) { 360 // Create the job 361 job := mock.Job() 362 363 // Insert Payload compressed 364 expected := []byte("hello world") 365 compressed := snappy.Encode(nil, expected) 366 job.Payload = compressed 367 368 // Directly manipulate the state 369 state := s.Agent.server.State() 370 if err := state.UpsertJob(1000, job); err != nil { 371 t.Fatalf("Failed to upsert job: %v", err) 372 } 373 374 // Make the HTTP request 375 req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil) 376 if err != nil { 377 t.Fatalf("err: %v", err) 378 } 379 respW := httptest.NewRecorder() 380 381 // Make the request 382 obj, err := s.Server.JobSpecificRequest(respW, req) 383 if err != nil { 384 t.Fatalf("err: %v", err) 385 } 386 387 // Check for the index 388 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 389 t.Fatalf("missing index") 390 } 391 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 392 t.Fatalf("missing known leader") 393 } 394 if 
respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 395 t.Fatalf("missing last contact") 396 } 397 398 // Check the job 399 j := obj.(*structs.Job) 400 if j.ID != job.ID { 401 t.Fatalf("bad: %#v", j) 402 } 403 404 // Check the payload is decompressed 405 if !reflect.DeepEqual(j.Payload, expected) { 406 t.Fatalf("Payload not decompressed properly; got %#v; want %#v", j.Payload, expected) 407 } 408 }) 409 } 410 411 func TestHTTP_JobUpdate(t *testing.T) { 412 t.Parallel() 413 httpTest(t, nil, func(s *TestAgent) { 414 // Create the job 415 job := MockJob() 416 args := api.JobRegisterRequest{ 417 Job: job, 418 WriteRequest: api.WriteRequest{ 419 Region: "global", 420 Namespace: api.DefaultNamespace, 421 }, 422 } 423 buf := encodeReq(args) 424 425 // Make the HTTP request 426 req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf) 427 if err != nil { 428 t.Fatalf("err: %v", err) 429 } 430 respW := httptest.NewRecorder() 431 432 // Make the request 433 obj, err := s.Server.JobSpecificRequest(respW, req) 434 if err != nil { 435 t.Fatalf("err: %v", err) 436 } 437 438 // Check the response 439 dereg := obj.(structs.JobRegisterResponse) 440 if dereg.EvalID == "" { 441 t.Fatalf("bad: %v", dereg) 442 } 443 444 // Check for the index 445 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 446 t.Fatalf("missing index") 447 } 448 449 // Check the job is registered 450 getReq := structs.JobSpecificRequest{ 451 JobID: *job.ID, 452 QueryOptions: structs.QueryOptions{ 453 Region: "global", 454 Namespace: structs.DefaultNamespace, 455 }, 456 } 457 var getResp structs.SingleJobResponse 458 if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil { 459 t.Fatalf("err: %v", err) 460 } 461 462 if getResp.Job == nil { 463 t.Fatalf("job does not exist") 464 } 465 }) 466 } 467 468 func TestHTTP_JobDelete(t *testing.T) { 469 t.Parallel() 470 httpTest(t, nil, func(s *TestAgent) { 471 // Create the job 472 job := mock.Job() 473 args := structs.JobRegisterRequest{ 474 Job: job, 475 
WriteRequest: structs.WriteRequest{ 476 Region: "global", 477 Namespace: structs.DefaultNamespace, 478 }, 479 } 480 var resp structs.JobRegisterResponse 481 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 482 t.Fatalf("err: %v", err) 483 } 484 485 // Make the HTTP request to do a soft delete 486 req, err := http.NewRequest("DELETE", "/v1/job/"+job.ID, nil) 487 if err != nil { 488 t.Fatalf("err: %v", err) 489 } 490 respW := httptest.NewRecorder() 491 492 // Make the request 493 obj, err := s.Server.JobSpecificRequest(respW, req) 494 if err != nil { 495 t.Fatalf("err: %v", err) 496 } 497 498 // Check the response 499 dereg := obj.(structs.JobDeregisterResponse) 500 if dereg.EvalID == "" { 501 t.Fatalf("bad: %v", dereg) 502 } 503 504 // Check for the index 505 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 506 t.Fatalf("missing index") 507 } 508 509 // Check the job is still queryable 510 getReq1 := structs.JobSpecificRequest{ 511 JobID: job.ID, 512 QueryOptions: structs.QueryOptions{ 513 Region: "global", 514 Namespace: structs.DefaultNamespace, 515 }, 516 } 517 var getResp1 structs.SingleJobResponse 518 if err := s.Agent.RPC("Job.GetJob", &getReq1, &getResp1); err != nil { 519 t.Fatalf("err: %v", err) 520 } 521 if getResp1.Job == nil { 522 t.Fatalf("job doesn't exists") 523 } 524 if !getResp1.Job.Stop { 525 t.Fatalf("job should be marked as stop") 526 } 527 528 // Make the HTTP request to do a purge delete 529 req2, err := http.NewRequest("DELETE", "/v1/job/"+job.ID+"?purge=true", nil) 530 if err != nil { 531 t.Fatalf("err: %v", err) 532 } 533 respW.Flush() 534 535 // Make the request 536 obj, err = s.Server.JobSpecificRequest(respW, req2) 537 if err != nil { 538 t.Fatalf("err: %v", err) 539 } 540 541 // Check the response 542 dereg = obj.(structs.JobDeregisterResponse) 543 if dereg.EvalID == "" { 544 t.Fatalf("bad: %v", dereg) 545 } 546 547 // Check for the index 548 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 549 t.Fatalf("missing index") 
550 } 551 552 // Check the job is gone 553 getReq2 := structs.JobSpecificRequest{ 554 JobID: job.ID, 555 QueryOptions: structs.QueryOptions{ 556 Region: "global", 557 Namespace: structs.DefaultNamespace, 558 }, 559 } 560 var getResp2 structs.SingleJobResponse 561 if err := s.Agent.RPC("Job.GetJob", &getReq2, &getResp2); err != nil { 562 t.Fatalf("err: %v", err) 563 } 564 if getResp2.Job != nil { 565 t.Fatalf("job still exists") 566 } 567 }) 568 } 569 570 func TestHTTP_JobForceEvaluate(t *testing.T) { 571 t.Parallel() 572 httpTest(t, nil, func(s *TestAgent) { 573 // Create the job 574 job := mock.Job() 575 args := structs.JobRegisterRequest{ 576 Job: job, 577 WriteRequest: structs.WriteRequest{ 578 Region: "global", 579 Namespace: structs.DefaultNamespace, 580 }, 581 } 582 var resp structs.JobRegisterResponse 583 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 584 t.Fatalf("err: %v", err) 585 } 586 587 // Make the HTTP request 588 req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", nil) 589 if err != nil { 590 t.Fatalf("err: %v", err) 591 } 592 respW := httptest.NewRecorder() 593 594 // Make the request 595 obj, err := s.Server.JobSpecificRequest(respW, req) 596 if err != nil { 597 t.Fatalf("err: %v", err) 598 } 599 600 // Check the response 601 reg := obj.(structs.JobRegisterResponse) 602 if reg.EvalID == "" { 603 t.Fatalf("bad: %v", reg) 604 } 605 606 // Check for the index 607 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 608 t.Fatalf("missing index") 609 } 610 }) 611 } 612 613 func TestHTTP_JobEvaluate_ForceReschedule(t *testing.T) { 614 t.Parallel() 615 httpTest(t, nil, func(s *TestAgent) { 616 // Create the job 617 job := mock.Job() 618 args := structs.JobRegisterRequest{ 619 Job: job, 620 WriteRequest: structs.WriteRequest{ 621 Region: "global", 622 Namespace: structs.DefaultNamespace, 623 }, 624 } 625 var resp structs.JobRegisterResponse 626 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 627 
t.Fatalf("err: %v", err) 628 } 629 jobEvalReq := api.JobEvaluateRequest{ 630 JobID: job.ID, 631 EvalOptions: api.EvalOptions{ 632 ForceReschedule: true, 633 }, 634 } 635 636 buf := encodeReq(jobEvalReq) 637 638 // Make the HTTP request 639 req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", buf) 640 if err != nil { 641 t.Fatalf("err: %v", err) 642 } 643 respW := httptest.NewRecorder() 644 645 // Make the request 646 obj, err := s.Server.JobSpecificRequest(respW, req) 647 if err != nil { 648 t.Fatalf("err: %v", err) 649 } 650 651 // Check the response 652 reg := obj.(structs.JobRegisterResponse) 653 if reg.EvalID == "" { 654 t.Fatalf("bad: %v", reg) 655 } 656 657 // Check for the index 658 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 659 t.Fatalf("missing index") 660 } 661 }) 662 } 663 664 func TestHTTP_JobEvaluations(t *testing.T) { 665 t.Parallel() 666 httpTest(t, nil, func(s *TestAgent) { 667 // Create the job 668 job := mock.Job() 669 args := structs.JobRegisterRequest{ 670 Job: job, 671 WriteRequest: structs.WriteRequest{ 672 Region: "global", 673 Namespace: structs.DefaultNamespace, 674 }, 675 } 676 var resp structs.JobRegisterResponse 677 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 678 t.Fatalf("err: %v", err) 679 } 680 681 // Make the HTTP request 682 req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/evaluations", nil) 683 if err != nil { 684 t.Fatalf("err: %v", err) 685 } 686 respW := httptest.NewRecorder() 687 688 // Make the request 689 obj, err := s.Server.JobSpecificRequest(respW, req) 690 if err != nil { 691 t.Fatalf("err: %v", err) 692 } 693 694 // Check the response 695 evals := obj.([]*structs.Evaluation) 696 // Can be multiple evals, use the last one, since they are in order 697 idx := len(evals) - 1 698 if len(evals) < 0 || evals[idx].ID != resp.EvalID { 699 t.Fatalf("bad: %v", evals) 700 } 701 702 // Check for the index 703 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 704 t.Fatalf("missing index") 
705 } 706 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 707 t.Fatalf("missing known leader") 708 } 709 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 710 t.Fatalf("missing last contact") 711 } 712 }) 713 } 714 715 func TestHTTP_JobAllocations(t *testing.T) { 716 t.Parallel() 717 httpTest(t, nil, func(s *TestAgent) { 718 // Create the job 719 alloc1 := mock.Alloc() 720 args := structs.JobRegisterRequest{ 721 Job: alloc1.Job, 722 WriteRequest: structs.WriteRequest{ 723 Region: "global", 724 Namespace: structs.DefaultNamespace, 725 }, 726 } 727 var resp structs.JobRegisterResponse 728 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 729 t.Fatalf("err: %v", err) 730 } 731 732 // Directly manipulate the state 733 expectedDisplayMsg := "test message" 734 testEvent := structs.NewTaskEvent("test event").SetMessage(expectedDisplayMsg) 735 var events []*structs.TaskEvent 736 events = append(events, testEvent) 737 taskState := &structs.TaskState{Events: events} 738 alloc1.TaskStates = make(map[string]*structs.TaskState) 739 alloc1.TaskStates["test"] = taskState 740 state := s.Agent.server.State() 741 err := state.UpsertAllocs(1000, []*structs.Allocation{alloc1}) 742 if err != nil { 743 t.Fatalf("err: %v", err) 744 } 745 746 // Make the HTTP request 747 req, err := http.NewRequest("GET", "/v1/job/"+alloc1.Job.ID+"/allocations?all=true", nil) 748 if err != nil { 749 t.Fatalf("err: %v", err) 750 } 751 respW := httptest.NewRecorder() 752 753 // Make the request 754 obj, err := s.Server.JobSpecificRequest(respW, req) 755 if err != nil { 756 t.Fatalf("err: %v", err) 757 } 758 759 // Check the response 760 allocs := obj.([]*structs.AllocListStub) 761 if len(allocs) != 1 && allocs[0].ID != alloc1.ID { 762 t.Fatalf("bad: %v", allocs) 763 } 764 displayMsg := allocs[0].TaskStates["test"].Events[0].DisplayMessage 765 assert.Equal(t, expectedDisplayMsg, displayMsg) 766 767 // Check for the index 768 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 769 
t.Fatalf("missing index") 770 } 771 if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" { 772 t.Fatalf("missing known leader") 773 } 774 if respW.HeaderMap.Get("X-Nomad-LastContact") == "" { 775 t.Fatalf("missing last contact") 776 } 777 }) 778 } 779 780 func TestHTTP_JobDeployments(t *testing.T) { 781 assert := assert.New(t) 782 t.Parallel() 783 httpTest(t, nil, func(s *TestAgent) { 784 // Create the job 785 j := mock.Job() 786 args := structs.JobRegisterRequest{ 787 Job: j, 788 WriteRequest: structs.WriteRequest{ 789 Region: "global", 790 Namespace: structs.DefaultNamespace, 791 }, 792 } 793 var resp structs.JobRegisterResponse 794 assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister") 795 796 // Directly manipulate the state 797 state := s.Agent.server.State() 798 d := mock.Deployment() 799 d.JobID = j.ID 800 d.JobCreateIndex = resp.JobModifyIndex 801 802 assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment") 803 804 // Make the HTTP request 805 req, err := http.NewRequest("GET", "/v1/job/"+j.ID+"/deployments", nil) 806 assert.Nil(err, "HTTP") 807 respW := httptest.NewRecorder() 808 809 // Make the request 810 obj, err := s.Server.JobSpecificRequest(respW, req) 811 assert.Nil(err, "JobSpecificRequest") 812 813 // Check the response 814 deploys := obj.([]*structs.Deployment) 815 assert.Len(deploys, 1, "deployments") 816 assert.Equal(d.ID, deploys[0].ID, "deployment id") 817 818 assert.NotZero(respW.HeaderMap.Get("X-Nomad-Index"), "missing index") 819 assert.Equal("true", respW.HeaderMap.Get("X-Nomad-KnownLeader"), "missing known leader") 820 assert.NotZero(respW.HeaderMap.Get("X-Nomad-LastContact"), "missing last contact") 821 }) 822 } 823 824 func TestHTTP_JobDeployment(t *testing.T) { 825 assert := assert.New(t) 826 t.Parallel() 827 httpTest(t, nil, func(s *TestAgent) { 828 // Create the job 829 j := mock.Job() 830 args := structs.JobRegisterRequest{ 831 Job: j, 832 WriteRequest: structs.WriteRequest{ 833 Region: "global", 834 
Namespace: structs.DefaultNamespace,
			},
		}
		var resp structs.JobRegisterResponse
		assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister")

		// Directly manipulate the state
		state := s.Agent.server.State()
		d := mock.Deployment()
		d.JobID = j.ID
		d.JobCreateIndex = resp.JobModifyIndex
		assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment")

		// Make the HTTP request
		req, err := http.NewRequest("GET", "/v1/job/"+j.ID+"/deployment", nil)
		assert.Nil(err, "HTTP")
		respW := httptest.NewRecorder()

		// Make the request
		obj, err := s.Server.JobSpecificRequest(respW, req)
		assert.Nil(err, "JobSpecificRequest")

		// Check the response
		out := obj.(*structs.Deployment)
		assert.NotNil(out, "deployment")
		assert.Equal(d.ID, out.ID, "deployment id")

		assert.NotZero(respW.HeaderMap.Get("X-Nomad-Index"), "missing index")
		assert.Equal("true", respW.HeaderMap.Get("X-Nomad-KnownLeader"), "missing known leader")
		assert.NotZero(respW.HeaderMap.Get("X-Nomad-LastContact"), "missing last contact")
	})
}

// TestHTTP_JobVersions registers a job twice (bumping the priority) and
// verifies /v1/job/<id>/versions?diffs=true returns both versions plus a diff.
func TestHTTP_JobVersions(t *testing.T) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Register the initial version of the job.
		job := mock.Job()
		args := structs.JobRegisterRequest{
			Job: job,
			WriteRequest: structs.WriteRequest{
				Region:    "global",
				Namespace: structs.DefaultNamespace,
			},
		}
		var resp structs.JobRegisterResponse
		if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Register an updated version under the same ID.
		job2 := mock.Job()
		job2.ID = job.ID
		job2.Priority = 100

		args2 := structs.JobRegisterRequest{
			Job: job2,
			WriteRequest: structs.WriteRequest{
				Region:    "global",
				Namespace: structs.DefaultNamespace,
			},
		}
		var resp2 structs.JobRegisterResponse
		if err := s.Agent.RPC("Job.Register", &args2, &resp2); err != nil {
			t.Fatalf("err: %v", err)
		}

		// Make the HTTP request
		req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/versions?diffs=true", nil)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		respW := httptest.NewRecorder()

		// Make the request
		obj, err := s.Server.JobSpecificRequest(respW, req)
		if err != nil {
			t.Fatalf("err: %v", err)
		}

		// Check the response
		vResp := obj.(structs.JobVersionsResponse)
		versions := vResp.Versions
		if len(versions) != 2 {
			t.Fatalf("got %d versions; want 2", len(versions))
		}

		// Versions are newest-first.
		if v := versions[0]; v.Version != 1 || v.Priority != 100 {
			t.Fatalf("bad %v", v)
		}

		if v := versions[1]; v.Version != 0 {
			t.Fatalf("bad %v", v)
		}

		if len(vResp.Diffs) != 1 {
			t.Fatalf("bad %v", vResp)
		}

		// Check for the index
		if respW.HeaderMap.Get("X-Nomad-Index") == "" {
			t.Fatalf("missing index")
		}
		if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
			t.Fatalf("missing known leader")
		}
		if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
			t.Fatalf("missing last contact")
		}
	})
}

// TestHTTP_PeriodicForce forces an immediate run of a periodic job through
// /v1/job/<id>/periodic/force and checks that an evaluation is created.
func TestHTTP_PeriodicForce(t *testing.T) {
	t.Parallel()
	httpTest(t, nil, func(s *TestAgent) {
		// Create and register a periodic job.
949 job := mock.PeriodicJob() 950 args := structs.JobRegisterRequest{ 951 Job: job, 952 WriteRequest: structs.WriteRequest{ 953 Region: "global", 954 Namespace: structs.DefaultNamespace, 955 }, 956 } 957 var resp structs.JobRegisterResponse 958 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 959 t.Fatalf("err: %v", err) 960 } 961 962 // Make the HTTP request 963 req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/periodic/force", nil) 964 if err != nil { 965 t.Fatalf("err: %v", err) 966 } 967 respW := httptest.NewRecorder() 968 969 // Make the request 970 obj, err := s.Server.JobSpecificRequest(respW, req) 971 if err != nil { 972 t.Fatalf("err: %v", err) 973 } 974 975 // Check for the index 976 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 977 t.Fatalf("missing index") 978 } 979 980 // Check the response 981 r := obj.(structs.PeriodicForceResponse) 982 if r.EvalID == "" { 983 t.Fatalf("bad: %#v", r) 984 } 985 }) 986 } 987 988 func TestHTTP_JobPlan(t *testing.T) { 989 t.Parallel() 990 httpTest(t, nil, func(s *TestAgent) { 991 // Create the job 992 job := MockJob() 993 args := api.JobPlanRequest{ 994 Job: job, 995 Diff: true, 996 WriteRequest: api.WriteRequest{ 997 Region: "global", 998 Namespace: api.DefaultNamespace, 999 }, 1000 } 1001 buf := encodeReq(args) 1002 1003 // Make the HTTP request 1004 req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID+"/plan", buf) 1005 if err != nil { 1006 t.Fatalf("err: %v", err) 1007 } 1008 respW := httptest.NewRecorder() 1009 1010 // Make the request 1011 obj, err := s.Server.JobSpecificRequest(respW, req) 1012 if err != nil { 1013 t.Fatalf("err: %v", err) 1014 } 1015 1016 // Check the response 1017 plan := obj.(structs.JobPlanResponse) 1018 if plan.Annotations == nil { 1019 t.Fatalf("bad: %v", plan) 1020 } 1021 1022 if plan.Diff == nil { 1023 t.Fatalf("bad: %v", plan) 1024 } 1025 }) 1026 } 1027 1028 func TestHTTP_JobDispatch(t *testing.T) { 1029 t.Parallel() 1030 httpTest(t, nil, func(s *TestAgent) { 
1031 // Create the parameterized job 1032 job := mock.BatchJob() 1033 job.ParameterizedJob = &structs.ParameterizedJobConfig{} 1034 1035 args := structs.JobRegisterRequest{ 1036 Job: job, 1037 WriteRequest: structs.WriteRequest{ 1038 Region: "global", 1039 Namespace: structs.DefaultNamespace, 1040 }, 1041 } 1042 var resp structs.JobRegisterResponse 1043 if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil { 1044 t.Fatalf("err: %v", err) 1045 } 1046 1047 // Make the request 1048 respW := httptest.NewRecorder() 1049 args2 := structs.JobDispatchRequest{ 1050 WriteRequest: structs.WriteRequest{ 1051 Region: "global", 1052 Namespace: structs.DefaultNamespace, 1053 }, 1054 } 1055 buf := encodeReq(args2) 1056 1057 // Make the HTTP request 1058 req2, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/dispatch", buf) 1059 if err != nil { 1060 t.Fatalf("err: %v", err) 1061 } 1062 respW.Flush() 1063 1064 // Make the request 1065 obj, err := s.Server.JobSpecificRequest(respW, req2) 1066 if err != nil { 1067 t.Fatalf("err: %v", err) 1068 } 1069 1070 // Check the response 1071 dispatch := obj.(structs.JobDispatchResponse) 1072 if dispatch.EvalID == "" { 1073 t.Fatalf("bad: %v", dispatch) 1074 } 1075 1076 if dispatch.DispatchedJobID == "" { 1077 t.Fatalf("bad: %v", dispatch) 1078 } 1079 }) 1080 } 1081 1082 func TestHTTP_JobRevert(t *testing.T) { 1083 t.Parallel() 1084 httpTest(t, nil, func(s *TestAgent) { 1085 // Create the job and register it twice 1086 job := mock.Job() 1087 regReq := structs.JobRegisterRequest{ 1088 Job: job, 1089 WriteRequest: structs.WriteRequest{ 1090 Region: "global", 1091 Namespace: structs.DefaultNamespace, 1092 }, 1093 } 1094 var regResp structs.JobRegisterResponse 1095 if err := s.Agent.RPC("Job.Register", ®Req, ®Resp); err != nil { 1096 t.Fatalf("err: %v", err) 1097 } 1098 1099 // Change the job to get a new version 1100 job.Datacenters = append(job.Datacenters, "foo") 1101 if err := s.Agent.RPC("Job.Register", ®Req, ®Resp); err != nil { 
1102 t.Fatalf("err: %v", err) 1103 } 1104 1105 args := structs.JobRevertRequest{ 1106 JobID: job.ID, 1107 JobVersion: 0, 1108 WriteRequest: structs.WriteRequest{ 1109 Region: "global", 1110 Namespace: structs.DefaultNamespace, 1111 }, 1112 } 1113 buf := encodeReq(args) 1114 1115 // Make the HTTP request 1116 req, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/revert", buf) 1117 if err != nil { 1118 t.Fatalf("err: %v", err) 1119 } 1120 respW := httptest.NewRecorder() 1121 1122 // Make the request 1123 obj, err := s.Server.JobSpecificRequest(respW, req) 1124 if err != nil { 1125 t.Fatalf("err: %v", err) 1126 } 1127 1128 // Check the response 1129 revertResp := obj.(structs.JobRegisterResponse) 1130 if revertResp.EvalID == "" { 1131 t.Fatalf("bad: %v", revertResp) 1132 } 1133 1134 // Check for the index 1135 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 1136 t.Fatalf("missing index") 1137 } 1138 }) 1139 } 1140 1141 func TestHTTP_JobStable(t *testing.T) { 1142 t.Parallel() 1143 httpTest(t, nil, func(s *TestAgent) { 1144 // Create the job and register it twice 1145 job := mock.Job() 1146 regReq := structs.JobRegisterRequest{ 1147 Job: job, 1148 WriteRequest: structs.WriteRequest{ 1149 Region: "global", 1150 Namespace: structs.DefaultNamespace, 1151 }, 1152 } 1153 var regResp structs.JobRegisterResponse 1154 if err := s.Agent.RPC("Job.Register", ®Req, ®Resp); err != nil { 1155 t.Fatalf("err: %v", err) 1156 } 1157 1158 if err := s.Agent.RPC("Job.Register", ®Req, ®Resp); err != nil { 1159 t.Fatalf("err: %v", err) 1160 } 1161 1162 args := structs.JobStabilityRequest{ 1163 JobID: job.ID, 1164 JobVersion: 0, 1165 Stable: true, 1166 WriteRequest: structs.WriteRequest{ 1167 Region: "global", 1168 Namespace: structs.DefaultNamespace, 1169 }, 1170 } 1171 buf := encodeReq(args) 1172 1173 // Make the HTTP request 1174 req, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/stable", buf) 1175 if err != nil { 1176 t.Fatalf("err: %v", err) 1177 } 1178 respW := 
httptest.NewRecorder() 1179 1180 // Make the request 1181 obj, err := s.Server.JobSpecificRequest(respW, req) 1182 if err != nil { 1183 t.Fatalf("err: %v", err) 1184 } 1185 1186 // Check the response 1187 stableResp := obj.(structs.JobStabilityResponse) 1188 if stableResp.Index == 0 { 1189 t.Fatalf("bad: %v", stableResp) 1190 } 1191 1192 // Check for the index 1193 if respW.HeaderMap.Get("X-Nomad-Index") == "" { 1194 t.Fatalf("missing index") 1195 } 1196 }) 1197 } 1198 1199 func TestJobs_ApiJobToStructsJob(t *testing.T) { 1200 apiJob := &api.Job{ 1201 Stop: helper.BoolToPtr(true), 1202 Region: helper.StringToPtr("global"), 1203 Namespace: helper.StringToPtr("foo"), 1204 ID: helper.StringToPtr("foo"), 1205 ParentID: helper.StringToPtr("lol"), 1206 Name: helper.StringToPtr("name"), 1207 Type: helper.StringToPtr("service"), 1208 Priority: helper.IntToPtr(50), 1209 AllAtOnce: helper.BoolToPtr(true), 1210 Datacenters: []string{"dc1", "dc2"}, 1211 Constraints: []*api.Constraint{ 1212 { 1213 LTarget: "a", 1214 RTarget: "b", 1215 Operand: "c", 1216 }, 1217 }, 1218 Affinities: []*api.Affinity{ 1219 { 1220 LTarget: "a", 1221 RTarget: "b", 1222 Operand: "c", 1223 Weight: helper.Int8ToPtr(50), 1224 }, 1225 }, 1226 Update: &api.UpdateStrategy{ 1227 Stagger: helper.TimeToPtr(1 * time.Second), 1228 MaxParallel: helper.IntToPtr(5), 1229 HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual), 1230 MinHealthyTime: helper.TimeToPtr(1 * time.Minute), 1231 HealthyDeadline: helper.TimeToPtr(3 * time.Minute), 1232 ProgressDeadline: helper.TimeToPtr(3 * time.Minute), 1233 AutoRevert: helper.BoolToPtr(false), 1234 Canary: helper.IntToPtr(1), 1235 }, 1236 Spreads: []*api.Spread{ 1237 { 1238 Attribute: "${meta.rack}", 1239 Weight: helper.Int8ToPtr(100), 1240 SpreadTarget: []*api.SpreadTarget{ 1241 { 1242 Value: "r1", 1243 Percent: 50, 1244 }, 1245 }, 1246 }, 1247 }, 1248 Periodic: &api.PeriodicConfig{ 1249 Enabled: helper.BoolToPtr(true), 1250 Spec: 
helper.StringToPtr("spec"),
			SpecType:        helper.StringToPtr("cron"),
			ProhibitOverlap: helper.BoolToPtr(true),
			TimeZone:        helper.StringToPtr("test zone"),
		},
		ParameterizedJob: &api.ParameterizedJobConfig{
			Payload:      "payload",
			MetaRequired: []string{"a", "b"},
			MetaOptional: []string{"c", "d"},
		},
		Payload: []byte("payload"),
		Meta: map[string]string{
			"foo": "bar",
		},
		TaskGroups: []*api.TaskGroup{
			{
				Name:  helper.StringToPtr("group1"),
				Count: helper.IntToPtr(5),
				Constraints: []*api.Constraint{
					{
						LTarget: "x",
						RTarget: "y",
						Operand: "z",
					},
				},
				Affinities: []*api.Affinity{
					{
						LTarget: "x",
						RTarget: "y",
						Operand: "z",
						Weight:  helper.Int8ToPtr(100),
					},
				},
				RestartPolicy: &api.RestartPolicy{
					Interval: helper.TimeToPtr(1 * time.Second),
					Attempts: helper.IntToPtr(5),
					Delay:    helper.TimeToPtr(10 * time.Second),
					Mode:     helper.StringToPtr("delay"),
				},
				ReschedulePolicy: &api.ReschedulePolicy{
					Interval:      helper.TimeToPtr(12 * time.Hour),
					Attempts:      helper.IntToPtr(5),
					DelayFunction: helper.StringToPtr("constant"),
					Delay:         helper.TimeToPtr(30 * time.Second),
					Unlimited:     helper.BoolToPtr(true),
					MaxDelay:      helper.TimeToPtr(20 * time.Minute),
				},
				Migrate: &api.MigrateStrategy{
					MaxParallel:     helper.IntToPtr(12),
					HealthCheck:     helper.StringToPtr("task_events"),
					MinHealthyTime:  helper.TimeToPtr(12 * time.Hour),
					HealthyDeadline: helper.TimeToPtr(12 * time.Hour),
				},
				Spreads: []*api.Spread{
					{
						Attribute: "${node.datacenter}",
						Weight:    helper.Int8ToPtr(100),
						SpreadTarget: []*api.SpreadTarget{
							{
								Value:   "dc1",
								Percent: 100,
							},
						},
					},
				},
				EphemeralDisk: &api.EphemeralDisk{
					SizeMB:  helper.IntToPtr(100),
					Sticky:  helper.BoolToPtr(true),
					Migrate: helper.BoolToPtr(true),
				},
				// Group-level update stanza: partially set so the
				// conversion must merge it with the job-level stanza.
				Update: &api.UpdateStrategy{
					HealthCheck:      helper.StringToPtr(structs.UpdateStrategyHealthCheck_Checks),
					MinHealthyTime:   helper.TimeToPtr(2 * time.Minute),
					HealthyDeadline:  helper.TimeToPtr(5 * time.Minute),
					ProgressDeadline: helper.TimeToPtr(5 * time.Minute),
					AutoRevert:       helper.BoolToPtr(true),
				},

				Meta: map[string]string{
					"key": "value",
				},
				Tasks: []*api.Task{
					{
						Name:   "task1",
						Leader: true,
						Driver: "docker",
						User:   "mary",
						Config: map[string]interface{}{
							"lol": "code",
						},
						Env: map[string]string{
							"hello": "world",
						},
						Constraints: []*api.Constraint{
							{
								LTarget: "x",
								RTarget: "y",
								Operand: "z",
							},
						},
						Affinities: []*api.Affinity{
							{
								LTarget: "a",
								RTarget: "b",
								Operand: "c",
								Weight:  helper.Int8ToPtr(50),
							},
						},

						Services: []*api.Service{
							{
								Id:         "id",
								Name:       "serviceA",
								Tags:       []string{"1", "2"},
								CanaryTags: []string{"3", "4"},
								PortLabel:  "foo",
								CheckRestart: &api.CheckRestart{
									Limit: 4,
									Grace: helper.TimeToPtr(11 * time.Second),
								},
								Checks: []api.ServiceCheck{
									{
										Id:            "hello",
										Name:          "bar",
										Type:          "http",
										Command:       "foo",
										Args:          []string{"a", "b"},
										Path:          "/check",
										Protocol:      "http",
										PortLabel:     "foo",
										AddressMode:   "driver",
										GRPCService:   "foo.Bar",
										GRPCUseTLS:    true,
										Interval:      4 * time.Second,
										Timeout:       2 * time.Second,
										InitialStatus: "ok",
										CheckRestart: &api.CheckRestart{
											Limit:          3,
											IgnoreWarnings: true,
										},
									},
									{
										Id:        "check2id",
										Name:      "check2",
										Type:      "tcp",
										PortLabel: "foo",
										Interval:  4 * time.Second,
										Timeout:   2 * time.Second,
									},
								},
							},
						},
						Resources: &api.Resources{
							CPU:      helper.IntToPtr(100),
							MemoryMB: helper.IntToPtr(10),
							Networks: []*api.NetworkResource{
								{
									IP:    "10.10.11.1",
									MBits: helper.IntToPtr(10),
									ReservedPorts: []api.Port{
										{
											Label: "http",
											Value: 80,
										},
									},
									DynamicPorts: []api.Port{
										{
											Label: "ssh",
											Value: 2000,
										},
									},
								},
							},
							Devices: []*api.RequestedDevice{
								{
									Name:  "nvidia/gpu",
									Count: helper.Uint64ToPtr(4),
									Constraints: []*api.Constraint{
										{
											LTarget: "x",
											RTarget: "y",
											Operand: "z",
										},
									},
									Affinities: []*api.Affinity{
										{
											LTarget: "a",
											RTarget: "b",
											Operand: "c",
											Weight:  helper.Int8ToPtr(50),
										},
									},
								},
								{
									Name:  "gpu",
									Count: nil,
								},
							},
						},
						Meta: map[string]string{
							"lol": "code",
						},
						KillTimeout: helper.TimeToPtr(10 * time.Second),
						KillSignal:  "SIGQUIT",
						LogConfig: &api.LogConfig{
							MaxFiles:      helper.IntToPtr(10),
							MaxFileSizeMB: helper.IntToPtr(100),
						},
						Artifacts: []*api.TaskArtifact{
							{
								GetterSource: helper.StringToPtr("source"),
								GetterOptions: map[string]string{
									"a": "b",
								},
								GetterMode:   helper.StringToPtr("dir"),
								RelativeDest: helper.StringToPtr("dest"),
							},
						},
						Vault: &api.Vault{
							Policies:     []string{"a", "b", "c"},
							Env:          helper.BoolToPtr(true),
							ChangeMode:   helper.StringToPtr("c"),
							ChangeSignal: helper.StringToPtr("sighup"),
						},
						Templates: []*api.Template{
							{
								SourcePath:   helper.StringToPtr("source"),
								DestPath:     helper.StringToPtr("dest"),
								EmbeddedTmpl: helper.StringToPtr("embedded"),
								ChangeMode:   helper.StringToPtr("change"),
								ChangeSignal: helper.StringToPtr("signal"),
								Splay:        helper.TimeToPtr(1 * time.Minute),
								Perms:        helper.StringToPtr("666"),
								LeftDelim:    helper.StringToPtr("abc"),
								RightDelim:   helper.StringToPtr("def"),
								Envvars:      helper.BoolToPtr(true),
								VaultGrace:   helper.TimeToPtr(3 * time.Second),
							},
						},
						DispatchPayload: &api.DispatchPayloadConfig{
							File: "fileA",
						},
					},
				},
			},
		},
		VaultToken:        helper.StringToPtr("token"),
		Status:            helper.StringToPtr("status"),
		StatusDescription: helper.StringToPtr("status_desc"),
		Version:           helper.Uint64ToPtr(10),
		CreateIndex:       helper.Uint64ToPtr(1),
		ModifyIndex:       helper.Uint64ToPtr(3),
		JobModifyIndex:    helper.Uint64ToPtr(5),
	}

	// expected is the structs.Job the conversion should produce from apiJob.
	// Note: Status/indexes are server-assigned and therefore absent here.
	expected := &structs.Job{
		Stop:        true,
		Region:      "global",
		Namespace:   "foo",
		ID:          "foo",
		ParentID:    "lol",
		Name:        "name",
		Type:        "service",
		Priority:    50,
		AllAtOnce:   true,
		Datacenters: []string{"dc1", "dc2"},
		Constraints: []*structs.Constraint{
			{
				LTarget: "a",
				RTarget: "b",
				Operand: "c",
			},
		},
		Affinities: []*structs.Affinity{
			{
				LTarget: "a",
				RTarget: "b",
				Operand: "c",
				Weight:  50,
			},
		},
		Spreads: []*structs.Spread{
			{
				Attribute: "${meta.rack}",
				Weight:    100,
				SpreadTarget: []*structs.SpreadTarget{
					{
						Value:   "r1",
						Percent: 50,
					},
				},
			},
		},
		Update: structs.UpdateStrategy{
			Stagger:     1 * time.Second,
			MaxParallel: 5,
		},
		Periodic: &structs.PeriodicConfig{
			Enabled:         true,
			Spec:            "spec",
			SpecType:        "cron",
			ProhibitOverlap: true,
			TimeZone:        "test zone",
		},
		ParameterizedJob: &structs.ParameterizedJobConfig{
			Payload:      "payload",
			MetaRequired: []string{"a", "b"},
			MetaOptional: []string{"c", "d"},
		},
		Payload: []byte("payload"),
		Meta: map[string]string{
			"foo": "bar",
		},
		TaskGroups: []*structs.TaskGroup{
			{
				Name:  "group1",
				Count: 5,
				Constraints: []*structs.Constraint{
					{
						LTarget: "x",
						RTarget: "y",
						Operand: "z",
					},
				},
				Affinities: []*structs.Affinity{
					{
						LTarget: "x",
						RTarget: "y",
						Operand: "z",
						Weight:  100,
					},
				},
				RestartPolicy: &structs.RestartPolicy{
					Interval: 1 * time.Second,
					Attempts: 5,
					Delay:    10 * time.Second,
					Mode:     "delay",
				},
				Spreads: []*structs.Spread{
					{
						Attribute: "${node.datacenter}",
						Weight:    100,
						SpreadTarget: []*structs.SpreadTarget{
							{
								Value:   "dc1",
								Percent: 100,
							},
						},
					},
				},
				ReschedulePolicy: &structs.ReschedulePolicy{
					Interval:      12 * time.Hour,
					Attempts:      5,
					DelayFunction: "constant",
					Delay:         30 * time.Second,
					Unlimited:     true,
					MaxDelay:      20 * time.Minute,
				},
				Migrate: &structs.MigrateStrategy{
					MaxParallel:     12,
					HealthCheck:     "task_events",
					MinHealthyTime:  12 * time.Hour,
					HealthyDeadline: 12 * time.Hour,
				},
				EphemeralDisk: &structs.EphemeralDisk{
					SizeMB:  100,
					Sticky:  true,
					Migrate: true,
				},
				// Merged result of the job- and group-level update stanzas.
				Update: &structs.UpdateStrategy{
					Stagger:          1 * time.Second,
					MaxParallel:      5,
					HealthCheck:      structs.UpdateStrategyHealthCheck_Checks,
					MinHealthyTime:   2 * time.Minute,
					HealthyDeadline:  5 * time.Minute,
					ProgressDeadline: 5 * time.Minute,
					AutoRevert:       true,
					AutoPromote:      false,
					Canary:           1,
				},
				Meta: map[string]string{
					"key": "value",
				},
				Tasks: []*structs.Task{
					{
						Name:   "task1",
						Driver: "docker",
						Leader: true,
						User:   "mary",
						Config: map[string]interface{}{
							"lol": "code",
						},
						Constraints: []*structs.Constraint{
							{
								LTarget: "x",
								RTarget: "y",
								Operand: "z",
							},
						},
						Affinities: []*structs.Affinity{
							{
								LTarget: "a",
								RTarget: "b",
								Operand: "c",
								Weight:  50,
							},
						},
						Env: map[string]string{
							"hello": "world",
						},
						Services: []*structs.Service{
							{
								Name:        "serviceA",
								Tags:        []string{"1", "2"},
								CanaryTags:  []string{"3", "4"},
								PortLabel:   "foo",
								AddressMode: "auto",
								Checks: []*structs.ServiceCheck{
									{
										Name:          "bar",
										Type:          "http",
										Command:       "foo",
										Args:          []string{"a", "b"},
										Path:          "/check",
										Protocol:      "http",
										PortLabel:     "foo",
										AddressMode:   "driver",
										Interval:      4 * time.Second,
										Timeout:       2 * time.Second,
										InitialStatus: "ok",
										GRPCService:   "foo.Bar",
										GRPCUseTLS:    true,
										CheckRestart: &structs.CheckRestart{
											Limit:          3,
											Grace:          11 * time.Second,
											IgnoreWarnings: true,
										},
									},
									{
										Name:      "check2",
										Type:      "tcp",
										PortLabel: "foo",
										Interval:  4 * time.Second,
										Timeout:   2 * time.Second,
										// Inherited from the service-level CheckRestart.
										CheckRestart: &structs.CheckRestart{
											Limit: 4,
											Grace: 11 * time.Second,
										},
									},
								},
							},
						},
						Resources: &structs.Resources{
							CPU:      100,
							MemoryMB: 10,
							Networks: []*structs.NetworkResource{
								{
									IP:    "10.10.11.1",
									MBits: 10,
									ReservedPorts: []structs.Port{
										{
											Label: "http",
											Value: 80,
										},
									},
									DynamicPorts: []structs.Port{
										{
											Label: "ssh",
											Value: 2000,
										},
									},
								},
							},
							Devices: []*structs.RequestedDevice{
								{
									Name:  "nvidia/gpu",
									Count: 4,
									Constraints: []*structs.Constraint{
										{
											LTarget: "x",
											RTarget: "y",
											Operand: "z",
										},
									},
									Affinities: []*structs.Affinity{
										{
											LTarget: "a",
											RTarget: "b",
											Operand: "c",
											Weight:  50,
										},
									},
								},
								{
									Name: "gpu",
									// A nil api Count defaults to 1.
									Count: 1,
								},
							},
						},
						Meta: map[string]string{
							"lol": "code",
						},
						KillTimeout: 10 * time.Second,
						KillSignal:  "SIGQUIT",
						LogConfig: &structs.LogConfig{
							MaxFiles:      10,
							MaxFileSizeMB: 100,
						},
						Artifacts: []*structs.TaskArtifact{
							{
								GetterSource: "source",
								GetterOptions: map[string]string{
									"a": "b",
								},
								GetterMode:   "dir",
								RelativeDest: "dest",
							},
						},
						Vault: &structs.Vault{
							Policies:     []string{"a", "b", "c"},
							Env:          true,
							ChangeMode:   "c",
							ChangeSignal: "sighup",
						},
						Templates: []*structs.Template{
							{
								SourcePath:   "source",
								DestPath:     "dest",
								EmbeddedTmpl: "embedded",
								ChangeMode:   "change",
								ChangeSignal: "SIGNAL",
								Splay:        1 * time.Minute,
								Perms:        "666",
								LeftDelim:    "abc",
								RightDelim:   "def",
								Envvars:      true,
								VaultGrace:   3 * time.Second,
							},
						},
						DispatchPayload: &structs.DispatchPayloadConfig{
							File: "fileA",
						},
					},
				},
			},
		},

		VaultToken: "token",
	}

	structsJob := ApiJobToStructJob(apiJob)

	if diff := pretty.Diff(expected, structsJob); len(diff) > 0 {
		t.Fatalf("bad:\n%s", strings.Join(diff, "\n"))
	}

	// Second pass: a system-type job (no update/periodic/service features).
	systemAPIJob := &api.Job{
		Stop:        helper.BoolToPtr(true),
		Region:      helper.StringToPtr("global"),
		Namespace:   helper.StringToPtr("foo"),
		ID:          helper.StringToPtr("foo"),
		ParentID:    helper.StringToPtr("lol"),
		Name:        helper.StringToPtr("name"),
		Type:        helper.StringToPtr("system"),
		Priority:    helper.IntToPtr(50),
		AllAtOnce:   helper.BoolToPtr(true),
		Datacenters: []string{"dc1", "dc2"},
		Constraints: []*api.Constraint{
			{
				LTarget: "a",
				RTarget: "b",
				Operand: "c",
			},
		},
		TaskGroups: []*api.TaskGroup{
			{
				Name:  helper.StringToPtr("group1"),
				Count: helper.IntToPtr(5),
				Constraints: []*api.Constraint{
					{
						LTarget: "x",
						RTarget: "y",
						Operand: "z",
					},
				},
				RestartPolicy: &api.RestartPolicy{
					Interval: helper.TimeToPtr(1 * time.Second),
					Attempts: helper.IntToPtr(5),
					Delay:    helper.TimeToPtr(10 * time.Second),
					Mode:     helper.StringToPtr("delay"),
				},
				EphemeralDisk: &api.EphemeralDisk{
					SizeMB:  helper.IntToPtr(100),
					Sticky:  helper.BoolToPtr(true),
					Migrate: helper.BoolToPtr(true),
				},
				Meta: map[string]string{
					"key": "value",
				},
				Tasks: []*api.Task{
					{
						Name:   "task1",
						Leader: true,
						Driver: "docker",
						User:   "mary",
						Config: map[string]interface{}{
							"lol": "code",
						},
						Env: map[string]string{
							"hello": "world",
						},
						Constraints: []*api.Constraint{
							{
								LTarget: "x",
								RTarget: "y",
								Operand: "z",
							},
						},
						Resources: &api.Resources{
							CPU:      helper.IntToPtr(100),
							MemoryMB: helper.IntToPtr(10),
							Networks: []*api.NetworkResource{
								{
									IP:    "10.10.11.1",
									MBits: helper.IntToPtr(10),
									ReservedPorts: []api.Port{
										{
											Label: "http",
											Value: 80,
										},
									},
									DynamicPorts: []api.Port{
										{
											Label: "ssh",
											Value: 2000,
										},
									},
								},
							},
						},
						Meta: map[string]string{
							"lol": "code",
						},
						KillTimeout: helper.TimeToPtr(10 * time.Second),
						KillSignal:  "SIGQUIT",
						LogConfig: &api.LogConfig{
							MaxFiles:      helper.IntToPtr(10),
							MaxFileSizeMB: helper.IntToPtr(100),
						},
						Artifacts: []*api.TaskArtifact{
							{
								GetterSource: helper.StringToPtr("source"),
								GetterOptions: map[string]string{
									"a": "b",
								},
								GetterMode:   helper.StringToPtr("dir"),
								RelativeDest: helper.StringToPtr("dest"),
							},
						},
						DispatchPayload: &api.DispatchPayloadConfig{
							File: "fileA",
						},
					},
				},
			},
		},
		Status:            helper.StringToPtr("status"),
		StatusDescription: helper.StringToPtr("status_desc"),
		Version:           helper.Uint64ToPtr(10),
		CreateIndex:       helper.Uint64ToPtr(1),
		ModifyIndex:       helper.Uint64ToPtr(3),
		JobModifyIndex:    helper.Uint64ToPtr(5),
	}

	expectedSystemJob := &structs.Job{
		Stop:        true,
		Region:      "global",
		Namespace:   "foo",
		ID:          "foo",
		ParentID:    "lol",
		Name:        "name",
		Type:        "system",
		Priority:    50,
		AllAtOnce:   true,
		Datacenters: []string{"dc1", "dc2"},
		Constraints: []*structs.Constraint{
			{
				LTarget: "a",
				RTarget: "b",
				Operand: "c",
			},
		},
		TaskGroups: []*structs.TaskGroup{
			{
				Name:  "group1",
				Count: 5,
				Constraints: []*structs.Constraint{
					{
						LTarget: "x",
						RTarget: "y",
						Operand: "z",
					},
				},
				RestartPolicy: &structs.RestartPolicy{
					Interval: 1 * time.Second,
					Attempts: 5,
					Delay:    10 * time.Second,
					Mode:     "delay",
				},
				EphemeralDisk: &structs.EphemeralDisk{
					SizeMB:  100,
					Sticky:  true,
					Migrate: true,
				},
				Meta: map[string]string{
					"key": "value",
				},
				Tasks: []*structs.Task{
					{
						Name:   "task1",
						Driver: "docker",
						Leader: true,
						User:   "mary",
						Config: map[string]interface{}{
							"lol": "code",
						},
						Constraints: []*structs.Constraint{
							{
								LTarget: "x",
								RTarget: "y",
								Operand: "z",
							},
						},
						Env: map[string]string{
							"hello": "world",
						},
						Resources: &structs.Resources{
							CPU:      100,
							MemoryMB: 10,
							Networks: []*structs.NetworkResource{
								{
									IP:    "10.10.11.1",
									MBits: 10,
									ReservedPorts: []structs.Port{
										{
											Label: "http",
											Value: 80,
										},
									},
									DynamicPorts: []structs.Port{
										{
											Label: "ssh",
											Value: 2000,
										},
									},
								},
							},
						},
						Meta: map[string]string{
							"lol": "code",
						},
						KillTimeout: 10 * time.Second,
						KillSignal:  "SIGQUIT",
						LogConfig: &structs.LogConfig{
							MaxFiles:      10,
							MaxFileSizeMB: 100,
						},
						Artifacts: []*structs.TaskArtifact{
							{
								GetterSource: "source",
								GetterOptions: map[string]string{
									"a": "b",
								},
								GetterMode:   "dir",
								RelativeDest: "dest",
							},
						},
						DispatchPayload: &structs.DispatchPayloadConfig{
							File: "fileA",
						},
					},
				},
			},
		},
	}

	systemStructsJob := ApiJobToStructJob(systemAPIJob)

	if diff := pretty.Diff(expectedSystemJob, systemStructsJob); len(diff) > 0 {
		t.Fatalf("bad:\n%s",
strings.Join(diff, "\n")) 2040 } 2041 } 2042 2043 func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) { 2044 apiJob := &api.Job{ 2045 Update: &api.UpdateStrategy{ 2046 Stagger: helper.TimeToPtr(1 * time.Second), 2047 MaxParallel: helper.IntToPtr(5), 2048 HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual), 2049 MinHealthyTime: helper.TimeToPtr(1 * time.Minute), 2050 HealthyDeadline: helper.TimeToPtr(3 * time.Minute), 2051 ProgressDeadline: helper.TimeToPtr(3 * time.Minute), 2052 AutoRevert: helper.BoolToPtr(false), 2053 AutoPromote: nil, 2054 Canary: helper.IntToPtr(1), 2055 }, 2056 TaskGroups: []*api.TaskGroup{ 2057 { 2058 Update: &api.UpdateStrategy{ 2059 Canary: helper.IntToPtr(2), 2060 AutoRevert: helper.BoolToPtr(true), 2061 }, 2062 }, { 2063 Update: &api.UpdateStrategy{ 2064 Canary: helper.IntToPtr(3), 2065 AutoPromote: helper.BoolToPtr(true), 2066 }, 2067 }, 2068 }, 2069 } 2070 2071 structsJob := ApiJobToStructJob(apiJob) 2072 2073 // Update has been moved from job down to the groups 2074 jobUpdate := structs.UpdateStrategy{ 2075 Stagger: 1000000000, 2076 MaxParallel: 5, 2077 HealthCheck: "", 2078 MinHealthyTime: 0, 2079 HealthyDeadline: 0, 2080 ProgressDeadline: 0, 2081 AutoRevert: false, 2082 AutoPromote: false, 2083 Canary: 0, 2084 } 2085 2086 // But the groups inherit settings from the job update 2087 group1 := structs.UpdateStrategy{ 2088 Stagger: 1000000000, 2089 MaxParallel: 5, 2090 HealthCheck: "manual", 2091 MinHealthyTime: 60000000000, 2092 HealthyDeadline: 180000000000, 2093 ProgressDeadline: 180000000000, 2094 AutoRevert: true, 2095 AutoPromote: false, 2096 Canary: 2, 2097 } 2098 2099 group2 := structs.UpdateStrategy{ 2100 Stagger: 1000000000, 2101 MaxParallel: 5, 2102 HealthCheck: "manual", 2103 MinHealthyTime: 60000000000, 2104 HealthyDeadline: 180000000000, 2105 ProgressDeadline: 180000000000, 2106 AutoRevert: false, 2107 AutoPromote: true, 2108 Canary: 3, 2109 } 2110 2111 require.Equal(t, jobUpdate, structsJob.Update) 
2112 require.Equal(t, group1, *structsJob.TaskGroups[0].Update) 2113 require.Equal(t, group2, *structsJob.TaskGroups[1].Update) 2114 } 2115 2116 // TestHTTP_JobValidate_SystemMigrate asserts that a system job with a migrate 2117 // stanza fails to validate but does not panic (see #5477). 2118 func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { 2119 t.Parallel() 2120 httpTest(t, nil, func(s *TestAgent) { 2121 // Create the job 2122 job := &api.Job{ 2123 Region: helper.StringToPtr("global"), 2124 Datacenters: []string{"dc1"}, 2125 ID: helper.StringToPtr("systemmigrate"), 2126 Name: helper.StringToPtr("systemmigrate"), 2127 TaskGroups: []*api.TaskGroup{ 2128 {Name: helper.StringToPtr("web")}, 2129 }, 2130 2131 // System job... 2132 Type: helper.StringToPtr("system"), 2133 2134 // ...with an empty migrate stanza 2135 Migrate: &api.MigrateStrategy{}, 2136 } 2137 2138 args := api.JobValidateRequest{ 2139 Job: job, 2140 WriteRequest: api.WriteRequest{Region: "global"}, 2141 } 2142 buf := encodeReq(args) 2143 2144 // Make the HTTP request 2145 req, err := http.NewRequest("PUT", "/v1/validate/job", buf) 2146 require.NoError(t, err) 2147 respW := httptest.NewRecorder() 2148 2149 // Make the request 2150 obj, err := s.Server.ValidateJobRequest(respW, req) 2151 require.NoError(t, err) 2152 2153 // Check the response 2154 resp := obj.(structs.JobValidateResponse) 2155 require.Contains(t, resp.Error, `Job type "system" does not allow migrate block`) 2156 }) 2157 }