github.com/shashidharatd/test-infra@v0.0.0-20171006011030-71304e1ca560/prow/plank/controller_test.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package plank

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"
	"testing"
	"text/template"
	"time"

	"github.com/bwmarrin/snowflake"

	"k8s.io/test-infra/prow/config"
	"k8s.io/test-infra/prow/github"
	"k8s.io/test-infra/prow/kube"
	"k8s.io/test-infra/prow/pjutil"
)

// fca is a fake config agent that serves a fixed config under a mutex.
type fca struct {
	sync.Mutex
	c *config.Config
}

func newFakeConfigAgent(t *testing.T, maxConcurrency int) *fca {
	presubmits := []config.Presubmit{
		{
			Name: "test-bazel-build",
			RunAfterSuccess: []config.Presubmit{
				{
					Name: "test-kubeadm-cloud",
					RunIfChanged: "^(cmd/kubeadm|build/debs).*$",
				},
			},
		},
		{
			Name: "test-e2e",
			RunAfterSuccess: []config.Presubmit{
				{
					Name: "push-image",
				},
			},
		},
		{
			Name: "test-bazel-test",
		},
	}
	if err := config.SetRegexes(presubmits); err != nil {
		t.Fatal(err)
	}
	presubmitMap := map[string][]config.Presubmit{
		"kubernetes/kubernetes": presubmits,
	}

	return &fca{
		c: &config.Config{
			Plank: config.Plank{
				JobURLTemplate: template.Must(template.New("test").Parse("{{.Metadata.Name}}/{{.Status.State}}")),
				MaxConcurrency: maxConcurrency,
			},
			Presubmits: presubmitMap,
		},
	}
}

func (f *fca) Config() *config.Config {
	f.Lock()
	defer f.Unlock()
	return f.c
}

// fkc is a fake kube client that records the prowjobs it stores and the pods
// it creates and deletes.
type fkc struct {
	sync.Mutex
	prowjobs    []kube.ProwJob
	pods        []kube.Pod
	deletedPods []kube.Pod
	err         error
}

func (f *fkc) CreateProwJob(pj kube.ProwJob) (kube.ProwJob, error) {
	f.Lock()
	defer f.Unlock()
	f.prowjobs = append(f.prowjobs, pj)
	return pj, nil
}

func (f *fkc) ListProwJobs(map[string]string) ([]kube.ProwJob, error) {
	f.Lock()
	defer f.Unlock()
	return f.prowjobs, nil
}

func (f *fkc) ReplaceProwJob(name string, job kube.ProwJob) (kube.ProwJob, error) {
	f.Lock()
	defer f.Unlock()
	for i := range f.prowjobs {
		if f.prowjobs[i].Metadata.Name == name {
			f.prowjobs[i] = job
			return job, nil
		}
	}
	return kube.ProwJob{}, fmt.Errorf("did not find prowjob %s", name)
}

func (f *fkc) CreatePod(pod kube.Pod) (kube.Pod, error) {
	f.Lock()
	defer f.Unlock()
	if f.err != nil {
		return kube.Pod{}, f.err
	}
	f.pods = append(f.pods, pod)
	return pod, nil
}

func (f *fkc) ListPods(map[string]string) ([]kube.Pod, error) {
	f.Lock()
	defer f.Unlock()
	return f.pods, nil
}

func (f *fkc) DeletePod(name string) error {
	f.Lock()
	defer f.Unlock()
	for i := range f.pods {
		if f.pods[i].Metadata.Name == name {
			f.deletedPods = append(f.deletedPods, f.pods[i])
			f.pods = append(f.pods[:i], f.pods[i+1:]...)
			return nil
		}
	}
	return fmt.Errorf("did not find pod %s", name)
}

// fghc is a fake GitHub client that returns canned pull request changes.
type fghc struct {
	sync.Mutex
	changes []github.PullRequestChange
	err     error
}

func (f *fghc) GetPullRequestChanges(org, repo string, number int) ([]github.PullRequestChange, error) {
	f.Lock()
	defer f.Unlock()
	return f.changes, f.err
}

func (f *fghc) BotName() (string, error)                                  { return "bot", nil }
func (f *fghc) CreateStatus(org, repo, ref string, s github.Status) error { return nil }
func (f *fghc) ListIssueComments(org, repo string, number int) ([]github.IssueComment, error) {
	return nil, nil
}
func (f *fghc) CreateComment(org, repo string, number int, comment string) error { return nil }
func (f *fghc) DeleteComment(org, repo string, ID int) error                     { return nil }
func (f *fghc) EditComment(org, repo string, ID int, comment string) error       { return nil }

func TestTerminateDupes(t *testing.T) {
	now := time.Now()
	var testcases = []struct {
		name string

		allowCancellations bool
		pjs                []kube.ProwJob
		pm                 map[string]kube.Pod

		terminatedPJs  map[string]struct{}
		terminatedPods map[string]struct{}
	}{
		{
			name: "terminate all duplicates",

			pjs: []kube.ProwJob{
				{
					Metadata: kube.ObjectMeta{Name: "newest"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Minute),
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "old"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Hour),
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "older"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-2 * time.Hour),
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "complete"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime:      now.Add(-3 * time.Hour),
						CompletionTime: now,
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "newest_j2"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j2",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Minute),
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "old_j2"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j2",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Hour),
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "old_j3"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j3",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Hour),
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "new_j3"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j3",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Minute),
					},
				},
			},

			terminatedPJs: map[string]struct{}{
				"old": {}, "older": {}, "old_j2": {}, "old_j3": {},
			},
		},
		{
			name: "should also terminate pods",

			allowCancellations: true,
			pjs: []kube.ProwJob{
				{
					Metadata: kube.ObjectMeta{Name: "newest"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Minute),
					},
				},
				{
					Metadata: kube.ObjectMeta{Name: "old"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: now.Add(-time.Hour),
					},
				},
			},
			pm: map[string]kube.Pod{
				"newest": {Metadata: kube.ObjectMeta{Name: "newest"}},
				"old":    {Metadata: kube.ObjectMeta{Name: "old"}},
			},

			terminatedPJs: map[string]struct{}{
				"old": {},
			},
			terminatedPods: map[string]struct{}{
				"old": {},
			},
		},
	}

	for _, tc := range testcases {
		var pods []kube.Pod
		for _, pod := range tc.pm {
			pods = append(pods, pod)
		}
		fkc := &fkc{pods: pods, prowjobs: tc.pjs}
		fca := &fca{
			c: &config.Config{
				Plank: config.Plank{
					AllowCancellations: tc.allowCancellations,
				},
			},
		}
		c := Controller{kc: fkc, pkc: fkc, ca: fca}

		if err := c.terminateDupes(fkc.prowjobs, tc.pm); err != nil {
			t.Fatalf("Error terminating dupes: %v", err)
		}

		for terminatedName := range tc.terminatedPJs {
			terminated := false
			for _, pj := range fkc.prowjobs {
				if pj.Metadata.Name != terminatedName {
					continue
				}
				if !pj.Complete() {
					t.Errorf("expected prowjob %q to be terminated!", terminatedName)
				} else {
					terminated = true
				}
			}
			if !terminated {
				t.Errorf("expected prowjob %q to be terminated, got %+v", terminatedName, fkc.prowjobs)
			}
		}
		for terminatedName := range tc.terminatedPods {
			terminated := false
			for _, deleted := range fkc.deletedPods {
				if deleted.Metadata.Name == terminatedName {
					terminated = true
				}
			}
			if !terminated {
				t.Errorf("expected pod %q to be terminated, got terminated: %v", terminatedName, fkc.deletedPods)
			}
		}

	}
}

// handleTot fakes the tot build-number service by always returning 42.
func handleTot(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "42")
}

func TestSyncNonPendingJobs(t *testing.T) {
	var testcases = []struct {
		name string

		pj             kube.ProwJob
		pendingJobs    map[string]int
		maxConcurrency int
		pods           []kube.Pod
		podErr         error

		expectedState      kube.ProwJobState
		expectedPodHasName bool
		expectedNumPods    int
		expectedComplete   bool
		expectedCreatedPJs int
		expectedReport     bool
		expectedURL        string
		expectedBuildID    string
		expectError        bool
	}{
		{
			name: "completed prow job",
			pj: kube.ProwJob{
				Status: kube.ProwJobStatus{
					CompletionTime: time.Now(),
					State:          kube.FailureState,
				},
			},
			expectedState:    kube.FailureState,
			expectedComplete: true,
		},
		{
			name: "completed prow job, missing pod",
			pj: kube.ProwJob{
				Status: kube.ProwJobStatus{
					CompletionTime: time.Now(),
					State:          kube.FailureState,
					PodName:        "boop-41",
				},
			},
			expectedState:    kube.FailureState,
			expectedNumPods:  0,
			expectedComplete: true,
		},
		{
			name: "start new pod",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "blabla",
				},
				Spec: kube.ProwJobSpec{
					Job:  "boop",
					Type: kube.PeriodicJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			expectedState: kube.PendingState,
			expectedPodHasName: true,
			expectedNumPods:    1,
			expectedReport:     true,
			expectedURL:        "blabla/pending",
		},
		{
			name: "pod with a max concurrency of 1",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:            "same",
					MaxConcurrency: 1,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pendingJobs: map[string]int{
				"same": 1,
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "same-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodRunning,
					},
				},
			},
			expectedState:   kube.TriggeredState,
			expectedNumPods: 1,
		},
		{
			name: "do not exceed global maxconcurrency",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "beer",
				},
				Spec: kube.ProwJobSpec{
					Job:  "same",
					Type: kube.PeriodicJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			maxConcurrency: 20,
			pendingJobs:    map[string]int{"motherearth": 10, "allagash": 8, "krusovice": 2},
			expectedState:  kube.TriggeredState,
		},
		{
			name: "global maxconcurrency allows new jobs when possible",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "beer",
				},
				Spec: kube.ProwJobSpec{
					Job:  "same",
					Type: kube.PeriodicJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			maxConcurrency:  21,
			pendingJobs:     map[string]int{"motherearth": 10, "allagash": 8, "krusovice": 2},
			expectedState:   kube.PendingState,
			expectedNumPods: 1,
			expectedReport:  true,
			expectedURL:     "beer/pending",
		},
		{
			name: "unprocessable prow job",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "boop",
					Type: kube.PeriodicJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			podErr:           kube.NewUnprocessableEntityError(errors.New("no way jose")),
			expectedState:    kube.ErrorState,
			expectedComplete: true,
			expectedReport:   true,
		},
		{
			name: "conflict error starting pod",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "boop",
					Type: kube.PeriodicJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			podErr:        kube.NewConflictError(errors.New("no way jose")),
			expectedState: kube.TriggeredState,
			expectError:   true,
		},
		{
			name: "unknown error starting pod",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "boop",
					Type: kube.PeriodicJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			podErr:        errors.New("no way unknown jose"),
			expectedState: kube.TriggeredState,
			expectError:   true,
		},
		{
			name: "running pod, failed prowjob update",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "foo",
				},
				Spec: kube.ProwJobSpec{
					Job:  "boop",
					Type: kube.PeriodicJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "foo",
					},
					Spec: kube.PodSpec{
						Containers: []kube.Container{
							{
								Env: []kube.EnvVar{
									{
										Name:  "BUILD_NUMBER",
										Value: "0987654321",
									},
								},
							},
						},
					},
					Status: kube.PodStatus{
						Phase: kube.PodRunning,
					},
				},
			},
			expectedState:   kube.PendingState,
			expectedNumPods: 1,
			expectedReport:  true,
			expectedURL:     "foo/pending",
			expectedBuildID: "0987654321",
		},
	}
	for _, tc := range testcases {
		totServ := httptest.NewServer(http.HandlerFunc(handleTot))
		defer totServ.Close()
		pm := make(map[string]kube.Pod)
		for i := range tc.pods {
			pm[tc.pods[i].Metadata.Name] = tc.pods[i]
		}
		fc := &fkc{
			prowjobs: []kube.ProwJob{tc.pj},
		}
		fpc := &fkc{
			pods: tc.pods,
			err:  tc.podErr,
		}
		c := Controller{
			kc:          fc,
			pkc:         fpc,
			ca:          newFakeConfigAgent(t, tc.maxConcurrency),
			totURL:      totServ.URL,
			pendingJobs: make(map[string]int),
		}
		if tc.pendingJobs != nil {
			c.pendingJobs = tc.pendingJobs
		}

		reports := make(chan kube.ProwJob, 100)
		if err := c.syncNonPendingJob(tc.pj, pm, reports); (err != nil) != tc.expectError {
			if tc.expectError {
				t.Errorf("for case %q expected an error, but got none", tc.name)
			} else {
				t.Errorf("for case %q got an unexpected error: %v", tc.name, err)
			}
			continue
		}
		close(reports)

		actual := fc.prowjobs[0]
		if actual.Status.State != tc.expectedState {
			t.Errorf("for case %q got state %v", tc.name, actual.Status.State)
		}
		if (actual.Status.PodName == "") && tc.expectedPodHasName {
			t.Errorf("for case %q got no pod name, expected one", tc.name)
		}
		if len(fpc.pods) != tc.expectedNumPods {
			t.Errorf("for case %q got %d pods", tc.name, len(fpc.pods))
		}
		if actual.Complete() != tc.expectedComplete {
			t.Errorf("for case %q got wrong completion", tc.name)
		}
		if len(fc.prowjobs) != tc.expectedCreatedPJs+1 {
			t.Errorf("for case %q got %d created prowjobs", tc.name, len(fc.prowjobs)-1)
		}
		if tc.expectedReport && len(reports) != 1 {
			t.Errorf("for case %q wanted one report but got %d", tc.name, len(reports))
		}
		if !tc.expectedReport && len(reports) != 0 {
			t.Errorf("for case %q did not want any reports but got %d", tc.name, len(reports))
		}
		if tc.expectedReport {
			r := <-reports

			if got, want := r.Status.URL, tc.expectedURL; got != want {
				t.Errorf("for case %q, report.Status.URL: got %q, want %q", tc.name, got, want)
			}
			if got, want := r.Status.BuildID, tc.expectedBuildID; want != "" && got != want {
				t.Errorf("for case %q, report.Status.BuildID: got %q, want %q", tc.name, got, want)
			}
		}
	}
}

func TestSyncPendingJob(t *testing.T) {
	var testcases = []struct {
		name string

		pj   kube.ProwJob
		pods []kube.Pod
		err  error

		expectedState      kube.ProwJobState
		expectedNumPods    int
		expectedComplete   bool
		expectedCreatedPJs int
		expectedReport     bool
		expectedURL        string
	}{
		{
			name: "reset when pod goes missing",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "boop-41",
				},
				Spec: kube.ProwJobSpec{
					Type: kube.PostsubmitJob,
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-41",
				},
			},
			expectedState:   kube.PendingState,
			expectedReport:  true,
			expectedNumPods: 1,
			expectedURL:     "boop-41/pending",
		},
		{
			name: "delete pod in unknown state",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "boop-41",
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-41",
				},
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "boop-41",
					},
					Status: kube.PodStatus{
						Phase: kube.PodUnknown,
					},
				},
			},
			expectedState:   kube.PendingState,
			expectedNumPods: 0,
		},
		{
			name: "succeeded pod",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					Type:            kube.BatchJob,
					RunAfterSuccess: []kube.ProwJobSpec{{}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodSucceeded,
					},
				},
			},
			expectedComplete:   true,
			expectedState:      kube.SuccessState,
			expectedNumPods:    1,
			expectedCreatedPJs: 1,
			expectedReport:     true,
			expectedURL:        "boop-42/success",
		},
		{
			name: "failed pod",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					Type: kube.PresubmitJob,
					Refs: kube.Refs{
						Org: "kubernetes", Repo: "kubernetes",
						BaseRef: "baseref", BaseSHA: "basesha",
						Pulls: []kube.Pull{{Number: 100, Author: "me", SHA: "sha"}},
					},
					RunAfterSuccess: []kube.ProwJobSpec{{}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodFailed,
					},
				},
			},
			expectedComplete: true,
			expectedState:    kube.FailureState,
			expectedNumPods:  1,
			expectedReport:   true,
			expectedURL:      "boop-42/failure",
		},
		{
			name: "delete evicted pod",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "boop-42",
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase:  kube.PodFailed,
						Reason: kube.Evicted,
					},
				},
			},
			expectedComplete: false,
			expectedState:    kube.PendingState,
			expectedNumPods:  0,
		},
		{
			name: "running pod",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					RunAfterSuccess: []kube.ProwJobSpec{{}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodRunning,
					},
				},
			},
			expectedState:   kube.PendingState,
			expectedNumPods: 1,
		},
		{
			name: "pod changes url status",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					RunAfterSuccess: []kube.ProwJobSpec{{}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
					URL:     "boop-42/pending",
				},
			},
			pods: []kube.Pod{
				{
					Metadata: kube.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodSucceeded,
					},
				},
			},
			expectedComplete:   true,
			expectedState:      kube.SuccessState,
			expectedNumPods:    1,
			expectedCreatedPJs: 1,
			expectedReport:     true,
			expectedURL:        "boop-42/success",
		},
		{
			name: "unprocessable prow job",
			pj: kube.ProwJob{
				Metadata: kube.ObjectMeta{
					Name: "jose",
				},
				Spec: kube.ProwJobSpec{
					Job:  "boop",
					Type: kube.PostsubmitJob,
				},
				Status: kube.ProwJobStatus{
					State: kube.PendingState,
				},
			},
			err:              kube.NewUnprocessableEntityError(errors.New("no way jose")),
			expectedState:    kube.ErrorState,
			expectedComplete: true,
			expectedReport: true,
			expectedURL:    "jose/error",
		},
	}
	for _, tc := range testcases {
		totServ := httptest.NewServer(http.HandlerFunc(handleTot))
		defer totServ.Close()
		pm := make(map[string]kube.Pod)
		for i := range tc.pods {
			pm[tc.pods[i].Metadata.Name] = tc.pods[i]
		}
		fc := &fkc{
			prowjobs: []kube.ProwJob{tc.pj},
		}
		fpc := &fkc{
			pods: tc.pods,
			err:  tc.err,
		}
		c := Controller{
			kc:          fc,
			pkc:         fpc,
			ca:          newFakeConfigAgent(t, 0),
			totURL:      totServ.URL,
			pendingJobs: make(map[string]int),
		}

		reports := make(chan kube.ProwJob, 100)
		if err := c.syncPendingJob(tc.pj, pm, reports); err != nil {
			t.Errorf("for case %q got an error: %v", tc.name, err)
			continue
		}
		close(reports)

		actual := fc.prowjobs[0]
		if actual.Status.State != tc.expectedState {
			t.Errorf("for case %q got state %v", tc.name, actual.Status.State)
		}
		if len(fpc.pods) != tc.expectedNumPods {
			t.Errorf("for case %q got %d pods, expected %d", tc.name, len(fpc.pods), tc.expectedNumPods)
		}
		if actual.Complete() != tc.expectedComplete {
			t.Errorf("for case %q got wrong completion", tc.name)
		}
		if len(fc.prowjobs) != tc.expectedCreatedPJs+1 {
			t.Errorf("for case %q got %d created prowjobs", tc.name, len(fc.prowjobs)-1)
		}
		if tc.expectedReport && len(reports) != 1 {
			t.Errorf("for case %q wanted one report but got %d", tc.name, len(reports))
		}
		if !tc.expectedReport && len(reports) != 0 {
			t.Errorf("for case %q did not want any reports but got %d", tc.name, len(reports))
		}
		if tc.expectedReport {
			r := <-reports

			if got, want := r.Status.URL, tc.expectedURL; got != want {
				t.Errorf("for case %q, report.Status.URL: got %q, want %q", tc.name, got, want)
			}
		}
	}
}

// TestPeriodic walks through the happy path of a periodic job.
func TestPeriodic(t *testing.T) {
	per := config.Periodic{
		Name:  "ci-periodic-job",
		Agent: "kubernetes",
		Spec: &kube.PodSpec{
			Containers: []kube.Container{{}},
		},
		RunAfterSuccess: []config.Periodic{
			{
				Name:  "ci-periodic-job-2",
				Agent: "kubernetes",
				Spec:  &kube.PodSpec{},
			},
		},
	}

	totServ := httptest.NewServer(http.HandlerFunc(handleTot))
	defer totServ.Close()
	fc := &fkc{
		prowjobs: []kube.ProwJob{pjutil.NewProwJob(pjutil.PeriodicSpec(per))},
	}
	c := Controller{
		kc:          fc,
		pkc:         fc,
		ca:          newFakeConfigAgent(t, 0),
		totURL:      totServ.URL,
		pendingJobs: make(map[string]int),
		lock:        sync.RWMutex{},
	}

	if err := c.Sync(); err != nil {
		t.Fatalf("Error on first sync: %v", err)
	}
	if fc.prowjobs[0].Spec.PodSpec.Containers[0].Name != "" {
		t.Fatal("Sync step updated the TPR spec.")
	}
	if len(fc.pods) != 1 {
		t.Fatal("Didn't create pod on first sync.")
	}
	if len(fc.pods[0].Spec.Containers) != 1 {
		t.Fatal("Wiped container list.")
	}
	if len(fc.pods[0].Spec.Containers[0].Env) == 0 {
		t.Fatal("Container has no env set.")
	}
	if err := c.Sync(); err != nil {
		t.Fatalf("Error on second sync: %v", err)
	}
	if len(fc.pods) != 1 {
		t.Fatalf("Wrong number of pods after second sync: %d", len(fc.pods))
	}
	fc.pods[0].Status.Phase = kube.PodSucceeded
	if err := c.Sync(); err != nil {
		t.Fatalf("Error on third sync: %v", err)
	}
	if !fc.prowjobs[0].Complete() {
		t.Fatal("Prow job didn't complete.")
	}
	if fc.prowjobs[0].Status.State != kube.SuccessState {
		t.Fatalf("Should be success: %v", fc.prowjobs[0].Status.State)
	}
	if len(fc.prowjobs) != 2 {
		t.Fatalf("Wrong number of prow jobs: %d", len(fc.prowjobs))
	}
	if err := c.Sync(); err != nil {
		t.Fatalf("Error on fourth sync: %v", err)
	}
}

func TestRunAfterSuccessCanRun(t *testing.T) {
	tests := []struct {
		name string

		parent *kube.ProwJob
		child  *kube.ProwJob

		changes []github.PullRequestChange
		err     error

		expected bool
	}{
		{
			name: "child does not require specific changes",
			parent: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "test-e2e",
					Type: kube.PresubmitJob,
					Refs: kube.Refs{
						Org:  "kubernetes",
						Repo: "kubernetes",
						Pulls: []kube.Pull{
							{Number: 123},
						},
					},
				},
			},
			child: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job: "push-image",
				},
			},
			expected: true,
		},
		{
			name: "child requires specific changes that are done",
			parent: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "test-bazel-build",
					Type: kube.PresubmitJob,
					Refs: kube.Refs{
						Org:  "kubernetes",
						Repo: "kubernetes",
						Pulls: []kube.Pull{
							{Number: 123},
						},
					},
				},
			},
			child: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job: "test-kubeadm-cloud",
				},
			},
			changes: []github.PullRequestChange{
				{Filename: "cmd/kubeadm/kubeadm.go"},
				{Filename: "vendor/BUILD"},
				{Filename: ".gitatrributes"},
			},
			expected: true,
		},
		{
			name: "child requires specific changes that are not done",
			parent: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "test-bazel-build",
					Type: kube.PresubmitJob,
					Refs: kube.Refs{
						Org:  "kubernetes",
						Repo: "kubernetes",
						Pulls: []kube.Pull{
							{Number: 123},
						},
					},
				},
			},
			child: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job: "test-kubeadm-cloud",
				},
			},
			changes: []github.PullRequestChange{
				{Filename: "vendor/BUILD"},
				{Filename: ".gitatrributes"},
			},
			expected: false,
		},
	}

	for _, test := range tests {
		t.Logf("scenario %q", test.name)

		fakeGH := &fghc{
			changes: test.changes,
			err:     test.err,
		}

		got := RunAfterSuccessCanRun(test.parent, test.child, newFakeConfigAgent(t, 0), fakeGH)
		if got != test.expected {
			t.Errorf("expected to run: %t, got: %t", test.expected, got)
		}
	}
}

func TestMaxConcurrencyWithNewlyTriggeredJobs(t *testing.T) {
	tests := []struct {
		name         string
		pjs          []kube.ProwJob
		pendingJobs  map[string]int
		expectedPods int
	}{
		{
			name: "avoid starting a triggered job",
			pjs: []kube.ProwJob{
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 1,
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 1,
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
			},
			pendingJobs:  make(map[string]int),
			expectedPods: 1,
		},
		{
			name: "both triggered jobs can start",
			pjs: []kube.ProwJob{
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 2,
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 2,
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
			},
			pendingJobs:  make(map[string]int),
			expectedPods: 2,
		},
		{
			name: "no triggered job can start",
			pjs: []kube.ProwJob{
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 5,
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 5,
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
			},
			pendingJobs:  map[string]int{"test-bazel-build": 5},
			expectedPods: 0,
		},
	}

	for _, test := range tests {
		jobs := make(chan kube.ProwJob, len(test.pjs))
		for _, pj := range test.pjs {
			jobs <- pj
		}
		close(jobs)

		fc := &fkc{
			prowjobs: test.pjs,
		}
		fpc := &fkc{}
		n, err := snowflake.NewNode(1)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		c := Controller{
			kc:          fc,
			pkc:         fpc,
			ca:          newFakeConfigAgent(t, 0),
			node:        n,
			pendingJobs: test.pendingJobs,
		}

		reports := make(chan kube.ProwJob, len(test.pjs))
		errors := make(chan error, len(test.pjs))
		pm := make(map[string]kube.Pod)

		syncProwJobs(c.syncNonPendingJob, jobs, reports, errors, pm)
		if len(fpc.pods) != test.expectedPods {
			t.Errorf("expected pods: %d, got: %d", test.expectedPods, len(fpc.pods))
		}
	}
}