github.com/yrj2011/jx-test-infra@v0.0.0-20190529031832-7a2065ee98eb/prow/plank/controller_test.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package plank

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"
	"testing"
	"text/template"
	"time"

	"github.com/bwmarrin/snowflake"
	"github.com/sirupsen/logrus"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/test-infra/prow/config"
	"k8s.io/test-infra/prow/github"
	"k8s.io/test-infra/prow/kube"
	"k8s.io/test-infra/prow/pjutil"
)

// fca is a fake config agent that serves a fixed config under a lock.
type fca struct {
	sync.Mutex
	c *config.Config
}

const (
	podPendingTimeout = time.Hour
)

func newFakeConfigAgent(t *testing.T, maxConcurrency int) *fca {
	presubmits := []config.Presubmit{
		{
			Name: "test-bazel-build",
			RunAfterSuccess: []config.Presubmit{
				{
					Name:         "test-kubeadm-cloud",
					RunIfChanged: "^(cmd/kubeadm|build/debs).*$",
				},
			},
		},
		{
			Name: "test-e2e",
			RunAfterSuccess: []config.Presubmit{
				{
					Name: "push-image",
				},
			},
		},
		{
			Name: "test-bazel-test",
		},
	}
	if err := config.SetPresubmitRegexes(presubmits); err != nil {
		t.Fatal(err)
	}
	presubmitMap := map[string][]config.Presubmit{
		"kubernetes/kubernetes": presubmits,
	}

	return &fca{
		c: &config.Config{
			ProwConfig: config.ProwConfig{
				Plank: config.Plank{
					Controller: config.Controller{
						JobURLTemplate: template.Must(template.New("test").Parse("{{.ObjectMeta.Name}}/{{.Status.State}}")),
						MaxConcurrency: maxConcurrency,
						MaxGoroutines:  20,
					},
					PodPendingTimeout: podPendingTimeout,
				},
			},
			JobConfig: config.JobConfig{
				Presubmits: presubmitMap,
			},
		},
	}
}

func (f *fca) Config() *config.Config {
	f.Lock()
	defer f.Unlock()
	return f.c
}

// fkc is a fake kube client that records created prowjobs and pods as well
// as deleted pods, and can be primed with an error for pod creation.
type fkc struct {
	sync.Mutex
	prowjobs    []kube.ProwJob
	pods        []kube.Pod
	deletedPods []kube.Pod
	err         error
}

func (f *fkc) CreateProwJob(pj kube.ProwJob) (kube.ProwJob, error) {
	f.Lock()
	defer f.Unlock()
	f.prowjobs = append(f.prowjobs, pj)
	return pj, nil
}

func (f *fkc) ListProwJobs(selector string) ([]kube.ProwJob, error) {
	f.Lock()
	defer f.Unlock()
	return f.prowjobs, nil
}

func (f *fkc) ReplaceProwJob(name string, job kube.ProwJob) (kube.ProwJob, error) {
	f.Lock()
	defer f.Unlock()
	for i := range f.prowjobs {
		if f.prowjobs[i].ObjectMeta.Name == name {
			f.prowjobs[i] = job
			return job, nil
		}
	}
	return kube.ProwJob{}, fmt.Errorf("did not find prowjob %s", name)
}

func (f *fkc) CreatePod(pod kube.Pod) (kube.Pod, error) {
	f.Lock()
	defer f.Unlock()
	if f.err != nil {
		return kube.Pod{}, f.err
	}
	f.pods = append(f.pods, pod)
	return pod, nil
}

func (f *fkc) ListPods(selector string) ([]kube.Pod, error) {
	f.Lock()
	defer f.Unlock()
	return f.pods, nil
}
func (f *fkc) DeletePod(name string) error {
	f.Lock()
	defer f.Unlock()
	for i := range f.pods {
		if f.pods[i].ObjectMeta.Name == name {
			f.deletedPods = append(f.deletedPods, f.pods[i])
			f.pods = append(f.pods[:i], f.pods[i+1:]...)
			return nil
		}
	}
	return fmt.Errorf("did not find pod %s", name)
}

// fghc is a fake GitHub client that serves canned pull request changes.
type fghc struct {
	sync.Mutex
	changes []github.PullRequestChange
	err     error
}

func (f *fghc) GetPullRequestChanges(org, repo string, number int) ([]github.PullRequestChange, error) {
	f.Lock()
	defer f.Unlock()
	return f.changes, f.err
}

func (f *fghc) BotName() (string, error)                                  { return "bot", nil }
func (f *fghc) CreateStatus(org, repo, ref string, s github.Status) error { return nil }
func (f *fghc) ListIssueComments(org, repo string, number int) ([]github.IssueComment, error) {
	return nil, nil
}
func (f *fghc) CreateComment(org, repo string, number int, comment string) error { return nil }
func (f *fghc) DeleteComment(org, repo string, ID int) error                     { return nil }
func (f *fghc) EditComment(org, repo string, ID int, comment string) error       { return nil }

func TestTerminateDupes(t *testing.T) {
	now := time.Now()
	nowFn := func() *metav1.Time {
		reallyNow := metav1.NewTime(now)
		return &reallyNow
	}
	var testcases = []struct {
		name string

		allowCancellations bool
		pjs                []kube.ProwJob
		pm                 map[string]kube.Pod

		terminatedPJs  map[string]struct{}
		terminatedPods map[string]struct{}
	}{
		{
			name: "terminate all duplicates",

			pjs: []kube.ProwJob{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "newest"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Minute)),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "old"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Hour)),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "older"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-2 * time.Hour)),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "complete"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j1",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime:      metav1.NewTime(now.Add(-3 * time.Hour)),
						CompletionTime: nowFn(),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "newest_j2"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j2",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Minute)),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "old_j2"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j2",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Hour)),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "old_j3"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j3",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Hour)),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "new_j3"},
					Spec: kube.ProwJobSpec{
						Type: kube.PresubmitJob,
						Job:  "j3",
						Refs: &kube.Refs{Pulls: []kube.Pull{{}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Minute)),
					},
				},
			},

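			// For each job name, every presubmit run except the newest
			// should be terminated; already-completed runs are left alone.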
			terminatedPJs: map[string]struct{}{
				"old": {}, "older": {}, "old_j2": {}, "old_j3": {},
			},
		},
		{
			name: "should also terminate pods",

			allowCancellations: true,
			pjs: []kube.ProwJob{
				{
					ObjectMeta: metav1.ObjectMeta{Name: "newest"},
					Spec: kube.ProwJobSpec{
						Type:    kube.PresubmitJob,
						Job:     "j1",
						Refs:    &kube.Refs{Pulls: []kube.Pull{{}}},
						PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Minute)),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{Name: "old"},
					Spec: kube.ProwJobSpec{
						Type:    kube.PresubmitJob,
						Job:     "j1",
						Refs:    &kube.Refs{Pulls: []kube.Pull{{}}},
						PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						StartTime: metav1.NewTime(now.Add(-time.Hour)),
					},
				},
			},
			pm: map[string]kube.Pod{
				"newest": {ObjectMeta: metav1.ObjectMeta{Name: "newest"}},
				"old":    {ObjectMeta: metav1.ObjectMeta{Name: "old"}},
			},

			terminatedPJs: map[string]struct{}{
				"old": {},
			},
			terminatedPods: map[string]struct{}{
				"old": {},
			},
		},
	}

	for _, tc := range testcases {
		var pods []kube.Pod
		for _, pod := range tc.pm {
			pods = append(pods, pod)
		}
		fkc := &fkc{pods: pods, prowjobs: tc.pjs}
		fca := &fca{
			c: &config.Config{
				ProwConfig: config.ProwConfig{
					Plank: config.Plank{
						Controller: config.Controller{
							AllowCancellations: tc.allowCancellations,
						},
					},
				},
			},
		}
		c := Controller{
			kc:   fkc,
			pkcs: map[string]kubeClient{kube.DefaultClusterAlias: fkc},
			log:  logrus.NewEntry(logrus.StandardLogger()),
			ca:   fca,
		}

		if err := c.terminateDupes(fkc.prowjobs, tc.pm); err != nil {
			t.Fatalf("Error terminating dupes: %v", err)
		}

		for terminatedName := range tc.terminatedPJs {
			terminated := false
			for _, pj := range fkc.prowjobs {
				if pj.ObjectMeta.Name != terminatedName {
					continue
				}
				if !pj.Complete() {
					t.Errorf("expected prowjob %q to be terminated!", terminatedName)
				}
				terminated = true
			}
			if !terminated {
				t.Errorf("expected prowjob %q to be terminated, got %+v", terminatedName, fkc.prowjobs)
			}
		}
		for terminatedName := range tc.terminatedPods {
			terminated := false
			for _, deleted := range fkc.deletedPods {
				if deleted.ObjectMeta.Name == terminatedName {
					terminated = true
				}
			}
			if !terminated {
				t.Errorf("expected pod %q to be terminated, got terminated: %v", terminatedName, fkc.deletedPods)
			}
		}
	}
}

// handleTot fakes the tot build-number service by always returning 42.
func handleTot(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "42")
}

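// TestSyncTriggeredJobs walks triggered prowjobs through syncTriggeredJob,
// covering pod creation, per-job and global concurrency limits, and
// pod-creation failures.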
func TestSyncTriggeredJobs(t *testing.T) {
	var testcases = []struct {
		name string

		pj             kube.ProwJob
		pendingJobs    map[string]int
		maxConcurrency int
		pods           map[string][]kube.Pod
		podErr         error

		expectedState      kube.ProwJobState
		expectedPodHasName bool
		expectedNumPods    map[string]int
		expectedComplete   bool
		expectedCreatedPJs int
		expectedReport     bool
		expectedURL        string
		expectedBuildID    string
		expectError        bool
	}{
		{
			name: "start new pod",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "blabla",
				},
				Spec: kube.ProwJobSpec{
					Job:     "boop",
					Type:    kube.PeriodicJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pods:               map[string][]kube.Pod{"default": {}},
			expectedState:      kube.PendingState,
			expectedPodHasName: true,
			expectedNumPods:    map[string]int{"default": 1},
			expectedReport:     true,
			expectedURL:        "blabla/pending",
		},
		{
			name: "pod with a max concurrency of 1",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:            "same",
					Type:           kube.PeriodicJob,
					MaxConcurrency: 1,
					PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pendingJobs: map[string]int{
				"same": 1,
			},
			pods: map[string][]kube.Pod{
				"default": {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "same-42",
						},
						Status: kube.PodStatus{
							Phase: kube.PodRunning,
						},
					},
				},
			},
			expectedState:   kube.TriggeredState,
			expectedNumPods: map[string]int{"default": 1},
		},
		{
			name: "trusted pod with a max concurrency of 1",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:            "same",
					Type:           kube.PeriodicJob,
					Cluster:        "trusted",
					MaxConcurrency: 1,
					PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pendingJobs: map[string]int{
				"same": 1,
			},
			pods: map[string][]kube.Pod{
				"trusted": {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "same-42",
						},
						Status: kube.PodStatus{
							Phase: kube.PodRunning,
						},
					},
				},
			},
			expectedState:   kube.TriggeredState,
			expectedNumPods: map[string]int{"trusted": 1},
		},
		{
			name: "trusted pod with a max concurrency of 1 (can start)",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "some",
				},
				Spec: kube.ProwJobSpec{
					Job:            "some",
					Type:           kube.PeriodicJob,
					Cluster:        "trusted",
					MaxConcurrency: 1,
					PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pods: map[string][]kube.Pod{
				"default": {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "other-42",
						},
						Status: kube.PodStatus{
							Phase: kube.PodRunning,
						},
					},
				},
				"trusted": {},
			},
			expectedState:      kube.PendingState,
			expectedNumPods:    map[string]int{"default": 1, "trusted": 1},
			expectedPodHasName: true,
			expectedReport:     true,
			expectedURL:        "some/pending",
		},
		{
			name: "do not exceed global maxconcurrency",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "beer",
				},
				Spec: kube.ProwJobSpec{
					Job:     "same",
					Type:    kube.PeriodicJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			maxConcurrency: 20,
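			// 10+8+2 jobs are already pending, which meets the global
			// limit of 20, so this job must stay in the triggered state.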
			pendingJobs:   map[string]int{"motherearth": 10, "allagash": 8, "krusovice": 2},
			expectedState: kube.TriggeredState,
		},
		{
			name: "global maxconcurrency allows new jobs when possible",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "beer",
				},
				Spec: kube.ProwJobSpec{
					Job:     "same",
					Type:    kube.PeriodicJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pods:            map[string][]kube.Pod{"default": {}},
			maxConcurrency:  21,
			pendingJobs:     map[string]int{"motherearth": 10, "allagash": 8, "krusovice": 2},
			expectedState:   kube.PendingState,
			expectedNumPods: map[string]int{"default": 1},
			expectedReport:  true,
			expectedURL:     "beer/pending",
		},
		{
			name: "unprocessable prow job",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:     "boop",
					Type:    kube.PeriodicJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pods:             map[string][]kube.Pod{"default": {}},
			podErr:           kube.NewUnprocessableEntityError(errors.New("no way jose")),
			expectedState:    kube.ErrorState,
			expectedComplete: true,
			expectedReport:   true,
		},
		{
			name: "conflict error starting pod",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:     "boop",
					Type:    kube.PeriodicJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			podErr:        kube.NewConflictError(errors.New("no way jose")),
			expectedState: kube.TriggeredState,
			expectError:   true,
		},
		{
			name: "unknown error starting pod",
			pj: kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:     "boop",
					Type:    kube.PeriodicJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			podErr:        errors.New("no way unknown jose"),
			expectedState: kube.TriggeredState,
			expectError:   true,
		},
		{
			name: "running pod, failed prowjob update",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo",
				},
				Spec: kube.ProwJobSpec{
					Job:     "boop",
					Type:    kube.PeriodicJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.TriggeredState,
				},
			},
			pods: map[string][]kube.Pod{
				"default": {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name: "foo",
						},
						Spec: kube.PodSpec{
							Containers: []v1.Container{
								{
									// The build ID should be recovered from the
									// existing pod's BUILD_NUMBER env var.
									Env: []v1.EnvVar{
										{
											Name:  "BUILD_NUMBER",
											Value: "0987654321",
										},
									},
								},
							},
						},
						Status: kube.PodStatus{
							Phase: kube.PodRunning,
						},
					},
				},
			},
			expectedState:   kube.PendingState,
			expectedNumPods: map[string]int{"default": 1},
			expectedReport:  true,
			expectedURL:     "foo/pending",
			expectedBuildID: "0987654321",
		},
	}
	for _, tc := range testcases {
		totServ := httptest.NewServer(http.HandlerFunc(handleTot))
		defer totServ.Close()
		pm := make(map[string]kube.Pod)
		for _, pods := range tc.pods {
			for i := range pods {
				pm[pods[i].ObjectMeta.Name] = pods[i]
			}
		}
		fc := &fkc{
			prowjobs: []kube.ProwJob{tc.pj},
		}
		pkcs := map[string]kubeClient{}
		for alias, pods := range tc.pods {
			pkcs[alias] = &fkc{
				pods: pods,
				err:  tc.podErr,
			}
		}
		c := Controller{
			kc:          fc,
			pkcs:        pkcs,
			log:         logrus.NewEntry(logrus.StandardLogger()),
			ca:          newFakeConfigAgent(t, tc.maxConcurrency),
			totURL:      totServ.URL,
			pendingJobs: make(map[string]int),
		}
		if tc.pendingJobs != nil {
			c.pendingJobs = tc.pendingJobs
		}

		reports := make(chan kube.ProwJob, 100)
		if err := c.syncTriggeredJob(tc.pj, pm, reports); (err != nil) != tc.expectError {
			if tc.expectError {
				t.Errorf("for case %q expected an error, but got none", tc.name)
			} else {
				t.Errorf("for case %q got an unexpected error: %v", tc.name, err)
			}
			continue
		}
		close(reports)

		actual := fc.prowjobs[0]
		if actual.Status.State != tc.expectedState {
			t.Errorf("for case %q got state %v", tc.name, actual.Status.State)
		}
		if (actual.Status.PodName == "") && tc.expectedPodHasName {
			t.Errorf("for case %q got no pod name, expected one", tc.name)
		}
		for alias, expected := range tc.expectedNumPods {
			if got := len(pkcs[alias].(*fkc).pods); got != expected {
				t.Errorf("for case %q got %d pods for alias %q, but expected %d", tc.name, got, alias, expected)
			}
		}
		if actual.Complete() != tc.expectedComplete {
			t.Errorf("for case %q got wrong completion", tc.name)
		}
		if len(fc.prowjobs) != tc.expectedCreatedPJs+1 {
			t.Errorf("for case %q got %d created prowjobs", tc.name, len(fc.prowjobs)-1)
		}
		if tc.expectedReport && len(reports) != 1 {
			t.Errorf("for case %q wanted one report but got %d", tc.name, len(reports))
		}
		if !tc.expectedReport && len(reports) != 0 {
			t.Errorf("for case %q did not want any reports but got %d", tc.name, len(reports))
		}
		if tc.expectedReport {
			r := <-reports

			if got, want := r.Status.URL, tc.expectedURL; got != want {
				t.Errorf("for case %q, report.Status.URL: got %q, want %q", tc.name, got, want)
			}
			if got, want := r.Status.BuildID, tc.expectedBuildID; want != "" && got != want {
				t.Errorf("for case %q, report.Status.BuildID: got %q, want %q", tc.name, got, want)
			}
		}
	}
}

func startTime(s time.Time) *metav1.Time {
	start := metav1.NewTime(s)
	return &start
}

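// TestSyncPendingJob drives pending prowjobs through syncPendingJob,
// covering missing, unknown, succeeded, failed, evicted, running, and
// stale pods.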
func TestSyncPendingJob(t *testing.T) {
	var testcases = []struct {
		name string

		pj   kube.ProwJob
		pods []kube.Pod
		err  error

		expectedState      kube.ProwJobState
		expectedNumPods    int
		expectedComplete   bool
		expectedCreatedPJs int
		expectedReport     bool
		expectedURL        string
	}{
		{
			name: "reset when pod goes missing",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "boop-41",
				},
				Spec: kube.ProwJobSpec{
					Type:    kube.PostsubmitJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-41",
				},
			},
			expectedState:   kube.PendingState,
			expectedReport:  true,
			expectedNumPods: 1,
			expectedURL:     "boop-41/pending",
		},
		{
			name: "delete pod in unknown state",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "boop-41",
				},
				Spec: kube.ProwJobSpec{
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-41",
				},
			},
			pods: []kube.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "boop-41",
					},
					Status: kube.PodStatus{
						Phase: kube.PodUnknown,
					},
				},
			},
			expectedState:   kube.PendingState,
			expectedNumPods: 0,
		},
		{
			name: "succeeded pod",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					Type:    kube.BatchJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					RunAfterSuccess: []kube.ProwJobSpec{{
						Job:     "job-name",
						Type:    kube.PeriodicJob,
						PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodSucceeded,
					},
				},
			},
			expectedComplete:   true,
			expectedState:      kube.SuccessState,
			expectedNumPods:    1,
			expectedCreatedPJs: 1,
			expectedReport:     true,
			expectedURL:        "boop-42/success",
		},
		{
			name: "failed pod",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					Type: kube.PresubmitJob,
					Refs: &kube.Refs{
						Org: "kubernetes", Repo: "kubernetes",
						BaseRef: "baseref", BaseSHA: "basesha",
						Pulls: []kube.Pull{{Number: 100, Author: "me", SHA: "sha"}},
					},
					PodSpec:         &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					RunAfterSuccess: []kube.ProwJobSpec{{}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodFailed,
					},
				},
			},
			expectedComplete: true,
			expectedState:    kube.FailureState,
			expectedNumPods:  1,
			expectedReport:   true,
			expectedURL:      "boop-42/failure",
		},
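		// Eviction should delete the pod without completing the prowjob,
		// leaving it pending so a replacement pod can be scheduled.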
		{
			name: "delete evicted pod",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase:  kube.PodFailed,
						Reason: kube.Evicted,
					},
				},
			},
			expectedComplete: false,
			expectedState:    kube.PendingState,
			expectedNumPods:  0,
		},
		{
			name: "running pod",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					RunAfterSuccess: []kube.ProwJobSpec{{
						Job:     "job-name",
						Type:    kube.PeriodicJob,
						PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
				},
			},
			pods: []kube.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodRunning,
					},
				},
			},
			expectedState:   kube.PendingState,
			expectedNumPods: 1,
		},
		{
			name: "pod changes url status",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "boop-42",
				},
				Spec: kube.ProwJobSpec{
					RunAfterSuccess: []kube.ProwJobSpec{{
						Job:     "job-name",
						Type:    kube.PeriodicJob,
						PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "boop-42",
					URL:     "boop-42/pending",
				},
			},
			pods: []kube.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "boop-42",
					},
					Status: kube.PodStatus{
						Phase: kube.PodSucceeded,
					},
				},
			},
			expectedComplete:   true,
			expectedState:      kube.SuccessState,
			expectedNumPods:    1,
			expectedCreatedPJs: 1,
			expectedReport:     true,
			expectedURL:        "boop-42/success",
		},
		{
			name: "unprocessable prow job",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "jose",
				},
				Spec: kube.ProwJobSpec{
					Job:     "boop",
					Type:    kube.PostsubmitJob,
					PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
				},
				Status: kube.ProwJobStatus{
					State: kube.PendingState,
				},
			},
			err:              kube.NewUnprocessableEntityError(errors.New("no way jose")),
			expectedState:    kube.ErrorState,
			expectedComplete: true,
			expectedReport:   true,
			expectedURL:      "jose/error",
		},
		{
			name: "stale pending prow job",
			pj: kube.ProwJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "nightmare",
				},
				Spec: kube.ProwJobSpec{
					RunAfterSuccess: []kube.ProwJobSpec{{
						Job:     "job-name",
						Type:    kube.PeriodicJob,
						PodSpec: &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					}},
				},
				Status: kube.ProwJobStatus{
					State:   kube.PendingState,
					PodName: "nightmare",
				},
			},
			pods: []kube.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "nightmare",
					},
					Status: kube.PodStatus{
						Phase:     kube.PodPending,
						StartTime: startTime(time.Now().Add(-podPendingTimeout)),
					},
				},
			},
			expectedState:    kube.AbortedState,
			expectedNumPods:  1,
			expectedComplete: true,
			expectedReport:   true,
			expectedURL:      "nightmare/aborted",
		},
	}
	for _, tc := range testcases {
		t.Logf("Running test case %q", tc.name)
		totServ := httptest.NewServer(http.HandlerFunc(handleTot))
		defer totServ.Close()
		pm := make(map[string]kube.Pod)
		for i := range tc.pods {
			pm[tc.pods[i].ObjectMeta.Name] = tc.pods[i]
		}
		fc := &fkc{
			prowjobs: []kube.ProwJob{tc.pj},
		}
		fpc := &fkc{
			pods: tc.pods,
			err:  tc.err,
		}
		c := Controller{
			kc:          fc,
			pkcs:        map[string]kubeClient{kube.DefaultClusterAlias: fpc},
			log:         logrus.NewEntry(logrus.StandardLogger()),
			ca:          newFakeConfigAgent(t, 0),
			totURL:      totServ.URL,
			pendingJobs: make(map[string]int),
		}

		reports := make(chan kube.ProwJob, 100)
		if err := c.syncPendingJob(tc.pj, pm, reports); err != nil {
			t.Errorf("for case %q got an error: %v", tc.name, err)
			continue
		}
		close(reports)

		actual := fc.prowjobs[0]
		if actual.Status.State != tc.expectedState {
			t.Errorf("for case %q got state %v", tc.name, actual.Status.State)
		}
		if len(fpc.pods) != tc.expectedNumPods {
			t.Errorf("for case %q got %d pods, expected %d", tc.name, len(fpc.pods), tc.expectedNumPods)
		}
		if actual.Complete() != tc.expectedComplete {
			t.Errorf("for case %q got wrong completion", tc.name)
		}
		if len(fc.prowjobs) != tc.expectedCreatedPJs+1 {
			t.Errorf("for case %q got %d created prowjobs", tc.name, len(fc.prowjobs)-1)
		}
		if tc.expectedReport && len(reports) != 1 {
			t.Errorf("for case %q wanted one report but got %d", tc.name, len(reports))
		}
		if !tc.expectedReport && len(reports) != 0 {
			t.Errorf("for case %q did not want any reports but got %d", tc.name, len(reports))
		}
		if tc.expectedReport {
			r := <-reports

			if got, want := r.Status.URL, tc.expectedURL; got != want {
				t.Errorf("for case %q, report.Status.URL: got %q, want %q", tc.name, got, want)
			}
		}
	}
}

// TestPeriodic walks through the happy path of a periodic job.
func TestPeriodic(t *testing.T) {
	per := config.Periodic{
		Name:    "ci-periodic-job",
		Agent:   "kubernetes",
		Cluster: "trusted",
		Spec:    &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
		RunAfterSuccess: []config.Periodic{
			{
				Name:  "ci-periodic-job-2",
				Agent: "kubernetes",
				Spec:  &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
			},
		},
	}

	totServ := httptest.NewServer(http.HandlerFunc(handleTot))
	defer totServ.Close()
	fc := &fkc{
		prowjobs: []kube.ProwJob{pjutil.NewProwJob(pjutil.PeriodicSpec(per), nil)},
	}
	c := Controller{
		kc:          fc,
		pkcs:        map[string]kubeClient{kube.DefaultClusterAlias: &fkc{}, "trusted": fc},
		log:         logrus.NewEntry(logrus.StandardLogger()),
		ca:          newFakeConfigAgent(t, 0),
		totURL:      totServ.URL,
		pendingJobs: make(map[string]int),
		lock:        sync.RWMutex{},
	}
	if err := c.Sync(); err != nil {
		t.Fatalf("Error on first sync: %v", err)
	}
	if len(fc.prowjobs[0].Spec.PodSpec.Containers) != 1 || fc.prowjobs[0].Spec.PodSpec.Containers[0].Name != "test-name" {
		t.Fatalf("Sync step updated the pod spec: %#v", fc.prowjobs[0].Spec.PodSpec)
	}
	if len(fc.pods) != 1 {
		t.Fatal("Didn't create pod on first sync.")
	}
	if len(fc.pods[0].Spec.Containers) != 1 {
		t.Fatal("Wiped container list.")
	}
	if len(fc.pods[0].Spec.Containers[0].Env) == 0 {
		t.Fatal("Container has no env set.")
	}
	if err := c.Sync(); err != nil {
		t.Fatalf("Error on second sync: %v", err)
	}
	if len(fc.pods) != 1 {
		t.Fatalf("Wrong number of pods after second sync: %d", len(fc.pods))
	}
	fc.pods[0].Status.Phase = kube.PodSucceeded
	if err := c.Sync(); err != nil {
		t.Fatalf("Error on third sync: %v", err)
	}
	if !fc.prowjobs[0].Complete() {
		t.Fatal("Prow job didn't complete.")
	}
	if fc.prowjobs[0].Status.State != kube.SuccessState {
		t.Fatalf("Should be success: %v", fc.prowjobs[0].Status.State)
	}
	if len(fc.prowjobs) != 2 {
		t.Fatalf("Wrong number of prow jobs: %d", len(fc.prowjobs))
	}
	if err := c.Sync(); err != nil {
		t.Fatalf("Error on fourth sync: %v", err)
	}
}

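// TestRunAfterSuccessCanRun checks whether a child job may run after its
// parent succeeds, based on the files changed in the pull request.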
func TestRunAfterSuccessCanRun(t *testing.T) {
	tests := []struct {
		name string

		parent *kube.ProwJob
		child  *kube.ProwJob

		changes []github.PullRequestChange
		err     error

		expected bool
	}{
		{
			name: "child does not require specific changes",
			parent: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "test-e2e",
					Type: kube.PresubmitJob,
					Refs: &kube.Refs{
						Org:  "kubernetes",
						Repo: "kubernetes",
						Pulls: []kube.Pull{
							{Number: 123},
						},
					},
				},
			},
			child: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job: "push-image",
				},
			},
			expected: true,
		},
		{
			name: "child requires specific changes that are done",
			parent: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "test-bazel-build",
					Type: kube.PresubmitJob,
					Refs: &kube.Refs{
						Org:  "kubernetes",
						Repo: "kubernetes",
						Pulls: []kube.Pull{
							{Number: 123},
						},
					},
				},
			},
			child: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job: "test-kubeadm-cloud",
				},
			},
			changes: []github.PullRequestChange{
				{Filename: "cmd/kubeadm/kubeadm.go"},
				{Filename: "vendor/BUILD"},
				{Filename: ".gitattributes"},
			},
			expected: true,
		},
		{
			name: "child requires specific changes that are not done",
			parent: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job:  "test-bazel-build",
					Type: kube.PresubmitJob,
					Refs: &kube.Refs{
						Org:  "kubernetes",
						Repo: "kubernetes",
						Pulls: []kube.Pull{
							{Number: 123},
						},
					},
				},
			},
			child: &kube.ProwJob{
				Spec: kube.ProwJobSpec{
					Job: "test-kubeadm-cloud",
				},
			},
			changes: []github.PullRequestChange{
				{Filename: "vendor/BUILD"},
				{Filename: ".gitattributes"},
			},
			expected: false,
		},
	}

	for _, test := range tests {
		t.Logf("scenario %q", test.name)

		fakeGH := &fghc{
			changes: test.changes,
			err:     test.err,
		}

		c := Controller{log: logrus.NewEntry(logrus.StandardLogger())}

		got := c.RunAfterSuccessCanRun(test.parent, test.child, newFakeConfigAgent(t, 0), fakeGH)
		if got != test.expected {
			t.Errorf("expected to run: %t, got: %t", test.expected, got)
		}
	}
}

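// TestMaxConcurrencyWithNewlyTriggeredJobs verifies that MaxConcurrency is
// honored even when the competing jobs are triggered in the same sync batch.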
func TestMaxConcurrencyWithNewlyTriggeredJobs(t *testing.T) {
	tests := []struct {
		name         string
		pjs          []kube.ProwJob
		pendingJobs  map[string]int
		expectedPods int
	}{
		{
			name: "avoid starting a triggered job",
			pjs: []kube.ProwJob{
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 1,
						PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 1,
						PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
			},
			pendingJobs:  make(map[string]int),
			expectedPods: 1,
		},
		{
			name: "both triggered jobs can start",
			pjs: []kube.ProwJob{
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 2,
						PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 2,
						PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
			},
			pendingJobs:  make(map[string]int),
			expectedPods: 2,
		},
		{
			name: "no triggered job can start",
			pjs: []kube.ProwJob{
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 5,
						PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
				{
					Spec: kube.ProwJobSpec{
						Job:            "test-bazel-build",
						Type:           kube.PostsubmitJob,
						MaxConcurrency: 5,
						PodSpec:        &kube.PodSpec{Containers: []kube.Container{{Name: "test-name", Env: []kube.EnvVar{}}}},
					},
					Status: kube.ProwJobStatus{
						State: kube.TriggeredState,
					},
				},
			},
			pendingJobs:  map[string]int{"test-bazel-build": 5},
			expectedPods: 0,
		},
	}

	for _, test := range tests {
		t.Logf("Running scenario %q", test.name)
		jobs := make(chan kube.ProwJob, len(test.pjs))
		for _, pj := range test.pjs {
			jobs <- pj
		}
		close(jobs)

		fc := &fkc{
			prowjobs: test.pjs,
		}
		fpc := &fkc{}
		n, err := snowflake.NewNode(1)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		c := Controller{
			kc:          fc,
			pkcs:        map[string]kubeClient{kube.DefaultClusterAlias: fpc},
			log:         logrus.NewEntry(logrus.StandardLogger()),
			ca:          newFakeConfigAgent(t, 0),
			node:        n,
			pendingJobs: test.pendingJobs,
		}

		reports := make(chan kube.ProwJob, len(test.pjs))
		errs := make(chan error, len(test.pjs))
		pm := make(map[string]kube.Pod)

		syncProwJobs(c.log, c.syncTriggeredJob, 20, jobs, reports, errs, pm)
		if len(fpc.pods) != test.expectedPods {
			t.Errorf("expected pods: %d, got: %d", test.expectedPods, len(fpc.pods))
		}
	}
}
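
// A minimal compile-time sketch (not part of the original test suite): the
// fkc fake stands in for the controller's cluster clients above, so pinning
// it to the package's kubeClient interface catches signature drift early.
var _ kubeClient = &fkc{}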