sigs.k8s.io/prow@v0.0.0-20240503223140-c5e374dc7eb1/pkg/plank/reconciler_test.go

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package plank

import (
	"context"
	"encoding/json"
	"errors"
	"strings"
	"sync"
	"testing"
	"text/template"
	"time"

	"github.com/go-test/deep"
	"github.com/sirupsen/logrus"
	authorizationv1 "k8s.io/api/authorization/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	k8sFake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/rest"
	k8sTesting "k8s.io/client-go/testing"
	toolscache "k8s.io/client-go/tools/cache"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/cache/informertest"
	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
	fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllertest"
	ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	prowv1 "sigs.k8s.io/prow/pkg/apis/prowjobs/v1"
	"sigs.k8s.io/prow/pkg/config"
	"sigs.k8s.io/prow/pkg/io"
)

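// TestAdd verifies the watch wiring done by add: only ProwJobs that use the
// Kubernetes agent and are not yet completed, and only pods carrying the
// created-by-prow label (plus any additionalSelector), generate reconcile
// requests; everything else is denied by the predicate.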
func TestAdd(t *testing.T) {
	ctrlruntimelog.SetLogger(zap.New(zap.UseDevMode(true)))
	const prowJobNamespace = "prowjobs"

	testCases := []struct {
		name                  string
		additionalSelector    string
		expectedError         string
		prowJob               metav1.Object
		pod                   metav1.Object
		expectedRequest       string
		expectPredicateDenied bool
	}{
		{
			name: "Prowjob with Kubernetes agent generates event",
			prowJob: &prowv1.ProwJob{
				ObjectMeta: metav1.ObjectMeta{Namespace: prowJobNamespace, Name: "my-pj"},
				Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent},
			},
			expectedRequest: prowJobNamespace + "/my-pj",
		},
		{
			name: "Prowjob without Kubernetes agent does not generate event",
			prowJob: &prowv1.ProwJob{
				ObjectMeta: metav1.ObjectMeta{Namespace: prowJobNamespace, Name: "my-pj"},
				Spec:       prowv1.ProwJobSpec{Agent: prowv1.ProwJobAgent("my-other-agent")},
			},
			expectPredicateDenied: true,
		},
		{
			name: "ProwJob that is completed does not generate event",
			prowJob: &prowv1.ProwJob{
				ObjectMeta: metav1.ObjectMeta{Namespace: prowJobNamespace, Name: "my-pj"},
				Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent},
				Status:     prowv1.ProwJobStatus{CompletionTime: &metav1.Time{}},
			},
			expectPredicateDenied: true,
		},
		{
			name: "Pod generates event",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:   "my-pod",
					Labels: map[string]string{"created-by-prow": "true"},
				},
			},
			expectedRequest: prowJobNamespace + "/my-pod",
		},
		{
			name: "Pod without created-by-prow does not generate event",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "my-pod",
				},
			},
			expectPredicateDenied: true,
		},
		{
			name: "Pod that matches additionalSelector generates event",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "my-pod",
					Labels: map[string]string{
						"created-by-prow": "true",
						"unicorn":         "true",
					},
				},
			},
			additionalSelector: "unicorn=true",
			expectedRequest:    prowJobNamespace + "/my-pod",
		},
		{
			name: "Pod that doesn't match additionalSelector does not generate event",
			pod: &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:   "my-pod",
					Labels: map[string]string{"created-by-prow": "true"},
				},
			},
			additionalSelector:    "unicorn=true",
			expectPredicateDenied: true,
		},
		{
			name:               "Invalid additionalSelector causes error",
			additionalSelector: ",",
			expectedError:      "failed to construct predicate: failed to parse label selector created-by-prow=true,,: found ',', expected: identifier after ','",
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			fakeProwJobInformer := &controllertest.FakeInformer{Synced: true}
			fakePodInformers := &controllertest.FakeInformer{Synced: true}

			prowJobInformerStarted := make(chan struct{})
			mgr, err := mgrFromFakeInformer(prowv1.SchemeGroupVersion.WithKind("ProwJob"), fakeProwJobInformer, prowJobInformerStarted)
			if err != nil {
				t.Fatalf("failed to construct mgr: %v", err)
			}
			podInformerStarted := make(chan struct{})
			buildMgr, err := mgrFromFakeInformer(corev1.SchemeGroupVersion.WithKind("Pod"), fakePodInformers, podInformerStarted)
			if err != nil {
				t.Fatalf("failed to construct mgr: %v", err)
			}
			buildMgrs := map[string]manager.Manager{"default": buildMgr}
			cfg := func() *config.Config {
				return &config.Config{ProwConfig: config.ProwConfig{ProwJobNamespace: prowJobNamespace}}
			}

			receivedRequestChan := make(chan string, 1)
			reconcile := func(_ context.Context, r reconcile.Request) (reconcile.Result, error) {
				receivedRequestChan <- r.String()
				return reconcile.Result{}, nil
			}
			predicateResultChan := make(chan bool, 1)
			predicateCallBack := func(b bool) {
				predicateResultChan <- !b
			}
			var errMsg string
			if err := add(mgr, buildMgrs, nil, cfg, nil, "", tc.additionalSelector, reconcile, predicateCallBack, 1); err != nil {
				errMsg = err.Error()
			}
			if errMsg != tc.expectedError {
				t.Fatalf("expected error %v got error %v", tc.expectedError, errMsg)
			}
			if errMsg != "" {
				return
			}
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			go func() {
				if err := mgr.Start(ctx); err != nil {
					t.Errorf("failed to start main mgr: %v", err)
				}
			}()
			go func() {
				if err := buildMgrs["default"].Start(ctx); err != nil {
					t.Errorf("failed to start build mgr: %v", err)
				}
			}()
			if err := signalOrTimeout(prowJobInformerStarted); err != nil {
				t.Fatalf("failure waiting for prowJobInformer: %v", err)
			}
			if err := signalOrTimeout(podInformerStarted); err != nil {
				t.Fatalf("failure waiting for podInformer: %v", err)
			}

			if tc.prowJob != nil {
				fakeProwJobInformer.Add(tc.prowJob)
			}
			if tc.pod != nil {
				fakePodInformers.Add(tc.pod)
			}

			var receivedRequest string
			var predicateDenied bool
			func() {
				for {
					select {
					case receivedRequest = <-receivedRequestChan:
						return
					case predicateDenied = <-predicateResultChan:
						// The actual request has to pass through the workqueue first,
						// so it might take an additional moment.
						if predicateDenied {
							return
						}
						// This shouldn't take longer than a couple of milliseconds, but
						// in CI we might be CPU starved, so be generous with the timeout.
					case <-time.After(15 * time.Second):
						t.Fatal("timed out waiting for event")
					}
				}
			}()

			if tc.expectedRequest != receivedRequest {
				t.Errorf("expected request %q got request %q", tc.expectedRequest, receivedRequest)
			}
			if tc.expectPredicateDenied != predicateDenied {
				t.Errorf("expected predicate to deny: %t, got predicate denied: %t", tc.expectPredicateDenied, predicateDenied)
			}
		})
	}
}

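// mgrFromFakeInformer constructs a controller-runtime manager whose cache is
// backed by the given FakeInformer for the given GVK. The ready channel is
// closed once an event handler has been registered, i.e. once the watch is
// actually set up.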
func mgrFromFakeInformer(gvk schema.GroupVersionKind, fi *controllertest.FakeInformer, ready chan struct{}) (manager.Manager, error) {
	opts := manager.Options{
		NewClient: func(cache cache.Cache, config *rest.Config, options ctrlruntimeclient.Options, uncachedObjects ...ctrlruntimeclient.Object) (ctrlruntimeclient.Client, error) {
			return nil, nil
		},
		NewCache: func(_ *rest.Config, opts cache.Options) (cache.Cache, error) {
			return &informertest.FakeInformers{
				InformersByGVK: map[schema.GroupVersionKind]toolscache.SharedIndexInformer{gvk: &eventHandlerSignalingInformer{SharedIndexInformer: fi, signal: ready}},
				Synced:         &[]bool{true}[0],
			}, nil
		},
		MapperProvider: func(_ *rest.Config) (meta.RESTMapper, error) {
			return &meta.DefaultRESTMapper{}, nil
		},
		MetricsBindAddress: "0",
	}
	return manager.New(&rest.Config{}, opts)
}

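// eventHandlerSignalingInformer wraps a SharedIndexInformer and closes its
// signal channel as soon as an event handler is added.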
type eventHandlerSignalingInformer struct {
	toolscache.SharedIndexInformer
	signal chan struct{}
}

func (ehsi *eventHandlerSignalingInformer) AddEventHandler(handler toolscache.ResourceEventHandler) {
	ehsi.SharedIndexInformer.AddEventHandler(handler)
	close(ehsi.signal)
}

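// signalOrTimeout waits for the signal channel to fire and errors out after
// 15 seconds.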
func signalOrTimeout(signal <-chan struct{}) error {
	select {
	case <-signal:
		return nil
	case <-time.After(15 * time.Second):
		return errors.New("timed out")
	}
}

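// TestProwJobIndexer verifies which cache index keys prowJobIndexer emits for
// ProwJobs in various namespaces, agents, and states.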
func TestProwJobIndexer(t *testing.T) {
	t.Parallel()
	const pjNS = "prowjobs"
	const pjName = "my-pj"
	const pjJobQueue = "pj-queue"
	pj := func(modify ...func(*prowv1.ProwJob)) *prowv1.ProwJob {
		pj := &prowv1.ProwJob{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: pjNS,
				Name:      "some-job",
			},
			Spec: prowv1.ProwJobSpec{
				Job:          pjName,
				JobQueueName: pjJobQueue,
				Agent:        prowv1.KubernetesAgent,
			},
			Status: prowv1.ProwJobStatus{
				State: prowv1.PendingState,
			},
		}
		for _, m := range modify {
			m(pj)
		}
		return pj
	}
	testCases := []struct {
		name     string
		modify   func(*prowv1.ProwJob)
		expected []string
	}{
		{
			name: "Matches all keys",
			expected: []string{
				prowJobIndexKeyAll,
				prowJobIndexKeyPending,
				pendingTriggeredIndexKeyByName(pjName),
				pendingTriggeredIndexKeyByJobQueueName(pjJobQueue),
			},
		},
		{
			name:   "Triggered goes into triggeredPending",
			modify: func(pj *prowv1.ProwJob) { pj.Status.State = prowv1.TriggeredState },
			expected: []string{
				prowJobIndexKeyAll,
				pendingTriggeredIndexKeyByName(pjName),
				pendingTriggeredIndexKeyByJobQueueName(pjJobQueue),
			},
		},
		{
			name:   "Wrong namespace, no key",
			modify: func(pj *prowv1.ProwJob) { pj.Namespace = "wrong" },
		},
		{
			name:   "Wrong agent, no key",
			modify: func(pj *prowv1.ProwJob) { pj.Spec.Agent = prowv1.TektonAgent },
		},
		{
			name:     "Success, matches only the `all` key",
			modify:   func(pj *prowv1.ProwJob) { pj.Status.State = prowv1.SuccessState },
			expected: []string{prowJobIndexKeyAll},
		},
		{
			name:   "Changing name changes pendingTriggeredIndexKeyByName index",
			modify: func(pj *prowv1.ProwJob) { pj.Spec.Job = "some-name" },
			expected: []string{
				prowJobIndexKeyAll,
				prowJobIndexKeyPending,
				pendingTriggeredIndexKeyByName("some-name"),
				pendingTriggeredIndexKeyByJobQueueName(pjJobQueue),
			},
		},
		{
			name:   "Changing job queue name changes pendingTriggeredIndexKeyByJobQueueName index",
			modify: func(pj *prowv1.ProwJob) { pj.Spec.JobQueueName = "some-name" },
			expected: []string{
				prowJobIndexKeyAll,
				prowJobIndexKeyPending,
				pendingTriggeredIndexKeyByName(pjName),
				pendingTriggeredIndexKeyByJobQueueName("some-name"),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if tc.modify == nil {
				tc.modify = func(_ *prowv1.ProwJob) {}
			}
			result := prowJobIndexer(pjNS)(pj(tc.modify))
			if diff := deep.Equal(result, tc.expected); diff != nil {
				t.Errorf("result differs from expected: %v", diff)
			}
		})
	}
}

// TestMaxConcurrencyConsidersCacheStaleness verifies that reconciliation
// accounts for the delay between making a change and observing it in the
// client when determining whether another copy of a given job may be started.
// It:
// * Creates two runs of the same job that has a MaxConcurrency: 1 setting
// * Uses a fake client that applies Patch operations with a delay but returns instantly
// * Reconciles them in parallel
// * Verifies that one of them gets a RequeueAfter of one second
// * Verifies that after the other one returns, its state is set to Pending, i.e. it blocked until it observed the state transition it made
// * Verifies that there is exactly one pod
func TestMaxConcurrencyConsidersCacheStaleness(t *testing.T) {
	testConcurrency := func(pja, pjb *prowv1.ProwJob) func(*testing.T) {
		return func(t *testing.T) {
			t.Parallel()
			pjClient := &eventuallyConsistentClient{
				t:      t,
				Client: fakectrlruntimeclient.NewClientBuilder().WithRuntimeObjects(pja, pjb).Build(),
			}

			cfg := func() *config.Config {
				return &config.Config{ProwConfig: config.ProwConfig{Plank: config.Plank{
					Controller: config.Controller{
						JobURLTemplate: &template.Template{},
					},
					JobQueueCapacities: map[string]int{"queue-1": 1},
				}}}
			}

			r := newReconciler(context.Background(), pjClient, nil, cfg, nil, "")
			r.buildClients = map[string]buildClient{pja.Spec.Cluster: {Client: fakectrlruntimeclient.NewClientBuilder().Build()}}

			wg := &sync.WaitGroup{}
			wg.Add(2)
			// Use a capacity of two so the test doesn't get stuck if a bug results in two RequeueAfters.
			gotReconcileAfter := make(chan struct{}, 2)

			startAsyncReconcile := func(pjName string) {
				go func() {
					defer wg.Done()
					result, err := r.Reconcile(context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{Name: pjName}})
					if err != nil {
						t.Errorf("reconciliation of pj %s failed: %v", pjName, err)
					}
					if result.RequeueAfter == time.Second {
						gotReconcileAfter <- struct{}{}
						return
					}
					pj := &prowv1.ProwJob{}
					if err := r.pjClient.Get(context.Background(), types.NamespacedName{Name: pjName}, pj); err != nil {
						t.Errorf("failed to get prowjob %s after reconciliation: %v", pjName, err)
					}
					if pj.Status.State != prowv1.PendingState {
						t.Error("pj wasn't in pending state, reconciliation didn't wait for the change to appear in the cache")
					}
				}()
			}
			startAsyncReconcile(pja.Name)
			startAsyncReconcile(pjb.Name)

			wg.Wait()
			close(gotReconcileAfter)

			var numReconcileAfters int
			for range gotReconcileAfter {
				numReconcileAfters++
			}
			if numReconcileAfters != 1 {
				t.Errorf("expected to get exactly one reconcileAfter, got %d", numReconcileAfters)
			}

			pods := &corev1.PodList{}
			if err := r.buildClients[pja.Spec.Cluster].List(context.Background(), pods); err != nil {
				t.Fatalf("failed to list pods: %v", err)
			}
			if n := len(pods.Items); n != 1 {
				t.Errorf("expected exactly one pod, got %d", n)
			}
		}
	}
	pja := &prowv1.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "a"},
		Spec: prowv1.ProwJobSpec{
			Type:           prowv1.PeriodicJob,
			Cluster:        "cluster",
			MaxConcurrency: 1,
			Job:            "max-1",
			PodSpec:        &corev1.PodSpec{Containers: []corev1.Container{{}}},
			Refs:           &prowv1.Refs{},
		},
		Status: prowv1.ProwJobStatus{
			State: prowv1.TriggeredState,
		},
	}
	pjb := pja.DeepCopy()
	pjb.Name = "b"

	t.Run("job level MaxConcurrency", testConcurrency(pja.DeepCopy(), pjb))

	pja.Spec.MaxConcurrency = 0
	pja.Spec.JobQueueName = "queue-1"
	pjb = pja.DeepCopy()
	pjb.Name = "b"
	pjb.Spec.Job = "max-1-same-queue"
	t.Run("queue level JobQueueCapacities", testConcurrency(pja, pjb))
}

// eventuallyConsistentClient executes Patch and Create operations with a delay
// but returns instantly, before the change is applied. This simulates the
// behaviour of a caching client where we can observe our change only after a delay.
type eventuallyConsistentClient struct {
	t *testing.T
	ctrlruntimeclient.Client
}

func (ecc *eventuallyConsistentClient) Patch(ctx context.Context, obj ctrlruntimeclient.Object, patch ctrlruntimeclient.Patch, opts ...ctrlruntimeclient.PatchOption) error {
	go func() {
		time.Sleep(100 * time.Millisecond)
		if err := ecc.Client.Patch(ctx, obj, patch, opts...); err != nil {
			ecc.t.Errorf("eventuallyConsistentClient failed to execute patch: %v", err)
		}
	}()

	return nil
}

func (ecc *eventuallyConsistentClient) Create(ctx context.Context, obj ctrlruntimeclient.Object, opts ...ctrlruntimeclient.CreateOption) error {
	go func() {
		time.Sleep(100 * time.Millisecond)
		if err := ecc.Client.Create(ctx, obj, opts...); err != nil {
			ecc.t.Errorf("eventuallyConsistentClient failed to execute create: %v", err)
		}
	}()

	return nil
}

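// TestStartPodBlocksUntilItHasThePodInCache verifies that startPod only
// returns once the pod it created is visible in the (eventually consistent)
// build client.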
func TestStartPodBlocksUntilItHasThePodInCache(t *testing.T) {
	t.Parallel()
	r := &reconciler{
		log: logrus.NewEntry(logrus.New()),
		buildClients: map[string]buildClient{"default": {
			Client: &eventuallyConsistentClient{t: t, Client: fakectrlruntimeclient.NewClientBuilder().Build()}}},
		config: func() *config.Config { return &config.Config{} },
	}
	pj := &prowv1.ProwJob{
		ObjectMeta: metav1.ObjectMeta{Name: "name"},
		Spec: prowv1.ProwJobSpec{
			PodSpec: &corev1.PodSpec{Containers: []corev1.Container{{}}},
			Refs:    &prowv1.Refs{},
			Type:    prowv1.PeriodicJob,
		},
	}
	if _, _, err := r.startPod(context.Background(), pj); err != nil {
		t.Fatalf("startPod: %v", err)
	}
	if err := r.buildClients["default"].Get(context.Background(), types.NamespacedName{Name: "name"}, &corev1.Pod{}); err != nil {
		t.Errorf("couldn't get pod, this likely means startPod didn't block: %v", err)
	}
}

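// fakeOpener buffers everything written through it in memory and reports each
// completed write on its signal channel.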
type fakeOpener struct {
	io.Opener
	strings.Builder
	signal chan<- bool
}

func (fo *fakeOpener) Writer(ctx context.Context, path string, opts ...io.WriterOptions) (io.WriteCloser, error) {
	fo.Reset()
	return fo, nil
}

func (fo *fakeOpener) Write(b []byte) (int, error) {
	n, err := fo.Builder.Write(b)
	fo.signal <- true
	return n, err
}

func (fo fakeOpener) Close() error {
	return nil
}

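// TestSyncClusterStatus verifies that the cluster status sync loop checks the
// reachability of each known build cluster and uploads the aggregated
// statuses to the configured location, skipping the upload when no location
// is set.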
func TestSyncClusterStatus(t *testing.T) {
	tcs := []struct {
		name             string
		location         string
		statuses         map[string]ClusterStatus
		expectedStatuses map[string]ClusterStatus // Defaults to statuses above if unspecified.
		knownClusters    map[string]rest.Config
		noWriteExpected  bool
	}{
		{
			name:            "No location set, don't upload.",
			statuses:        map[string]ClusterStatus{"default": ClusterStatusReachable},
			knownClusters:   map[string]rest.Config{"default": {}},
			noWriteExpected: true,
		},
		{
			name:          "Single cluster reachable",
			location:      "gs://my-bucket/build-cluster-statuses.json",
			statuses:      map[string]ClusterStatus{"default": ClusterStatusReachable},
			knownClusters: map[string]rest.Config{"default": {}},
		},
		{
			name:             "Single cluster build manager creation failed",
			location:         "gs://my-bucket/build-cluster-statuses.json",
			expectedStatuses: map[string]ClusterStatus{"default": ClusterStatusNoManager},
			knownClusters:    map[string]rest.Config{"default": {}},
		},
		{
			name:     "Multiple clusters mixed reachability",
			location: "gs://my-bucket/build-cluster-statuses.json",
			statuses: map[string]ClusterStatus{
				"default":                     ClusterStatusReachable,
				"test-infra-trusted":          ClusterStatusReachable,
				"cluster-error":               ClusterStatusError,
				"cluster-missing-permissions": ClusterStatusMissingPermissions,
			},
			expectedStatuses: map[string]ClusterStatus{
				"default":                     ClusterStatusReachable,
				"test-infra-trusted":          ClusterStatusReachable,
				"always-sad-build-cluster":    ClusterStatusNoManager,
				"cluster-error":               ClusterStatusError,
				"cluster-missing-permissions": ClusterStatusMissingPermissions,
			},
			knownClusters: map[string]rest.Config{
				"default":                     {},
				"test-infra-trusted":          {},
				"always-sad-build-cluster":    {},
				"cluster-error":               {},
				"cluster-missing-permissions": {},
			},
		},
	}
	successfulFakeClient := &k8sFake.Clientset{}
	successfulFakeClient.Fake.AddReactor("create", "selfsubjectaccessreviews", func(action k8sTesting.Action) (handled bool, ret runtime.Object, err error) {
		r := &authorizationv1.SelfSubjectAccessReview{
			Status: authorizationv1.SubjectAccessReviewStatus{
				Allowed: true,
				Reason:  "Success!",
			},
		}
		return true, r, nil
	})

	erroringFakeClient := &k8sFake.Clientset{}
	erroringFakeClient.Fake.AddReactor("create", "selfsubjectaccessreviews", func(action k8sTesting.Action) (handled bool, ret runtime.Object, err error) {
		return true, nil, errors.New("could not create SelfSubjectAccessReview")
	})

	missingPermissionsFakeClient := &k8sFake.Clientset{}
	missingPermissionsFakeClient.Fake.AddReactor("create", "selfsubjectaccessreviews", func(action k8sTesting.Action) (handled bool, ret runtime.Object, err error) {
		r := &authorizationv1.SelfSubjectAccessReview{
			Status: authorizationv1.SubjectAccessReviewStatus{
				Allowed: false,
				Reason:  "Permissions missing!",
			},
		}
		return true, r, nil
	})

	// Whether the authz check succeeds depends on which fake client backs the
	// build cluster: successfulFakeClient (always allowed), erroringFakeClient
	// (always errors), or missingPermissionsFakeClient (always denied).
	for i := range tcs {
		tc := tcs[i]
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			cfg := func() *config.Config {
				return &config.Config{ProwConfig: config.ProwConfig{Plank: config.Plank{BuildClusterStatusFile: tc.location}}}
			}

			clients := map[string]buildClient{}
			for alias, status := range tc.statuses {
				switch status {
				case ClusterStatusReachable:
					clients[alias] = buildClient{
						Client: fakectrlruntimeclient.NewClientBuilder().Build(),
						ssar:   successfulFakeClient.AuthorizationV1().SelfSubjectAccessReviews(),
					}
				case ClusterStatusError:
					clients[alias] = buildClient{
						Client: fakectrlruntimeclient.NewClientBuilder().Build(),
						ssar:   erroringFakeClient.AuthorizationV1().SelfSubjectAccessReviews(),
					}
				case ClusterStatusMissingPermissions:
					clients[alias] = buildClient{
						Client: fakectrlruntimeclient.NewClientBuilder().Build(),
						ssar:   missingPermissionsFakeClient.AuthorizationV1().SelfSubjectAccessReviews(),
					}
				}
			}
			// The test harness signals true to indicate completion of a write,
			// false to indicate completion of the cluster status sync loop.
			signal := make(chan bool)
			opener := &fakeOpener{signal: signal}
			r := &reconciler{
				config:       cfg,
				log:          logrus.WithField("component", "prow-controller-manager"),
				buildClients: clients,
				opener:       opener,
			}
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			go func() {
				r.syncClusterStatus(time.Millisecond, tc.knownClusters)(ctx)
				signal <- false
			}()
			if !tc.noWriteExpected {
				<-signal // Wait for the first write.
			}
			// No need to sleep to confirm no write occurs; the race detector should handle it.
			cancel()
			for running := range signal {
				if !running {
					break
				}
			}

			content := opener.String()
			if tc.noWriteExpected {
				if content != "" {
					t.Errorf("No write was expected, but found: %q.", content)
				}
			} else {
				result := map[string]ClusterStatus{}
				if err := json.Unmarshal([]byte(content), &result); err != nil {
					t.Fatalf("Failed to unmarshal output: %v.", err)
				}
				expected := tc.expectedStatuses
				if expected == nil {
					expected = tc.statuses
				}
				if diff := deep.Equal(result, expected); diff != nil {
					t.Errorf("result differs from expected: %v", diff)
				}
			}
		})
	}
}