/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler_test

import (
	"context"
	"errors"
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	testingclient "k8s.io/client-go/testing"
	"sigs.k8s.io/controller-runtime/pkg/client"
	fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	prowv1 "sigs.k8s.io/prow/pkg/apis/prowjobs/v1"
	"sigs.k8s.io/prow/pkg/client/clientset/versioned/scheme"
	"sigs.k8s.io/prow/pkg/config"
	"sigs.k8s.io/prow/pkg/scheduler"
	"sigs.k8s.io/prow/pkg/scheduler/strategy"
)

// fakeStrategy is a canned scheduling strategy for tests: Schedule always
// returns the configured cluster and error, ignoring the ProwJob entirely.
type fakeStrategy struct {
	cluster string
	err     error
}

// Schedule implements strategy.Interface (presumably — the interface is
// declared in the strategy package; verify there) by returning the fixed
// cluster/error pair configured on the fake.
func (fs *fakeStrategy) Schedule(context.Context, *prowv1.ProwJob) (strategy.Result, error) {
	return strategy.Result{Cluster: fs.cluster}, fs.err
}

// Alright our controller-runtime dependency is old as hell so I have to
// implement interceptors on my own.
// fakeTracker wraps a client-go ObjectTracker and lets a test inject a fixed
// error for a given verb ("GET" or "UPDATE"). All other calls fall through to
// the embedded tracker, so the fake controller-runtime client otherwise
// behaves normally.
type fakeTracker struct {
	testingclient.ObjectTracker
	errors map[string]error
}

// Get returns the injected "GET" error when one is configured; otherwise it
// delegates to the wrapped tracker.
func (ft *fakeTracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) {
	if err, exists := ft.errors["GET"]; exists {
		return nil, err
	}
	return ft.ObjectTracker.Get(gvr, ns, name)
}

// Update returns the injected "UPDATE" error when one is configured; otherwise
// it delegates to the wrapped tracker. NOTE(review): the reconciler appears to
// patch the ProwJob, and the fake client seems to route patches through the
// tracker's Update — confirm against the controller-runtime version in use.
func (ft *fakeTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
	if err, exists := ft.errors["UPDATE"]; exists {
		return err
	}
	return ft.ObjectTracker.Update(gvr, obj, ns)
}

// TestReconcile drives scheduler.Reconciler.Reconcile against a fake client
// seeded with (at most) one ProwJob and a fakeStrategy, covering:
//   - successful cluster assignment for the k8s and tekton agents,
//   - a request for a ProwJob that no longer exists (no-op, no error),
//   - injected client GET/UPDATE failures and the wrapped error messages,
//   - a strategy failure,
//   - a ProwJob with no agent set (cluster passes through unchanged).
func TestReconcile(t *testing.T) {
	for _, tc := range []struct {
		name string
		// pj is the ProwJob pre-loaded into the fake client; nil seeds nothing.
		pj      *prowv1.ProwJob
		request reconcile.Request
		// cluster is what the fakeStrategy assigns.
		cluster         string
		schedulingError error
		// clientErrors maps a verb ("GET"/"UPDATE") to an injected failure.
		clientErrors map[string]error
		// wantPJ is the expected stored ProwJob after a successful reconcile.
		wantPJ    *prowv1.ProwJob
		wantError error
	}{
		{
			name: "Successfully assign a cluster when agent is k8s",
			pj: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "1"},
				Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent},
			},
			request: reconcile.Request{NamespacedName: types.NamespacedName{Name: "pj", Namespace: "ns"}},
			cluster: "foo",
			// ResourceVersion bumps to "2" because the reconciler writes the job back.
			wantPJ: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "2"},
				Spec:       prowv1.ProwJobSpec{Cluster: "foo", Agent: prowv1.KubernetesAgent},
				Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
			},
		},
		{
			name: "Successfully assign a cluster when agent is tekton",
			pj: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "1"},
				Spec:       prowv1.ProwJobSpec{Agent: prowv1.TektonAgent},
			},
			request: reconcile.Request{NamespacedName: types.NamespacedName{Name: "pj", Namespace: "ns"}},
			cluster: "foo",
			wantPJ: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "2"},
				Spec:       prowv1.ProwJobSpec{Cluster: "foo", Agent: prowv1.TektonAgent},
				Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
			},
		},
		{
			// No pj seeded: the reconciler must treat NotFound as a no-op.
			name:    "Skip ProwJob not found",
			request: reconcile.Request{NamespacedName: types.NamespacedName{Name: "pj", Namespace: "ns"}},
		},
		{
			name:         "Error getting Prowjob",
			request:      reconcile.Request{NamespacedName: types.NamespacedName{Name: "pj", Namespace: "ns"}},
			clientErrors: map[string]error{"GET": errors.New("expected")},
			wantError:    errors.New("get prowjob pj: expected"),
		},
		{
			name:    "Error patching Prowjob",
			request: reconcile.Request{NamespacedName: types.NamespacedName{Name: "pj", Namespace: "ns"}},
			pj: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "1"},
				Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent},
			},
			cluster: "foo",
			// wantPJ is never compared here: the wantError branch below returns
			// before the stored-object check.
			wantPJ: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "2"},
				Spec:       prowv1.ProwJobSpec{Cluster: "foo"},
				Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
			},
			clientErrors: map[string]error{"UPDATE": errors.New("expected")},
			wantError:    errors.New("patch prowjob: expected"),
		},
		{
			name: "Scheduling error",
			pj: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "1"},
				Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent},
			},
			request:         reconcile.Request{NamespacedName: types.NamespacedName{Name: "pj", Namespace: "ns"}},
			schedulingError: errors.New("expected"),
			wantError:       errors.New("schedule prowjob pj: expected"),
		},
		{
			// No agent on the job: the pre-set cluster must survive untouched.
			name: "No agent set then schedule passthrough",
			pj: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "1"},
				Spec:       prowv1.ProwJobSpec{Cluster: "untouched"},
			},
			request: reconcile.Request{NamespacedName: types.NamespacedName{Name: "pj", Namespace: "ns"}},
			cluster: "untouched",
			wantPJ: &prowv1.ProwJob{
				ObjectMeta: v1.ObjectMeta{Name: "pj", Namespace: "ns", ResourceVersion: "2"},
				Spec:       prowv1.ProwJobSpec{Cluster: "untouched"},
				Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
			},
		},
	} {
		tc := tc // capture range variable for t.Parallel (pre-Go-1.22 semantics)
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			// Hand-rolled tracker so we can inject GET/UPDATE failures; see
			// fakeTracker above.
			tracker := testingclient.NewObjectTracker(scheme.Scheme, scheme.Codecs.UniversalDecoder())
			fakeTracker := fakeTracker{ObjectTracker: tracker, errors: tc.clientErrors}

			builder := fakectrlruntimeclient.NewClientBuilder().WithObjectTracker(&fakeTracker)
			// Builder doesn't like nil
			if tc.pj != nil {
				builder = builder.WithObjects(tc.pj)
			}
			pjClient := builder.Build()

			// The config getter returns nil; the strategy factory ignores it and
			// hands back the canned fakeStrategy.
			r := scheduler.NewReconciler(pjClient,
				func() *config.Config { return nil },
				func(_ *config.Config) strategy.Interface {
					return &fakeStrategy{cluster: tc.cluster, err: tc.schedulingError}
				})
			_, err := r.Reconcile(context.TODO(), tc.request)

			// Compare errors by message; any error outcome ends the case here,
			// so wantPJ is only checked on clean reconciles.
			if tc.wantError != nil && err != nil {
				if tc.wantError.Error() != err.Error() {
					t.Errorf("Expected error %s but got %s", tc.wantError, err)
				}
				return
			} else if tc.wantError != nil && err == nil {
				t.Errorf("Expected error %s but got nil", tc.wantError)
				return
			} else if tc.wantError == nil && err != nil {
				t.Errorf("Expected error nil but got %s", err)
				return
			}

			pjs := prowv1.ProwJobList{}
			if err := pjClient.List(context.TODO(), &pjs); err != nil {
				// It's just not supposed to happen
				t.Fatalf("Couldn't get PJs from the fake client: %s", err)
			}

			if tc.wantPJ != nil {
				if len(pjs.Items) != 1 {
					t.Errorf("Expected 1 ProwJob but got %d", len(pjs.Items))
					return
				}
				if diff := cmp.Diff(tc.wantPJ, &pjs.Items[0]); diff != "" {
					t.Errorf("Unexpected ProwJob: %s", diff)
				}
			}
		})
	}
}

// TestConfigHotReload verifies that the reconciler re-reads the config on
// every reconcile: the i-th config is installed right before reconciling the
// i-th ProwJob, so each job must be scheduled under a different strategy.
// Note: each case assumes len(configs) == len(pjs) — one reconcile per config.
func TestConfigHotReload(t *testing.T) {
	for _, tc := range []struct {
		name string
		// configs are installed one at a time, in order, before each reconcile.
		configs []config.Config
		pjs     []client.Object
		// wantPJs is compared (in list order) against everything stored at the end.
		wantPJs []prowv1.ProwJob
	}{
		{
			name: "Switch from Passthrough to Failover",
			configs: []config.Config{
				// Empty config: passthrough scheduling, cluster stays "foo".
				{},
				// Failover config: "foo" is remapped to "bar".
				{
					ProwConfig: config.ProwConfig{Scheduler: config.Scheduler{
						Failover: &config.FailoverScheduling{ClusterMappings: map[string]string{"foo": "bar"}},
					}},
				},
			},
			pjs: []client.Object{
				&prowv1.ProwJob{
					ObjectMeta: v1.ObjectMeta{Name: "job1", Namespace: "", ResourceVersion: "1"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "foo"},
				},
				&prowv1.ProwJob{
					ObjectMeta: v1.ObjectMeta{Name: "job2", Namespace: "", ResourceVersion: "1"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "foo"},
				},
			},
			wantPJs: []prowv1.ProwJob{
				{
					ObjectMeta: v1.ObjectMeta{Name: "job1", Namespace: "", ResourceVersion: "2"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "foo"},
					Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
				},
				{
					ObjectMeta: v1.ObjectMeta{Name: "job2", Namespace: "", ResourceVersion: "2"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "bar"},
					Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
				},
			},
		},
		{
			name: "Modify Failover",
			configs: []config.Config{
				{
					ProwConfig: config.ProwConfig{Scheduler: config.Scheduler{
						Failover: &config.FailoverScheduling{ClusterMappings: map[string]string{"foo": "bar"}},
					}},
				},
				// Second config adds a mapping, which must apply to job2.
				{
					ProwConfig: config.ProwConfig{Scheduler: config.Scheduler{
						Failover: &config.FailoverScheduling{ClusterMappings: map[string]string{
							"foo":   "bar",
							"super": "duper",
						}},
					}},
				},
			},
			pjs: []client.Object{
				&prowv1.ProwJob{
					ObjectMeta: v1.ObjectMeta{Name: "job1", Namespace: "", ResourceVersion: "1"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "foo"},
				},
				&prowv1.ProwJob{
					ObjectMeta: v1.ObjectMeta{Name: "job2", Namespace: "", ResourceVersion: "1"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "super"},
				},
			},
			wantPJs: []prowv1.ProwJob{
				{
					ObjectMeta: v1.ObjectMeta{Name: "job1", Namespace: "", ResourceVersion: "2"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "bar"},
					Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
				},
				{
					ObjectMeta: v1.ObjectMeta{Name: "job2", Namespace: "", ResourceVersion: "2"},
					Spec:       prowv1.ProwJobSpec{Agent: prowv1.KubernetesAgent, Cluster: "duper"},
					Status:     prowv1.ProwJobStatus{State: prowv1.TriggeredState},
				},
			},
		},
	} {
		tc := tc // capture range variable for t.Parallel (pre-Go-1.22 semantics)
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			// cfg is read through the closure below, so reassigning it between
			// reconciles simulates a config hot-reload.
			var cfg *config.Config
			pjClient := fakectrlruntimeclient.NewClientBuilder().WithObjects(tc.pjs...).Build()
			reconciler := scheduler.NewReconciler(pjClient, func() *config.Config { return cfg }, strategy.Get)

			for i := range tc.configs {
				cfg = &tc.configs[i]
				request := reconcile.Request{NamespacedName: types.NamespacedName{Name: tc.pjs[i].GetName()}}
				if _, err := reconciler.Reconcile(context.TODO(), request); err != nil {
					t.Fatalf("Failed to reconcile %v: %s", request, err)
				}
			}

			pjs := prowv1.ProwJobList{}
			if err := pjClient.List(context.TODO(), &pjs); err != nil {
				// It's just not supposed to happen
				t.Fatalf("Couldn't get PJs from the fake client: %s", err)
			}

			if diff := cmp.Diff(tc.wantPJs, pjs.Items); diff != "" {
				t.Errorf("Unexpected ProwJob: %s", diff)
			}
		})
	}
}