github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/kubernetesapply/reconciler_test.go

package kubernetesapply

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/uuid"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/tilt-dev/tilt/internal/controllers/fake"
	"github.com/tilt-dev/tilt/internal/docker"
	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/internal/k8s/testyaml"
	"github.com/tilt-dev/tilt/internal/localexec"
	"github.com/tilt-dev/tilt/internal/testutils/configmap"
	"github.com/tilt-dev/tilt/internal/timecmp"
	"github.com/tilt-dev/tilt/pkg/apis"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)

// Test constants
const timeout = time.Second * 10
const interval = 5 * time.Millisecond

func TestImageIndexing(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ImageMaps: []string{"image-a", "image-c"},
		},
	}
	f.Create(&ka)

	// Verify we can index one image map.
	ctx := context.Background()
	reqs := f.r.indexer.Enqueue(ctx, &v1alpha1.ImageMap{ObjectMeta: metav1.ObjectMeta{Name: "image-a"}})
	assert.ElementsMatch(t, []reconcile.Request{
		{NamespacedName: types.NamespacedName{Name: "a"}},
	}, reqs)

	kb := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "b",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ImageMaps: []string{"image-b", "image-c"},
		},
	}
	f.Create(&kb)

	// Verify we can index one image map to two applies.
	reqs = f.r.indexer.Enqueue(ctx, &v1alpha1.ImageMap{ObjectMeta: metav1.ObjectMeta{Name: "image-c"}})
	assert.ElementsMatch(t, []reconcile.Request{
		{NamespacedName: types.NamespacedName{Name: "a"}},
		{NamespacedName: types.NamespacedName{Name: "b"}},
	}, reqs)

	// Get the latest ka, since resource version numbers
	// may have changed since its creation and mismatched
	// versions will throw an error on update
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	ka.Spec.ImageMaps = []string{"image-a"}
	f.Update(&ka)

	// Verify we can remove an image map.
	reqs = f.r.indexer.Enqueue(ctx, &v1alpha1.ImageMap{ObjectMeta: metav1.ObjectMeta{Name: "image-c"}})
	assert.ElementsMatch(t, []reconcile.Request{
		{NamespacedName: types.NamespacedName{Name: "b"}},
	}, reqs)
}

func TestBasicApplyYAML(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Contains(f.T(), ka.Status.ResultYAML, "name: sancho")
	assert.Contains(f.T(), ka.Status.ResultYAML, "uid:")

	assert.Contains(t, f.Stdout(),
		"Objects applied to cluster:\n → sancho:deployment\n",
		"Log output did not include applied objects")

	// Make sure that re-reconciling doesn't re-apply the YAML
	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Equal(f.T(), f.kClient.Yaml, "")
}

func TestBasicApplyCmd(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Empty(t, ka.Status.Error)
	assert.NotZero(t, ka.Status.LastApplyTime)
	assert.Equal(t, yamlOut, ka.Status.ResultYAML)

	assert.Contains(t, f.Stdout(),
		"Running cmd: custom-apply-cmd\n Objects applied to cluster:\n → sancho:deployment\n",
		"Log output did not include applied objects")
	assert.Equal(t, 1, strings.Count(f.Stdout(), "Running cmd"))

	// verify that a re-reconcile does NOT re-invoke the command
	f.execer.RegisterCommandError("custom-apply-cmd", errors.New("this should not get invoked"))
	f.MustReconcile(types.NamespacedName{Name: "a"})
	lastApplyTime := ka.Status.LastApplyTime
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Empty(t, ka.Status.Error)
	timecmp.AssertTimeEqual(t, lastApplyTime, ka.Status.LastApplyTime)

	assert.Len(t, f.execer.Calls(), 1)
}

func TestApplyCmdWithImages(t *testing.T) {
	f := newFixture(t)

	f.Create(&v1alpha1.ImageMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: "image-a",
		},
		Status: v1alpha1.ImageMapStatus{
			Image:            "image-a:my-tag",
			ImageFromCluster: "image-a:my-tag",
		},
	})

	applyCmd, _ := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ImageMaps: []string{"image-a"},
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	// The apply command should receive the image map info via environment variables.
	if assert.Len(t, f.execer.Calls(), 1) {
		call := f.execer.Calls()[0]
		assert.Equal(t, []string{
			"TILT_IMAGE_MAP_0=image-a",
			"TILT_IMAGE_0=image-a:my-tag",
		}, call.Cmd.Env)
	}
}

func TestApplyCmdWithKubeconfig(t *testing.T) {
	f := newFixture(t)

	f.Create(&v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "default-cluster",
		},
		Status: v1alpha1.ClusterStatus{
			Connection: &v1alpha1.ClusterConnectionStatus{
				Kubernetes: &v1alpha1.KubernetesClusterConnectionStatus{
					ConfigPath: "/path/to/my/kubeconfig",
				},
			},
		},
	})

	applyCmd, _ := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			Cluster:   "default-cluster",
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	// Both the apply and delete commands should run with KUBECONFIG pointing at the cluster's config.
	if assert.Len(t, f.execer.Calls(), 1) {
		call := f.execer.Calls()[0]
		assert.Equal(t, []string{"custom-apply-cmd"}, call.Cmd.Argv)
		assert.Equal(t, []string{
			"KUBECONFIG=/path/to/my/kubeconfig",
		}, call.Cmd.Env)
	}

	f.Delete(&ka)

	if assert.Len(t, f.execer.Calls(), 2) {
		call := f.execer.Calls()[1]
		assert.Equal(t, []string{"custom-delete-cmd"}, call.Cmd.Argv)
		assert.Equal(t, []string{
			"KUBECONFIG=/path/to/my/kubeconfig",
		}, call.Cmd.Env)
	}
}

func TestBasicApplyCmd_ExecError(t *testing.T) {
	f := newFixture(t)

	f.execer.RegisterCommandError("custom-apply-cmd", errors.New("could not start process"))

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-apply-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	assert.Equal(t, "apply command failed: could not start process", ka.Status.Error)
}

func TestBasicApplyCmd_NonZeroExitCode(t *testing.T) {
	f := newFixture(t)

	f.execer.RegisterCommand("custom-apply-cmd", 77, "whoops", "oh no")

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "a",
			Annotations: map[string]string{v1alpha1.AnnotationManifest: "foo"},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-apply-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	if assert.Equal(t, "apply command exited with status 77\nstdout:\nwhoops\n\n", ka.Status.Error) {
		assert.Contains(t, f.Stdout(), `oh no`)
	}
}

func TestBasicApplyCmd_MalformedYAML(t *testing.T) {
	f := newFixture(t)

	f.execer.RegisterCommand("custom-apply-cmd", 0, "this is not yaml", "")

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-apply-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	if assert.Contains(t, ka.Status.Error, "apply command returned malformed YAML") {
		assert.Contains(t, ka.Status.Error, "stdout:\nthis is not yaml\n")
	}
}

func TestBasicApplyYAML_JobComplete(t *testing.T) {
	f := newFixture(t)

	jobYAML := testyaml.JobYAML
	entities, err := k8s.ParseYAMLFromString(jobYAML)
	require.NoError(t, err, "Invalid JobYAML")
	require.Len(t, entities, 1, "Expected exactly 1 Job entity")
	require.IsType(t, entities[0].Obj, &batchv1.Job{}, "Expected exactly 1 Job entity")
	job := entities[0].Obj.(*batchv1.Job)
	job.SetUID(uuid.NewUUID())
	job.Status = batchv1.JobStatus{
		Conditions: []batchv1.JobCondition{
			{
				Type:   batchv1.JobComplete,
				Status: v1.ConditionTrue,
			},
		},
	}
	f.kClient.UpsertResult = entities

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.JobYAML,
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	expected := []metav1.Condition{
		{
			Type:   "JobComplete",
			Status: metav1.ConditionTrue,
		},
	}

	assert.Equal(f.T(), ka.Status.Conditions, expected,
		"KubernetesApply status should reflect Job completion")

	// Make sure that re-reconciling doesn't clear the conditions
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), ka.Status.Conditions, expected,
		"KubernetesApply status should reflect Job completion")
}

func TestGarbageCollectAllOnDelete_YAML(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")

	f.Delete(&ka)
	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.DeletedYaml, "name: sancho")
}

func TestGarbageCollectAllOnDelete_Cmd(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	f.Delete(&ka)
	assert.False(t, f.Get(types.NamespacedName{Name: "a"}, &ka), "Object was not deleted")

	calls := f.execer.Calls()
	if assert.Len(t, calls, 2, "Expected 2 calls (1x apply + 1x delete)") {
		assert.Equal(t, []string{"custom-delete-cmd"}, calls[1].Cmd.Argv)
	}
}

func TestGarbageCollectAllOnDisable(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
			DisableSource: &v1alpha1.DisableSource{
				ConfigMap: &v1alpha1.ConfigMapDisableSource{
					Name: "test-disable",
					Key:  "isDisabled",
				},
			},
		},
	}
	f.Create(&ka)

	f.setDisabled(ka.GetObjectMeta().Name, false)
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	f.setDisabled(ka.GetObjectMeta().Name, true)

	calls := f.execer.Calls()
	if assert.Len(t, calls, 2, "Expected 2 calls (1x apply + 1x delete)") {
		assert.Equal(t, []string{"custom-apply-cmd"}, calls[0].Cmd.Argv)
		assert.Equal(t, []string{"custom-delete-cmd"}, calls[1].Cmd.Argv)
	}

	f.setDisabled(ka.GetObjectMeta().Name, false)
	calls = f.execer.Calls()
	if assert.Len(t, calls, 3, "Expected 3 calls (2x apply + 1x delete)") {
		assert.Equal(t, []string{"custom-apply-cmd"}, calls[2].Cmd.Argv)
	}

	// Confirm the k8s client never deletes resources directly.
	assert.Equal(t, "", f.kClient.DeletedYaml)
}

func TestGarbageCollectPartial(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: fmt.Sprintf("%s\n---\n%s\n", testyaml.SanchoYAML, testyaml.PodDisruptionBudgetYAML),
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.Contains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	ka.Spec.YAML = testyaml.SanchoYAML
	f.Update(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.NotContains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")
	assert.Contains(f.T(), f.kClient.DeletedYaml, "name: infra-kafka-zookeeper")
}

func TestGarbageCollectAfterErrorDuringApply(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: fmt.Sprintf("%s\n---\n%s\n", testyaml.SanchoYAML, testyaml.PodDisruptionBudgetYAML),
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.Contains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")

	f.kClient.UpsertError = errors.New("oh no")

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	ka.Spec.YAML = testyaml.SanchoYAML
	f.Update(&ka)

	// because the apply (upsert) returned an error, no GC should have happened yet
	f.MustReconcile(types.NamespacedName{Name: "a"})
	if assert.Empty(t, f.kClient.DeletedYaml) {
		assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
		assert.Contains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")
	}

	assert.Contains(f.T(), f.Stdout(), "Tried to apply objects")
}

func TestGarbageCollect_DeleteCmdNotInvokedOnChange(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-1", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	applyCmd, yamlOut = f.createApplyCmd("custom-apply-2", testyaml.JobYAML)
	ka.Spec.ApplyCmd = &applyCmd
	f.Update(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	calls := f.execer.Calls()
	if assert.Len(t, calls, 2, "Expected 2x calls (both to apply)") {
		for i := range calls {
			assert.Equal(t, []string{fmt.Sprintf("custom-apply-%d", i+1)}, calls[i].Cmd.Argv)
		}
	}

	if assert.NoError(t, f.kClient.DeleteError,
		"Delete should not have been invoked (so no error should have occurred)") {
		assert.Empty(t, f.kClient.DeletedYaml,
			"Delete should not have been invoked (so no YAML should have been deleted)")
	}
}

func TestRestartOn(t *testing.T) {
	f := newFixture(t)

	f.Create(&v1alpha1.FileWatch{
		ObjectMeta: metav1.ObjectMeta{Name: "fw"},
		Spec:       v1alpha1.FileWatchSpec{WatchedPaths: []string{"/fake/dir"}},
	})

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
			RestartOn: &v1alpha1.RestartOnSpec{
				FileWatches: []string{"fw"},
			},
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Contains(f.T(), ka.Status.ResultYAML, "name: sancho")
	assert.Contains(f.T(), ka.Status.ResultYAML, "uid:")
	lastApply := ka.Status.LastApplyTime

	// Make sure that re-reconciling w/o changes doesn't re-apply the YAML
	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), f.kClient.Yaml, "")
	timecmp.AssertTimeEqual(t, lastApply, ka.Status.LastApplyTime)

	// Fake a FileWatch event - now re-reconciling should re-apply the YAML
	var fw v1alpha1.FileWatch
	f.MustGet(types.NamespacedName{Name: "fw"}, &fw)
	ts := apis.NowMicro()
	fw.Status.LastEventTime = ts
	fw.Status.FileEvents = append(fw.Status.FileEvents, v1alpha1.FileEvent{
		Time:      ts,
		SeenFiles: []string{"/fake/dir/file"},
	})
	f.UpdateStatus(&fw)

	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.Truef(t, ka.Status.LastApplyTime.After(lastApply.Time),
		"Last apply time %s should have been after previous apply time %s",
		ka.Status.LastApplyTime.Format(time.RFC3339Nano),
		lastApply.Format(time.RFC3339Nano))
	lastApply = ka.Status.LastApplyTime

	// One last time - make sure that re-reconciling w/o changes doesn't re-apply the YAML
	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), f.kClient.Yaml, "")
	timecmp.AssertTimeEqual(f.T(), lastApply, ka.Status.LastApplyTime)
}

func TestIgnoreManagedObjects(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
			Annotations: map[string]string{
				v1alpha1.AnnotationManagedBy: "buildcontrol",
			},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	nn := types.NamespacedName{Name: "a"}
	f.MustReconcile(nn)
	assert.Empty(f.T(), f.kClient.Yaml)

	// no apply should happen since the object is managed by the engine
	f.MustGet(nn, &ka)
	assert.Empty(f.T(), ka.Status.ResultYAML)
	assert.Zero(f.T(), ka.Status.LastApplyTime)

	result := f.r.ForceApply(f.Context(), nn, ka.Spec, nil, nil)
	assert.Contains(f.T(), result.ResultYAML, "sancho")
	assert.True(f.T(), !result.LastApplyTime.IsZero())
	assert.True(f.T(), !result.LastApplyStartTime.IsZero())
	assert.Equal(f.T(), result.Error, "")

	// ForceApply must NOT update the apiserver.
	f.MustGet(nn, &ka)
	assert.Empty(f.T(), ka.Status.ResultYAML)
	assert.Zero(f.T(), ka.Status.LastApplyTime)

	f.MustReconcile(nn)
	f.MustGet(nn, &ka)
	assert.Equal(f.T(), result, ka.Status)
}

func TestForceDelete(t *testing.T) {
	f := newFixture(t)
	nn := types.NamespacedName{Name: "a"}
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
			Annotations: map[string]string{
				v1alpha1.AnnotationManagedBy: "buildcontrol",
			},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	err := f.r.ForceDelete(f.Context(), nn, ka.Spec, nil, "testing")
	assert.Nil(f.T(), err)
	assert.Contains(f.T(), f.kClient.DeletedYaml, "sancho")
}

func TestForceDeleteWithCmd(t *testing.T) {
	f := newFixture(t)

	nn := types.NamespacedName{Name: "a"}
	applyCmd, _ := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
			Annotations: map[string]string{
				v1alpha1.AnnotationManagedBy: "buildcontrol",
			},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	err := f.r.ForceDelete(f.Context(), nn, ka.Spec, nil, "testing")
	assert.Nil(f.T(), err)

	calls := f.execer.Calls()
	if assert.Len(t, calls, 1, "Expected 1x delete calls") {
		assert.Equal(t, []string{"custom-delete-cmd"}, calls[0].Cmd.Argv)
	}
}

func TestDisableByConfigmap(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
			DisableSource: &v1alpha1.DisableSource{
				ConfigMap: &v1alpha1.ConfigMapDisableSource{
					Name: "test-disable",
					Key:  "isDisabled",
				},
			},
		},
	}
	f.Create(&ka)

	f.setDisabled(ka.GetObjectMeta().Name, false)

	f.setDisabled(ka.GetObjectMeta().Name, true)

	f.setDisabled(ka.GetObjectMeta().Name, false)
}

func (f *fixture) requireKaMatchesInApi(name string, matcher func(ka *v1alpha1.KubernetesApply) bool) *v1alpha1.KubernetesApply {
	ka := v1alpha1.KubernetesApply{}

	require.Eventually(f.T(), func() bool {
		f.MustGet(types.NamespacedName{Name: name}, &ka)
		return matcher(&ka)
	}, timeout, interval)

	return &ka
}

func (f *fixture) setDisabled(name string, isDisabled bool) {
	ka := v1alpha1.KubernetesApply{}
	f.MustGet(types.NamespacedName{Name: name}, &ka)

	require.NotNil(f.T(), ka.Spec.DisableSource)
	require.NotNil(f.T(), ka.Spec.DisableSource.ConfigMap)

	ds := ka.Spec.DisableSource.ConfigMap
	err := configmap.UpsertDisableConfigMap(f.Context(), f.Client, ds.Name, ds.Key, isDisabled)
	require.NoError(f.T(), err)

	_, err = f.Reconcile(types.NamespacedName{Name: name})
	require.NoError(f.T(), err)

	f.requireKaMatchesInApi(name, func(ka *v1alpha1.KubernetesApply) bool {
		return ka.Status.DisableStatus != nil && ka.Status.DisableStatus.Disabled == isDisabled
	})

	// the KA reconciler only creates a KD if the yaml is already in the KA's status,
	// which means we need a second call to Reconcile to get the KD
	_, err = f.Reconcile(types.NamespacedName{Name: name})
	require.NoError(f.T(), err)

	kd := v1alpha1.KubernetesDiscovery{}
	kdExists := f.Get(types.NamespacedName{Name: name}, &kd)

	if isDisabled {
		require.False(f.T(), kdExists)

		if ka.Spec.DeleteCmd == nil {
			require.Contains(f.T(), f.kClient.DeletedYaml, "name: sancho")
		} else {
			// Must run the delete cmd instead of deleting resources with our k8s client.
			require.Equal(f.T(), f.kClient.DeletedYaml, "")
		}

		// Reset the deletedYaml so it doesn't interfere with other tests
		f.kClient.DeletedYaml = ""
	} else {
		require.True(f.T(), kdExists)
	}
}

type fixture struct {
	*fake.ControllerFixture
	r       *Reconciler
	kClient *k8s.FakeK8sClient
	execer  *localexec.FakeExecer
}

func newFixture(t *testing.T) *fixture {
	kClient := k8s.NewFakeK8sClient(t)
	cfb := fake.NewControllerFixtureBuilder(t)
	dockerClient := docker.NewFakeClient()

	// Make the fake ImageExists always return true, which is the behavior we want
	// when testing the reconciler
	dockerClient.ImageAlwaysExists = true

	execer := localexec.NewFakeExecer(t)

	r := NewReconciler(cfb.Client, kClient, v1alpha1.NewScheme(), cfb.Store, execer)

	f := &fixture{
		ControllerFixture: cfb.Build(r),
		r:                 r,
		kClient:           kClient,
		execer:            execer,
	}
	f.Create(&v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "default",
		},
		Status: v1alpha1.ClusterStatus{
			Connection: &v1alpha1.ClusterConnectionStatus{
				Kubernetes: &v1alpha1.KubernetesClusterConnectionStatus{
					Context: "default",
				},
			},
		},
	})

	return f
}

// createApplyCmd creates a KubernetesApplyCmd that uses the passed YAML to generate simulated stdout via the FakeExecer.
func (f *fixture) createApplyCmd(name string, yaml string) (v1alpha1.KubernetesApplyCmd, string) {
	f.T().Helper()

	require.NotEmpty(f.T(), yaml, "applyCmd YAML cannot be blank")

	entities, err := k8s.ParseYAMLFromString(yaml)
	require.NoErrorf(f.T(), err, "Could not parse YAML: %s", yaml)
	for i := range entities {
		entities[i].SetUID(string(uuid.NewUUID()))
	}
	yamlOut, err := k8s.SerializeSpecYAML(entities)
	require.NoErrorf(f.T(), err, "Failed to re-serialize YAML for entities: %s", spew.Sdump(entities))

	f.execer.RegisterCommand(name, 0, yamlOut, "")
	return v1alpha1.KubernetesApplyCmd{
		Args: []string{name},
	}, yamlOut
}