package kubernetesapply

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/uuid"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	"github.com/tilt-dev/tilt/internal/controllers/fake"
	"github.com/tilt-dev/tilt/internal/docker"
	"github.com/tilt-dev/tilt/internal/k8s"
	"github.com/tilt-dev/tilt/internal/k8s/testyaml"
	"github.com/tilt-dev/tilt/internal/localexec"
	"github.com/tilt-dev/tilt/internal/testutils/configmap"
	"github.com/tilt-dev/tilt/internal/timecmp"
	"github.com/tilt-dev/tilt/pkg/apis"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)

// Test constants
//
// timeout and interval bound the require.Eventually polling loop in
// requireKaMatchesInApi (see below).
const timeout = time.Second * 10
const interval = 5 * time.Millisecond

// TestImageIndexing verifies that the reconciler's indexer maps ImageMap
// names to the KubernetesApply objects that reference them in
// Spec.ImageMaps, and that the index is updated when a spec changes.
func TestImageIndexing(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ImageMaps: []string{"image-a", "image-c"},
		},
	}
	f.Create(&ka)

	// Verify we can index one image map.
	ctx := context.Background()
	reqs := f.r.indexer.Enqueue(ctx, &v1alpha1.ImageMap{ObjectMeta: metav1.ObjectMeta{Name: "image-a"}})
	assert.ElementsMatch(t, []reconcile.Request{
		{NamespacedName: types.NamespacedName{Name: "a"}},
	}, reqs)

	kb := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "b",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ImageMaps: []string{"image-b", "image-c"},
		},
	}
	f.Create(&kb)

	// Verify we can index one image map to two applies.
	reqs = f.r.indexer.Enqueue(ctx, &v1alpha1.ImageMap{ObjectMeta: metav1.ObjectMeta{Name: "image-c"}})
	assert.ElementsMatch(t, []reconcile.Request{
		{NamespacedName: types.NamespacedName{Name: "a"}},
		{NamespacedName: types.NamespacedName{Name: "b"}},
	}, reqs)

	// Get the latest ka, since resource version numbers
	// may have changed since its creation and mismatched
	// versions will throw an error on update
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	ka.Spec.ImageMaps = []string{"image-a"}
	f.Update(&ka)

	// Verify we can remove an image map.
	reqs = f.r.indexer.Enqueue(ctx, &v1alpha1.ImageMap{ObjectMeta: metav1.ObjectMeta{Name: "image-c"}})
	assert.ElementsMatch(t, []reconcile.Request{
		{NamespacedName: types.NamespacedName{Name: "b"}},
	}, reqs)
}

// TestBasicApplyYAML verifies the happy path for a YAML-based apply:
// reconciling pushes the spec YAML to the (fake) k8s client, populates
// Status.ResultYAML (including cluster-assigned UIDs), logs the applied
// objects, and does not re-apply on a no-op re-reconcile.
func TestBasicApplyYAML(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Contains(f.T(), ka.Status.ResultYAML, "name: sancho")
	assert.Contains(f.T(), ka.Status.ResultYAML, "uid:")

	assert.Contains(t, f.Stdout(),
		"Objects applied to cluster:\n     → sancho:deployment\n",
		"Log output did not include applied objects")

	// Make sure that re-reconciling doesn't re-apply the YAML.
	// Clearing kClient.Yaml lets us detect a second apply.
	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Equal(f.T(), f.kClient.Yaml, "")
}

// TestBasicApplyCmd verifies the happy path for a custom apply command:
// creation invokes the command once, its stdout becomes Status.ResultYAML,
// and a no-op re-reconcile does not invoke the command again.
func TestBasicApplyCmd(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Empty(t, ka.Status.Error)
	assert.NotZero(t, ka.Status.LastApplyTime)
	assert.Equal(t, yamlOut, ka.Status.ResultYAML)

	assert.Contains(t, f.Stdout(),
		"Running cmd: custom-apply-cmd\n     Objects applied to cluster:\n       → sancho:deployment\n",
		"Log output did not include applied objects")
	assert.Equal(t, 1, strings.Count(f.Stdout(), "Running cmd"))

	// verify that a re-reconcile does NOT re-invoke the command
	// (if it did, the registered error would show up in Status.Error)
	f.execer.RegisterCommandError("custom-apply-cmd", errors.New("this should not get invoked"))
	f.MustReconcile(types.NamespacedName{Name: "a"})
	lastApplyTime := ka.Status.LastApplyTime
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Empty(t, ka.Status.Error)
	timecmp.AssertTimeEqual(t, lastApplyTime, ka.Status.LastApplyTime)

	assert.Len(t, f.execer.Calls(), 1)
}

// TestApplyCmdWithImages verifies that image map data is passed to a custom
// apply command through TILT_IMAGE_MAP_*/TILT_IMAGE_* environment variables,
// alongside the KUBECONFIG of the default cluster.
func TestApplyCmdWithImages(t *testing.T) {
	f := newFixture(t)

	f.Create(&v1alpha1.ImageMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: "image-a",
		},
		Status: v1alpha1.ImageMapStatus{
			Image:            "image-a:my-tag",
			ImageFromCluster: "image-a:my-tag",
		},
	})

	applyCmd, _ := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ImageMaps: []string{"image-a"},
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	if assert.Len(t, f.execer.Calls(), 1) {
		call := f.execer.Calls()[0]
		assert.Equal(t, []string{
			"TILT_IMAGE_MAP_0=image-a",
			"TILT_IMAGE_0=image-a:my-tag",
			"KUBECONFIG=/path/to/default/kubeconfig",
		}, call.Cmd.Env)
	}
}

// TestApplyCmdWithKubeconfig verifies that when a KubernetesApply targets a
// specific Cluster, both the apply and delete commands receive that
// cluster's kubeconfig path via the KUBECONFIG environment variable.
func TestApplyCmdWithKubeconfig(t *testing.T) {
	f := newFixture(t)

	f.Create(&v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "default-cluster",
		},
		Status: v1alpha1.ClusterStatus{
			Connection: &v1alpha1.ClusterConnectionStatus{
				Kubernetes: &v1alpha1.KubernetesClusterConnectionStatus{
					ConfigPath: "/path/to/my/kubeconfig",
				},
			},
		},
	})

	applyCmd, _ := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			Cluster:   "default-cluster",
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	if assert.Len(t, f.execer.Calls(), 1) {
		call := f.execer.Calls()[0]
		assert.Equal(t, []string{"custom-apply-cmd"}, call.Cmd.Argv)
		assert.Equal(t, []string{
			"KUBECONFIG=/path/to/my/kubeconfig",
		}, call.Cmd.Env)
	}

	f.Delete(&ka)

	// Deletion should run the delete command with the same KUBECONFIG.
	if assert.Len(t, f.execer.Calls(), 2) {
		call := f.execer.Calls()[1]
		assert.Equal(t, []string{"custom-delete-cmd"}, call.Cmd.Argv)
		assert.Equal(t, []string{
			"KUBECONFIG=/path/to/my/kubeconfig",
		}, call.Cmd.Env)
	}

}

// TestBasicApplyCmd_ExecError verifies that a process-start failure of the
// custom apply command is surfaced in Status.Error.
func TestBasicApplyCmd_ExecError(t *testing.T) {
	f := newFixture(t)

	f.execer.RegisterCommandError("custom-apply-cmd", errors.New("could not start process"))

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-apply-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	assert.Equal(t, "apply command failed: could not start process", ka.Status.Error)
}

// TestBasicApplyCmd_NonZeroExitCode verifies that a non-zero exit from the
// custom apply command puts the exit status + stdout into Status.Error and
// that stderr ends up in the log output.
func TestBasicApplyCmd_NonZeroExitCode(t *testing.T) {
	f := newFixture(t)

	// exit code 77, stdout "whoops", stderr "oh no"
	f.execer.RegisterCommand("custom-apply-cmd", 77, "whoops", "oh no")

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "a",
			Annotations: map[string]string{v1alpha1.AnnotationManifest: "foo"},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-apply-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	if assert.Equal(t, "apply command exited with status 77\nstdout:\nwhoops\n\n", ka.Status.Error) {
		assert.Contains(t, f.Stdout(), `oh no`)
	}
}

// TestBasicApplyCmd_MalformedYAML verifies that an apply command which exits
// successfully but prints non-YAML output is reported as an error, with the
// offending stdout included for debugging.
func TestBasicApplyCmd_MalformedYAML(t *testing.T) {
	f := newFixture(t)

	f.execer.RegisterCommand("custom-apply-cmd", 0, "this is not yaml", "")

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-apply-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	if assert.Contains(t, ka.Status.Error, "apply command returned malformed YAML") {
		assert.Contains(t, ka.Status.Error, "stdout:\nthis is not yaml\n")
	}
}

// TestBasicApplyYAML_JobComplete verifies that a Job whose (faked) cluster
// state carries a JobComplete condition gets that condition reflected on
// KubernetesApply.Status.Conditions, and that re-reconciling preserves it.
func TestBasicApplyYAML_JobComplete(t *testing.T) {
	f := newFixture(t)

	jobYAML := testyaml.JobYAML
	entities, err := k8s.ParseYAMLFromString(jobYAML)
	require.NoError(t, err, "Invalid JobYAML")
	require.Len(t, entities, 1, "Expected exactly 1 Job entity")
	require.IsType(t, entities[0].Obj, &batchv1.Job{}, "Expected exactly 1 Job entity")
	job := entities[0].Obj.(*batchv1.Job)
	job.SetUID(uuid.NewUUID())
	job.Status = batchv1.JobStatus{
		Conditions: []batchv1.JobCondition{
			{
				Type:   batchv1.JobComplete,
				Status: v1.ConditionTrue,
			},
		},
	}
	// Make the fake client return this completed Job as the upsert result.
	f.kClient.UpsertResult = entities

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.JobYAML,
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)

	expected := []metav1.Condition{
		{
			Type:   "JobComplete",
			Status: metav1.ConditionTrue,
		},
	}

	assert.Equal(f.T(), ka.Status.Conditions, expected,
		"KubernetesApply status should reflect Job completion")

	// Make sure that re-reconciling doesn't clear the conditions
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), ka.Status.Conditions, expected,
		"KubernetesApply status should reflect Job completion")
}

// TestGarbageCollectAllOnDelete_YAML verifies that deleting a YAML-based
// KubernetesApply garbage-collects the applied objects via the k8s client.
func TestGarbageCollectAllOnDelete_YAML(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")

	f.Delete(&ka)
	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.DeletedYaml, "name: sancho")
}

// TestGarbageCollectAllOnDelete_Cmd verifies that deleting a command-based
// KubernetesApply runs the custom delete command instead of deleting via the
// k8s client.
func TestGarbageCollectAllOnDelete_Cmd(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	f.Delete(&ka)
	assert.False(t, f.Get(types.NamespacedName{Name: "a"}, &ka), "Object was not deleted")

	calls := f.execer.Calls()
	if assert.Len(t, calls, 2, "Expected 2 calls (1x apply + 1x delete)") {
		assert.Equal(t, []string{"custom-delete-cmd"}, calls[1].Cmd.Argv)
	}
}

// TestGarbageCollectAllOnDisable verifies that disabling a command-based
// KubernetesApply runs the delete command, re-enabling runs the apply
// command again, and the k8s client is never used to delete directly.
func TestGarbageCollectAllOnDisable(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
			DisableSource: &v1alpha1.DisableSource{
				ConfigMap: &v1alpha1.ConfigMapDisableSource{
					Name: "test-disable",
					Key:  "isDisabled",
				},
			},
		},
	}
	f.Create(&ka)

	f.setDisabled(ka.GetObjectMeta().Name, false)
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	f.setDisabled(ka.GetObjectMeta().Name, true)

	calls := f.execer.Calls()
	if assert.Len(t, calls, 2, "Expected 2 calls (1x apply + 1x delete)") {
		assert.Equal(t, []string{"custom-apply-cmd"}, calls[0].Cmd.Argv)
		assert.Equal(t, []string{"custom-delete-cmd"}, calls[1].Cmd.Argv)
	}

	f.setDisabled(ka.GetObjectMeta().Name, false)
	calls = f.execer.Calls()
	if assert.Len(t, calls, 3, "Expected 3 calls (2x apply + 1x delete)") {
		assert.Equal(t, []string{"custom-apply-cmd"}, calls[2].Cmd.Argv)
	}

	// Confirm the k8s client never deletes resources directly.
	assert.Equal(t, "", f.kClient.DeletedYaml)
}

// TestGarbageCollectPartial verifies that removing one object from a
// multi-object YAML spec garbage-collects only the removed object on the
// next reconcile.
func TestGarbageCollectPartial(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: fmt.Sprintf("%s\n---\n%s\n", testyaml.SanchoYAML, testyaml.PodDisruptionBudgetYAML),
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.Contains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")

	// Shrink the spec to just the sancho deployment.
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	ka.Spec.YAML = testyaml.SanchoYAML
	f.Update(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.NotContains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")
	assert.Contains(f.T(), f.kClient.DeletedYaml, "name: infra-kafka-zookeeper")
}

// TestGarbageCollectAfterErrorDuringApply verifies that when the apply
// (upsert) fails, garbage collection of removed objects is deferred rather
// than performed alongside the failed apply.
func TestGarbageCollectAfterErrorDuringApply(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: fmt.Sprintf("%s\n---\n%s\n", testyaml.SanchoYAML, testyaml.PodDisruptionBudgetYAML),
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.Contains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")

	// Make all subsequent upserts fail.
	f.kClient.UpsertError = errors.New("oh no")

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	ka.Spec.YAML = testyaml.SanchoYAML
	f.Update(&ka)

	// because the apply (upsert) returned an error, no GC should have happened yet
	f.MustReconcile(types.NamespacedName{Name: "a"})
	if assert.Empty(t, f.kClient.DeletedYaml) {
		assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
		assert.Contains(f.T(), f.kClient.Yaml, "name: infra-kafka-zookeeper")
	}

	assert.Contains(f.T(), f.Stdout(), "Tried to apply objects")
}

// TestGarbageCollect_DeleteCmdNotInvokedOnChange verifies that swapping the
// apply command on an existing object re-applies but does NOT run the delete
// command (and does not delete via the k8s client either).
func TestGarbageCollect_DeleteCmdNotInvokedOnChange(t *testing.T) {
	f := newFixture(t)

	applyCmd, yamlOut := f.createApplyCmd("custom-apply-1", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	// Switch to a different apply command producing different YAML.
	applyCmd, yamlOut = f.createApplyCmd("custom-apply-2", testyaml.JobYAML)
	ka.Spec.ApplyCmd = &applyCmd
	f.Update(&ka)

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), yamlOut, ka.Status.ResultYAML)

	calls := f.execer.Calls()
	if assert.Len(t, calls, 2, "Expected 2x calls (both to apply)") {
		for i := range calls {
			assert.Equal(t, []string{fmt.Sprintf("custom-apply-%d", i+1)}, calls[i].Cmd.Argv)
		}
	}

	if assert.NoError(t, f.kClient.DeleteError,
		"Delete should not have been invoked (so no error should have occurred)") {
		assert.Empty(t, f.kClient.DeletedYaml,
			"Delete should not have been invoked (so no YAML should have been deleted)")
	}
}

// TestRestartOn verifies that a FileWatch event referenced via
// Spec.RestartOn triggers a re-apply, while reconciles without new events do
// not re-apply or bump LastApplyTime.
func TestRestartOn(t *testing.T) {
	f := newFixture(t)

	f.Create(&v1alpha1.FileWatch{
		ObjectMeta: metav1.ObjectMeta{Name: "fw"},
		Spec:       v1alpha1.FileWatchSpec{WatchedPaths: []string{"/fake/dir"}},
	})

	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
			RestartOn: &v1alpha1.RestartOnSpec{
				FileWatches: []string{"fw"},
			},
		},
	}
	f.Create(&ka)

	f.MustReconcile(types.NamespacedName{Name: "a"})
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")

	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Contains(f.T(), ka.Status.ResultYAML, "name: sancho")
	assert.Contains(f.T(), ka.Status.ResultYAML, "uid:")
	lastApply := ka.Status.LastApplyTime

	// Make sure that re-reconciling w/o changes doesn't re-apply the YAML
	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), f.kClient.Yaml, "")
	timecmp.AssertTimeEqual(t, lastApply, ka.Status.LastApplyTime)

	// Fake a FileWatch event - now re-reconciling should re-apply the YAML
	var fw v1alpha1.FileWatch
	f.MustGet(types.NamespacedName{Name: "fw"}, &fw)
	ts := apis.NowMicro()
	fw.Status.LastEventTime = ts
	fw.Status.FileEvents = append(fw.Status.FileEvents, v1alpha1.FileEvent{
		Time:      ts,
		SeenFiles: []string{"/fake/dir/file"},
	})
	f.UpdateStatus(&fw)

	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Contains(f.T(), f.kClient.Yaml, "name: sancho")
	assert.Truef(t, ka.Status.LastApplyTime.After(lastApply.Time),
		"Last apply time %s should have been after previous apply time %s",
		ka.Status.LastApplyTime.Format(time.RFC3339Nano),
		lastApply.Format(time.RFC3339Nano))
	lastApply = ka.Status.LastApplyTime

	// One last time - make sure that re-reconciling w/o changes doesn't re-apply the YAML
	f.kClient.Yaml = ""
	f.MustReconcile(types.NamespacedName{Name: "a"})
	f.MustGet(types.NamespacedName{Name: "a"}, &ka)
	assert.Equal(f.T(), f.kClient.Yaml, "")
	timecmp.AssertTimeEqual(f.T(), lastApply, ka.Status.LastApplyTime)
}

// TestIgnoreManagedObjects verifies that the reconciler skips objects
// annotated as managed by another system (here "buildcontrol"): Reconcile
// must not apply, ForceApply applies without writing status to the
// apiserver, and a later Reconcile copies the ForceApply result into status.
func TestIgnoreManagedObjects(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
			Annotations: map[string]string{
				v1alpha1.AnnotationManagedBy: "buildcontrol",
			},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	nn := types.NamespacedName{Name: "a"}
	f.MustReconcile(nn)
	assert.Empty(f.T(), f.kClient.Yaml)

	// no apply should happen since the object is managed by the engine
	f.MustGet(nn, &ka)
	assert.Empty(f.T(), ka.Status.ResultYAML)
	assert.Zero(f.T(), ka.Status.LastApplyTime)

	result := f.r.ForceApply(f.Context(), nn, ka.Spec, nil, nil)
	assert.Contains(f.T(), result.ResultYAML, "sancho")
	assert.True(f.T(), !result.LastApplyTime.IsZero())
	assert.True(f.T(), !result.LastApplyStartTime.IsZero())
	assert.Equal(f.T(), result.Error, "")

	// ForceApply must NOT update the apiserver.
	f.MustGet(nn, &ka)
	assert.Empty(f.T(), ka.Status.ResultYAML)
	assert.Zero(f.T(), ka.Status.LastApplyTime)

	f.MustReconcile(nn)
	f.MustGet(nn, &ka)
	assert.Equal(f.T(), result, ka.Status)
}

// TestForceDelete verifies that ForceDelete on a YAML-based spec deletes the
// objects through the k8s client.
func TestForceDelete(t *testing.T) {
	f := newFixture(t)
	nn := types.NamespacedName{Name: "a"}
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
			Annotations: map[string]string{
				v1alpha1.AnnotationManagedBy: "buildcontrol",
			},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
		},
	}
	f.Create(&ka)

	err := f.r.ForceDelete(f.Context(), nn, ka.Spec, nil, "testing")
	assert.Nil(f.T(), err)
	assert.Contains(f.T(), f.kClient.DeletedYaml, "sancho")
}

// TestForceDeleteWithCmd verifies that ForceDelete on a command-based spec
// runs the custom delete command.
func TestForceDeleteWithCmd(t *testing.T) {
	f := newFixture(t)

	nn := types.NamespacedName{Name: "a"}
	applyCmd, _ := f.createApplyCmd("custom-apply-cmd", testyaml.SanchoYAML)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "a",
			Annotations: map[string]string{
				v1alpha1.AnnotationManagedBy: "buildcontrol",
			},
		},
		Spec: v1alpha1.KubernetesApplySpec{
			ApplyCmd:  &applyCmd,
			DeleteCmd: &v1alpha1.KubernetesApplyCmd{Args: []string{"custom-delete-cmd"}},
		},
	}
	f.Create(&ka)

	err := f.r.ForceDelete(f.Context(), nn, ka.Spec, nil, "testing")
	assert.Nil(f.T(), err)

	calls := f.execer.Calls()
	if assert.Len(t, calls, 1, "Expected 1x delete calls") {
		assert.Equal(t, []string{"custom-delete-cmd"}, calls[0].Cmd.Argv)
	}
}

// TestDisableByConfigmap verifies that a YAML-based KubernetesApply can be
// toggled on/off/on via its disable ConfigMap; the per-toggle assertions
// live in setDisabled.
func TestDisableByConfigmap(t *testing.T) {
	f := newFixture(t)
	ka := v1alpha1.KubernetesApply{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
		Spec: v1alpha1.KubernetesApplySpec{
			YAML: testyaml.SanchoYAML,
			DisableSource: &v1alpha1.DisableSource{
				ConfigMap: &v1alpha1.ConfigMapDisableSource{
					Name: "test-disable",
					Key:  "isDisabled",
				},
			},
		},
	}
	f.Create(&ka)

	f.setDisabled(ka.GetObjectMeta().Name, false)

	f.setDisabled(ka.GetObjectMeta().Name, true)

	f.setDisabled(ka.GetObjectMeta().Name, false)
}

// requireKaMatchesInApi polls the apiserver until the named KubernetesApply
// satisfies matcher (or the package-level timeout expires), returning the
// last-fetched object.
func (f *fixture) requireKaMatchesInApi(name string, matcher func(ka *v1alpha1.KubernetesApply) bool) *v1alpha1.KubernetesApply {
	ka := v1alpha1.KubernetesApply{}

	require.Eventually(f.T(), func() bool {
		f.MustGet(types.NamespacedName{Name: name}, &ka)
		return matcher(&ka)
	}, timeout, interval)

	return &ka
}

// setDisabled flips the disable ConfigMap for the named KubernetesApply,
// reconciles, and asserts the expected side effects: DisableStatus matches,
// the KubernetesDiscovery child exists iff enabled, and disabling deletes
// the applied objects (via the k8s client for YAML-based specs, via the
// delete command otherwise).
func (f *fixture) setDisabled(name string, isDisabled bool) {
	ka := v1alpha1.KubernetesApply{}
	f.MustGet(types.NamespacedName{Name: name}, &ka)

	require.NotNil(f.T(), ka.Spec.DisableSource)
	require.NotNil(f.T(), ka.Spec.DisableSource.ConfigMap)

	ds := ka.Spec.DisableSource.ConfigMap
	err := configmap.UpsertDisableConfigMap(f.Context(), f.Client, ds.Name, ds.Key, isDisabled)
	require.NoError(f.T(), err)

	_, err = f.Reconcile(types.NamespacedName{Name: name})
	require.NoError(f.T(), err)

	f.requireKaMatchesInApi(name, func(ka *v1alpha1.KubernetesApply) bool {
		return ka.Status.DisableStatus != nil && ka.Status.DisableStatus.Disabled == isDisabled
	})

	// the KA reconciler only creates a KD if the yaml is already in the KA's status,
	// which means we need a second call to Reconcile to get the KD
	_, err = f.Reconcile(types.NamespacedName{Name: name})
	require.NoError(f.T(), err)

	kd := v1alpha1.KubernetesDiscovery{}
	kdExists := f.Get(types.NamespacedName{Name: name}, &kd)

	if isDisabled {
		require.False(f.T(), kdExists)

		if ka.Spec.DeleteCmd == nil {
			require.Contains(f.T(), f.kClient.DeletedYaml, "name: sancho")
		} else {
			// Must run the delete cmd instead of deleting resources with our k8s client.
			require.Equal(f.T(), f.kClient.DeletedYaml, "")
		}

		// Reset the deletedYaml so it doesn't interfere with other tests
		f.kClient.DeletedYaml = ""
	} else {
		require.True(f.T(), kdExists)
	}
}

// fixture bundles the generic controller test fixture with the fakes this
// reconciler depends on: the fake k8s client and the fake command execer.
type fixture struct {
	*fake.ControllerFixture
	r       *Reconciler
	kClient *k8s.FakeK8sClient
	execer  *localexec.FakeExecer
}

// newFixture builds a fixture with a Reconciler wired to fake dependencies,
// plus a pre-created "default" Cluster whose kubeconfig path is
// "/path/to/default/kubeconfig" (asserted on by the env-var tests above).
func newFixture(t *testing.T) *fixture {
	kClient := k8s.NewFakeK8sClient(t)
	cfb := fake.NewControllerFixtureBuilder(t)
	dockerClient := docker.NewFakeClient()

	// Make the fake ImageExists always return true, which is the behavior we want
	// when testing the reconciler
	dockerClient.ImageAlwaysExists = true

	execer := localexec.NewFakeExecer(t)

	r := NewReconciler(cfb.Client, kClient, v1alpha1.NewScheme(), cfb.Store, execer)

	f := &fixture{
		ControllerFixture: cfb.Build(r),
		r:                 r,
		kClient:           kClient,
		execer:            execer,
	}
	f.Create(&v1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "default",
		},
		Status: v1alpha1.ClusterStatus{
			Connection: &v1alpha1.ClusterConnectionStatus{
				Kubernetes: &v1alpha1.KubernetesClusterConnectionStatus{
					Context:    "default",
					ConfigPath: "/path/to/default/kubeconfig",
				},
			},
		},
	})

	return f
}

// createApplyCmd creates a KubernetesApplyCmd that use the passed YAML to generate simulated stdout via the FakeExecer.
// It parses the YAML, assigns a fresh UID to each entity (mimicking what a
// real apply would do), registers the command with the fake execer to exit 0
// and print the re-serialized YAML, and returns both the cmd and that YAML.
func (f *fixture) createApplyCmd(name string, yaml string) (v1alpha1.KubernetesApplyCmd, string) {
	f.T().Helper()

	require.NotEmpty(f.T(), yaml, "applyCmd YAML cannot be blank")

	entities, err := k8s.ParseYAMLFromString(yaml)
	require.NoErrorf(f.T(), err, "Could not parse YAML: %s", yaml)
	for i := range entities {
		entities[i].SetUID(string(uuid.NewUUID()))
	}
	yamlOut, err := k8s.SerializeSpecYAML(entities)
	require.NoErrorf(f.T(), err, "Failed to re-serialize YAML for entities: %s", spew.Sdump(entities))

	f.execer.RegisterCommand(name, 0, yamlOut, "")
	return v1alpha1.KubernetesApplyCmd{
		Args: []string{name},
	}, yamlOut
}