github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/k8s/watch_test.go

package k8s

import (
	"context"
	"net/http"
	goRuntime "runtime"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/atomic"
	"k8s.io/client-go/rest"

	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/apimachinery/pkg/watch"
	difake "k8s.io/client-go/discovery/fake"
	dfake "k8s.io/client-go/dynamic/fake"
	kfake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	mfake "k8s.io/client-go/metadata/fake"
	ktesting "k8s.io/client-go/testing"

	"github.com/tilt-dev/clusterid"
	"github.com/tilt-dev/tilt/internal/testutils"
)

func TestK8sClient_WatchPods(t *testing.T) {
	tf := newWatchTestFixture(t)

	pod1 := fakePod(PodID("abcd"), "efgh")
	pod2 := fakePod(PodID("1234"), "hieruyge")
	pod3 := fakePod(PodID("754"), "efgh")
	pods := []runtime.Object{pod1, pod2, pod3}
	tf.runPods(pods, pods)
}

func TestPodFromInformerCacheAfterWatch(t *testing.T) {
	tf := newWatchTestFixture(t)

	pod1 := fakePod(PodID("abcd"), "efgh")
	pods := []runtime.Object{pod1}
	ch := tf.watchPods()
	tf.addObjects(pods...)
	tf.assertPods(pods, ch)

	pod1Cache, err := tf.kCli.PodFromInformerCache(tf.ctx, types.NamespacedName{Name: "abcd", Namespace: "default"})
	require.NoError(t, err)
	assert.Equal(t, "abcd", pod1Cache.Name)

	_, err = tf.kCli.PodFromInformerCache(tf.ctx, types.NamespacedName{Name: "missing", Namespace: "default"})
	if assert.Error(t, err) {
		assert.True(t, apierrors.IsNotFound(err))
	}
}

func TestPodFromInformerCacheBeforeWatch(t *testing.T) {
	tf := newWatchTestFixture(t)

	pod1 := fakePod(PodID("abcd"), "efgh")
	pods := []runtime.Object{pod1}
	tf.addObjects(pods...)

	nn := types.NamespacedName{Name: "abcd", Namespace: "default"}
	assert.Eventually(t, func() bool {
		_, err := tf.kCli.PodFromInformerCache(tf.ctx, nn)
		return err == nil
	}, time.Second, 5*time.Millisecond)

	pod1Cache, err := tf.kCli.PodFromInformerCache(tf.ctx, nn)
	require.NoError(t, err)
	assert.Equal(t, "abcd", pod1Cache.Name)

	// This uses a pooled informer, so don't use the helper function
	// (which waits for the informer to finish setup).
	ch, err := tf.kCli.WatchPods(tf.ctx, Namespace(nn.Namespace))
	require.NoError(tf.t, err)
	tf.assertPods(pods, ch)
}

func TestK8sClient_WatchPodsNamespaces(t *testing.T) {
	tf := newWatchTestFixture(t)

	pod1 := fakePod(PodID("pod1"), "pod1")
	pod2 := fakePod(PodID("pod2-system"), "pod2-system")
	pod2.Namespace = "kube-system"
	pod3 := fakePod(PodID("pod3"), "pod3")

	ch := tf.watchPodsNS("default")
	tf.addObjects(pod1, pod2, pod3)
	tf.assertPods([]runtime.Object{pod1, pod3}, ch)
}

func TestK8sClient_WatchPodDeletion(t *testing.T) {
	tf := newWatchTestFixture(t)

	podID := PodID("pod1")
	pod := fakePod(podID, "image1")
	ch := tf.watchPods()
	tf.addObjects(pod)

	select {
	case <-time.After(time.Second):
		t.Fatalf("Timed out waiting for pod update")
	case obj := <-ch:
		asPod, _ := obj.AsPod()
		assert.Equal(t, podID, PodIDFromPod(asPod))
	}

	err := tf.tracker.Delete(PodGVR, "default", "pod1")
	assert.NoError(t, err)

	select {
	case <-time.After(time.Second):
		t.Fatalf("Timed out waiting for pod delete")
	case obj := <-ch:
		ns, name, _ := obj.AsDeletedKey()
		assert.Equal(t, "pod1", name)
		assert.Equal(t, Namespace("default"), ns)
	}
}

func TestK8sClient_WatchPodsFilterNonPods(t *testing.T) {
	tf := newWatchTestFixture(t)

	pod := fakePod(PodID("abcd"), "efgh")
	pods := []runtime.Object{pod}

	deployment := &appsv1.Deployment{}
	input := []runtime.Object{deployment, pod}
	tf.runPods(input, pods)
}

func TestK8sClient_WatchServices(t *testing.T) {
	if goRuntime.GOOS == "windows" {
		t.Skip("TODO(nick): investigate")
	}
	tf := newWatchTestFixture(t)

	svc1 := fakeService("svc1")
	svc2 := fakeService("svc2")
	svc3 := fakeService("svc3")
	svcs := []runtime.Object{svc1, svc2, svc3}
	tf.runServices(svcs, svcs)
}

func TestK8sClient_WatchServicesFilterNonServices(t *testing.T) {
	tf := newWatchTestFixture(t)

	svc := fakeService("svc1")
	svcs := []runtime.Object{svc}

	deployment := &appsv1.Deployment{}
	input := []runtime.Object{deployment, svc}
	tf.runServices(input, svcs)
}

func TestK8sClient_WatchPodsError(t *testing.T) {
	tf := newWatchTestFixture(t)

	tf.watchErr = newForbiddenError()
	_, err := tf.kCli.WatchPods(tf.ctx, "default")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "Forbidden")
	}
}

func TestK8sClient_WatchPodsWithNamespaceRestriction(t *testing.T) {
	tf := newWatchTestFixture(t)

	tf.nsRestriction = "sandbox"
	tf.kCli.configNamespace = "sandbox"

	pod1 := fakePod(PodID("pod1"), "image1")
	pod1.Namespace = "sandbox"

	input := []runtime.Object{pod1}
	expected := []runtime.Object{pod1}
	tf.runPods(input, expected)
}

func TestK8sClient_WatchPodsBlockedByNamespaceRestriction(t *testing.T) {
	tf := newWatchTestFixture(t)

	tf.nsRestriction = "sandbox"
	tf.kCli.configNamespace = ""

	_, err := tf.kCli.WatchPods(tf.ctx, "default")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "Code: 403")
	}
}

func TestK8sClient_WatchServicesWithNamespaceRestriction(t *testing.T) {
	tf := newWatchTestFixture(t)

	tf.nsRestriction = "sandbox"
	tf.kCli.configNamespace = "sandbox"

	svc1 := fakeService("svc1")
	svc1.Namespace = "sandbox"

	input := []runtime.Object{svc1}
	expected := []runtime.Object{svc1}
	tf.runServices(input, expected)
}

func TestK8sClient_WatchServicesBlockedByNamespaceRestriction(t *testing.T) {
	tf := newWatchTestFixture(t)

	tf.nsRestriction = "sandbox"
	tf.kCli.configNamespace = ""

	_, err := tf.kCli.WatchServices(tf.ctx, "default")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "Code: 403")
	}
}

func TestK8sClient_WatchEvents(t *testing.T) {
	tf := newWatchTestFixture(t)

	event1 := fakeEvent("event1", "hello1", 1)
	event2 := fakeEvent("event2", "hello2", 2)
	event3 := fakeEvent("event3", "hello3", 3)
	events := []runtime.Object{event1, event2, event3}
	tf.runEvents(events, events)
}

func TestK8sClient_WatchEventsNamespaced(t *testing.T) {
	tf := newWatchTestFixture(t)

	tf.kCli.configNamespace = "sandbox"

	event1 := fakeEvent("event1", "hello1", 1)
	event1.Namespace = "sandbox"

	events := []runtime.Object{event1}
	tf.runEvents(events, events)
}

func TestK8sClient_WatchEventsUpdate(t *testing.T) {
	tf := newWatchTestFixture(t)

	event1 := fakeEvent("event1", "hello1", 1)
	event2 := fakeEvent("event2", "hello2", 1)
	event1b := fakeEvent("event1", "hello1", 1)
	event3 := fakeEvent("event3", "hello3", 1)
	event2b := fakeEvent("event2", "hello2", 2)

	ch := tf.watchEvents()

	gvr := schema.GroupVersionResource{Version: "v1", Resource: "events"}
	tf.addObjects(event1, event2)
	tf.assertEvents([]runtime.Object{event1, event2}, ch)

	err := tf.tracker.Update(gvr, event1b, "default")
	require.NoError(t, err)
	tf.assertEvents([]runtime.Object{}, ch)

	err = tf.tracker.Add(event3)
	require.NoError(t, err)
	err = tf.tracker.Update(gvr, event2b, "default")
	require.NoError(t, err)
	tf.assertEvents([]runtime.Object{event3, event2b}, ch)
}

func TestWatchPodsAfterAdding(t *testing.T) {
	tf := newWatchTestFixture(t)

	pod1 := fakePod(PodID("abcd"), "efgh")
	tf.addObjects(pod1)
	ch := tf.watchPods()
	tf.assertPods([]runtime.Object{pod1}, ch)
}

func TestWatchServicesAfterAdding(t *testing.T) {
	tf := newWatchTestFixture(t)

	svc := fakeService("svc1")
	tf.addObjects(svc)
	ch := tf.watchServices()
	tf.assertServices([]runtime.Object{svc}, ch)
}

func TestWatchEventsAfterAdding(t *testing.T) {
	tf := newWatchTestFixture(t)

	event := fakeEvent("event1", "hello1", 1)
	tf.addObjects(event)
	ch := tf.watchEvents()
	tf.assertEvents([]runtime.Object{event}, ch)
}

func TestK8sClient_WatchMeta(t *testing.T) {
	tf := newWatchTestFixture(t)

	pod1 := fakePod(PodID("abcd"), "efgh")
	pod2 := fakePod(PodID("1234"), "hieruyge")
	ch := tf.watchMeta(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})

	_, _ = tf.metadata.Resource(PodGVR).Namespace("default").(mfake.MetadataClient).CreateFake(
		&metav1.PartialObjectMetadata{TypeMeta: pod1.TypeMeta, ObjectMeta: pod1.ObjectMeta},
		metav1.CreateOptions{})
	_, _ = tf.metadata.Resource(PodGVR).Namespace("default").(mfake.MetadataClient).CreateFake(
		&metav1.PartialObjectMetadata{TypeMeta: pod2.TypeMeta, ObjectMeta: pod2.ObjectMeta},
		metav1.CreateOptions{})

	expected := []metav1.Object{&pod1.ObjectMeta, &pod2.ObjectMeta}
	tf.assertMeta(expected, ch)
}

func TestK8sClient_WatchMetaBackfillK8s14(t *testing.T) {
	tf := newWatchTestFixture(t)

	tf.version.GitVersion = "v1.14.1"
= "v1.14.1" 326 327 pod1 := fakePod(PodID("abcd"), "efgh") 328 pod2 := fakePod(PodID("1234"), "hieruyge") 329 ch := tf.watchMeta(schema.GroupVersionKind{Version: "v1", Kind: "Pod"}) 330 331 tf.addObjects(pod1, pod2) 332 333 expected := []metav1.Object{pod1, pod2} 334 tf.assertMeta(expected, ch) 335 } 336 337 type partialMetaTestCase struct { 338 v string 339 expected bool 340 } 341 342 func TestSupportsPartialMeta(t *testing.T) { 343 cases := []partialMetaTestCase{ 344 // minikube 345 partialMetaTestCase{"v1.19.1", true}, 346 partialMetaTestCase{"v1.15.0", true}, 347 partialMetaTestCase{"v1.14.0", false}, 348 349 // gke 350 partialMetaTestCase{"v1.18.10-gke.601", true}, 351 partialMetaTestCase{"v1.15.10-gke.601", true}, 352 partialMetaTestCase{"v1.14.10-gke.601", false}, 353 354 // microk8s 355 partialMetaTestCase{"v1.19.3-34+fa32ff1c160058", true}, 356 partialMetaTestCase{"v1.15.3-34+fa32ff1c160058", true}, 357 partialMetaTestCase{"v1.14.3-34+fa32ff1c160058", false}, 358 359 partialMetaTestCase{"garbage", false}, 360 } 361 for _, c := range cases { 362 c := c 363 t.Run(c.v, func(t *testing.T) { 364 assert.Equal(t, c.expected, supportsPartialMetadata(&version.Info{GitVersion: c.v})) 365 }) 366 } 367 } 368 369 type fakeDiscovery struct { 370 *difake.FakeDiscovery 371 restClient rest.Interface 372 } 373 374 func (fakeDiscovery) Fresh() bool { return true } 375 func (fakeDiscovery) Invalidate() {} 376 377 func (f fakeDiscovery) RESTClient() rest.Interface { 378 return f.restClient 379 } 380 381 type watchTestFixture struct { 382 t *testing.T 383 kCli *K8sClient 384 385 numWatches atomic.Int32 386 tracker ktesting.ObjectTracker 387 watchRestrictions ktesting.WatchRestrictions 388 metadata *mfake.FakeMetadataClient 389 ctx context.Context 390 watchErr error 391 nsRestriction Namespace 392 cancel context.CancelFunc 393 version *version.Info 394 } 395 396 func newWatchTestFixture(t *testing.T) *watchTestFixture { 397 ret := &watchTestFixture{t: t} 398 t.Cleanup(ret.TearDown) 399 400 ctx, _, _ := testutils.CtxAndAnalyticsForTest() 401 ret.ctx, ret.cancel = context.WithCancel(ctx) 402 403 tracker := ktesting.NewObjectTracker(scheme.Scheme, scheme.Codecs.UniversalDecoder()) 404 ret.tracker = tracker 405 406 wr := func(action ktesting.Action) (handled bool, wi watch.Interface, err error) { 407 wa := action.(ktesting.WatchAction) 408 nsRestriction := ret.nsRestriction 409 if !nsRestriction.Empty() && wa.GetNamespace() != nsRestriction.String() { 410 return true, nil, &apiErrors.StatusError{ 411 ErrStatus: metav1.Status{Code: http.StatusForbidden}, 412 } 413 } 414 415 ret.watchRestrictions = wa.GetWatchRestrictions() 416 if ret.watchErr != nil { 417 return true, nil, ret.watchErr 418 } 419 420 // Fake watcher implementation based on objects added to the tracker. 
		gvr := action.GetResource()
		ns := action.GetNamespace()
		watch, err := tracker.Watch(gvr, ns)
		if err != nil {
			return false, nil, err
		}

		ret.numWatches.Add(1)
		return true, watch, nil
	}

	cs := kfake.NewSimpleClientset()
	cs.PrependReactor("*", "*", ktesting.ObjectReaction(tracker))
	cs.PrependWatchReactor("*", wr)

	dcs := dfake.NewSimpleDynamicClient(scheme.Scheme)
	dcs.PrependReactor("*", "*", ktesting.ObjectReaction(tracker))
	dcs.PrependWatchReactor("*", wr)

	mcs := mfake.NewSimpleMetadataClient(scheme.Scheme)
	mcs.PrependReactor("*", "*", ktesting.ObjectReaction(tracker))
	mcs.PrependWatchReactor("*", wr)
	ret.metadata = mcs

	version := &version.Info{Major: "1", Minor: "19", GitVersion: "v1.19.1"}
	di := fakeDiscovery{
		FakeDiscovery: &difake.FakeDiscovery{
			Fake:               &ktesting.Fake{},
			FakedServerVersion: version,
		},
	}

	ret.kCli = &K8sClient{
		InformerSet:     newInformerSet(cs, dcs),
		product:         clusterid.ProductUnknown,
		drm:             fakeRESTMapper{},
		dynamic:         dcs,
		clientset:       cs,
		metadata:        mcs,
		core:            cs.CoreV1(),
		discovery:       di,
		configNamespace: "default",
	}
	ret.version = version

	return ret
}

func (tf *watchTestFixture) TearDown() {
	tf.cancel()
}

func (tf *watchTestFixture) watchPods() <-chan ObjectUpdate {
	return tf.watchPodsNS(tf.kCli.configNamespace)
}

// The fake watcher has race conditions, so wait until the shared informer
// sets up all its watchers.
func (tf *watchTestFixture) waitForInformerSetup(originalWatches int32) {
	require.Eventually(tf.t, func() bool {
		return tf.numWatches.Load() == originalWatches+2
	}, time.Second, time.Millisecond)
}

func (tf *watchTestFixture) watchPodsNS(ns Namespace) <-chan ObjectUpdate {
	originalWatches := tf.numWatches.Load()
	ch, err := tf.kCli.WatchPods(tf.ctx, ns)
	require.NoError(tf.t, err)
	tf.waitForInformerSetup(originalWatches)
	return ch
}

func (tf *watchTestFixture) watchServices() <-chan *v1.Service {
	originalWatches := tf.numWatches.Load()
	ch, err := tf.kCli.WatchServices(tf.ctx, tf.kCli.configNamespace)
	require.NoError(tf.t, err)
	tf.waitForInformerSetup(originalWatches)
	return ch
}

func (tf *watchTestFixture) watchEvents() <-chan *v1.Event {
	originalWatches := tf.numWatches.Load()
	ch, err := tf.kCli.WatchEvents(tf.ctx, tf.kCli.configNamespace)
	require.NoError(tf.t, err)
	tf.waitForInformerSetup(originalWatches)
	return ch
}

func (tf *watchTestFixture) watchMeta(gvk schema.GroupVersionKind) <-chan metav1.Object {
	originalWatches := tf.numWatches.Load()
	ch, err := tf.kCli.WatchMeta(tf.ctx, gvk, tf.kCli.configNamespace)
	require.NoError(tf.t, err)
	require.Eventually(tf.t, func() bool {
		return tf.numWatches.Load() == originalWatches+1
	}, time.Second, time.Millisecond)
	return ch
}

func (tf *watchTestFixture) addObjects(inputs ...runtime.Object) {
	for _, o := range inputs {
		err := tf.tracker.Add(o)
		if err != nil {
			tf.t.Fatalf("addObjects: %v", err)
		}
	}
}

func (tf *watchTestFixture) runPods(input []runtime.Object, expected []runtime.Object) {
	ch := tf.watchPods()
	tf.addObjects(input...)
	tf.assertPods(expected, ch)
}

// take reads objects from ch until the channel closes or goes quiet: it waits
// up to a second per object until `expected` objects have arrived, then only
// 200ms more for stragglers.
func take[T interface{}](ch <-chan T, expected int) []T {
	result := []T{}
	done := false
	for !done {
		wait := time.Second
		if len(result) >= expected {
			// No need to wait as long if we already have N objects.
			wait = 200 * time.Millisecond
		}
		select {
		case obj, ok := <-ch:
			if !ok {
				done = true
				continue
			}

			result = append(result, obj)
		case <-time.After(wait):
			// If we haven't seen any events within the wait window, assume we're done.
			done = true
		}
	}
	return result
}

func (tf *watchTestFixture) assertPods(expectedOutput []runtime.Object, ch <-chan ObjectUpdate) {
	var observedPods []runtime.Object
	updates := take(ch, len(expectedOutput))
	for _, update := range updates {
		pod, ok := update.AsPod()
		if ok {
			observedPods = append(observedPods, pod)
		}
	}

	// Our k8s simulation library does not guarantee event order.
	assert.ElementsMatch(tf.t, expectedOutput, observedPods)
}

func (tf *watchTestFixture) runServices(input []runtime.Object, expected []runtime.Object) {
	ch := tf.watchServices()
	tf.addObjects(input...)
	tf.assertServices(expected, ch)
}

func (tf *watchTestFixture) assertServices(expectedOutput []runtime.Object, ch <-chan *v1.Service) {
	observedServices := take(ch, len(expectedOutput))
	// Our k8s simulation library does not guarantee event order.
	assert.ElementsMatch(tf.t, expectedOutput, observedServices)
}

func (tf *watchTestFixture) runEvents(input []runtime.Object, expectedOutput []runtime.Object) {
	ch := tf.watchEvents()
	tf.addObjects(input...)
	tf.assertEvents(expectedOutput, ch)
}

func (tf *watchTestFixture) assertEvents(expectedOutput []runtime.Object, ch <-chan *v1.Event) {
	observedEvents := take(ch, len(expectedOutput))
	// Our k8s simulation library does not guarantee event order.
	assert.ElementsMatch(tf.t, expectedOutput, observedEvents)
}

func (tf *watchTestFixture) assertMeta(expected []metav1.Object, ch <-chan metav1.Object) {
	observed := take(ch, len(expected))
	// Our k8s simulation library does not guarantee event order.
	assert.ElementsMatch(tf.t, expected, observed)
}

type fakeRESTMapper struct {
	*meta.DefaultRESTMapper
}

func (f fakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
	return &meta.RESTMapping{
		Resource: PodGVR,
		Scope:    meta.RESTScopeNamespace,
	}, nil
}

func (f fakeRESTMapper) Reset() {
}