package k8smonitor

import (
	"context"
	"fmt"
	"testing"

	"github.com/golang/mock/gomock"
	"go.aporeto.io/enforcerd/internal/extractors/containermetadata"
	"go.aporeto.io/enforcerd/internal/extractors/containermetadata/mockcontainermetadata"
	"go.aporeto.io/enforcerd/trireme-lib/common"
	"go.aporeto.io/enforcerd/trireme-lib/monitor/extractors"
	policy "go.aporeto.io/enforcerd/trireme-lib/policy"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubernetes "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// TestK8sMonitor_updateEvent exercises K8sMonitor.updateEvent through every
// visible path: runtime missing from the cache, pod missing from the cache,
// metadata extraction failure, runtime-cache update failure, policy-engine
// failure, and the fully successful update.
//
// NOTE(review): the Times(n) counts on the mock expectations mirror exactly
// how often updateEvent calls each collaborator — confirm against the
// implementation before changing them.
func TestK8sMonitor_updateEvent(t *testing.T) {
	type args struct {
		ctx       context.Context
		sandboxID string
	}
	tests := []struct {
		name              string
		args              args
		wantErr           bool
		metadataExtractor extractors.PodMetadataExtractor
		prepare           func(t *testing.T, mocks *unitTestMonitorMocks)
	}{
		{
			// A nil runtime from the cache is treated as "nothing to
			// update" — no error expected.
			name: "runtime not found for sandbox ID",
			args: args{
				ctx:       context.Background(),
				sandboxID: "not found",
			},
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("not found")).Return(nil).Times(1)
			},
		},
		{
			// Runtime exists but the pod cache has no entry: also a
			// silent no-op, not an error.
			name: "runtime found, but pod not found for sandbox ID",
			args: args{
				ctx:       context.Background(),
				sandboxID: "not found",
			},
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("not found")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.podCache.EXPECT().Get(gomock.Eq("not found")).Return(nil).Times(1)
			},
		},
		{
			// The extractor error is expected to propagate out of
			// updateEvent.
			name: "runtime and pod found, but metadata extraction fails",
			args: args{
				ctx:       context.Background(),
				sandboxID: "sandboxID",
			},
			wantErr: true,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return nil, fmt.Errorf("error")
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.podCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(&corev1.Pod{}).Times(1)
			},
		},
		{
			// Extraction succeeds, but writing the fresh runtime back
			// into the runtime cache fails.
			name: "runtime and pod found, metadata extraction succeeded, but internal update failed",
			args: args{
				ctx:       context.Background(),
				sandboxID: "sandboxID",
			},
			wantErr: true,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return policy.NewPURuntimeWithDefaults(), nil
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.podCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(&corev1.Pod{}).Times(1)
				mocks.runtimeCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(policy.NewPURuntimeWithDefaults())).Return(fmt.Errorf("error")).Times(1)
			},
		},
		{
			// Everything up to the policy engine succeeds; the
			// HandlePUEvent error must be surfaced to the caller.
			name: "update event fails in policy engine",
			args: args{
				ctx:       context.Background(),
				sandboxID: "sandboxID",
			},
			wantErr: true,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return policy.NewPURuntimeWithDefaults(), nil
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.podCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(&corev1.Pod{}).Times(1)
				mocks.runtimeCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(policy.NewPURuntimeWithDefaults())).Return(nil).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventUpdate),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(fmt.Errorf("error")).Times(1)
			},
		},
		{
			// Happy path: caches consulted and updated, EventUpdate
			// delivered to the policy engine.
			name: "update event succeeds",
			args: args{
				ctx:       context.Background(),
				sandboxID: "sandboxID",
			},
			wantErr: false,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return policy.NewPURuntimeWithDefaults(), nil
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.podCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(&corev1.Pod{}).Times(1)
				mocks.runtimeCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(policy.NewPURuntimeWithDefaults())).Return(nil).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventUpdate),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(nil).Times(1)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Fresh controller and monitor per subtest so expectations
			// from one case cannot leak into another.
			ctrl := gomock.NewController(t)
			m, mocks := newUnitTestMonitor(ctrl)
			m.metadataExtractor = tt.metadataExtractor
			tt.prepare(t, mocks)
			if err := m.updateEvent(tt.args.ctx, tt.args.sandboxID); (err != nil) != tt.wantErr {
				t.Errorf("K8sMonitor.updateEvent() error = %v, wantErr %v", err, tt.wantErr)
			}
			// Finish asserts that every EXPECT above was satisfied.
			ctrl.Finish()
		})
	}
}

// TestK8sMonitor_stopEvent exercises K8sMonitor.stopEvent: a missing runtime
// is a no-op, and a policy-engine failure for EventStop is propagated.
func TestK8sMonitor_stopEvent(t *testing.T) {
	type args struct {
		ctx       context.Context
		sandboxID string
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
		prepare func(t *testing.T, mocks *unitTestMonitorMocks)
	}{
		{
			// Unknown sandbox: nothing to stop, no error.
			name: "runtime not found for sandbox ID",
			args: args{
				ctx:       context.Background(),
				sandboxID: "not found",
			},
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("not found")).Return(nil).Times(1)
			},
		},
		{
			name: "stop event failed in policy engine",
			args: args{
				ctx:       context.Background(),
				sandboxID: "sandboxID",
			},
			wantErr: true,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventStop),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(fmt.Errorf("error")).Times(1)
			},
		},
		{
			name: "stop event succeeds",
			args: args{
				ctx:       context.Background(),
				sandboxID: "sandboxID",
			},
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks) {
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventStop),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(nil).Times(1)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			m, mocks := newUnitTestMonitor(ctrl)
			tt.prepare(t, mocks)
			if err := m.stopEvent(tt.args.ctx, tt.args.sandboxID); (err != nil) != tt.wantErr {
				t.Errorf("K8sMonitor.stopEvent() error = %v, wantErr %v", err, tt.wantErr)
			}
			ctrl.Finish()
		})
	}
}

// TestK8sMonitor_destroyEvent exercises K8sMonitor.destroyEvent for every
// container-metadata kind: an unexpected kind errors out, a PodContainer is
// ignored, and a PodSandbox drives cache eviction plus an EventDestroy to the
// policy engine.
//
// NOTE(review): the differing Times counts on Kind()/ID() (2, 1, 2, 5, 5)
// track how often destroyEvent re-reads those values on each path — verify
// against the implementation if they start failing.
func TestK8sMonitor_destroyEvent(t *testing.T) {
	tests := []struct {
		name    string
		wantErr bool
		prepare func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata)
	}{
		{
			// Anything other than PodSandbox/PodContainer is rejected.
			name:    "unexpected container kind",
			wantErr: true,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.Container).Times(2)
			},
		},
		{
			// Destroy of an app container inside a pod is a no-op; only
			// the sandbox destroy tears the PU down.
			name:    "nothing happens for a PodContainer",
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodContainer).Times(1)
			},
		},
		{
			name:    "PodSandbox not found in cache",
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(2)
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(nil).Times(1)
			},
		},
		{
			// Both caches are purged even though the policy engine then
			// fails; the error still propagates.
			name:    "destroy event failed in policy engine",
			wantErr: true,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(5)
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.runtimeCache.EXPECT().Delete(gomock.Eq("sandboxID")).Times(1)
				mocks.podCache.EXPECT().Delete(gomock.Eq("sandboxID")).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventDestroy),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(fmt.Errorf("error")).Times(1)
			},
		},
		{
			name:    "destroy event succeeds",
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(5)
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.runtimeCache.EXPECT().Delete(gomock.Eq("sandboxID")).Times(1)
				mocks.podCache.EXPECT().Delete(gomock.Eq("sandboxID")).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventDestroy),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(nil).Times(1)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			m, mocks := newUnitTestMonitor(ctrl)
			kmd := mockcontainermetadata.NewMockCommonKubernetesContainerMetadata(ctrl)
			tt.prepare(t, mocks, kmd)
			if err := m.destroyEvent(context.Background(), kmd); (err != nil) != tt.wantErr {
				t.Errorf("K8sMonitor.destroyEvent() error = %v, wantErr %v", err, tt.wantErr)
			}
			ctrl.Finish()
		})
	}
}

// TestK8sMonitor_startEvent exercises K8sMonitor.startEvent against a fake
// Kubernetes API (client-go fake clientset seeded with two pods): bad kinds,
// ignored PodContainers, API lookup failures, cache failures, metadata
// extraction failures, the HostNetwork short-circuit, and the happy path.
func TestK8sMonitor_startEvent(t *testing.T) {
	// A normal pod scheduled on node "test" — the node name must match the
	// monitor's nodename for the informer set up below to list it.
	podTemplate1 := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-pod",
			Namespace: "default",
		},
		Spec: corev1.PodSpec{
			NodeName: "test",
		},
	}
	// A HostNetwork pod, used to verify that such pods are skipped.
	podTemplate2 := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-host-network-pod",
			Namespace: "default",
		},
		Spec: corev1.PodSpec{
			HostNetwork: true,
			NodeName:    "test",
		},
	}
	// DeepCopy so the fake clientset cannot mutate the templates that the
	// expectations below compare against.
	c := fake.NewSimpleClientset(
		podTemplate1.DeepCopy(),
		podTemplate2.DeepCopy(),
	)

	tests := []struct {
		name              string
		wantErr           bool
		metadataExtractor extractors.PodMetadataExtractor
		kubeClient        kubernetes.Interface
		prepare           func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata)
	}{
		{
			name:    "unexpected container kind",
			wantErr: true,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.Container).Times(2)
			},
		},
		{
			// App containers are only logged/identified, never started
			// as their own PU — hence the single reads below and no
			// cache or policy expectations.
			name:    "PodContainer: is simply being ignored",
			wantErr: false,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodContainer).Times(1)
				kmd.EXPECT().ID().Return("containerID").Times(1)
				kmd.EXPECT().PodSandboxID().Return("sandboxID").Times(1)
				kmd.EXPECT().PodName().Return("my-pod").Times(1)
				kmd.EXPECT().PodNamespace().Return("default").Times(1)
			},
		},
		{
			// "not-found" is not seeded in the fake clientset, so the
			// API lookup fails.
			name:       "PodSandbox: failed to get pod from API",
			wantErr:    true,
			kubeClient: c,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(1)
				kmd.EXPECT().PodName().Return("not-found").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
			},
		},
		{
			name:       "PodSandbox: got pod from API, but failed to update cache",
			wantErr:    true,
			kubeClient: c,
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(2)
				kmd.EXPECT().PodName().Return("my-pod").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
				mocks.podCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(podTemplate1.DeepCopy())).Return(fmt.Errorf("error")).Times(1)
			},
		},
		{
			name:       "PodSandbox: metadata extraction fails",
			wantErr:    true,
			kubeClient: c,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return nil, fmt.Errorf("error")
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(2)
				kmd.EXPECT().PodName().Return("my-pod").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
				kmd.EXPECT().NetNSPath().Return("/var/run/netns/container1")
				mocks.podCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(podTemplate1.DeepCopy())).Return(nil).Times(1)
			},
		},
		{
			name:       "PodSandbox: metadata extraction succeeds, but updating cache fails",
			wantErr:    true,
			kubeClient: c,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return policy.NewPURuntimeWithDefaults(), nil
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(3)
				kmd.EXPECT().PodName().Return("my-pod").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
				kmd.EXPECT().NetNSPath().Return("/var/run/netns/container1")
				mocks.podCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(podTemplate1.DeepCopy())).Return(nil).Times(1)
				mocks.runtimeCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(policy.NewPURuntimeWithDefaults())).Return(fmt.Errorf("error")).Times(1)
			},
		},
		{
			// The extractor would fail loudly if it were reached — so a
			// clean (nil-error) run proves HostNetwork pods bail out
			// before extraction.
			name:       "PodSandbox: HostNetwork pods are being ignored",
			wantErr:    false,
			kubeClient: c,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return nil, fmt.Errorf("we should not get here")
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(1)
				kmd.EXPECT().PodName().Return("my-host-network-pod").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
				// Deliberately no podCache.Set expectation: the pod is
				// skipped before it would be cached.
				//mocks.podCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(podTemplate2.DeepCopy())).Return(nil).Times(1)
			},
		},
		{
			name:       "PodSandbox: start event fails in policy engine",
			wantErr:    true,
			kubeClient: c,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return policy.NewPURuntimeWithDefaults(), nil
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(4)
				kmd.EXPECT().PodName().Return("my-pod").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
				kmd.EXPECT().NetNSPath().Return("/var/run/netns/container1")
				mocks.podCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(podTemplate1.DeepCopy())).Return(nil).Times(1)
				mocks.runtimeCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(policy.NewPURuntimeWithDefaults())).Return(nil).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventStart),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(fmt.Errorf("error")).Times(1)
			},
		},
		{
			name:       "PodSandbox: start event succeeds",
			wantErr:    false,
			kubeClient: c,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return policy.NewPURuntimeWithDefaults(), nil
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(4)
				kmd.EXPECT().PodName().Return("my-pod").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
				kmd.EXPECT().NetNSPath().Return("/var/run/netns/container1")
				mocks.podCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(podTemplate1.DeepCopy())).Return(nil).Times(1)
				mocks.runtimeCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(policy.NewPURuntimeWithDefaults())).Return(nil).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventStart),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(nil).Times(1)
			},
		},
	}
	for _, tt := range tests {
		// Setup happens outside t.Run here (unlike the other tests in this
		// file) because the informer needs a cancellable context whose
		// lifetime brackets the whole iteration.
		ctx, cancel := context.WithCancel(context.Background())
		ctrl := gomock.NewController(t)
		m, mocks := newUnitTestMonitor(ctrl)
		m.metadataExtractor = tt.metadataExtractor
		kmd := mockcontainermetadata.NewMockCommonKubernetesContainerMetadata(ctrl)
		m.kubeClient = tt.kubeClient
		if m.kubeClient != nil {
			// Only cases that talk to the API server get a pod lister
			// backed by the fake clientset.
			m.podLister = setupInformerForUnitTests(ctx, m.kubeClient, m.nodename)
		}
		tt.prepare(t, mocks, kmd)
		t.Run(tt.name, func(t *testing.T) {
			if err := m.startEvent(ctx, kmd, 0); (err != nil) != tt.wantErr {
				t.Errorf("K8sMonitor.startEvent() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
		ctrl.Finish()
		cancel()
	}
}

// TestK8sMonitor_Event exercises the public K8sMonitor.Event dispatcher:
// rejection of unsupported events and wrong payload types, and delegation of
// EventStart/EventDestroy to the start/destroy handlers (whose expectations
// mirror the dedicated tests above).
func TestK8sMonitor_Event(t *testing.T) {
	podTemplate1 := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-pod",
			Namespace: "default",
		},
		Spec: corev1.PodSpec{
			NodeName: "test",
		},
	}
	podTemplate2 := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-host-network-pod",
			Namespace: "default",
		},
		Spec: corev1.PodSpec{
			HostNetwork: true,
			NodeName:    "test",
		},
	}
	c := fake.NewSimpleClientset(
		podTemplate1.DeepCopy(),
		podTemplate2.DeepCopy(),
	)

	type args struct {
		ctx  context.Context
		ev   common.Event
		data interface{}
	}
	tests := []struct {
		name              string
		args              args
		metadataExtractor extractors.PodMetadataExtractor
		kubeClient        kubernetes.Interface
		prepare           func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata)
		wantErr           bool
	}{
		{
			// EventPause is not handled by this monitor.
			name: "unexpected event",
			args: args{
				ctx: context.Background(),
				ev:  common.EventPause,
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
			},
			wantErr: true,
		},
		{
			// data must be container metadata; a plain string is
			// rejected.
			name: "unexpected event data",
			args: args{
				ctx:  context.Background(),
				ev:   common.EventPause,
				data: "wrong type",
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
			},
			wantErr: true,
		},
		{
			// startEvent's "unexpected container kind" error must bubble
			// up through Event.
			name: "failing start event",
			args: args{
				ctx: context.Background(),
				ev:  common.EventStart,
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.Container).Times(2)
			},
			wantErr: true,
		},
		{
			name:       "successful start event for sandbox for pod",
			kubeClient: c,
			metadataExtractor: func(context.Context, *corev1.Pod, string) (*policy.PURuntime, error) {
				return policy.NewPURuntimeWithDefaults(), nil
			},
			args: args{
				ctx: context.Background(),
				ev:  common.EventStart,
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(4)
				kmd.EXPECT().PodName().Return("my-pod").Times(2)
				kmd.EXPECT().PodNamespace().Return("default").Times(2)
				kmd.EXPECT().NetNSPath().Return("/var/run/netns/container1")
				mocks.podCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(podTemplate1.DeepCopy())).Return(nil).Times(1)
				mocks.runtimeCache.EXPECT().Set(gomock.Eq("sandboxID"), gomock.Eq(policy.NewPURuntimeWithDefaults())).Return(nil).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventStart),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(nil).Times(1)
			},
		},
		{
			name: "failing destroy event",
			args: args{
				ctx: context.Background(),
				ev:  common.EventDestroy,
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.Container).Times(2)
			},
			wantErr: true,
		},
		{
			name:    "successful destroy event for sandbox for pod",
			wantErr: false,
			args: args{
				ctx: context.Background(),
				ev:  common.EventDestroy,
			},
			prepare: func(t *testing.T, mocks *unitTestMonitorMocks, kmd *mockcontainermetadata.MockCommonKubernetesContainerMetadata) {
				kmd.EXPECT().Kind().Return(containermetadata.PodSandbox).Times(1)
				kmd.EXPECT().ID().Return("sandboxID").Times(5)
				mocks.runtimeCache.EXPECT().Get(gomock.Eq("sandboxID")).Return(policy.NewPURuntimeWithDefaults()).Times(1)
				mocks.runtimeCache.EXPECT().Delete(gomock.Eq("sandboxID")).Times(1)
				mocks.podCache.EXPECT().Delete(gomock.Eq("sandboxID")).Times(1)
				mocks.policy.EXPECT().HandlePUEvent(
					gomock.Any(),
					gomock.Eq("sandboxID"),
					gomock.Eq(common.EventDestroy),
					gomock.Eq(policy.NewPURuntimeWithDefaults()),
				).Return(nil).Times(1)
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(tt.args.ctx)
			ctrl := gomock.NewController(t)
			m, mocks := newUnitTestMonitor(ctrl)
			m.metadataExtractor = tt.metadataExtractor
			kmd := mockcontainermetadata.NewMockCommonKubernetesContainerMetadata(ctrl)
			m.kubeClient = tt.kubeClient
			if m.kubeClient != nil {
				m.podLister = setupInformerForUnitTests(ctx, m.kubeClient, m.nodename)
			}
			tt.prepare(t, mocks, kmd)
			// Cases with explicit payloads (e.g. "wrong type") use them
			// verbatim; everything else sends the mock metadata.
			var data interface{}
			if tt.args.data != nil {
				data = tt.args.data
			} else {
				data = kmd
			}
			if err := m.Event(ctx, tt.args.ev, data); (err != nil) != tt.wantErr {
				t.Errorf("K8sMonitor.Event() error = %v, wantErr %v", err, tt.wantErr)
			}
			ctrl.Finish()
			cancel()
		})
	}
}