github.com/aporeto-inc/trireme-lib@v10.358.0+incompatible/monitor/internal/pod/monitor_test.go

     1  // +build linux !windows
     2  
     3  package podmonitor
     4  
     5  import (
     6  	"context"
     7  	"fmt"
     8  	"math"
     9  	"strings"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/golang/mock/gomock"
    14  	"go.aporeto.io/trireme-lib/monitor/config"
    15  	"go.aporeto.io/trireme-lib/policy"
    16  	"go.aporeto.io/trireme-lib/policy/mockpolicy"
    17  	"go.uber.org/zap"
    18  	zapcore "go.uber.org/zap/zapcore"
    19  	corev1 "k8s.io/api/core/v1"
    20  	"k8s.io/apimachinery/pkg/runtime"
    21  	"k8s.io/client-go/kubernetes/scheme"
    22  	"k8s.io/client-go/rest"
    23  	cache "k8s.io/client-go/tools/cache"
    24  	"sigs.k8s.io/controller-runtime/pkg/controller"
    25  	"sigs.k8s.io/controller-runtime/pkg/manager"
    26  	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
    27  )
    28  
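        // createNewPodMonitor builds a PodMonitor whose config callbacks (metadata
        // extractor, netcls programmer, sandbox extractor, etc.) are all stubbed to
        // return a mock error; individual tests overwrite the pieces they need.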
    29  func createNewPodMonitor() *PodMonitor {
    30  	m := New()
    31  	mockError := fmt.Errorf("mockerror: overwrite function with your own mock before using")
    32  	monitorConfig := DefaultConfig()
    33  	monitorConfig.Kubeconfig = "testdata/kubeconfig"
    34  	monitorConfig.MetadataExtractor = func(context.Context, *corev1.Pod, bool) (*policy.PURuntime, error) {
    35  		return nil, mockError
    36  	}
    37  	monitorConfig.NetclsProgrammer = func(context.Context, *corev1.Pod, policy.RuntimeReader) error {
    38  		return mockError
    39  	}
    40  	monitorConfig.PidsSetMaxProcsProgrammer = func(ctx context.Context, pod *corev1.Pod, maxProcs int) error {
    41  		return mockError
    42  	}
    43  	monitorConfig.ResetNetcls = func(context.Context) error {
    44  		return mockError
    45  	}
    46  	monitorConfig.SandboxExtractor = func(context.Context, *corev1.Pod) (string, error) {
    47  		return "", mockError
    48  	}
    49  
    50  	if err := m.SetupConfig(nil, monitorConfig); err != nil {
    51  		panic(err)
    52  	}
    53  	return m
    54  }
    55  
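        // isKubernetesController returns a gomock matcher that matches any value
        // implementing the controller-runtime controller.Controller interface.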
    56  func isKubernetesController() gomock.Matcher {
    57  	return &controllerMatcher{}
    58  }
    59  
    60  type controllerMatcher struct{}
    61  
    62  var _ gomock.Matcher = &controllerMatcher{}
    63  
    64  // Matches returns whether x is a match.
    65  func (m *controllerMatcher) Matches(x interface{}) bool {
    66  	_, ok := x.(controller.Controller)
    67  	return ok
    68  }
    69  
    70  // String describes what the matcher matches.
    71  func (m *controllerMatcher) String() string {
    72  	return "is not a Kubernetes controller"
    73  }
    74  
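        // durationKey is the zap field key under which the startup log messages
        // report how long the controller startup took.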
    75  const durationKey = "duration"
    76  
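        // TestPodMonitor_startManager exercises startManager against a fully mocked
        // controller-runtime manager and a mocked zap core, asserting both the
        // expected mock calls and the log messages emitted during startup.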
    77  func TestPodMonitor_startManager(t *testing.T) {
    78  	origLogger := zap.L()
    79  	// reset logger after this test completes
    80  	defer func() {
    81  		zap.ReplaceGlobals(origLogger)
    82  	}()
    83  
    84  	// overwrite package globals so that retries and warning messages fire quickly in this test
    85  	retrySleep = time.Duration(0)
    86  	warningMessageSleep = time.Millisecond * 300
    87  	warningTimeout = time.Millisecond * 300
    88  
    89  	// use this like:
    90  	//   managerNew = managerNewTest(mgr, nil)
    91  	managerNewTest := func(mgr *MockManager, err error) func(*rest.Config, manager.Options) (manager.Manager, error) {
    92  		return func(*rest.Config, manager.Options) (manager.Manager, error) {
    93  			return mgr, err
    94  		}
    95  	}
    96  
    97  	m := createNewPodMonitor()
    98  
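        	// each case registers its mock expectations in expect() and states whether
        	// startManager is expected to assign the manager's client to m.kubeClient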
    99  	tests := []struct {
   100  		name           string
   101  		m              *PodMonitor
   102  		expect         func(*testing.T, *gomock.Controller, context.Context, context.CancelFunc)
   103  		wantKubeClient bool
   104  	}{
   105  		{
   106  			name:           "successful startup without any errors in the expected timeframe",
   107  			m:              m,
   108  			wantKubeClient: true,
   109  			expect: func(t *testing.T, ctrl *gomock.Controller, ctx context.Context, cancel context.CancelFunc) {
   110  				mgr := NewMockManager(ctrl)
   111  				managerNew = managerNewTest(mgr, nil)
   112  				c := NewMockClient(ctrl)
   113  				cch := NewMockCache(ctrl)
   114  				inf := NewMockSharedIndexInformer(ctrl)
   115  
   116  				// this is our version of a mocked SetFields function
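        				// it injects the injector itself, the test scheme, the mocked cache, and the
        				// stop channel into whatever object the controller machinery hands to it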
   117  				var sf func(i interface{}) error
   118  				sf = func(i interface{}) error {
   119  					if _, err := inject.InjectorInto(sf, i); err != nil {
   120  						return err
   121  					}
   122  					if _, err := inject.SchemeInto(scheme.Scheme, i); err != nil {
   123  						return err
   124  					}
   125  					if _, err := inject.CacheInto(cch, i); err != nil {
   126  						return err
   127  					}
   128  					if _, err := inject.StopChannelInto(ctx.Done(), i); err != nil {
   129  						return err
   130  					}
   131  					return nil
   132  				}
   133  
   134  				// delete controller
   135  				mgr.EXPECT().Add(gomock.AssignableToTypeOf(&DeleteController{})).Times(1).Return(nil)
   136  				mgr.EXPECT().GetClient().Times(1).Return(c)
   137  
   138  				// main controller
   139  				// newReconciler calls these
   140  				mgr.EXPECT().GetClient().Times(1).Return(c)
   141  				mgr.EXPECT().GetRecorder("trireme-pod-controller").Times(1).Return(nil)
   142  				// addController calls controller.New which calls these
   143  				mgr.EXPECT().SetFields(gomock.AssignableToTypeOf(&ReconcilePod{})).Times(1).DoAndReturn(sf)
   144  				mgr.EXPECT().GetCache().Times(1).Return(cch)
   145  				mgr.EXPECT().GetConfig().Times(1).Return(nil)
   146  				mgr.EXPECT().GetScheme().Times(1).Return(scheme.Scheme)
   147  				mgr.EXPECT().GetClient().Times(2).Return(c) // once inside of controller.New and once by us
   148  				mgr.EXPECT().GetRecorder("trireme-pod-controller").Times(1).Return(nil)
   149  				mgr.EXPECT().Add(isKubernetesController()).Times(1).DoAndReturn(func(run manager.Runnable) error {
   150  					return sf(run)
   151  				})
   152  				// these are called by our c.Watch statement for registering our Pod event source
   153  				// NOTE: this will also call Start on the informer already! This is the reason why mgr.Start, which
   154  				//       waits for the caches to be filled, will already download a fresh list of all the pods!
   155  				cch.EXPECT().GetInformer(gomock.AssignableToTypeOf(&corev1.Pod{})).Times(1).DoAndReturn(func(arg0 runtime.Object) (cache.SharedIndexInformer, error) {
   156  					return inf, nil
   157  				})
   158  				inf.EXPECT().AddEventHandler(gomock.Any()).Times(1)
   159  
   160  				// monitoring/side controller
   161  				var r manager.Runnable
   162  				mgr.EXPECT().Add(gomock.AssignableToTypeOf(&runnable{})).DoAndReturn(func(run manager.Runnable) error {
   163  					r = run
   164  					return nil
   165  				}).Times(1)
   166  
   167  				// the manager's Start needs to at least start the monitoring controller for our code to behave correctly
   168  				mgr.EXPECT().Start(gomock.Any()).DoAndReturn(func(z <-chan struct{}) error {
   169  					go r.Start(z) //nolint
   170  					return nil
   171  				}).Times(1)
   172  
   173  				// after start, we call GetClient as well to assign it to the monitor
   174  				mgr.EXPECT().GetClient().Times(1).Return(c)
   175  
   176  				// on successful startup, we only expect one debug message at the end
   177  				// we set up everything here to ensure that *only* this log will appear
   178  				// we are additionally testing that the logic of the if condition works
   179  				zc := NewMockCore(ctrl)
   180  				logger := zap.New(zc)
   181  				zap.ReplaceGlobals(logger)
   182  				zc.EXPECT().Enabled(zapcore.DebugLevel).Times(1).Return(true)
   183  				zc.EXPECT().Check(gomock.Any(), gomock.Any()).Times(1).DoAndReturn(func(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
   184  					return ce.AddCore(ent, zc)
   185  				})
   186  				zc.EXPECT().Write(gomock.Any(), gomock.Any()).Times(1).DoAndReturn(func(ent zapcore.Entry, fields []zapcore.Field) error {
   187  					expectedLogMessage := "pod: controller startup finished"
   188  					if !strings.HasPrefix(ent.Message, expectedLogMessage) {
   189  						t.Errorf("expectedLogMessage = '%s', ent.Message = '%s'", expectedLogMessage, ent.Message)
   190  						return nil
   191  					}
   192  					var foundDuration bool
   193  					for _, field := range fields {
   194  						if field.Key == durationKey {
   195  							foundDuration = true
   196  							if field.Type != zapcore.DurationType {
   197  								t.Errorf("duration field of log message is not DurationType (8), but %v", field.Type)
   198  								break
   199  							}
   200  							d := time.Duration(field.Integer)
   201  							if d > warningTimeout {
   202  								t.Errorf("startup time (%s) surpassed the warningTimeout (%s), but printed it as debug log instead of warning", d, warningTimeout)
   203  							}
   204  							break
   205  						}
   206  					}
   207  					if !foundDuration {
   208  						t.Errorf("did not find the duration field in the debug log message")
   209  					}
   210  					return nil
   211  				})
   212  			},
   213  		},
   214  		{
   215  			name:           "successful startup without any errors taking longer than expected",
   216  			m:              m,
   217  			wantKubeClient: true,
   218  			expect: func(t *testing.T, ctrl *gomock.Controller, ctx context.Context, cancel context.CancelFunc) {
   219  				mgr := NewMockManager(ctrl)
   220  				managerNew = managerNewTest(mgr, nil)
   221  				c := NewMockClient(ctrl)
   222  				cch := NewMockCache(ctrl)
   223  				inf := NewMockSharedIndexInformer(ctrl)
   224  
   225  				// this is our version of a mocked SetFields function
   226  				var sf func(i interface{}) error
   227  				sf = func(i interface{}) error {
   228  					if _, err := inject.InjectorInto(sf, i); err != nil {
   229  						return err
   230  					}
   231  					if _, err := inject.SchemeInto(scheme.Scheme, i); err != nil {
   232  						return err
   233  					}
   234  					if _, err := inject.CacheInto(cch, i); err != nil {
   235  						return err
   236  					}
   237  					if _, err := inject.StopChannelInto(ctx.Done(), i); err != nil {
   238  						return err
   239  					}
   240  					return nil
   241  				}
   242  
   243  				// delete controller
   244  				mgr.EXPECT().Add(gomock.AssignableToTypeOf(&DeleteController{})).Times(1).Return(nil)
   245  				mgr.EXPECT().GetClient().Times(1).Return(c)
   246  
   247  				// main controller
   248  				// newReconciler calls these
   249  				mgr.EXPECT().GetClient().Times(1).Return(c)
   250  				mgr.EXPECT().GetRecorder("trireme-pod-controller").Times(1).Return(nil)
   251  				// addController calls controller.New which calls these
   252  				mgr.EXPECT().SetFields(gomock.AssignableToTypeOf(&ReconcilePod{})).Times(1).DoAndReturn(sf)
   253  				mgr.EXPECT().GetCache().Times(1).Return(cch)
   254  				mgr.EXPECT().GetConfig().Times(1).Return(nil)
   255  				mgr.EXPECT().GetScheme().Times(1).Return(scheme.Scheme)
   256  				mgr.EXPECT().GetClient().Times(2).Return(c) // once inside of controller.New and once by us
   257  				mgr.EXPECT().GetRecorder("trireme-pod-controller").Times(1).Return(nil)
   258  				mgr.EXPECT().Add(isKubernetesController()).Times(1).DoAndReturn(func(run manager.Runnable) error {
   259  					return sf(run)
   260  				})
   261  				// these are called by our c.Watch statement for registering our Pod event source
   262  				// NOTE: this will also call Start on the informer already! This is the reason why mgr.Start, which
   263  				//       waits for the caches to be filled, will already download a fresh list of all the pods!
   264  				cch.EXPECT().GetInformer(gomock.AssignableToTypeOf(&corev1.Pod{})).Times(1).DoAndReturn(func(arg0 runtime.Object) (cache.SharedIndexInformer, error) {
   265  					return inf, nil
   266  				})
   267  				inf.EXPECT().AddEventHandler(gomock.Any()).Times(1)
   268  
   269  				// monitoring/side controller
   270  				var r manager.Runnable
   271  				mgr.EXPECT().Add(gomock.AssignableToTypeOf(&runnable{})).DoAndReturn(func(run manager.Runnable) error {
   272  					r = run
   273  					return nil
   274  				}).Times(1)
   275  
   276  				// the manager's Start needs to at least start the monitoring controller for our code to behave correctly
   277  				mgr.EXPECT().Start(gomock.Any()).DoAndReturn(func(z <-chan struct{}) error {
   278  					// delay the startup by warningMessageSleep or warningTimeout, whichever is longer
   279  					time.Sleep(time.Duration(math.Max(float64(warningTimeout), float64(warningMessageSleep))))
   280  					go r.Start(z) //nolint
   281  					return nil
   282  				}).Times(1)
   283  
   284  				// after start, we call GetClient as well to assign it to the monitor
   285  				mgr.EXPECT().GetClient().Times(1).Return(c)
   286  
   287  				// on a slow startup, we expect two warning messages: the waiting message and the final "took longer than expected" message
   288  				// we set up everything here to ensure that *only* these logs will appear
   289  				// we are additionally testing that the logic of the if condition works
   290  				zc := NewMockCore(ctrl)
   291  				logger := zap.New(zc)
   292  				zap.ReplaceGlobals(logger)
   293  				zc.EXPECT().Enabled(zapcore.WarnLevel).Times(2).Return(true)
   294  				zc.EXPECT().Check(gomock.Any(), gomock.Any()).Times(2).DoAndReturn(func(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
   295  					return ce.AddCore(ent, zc)
   296  				})
   297  				expectedLogMessages := []string{
   298  					"pod: the Kubernetes controller did not start within the last 5s. Waiting...",
   299  					"pod: controller startup finished, but took longer than expected",
   300  				}
   301  				zc.EXPECT().Write(gomock.Any(), gomock.Any()).Times(2).DoAndReturn(func(ent zapcore.Entry, fields []zapcore.Field) error {
   302  					var found bool
   303  					for _, expectedLogMessage := range expectedLogMessages {
   304  						if ent.Message == expectedLogMessage {
   305  							found = true
   306  							break
   307  						}
   308  					}
   309  					if !found {
   310  						t.Errorf("expectedLogMessages = '%s', ent.Message = '%s'", expectedLogMessages, ent.Message)
   311  					}
   312  
   313  					// this is where we expect the duration field
   314  					if ent.Message == "pod: controller startup finished, but took longer than expected" {
   315  						var foundDuration bool
   316  						for _, field := range fields {
   317  							if field.Key == durationKey {
   318  								foundDuration = true
   319  								if field.Type != zapcore.DurationType {
   320  									t.Errorf("duration field of log message is not DurationType (8), but %v", field.Type)
   321  									break
   322  								}
   323  								d := time.Duration(field.Integer)
   324  								if d < warningTimeout {
   325  									t.Errorf("startup time (%s) did not surpass the warningTimeout (%s), but it was printed as a warning log instead of a debug log", d, warningTimeout)
   326  								}
   327  								break
   328  							}
   329  						}
   330  						if !foundDuration {
   331  							t.Errorf("did not find the duration field in the warning log message")
   332  						}
   333  					}
   334  					return nil
   335  				})
   336  			},
   337  		},
   338  		{
   339  			name:           "successful startup with an error once for all actions",
   340  			m:              m,
   341  			wantKubeClient: true,
   342  			expect: func(t *testing.T, ctrl *gomock.Controller, ctx context.Context, cancel context.CancelFunc) {
   343  				mgr := NewMockManager(ctrl)
   344  				var managerNewErrored bool
   345  				managerNew = func(*rest.Config, manager.Options) (manager.Manager, error) {
   346  					if !managerNewErrored {
   347  						managerNewErrored = true
   348  						return nil, fmt.Errorf("errored")
   349  					}
   350  					return mgr, nil
   351  				}
   352  				c := NewMockClient(ctrl)
   353  				cch := NewMockCache(ctrl)
   354  				inf := NewMockSharedIndexInformer(ctrl)
   355  
   356  				// this is our version of a mocked SetFields function
   357  				var sf func(i interface{}) error
   358  				sf = func(i interface{}) error {
   359  					if _, err := inject.InjectorInto(sf, i); err != nil {
   360  						return err
   361  					}
   362  					if _, err := inject.SchemeInto(scheme.Scheme, i); err != nil {
   363  						return err
   364  					}
   365  					if _, err := inject.CacheInto(cch, i); err != nil {
   366  						return err
   367  					}
   368  					if _, err := inject.StopChannelInto(ctx.Done(), i); err != nil {
   369  						return err
   370  					}
   371  					return nil
   372  				}
   373  
   374  				// delete controller
   375  				var deleteControllerErrored bool
   376  				mgr.EXPECT().Add(gomock.AssignableToTypeOf(&DeleteController{})).Times(2).DoAndReturn(func(run manager.Runnable) error {
   377  					if !deleteControllerErrored {
   378  						deleteControllerErrored = true
   379  						return fmt.Errorf("errored")
   380  					}
   381  					return nil
   382  				})
   383  				mgr.EXPECT().GetClient().Times(1).Return(c)
   384  
   385  				// main controller
   386  				// newReconciler calls these
   387  				mgr.EXPECT().GetClient().Times(2).Return(c)
   388  				mgr.EXPECT().GetRecorder("trireme-pod-controller").Times(2).Return(nil)
   389  				// addController calls controller.New which calls these
   390  				mgr.EXPECT().SetFields(gomock.AssignableToTypeOf(&ReconcilePod{})).Times(2).DoAndReturn(sf)
   391  				mgr.EXPECT().GetCache().Times(2).Return(cch)
   392  				mgr.EXPECT().GetConfig().Times(2).Return(nil)
   393  				mgr.EXPECT().GetScheme().Times(2).Return(scheme.Scheme)
   394  				mgr.EXPECT().GetClient().Times(3).Return(c) // twice inside controller.New because of the retry after the failure, and once by us after mgr.Add succeeds
   395  				mgr.EXPECT().GetRecorder("trireme-pod-controller").Times(2).Return(nil)
   396  				var mainControllerErrored bool
   397  				mgr.EXPECT().Add(isKubernetesController()).Times(2).DoAndReturn(func(run manager.Runnable) error {
   398  					if !mainControllerErrored {
   399  						mainControllerErrored = true
   400  						return fmt.Errorf("errored")
   401  					}
   402  					return sf(run)
   403  				})
   404  				// these are called by our c.Watch statement for registering our Pod event source
   405  				// NOTE: this will also call Start on the informer already! This is the reason why mgr.Start, which
   406  				//       waits for the caches to be filled, will already download a fresh list of all the pods!
   407  				cch.EXPECT().GetInformer(gomock.AssignableToTypeOf(&corev1.Pod{})).Times(1).DoAndReturn(func(arg0 runtime.Object) (cache.SharedIndexInformer, error) {
   408  					return inf, nil
   409  				})
   410  				inf.EXPECT().AddEventHandler(gomock.Any()).Times(1)
   411  
   412  				// monitoring/side controller
   413  				var r manager.Runnable
   414  				var sideControllerErrored bool
   415  				mgr.EXPECT().Add(gomock.AssignableToTypeOf(&runnable{})).DoAndReturn(func(run manager.Runnable) error {
   416  					if !sideControllerErrored {
   417  						sideControllerErrored = true
   418  						return fmt.Errorf("errored")
   419  					}
   420  					r = run
   421  					return nil
   422  				}).Times(2)
   423  
   424  				// the manager's Start needs to at least start the monitoring controller for our code to behave correctly
   425  				var managerStartErrored bool
   426  				mgr.EXPECT().Start(gomock.Any()).DoAndReturn(func(z <-chan struct{}) error {
   427  					if !managerStartErrored {
   428  						managerStartErrored = true
   429  						return fmt.Errorf("errored")
   430  					}
   431  					go r.Start(z) //nolint
   432  					return nil
   433  				}).Times(2)
   434  
   435  				// after start, we call GetClient as well to assign it to the monitor
   436  				mgr.EXPECT().GetClient().Times(1).Return(c)
   437  
   438  				// in this case we expect one error message per failed action plus the final debug message
   439  				// we set up everything here to ensure that *only* these logs will appear
   440  				// we are additionally testing that the logic of the if condition works
   441  				zc := NewMockCore(ctrl)
   442  				logger := zap.New(zc)
   443  				zap.ReplaceGlobals(logger)
   444  				zc.EXPECT().Enabled(zapcore.DebugLevel).Times(1).Return(true)
   445  				zc.EXPECT().Enabled(zapcore.ErrorLevel).Times(5).Return(true)
   446  				zc.EXPECT().Check(gomock.Any(), gomock.Any()).Times(6).DoAndReturn(func(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
   447  					return ce.AddCore(ent, zc)
   448  				})
   449  				expectedLogMessages := []string{
   450  					"pod: new manager instantiation failed. Retrying in 3s...",
   451  					"pod: adding delete controller failed. Retrying in 3s...",
   452  					"pod: adding main monitor controller failed. Retrying in 3s...",
   453  					"pod: adding side controller failed. Retrying in 3s...",
   454  					"pod: manager start failed. Retrying in 3s...",
   455  					"pod: controller startup finished",
   456  				}
   457  				zc.EXPECT().Write(gomock.Any(), gomock.Any()).Times(6).DoAndReturn(func(ent zapcore.Entry, fields []zapcore.Field) error {
   458  					var found bool
   459  					for _, expectedLogMessage := range expectedLogMessages {
   460  						if ent.Message == expectedLogMessage {
   461  							found = true
   462  							break
   463  						}
   464  					}
   465  					if !found {
   466  						t.Errorf("expectedLogMessages = '%s', ent.Message = '%s'", expectedLogMessages, ent.Message)
   467  					}
   468  
   469  					// this is where we expect the duration field
   470  					if ent.Message == "pod: controller startup finished" {
   471  						var foundDuration bool
   472  						for _, field := range fields {
   473  							if field.Key == durationKey {
   474  								foundDuration = true
   475  								if field.Type != zapcore.DurationType {
   476  									t.Errorf("duration field of log message is not DurationType (8), but %v", field.Type)
   477  									break
   478  								}
   479  								d := time.Duration(field.Integer)
   480  								if d > warningTimeout {
   481  									t.Errorf("startup time (%s) surpassed the warningTimeout (%s), but printed it as debug log instead of warning", d, warningTimeout)
   482  								}
   483  								break
   484  							}
   485  						}
   486  						if !foundDuration {
   487  							t.Errorf("did not find the duration field in the debug log message")
   488  						}
   489  					}
   490  					return nil
   491  				})
   492  			},
   493  		},
   494  		{
   495  			name:           "context gets cancelled",
   496  			m:              m,
   497  			wantKubeClient: false,
   498  			expect: func(t *testing.T, ctrl *gomock.Controller, ctx context.Context, cancel context.CancelFunc) {
   499  				managerNew = managerNewTest(nil, fmt.Errorf("error"))
   500  				zc := NewMockCore(ctrl)
   501  				logger := zap.New(zc)
   502  				zap.ReplaceGlobals(logger)
   503  				zc.EXPECT().Enabled(zapcore.ErrorLevel).AnyTimes().Return(false)
   504  				cancel()
   505  			},
   506  		},
   507  	}
   508  	for _, tt := range tests {
   509  		t.Run(tt.name, func(t *testing.T) {
   510  			// create a mock controller per test run to track mocked calls
   511  			// call expect() to register and prepare for the side effects of the functions
   512  			// always nil the kubeClient for every call
   513  			ctx, cancel := context.WithCancel(context.Background())
   514  			ctrl := gomock.NewController(t)
   515  			tt.expect(t, ctrl, ctx, cancel)
   516  			tt.m.kubeClient = nil
   517  
   518  			// probably paranoid: this ensures that nothing in the tested function actually calls out to the policy engine yet (ctrl.Finish() would catch those)
   519  			handler := mockpolicy.NewMockResolver(ctrl)
   520  			pc := &config.ProcessorConfig{
   521  				Policy: handler,
   522  			}
   523  			tt.m.SetupHandlers(pc)
   524  
   525  			// now execute the mocked test
   526  			tt.m.startManager(ctx)
   527  
   528  			// do the kubeclient check
   529  			if tt.wantKubeClient && tt.m.kubeClient == nil {
   530  				t.Errorf("PodMonitor.startManager() kubeClient = %v, wantKubeClient %v", tt.m.kubeClient, tt.wantKubeClient)
   531  			}
   532  			if !tt.wantKubeClient && tt.m.kubeClient != nil {
   533  				t.Errorf("PodMonitor.startManager() kubeClient = %v, wantKubeClient %v", tt.m.kubeClient, tt.wantKubeClient)
   534  			}
   535  
   536  			// call Finish on every test run to ensure the calls add up per test
   537  			// this is essentially the real check of all the test conditions, as the function under test works purely through side effects
   538  			ctrl.Finish()
   539  
   540  			// last but not least, call cancel() so that all mocked routines that depend on this context are guaranteed to stop
   541  			cancel()
   542  		})
   543  	}
   544  }
   545  
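        // TestPodMonitor_Resync covers the failure paths of Resync (failing
        // resetNetcls, missing kube client) and one successful pod list call.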
   546  func TestPodMonitor_Resync(t *testing.T) {
   547  	ctrl := gomock.NewController(t)
   548  	defer ctrl.Finish()
   549  
   550  	ctx := context.Background()
   551  
   552  	c := NewMockClient(ctrl)
   553  	m := createNewPodMonitor()
   554  	handler := mockpolicy.NewMockResolver(ctrl)
   555  	pc := &config.ProcessorConfig{
   556  		Policy: handler,
   557  	}
   558  	m.SetupHandlers(pc)
   559  
   560  	type args struct {
   561  		ctx context.Context
   562  	}
   563  	tests := []struct {
   564  		name    string
   565  		m       *PodMonitor
   566  		expect  func(t *testing.T, m *PodMonitor)
   567  		args    args
   568  		wantErr bool
   569  	}{
   570  		{
   571  			name: "resync fails with a failing reset netcls",
   572  			m:    m,
   573  			args: args{
   574  				ctx: ctx,
   575  			},
   576  			expect: func(t *testing.T, m *PodMonitor) {
   577  				m.kubeClient = c
   578  				m.resetNetcls = func(context.Context) error {
   579  					return fmt.Errorf("resync error")
   580  				}
   581  			},
   582  			wantErr: true,
   583  		},
   584  		{
   585  			name: "resync fails with a missing kubeclient",
   586  			m:    m,
   587  			args: args{
   588  				ctx: ctx,
   589  			},
   590  			expect: func(t *testing.T, m *PodMonitor) {
   591  				m.kubeClient = nil
   592  				m.resetNetcls = func(context.Context) error {
   593  					return nil
   594  				}
   595  			},
   596  			wantErr: true,
   597  		},
   598  		{
   599  			name: "successful call to ResyncWithAllPods",
   600  			m:    m,
   601  			args: args{
   602  				ctx: ctx,
   603  			},
   604  			expect: func(t *testing.T, m *PodMonitor) {
   605  				m.kubeClient = c
   606  				m.resetNetcls = func(context.Context) error {
   607  					return nil
   608  				}
   609  				c.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(nil)
   610  			},
   611  			wantErr: false,
   612  		},
   613  		// nothing more to test; the heavy lifting is done in ResyncWithAllPods
   614  	}
   615  	for _, tt := range tests {
   616  		t.Run(tt.name, func(t *testing.T) {
   617  			tt.expect(t, tt.m)
   618  			if err := tt.m.Resync(tt.args.ctx); (err != nil) != tt.wantErr {
   619  				t.Errorf("PodMonitor.Resync() error = %v, wantErr %v", err, tt.wantErr)
   620  			}
   621  		})
   622  	}
   623  }