github.com/aporeto-inc/trireme-lib@v10.358.0+incompatible/monitor/internal/kubernetes/handler_test.go

// +build !windows

package kubernetesmonitor

import (
	"context"
	"errors"
	"fmt"
	"testing"

	"go.aporeto.io/trireme-lib/common"
	"go.aporeto.io/trireme-lib/monitor/config"
	"go.aporeto.io/trireme-lib/monitor/extractors"
	dockermonitor "go.aporeto.io/trireme-lib/monitor/internal/docker"
	"go.aporeto.io/trireme-lib/policy"
	api "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	kubefake "k8s.io/client-go/kubernetes/fake"
	kubecache "k8s.io/client-go/tools/cache"
)

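// Test_getKubernetesInformation verifies that the pod namespace and name are
// read from the runtime tags, and that an error is returned when either tag
// is missing entirely (an empty tag value is accepted and returned as-is).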
func Test_getKubernetesInformation(t *testing.T) {

	puRuntimeWithTags := func(tags map[string]string) *policy.PURuntime {
		puRuntime := policy.NewPURuntimeWithDefaults()
		puRuntime.SetTags(policy.NewTagStoreFromMap(tags))
		return puRuntime
	}

	type args struct {
		runtime policy.RuntimeReader
	}
	tests := []struct {
		name    string
		args    args
		want    string
		want1   string
		wantErr bool
	}{
		{
			name:    "no Kubernetes Information",
			args:    args{runtime: policy.NewPURuntimeWithDefaults()},
			want:    "",
			want1:   "",
			wantErr: true,
		},
		{
			name: "both present",
			args: args{runtime: puRuntimeWithTags(map[string]string{
				KubernetesPodNamespaceIdentifier: "a",
				KubernetesPodNameIdentifier:      "b",
			},
			),
			},
			want:    "a",
			want1:   "b",
			wantErr: false,
		},
		{
			name: "both present. NamespaceIdentifier empty",
			args: args{runtime: puRuntimeWithTags(map[string]string{
				KubernetesPodNamespaceIdentifier: "",
				KubernetesPodNameIdentifier:      "b",
			},
			),
			},
			want:    "",
			want1:   "b",
			wantErr: false,
		},
		{
			name: "both present. Name empty",
			args: args{runtime: puRuntimeWithTags(map[string]string{
				KubernetesPodNamespaceIdentifier: "a",
				KubernetesPodNameIdentifier:      "",
			},
			),
			},
			want:    "a",
			want1:   "",
			wantErr: false,
		},
		{
			name: "Namespace missing",
			args: args{runtime: puRuntimeWithTags(map[string]string{
				KubernetesPodNameIdentifier: "b",
			},
			),
			},
			want:    "",
			want1:   "",
			wantErr: true,
		},
		{
			name: "Name missing",
			args: args{runtime: puRuntimeWithTags(map[string]string{
				KubernetesPodNamespaceIdentifier: "a",
			},
			),
			},
			want:    "",
			want1:   "",
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, got1, err := getKubernetesInformation(tt.args.runtime)
			if (err != nil) != tt.wantErr {
				t.Errorf("getKubernetesInformation() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("getKubernetesInformation() got = %v, want %v", got, tt.want)
			}
			if got1 != tt.want1 {
				t.Errorf("getKubernetesInformation() got1 = %v, want %v", got1, tt.want1)
			}
		})
	}
}

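// mockHandler is a policy handler stub whose HandlePUEvent always succeeds.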
type mockHandler struct{}

func (m *mockHandler) HandlePUEvent(ctx context.Context, puID string, event common.Event, runtime policy.RuntimeReader) error {
	return nil
}

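// mockErrHandler is a policy handler stub whose HandlePUEvent always fails,
// used to exercise the monitor's error paths.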
type mockErrHandler struct{}

func (m *mockErrHandler) HandlePUEvent(ctx context.Context, puID string, event common.Event, runtime policy.RuntimeReader) error {
	return errors.New("Dummy error")
}

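// TestKubernetesMonitor_HandlePUEvent drives HandlePUEvent through create,
// start and destroy events against a fake Kubernetes clientset, covering
// unmanaged, managed, failing-extractor and host-network cases.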
func TestKubernetesMonitor_HandlePUEvent(t *testing.T) {

	pod1 := &api.Pod{}
	pod1.SetName("pod1")
	pod1.SetNamespace("beer")

	pod1Runtime := policy.NewPURuntimeWithDefaults()
	pod1Runtime.SetTags(policy.NewTagStoreFromMap(map[string]string{
		KubernetesPodNamespaceIdentifier: "beer",
		KubernetesPodNameIdentifier:      "pod1",
	}))

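	// hostContainerRuntime builds a runtime resembling a host-network pod:
	// a Linux process PU carrying a cgroup mark and the Kubernetes pod tags.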
	hostContainerRuntime := func() *policy.PURuntime {

		pur := policy.NewPURuntime("", 1, "", nil, nil, common.LinuxProcessPU, nil)
		pur.SetOptions(policy.OptionsType{
			CgroupMark: "100",
		})
		pur.SetTags(policy.NewTagStoreFromMap(map[string]string{
			KubernetesPodNamespaceIdentifier: "beer",
			KubernetesPodNameIdentifier:      "pod1",
		}))
		return pur
	}

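	// The three stub metadata extractors below stand in for the real
	// Kubernetes extractor: one reports the pod as unmanaged, one always
	// returns an error, and one reports the pod as managed.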
	kubernetesExtractorUnmanaged := func(runtime policy.RuntimeReader, pod *api.Pod) (*policy.PURuntime, bool, error) {
		originalRuntime, ok := runtime.(*policy.PURuntime)
		if !ok {
			return nil, false, fmt.Errorf("Error casting puruntime")
		}

		newRuntime := originalRuntime.Clone()

		return newRuntime, false, nil
	}

	kubernetesExtractorErrored := func(runtime policy.RuntimeReader, pod *api.Pod) (*policy.PURuntime, bool, error) {
		originalRuntime, ok := runtime.(*policy.PURuntime)
		if !ok {
			return nil, false, fmt.Errorf("Error casting puruntime")
		}

		newRuntime := originalRuntime.Clone()

		return newRuntime, false, fmt.Errorf("Foreseeable error")
	}

	kubernetesExtractorManaged := func(runtime policy.RuntimeReader, pod *api.Pod) (*policy.PURuntime, bool, error) {
		originalRuntime, ok := runtime.(*policy.PURuntime)
		if !ok {
			return nil, false, fmt.Errorf("Error casting puruntime")
		}

		newRuntime := originalRuntime.Clone()

		return newRuntime, true, nil
	}

	type fields struct {
		dockerMonitor       *dockermonitor.DockerMonitor
		kubeClient          kubernetes.Interface
		localNode           string
		handlers            *config.ProcessorConfig
		cache               *cache
		kubernetesExtractor extractors.KubernetesMetadataExtractorType
		podStore            kubecache.Store
		podController       kubecache.Controller
		podControllerStop   chan struct{}
		enableHostPods      bool
	}
	type args struct {
		ctx           context.Context
		puID          string
		event         common.Event
		dockerRuntime policy.RuntimeReader
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name:   "empty dockerruntime on create",
			fields: fields{},
			args: args{
				event:         common.EventCreate,
				dockerRuntime: policy.NewPURuntimeWithDefaults(),
			},
			wantErr: true,
		},
		{
			name:   "empty dockerruntime on start",
			fields: fields{},
			args: args{
				event:         common.EventStart,
				dockerRuntime: policy.NewPURuntimeWithDefaults(),
			},
			wantErr: true,
		},
		{
			name: "Extractor with Unmanaged PU",
			fields: fields{
				kubeClient:          kubefake.NewSimpleClientset(pod1),
				kubernetesExtractor: kubernetesExtractorUnmanaged,
			},
			args: args{
				event:         common.EventCreate,
				dockerRuntime: pod1Runtime,
			},
			wantErr: false,
		},
		{
			name: "Extractor with Errored output",
			fields: fields{
				kubeClient:          kubefake.NewSimpleClientset(pod1),
				kubernetesExtractor: kubernetesExtractorErrored,
			},
			args: args{
				event:         common.EventCreate,
				dockerRuntime: pod1Runtime,
			},
			wantErr: true,
		},
		{
			name: "Extractor with managed PU",
			fields: fields{
				kubeClient:          kubefake.NewSimpleClientset(pod1),
				kubernetesExtractor: kubernetesExtractorManaged,
				cache:               newCache(),
				handlers: &config.ProcessorConfig{
					Policy: &mockHandler{},
				},
			},
			args: args{
				event:         common.EventCreate,
				dockerRuntime: pod1Runtime,
			},
			wantErr: false,
		},
		{
			name: "Destroy not in cache",
			fields: fields{
				kubeClient:          kubefake.NewSimpleClientset(pod1),
				kubernetesExtractor: kubernetesExtractorManaged,
				cache:               newCache(),
				handlers: &config.ProcessorConfig{
					Policy: &mockHandler{},
				},
			},
			args: args{
				event:         common.EventDestroy,
				dockerRuntime: pod1Runtime,
			},
			wantErr: false,
		},
		{
			name: "Activate host network pu",
			fields: fields{
				kubeClient:          kubefake.NewSimpleClientset(pod1),
				kubernetesExtractor: kubernetesExtractorManaged,
				cache:               newCache(),
				handlers: &config.ProcessorConfig{
					Policy: &mockHandler{},
				},
			},
			args: args{
				event:         common.EventStart,
				dockerRuntime: hostContainerRuntime(),
			},
			wantErr: false,
		},
		{
			name: "Non infra containers in a pod with host net",
			fields: fields{
				kubeClient:          kubefake.NewSimpleClientset(pod1),
				kubernetesExtractor: kubernetesExtractorUnmanaged,
				cache:               newCache(),
				handlers: &config.ProcessorConfig{
					Policy: &mockHandler{},
				},
			},
			args: args{
				event:         common.EventStart,
				dockerRuntime: pod1Runtime,
			},
			wantErr: false,
		},
		{
			name: "Activate host network pu and policy engine fails",
			fields: fields{
				kubeClient:          kubefake.NewSimpleClientset(pod1),
				kubernetesExtractor: kubernetesExtractorManaged,
				cache:               newCache(),
				handlers: &config.ProcessorConfig{
					Policy: &mockErrHandler{},
				},
			},
			args: args{
				event:         common.EventStart,
				dockerRuntime: hostContainerRuntime(),
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {

		t.Run(tt.name, func(t *testing.T) {
			m := &KubernetesMonitor{
				dockerMonitor:       tt.fields.dockerMonitor,
				kubeClient:          tt.fields.kubeClient,
				localNode:           tt.fields.localNode,
				handlers:            tt.fields.handlers,
				cache:               tt.fields.cache,
				kubernetesExtractor: tt.fields.kubernetesExtractor,
				podStore:            tt.fields.podStore,
				podController:       tt.fields.podController,
				podControllerStop:   tt.fields.podControllerStop,
				enableHostPods:      tt.fields.enableHostPods,
			}
			if err := m.HandlePUEvent(tt.args.ctx, tt.args.puID, tt.args.event, tt.args.dockerRuntime); (err != nil) != tt.wantErr {
				t.Errorf("KubernetesMonitor.HandlePUEvent() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

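// TestKubernetesMonitor_RefreshPUs checks that refreshing PUs with a nil pod
// is rejected with an error.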
func TestKubernetesMonitor_RefreshPUs(t *testing.T) {
	type fields struct {
		dockerMonitor       *dockermonitor.DockerMonitor
		kubeClient          kubernetes.Interface
		localNode           string
		handlers            *config.ProcessorConfig
		cache               *cache
		kubernetesExtractor extractors.KubernetesMetadataExtractorType
		podStore            kubecache.Store
		podController       kubecache.Controller
		podControllerStop   chan struct{}
		enableHostPods      bool
	}
	type args struct {
		ctx context.Context
		pod *api.Pod
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name:   "empty pod",
			fields: fields{},
			args: args{
				pod: nil,
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &KubernetesMonitor{
				dockerMonitor:       tt.fields.dockerMonitor,
				kubeClient:          tt.fields.kubeClient,
				localNode:           tt.fields.localNode,
				handlers:            tt.fields.handlers,
				cache:               tt.fields.cache,
				kubernetesExtractor: tt.fields.kubernetesExtractor,
				podStore:            tt.fields.podStore,
				podController:       tt.fields.podController,
				podControllerStop:   tt.fields.podControllerStop,
				enableHostPods:      tt.fields.enableHostPods,
			}
			if err := m.RefreshPUs(tt.args.ctx, tt.args.pod); (err != nil) != tt.wantErr {
				t.Errorf("KubernetesMonitor.RefreshPUs() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

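// Test_isPodInfraContainer checks that a runtime is recognized as the pod
// infra container only when its io.kubernetes.container.name tag is set
// to "POD".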
func Test_isPodInfraContainer(t *testing.T) {
	type args struct {
		runtime policy.RuntimeReader
	}

	puRuntimeWithTags := func(tags map[string]string) *policy.PURuntime {
		puRuntime := policy.NewPURuntimeWithDefaults()
		puRuntime.SetTags(policy.NewTagStoreFromMap(tags))
		return puRuntime
	}

	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "Test if runtime has kubernetes infra pod tags",
			args: args{
				runtime: puRuntimeWithTags(map[string]string{
					"@usr:io.kubernetes.container.name": "POD",
				},
				),
			},
			want: true,
		},
		{
			name: "Test if runtime does not have kubernetes infra pod tags",
			args: args{
				runtime: puRuntimeWithTags(map[string]string{
					"key": "value",
				},
				),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := isPodInfraContainer(tt.args.runtime); got != tt.want {
				t.Errorf("isPodInfraContainer() = %v, want %v", got, tt.want)
			}
		})
	}
}

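// TestKubernetesMonitor_decorateRuntime covers decorateRuntime for non-start
// events (no-op), a missing runtime on start, and start events for both a
// Linux process PU and a container PU.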
func TestKubernetesMonitor_decorateRuntime(t *testing.T) {

	pur1 := policy.NewPURuntime("", 1, "", nil, nil, common.LinuxProcessPU, nil)
	pur2 := policy.NewPURuntime("", 1, "", nil, nil, common.ContainerPU, nil)

	hostpuID := "1234"
	puID := "12345"

	podName := "abcd"
	podNamespace := "abcd"
	testCache := newCache()
	testCache.updatePUIDCache("abcd", "abcd", "1234", pur1, nil)

	type fields struct {
		dockerMonitor       *dockermonitor.DockerMonitor
		kubeClient          kubernetes.Interface
		localNode           string
		handlers            *config.ProcessorConfig
		cache               *cache
		kubernetesExtractor extractors.KubernetesMetadataExtractorType
		podStore            kubecache.Store
		podController       kubecache.Controller
		podControllerStop   chan struct{}
		enableHostPods      bool
	}
	type args struct {
		puID         string
		runtimeInfo  policy.RuntimeReader
		event        common.Event
		podName      string
		podNamespace string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name:   "Check no op behavior on non start events",
			fields: fields{},
			args: args{
				puID:  "123",
				event: common.EventCreate,
			},
			wantErr: false,
		},
		{
			name:   "Invalid Runtime",
			fields: fields{},
			args: args{
				event: common.EventStart,
			},
			wantErr: true,
		},
		{
			name:   "Decorate runtime for pu activated as linux process",
			fields: fields{},
			args: args{
				event:        common.EventStart,
				podName:      podName,
				podNamespace: podNamespace,
				runtimeInfo:  pur1,
				puID:         hostpuID,
			},
			wantErr: false,
		},
		{
			name: "Decorate runtime for pu activated as container process",
			fields: fields{
				cache: testCache,
			},
			args: args{
				event:        common.EventStart,
				podName:      podName,
				podNamespace: podNamespace,
				runtimeInfo:  pur2,
				puID:         puID,
			},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			m := &KubernetesMonitor{
				dockerMonitor:       tt.fields.dockerMonitor,
				kubeClient:          tt.fields.kubeClient,
				localNode:           tt.fields.localNode,
				handlers:            tt.fields.handlers,
				cache:               tt.fields.cache,
				kubernetesExtractor: tt.fields.kubernetesExtractor,
				podStore:            tt.fields.podStore,
				podController:       tt.fields.podController,
				podControllerStop:   tt.fields.podControllerStop,
				enableHostPods:      tt.fields.enableHostPods,
			}
			if err := m.decorateRuntime(tt.args.puID, tt.args.runtimeInfo, tt.args.event, tt.args.podName, tt.args.podNamespace); (err != nil) != tt.wantErr {
				t.Errorf("KubernetesMonitor.decorateRuntime() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}