github.com/castai/kvisor@v1.7.1-0.20240516114728-b3572a2607b5/cmd/controller/state/imagescan/scanner_test.go (about)

     1  package imagescan
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"testing"
     7  	"time"
     8  
     9  	"github.com/castai/kvisor/cmd/controller/kube"
    10  	"github.com/samber/lo"
    11  	"github.com/sirupsen/logrus"
    12  	"github.com/stretchr/testify/require"
    13  	batchv1 "k8s.io/api/batch/v1"
    14  	corev1 "k8s.io/api/core/v1"
    15  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    16  	"k8s.io/apimachinery/pkg/api/resource"
    17  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    18  	"k8s.io/client-go/kubernetes/fake"
    19  )
    20  
// TestScanner exercises the image scanner's job lifecycle against a fake
// Kubernetes clientset: creating a scan job (per cloud provider), removing an
// already-completed job before rescanning, and surfacing pod conditions as a
// detailed error when a job fails.
func TestScanner(t *testing.T) {
	log := logrus.New()
	log.SetLevel(logrus.DebugLevel)
	// Namespace in which all scan jobs are created and looked up.
	ns := "castai-sec"

	t.Run("create scan job", func(t *testing.T) {
		for _, testCase := range []struct {
			cloudProvider string
			// podLabels are the labels expected on the scan job's pod template
			// for the given cloud provider.
			podLabels map[string]string
		}{
			{
				// On AKS the scanner pod is expected to opt into Azure
				// workload identity via this label.
				cloudProvider: "aks",
				podLabels: map[string]string{
					"azure.workload.identity/use": "true",
				},
			},
			{
				cloudProvider: "eks",
				podLabels:     map[string]string{},
			},
		} {
			t.Run(fmt.Sprintf("in %s", testCase.cloudProvider), func(t *testing.T) {
				// Short timeout keeps a misbehaving ScanImage from hanging the test.
				ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
				defer cancel()

				r := require.New(t)

				client := fake.NewSimpleClientset()
				scanner := NewImageScanner(client, Config{
					CPURequest:          "500m",
					CPULimit:            "2",
					MemoryRequest:       "100Mi",
					MemoryLimit:         "2Gi",
					ProfileEnabled:      true,
					PhlareEnabled:       true,
					Mode:                "",
					CastaiSecretRefName: "castai-kvisor",
					CastaiGRPCAddress:   "api.cast.ai:443",
					CastaiClusterID:     "abcd",
					CloudProvider:       testCase.cloudProvider,
				}, ns)
				// Poll aggressively so the scan finishes within the 100ms ctx timeout.
				scanner.jobCheckInterval = 1 * time.Microsecond

				err := scanner.ScanImage(ctx, ScanImageParams{
					ImageName:        "test-image",
					ImageID:          "test-image@sha2566282b5ec0c18cfd723e40ef8b98649a47b9388a479c520719c615acc3b073504",
					ContainerRuntime: "containerd",
					Mode:             "hostfs",
					NodeName:         "n1",
					ResourceIDs:      []string{"p1", "p2"},
					Architecture:     "amd64",
					Os:               "linux",
					ScanImageDetails: kube.ImageDetails{
						ScannerImageName: "imgcollector:1.0.0",
					},
				})
				r.NoError(err)

				// Exactly one job must have been created; compare it
				// field-by-field against the fully expanded expected spec.
				jobs, err := client.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
				r.NoError(err)
				r.Len(jobs.Items, 1)
				r.Equal(batchv1.Job{
					TypeMeta: metav1.TypeMeta{
						Kind:       "Job",
						APIVersion: "batch/v1",
					},
					ObjectMeta: metav1.ObjectMeta{
						Annotations: map[string]string{
							"autoscaling.cast.ai/disposable": "true",
						},
						// Job name is expected to be deterministic for a given
						// image (same name reappears in the other subtests).
						Name:      "castai-imgscan-1ba98dcd098ba64e9b2fe4dafc7a5c85",
						Namespace: ns,
						Labels: map[string]string{
							"app.kubernetes.io/managed-by": "castai",
						},
					},
					Spec: batchv1.JobSpec{
						TTLSecondsAfterFinished: lo.ToPtr(int32(100)),
						BackoffLimit:            lo.ToPtr(int32(0)),
						Template: corev1.PodTemplateSpec{
							ObjectMeta: metav1.ObjectMeta{
								// Phlare profiling annotations come from
								// PhlareEnabled in the Config above.
								Annotations: map[string]string{
									"phlare.grafana.com/port":   "6060",
									"phlare.grafana.com/scrape": "true",
								},
								Labels: testCase.podLabels,
							},
							Spec: corev1.PodSpec{
								RestartPolicy: "Never",
								Affinity: &corev1.Affinity{
									NodeAffinity: &corev1.NodeAffinity{
										// Hard requirement: linux nodes only.
										RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
											NodeSelectorTerms: []corev1.NodeSelectorTerm{
												{
													MatchExpressions: []corev1.NodeSelectorRequirement{
														{
															Key:      "kubernetes.io/os",
															Operator: corev1.NodeSelectorOpIn,
															Values:   []string{"linux"},
														},
													},
												},
											},
										},
										// Soft preference: schedule next to the
										// node that runs the scanned image (NodeName).
										PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
											{
												Weight: 1,
												Preference: corev1.NodeSelectorTerm{
													MatchExpressions: []corev1.NodeSelectorRequirement{
														{
															Key:      "kubernetes.io/hostname",
															Operator: corev1.NodeSelectorOpIn,
															Values:   []string{"n1"},
														},
													},
												},
											},
										},
									},
								},
								Tolerations: []corev1.Toleration{
									{
										Operator: corev1.TolerationOpExists,
										Key:      "scheduling.cast.ai/spot",
									},
								},
								AutomountServiceAccountToken: lo.ToPtr(false),
								Containers: []corev1.Container{
									{
										Name:  "collector",
										Image: "imgcollector:1.0.0",
										Command: []string{
											"/usr/local/bin/kvisor-image-scanner",
										},
										Args: []string{
											"scan",
										},
										// Credentials are injected from the
										// CastaiSecretRefName secret.
										EnvFrom: []corev1.EnvFromSource{
											{
												SecretRef: &corev1.SecretEnvSource{
													LocalObjectReference: corev1.LocalObjectReference{
														Name: "castai-kvisor",
													},
												},
											},
										},
										// COLLECTOR_* env vars mirror the
										// ScanImageParams passed above.
										Env: []corev1.EnvVar{
											{
												Name:  "GOMEMLIMIT",
												Value: "1800MiB",
											},
											{
												Name:  "COLLECTOR_IMAGE_ID",
												Value: "test-image@sha2566282b5ec0c18cfd723e40ef8b98649a47b9388a479c520719c615acc3b073504",
											},
											{
												Name:  "COLLECTOR_IMAGE_NAME",
												Value: "test-image",
											},
											{
												Name:  "COLLECTOR_TIMEOUT",
												Value: "5m",
											},
											{
												Name:  "COLLECTOR_MODE",
												Value: "hostfs",
											},
											{
												Name:  "COLLECTOR_RUNTIME",
												Value: "containerd",
											},
											{
												Name:  "COLLECTOR_RESOURCE_IDS",
												Value: "p1,p2",
											},
											{
												Name:  "COLLECTOR_IMAGE_ARCHITECTURE",
												Value: "amd64",
											},
											{
												Name:  "COLLECTOR_IMAGE_OS",
												Value: "linux",
											},
											{
												Name:  "CASTAI_API_GRPC_ADDR",
												Value: "api.cast.ai:443",
											},
											{
												Name:  "CASTAI_CLUSTER_ID",
												Value: "abcd",
											},
											{
												Name:  "COLLECTOR_PPROF_ADDR",
												Value: ":6060",
											},
										},
										// hostfs mode mounts the node's
										// containerd content store read-only.
										VolumeMounts: []corev1.VolumeMount{
											{
												Name:      "containerd-content",
												ReadOnly:  true,
												MountPath: "/var/lib/containerd/io.containerd.content.v1.content",
											},
										},
										Resources: corev1.ResourceRequirements{
											Limits: map[corev1.ResourceName]resource.Quantity{
												corev1.ResourceCPU:    resource.MustParse("2"),
												corev1.ResourceMemory: resource.MustParse("2Gi"),
											},
											Requests: map[corev1.ResourceName]resource.Quantity{
												corev1.ResourceCPU:    resource.MustParse("500m"),
												corev1.ResourceMemory: resource.MustParse("100Mi"),
											},
										},
										SecurityContext: &corev1.SecurityContext{
											RunAsUser:                lo.ToPtr(nonRootUserID),
											RunAsNonRoot:             lo.ToPtr(true),
											AllowPrivilegeEscalation: lo.ToPtr(false),
										},
									},
								},
								Volumes: []corev1.Volume{
									{
										Name: "containerd-content",
										VolumeSource: corev1.VolumeSource{
											HostPath: &corev1.HostPathVolumeSource{
												Path: "/var/lib/containerd/io.containerd.content.v1.content",
												Type: lo.ToPtr(corev1.HostPathDirectory),
											},
										},
									},
								},
							},
						},
					},
					Status: batchv1.JobStatus{},
				}, jobs.Items[0])

			})
		}
	})

	t.Run("delete already completed job", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
		defer cancel()

		r := require.New(t)

		// Seed the fake cluster with a job for the same image that has
		// already reached the Complete condition.
		job := &batchv1.Job{
			TypeMeta: metav1.TypeMeta{},
			ObjectMeta: metav1.ObjectMeta{
				Name:      "castai-imgscan-1ba98dcd098ba64e9b2fe4dafc7a5c85",
				Namespace: ns,
			},
			Spec: batchv1.JobSpec{},
			Status: batchv1.JobStatus{
				Conditions: []batchv1.JobCondition{
					{
						Type:   batchv1.JobComplete,
						Status: corev1.ConditionTrue,
					},
				},
			},
		}
		client := fake.NewSimpleClientset(job)
		scanner := NewImageScanner(client, Config{
			CPURequest:    "500m",
			CPULimit:      "2",
			MemoryRequest: "100Mi",
			MemoryLimit:   "2Gi",
		}, ns)
		scanner.jobCheckInterval = 1 * time.Microsecond

		// With DeleteFinishedJob set, ScanImage is expected to remove the
		// completed job so that no job with this name remains afterwards.
		err := scanner.ScanImage(ctx, ScanImageParams{
			ImageName:         "test-image",
			ImageID:           "test-image@sha2566282b5ec0c18cfd723e40ef8b98649a47b9388a479c520719c615acc3b073504",
			ContainerRuntime:  "containerd",
			Mode:              "hostfs",
			NodeName:          "n1",
			ResourceIDs:       []string{"p1", "p2"},
			DeleteFinishedJob: true,
			ScanImageDetails: kube.ImageDetails{
				ScannerImageName: "imgcollector:1.0.0",
			},
		})
		r.NoError(err)

		_, err = client.BatchV1().Jobs(ns).Get(ctx, job.Name, metav1.GetOptions{})
		r.True(apierrors.IsNotFound(err))
	})

	t.Run("get failed job error with detailed reason", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
		defer cancel()

		r := require.New(t)

		// Seed a pod labeled with the scan job's name whose conditions
		// indicate it could not be scheduled; these conditions should be
		// reflected verbatim in the returned error.
		jobPod := &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: ns,
				Name:      "img-scan",
				Labels: map[string]string{
					"job-name": "castai-imgscan-1ba98dcd098ba64e9b2fe4dafc7a5c85",
				},
			},
			Status: corev1.PodStatus{
				Conditions: []corev1.PodCondition{
					{
						Type:   corev1.PodReady,
						Status: corev1.ConditionFalse,
						Reason: "no cpu",
					},
					{
						Type:   corev1.PodScheduled,
						Status: corev1.ConditionFalse,
						Reason: "no cpu",
					},
				},
			},
		}

		client := fake.NewSimpleClientset(jobPod)
		scanner := NewImageScanner(client, Config{}, ns)
		scanner.jobCheckInterval = 1 * time.Microsecond

		// WaitForCompletion makes ScanImage block until the job finishes,
		// so the failure details are available in the returned error.
		err := scanner.ScanImage(ctx, ScanImageParams{
			ImageName:         "test-image",
			ImageID:           "test-image@sha2566282b5ec0c18cfd723e40ef8b98649a47b9388a479c520719c615acc3b073504",
			ContainerRuntime:  "containerd",
			Mode:              "hostfs",
			NodeName:          "n1",
			ResourceIDs:       []string{"p1", "p2"},
			WaitForCompletion: true,
			ScanImageDetails: kube.ImageDetails{
				ScannerImageName: "imgcollector:1.0.0",
			},
		})
		r.ErrorContains(err, "[type=Ready, status=False, reason=no cpu], [type=PodScheduled, status=False, reason=no cpu]")
	})
}