github.com/khulnasoft-lab/kube-bench@v0.2.1-0.20240330183753-9df52345ae58/cfg/tkgi-1.2.53/node.yaml (about)

     1  ---
     2  controls:
     3  version: "tkgi-1.2.53"
     4  id: 4
     5  text: "Worker Node Security Configuration"
     6  type: "node"
     7  groups:
     8    - id: 4.1
     9      text: "Worker Node Configuration Files"
    10      checks:
    11        - id: 4.1.1
    12          text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive"
    13          audit: stat -c permissions=%a /var/vcap/jobs/kubelet/monit
    14          tests:
    15            test_items:
    16              - flag: "permissions"
    17                compare:
    18                  op: bitmask
    19                  value: "644"
    20          remediation: |
     21            Run the below command (based on the file location on your system) on each worker node.
    22            For example,
    23            chmod 644 /var/vcap/jobs/kubelet/monit
    24          scored: true
    25  
    26        - id: 4.1.2
    27          text: "Ensure that the kubelet service file ownership is set to root:root"
    28          audit: stat -c %U:%G /var/vcap/jobs/kubelet/monit
    29          tests:
    30            test_items:
    31              - flag: root:root
    32          remediation: |
     33            Run the below command (based on the file location on your system) on each worker node.
    34            For example,
    35            chown root:root /var/vcap/jobs/kubelet/monit
    36            Exception
    37            File is group owned by vcap
    38          scored: true
    39  
    40        - id: 4.1.3
    41          text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive"
    42          audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig
    43          tests:
    44            test_items:
    45              - flag: "permissions"
    46                compare:
    47                  op: bitmask
    48                  value: "644"
    49          remediation: |
     50            Run the below command (based on the file location on your system) on each worker node.
    51            For example,
    52            chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig
    53          scored: true
    54  
    55        - id: 4.1.4
    56          text: "Ensure that the proxy kubeconfig file ownership is set to root:root"
    57          audit: stat -c %U:%G /var/vcap/jobs/kube-proxy/config/kubeconfig
    58          type: manual
    59          tests:
    60            test_items:
    61              - flag: root:root
    62          remediation: |
     63            Run the below command (based on the file location on your system) on each worker node.
    64            For example, chown root:root /var/vcap/jobs/kube-proxy/config/kubeconfig
    65            Exception
    66            File is group owned by vcap
    67          scored: false
    68  
    69        - id: 4.1.5
    70          text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive"
    71          audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig
    72          type: manual
    73          tests:
    74            test_items:
    75              - flag: "permissions"
    76                compare:
    77                  op: bitmask
    78                  value: "644"
    79          remediation: |
     80            Run the below command (based on the file location on your system) on each worker node.
    81            For example,
    82            chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig
    83            Exception
    84            kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on worker
    85          scored: false
    86  
    87        - id: 4.1.6
    88          text: "Ensure that the kubelet.conf file ownership is set to root:root"
    89          audit: stat -c %U:%G /etc/kubernetes/kubelet.conf
    90          type: manual
    91          tests:
    92            test_items:
    93              - flag: root:root
    94          remediation: |
     95            Run the below command (based on the file location on your system) on each worker node.
    96            For example,
    97            chown root:root /etc/kubernetes/kubelet.conf
    98            Exception
    99            file ownership is vcap:vcap
   100          scored: false
   101  
   102        - id: 4.1.7
   103          text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive"
   104          audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem
   105          tests:
   106            test_items:
   107              - flag: "permissions"
   108                compare:
   109                  op: bitmask
   110                  value: "644"
   111          remediation: |
    112            Run the following command to modify the file permissions of the
    113            --client-ca-file: chmod 644 <filename>
   114          scored: true
   115  
   116        - id: 4.1.8
   117          text: "Ensure that the client certificate authorities file ownership is set to root:root"
   118          audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem
   119          type: manual
   120          tests:
   121            test_items:
   122              - flag: root:root
   123                compare:
   124                  op: eq
   125                  value: root:root
   126          remediation: |
   127            Run the following command to modify the ownership of the --client-ca-file.
   128            chown root:root <filename>
   129            Exception
   130            File is group owned by vcap
   131          scored: false
   132  
   133        - id: 4.1.9
   134          text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive"
   135          audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   136          tests:
   137            test_items:
   138              - flag: "permissions"
   139                compare:
   140                  op: bitmask
   141                  value: "644"
   142          remediation: |
   143            Run the following command (using the config file location identified in the Audit step)
   144            chmod 644 /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   145          scored: true
   146  
   147        - id: 4.1.10
   148          text: "Ensure that the kubelet --config configuration file ownership is set to root:root"
   149          audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   150          type: manual
   151          tests:
   152            test_items:
   153              - flag: root:root
   154          remediation: |
   155            Run the following command (using the config file location identified in the Audit step)
   156            chown root:root /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   157            Exception
   158            File is group owned by vcap
   159          scored: false
   160  
   161    - id: 4.2
   162      text: "Kubelet"
   163      checks:
   164        - id: 4.2.1
   165          text: "Ensure that the anonymous-auth argument is set to false"
   166          audit: grep "^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   167          tests:
   168            test_items:
   169              - flag: "enabled: false"
   170          remediation: |
   171            If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
   172            false.
   173            If using executable arguments, edit the kubelet service file
   174            on each worker node and
   175            set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
   176            --anonymous-auth=false
   177            Based on your system, restart the kubelet service. For example:
   178            systemctl daemon-reload
   179            systemctl restart kubelet.service
   180          scored: true
   181  
   182        - id: 4.2.2
   183          text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow"
   184          audit: |
   185            grep "^authorization:\n\s{2}mode: AlwaysAllow$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   186          tests:
   187            test_items:
   188              - flag: "AlwaysAllow"
   189                set: false
   190          remediation: |
   191            If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If
   192            using executable arguments, edit the kubelet service file
   193            on each worker node and
   194            set the below parameter in KUBELET_AUTHZ_ARGS variable.
   195            --authorization-mode=Webhook
   196            Based on your system, restart the kubelet service. For example:
   197            systemctl daemon-reload
   198            systemctl restart kubelet.service
   199          scored: true
   200  
   201        - id: 4.2.3
   202          text: "Ensure that the --client-ca-file argument is set as appropriate"
    203          audit: |
    204            grep ^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse\n(\s{2}webhook:\n\s{4}cacheTTL:\s\d+s\n\s{4}enabled:.*\n)?\s{2}x509:\n\s{4}clientCAFile:\s"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-client-ca\.pem" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   206          tests:
   207            test_items:
   208              - flag: "clientCAFile"
   209          remediation: |
   210            If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to
   211            the location of the client CA file.
   212            If using command line arguments, edit the kubelet service file
   213            on each worker node and
   214            set the below parameter in KUBELET_AUTHZ_ARGS variable.
   215            --client-ca-file=<path/to/client-ca-file>
   216            Based on your system, restart the kubelet service. For example:
   217            systemctl daemon-reload
   218            systemctl restart kubelet.service
   219          scored: true
   220  
   221        - id: 4.2.4
   222          text: "Ensure that the --read-only-port argument is set to 0"
   223          audit: |
   224            grep "readOnlyPort: 0" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   225          tests:
   226            test_items:
   227              - flag: "readOnlyPort: 0"
   228          remediation: |
   229            If using a Kubelet config file, edit the file to set readOnlyPort to 0.
   230            If using command line arguments, edit the kubelet service file
   231            on each worker node and
   232            set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
   233            --read-only-port=0
   234            Based on your system, restart the kubelet service. For example:
   235            systemctl daemon-reload
   236            systemctl restart kubelet.service
   237          scored: true
   238  
   239        - id: 4.2.5
   240          text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0"
   241          audit: |
   242            grep -- "streamingConnectionIdleTimeout: 0"  /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   243          tests:
   244            test_items:
   245              - flag: "streamingConnectionIdleTimeout: 0"
   246                set: false
   247          remediation: |
   248            If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a
   249            value other than 0.
   250            If using command line arguments, edit the kubelet service file
   251            on each worker node and
   252            set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
   253            --streaming-connection-idle-timeout=5m
   254            Based on your system, restart the kubelet service. For example:
   255            systemctl daemon-reload
   256            systemctl restart kubelet.service
   257          scored: true
   258  
   259        - id: 4.2.6
   260          text: "Ensure that the --protect-kernel-defaults argument is set to true"
   261          audit: |
   262            grep -- "protectKernelDefaults: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   263          tests:
   264            test_items:
   265              - flag: "protectKernelDefaults: true"
   266          remediation: |
   267            If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
   268            If using command line arguments, edit the kubelet service file
   269            on each worker node and
   270            set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
   271            --protect-kernel-defaults=true
   272            Based on your system, restart the kubelet service. For example:
   273            systemctl daemon-reload
   274            systemctl restart kubelet.service
   275          scored: true
   276  
   277        - id: 4.2.7
   278          text: "Ensure that the --make-iptables-util-chains argument is set to true"
   279          audit: |
   280            grep -- "makeIPTablesUtilChains: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   281          tests:
   282            test_items:
   283              - flag: "makeIPTablesUtilChains: true"
   284          remediation: |
   285            If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
   286            If using command line arguments, edit the kubelet service file
   287            on each worker node and
   288            remove the --make-iptables-util-chains argument from the
   289            KUBELET_SYSTEM_PODS_ARGS variable.
   290            Based on your system, restart the kubelet service. For example:
   291            systemctl daemon-reload
   292            systemctl restart kubelet.service
   293          scored: true
   294  
   295        - id: 4.2.8
   296          text: "Ensure that the --hostname-override argument is not set"
   297          audit: |
   298            ps -ef | grep [k]ubelet | grep -- --[c]onfig=/var/vcap/jobs/kubelet/config/kubeletconfig.yml | grep -v -- --hostname-override
   299          type: manual
   300          remediation: |
   301            Edit the kubelet service file
   302            on each worker node and remove the --hostname-override argument from the
   303            KUBELET_SYSTEM_PODS_ARGS variable.
   304            Based on your system, restart the kubelet service. For example:
   305            systemctl daemon-reload
   306            systemctl restart kubelet.service
   307            Exception
   308            On GCE, the hostname needs to be set to the instance name so the gce cloud provider can manage the instance.
   309            In other cases its set to the IP address of the VM.
   310          scored: false
   311  
   312        - id: 4.2.9
   313          text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture"
   314          audit: grep -- "--event-qps" /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   315          type: manual
   316          tests:
   317            test_items:
   318              - flag: "--event-qps"
   319                compare:
   320                  op: eq
   321                  value: 0
   322          remediation: |
   323            If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
   324            If using command line arguments, edit the kubelet service file
   325            on each worker node and
   326            set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
   327            Based on your system, restart the kubelet service. For example:
   328            systemctl daemon-reload
   329            systemctl restart kubelet.service
   330          scored: false
   331  
   332        - id: 4.2.10
   333          text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate"
    334          audit: >-
    335            grep  ^tlsCertFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet\.pem\"\ntlsPrivateKeyFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-key\.pem\"$
    336            /var/vcap/jobs/kubelet/config/kubeletconfig.yml
   337          tests:
   338            bin_op: and
   339            test_items:
   340              - flag: "tlsCertFile"
   341              - flag: "tlsPrivateKeyFile"
   342          remediation: |
   343            If using a Kubelet config file, edit the file to set tlsCertFile to the location
   344            of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile
   345            to the location of the corresponding private key file.
   346            If using command line arguments, edit the kubelet service file
   347            on each worker node and
   348            set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
   349            --tls-cert-file=<path/to/tls-certificate-file>
   350            --tls-private-key-file=<path/to/tls-key-file>
   351            Based on your system, restart the kubelet service. For example:
   352            systemctl daemon-reload
   353            systemctl restart kubelet.service
   354          scored: true
   355  
   356        - id: 4.2.11
   357          text: "Ensure that the --rotate-certificates argument is not set to false"
   358          audit: ps -ef | grep kubele[t] | grep -- "--rotate-certificates=false"
   359          type: manual
   360          tests:
   361            test_items:
   362              - flag: "--rotate-certificates=false"
   363                set: false
   364          remediation: |
   365            If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
   366            remove it altogether to use the default value.
   367            If using command line arguments, edit the kubelet service file
   368            on each worker node and
   369            remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
   370            variable.
   371            Based on your system, restart the kubelet service. For example:
   372            systemctl daemon-reload
   373            systemctl restart kubelet.service
   374            Exception
   375            Certificate rotation is handled by Credhub
   376          scored: false
   377  
   378        - id: 4.2.12
   379          text: "Verify that the RotateKubeletServerCertificate argument is set to true"
   380          audit: ps -ef | grep kubele[t] | grep -- "--feature-gates=\(\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*"
   381          type: manual
   382          tests:
   383            test_items:
   384              - flag: "RotateKubeletServerCertificate=true"
   385          remediation: |
   386            Edit the kubelet service file
   387            on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
   388            --feature-gates=RotateKubeletServerCertificate=true
   389            Based on your system, restart the kubelet service. For example:
   390            systemctl daemon-reload
   391            systemctl restart kubelet.service
   392            Exception
   393            Certificate rotation is handled by Credhub
   394          scored: false
   395  
   396        - id: 4.2.13
   397          text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers"
    398          audit: >-
    399            ps -ef | grep kubele[t] | grep -- "--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
   400          type: manual
   401          tests:
   402            test_items:
   403              - flag: --tls-cipher-suites
   404                compare:
   405                  op: regex
   406                  value: (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384|TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_128_GCM_SHA256)
   407          remediation: |
   408            If using a Kubelet config file, edit the file to set TLSCipherSuites: to
   409            TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
   410            or to a subset of these values.
   411            If using executable arguments, edit the kubelet service file
   412            on each worker node and
   413            set the --tls-cipher-suites parameter as follows, or to a subset of these values.
   414            --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
   415            Based on your system, restart the kubelet service. For example:
   416            systemctl daemon-reload
   417            systemctl restart kubelet.service
   418          scored: false