github.com/khulnasoft-lab/kube-bench@v0.2.1-0.20240330183753-9df52345ae58/cfg/tkgi-1.2.53/master.yaml

---
controls:
version: "tkgi-1.2.53"
id: 1
text: "Master Node Security Configuration"
type: "master"
groups:
  - id: 1.1
    text: "Master Node Configuration Files"
    checks:
      - id: 1.1.1
        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive"
        audit: stat -c permissions=%a /var/vcap/jobs/kube-apiserver/config/bpm.yml
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the
          master node.
          For example, chmod 644 /var/vcap/jobs/kube-apiserver/config/bpm.yml
        scored: true

      - id: 1.1.2
        text: "Ensure that the API server pod specification file ownership is set to root:root"
        audit: stat -c %U:%G /var/vcap/jobs/kube-apiserver/config/bpm.yml
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the
          master node.
          For example, chown root:root /var/vcap/jobs/kube-apiserver/config/bpm.yml
        scored: true

      - id: 1.1.3
        text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive"
        audit: stat -c permissions=%a /var/vcap/jobs/kube-controller-manager/config/bpm.yml
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the
          master node.
          For example, chmod 644 /var/vcap/jobs/kube-controller-manager/config/bpm.yml
        scored: true

      - id: 1.1.4
        text: "Ensure that the controller manager pod specification file ownership is set to root:root"
        audit: stat -c %U:%G /var/vcap/jobs/kube-controller-manager/config/bpm.yml
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the
          master node.
          For example, chown root:root /var/vcap/jobs/kube-controller-manager/config/bpm.yml
        scored: true

      - id: 1.1.5
        text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive"
        audit: stat -c permissions=%a /var/vcap/jobs/kube-scheduler/config/bpm.yml
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the
          master node.
          For example, chmod 644 /var/vcap/jobs/kube-scheduler/config/bpm.yml
        scored: true

      - id: 1.1.6
        text: "Ensure that the scheduler pod specification file ownership is set to root:root"
        audit: stat -c %U:%G /var/vcap/jobs/kube-scheduler/config/bpm.yml
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chown root:root /var/vcap/jobs/kube-scheduler/config/bpm.yml
        scored: true
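
# NOTE: the `bitmask` op above passes when the observed mode sets no bits
# outside the expected mask, so 640 or 600 also satisfy "644 or more
# restrictive". A minimal sketch to spot-check the three bpm.yml files from
# this group in one pass, assuming the TKGI paths used in the audits above:
#
#   for j in kube-apiserver kube-controller-manager kube-scheduler; do
#     stat -c '%n mode=%a owner=%U:%G' "/var/vcap/jobs/$j/config/bpm.yml"
#   done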

      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive"
        audit: stat -c permissions=%a /var/vcap/jobs/etcd/config/bpm.yml
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chmod 644 /var/vcap/jobs/etcd/config/bpm.yml
        scored: true

      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root"
        audit: stat -c %U:%G /var/vcap/jobs/etcd/config/bpm.yml
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chown root:root /var/vcap/jobs/etcd/config/bpm.yml
        scored: true

      - id: 1.1.9
        text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive"
        audit: find ((CNI_DIR))/config/ -type f -not -perm 640 | awk 'END{print NR}' | grep "^0$"
        type: manual
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chmod 644 <path/to/cni/files>
        scored: false

      - id: 1.1.10
        text: "Ensure that the Container Network Interface file ownership is set to root:root"
        audit: find ((CNI_DIR))/config/ -type f -not -user root -or -not -group root | awk 'END{print NR}' | grep "^0$"
        type: manual
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chown root:root <path/to/cni/files>
        scored: false

      - id: 1.1.11
        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive"
        audit: stat -c permissions=%a /var/vcap/store/etcd/
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "700"
        remediation: |
          Run the below command (based on the etcd data directory found above). For example,
          chmod 700 /var/vcap/store/etcd/
        scored: true

      - id: 1.1.12
        text: "Ensure that the etcd data directory ownership is set to etcd:etcd"
        audit: stat -c %U:%G /var/vcap/store/etcd/
        type: manual
        tests:
          test_items:
            - flag: "etcd:etcd"
        remediation: |
          Run the below command (based on the etcd data directory found above).
          For example, chown etcd:etcd /var/vcap/store/etcd/
          Exception: All bosh processes run as the vcap user, so
          the etcd data directory ownership is vcap:vcap.
        scored: false

      - id: 1.1.13
        text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive"
        audit: stat -c permissions=%a /etc/kubernetes/admin.conf
        type: manual
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chmod 644 /etc/kubernetes/admin.conf
          Exception
          kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master.
          Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components
        scored: false
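
# NOTE: checks 1.1.7-1.1.13 pair a read-only stat audit with a chmod/chown
# remediation. For the etcd data directory of check 1.1.11, a symbolic chmod
# enforces "700 or more restrictive" without ever loosening a stricter mode
# -- a sketch, assuming /var/vcap/store/etcd/ is the data directory:
#
#   chmod g-rwx,o-rwx /var/vcap/store/etcd/   # strips group/other bits only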

      - id: 1.1.14
        text: "Ensure that the admin.conf file ownership is set to root:root"
        audit: stat -c %U:%G /etc/kubernetes/admin.conf
        type: manual
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chown root:root /etc/kubernetes/admin.conf
          Exception
          kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master.
          Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components
        scored: false

      - id: 1.1.15
        text: "Ensure that the scheduler configuration file permissions are set to 644"
        audit: stat -c permissions=%a /etc/kubernetes/scheduler.conf
        type: manual
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chmod 644 /etc/kubernetes/scheduler.conf
          Exception
          kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master.
          Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components
        scored: false

      - id: 1.1.16
        text: "Ensure that the scheduler configuration file ownership is set to root:root"
        audit: stat -c %U:%G /etc/kubernetes/scheduler.conf
        type: manual
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chown root:root /etc/kubernetes/scheduler.conf
          Exception
          kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master.
          Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components
        scored: false

      - id: 1.1.17
        text: "Ensure that the controller manager configuration file permissions are set to 644"
        audit: stat -c permissions=%a /etc/kubernetes/controller-manager.conf
        type: manual
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chmod 644 /etc/kubernetes/controller-manager.conf
          Exception
          kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master.
          Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components
        scored: false
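
# NOTE: 1.1.14-1.1.17 are manual checks because TKGI is not bootstrapped with
# kubeadm, so the /etc/kubernetes/*.conf files normally do not exist. A sketch
# that audits them without erroring on absent files (paths from the audits
# above):
#
#   for f in /etc/kubernetes/admin.conf /etc/kubernetes/scheduler.conf /etc/kubernetes/controller-manager.conf; do
#     [ -e "$f" ] && stat -c '%n %a %U:%G' "$f" || echo "$f absent (kubeadm not used)"
#   done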

      - id: 1.1.18
        text: "Ensure that the controller manager configuration file ownership is set to root:root"
        audit: stat -c %U:%G /etc/kubernetes/controller-manager.conf
        type: manual
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chown root:root /etc/kubernetes/controller-manager.conf
          Exception
          kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master.
          Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components
        scored: false

      - id: 1.1.19
        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root"
        audit: |
          find -L /var/vcap/jobs/kube-apiserver/config /var/vcap/jobs/kube-controller-manager/config /var/vcap/jobs/kube-scheduler/config ((CNI_DIR))/config /var/vcap/jobs/etcd/config | sort -u | xargs ls -ld | awk '{ print $3 " " $4}' | grep -c -v "root root" | grep "^0$"
        type: manual
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chown -R root:root /etc/kubernetes/pki/
          Exception
          Files are group owned by vcap
        scored: false

      - id: 1.1.20
        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive"
        audit: |
          find -L /var/vcap/jobs/kube-apiserver/config \( -name '*.crt' -or -name '*.pem' \) -and -not -perm 640 | grep -v "packages/golang" | grep -v "packages/ncp_rootfs" | awk 'END{print NR}' | grep "^0$"
        type: manual
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chmod -R 644 /etc/kubernetes/pki/*.crt
          Exception
          Ignoring packages/golang, as that package includes test certs used by golang. Ignoring packages/ncp_rootfs on
          TKGI with the NSX-T container plugin, where the package is used as the overlay filesystem
          (`mount | grep "packages/ncp_rootfs"`).
        scored: false

      - id: 1.1.21
        text: "Ensure that the Kubernetes PKI key file permissions are set to 600"
        audit: |
          find -L /var/vcap/jobs/kube-apiserver/config -name '*.key' -and -not -perm 600 | awk 'END{print NR}' | grep "^0$"
        type: manual
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: eq
                value: "600"
        remediation: |
          Run the below command (based on the file location on your system) on the master node.
          For example,
          chmod -R 600 /etc/kubernetes/pki/*.key
          Exception
          Permission on etcd .key files is set to 640, to allow read access to the vcap group
        scored: false
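
# NOTE: a compact variant of the 1.1.21 audit that lists the offending key
# files instead of counting them -- a sketch against the same config
# directory used above:
#
#   find -L /var/vcap/jobs/kube-apiserver/config -name '*.key' ! -perm 600 -exec ls -l {} +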

  - id: 1.2
    text: "API Server"
    checks:
      - id: 1.2.1
        text: "Ensure that the --anonymous-auth argument is set to false"
        audit: ps -ef | grep kube-apiserver | grep -- "--anonymous-auth=false"
        type: manual
        tests:
          test_items:
            - flag: "--anonymous-auth=false"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the below parameter.
          --anonymous-auth=false
          Exception
          The flag is set to true to enable API discoverability.
          "Starting in 1.6, the ABAC and RBAC authorizers require explicit authorization of the system:anonymous user or the
          system:unauthenticated group, so legacy policy rules that grant access to the * user or * group do not include
          anonymous users."
          --authorization-mode is set to RBAC
        scored: false

      - id: 1.2.2
        text: "Ensure that the --basic-auth-file argument is not set"
        audit: ps -ef | grep kube-apiserver | grep -v -- "--basic-auth-file"
        tests:
          test_items:
            - flag: "--basic-auth-file"
              set: false
        remediation: |
          Follow the documentation and configure alternate mechanisms for authentication. Then,
          edit the API server pod specification file kube-apiserver
          on the master node and remove the --basic-auth-file=<filename> parameter.
        scored: true

      - id: 1.2.3
        text: "Ensure that the --token-auth-file parameter is not set"
        audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-apiserve[r]" | grep -v tini | grep -v -- "--token-auth-file="
        type: manual
        tests:
          test_items:
            - flag: "--token-auth-file"
              set: false
        remediation: |
          Follow the documentation and configure alternate mechanisms for authentication. Then,
          edit the API server pod specification file /var/vcap/packages/kubernetes/bin/kube-apiserve[r]
          on the master node and remove the --token-auth-file=<filename> parameter.
          Exception
          Since k8s processes' lifecycles are managed by BOSH, token based authentication is required when processes
          restart. The file has 0640 permission and root:vcap ownership.
        scored: false

      - id: 1.2.4
        text: "Ensure that the --kubelet-https argument is set to true"
        audit: ps -ef | grep kube-apiserver | grep -v -- "--kubelet-https=true"
        tests:
          test_items:
            - flag: "--kubelet-https=true"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and remove the --kubelet-https parameter.
        scored: true

      - id: 1.2.5
        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -- "--kubelet-client-certificate=/var/vcap/jobs/kube-apiserver/config/kubelet-client-cert.pem" | grep -- "--kubelet-client-key=/var/vcap/jobs/kube-apiserver/config/kubelet-client-key.pem"
        type: manual
        tests:
          bin_op: and
          test_items:
            - flag: "--kubelet-client-certificate"
            - flag: "--kubelet-client-key"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the
          apiserver and kubelets. Then, edit the API server pod specification file
          kube-apiserver on the master node and set the
          kubelet client certificate and key parameters as below.
          --kubelet-client-certificate=<path/to/client-certificate-file>
          --kubelet-client-key=<path/to/client-key-file>
        scored: false
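
# NOTE: the ps|grep audits in this group can also match the grep process
# itself. A slightly more robust way to read a single flag off the running
# apiserver -- a sketch, assuming a Linux node with /proc and procps pgrep:
#
#   pid=$(pgrep -of kube-apiserver)                  # oldest matching pid
#   tr '\0' '\n' < "/proc/$pid/cmdline" | grep -- '--anonymous-auth'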

      - id: 1.2.6
        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate"
        audit: ps -ef | grep kube-apiserver | grep -- "--kubelet-certificate-authority="
        type: manual
        tests:
          test_items:
            - flag: "--kubelet-certificate-authority"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between
          the apiserver and kubelets. Then, edit the API server pod specification file
          kube-apiserver on the master node and set the
          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
          --kubelet-certificate-authority=<ca-string>
          Exception
          JIRA ticket #PKS-696 created to investigate a fix. PR opened to address the issue
          https://github.com/cloudfoundry-incubator/kubo-release/pull/179
        scored: false

      - id: 1.2.7
        text: "Ensure API server authorization modes do not include AlwaysAllow"
        audit: |
          ps -ef | grep kube-apiserver | grep -- "--authorization-mode" && ps -ef | grep kube-apiserver | grep -v -- "--authorization-mode=\(\w\+\|,\)*AlwaysAllow\(\w\+\|,\)*"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: nothave
                value: "AlwaysAllow"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --authorization-mode parameter to values other than AlwaysAllow.
          One such example could be as below.
          --authorization-mode=RBAC
        scored: true

      - id: 1.2.8
        text: "Ensure that the --authorization-mode argument includes Node"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*Node\(\w\+\|,\)* --"
        type: manual
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "Node"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --authorization-mode parameter to a value that includes Node.
          --authorization-mode=Node,RBAC
          Exception
          This flag can be added using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
        scored: false

      - id: 1.2.9
        text: "Ensure that the --authorization-mode argument includes RBAC"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*RBAC\(\w\+\|,\)* --"
        tests:
          test_items:
            - flag: "--authorization-mode"
              compare:
                op: has
                value: "RBAC"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --authorization-mode parameter to a value that includes RBAC,
          for example:
          --authorization-mode=Node,RBAC
        scored: true

      - id: 1.2.10
        text: "Ensure that the admission control plugin EventRateLimit is set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*EventRateLimit\(\w\+\|,\)*"
        type: manual
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "EventRateLimit"
        remediation: |
          Follow the Kubernetes documentation and set the desired limits in a configuration file.
          Then, edit the API server pod specification file kube-apiserver
          and set the below parameters.
          --enable-admission-plugins=...,EventRateLimit,...
          --admission-control-config-file=<path/to/configuration/file>
          Exception
          "Note: This is an Alpha feature in the Kubernetes v1.13"
          Control provides rate limiting and is site-specific
        scored: false
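
# NOTE: check 1.2.10's remediation references an --admission-control-config-file.
# A sketch of the two files involved, with illustrative qps/burst values and
# hypothetical paths; the plugin config uses the v1alpha1 apiVersion that
# shipped with the EventRateLimit alpha feature (older control planes may
# expect a different AdmissionConfiguration apiVersion):
#
#   cat > /var/vcap/jobs/kube-apiserver/config/admission-control.yaml <<'EOF'
#   apiVersion: apiserver.config.k8s.io/v1
#   kind: AdmissionConfiguration
#   plugins:
#     - name: EventRateLimit
#       path: event-rate-limit.yaml
#   EOF
#   cat > /var/vcap/jobs/kube-apiserver/config/event-rate-limit.yaml <<'EOF'
#   apiVersion: eventratelimit.admission.k8s.io/v1alpha1
#   kind: Configuration
#   limits:
#     - type: Server
#       qps: 50
#       burst: 100
#   EOF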

      - id: 1.2.11
        text: "Ensure that the admission control plugin AlwaysAdmit is not set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysAdmit\(\w\+\|,\)*"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: nothave
                value: AlwaysAdmit
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and either remove the --enable-admission-plugins parameter, or set it to a
          value that does not include AlwaysAdmit.
        scored: true

      - id: 1.2.12
        text: "Ensure that the admission control plugin AlwaysPullImages is set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysPullImages\(\w\+\|,\)* --"
        type: manual
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "AlwaysPullImages"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --enable-admission-plugins parameter to include
          AlwaysPullImages.
          --enable-admission-plugins=...,AlwaysPullImages,...
          Exception
          "Credentials would be required to pull the private images every time. Also, in trusted
          environments, this might increase load on network and registry, and decrease speed.
          This setting could impact offline or isolated clusters, which have images pre-loaded and do
          not have access to a registry to pull in-use images. This setting is not appropriate for
          clusters which use this configuration."
          TKGi is packaged with pre-loaded images.
        scored: false

      - id: 1.2.13
        text: "Ensure that the admission control plugin SecurityContextDeny is set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*SecurityContextDeny\(\w\+\|,\)* --"
        type: manual
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "SecurityContextDeny"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --enable-admission-plugins parameter to include
          SecurityContextDeny, unless PodSecurityPolicy is already in place.
          --enable-admission-plugins=...,SecurityContextDeny,...
          Exception
          This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan"
        scored: false

      - id: 1.2.14
        text: "Ensure that the admission control plugin ServiceAccount is set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\(\w\+\|,\)*ServiceAccount\(\w\+\|,\)* --"
        tests:
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "ServiceAccount"
        remediation: |
          Follow the documentation and create ServiceAccount objects as per your environment.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and ensure that the --disable-admission-plugins parameter is set to a
          value that does not include ServiceAccount.
        scored: true
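
# NOTE: to review the whole admission-plugin configuration at once rather
# than one regex per plugin (1.2.11-1.2.14), a sketch; the [k] keeps grep
# from matching its own command line:
#
#   ps -ef | grep '[k]ube-apiserver' | tr ' ' '\n' | grep -E -- '^--(enable|disable)-admission-plugins='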

      - id: 1.2.15
        text: "Ensure that the admission control plugin NamespaceLifecycle is set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\(\w\+\|,\)*NamespaceLifecycle\(\w\+\|,\)* --"
        tests:
          test_items:
            - flag: "--disable-admission-plugins"
              compare:
                op: nothave
                value: "NamespaceLifecycle"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --disable-admission-plugins parameter to
          ensure it does not include NamespaceLifecycle.
        scored: true

      - id: 1.2.16
        text: "Ensure that the admission control plugin PodSecurityPolicy is set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*PodSecurityPolicy\(\w\+\|,\)* --"
        type: manual
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "PodSecurityPolicy"
        remediation: |
          Follow the documentation and create Pod Security Policy objects as per your environment.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and set the --enable-admission-plugins parameter to a
          value that includes PodSecurityPolicy:
          --enable-admission-plugins=...,PodSecurityPolicy,...
          Then restart the API Server.
          Exception
          This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan"
        scored: false

      - id: 1.2.17
        text: "Ensure that the admission control plugin NodeRestriction is set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*NodeRestriction\(\w\+\|,\)* --"
        type: manual
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
              compare:
                op: has
                value: "NodeRestriction"
        remediation: |
          Follow the Kubernetes documentation and configure the NodeRestriction plug-in on kubelets.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and set the --enable-admission-plugins parameter to a
          value that includes NodeRestriction.
          --enable-admission-plugins=...,NodeRestriction,...
          Exception
          PR opened to address the issue https://github.com/cloudfoundry-incubator/kubo-release/pull/179
        scored: true

      - id: 1.2.18
        text: "Ensure that the --insecure-bind-address argument is not set"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--insecure-bind-address"
        tests:
          test_items:
            - flag: "--insecure-bind-address"
              set: false
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and remove the --insecure-bind-address parameter.
        scored: true

      - id: 1.2.19
        text: "Ensure that the --insecure-port argument is set to 0"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--insecure-port=0"
        type: manual
        tests:
          test_items:
            - flag: "--insecure-port=0"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the below parameter.
          --insecure-port=0
          Exception
          Related to 1.2.1
          The insecure port is 8080 and binds only to localhost on the master node; it is used by other
          components on the master that bypass authn/z.
          The components connecting to the APIServer are:
          kube-controller-manager
          kube-proxy
          kube-scheduler
          Pods are not scheduled on the master node.
        scored: false
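
# NOTE: the 1.2.19 exception depends on the insecure port binding only to
# localhost. A sketch to confirm that on the master node, assuming the port
# is 8080 as stated above:
#
#   ss -ltn | awk '$4 ~ /:8080$/ {print $4}'         # expect 127.0.0.1:8080 only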

      - id: 1.2.20
        text: "Ensure that the --secure-port argument is not set to 0"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--secure-port=0"
        tests:
          test_items:
            - flag: "--secure-port"
              compare:
                op: noteq
                value: 0
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and either remove the --secure-port parameter or
          set it to a different (non-zero) desired port.
        scored: true

      - id: 1.2.21
        text: "Ensure that the --profiling argument is set to false"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--profiling=false"
        tests:
          test_items:
            - flag: "--profiling=false"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.2.22
        text: "Ensure that the --audit-log-path argument is set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-path=\/var\/vcap\/sys\/log\/kube-apiserver\/audit.log"
        type: manual
        tests:
          test_items:
            - flag: "--audit-log-path"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --audit-log-path parameter to a suitable path and
          file where you would like audit logs to be written, for example:
          --audit-log-path=/var/log/apiserver/audit.log
        scored: false

      - id: 1.2.23
        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxage=30"
        type: manual
        tests:
          test_items:
            - flag: "--audit-log-maxage=30"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days:
          --audit-log-maxage=30
          Exception
          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
        scored: false

      - id: 1.2.24
        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxbackup=10"
        type: manual
        tests:
          test_items:
            - flag: "--audit-log-maxbackup=10"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
          value.
          --audit-log-maxbackup=10
          Exception
          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
        scored: false

      - id: 1.2.25
        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxsize=100"
        type: manual
        tests:
          test_items:
            - flag: "--audit-log-maxsize=100"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB.
          For example, to set it as 100 MB:
          --audit-log-maxsize=100
          Exception
          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
        scored: false
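
# NOTE: 1.2.22-1.2.25 each grep for one audit-log flag. A sketch that dumps
# all of them together for review against site policy:
#
#   ps -ef | grep '[k]ube-apiserver' | tr ' ' '\n' | grep -- '--audit-log-'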

      - id: 1.2.26
        text: "Ensure that the --request-timeout argument is set as appropriate"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--request-timeout="
        type: manual
        tests:
          test_items:
            - flag: "--request-timeout"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          and set the below parameter as appropriate and if needed.
          For example,
          --request-timeout=300s
        scored: false

      - id: 1.2.27
        text: "Ensure that the --service-account-lookup argument is set to true"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--service-account-lookup"
        tests:
          test_items:
            - flag: "--service-account-lookup=true"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the below parameter.
          --service-account-lookup=true
          Alternatively, you can delete the --service-account-lookup parameter from this file so
          that the default takes effect.
        scored: true

      - id: 1.2.28
        text: "Ensure that the --service-account-key-file argument is set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--service-account-key-file=/var/vcap/jobs/kube-apiserver/config/service-account-public-key.pem"
        type: manual
        tests:
          test_items:
            - flag: "--service-account-key-file"
        remediation: |
          Edit the API server pod specification file kube-apiserver
          on the master node and set the --service-account-key-file parameter
          to the public key file for service accounts:
          --service-account-key-file=<filename>
        scored: false

      - id: 1.2.29
        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-certfile=/var/vcap/jobs/kube-apiserver/config/etcd-client.crt" | grep -- "--etcd-keyfile=/var/vcap/jobs/kube-apiserver/config/etcd-client.key"
        type: manual
        tests:
          bin_op: and
          test_items:
            - flag: "--etcd-certfile"
            - flag: "--etcd-keyfile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and set the etcd certificate and key file parameters.
          --etcd-certfile=<path/to/client-certificate-file>
          --etcd-keyfile=<path/to/client-key-file>
        scored: false

      - id: 1.2.30
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cert-file=/var/vcap/jobs/kube-apiserver/config/kubernetes.pem" | grep -- "--tls-private-key-file=/var/vcap/jobs/kube-apiserver/config/kubernetes-key.pem"
        type: manual
        tests:
          bin_op: and
          test_items:
            - flag: "--tls-cert-file"
            - flag: "--tls-private-key-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and set the TLS certificate and private key file parameters.
          --tls-cert-file=<path/to/tls-certificate-file>
          --tls-private-key-file=<path/to/tls-key-file>
        scored: false
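
# NOTE: beyond confirming that the flags in 1.2.29/1.2.30 are set, it can be
# worth verifying that the referenced cert and key actually belong together.
# A sketch for RSA pairs, using the TKGI paths from the 1.2.30 audit:
#
#   crt=/var/vcap/jobs/kube-apiserver/config/kubernetes.pem
#   key=/var/vcap/jobs/kube-apiserver/config/kubernetes-key.pem
#   diff <(openssl x509 -noout -modulus -in "$crt") \
#        <(openssl rsa -noout -modulus -in "$key") && echo "cert/key match"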

      - id: 1.2.31
        text: "Ensure that the --client-ca-file argument is set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--client-ca-file=/var/vcap/jobs/kube-apiserver/config/kubernetes-ca.pem"
        type: manual
        tests:
          test_items:
            - flag: "--client-ca-file"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and set the client certificate authority file.
          --client-ca-file=<path/to/client-ca-file>
        scored: false

      - id: 1.2.32
        text: "Ensure that the --etcd-cafile argument is set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-cafile=/var/vcap/jobs/kube-apiserver/config/etcd-ca.crt"
        type: manual
        tests:
          test_items:
            - flag: "--etcd-cafile"
        remediation: |
          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and set the etcd certificate authority file parameter.
          --etcd-cafile=<path/to/ca-file>
        scored: false

      - id: 1.2.33
        text: "Ensure that the --encryption-provider-config argument is set as appropriate"
        audit: |
          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--encryption-provider-config="
        type: manual
        tests:
          test_items:
            - flag: "--encryption-provider-config"
        remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
          Then, edit the API server pod specification file kube-apiserver
          on the master node and set the --encryption-provider-config parameter to the path of that file:
          --encryption-provider-config=</path/to/EncryptionConfig/File>
          Exception
          Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html
        scored: false

      - id: 1.2.34
        text: "Ensure that the encryption provider is set to aescbc"
        audit: |
          ENC_CONF=`ps -ef | grep kube-apiserver | grep -v tini | sed $'s/ /\\\\\\n/g' | grep -- '--encryption-provider-config=' | cut -d'=' -f2`
          grep -- "- \(aescbc\|kms\|secretbox\):" $ENC_CONF
        type: manual
        remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
          In this file, choose aescbc, kms or secretbox as the encryption provider.
          Exception
          Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html
        scored: false
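
# NOTE: a sketch of the encryption configuration shape that 1.2.33/1.2.34
# look for, with aescbc listed first so it is used for writes; the key
# material shown is a placeholder, not a real secret:
#
#   apiVersion: apiserver.config.k8s.io/v1
#   kind: EncryptionConfiguration
#   resources:
#     - resources: ["secrets"]
#       providers:
#         - aescbc:
#             keys:
#               - name: key1
#                 secret: <base64-encoded 32-byte key>
#         - identity: {}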

      - id: 1.2.35
        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers"
        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cipher-suites="
        type: manual
        tests:
          test_items:
            - flag: "--tls-cipher-suites"
              compare:
                op: valid_elements
                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
        remediation: |
          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
          on the master node and set the below parameter.
          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
        scored: false

  - id: 1.3
    text: "Controller Manager"
    checks:
      - id: 1.3.1
        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate"
        audit: ps -ef | grep kube-controller-manager | grep -- "--terminated-pod-gc-threshold=100"
        type: manual
        tests:
          test_items:
            - flag: "--terminated-pod-gc-threshold"
        remediation: |
          Edit the Controller Manager pod specification file controller manager conf
          on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold,
          for example:
          --terminated-pod-gc-threshold=10
        scored: false

      - id: 1.3.2
        text: "Ensure controller manager profiling is disabled"
        audit: ps -ef | grep kube-controller-manager | grep -- "--profiling=false"
        tests:
          test_items:
            - flag: "--profiling=false"
        remediation: |
          Edit the Controller Manager pod specification file controller manager conf
          on the master node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.3.3
        text: "Ensure that the --use-service-account-credentials argument is set to true"
        audit: ps -ef | grep kube-controller-manager | grep -- "--use\-service\-account\-credentials=true"
        tests:
          test_items:
            - flag: "--use-service-account-credentials=true"
        remediation: |
          Edit the Controller Manager pod specification file controller manager conf
          on the master node to set the below parameter.
          --use-service-account-credentials=true
        scored: true

      - id: 1.3.4
        text: "Ensure that the --service-account-private-key-file argument is set as appropriate"
        audit: |
          ps -ef | grep kube-controller-manager | grep -- "--service\-account\-private\-key\-file=\/var\/vcap\/jobs\/kube\-controller\-manager\/config\/service\-account\-private\-key.pem"
        type: manual
        tests:
          test_items:
            - flag: "--service-account-private-key-file"
        remediation: |
          Edit the Controller Manager pod specification file controller manager conf
          on the master node and set the --service-account-private-key-file parameter
          to the private key file for service accounts.
          --service-account-private-key-file=<filename>
        scored: false
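
# NOTE: 1.3.1-1.3.4 each inspect one kube-controller-manager flag at a time.
# A sketch that prints every service-account-related flag in one pass:
#
#   ps -ef | grep '[k]ube-controller-manager' | tr ' ' '\n' | grep -- 'service-account'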

      - id: 1.3.5
        text: "Ensure that the --root-ca-file argument is set as appropriate"
        audit: |
          ps -ef | grep kube-controller-manager | grep -- "--root\-ca\-file=\/var\/vcap\/jobs\/kube\-controller\-manager\/config\/ca.pem"
        type: manual
        tests:
          test_items:
            - flag: "--root-ca-file"
        remediation: |
          Edit the Controller Manager pod specification file controller manager conf
          on the master node and set the --root-ca-file parameter to the certificate bundle file.
          --root-ca-file=<path/to/file>
        scored: false

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true"
        audit: |
          ps -ef | grep kube-controller-manager | grep -- "--feature-gates=\(\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*"
        type: manual
        tests:
          test_items:
            - flag: "--feature-gates=RotateKubeletServerCertificate=true"
        remediation: |
          Edit the Controller Manager pod specification file controller manager conf
          on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
          --feature-gates=RotateKubeletServerCertificate=true
          Exception
          Certificate rotation is handled by CredHub
        scored: false

      - id: 1.3.7
        text: "Ensure that the --bind-address argument is set to 127.0.0.1"
        audit: |
          ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-controller-manage[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1"
        type: manual
        tests:
          test_items:
            - flag: "--bind-address=127.0.0.1"
        remediation: |
          Edit the Controller Manager pod specification file controller manager conf
          on the master node and ensure the correct value for the --bind-address parameter.
          Exception
          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
        scored: false

  - id: 1.4
    text: "Scheduler"
    checks:
      - id: 1.4.1
        text: "Ensure that the --profiling argument is set to false"
        audit: ps -ef | grep kube-scheduler | grep -v tini | grep -- "--profiling=false"
        tests:
          test_items:
            - flag: "--profiling=false"
        remediation: |
          Edit the Scheduler pod specification file scheduler config file
          on the master node and set the below parameter.
          --profiling=false
        scored: true

      - id: 1.4.2
        text: "Ensure that the --bind-address argument is set to 127.0.0.1"
        audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-schedule[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1"
        type: manual
        tests:
          test_items:
            - flag: "--bind-address"
              compare:
                op: eq
                value: "127.0.0.1"
        remediation: |
          Edit the Scheduler pod specification file scheduler config
          on the master node and ensure the correct value for the --bind-address parameter.
          Exception
          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
        scored: false
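
# NOTE: to exercise this benchmark end to end on a TKGI master node, an
# invocation along these lines should select this file -- a sketch, assuming
# a kube-bench build that ships the tkgi-1.2.53 cfg directory:
#
#   kube-bench run --targets master --benchmark tkgi-1.2.53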