github.com/khulnasoft-lab/kube-bench@v0.2.1-0.20240330183753-9df52345ae58/cfg/rh-1.0/master.yaml
---
controls:
version: rh-1.0
id: 1
text: "Master Node Security Configuration"
type: "master"
groups:
  - id: 1.1
    text: "Master Node Configuration Files"
    checks:
      - id: 1.1.1
        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name )
          do
            oc exec -n openshift-kube-apiserver $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml;
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.2
        text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name )
          do
            oc exec -n openshift-kube-apiserver $i -- \
              stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.3
        text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager )
          do
            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml;
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.4
        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager )
          do
            oc exec -n openshift-kube-controller-manager $i -- \
              stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.5
        text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name )
          do
            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml;
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.6
        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name )
          do
            oc exec -n openshift-kube-scheduler $i -- \
              stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.7
        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd )
          do
            oc rsh -n openshift-etcd $i stat -c "$i %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.8
        text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)"
        audit: |
          for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd )
          do
            oc rsh -n openshift-etcd $i stat -c "$i %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.9
        text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          # For CNI multus
          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null
          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null
          # For SDN pods
          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
          # For OVS pods
          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.10
        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
        audit: |
          # For CNI multus
          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n %U:%G\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null
          for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n %U:%G\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null
          # For SDN pods
          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
          for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
          # For OVS pods in 4.5
          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
          for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null
          # For OVS pods in 4.6 TBD
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.11
        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)"
        audit: |
          for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n permissions=%a" /var/lib/etcd/member; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "700"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.12
        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)"
        audit: |
          for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n %U:%G" /var/lib/etcd/member; done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.13
        text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $(oc get nodes -o name)
          do
            oc debug $i -- chroot /host stat -c "$i %n permissions=%a" /etc/kubernetes/kubeconfig
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.14
        text: "Ensure that the admin.conf file ownership is set to root:root (Manual)"
        audit: |
          for i in $(oc get nodes -o name)
          do
            oc debug $i -- chroot /host stat -c "$i %n %U:%G" /etc/kubernetes/kubeconfig
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.15
        text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name)
          do
            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.16
        text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)"
        audit: |
          for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name)
          do
            oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.17
        text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name)
          do
            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.18
        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)"
        audit: |
          for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name)
          do
            oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig
          done 2>/dev/null
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.19
        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)"
        audit: |
          # Should return root:root for all files and directories
          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
          do
            # echo $i static-pod-certs
            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
            # echo $i static-pod-resources
            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \;
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.20
        text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)"
        audit: |
          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
          do
            # echo $i static-pod-certs
            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$i %n permissions=%a" {} \;
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "644"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

      - id: 1.1.21
        text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)"
        audit: |
          for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}')
          do
            # echo $i static-pod-certs
            oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$i %n permissions=%a" {} \;
          done
        use_multiple_values: true
        tests:
          test_items:
            - flag: "permissions"
              compare:
                op: bitmask
                value: "600"
        remediation: |
          No remediation required; file permissions are managed by the operator.
        scored: false

  - id: 1.2
    text: "API Server"
    checks:
      - id: 1.2.1
        text: "Ensure that anonymous requests are authorized (Manual)"
        audit: |
          # To verify that userGroups include system:unauthenticated
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?'
          # To verify that userGroups include system:unauthenticated
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups'
          # To verify RBAC is enabled
          oc get clusterrolebinding
          oc get clusterrole
          oc get rolebinding
          oc get role
        tests:
          test_items:
            - flag: "system:unauthenticated"
        remediation: |
          None required.
          The default configuration should not be modified.
        scored: false

      - id: 1.2.2
        text: "Ensure that the --basic-auth-file argument is not set (Manual)"
        audit: |
          oc -n openshift-kube-apiserver get cm config -o yaml | grep --color "basic-auth"
          oc -n openshift-apiserver get cm config -o yaml | grep --color "basic-auth"
          # The awk below prints available=true/false based on the AVAILABLE column
          oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
        tests:
          bin_op: and
          test_items:
            - flag: "basic-auth-file"
              set: false
            - flag: "available"
              compare:
                op: eq
                value: true
        remediation: |
          None required. --basic-auth-file cannot be configured on OpenShift.
        scored: false

      - id: 1.2.3
        text: "Ensure that the --token-auth-file parameter is not set (Manual)"
        audit: |
          # Verify that the token-auth-file flag is not present
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
          # Verify that the authentication operator is running
          oc get clusteroperator authentication | awk '$3 != "AVAILABLE" { if ($3){print "available=true"}else{print "available=false"} }'
        tests:
          bin_op: and
          test_items:
            - flag: "token-auth-file"
              set: false
            - flag: "available"
              compare:
                op: eq
                value: true
        remediation: |
          None required.
        scored: false

      - id: 1.2.4
        text: "Use https for kubelet connections (Manual)"
        audit: |
          # For 4.5
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
          # For 4.6
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          # For both 4.5 and 4.6
          oc -n openshift-apiserver describe secret serving-cert
        tests:
          bin_op: and
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
        remediation: |
          No remediation is required.
          OpenShift platform components use X.509 certificates for authentication.
          OpenShift manages the CAs and certificates for platform components. This is not configurable.
        scored: false

      - id: 1.2.5
        text: "Ensure that the kubelet uses certificates to authenticate (Manual)"
        audit: |
          # For 4.5
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
          # For 4.6
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          # For both 4.5 and 4.6
          oc -n openshift-apiserver describe secret serving-cert
        tests:
          bin_op: and
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.crt"
            - flag: "/etc/kubernetes/static-pod-resources/secrets/kubelet-client/tls.key"
        remediation: |
          No remediation is required.
          OpenShift platform components use X.509 certificates for authentication.
          OpenShift manages the CAs and certificates for platform components.
          This is not configurable.
        scored: false

      - id: 1.2.6
        text: "Verify that the kubelet certificate authority is set as appropriate (Manual)"
        audit: |
          # For 4.5
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo'
          # For 4.6
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
        tests:
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/configmaps/kubelet-serving-ca/ca-bundle.crt"
        remediation: |
          No remediation is required.
          OpenShift platform components use X.509 certificates for authentication.
          OpenShift manages the CAs and certificates for platform components.
          This is not configurable.
        scored: false

      - id: 1.2.7
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)"
        audit: |
          # To verify that the authorization-mode argument is not used
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
          # To verify RBAC is configured
          oc get clusterrolebinding
          oc get clusterrole
          oc get rolebinding
          oc get role
        audit_config: |
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
        tests:
          bin_op: or
          test_items:
            - path: "{.authorization-mode}"
              compare:
                op: nothave
                value: "AlwaysAllow"
            - path: "{.authorization-mode}"
              flag: "authorization-mode"
              set: false
        remediation: |
          None. RBAC is always on and the OpenShift API server does not use the values assigned to the flag authorization-mode.
        scored: false

      - id: 1.2.8
        text: "Verify that the Node authorizer is enabled (Manual)"
        audit: |
          # For OCP 4.5 and earlier, verify that authorization-mode is not used
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
          # Verify that authorization-mode is not set in the kubelet configuration on each node
          for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}')
          do
            oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode
            oc debug node/${node} -- chroot /host ps -aux | grep kubelet | grep authorization-mode
          done
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
        audit_config: |
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
        tests:
          bin_op: or
          test_items:
            - path: "{.authorization-mode}"
              compare:
                op: has
                value: "Node"
            - path: "{.authorization-mode}"
              flag: "authorization-mode"
              set: false
        remediation: |
          No remediation is required.
        scored: false

      - id: 1.2.9
        text: "Verify that RBAC is enabled (Manual)"
        audit: |
          # For 4.5, verify that the authorization-mode argument is not used
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
          # To verify RBAC is used
          oc get clusterrolebinding
          oc get clusterrole
          oc get rolebinding
          oc get role
          # For 4.6, verify that the authorization-mode argument includes RBAC
        audit_config: |
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments'
        tests:
          bin_op: or
          test_items:
            - path: "{.authorization-mode}"
              compare:
                op: has
                value: "RBAC"
            - path: "{.authorization-mode}"
              flag: "authorization-mode"
              set: false
        remediation: |
          None. It is not possible to disable RBAC.
        scored: false

      - id: 1.2.10
        text: "Ensure that the APIPriorityAndFairness feature gate is enabled (Manual)"
        audit: |
          # Verify the APIPriorityAndFairness feature gate
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments'
          # Verify the set of admission-plugins for OCP 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
        tests:
          bin_op: and
          test_items:
            - flag: "APIPriorityAndFairness=true"
            - flag: "EventRateLimit"
              set: false
        remediation: |
          No remediation is required.
        scored: false

      - id: 1.2.11
        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Manual)"
        audit: |
          # Verify the set of admission-plugins for OCP 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
        tests:
          test_items:
            - flag: "AlwaysAdmit"
              set: false
        remediation: |
          No remediation is required. The AlwaysAdmit admission controller cannot be enabled in OpenShift.
        scored: false

      - id: 1.2.12
        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
        audit: |
          # Verify the set of admission-plugins for OCP 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
        tests:
          test_items:
            - flag: "AlwaysPullImages"
              set: false
        remediation: |
          None required.
        scored: false

      - id: 1.2.13
        text: "Ensure that the admission control plugin SecurityContextDeny is not set (Manual)"
        audit: |
          # Verify the set of admission-plugins for OCP 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
          [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextDeny and SecurityContextConstraint compiled" || echo $output
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
          # Verify that SecurityContextConstraints are deployed
          oc get scc
          oc describe scc restricted
        tests:
          bin_op: and
          test_items:
            - flag: "SecurityContextConstraint"
              set: true
            - flag: "anyuid"
            - flag: "hostaccess"
            - flag: "hostmount-anyuid"
            - flag: "hostnetwork"
            - flag: "node-exporter"
            - flag: "nonroot"
            - flag: "privileged"
            - flag: "restricted"
        remediation: |
          None required. The Security Context Constraint admission controller cannot be disabled in OpenShift 4.
        scored: false

      - id: 1.2.14
        text: "Ensure that the admission control plugin ServiceAccount is set (Manual)"
        audit: |
          # Verify the list of admission controllers for 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
          [ "$output" == "null" ] && echo "ocp 4.5 has ServiceAccount compiled" || echo $output
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
          # Verify that Service Accounts are present
          oc get sa -A
        tests:
          test_items:
            - flag: "ServiceAccount"
              set: true
        remediation: |
          None required. OpenShift is configured to use service accounts by default.
        scored: false

      - id: 1.2.15
        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Manual)"
        audit: |
          # Verify the list of admission controllers for 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
          [ "$output" == "null" ] && echo "ocp 4.5 has NamespaceLifecycle compiled" || echo $output
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
        tests:
          test_items:
            - flag: "NamespaceLifecycle"
        remediation: |
          Ensure that the --disable-admission-plugins parameter does not include NamespaceLifecycle.
        scored: false

      - id: 1.2.16
        text: "Ensure that the admission control plugin SecurityContextConstraint is set (Manual)"
        audit: |
          # Verify the set of admission-plugins for OCP 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
          [ "$output" == "null" ] && echo "ocp 4.5 has SecurityContextConstraint compiled" || echo $output
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
          # Verify that SecurityContextConstraints are deployed
          oc get scc
          oc describe scc restricted
        tests:
          bin_op: and
          test_items:
            - flag: "SecurityContextConstraint"
            - flag: "anyuid"
            - flag: "hostaccess"
            - flag: "hostmount-anyuid"
            - flag: "hostnetwork"
            - flag: "node-exporter"
            - flag: "nonroot"
            - flag: "privileged"
            - flag: "restricted"
        remediation: |
          None required. Security Context Constraints are enabled by default in OpenShift and cannot be disabled.
        scored: false

      - id: 1.2.17
        text: "Ensure that the admission control plugin NodeRestriction is set (Manual)"
        audit: |
          # For 4.5, review the control plane manifest https://github.com/openshift/origin/blob/release-4.5/vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane/manifests.go#L132
          # Verify the set of admission-plugins for OCP 4.6 and higher
          oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"'
          output=$(oc -n openshift-kube-apiserver get configmap config -o json | jq -r '.data."config.yaml"' | jq '.apiServerArguments."enable-admission-plugins"')
          [ "$output" == "null" ] && echo "ocp 4.5 has NodeRestriction compiled" || echo $output
          # Check that no overrides are configured
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides'
        tests:
          test_items:
            - flag: "NodeRestriction"
        remediation: |
          The NodeRestriction plugin cannot be disabled.
        scored: false

      - id: 1.2.18
        text: "Ensure that the --insecure-bind-address argument is not set (Manual)"
        audit: |
          # InsecureBindAddress=true should not be in the results
          oc get kubeapiservers.operator.openshift.io cluster -o jsonpath='{range .spec.observedConfig.apiServerArguments.feature-gates[*]}{@}{"\n"}{end}'
          # Result should be only 6443
          oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
          # Result should be only 8443
          oc -n openshift-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
        tests:
          bin_op: and
          test_items:
            - flag: "insecure-bind-address"
              set: false
            - flag: 6443
            - flag: 8443
        remediation: |
          None required.
        scored: false

      - id: 1.2.19
        text: "Ensure that the --insecure-port argument is set to 0 (Manual)"
        audit: |
          # Should return 6443
          oc -n openshift-kube-apiserver get endpoints -o jsonpath='{.items[*].subsets[*].ports[*].port}'
          # For OCP 4.6 and above
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]'
          output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments["insecure-port"]')
          [ "$output" == "null" ] && echo "ocp 4.5 has insecure-port set to \"0\" compiled" || echo $output
        tests:
          bin_op: and
          test_items:
            - flag: "\"0\""
            - flag: "6443"
        remediation: |
          None required. The configuration is managed by the API server operator.
        scored: false

      - id: 1.2.20
        text: "Ensure that the --secure-port argument is not set to 0 (Manual)"
        audit: |
          oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig'
          # Should return only 6443
          echo ports=`oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[*].spec.containers[?(@.name=="kube-apiserver")].ports[*].containerPort}'`
        tests:
          bin_op: and
          test_items:
            - flag: '"bindAddress": "0.0.0.0:6443"'
            - flag: "ports"
              compare:
                op: regex
                value: '\s*(?:6443\s*){1,}$'
        remediation: |
          None required.
        scored: false

      - id: 1.2.21
        text: "Ensure that the healthz endpoint is protected by RBAC (Manual)"
        type: manual
        audit: |
          # Verify endpoints
          oc -n openshift-kube-apiserver describe endpoints
          # Check config for ports, livenessProbe, readinessProbe, healthz
          oc -n openshift-kube-apiserver get cm kube-apiserver-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
          # Test to validate RBAC enabled on the apiserver endpoint; check with non-admin role
          oc project openshift-kube-apiserver
          POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
          PORT=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
          # Following should return 403 Forbidden
          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -k
          # Create a service account to test RBAC
          oc create -n openshift-kube-apiserver sa permission-test-sa
          # Should return 403 Forbidden
          SA_TOKEN=$(oc sa -n openshift-kube-apiserver get-token permission-test-sa)
          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
          # Cleanup
          oc delete -n openshift-kube-apiserver sa permission-test-sa
          # As cluster admin, should succeed
          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
          oc rsh -n openshift-kube-apiserver ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
        remediation: |
          None required as profiling data is protected by RBAC.
        scored: false

      - id: 1.2.22
        text: "Ensure that the --audit-log-path argument is set (Manual)"
        audit: |
          # Should return "/var/log/kube-apiserver/audit.log"
          output=$(oc get configmap config -n openshift-kube-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
          POD=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}')
          oc rsh -n openshift-kube-apiserver -c kube-apiserver $POD ls /var/log/kube-apiserver/audit.log 2>/dev/null
          # Should return 0
          echo exit_code=$?
          # Should return "/var/log/openshift-apiserver/audit.log"
          output=$(oc get configmap config -n openshift-apiserver -o jsonpath="{['.data.config\.yaml']}" | jq '.auditConfig.auditFilePath')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-path"][]?')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "$output" || true
          POD=$(oc get pods -n openshift-apiserver -l apiserver=true -o jsonpath='{.items[0].metadata.name}')
          oc rsh -n openshift-apiserver $POD ls /var/log/openshift-apiserver/audit.log 2>/dev/null
          # Should return 0
          echo exit_code=$?
        use_multiple_values: true
        tests:
          bin_op: or
          test_items:
            - flag: "/var/log/kube-apiserver/audit.log"
            - flag: "/var/log/openshift-apiserver/audit.log"
            - flag: "exit_code=0"
            - flag: "null"
        remediation: |
          None required. This is managed by the cluster apiserver operator.
        scored: false

      - id: 1.2.23
        text: "Ensure that the audit logs are forwarded off the cluster for retention (Manual)"
        type: "manual"
        remediation: |
          Follow the documentation for log forwarding:
          "Forwarding logs to third party systems"
          https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html
        scored: false

      - id: 1.2.24
        text: "Ensure that the maximumRetainedFiles argument is set to 10 or as appropriate (Manual)"
        audit: |
          #NOTICE
          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles)
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true
          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumRetainedFiles)
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumRetainedFiles=$output" || true
          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true
          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxbackup"][]?')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxbackup=$output" || true
        use_multiple_values: true
        tests:
          bin_op: or
          test_items:
            - flag: "maximumRetainedFiles"
              compare:
                op: gte
                value: 10
            - flag: "audit-log-maxbackup"
              compare:
                op: gte
                value: 10
        remediation: |
          Set the maximumRetainedFiles parameter to 10 or to an appropriate number of files, for example:
          maximumRetainedFiles: 10
        scored: false

      - id: 1.2.25
        text: "Ensure that the maximumFileSizeMegabytes argument is set to 100 or as appropriate (Manual)"
        audit: |
          #NOTICE
          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes)
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true
          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r .auditConfig.maximumFileSizeMegabytes)
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "maximumFileSizeMegabytes=$output" || true
          output=$(oc get configmap config -n openshift-kube-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true
          output=$(oc get configmap config -n openshift-apiserver -o json | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["audit-log-maxsize"][]?')
          [ "$output" != "" ] && [ "$output" != "null" ] && echo "audit-log-maxsize=$output" || true
        use_multiple_values: true
        tests:
          bin_op: or
          test_items:
            - flag: "maximumFileSizeMegabytes"
              compare:
                op: gte
                value: 100
            - flag: "audit-log-maxsize"
              compare:
                op: gte
                value: 100
        remediation: |
          Set the audit-log-maxsize parameter to 100 or to an appropriate number, for example:
          maximumFileSizeMegabytes: 100
        scored: false

      - id: 1.2.26
        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
        audit: |
          echo requestTimeoutSeconds=`oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.requestTimeoutSeconds`
        tests:
          test_items:
            - flag: "requestTimeoutSeconds"
        remediation: |
          TBD
        scored: false

      - id: 1.2.27
        text: "Ensure that the --service-account-lookup argument is set to true (Manual)"
        audit: |
          # For OCP 4.5
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' | grep service-account-lookup
          # For OCP 4.6 and above
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"]'
          output=$(oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["service-account-lookup"][0]')
          [ "$output" == "null" ] && echo "ocp 4.5 has service-account-lookup=true compiled" || echo service-account-lookup=$output
        tests:
          test_items:
            - flag: "service-account-lookup=true"
        remediation: |
          TBD
        scored: false

      - id: 1.2.28
        text: "Ensure that the --service-account-key-file argument is set as appropriate (Manual)"
        audit: |
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .serviceAccountPublicKeyFiles[]
        tests:
          bin_op: and
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs"
            - flag: "/etc/kubernetes/static-pod-resources/configmaps/bound-sa-token-signing-certs"
        remediation: |
          The OpenShift API server does not use the service-account-key-file argument.
          The ServiceAccount token authenticator is configured with serviceAccountConfig.publicKeyFiles.
          OpenShift does not reuse the apiserver TLS key. This is not configurable.
        scored: false

      - id: 1.2.29
        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Manual)"
        audit: |
          # etcd Certificate File
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.certFile
          # etcd Key File
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.keyFile
          # NOTICE 4.6 extension
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-certfile"]'
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-keyfile"]'
        tests:
          bin_op: and
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.crt"
            - flag: "/etc/kubernetes/static-pod-resources/secrets/etcd-client/tls.key"
        remediation: |
          OpenShift automatically manages TLS and client certificate authentication for etcd.
          This is not configurable.
        scored: false

      - id: 1.2.30
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
        audit: |
          # TLS Cert File - openshift-kube-apiserver
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.certFile
          # TLS Key File
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.servingInfo.keyFile'
          # NOTICE 4.6 extension
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-cert-file"]'
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["tls-private-key-file"]'
        tests:
          bin_op: and
          test_items:
            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.crt"
            - flag: "/etc/kubernetes/static-pod-certs/secrets/service-network-serving-certkey/tls.key"
        remediation: |
          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
          This is not configurable. You may optionally set a custom default certificate to be used by the API server
          when serving content in order to enable clients to access the API server at a different host name or without
          the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.
          Follow the directions in the OpenShift documentation "User-provided certificates for the API server".
        scored: false

      - id: 1.2.31
        text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)"
        audit: |
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .servingInfo.clientCA
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["client-ca-file"]'
        tests:
          test_items:
            - flag: "/etc/kubernetes/static-pod-certs/configmaps/client-ca/ca-bundle.crt"
        remediation: |
          OpenShift automatically manages TLS authentication for the API server communication with the node/kubelet.
          This is not configurable. You may optionally set a custom default certificate to be used by the API
          server when serving content in order to enable clients to access the API server at a different host name
          or without the need to distribute the cluster-managed certificate authority (CA) certificates to the clients.

          User-provided certificates must be provided in a kubernetes.io/tls type Secret in the openshift-config namespace.
          Update the API server cluster configuration,
          the apiserver/cluster resource, to enable the use of the user-provided certificate.
        scored: false

      - id: 1.2.32
        text: "Ensure that the --etcd-cafile argument is set as appropriate (Manual)"
        audit: |
          # etcd CA File
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r .storageConfig.ca
          oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq -r '.apiServerArguments["etcd-cafile"]'
        tests:
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/configmaps/etcd-serving-ca/ca-bundle.crt"
        remediation: |
          None required. OpenShift generates the etcd-cafile and sets the arguments appropriately in the API server.
          Communication with etcd is secured by the etcd serving CA.
        scored: false

      - id: 1.2.33
        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
        audit: |
          # Verify the etcd datastore encryption status
          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
        tests:
          test_items:
            - flag: "EncryptionCompleted"
        remediation: |
          Follow the OpenShift documentation "Encrypting etcd data" (OpenShift Container Platform 4.5):
          https://docs.openshift.com/container-platform/4.5/security/encrypting-etcd.html
        scored: false

      - id: 1.2.34
        text: "Ensure that encryption providers are appropriately configured (Manual)"
        audit: |
          # Verify the etcd datastore encryption status
          oc get openshiftapiserver -o=jsonpath='{range.items[0].status.conditions[?(@.type=="Encrypted")]}{.reason}{"\n"}{.message}{"\n"}'
        tests:
          test_items:
            - flag: "EncryptionCompleted"
        remediation: |
          Follow the Kubernetes documentation and configure an EncryptionConfig file.
          In this file, choose aescbc, kms or secretbox as the encryption provider.
        scored: false

      - id: 1.2.35
        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
        type: manual
        remediation: |
          Verify that the tlsSecurityProfile is set to the value you chose.
          Note: The HAProxy Ingress controller image does not support TLS 1.3,
          and because the Modern profile requires TLS 1.3, it is not supported.
          The Ingress Operator converts the Modern profile to Intermediate.
          The Ingress Operator also converts the TLS 1.0 of an Old or Custom profile to 1.1,
          and TLS 1.3 of a Custom profile to 1.2.
        scored: false

  - id: 1.3
    text: "Controller Manager"
    checks:
      - id: 1.3.1
        text: "Ensure that garbage collection is configured as appropriate (Manual)"
        type: manual
        remediation: |
          To configure, follow the directions in "Configuring garbage collection for containers and images":
          https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring
        scored: false

      - id: 1.3.2
        text: "Ensure that controller manager healthz endpoints are protected by RBAC (Manual)"
        type: manual
        audit: |
          # Verify configuration for ports, livenessProbe, readinessProbe, healthz
          oc -n openshift-kube-controller-manager get cm kube-controller-manager-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
          # Verify endpoints
          oc -n openshift-kube-controller-manager describe endpoints
          # Test to validate RBAC enabled on the controller endpoint; check with non-admin role
          oc project openshift-kube-controller-manager
          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
          PORT=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].spec.containers[0].ports[0].hostPort}')
          # Following should return 403 Forbidden
          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -k
          # Create a service account to test RBAC
          oc create -n openshift-kube-controller-manager sa permission-test-sa
          # Should return 403 Forbidden
          SA_TOKEN=$(oc sa -n openshift-kube-controller-manager get-token permission-test-sa)
          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
          # Cleanup
          oc delete -n openshift-kube-controller-manager sa permission-test-sa
          # As cluster admin, should succeed
          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
          oc rsh -n openshift-kube-controller-manager ${POD} curl https://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
        remediation: |
          None required; profiling is protected by RBAC.
        scored: false

      - id: 1.3.3
        text: "Ensure that the --use-service-account-credentials argument is set to true (Manual)"
        audit: |
          echo use-service-account-credentials=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["use-service-account-credentials"][]'`
        tests:
          test_items:
            - flag: "use-service-account-credentials"
              compare:
                op: eq
                value: true
        remediation: |
          The OpenShift Controller Manager operator manages and updates the OpenShift Controller Manager.
          The Kubernetes Controller Manager operator manages and updates the Kubernetes Controller Manager deployed on top of OpenShift.
          This operator is configured via the KubeControllerManager custom resource.
        scored: false

      - id: 1.3.4
        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Manual)"
        audit: |
          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["service-account-private-key-file"][]'
        tests:
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/secrets/service-account-private-key/service-account.key"
        remediation: |
          None required.
          OpenShift manages the service account credentials for the scheduler automatically.
        scored: false

      - id: 1.3.5
        text: "Ensure that the --root-ca-file argument is set as appropriate (Manual)"
        audit: |
          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["root-ca-file"][]'
        tests:
          test_items:
            - flag: "/etc/kubernetes/static-pod-resources/configmaps/serviceaccount-ca/ca-bundle.crt"
        remediation: |
          None required.
          Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform.
        scored: false

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)"
        audit: |
          oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq -r '.extendedArguments["feature-gates"][]'
        tests:
          test_items:
            - flag: "RotateKubeletServerCertificate"
              compare:
                op: eq
                value: "true"
        remediation: |
          None required.
          Certificates for OpenShift platform components are automatically created and rotated by the OpenShift Container Platform.
        scored: false

      - id: 1.3.7
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Manual)"
        audit: |
          echo port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["port"][]'`
          echo secure-port=`oc get configmaps config -n openshift-kube-controller-manager -ojson | jq -r '.data["config.yaml"]' | jq '.extendedArguments["secure-port"][]'`
          # The following should fail with HTTP code 403
          POD=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o jsonpath='{.items[0].metadata.name}')
          oc rsh -n openshift-kube-controller-manager -c kube-controller-manager $POD curl https://localhost:10257/metrics -k
        tests:
          bin_op: and
          test_items:
            - flag: "secure-port"
              compare:
                op: eq
                value: "\"10257\""
            - flag: "port"
              compare:
                op: eq
                value: "\"0\""
            - flag: "\"code\": 403"
        remediation: |
          Edit the Controller Manager pod specification file $controllermanagerconf
          on the master node and ensure the correct value for the --bind-address parameter.
        scored: false

  - id: 1.4
    text: "Scheduler"
    checks:
      - id: 1.4.1
        text: "Ensure that the healthz endpoints for the scheduler are protected by RBAC (Manual)"
        type: manual
        audit: |
          # Check configuration for ports, livenessProbe, readinessProbe, healthz
          oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
          # Test to verify endpoints
          oc -n openshift-kube-scheduler describe endpoints
          # Test to validate RBAC enabled on the scheduler endpoint; check with non-admin role
          oc project openshift-kube-scheduler
          POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
          PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
          # Should return 403 Forbidden
          oc rsh ${POD} curl http://localhost:${PORT}/metrics -k
          # Create a service account to test RBAC
          oc create sa permission-test-sa
          # Should return 403 Forbidden
          SA_TOKEN=$(oc sa get-token permission-test-sa)
          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
          # Cleanup
          oc delete sa permission-test-sa
          # As cluster admin, should succeed
          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
        remediation: |
          None required; profiling is protected by RBAC and cannot be disabled.
          A fix for this issue is tracked in https://bugzilla.redhat.com/show_bug.cgi?id=1889488
        scored: false

      - id: 1.4.2
        text: "Verify that the scheduler API service is protected by authentication and authorization (Manual)"
        type: manual
        audit: |
          # To verify endpoints
          oc -n openshift-kube-scheduler describe endpoints
          # To verify that bind-address is not used in the configuration and that port is set to 0
          oc -n openshift-kube-scheduler get cm kube-scheduler-pod -o json | jq -r '.data."pod.yaml"' | jq '.spec.containers'
          # To test for RBAC
          oc project openshift-kube-scheduler
          POD=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].metadata.name}')
          POD_IP=$(oc get pods -l app=openshift-kube-scheduler -o jsonpath='{.items[0].status.podIP}')
          PORT=$(oc get pod $POD -o jsonpath='{.spec.containers[0].livenessProbe.httpGet.port}')
          # Should return a 403
          oc rsh ${POD} curl http://${POD_IP}:${PORT}/metrics
          # Create a service account to test RBAC
          oc create sa permission-test-sa
          # Should return 403 Forbidden
          SA_TOKEN=$(oc sa get-token permission-test-sa)
          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $SA_TOKEN" -k
          # Cleanup
          oc delete sa permission-test-sa
          # As cluster admin, should succeed
          CLUSTER_ADMIN_TOKEN=$(oc whoami -t)
          oc rsh ${POD} curl http://localhost:${PORT}/metrics -H "Authorization: Bearer $CLUSTER_ADMIN_TOKEN" -k
        remediation: |
          By default, the --bind-address argument is not present,
          the readinessProbe and livenessProbe arguments are set to 10251 and the port argument is set to 0.
          Check the status of this issue: https://bugzilla.redhat.com/show_bug.cgi?id=1889488
        scored: false
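
# Usage note: each audit block above is a shell snippet that kube-bench executes as-is, matching
# its output against the corresponding test_items flags. These master-node checks are typically
# exercised by pointing kube-bench at the rh-1.0 benchmark, for example (flag names may vary
# between kube-bench releases):
#
#   kube-bench run --targets master --benchmark rh-1.0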