k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go (about) 1 /* 2 Copyright 2016 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package bootstrappolicy 18 19 import ( 20 capi "k8s.io/api/certificates/v1beta1" 21 rbacv1 "k8s.io/api/rbac/v1" 22 "k8s.io/apimachinery/pkg/api/meta" 23 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 24 "k8s.io/apimachinery/pkg/runtime" 25 "k8s.io/apiserver/pkg/authentication/serviceaccount" 26 "k8s.io/apiserver/pkg/authentication/user" 27 utilfeature "k8s.io/apiserver/pkg/util/feature" 28 rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1" 29 "k8s.io/kubernetes/pkg/features" 30 ) 31 32 // Write and other vars are slices of the allowed verbs. 33 // Label and Annotation are default maps of bootstrappolicy. 34 var ( 35 Write = []string{"create", "update", "patch", "delete", "deletecollection"} 36 ReadWrite = []string{"get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"} 37 Read = []string{"get", "list", "watch"} 38 ReadUpdate = []string{"get", "list", "watch", "update", "patch"} 39 40 Label = map[string]string{"kubernetes.io/bootstrapping": "rbac-defaults"} 41 Annotation = map[string]string{rbacv1.AutoUpdateAnnotationKey: "true"} 42 ) 43 44 const ( 45 legacyGroup = "" 46 appsGroup = "apps" 47 authenticationGroup = "authentication.k8s.io" 48 authorizationGroup = "authorization.k8s.io" 49 autoscalingGroup = "autoscaling" 50 batchGroup = "batch" 51 certificatesGroup = "certificates.k8s.io" 52 coordinationGroup = "coordination.k8s.io" 53 discoveryGroup = "discovery.k8s.io" 54 extensionsGroup = "extensions" 55 policyGroup = "policy" 56 rbacGroup = "rbac.authorization.k8s.io" 57 resourceGroup = "resource.k8s.io" 58 storageGroup = "storage.k8s.io" 59 resMetricsGroup = "metrics.k8s.io" 60 customMetricsGroup = "custom.metrics.k8s.io" 61 externalMetricsGroup = "external.metrics.k8s.io" 62 networkingGroup = "networking.k8s.io" 63 eventsGroup = "events.k8s.io" 64 internalAPIServerGroup = "internal.apiserver.k8s.io" 65 admissionRegistrationGroup = "admissionregistration.k8s.io" 66 storageVersionMigrationGroup = "storagemigration.k8s.io" 67 ) 68 69 func addDefaultMetadata(obj runtime.Object) { 70 metadata, err := meta.Accessor(obj) 71 if err != nil { 72 // if this happens, then some static code is broken 73 panic(err) 74 } 75 76 labels := metadata.GetLabels() 77 if labels == nil { 78 labels = map[string]string{} 79 } 80 for k, v := range Label { 81 labels[k] = v 82 } 83 metadata.SetLabels(labels) 84 85 annotations := metadata.GetAnnotations() 86 if annotations == nil { 87 annotations = map[string]string{} 88 } 89 for k, v := range Annotation { 90 annotations[k] = v 91 } 92 metadata.SetAnnotations(annotations) 93 } 94 95 func addClusterRoleLabel(roles []rbacv1.ClusterRole) { 96 for i := range roles { 97 addDefaultMetadata(&roles[i]) 98 } 99 return 100 } 101 102 func addClusterRoleBindingLabel(rolebindings []rbacv1.ClusterRoleBinding) { 103 for i := range rolebindings { 104 addDefaultMetadata(&rolebindings[i]) 105 } 106 return 107 } 108 109 // NodeRules returns node policy rules, it is slice of rbacv1.PolicyRule. 110 func NodeRules() []rbacv1.PolicyRule { 111 nodePolicyRules := []rbacv1.PolicyRule{ 112 // Needed to check API access. These creates are non-mutating 113 rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), 114 rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(), 115 116 // Needed to build serviceLister, to populate env vars for services 117 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(), 118 119 // Nodes can register Node API objects and report status. 120 // Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object. 121 rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), 122 rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), 123 rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), 124 125 // TODO: restrict to the bound node as creator in the NodeRestrictions admission plugin 126 rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(), 127 128 // TODO: restrict to pods scheduled on the bound node once field selectors are supported by list/watch authorization 129 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(), 130 131 // Needed for the node to create/delete mirror pods. 132 // Use the NodeRestriction admission plugin to limit a node to creating/deleting mirror pods bound to itself. 133 rbacv1helpers.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), 134 // Needed for the node to report status of pods it is running. 135 // Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself. 136 rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), 137 // Needed for the node to create pod evictions. 138 // Use the NodeRestriction admission plugin to limit a node to creating evictions for pods bound to itself. 139 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(), 140 141 // Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs 142 // Needed for configmap volume and envs 143 // Use the Node authorization mode to limit a node to get secrets/configmaps referenced by pods bound to itself. 144 rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(), 145 // Needed for persistent volumes 146 // Use the Node authorization mode to limit a node to get pv/pvc objects referenced by pods bound to itself. 147 rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), 148 149 // TODO: add to the Node authorizer and restrict to endpoints referenced by pods or PVs bound to the node 150 // Needed for glusterfs volumes 151 rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(), 152 // Used to create a certificatesigningrequest for a node-specific client certificate, and watch 153 // for it to be signed. This allows the kubelet to rotate it's own certificate. 154 rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), 155 156 // Leases 157 rbacv1helpers.NewRule("get", "create", "update", "patch", "delete").Groups("coordination.k8s.io").Resources("leases").RuleOrDie(), 158 159 // CSI 160 rbacv1helpers.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie(), 161 162 // Use the Node authorization to limit a node to create tokens for service accounts running on that node 163 // Use the NodeRestriction admission plugin to limit a node to create tokens bound to pods on that node 164 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(), 165 } 166 167 // Use the Node authorization mode to limit a node to update status of pvc objects referenced by pods bound to itself. 168 // Use the NodeRestriction admission plugin to limit a node to just update the status stanza. 169 pvcStatusPolicyRule := rbacv1helpers.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie() 170 nodePolicyRules = append(nodePolicyRules, pvcStatusPolicyRule) 171 172 // CSI 173 csiDriverRule := rbacv1helpers.NewRule("get", "watch", "list").Groups("storage.k8s.io").Resources("csidrivers").RuleOrDie() 174 nodePolicyRules = append(nodePolicyRules, csiDriverRule) 175 csiNodeInfoRule := rbacv1helpers.NewRule("get", "create", "update", "patch", "delete").Groups("storage.k8s.io").Resources("csinodes").RuleOrDie() 176 nodePolicyRules = append(nodePolicyRules, csiNodeInfoRule) 177 178 // RuntimeClass 179 nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get", "list", "watch").Groups("node.k8s.io").Resources("runtimeclasses").RuleOrDie()) 180 181 // DRA Resource Claims 182 if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { 183 nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get").Groups(resourceGroup).Resources("resourceclaims").RuleOrDie()) 184 } 185 // Kubelet needs access to ClusterTrustBundles to support the pemTrustAnchors volume type. 186 if utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundle) { 187 nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get", "list", "watch").Groups(certificatesGroup).Resources("clustertrustbundles").RuleOrDie()) 188 } 189 190 return nodePolicyRules 191 } 192 193 // ClusterRoles returns the cluster roles to bootstrap an API server with 194 func ClusterRoles() []rbacv1.ClusterRole { 195 roles := []rbacv1.ClusterRole{ 196 { 197 // a "root" role which can do absolutely anything 198 ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin"}, 199 Rules: []rbacv1.PolicyRule{ 200 rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(), 201 rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(), 202 }, 203 }, 204 { 205 // a role which provides just enough power to determine if the server is 206 // ready and discover API versions for negotiation 207 ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"}, 208 Rules: []rbacv1.PolicyRule{ 209 rbacv1helpers.NewRule("get").URLs( 210 "/livez", "/readyz", "/healthz", 211 "/version", "/version/", 212 "/openapi", "/openapi/*", 213 "/api", "/api/*", 214 "/apis", "/apis/*", 215 ).RuleOrDie(), 216 }, 217 }, 218 { 219 // a role which provides minimal read access to the monitoring endpoints 220 // (i.e. /metrics, /livez/*, /readyz/*, /healthz/*, /livez, /readyz, /healthz) 221 // The splatted health check endpoints allow read access to individual health check 222 // endpoints which may contain more sensitive cluster information information 223 ObjectMeta: metav1.ObjectMeta{Name: "system:monitoring"}, 224 Rules: []rbacv1.PolicyRule{ 225 rbacv1helpers.NewRule("get").URLs( 226 "/metrics", "/metrics/slis", 227 "/livez", "/readyz", "/healthz", 228 "/livez/*", "/readyz/*", "/healthz/*", 229 ).RuleOrDie(), 230 }, 231 }, 232 } 233 234 basicUserRules := []rbacv1.PolicyRule{ 235 rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(), 236 rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("selfsubjectreviews").RuleOrDie(), 237 } 238 239 roles = append(roles, []rbacv1.ClusterRole{ 240 { 241 // a role which provides minimal resource access to allow a "normal" user to learn information about themselves 242 ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"}, 243 Rules: basicUserRules, 244 }, 245 { 246 // a role which provides just enough power read insensitive cluster information 247 ObjectMeta: metav1.ObjectMeta{Name: "system:public-info-viewer"}, 248 Rules: []rbacv1.PolicyRule{ 249 rbacv1helpers.NewRule("get").URLs( 250 "/livez", "/readyz", "/healthz", "/version", "/version/", 251 ).RuleOrDie(), 252 }, 253 }, 254 { 255 // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users. 256 ObjectMeta: metav1.ObjectMeta{Name: "admin"}, 257 AggregationRule: &rbacv1.AggregationRule{ 258 ClusterRoleSelectors: []metav1.LabelSelector{ 259 {MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}, 260 }, 261 }, 262 }, 263 { 264 // a role for a namespace level editor. It grants access to all user level actions in a namespace. 265 // It does not grant powers for "privileged" resources which are domain of the system: `/status` 266 // subresources or `quota`/`limits` which are used to control namespaces 267 ObjectMeta: metav1.ObjectMeta{Name: "edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}, 268 AggregationRule: &rbacv1.AggregationRule{ 269 ClusterRoleSelectors: []metav1.LabelSelector{ 270 {MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}, 271 }, 272 }, 273 }, 274 { 275 // a role for namespace level viewing. It grants Read-only access to non-escalating resources in 276 // a namespace. 277 ObjectMeta: metav1.ObjectMeta{Name: "view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}, 278 AggregationRule: &rbacv1.AggregationRule{ 279 ClusterRoleSelectors: []metav1.LabelSelector{ 280 {MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}, 281 }, 282 }, 283 }, 284 { 285 // a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users. 286 ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-admin", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}, 287 Rules: []rbacv1.PolicyRule{ 288 // additional admin powers 289 rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(), 290 rbacv1helpers.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(), 291 }, 292 }, 293 { 294 // a role for a namespace level editor. It grants access to all user level actions in a namespace. 295 // It does not grant powers for "privileged" resources which are domain of the system: `/status` 296 // subresources or `quota`/`limits` which are used to control namespaces 297 ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}, 298 Rules: []rbacv1.PolicyRule{ 299 // Allow read on escalating resources 300 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods/attach", "pods/proxy", "pods/exec", "pods/portforward", "secrets", "services/proxy").RuleOrDie(), 301 rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(), 302 303 rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(), 304 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(), 305 rbacv1helpers.NewRule(Write...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", 306 "services", "services/proxy", "persistentvolumeclaims", "configmaps", "secrets", "events").RuleOrDie(), 307 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(), 308 309 rbacv1helpers.NewRule(Write...).Groups(appsGroup).Resources( 310 "statefulsets", "statefulsets/scale", 311 "daemonsets", 312 "deployments", "deployments/scale", "deployments/rollback", 313 "replicasets", "replicasets/scale").RuleOrDie(), 314 315 rbacv1helpers.NewRule(Write...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(), 316 317 rbacv1helpers.NewRule(Write...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(), 318 319 rbacv1helpers.NewRule(Write...).Groups(extensionsGroup).Resources("daemonsets", 320 "deployments", "deployments/scale", "deployments/rollback", "ingresses", 321 "replicasets", "replicasets/scale", "replicationcontrollers/scale", 322 "networkpolicies").RuleOrDie(), 323 324 rbacv1helpers.NewRule(Write...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), 325 326 rbacv1helpers.NewRule(Write...).Groups(networkingGroup).Resources("networkpolicies", "ingresses").RuleOrDie(), 327 328 rbacv1helpers.NewRule(ReadWrite...).Groups(coordinationGroup).Resources("leases").RuleOrDie(), 329 }, 330 }, 331 { 332 // a role for namespace level viewing. It grants Read-only access to non-escalating resources in 333 // a namespace. 334 ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}, 335 Rules: []rbacv1.PolicyRule{ 336 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts", 337 "services", "services/status", "endpoints", "persistentvolumeclaims", "persistentvolumeclaims/status", "configmaps").RuleOrDie(), 338 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events", 339 "pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(), 340 // read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an 341 // indicator of which namespaces you have access to. 342 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), 343 344 rbacv1helpers.NewRule(Read...).Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(), 345 346 rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources( 347 "controllerrevisions", 348 "statefulsets", "statefulsets/status", "statefulsets/scale", 349 "daemonsets", "daemonsets/status", 350 "deployments", "deployments/status", "deployments/scale", 351 "replicasets", "replicasets/status", "replicasets/scale").RuleOrDie(), 352 353 rbacv1helpers.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers", "horizontalpodautoscalers/status").RuleOrDie(), 354 355 rbacv1helpers.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "cronjobs/status", "jobs/status").RuleOrDie(), 356 357 rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "daemonsets/status", "deployments", "deployments/scale", "deployments/status", 358 "ingresses", "ingresses/status", "replicasets", "replicasets/scale", "replicasets/status", "replicationcontrollers/scale", 359 "networkpolicies").RuleOrDie(), 360 361 rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets", "poddisruptionbudgets/status").RuleOrDie(), 362 363 rbacv1helpers.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies", "ingresses", "ingresses/status").RuleOrDie(), 364 }, 365 }, 366 { 367 // a role to use for heapster's connections back to the API server 368 ObjectMeta: metav1.ObjectMeta{Name: "system:heapster"}, 369 Rules: []rbacv1.PolicyRule{ 370 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(), 371 rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(), 372 }, 373 }, 374 { 375 // a role for nodes to use to have the access they need for running pods 376 ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName}, 377 Rules: NodeRules(), 378 }, 379 { 380 // a role to use for node-problem-detector access. It does not get bound to default location since 381 // deployment locations can reasonably vary. 382 ObjectMeta: metav1.ObjectMeta{Name: "system:node-problem-detector"}, 383 Rules: []rbacv1.PolicyRule{ 384 rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(), 385 rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(), 386 eventsRule(), 387 }, 388 }, 389 { 390 // a role to use for full access to the kubelet API 391 ObjectMeta: metav1.ObjectMeta{Name: "system:kubelet-api-admin"}, 392 Rules: []rbacv1.PolicyRule{ 393 // Allow read-only access to the Node API objects 394 rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), 395 // Allow all API calls to the nodes 396 rbacv1helpers.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(), 397 rbacv1helpers.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/stats", "nodes/log").RuleOrDie(), 398 }, 399 }, 400 { 401 // a role to use for bootstrapping a node's client certificates 402 ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"}, 403 Rules: []rbacv1.PolicyRule{ 404 // used to create a certificatesigningrequest for a node-specific client certificate, and watch for it to be signed 405 rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(), 406 }, 407 }, 408 { 409 // a role to use for allowing authentication and authorization delegation 410 ObjectMeta: metav1.ObjectMeta{Name: "system:auth-delegator"}, 411 Rules: []rbacv1.PolicyRule{ 412 // These creates are non-mutating 413 rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), 414 rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(), 415 }, 416 }, 417 { 418 // a role to use for the API registry, summarization, and proxy handling 419 ObjectMeta: metav1.ObjectMeta{Name: "system:kube-aggregator"}, 420 Rules: []rbacv1.PolicyRule{ 421 // it needs to see all services so that it knows whether the ones it points to exist or not 422 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), 423 }, 424 }, 425 { 426 // a role to use for bootstrapping the kube-controller-manager so it can create the shared informers 427 // service accounts, and secrets that we need to create separate identities for other controllers 428 ObjectMeta: metav1.ObjectMeta{Name: "system:kube-controller-manager"}, 429 Rules: []rbacv1.PolicyRule{ 430 eventsRule(), 431 // Needed for leader election. 432 rbacv1helpers.NewRule("create").Groups(coordinationGroup).Resources("leases").RuleOrDie(), 433 rbacv1helpers.NewRule("get", "update").Groups(coordinationGroup).Resources("leases").Names("kube-controller-manager").RuleOrDie(), 434 // Fundamental resources. 435 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("secrets", "serviceaccounts").RuleOrDie(), 436 rbacv1helpers.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(), 437 rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("namespaces", "secrets", "serviceaccounts", "configmaps").RuleOrDie(), 438 rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("secrets", "serviceaccounts").RuleOrDie(), 439 // Needed to check API access. These creates are non-mutating 440 rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), 441 rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(), 442 // Needed for all shared informers 443 rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(), 444 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(), 445 }, 446 }, 447 { 448 // a role to use for the kube-dns pod 449 ObjectMeta: metav1.ObjectMeta{Name: "system:kube-dns"}, 450 Rules: []rbacv1.PolicyRule{ 451 rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(), 452 }, 453 }, 454 { 455 // a role for an external/out-of-tree persistent volume provisioner 456 ObjectMeta: metav1.ObjectMeta{Name: "system:persistent-volume-provisioner"}, 457 Rules: []rbacv1.PolicyRule{ 458 rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), 459 // update is needed in addition to read access for setting lock annotations on PVCs 460 rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), 461 rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), 462 463 // Needed for watching provisioning success and failure events 464 rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(), 465 466 eventsRule(), 467 }, 468 }, 469 { 470 // a role making the csrapprover controller approve a node client CSR 471 ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:nodeclient"}, 472 Rules: []rbacv1.PolicyRule{ 473 rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(), 474 }, 475 }, 476 { 477 // a role making the csrapprover controller approve a node client CSR requested by the node itself 478 ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient"}, 479 Rules: []rbacv1.PolicyRule{ 480 rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(), 481 }, 482 }, 483 { 484 ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"}, 485 Rules: []rbacv1.PolicyRule{ 486 rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(), 487 rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(), 488 rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(), 489 }, 490 }, 491 { 492 ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:legacy-unknown-approver"}, 493 Rules: []rbacv1.PolicyRule{ 494 rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.LegacyUnknownSignerName).RuleOrDie(), 495 }, 496 }, 497 { 498 ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:kubelet-serving-approver"}, 499 Rules: []rbacv1.PolicyRule{ 500 rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.KubeletServingSignerName).RuleOrDie(), 501 }, 502 }, 503 { 504 ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:kube-apiserver-client-approver"}, 505 Rules: []rbacv1.PolicyRule{ 506 rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.KubeAPIServerClientSignerName).RuleOrDie(), 507 }, 508 }, 509 { 510 ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:kube-apiserver-client-kubelet-approver"}, 511 Rules: []rbacv1.PolicyRule{ 512 rbacv1helpers.NewRule("approve").Groups(certificatesGroup).Resources("signers").Names(capi.KubeAPIServerClientKubeletSignerName).RuleOrDie(), 513 }, 514 }, 515 }...) 516 517 // Add the cluster role for reading the ServiceAccountIssuerDiscovery endpoints 518 // Also allow slash-ended URLs to allow clients generated from published openapi docs prior to fixing the trailing slash to work properly 519 roles = append(roles, rbacv1.ClusterRole{ 520 ObjectMeta: metav1.ObjectMeta{Name: "system:service-account-issuer-discovery"}, 521 Rules: []rbacv1.PolicyRule{ 522 rbacv1helpers.NewRule("get").URLs( 523 "/.well-known/openid-configuration", 524 "/.well-known/openid-configuration/", 525 "/openid/v1/jwks", 526 "/openid/v1/jwks/", 527 ).RuleOrDie(), 528 }, 529 }) 530 531 // node-proxier role is used by kube-proxy. 532 nodeProxierRules := []rbacv1.PolicyRule{ 533 rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(), 534 rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(), 535 536 eventsRule(), 537 } 538 if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) { 539 nodeProxierRules = append(nodeProxierRules, rbacv1helpers.NewRule("list", "watch").Groups(networkingGroup).Resources("servicecidrs").RuleOrDie()) 540 } 541 542 nodeProxierRules = append(nodeProxierRules, rbacv1helpers.NewRule("list", "watch").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie()) 543 roles = append(roles, rbacv1.ClusterRole{ 544 ObjectMeta: metav1.ObjectMeta{Name: "system:node-proxier"}, 545 Rules: nodeProxierRules, 546 }) 547 548 kubeSchedulerRules := []rbacv1.PolicyRule{ 549 eventsRule(), 550 // This is for leaderlease access 551 // TODO: scope this to the kube-system namespace 552 rbacv1helpers.NewRule("create").Groups(coordinationGroup).Resources("leases").RuleOrDie(), 553 rbacv1helpers.NewRule("get", "update").Groups(coordinationGroup).Resources("leases").Names("kube-scheduler").RuleOrDie(), 554 555 // Fundamental resources 556 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(), 557 rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), 558 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(), 559 rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), 560 // Things that select pods 561 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(), 562 rbacv1helpers.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(), 563 rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(), 564 // Things that pods use or applies to them 565 rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(), 566 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(), 567 // Needed to check API access. These creates are non-mutating 568 rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), 569 rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(), 570 // Needed for volume limits 571 rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csinodes").RuleOrDie(), 572 // Needed for namespaceSelector feature in pod affinity 573 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(), 574 rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csidrivers").RuleOrDie(), 575 rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csistoragecapacities").RuleOrDie(), 576 } 577 // Needed for dynamic resource allocation. 578 if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { 579 kubeSchedulerRules = append(kubeSchedulerRules, 580 rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclasses").RuleOrDie(), 581 rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims").RuleOrDie(), 582 rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(), 583 rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(), 584 rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulingcontexts/status").RuleOrDie(), 585 rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(), 586 rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceslices", "resourceclassparameters", "resourceclaimparameters").RuleOrDie(), 587 ) 588 } 589 roles = append(roles, rbacv1.ClusterRole{ 590 // a role to use for the kube-scheduler 591 ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"}, 592 Rules: kubeSchedulerRules, 593 }) 594 595 // Default ClusterRole to allow reading ClusterTrustBundle objects 596 if utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundle) { 597 roles = append(roles, rbacv1.ClusterRole{ 598 ObjectMeta: metav1.ObjectMeta{Name: "system:cluster-trust-bundle-discovery"}, 599 Rules: []rbacv1.PolicyRule{ 600 rbacv1helpers.NewRule(Read...).Groups(certificatesGroup).Resources("clustertrustbundles").RuleOrDie(), 601 }, 602 }) 603 } 604 605 addClusterRoleLabel(roles) 606 return roles 607 } 608 609 const systemNodeRoleName = "system:node" 610 611 // ClusterRoleBindings return default rolebindings to the default roles 612 func ClusterRoleBindings() []rbacv1.ClusterRoleBinding { 613 rolebindings := []rbacv1.ClusterRoleBinding{ 614 rbacv1helpers.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(), 615 rbacv1helpers.NewClusterBinding("system:monitoring").Groups(user.MonitoringGroup).BindingOrDie(), 616 rbacv1helpers.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated).BindingOrDie(), 617 rbacv1helpers.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated).BindingOrDie(), 618 rbacv1helpers.NewClusterBinding("system:public-info-viewer").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(), 619 rbacv1helpers.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(), 620 rbacv1helpers.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(), 621 rbacv1helpers.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(), 622 rbacv1helpers.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(), 623 rbacv1helpers.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie(), 624 625 // This default binding of the system:node role to the system:nodes group is deprecated in 1.7 with the availability of the Node authorizer. 626 // This leaves the binding, but with an empty set of subjects, so that tightening reconciliation can remove the subject. 627 { 628 ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName}, 629 RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName}, 630 }, 631 } 632 633 // Allow all in-cluster workloads (via their service accounts) to read the OIDC discovery endpoints. 634 // Users with certain forms of write access (create pods, create secrets, create service accounts, etc) 635 // can gain access to a service account identity which would allow them to access this information. 636 // This includes the issuer URL, which is already present in the SA token JWT. Similarly, SAs can 637 // already gain this same info via introspection of their own token. Since this discovery endpoint 638 // points to what issued all service account tokens, it seems fitting for SAs to have this access. 639 // Defer to the cluster admin with regard to binding directly to all authenticated and/or 640 // unauthenticated users. 641 rolebindings = append(rolebindings, 642 rbacv1helpers.NewClusterBinding("system:service-account-issuer-discovery").Groups(serviceaccount.AllServiceAccountsGroup).BindingOrDie(), 643 ) 644 645 // Service accounts can read ClusterTrustBundle objects. 646 if utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundle) { 647 rolebindings = append(rolebindings, rbacv1helpers.NewClusterBinding("system:cluster-trust-bundle-discovery").Groups(serviceaccount.AllServiceAccountsGroup).BindingOrDie()) 648 } 649 650 addClusterRoleBindingLabel(rolebindings) 651 652 return rolebindings 653 } 654 655 // ClusterRolesToAggregate maps from previous clusterrole name to the new clusterrole name 656 func ClusterRolesToAggregate() map[string]string { 657 return map[string]string{ 658 "admin": "system:aggregate-to-admin", 659 "edit": "system:aggregate-to-edit", 660 "view": "system:aggregate-to-view", 661 } 662 } 663 664 // ClusterRoleBindingsToSplit returns a map of Names of source ClusterRoleBindings 665 // to copy Subjects, Annotations, and Labels to destination ClusterRoleBinding templates. 666 func ClusterRoleBindingsToSplit() map[string]rbacv1.ClusterRoleBinding { 667 bindingsToSplit := map[string]rbacv1.ClusterRoleBinding{} 668 for _, defaultClusterRoleBinding := range ClusterRoleBindings() { 669 switch defaultClusterRoleBinding.Name { 670 case "system:public-info-viewer": 671 bindingsToSplit["system:discovery"] = defaultClusterRoleBinding 672 } 673 } 674 return bindingsToSplit 675 }