/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
	"testing"

	v1 "k8s.io/api/core/v1"
	apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"

	"k8s.io/kubernetes/test/integration/etcd"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/kubernetes/test/utils/image"
)

// namespace used for all tests, do not change this
const resetFieldsNamespace = "reset-fields-namespace"

// resetFieldsStatusData contains statuses for all the resources in the
// statusData list with slightly different data to create a field manager
// conflict.
48 var resetFieldsStatusData = map[schema.GroupVersionResource]string{ 49 gvr("", "v1", "persistentvolumes"): `{"status": {"message": "hello2"}}`, 50 gvr("", "v1", "resourcequotas"): `{"status": {"used": {"cpu": "25M"}}}`, 51 gvr("", "v1", "services"): `{"status": {"loadBalancer": {"ingress": [{"ip": "127.0.0.2"}]}}}`, 52 gvr("extensions", "v1beta1", "ingresses"): `{"status": {"loadBalancer": {"ingress": [{"ip": "127.0.0.2"}]}}}`, 53 gvr("networking.k8s.io", "v1beta1", "ingresses"): `{"status": {"loadBalancer": {"ingress": [{"ip": "127.0.0.2"}]}}}`, 54 gvr("networking.k8s.io", "v1", "ingresses"): `{"status": {"loadBalancer": {"ingress": [{"ip": "127.0.0.2"}]}}}`, 55 gvr("autoscaling", "v1", "horizontalpodautoscalers"): `{"status": {"currentReplicas": 25}}`, 56 gvr("autoscaling", "v2", "horizontalpodautoscalers"): `{"status": {"currentReplicas": 25}}`, 57 gvr("batch", "v1", "cronjobs"): `{"status": {"lastScheduleTime": "2020-01-01T00:00:00Z"}}`, 58 gvr("batch", "v1beta1", "cronjobs"): `{"status": {"lastScheduleTime": "2020-01-01T00:00:00Z"}}`, 59 gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`, 60 gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, 61 gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, 62 gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts. 
63 gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`, 64 gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`, 65 // standard for []metav1.Condition 66 gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`, 67 gvr("admissionregistration.k8s.io", "v1beta1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`, 68 gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`, 69 } 70 71 // resetFieldsStatusDefault conflicts with statusDefault 72 const resetFieldsStatusDefault = `{"status": {"conditions": [{"type": "MyStatus", "status":"False"}]}}` 73 74 var resetFieldsSkippedResources = map[string]struct{}{} 75 76 // noConflicts is the set of reources for which 77 // a conflict cannot occur. 78 var noConflicts = map[string]struct{}{ 79 // both spec and status get wiped for CSRs, 80 // nothing is expected to be managed for it, skip it 81 "certificatesigningrequests": {}, 82 // storageVersions are skipped because their spec is empty 83 // and thus they can never have a conflict. 
84 "storageversions": {}, 85 // servicecidrs are skipped because their spec is inmutable 86 // and thus they can never have a conflict. 87 "servicecidrs": {}, 88 // namespaces only have a spec.finalizers field which is also skipped, 89 // thus it will never have a conflict. 90 "namespaces": {}, 91 // podschedulingcontexts.status only has a list which contains items with a list, 92 // therefore apply works because it simply merges either the outer or 93 // the inner list. 94 "podschedulingcontexts": {}, 95 } 96 97 var image2 = image.GetE2EImage(image.Etcd) 98 99 // resetFieldsSpecData contains conflicting data with the objects in 100 // etcd.GetEtcdStorageDataForNamespace() 101 // It contains the minimal changes needed to conflict with all the fields 102 // added to resetFields by the strategy of each resource. 103 // In most cases, just one field on the spec is changed, but 104 // some also wipe metadata or other fields. 105 var resetFieldsSpecData = map[schema.GroupVersionResource]string{ 106 gvr("", "v1", "resourcequotas"): `{"spec": {"hard": {"cpu": "25M"}}}`, 107 gvr("", "v1", "namespaces"): `{"spec": {"finalizers": ["kubernetes2"]}}`, 108 gvr("", "v1", "nodes"): `{"spec": {"unschedulable": false}}`, 109 gvr("", "v1", "persistentvolumes"): `{"spec": {"capacity": {"storage": "23M"}}}`, 110 gvr("", "v1", "persistentvolumeclaims"): `{"spec": {"resources": {"limits": {"storage": "21M"}}}}`, 111 gvr("", "v1", "pods"): `{"metadata": {"deletionTimestamp": "2020-01-01T00:00:00Z", "ownerReferences":[]}, "spec": {"containers": [{"image": "` + image2 + `", "name": "container7"}]}}`, 112 gvr("", "v1", "replicationcontrollers"): `{"spec": {"selector": {"new": "stuff2"}}}`, 113 gvr("", "v1", "resourcequotas"): `{"spec": {"hard": {"cpu": "25M"}}}`, 114 gvr("", "v1", "services"): `{"spec": {"type": "ClusterIP"}}`, 115 gvr("apps", "v1", "daemonsets"): `{"spec": {"template": {"spec": {"containers": [{"image": "` + image2 + `", "name": "container6"}]}}}}`, 116 gvr("apps", "v1", 
"deployments"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"template": {"spec": {"containers": [{"image": "` + image2 + `", "name": "container6"}]}}}}`, 117 gvr("apps", "v1", "replicasets"): `{"spec": {"template": {"spec": {"containers": [{"image": "` + image2 + `", "name": "container4"}]}}}}`, 118 gvr("apps", "v1", "statefulsets"): `{"spec": {"selector": {"matchLabels": {"a2": "b2"}}}}`, 119 gvr("autoscaling", "v1", "horizontalpodautoscalers"): `{"spec": {"maxReplicas": 23}}`, 120 gvr("autoscaling", "v2", "horizontalpodautoscalers"): `{"spec": {"maxReplicas": 23}}`, 121 gvr("autoscaling", "v2beta1", "horizontalpodautoscalers"): `{"spec": {"maxReplicas": 23}}`, 122 gvr("autoscaling", "v2beta2", "horizontalpodautoscalers"): `{"spec": {"maxReplicas": 23}}`, 123 gvr("batch", "v1", "jobs"): `{"spec": {"template": {"spec": {"containers": [{"image": "` + image2 + `", "name": "container1"}]}}}}`, 124 gvr("batch", "v1", "cronjobs"): `{"spec": {"jobTemplate": {"spec": {"template": {"spec": {"containers": [{"image": "` + image2 + `", "name": "container0"}]}}}}}}`, 125 gvr("batch", "v1beta1", "cronjobs"): `{"spec": {"jobTemplate": {"spec": {"template": {"spec": {"containers": [{"image": "` + image2 + `", "name": "container0"}]}}}}}}`, 126 gvr("certificates.k8s.io", "v1", "certificatesigningrequests"): `{}`, 127 gvr("certificates.k8s.io", "v1beta1", "certificatesigningrequests"): `{}`, 128 gvr("flowcontrol.apiserver.k8s.io", "v1alpha1", "flowschemas"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"priorityLevelConfiguration": {"name": "name2"}}}`, 129 gvr("flowcontrol.apiserver.k8s.io", "v1beta1", "flowschemas"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"priorityLevelConfiguration": {"name": "name2"}}}`, 130 gvr("flowcontrol.apiserver.k8s.io", "v1beta2", "flowschemas"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"priorityLevelConfiguration": {"name": "name2"}}}`, 131 gvr("flowcontrol.apiserver.k8s.io", "v1beta3", "flowschemas"): `{"metadata": {"labels":{"a":"c"}}, 
"spec": {"priorityLevelConfiguration": {"name": "name2"}}}`, 132 gvr("flowcontrol.apiserver.k8s.io", "v1", "flowschemas"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"priorityLevelConfiguration": {"name": "name2"}}}`, 133 gvr("flowcontrol.apiserver.k8s.io", "v1alpha1", "prioritylevelconfigurations"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"limited": {"assuredConcurrencyShares": 23}}}`, 134 gvr("flowcontrol.apiserver.k8s.io", "v1beta1", "prioritylevelconfigurations"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"limited": {"assuredConcurrencyShares": 23}}}`, 135 gvr("flowcontrol.apiserver.k8s.io", "v1beta2", "prioritylevelconfigurations"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"limited": {"assuredConcurrencyShares": 23}}}`, 136 gvr("flowcontrol.apiserver.k8s.io", "v1beta3", "prioritylevelconfigurations"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"limited": {"nominalConcurrencyShares": 23}}}`, 137 gvr("flowcontrol.apiserver.k8s.io", "v1", "prioritylevelconfigurations"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"limited": {"nominalConcurrencyShares": 23}}}`, 138 gvr("extensions", "v1beta1", "ingresses"): `{"spec": {"backend": {"serviceName": "service2"}}}`, 139 gvr("networking.k8s.io", "v1beta1", "ingresses"): `{"spec": {"backend": {"serviceName": "service2"}}}`, 140 gvr("networking.k8s.io", "v1", "ingresses"): `{"spec": {"defaultBackend": {"service": {"name": "service2"}}}}`, 141 gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): `{}`, 142 gvr("policy", "v1", "poddisruptionbudgets"): `{"spec": {"selector": {"matchLabels": {"anokkey2": "anokvalue"}}}}`, 143 gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"spec": {"selector": {"matchLabels": {"anokkey2": "anokvalue"}}}}`, 144 gvr("storage.k8s.io", "v1alpha1", "volumeattachments"): `{"metadata": {"name": "va3"}, "spec": {"nodeName": "localhost2"}}`, 145 gvr("storage.k8s.io", "v1", "volumeattachments"): `{"metadata": {"name": "va3"}, "spec": {"nodeName": "localhost2"}}`, 146 
gvr("apiextensions.k8s.io", "v1", "customresourcedefinitions"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"group": "webconsole22.operator.openshift.io"}}`, 147 gvr("apiextensions.k8s.io", "v1beta1", "customresourcedefinitions"): `{"metadata": {"labels":{"a":"c"}}, "spec": {"group": "webconsole22.operator.openshift.io"}}`, 148 gvr("awesome.bears.com", "v1", "pandas"): `{"spec": {"replicas": 102}}`, 149 gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`, 150 gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, 151 gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, 152 gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`, 153 gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`, 154 gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test. 155 gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`, 156 gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{}`, 157 gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"paramKind": {"apiVersion": "apps/v1", "kind": "Deployment"}}}`, 158 gvr("admissionregistration.k8s.io", "v1beta1", "validatingadmissionpolicies"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"paramKind": {"apiVersion": "apps/v1", "kind": "Deployment"}}}`, 159 } 160 161 // TestResetFields makes sure that fieldManager does not own fields reset by the storage strategy. 162 // It takes 2 objects obj1 and obj2 that differ by one field in the spec and one field in the status. 
// It applies obj1 to the spec endpoint and obj2 to the status endpoint, the lack of conflicts
// confirms that the fieldmanager1 is wiped of the status and fieldmanager2 is wiped of the spec.
// We then attempt to apply obj2 to the spec endpoint which fails with an expected conflict.
func TestApplyResetFields(t *testing.T) {
	// Spin up a real apiserver backed by a shared etcd; the disabled admission
	// plugins would otherwise mutate the test objects.
	server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), []string{"--disable-admission-plugins", "ServiceAccount,TaintNodesByCondition"}, framework.SharedEtcd())
	if err != nil {
		t.Fatal(err)
	}
	defer server.TearDownFn()

	client, err := kubernetes.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatal(err)
	}
	dynamicClient, err := dynamic.NewForConfig(server.ClientConfig)
	if err != nil {
		t.Fatal(err)
	}

	// create CRDs so we can make sure that custom resources do not get lost
	etcd.CreateTestCRDs(t, apiextensionsclientset.NewForConfigOrDie(server.ClientConfig), false, etcd.GetCustomResourceDefinitionData()...)

	if _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: resetFieldsNamespace}}, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	createData := etcd.GetEtcdStorageDataForNamespace(resetFieldsNamespace)
	// gather resources to test
	_, resourceLists, err := client.Discovery().ServerGroupsAndResources()
	if err != nil {
		t.Fatalf("Failed to get ServerGroupsAndResources with error: %+v", err)
	}

	for _, resourceList := range resourceLists {
		for _, resource := range resourceList.APIResources {
			// Only resources exposing a /status subresource are interesting here.
			if !strings.HasSuffix(resource.Name, "/status") {
				continue
			}
			mapping, err := createMapping(resourceList.GroupVersion, resource)
			if err != nil {
				t.Fatal(err)
			}
			t.Run(mapping.Resource.String(), func(t *testing.T) {
				if _, ok := resetFieldsSkippedResources[mapping.Resource.Resource]; ok {
					t.Skip()
				}

				// Cluster-scoped resources are applied without a namespace.
				namespace := resetFieldsNamespace
				if mapping.Scope == meta.RESTScopeRoot {
					namespace = ""
				}

				// assemble first object
				status, ok := statusData[mapping.Resource]
				if !ok {
					status = statusDefault
				}

				resource, ok := createData[mapping.Resource]
				if !ok {
					t.Fatalf("no test data for %s. Please add a test for your new type to etcd.GetEtcdStorageData() or getResetFieldsEtcdStorageData()", mapping.Resource)
				}

				// obj1 = stub from the etcd storage data, with its status overlaid.
				obj1 := unstructured.Unstructured{}
				if err := json.Unmarshal([]byte(resource.Stub), &obj1.Object); err != nil {
					t.Fatal(err)
				}
				if err := json.Unmarshal([]byte(status), &obj1.Object); err != nil {
					t.Fatal(err)
				}

				name := obj1.GetName()
				obj1.SetAPIVersion(mapping.GroupVersionKind.GroupVersion().String())
				obj1.SetKind(mapping.GroupVersionKind.Kind)
				obj1.SetName(name)

				// apply the spec of the first object
				_, err = dynamicClient.
					Resource(mapping.Resource).
					Namespace(namespace).
					Apply(context.TODO(), name, &obj1, metav1.ApplyOptions{FieldManager: "fieldmanager1"})
				if err != nil {
					t.Fatalf("Failed to apply obj1: %v", err)
				}

				// create second object
				// obj2 = obj1 with a conflicting spec and a conflicting status overlaid.
				obj2 := &unstructured.Unstructured{}
				obj1.DeepCopyInto(obj2)
				if err := json.Unmarshal([]byte(resetFieldsSpecData[mapping.Resource]), &obj2.Object); err != nil {
					t.Fatal(err)
				}
				status2, ok := resetFieldsStatusData[mapping.Resource]
				if !ok {
					status2 = resetFieldsStatusDefault
				}
				if err := json.Unmarshal([]byte(status2), &obj2.Object); err != nil {
					t.Fatal(err)
				}

				// Sanity check: the overlays above must actually have changed something.
				if reflect.DeepEqual(obj1, obj2) {
					t.Fatalf("obj1 and obj2 should not be equal %v", obj2)
				}

				// apply the status of the second object
				// this won't conflict if resetfields are set correctly
				// and will conflict if they are not
				_, err = dynamicClient.
					Resource(mapping.Resource).
					Namespace(namespace).
					ApplyStatus(context.TODO(), name, obj2, metav1.ApplyOptions{FieldManager: "fieldmanager2"})
				if err != nil {
					t.Fatalf("Failed to apply obj2: %v", err)
				}

				// skip checking for conflicts on resources
				// that will never have conflicts
				if _, ok = noConflicts[mapping.Resource.Resource]; !ok {
					var objRet *unstructured.Unstructured

					// reapply second object to the spec endpoint
					// that should fail with a conflict
					objRet, err = dynamicClient.
						Resource(mapping.Resource).
						Namespace(namespace).
						Apply(context.TODO(), name, obj2, metav1.ApplyOptions{FieldManager: "fieldmanager2"})
					err = expectConflict(objRet, err, dynamicClient, mapping.Resource, namespace, name)
					if err != nil {
						t.Fatalf("Did not get expected conflict in spec of %s %s/%s: %v", mapping.Resource, namespace, name, err)
					}

					// reapply first object to the status endpoint
					// that should fail with a conflict
					objRet, err = dynamicClient.
						Resource(mapping.Resource).
						Namespace(namespace).
						ApplyStatus(context.TODO(), name, &obj1, metav1.ApplyOptions{FieldManager: "fieldmanager1"})
					err = expectConflict(objRet, err, dynamicClient, mapping.Resource, namespace, name)
					if err != nil {
						t.Fatalf("Did not get expected conflict in status of %s %s/%s: %v", mapping.Resource, namespace, name, err)
					}
				}

				// cleanup
				rsc := dynamicClient.Resource(mapping.Resource).Namespace(namespace)
				if err := rsc.Delete(context.TODO(), name, *metav1.NewDeleteOptions(0)); err != nil {
					t.Fatalf("deleting final object failed: %v", err)
				}
			})
		}
	}
}

// expectConflict returns nil when err looks like a server-side-apply
// conflict (its message contains "conflict"). Otherwise it returns an
// error describing what was received instead, including a JSON dump of
// the object — either the one the apply call returned or, if that was
// nil, the object re-fetched from the server.
func expectConflict(objRet *unstructured.Unstructured, err error, dynamicClient dynamic.Interface, resource schema.GroupVersionResource, namespace, name string) error {
	if err != nil && strings.Contains(err.Error(), "conflict") {
		return nil
	}
	which := "returned"
	// something unexpected is going on here, let's not assume that objRet==nil if and only if err!=nil
	if objRet == nil {
		which = "subsequently fetched"
		var err2 error
		objRet, err2 = dynamicClient.
			Resource(resource).
			Namespace(namespace).
			Get(context.TODO(), name, metav1.GetOptions{})
		if err2 != nil {
			return fmt.Errorf("instead got error %w, and failed to Get object: %v", err, err2)
		}
	}
	marshBytes, marshErr := json.Marshal(objRet)
	var gotten string
	if marshErr == nil {
		gotten = string(marshBytes)
	} else {
		gotten = fmt.Sprintf("<failed to json.Marshall(%#+v): %v>", objRet, marshErr)
	}
	return fmt.Errorf("instead got error %w; %s object is %s", err, which, gotten)
}