k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/framework/autoscaling/autoscaling_utils.go (about) 1 /* 2 Copyright 2015 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package autoscaling 18 19 import ( 20 "context" 21 "fmt" 22 "strconv" 23 "sync" 24 "time" 25 26 autoscalingv1 "k8s.io/api/autoscaling/v1" 27 autoscalingv2 "k8s.io/api/autoscaling/v2" 28 v1 "k8s.io/api/core/v1" 29 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 30 crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" 31 "k8s.io/apiextensions-apiserver/test/integration/fixtures" 32 "k8s.io/apimachinery/pkg/api/meta" 33 "k8s.io/apimachinery/pkg/api/resource" 34 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 35 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 36 "k8s.io/apimachinery/pkg/runtime/schema" 37 "k8s.io/apimachinery/pkg/util/intstr" 38 "k8s.io/client-go/dynamic" 39 clientset "k8s.io/client-go/kubernetes" 40 scaleclient "k8s.io/client-go/scale" 41 "k8s.io/kubernetes/test/e2e/framework" 42 e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" 43 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" 44 e2erc "k8s.io/kubernetes/test/e2e/framework/rc" 45 e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" 46 e2eservice "k8s.io/kubernetes/test/e2e/framework/service" 47 testutils "k8s.io/kubernetes/test/utils" 48 utilpointer "k8s.io/utils/pointer" 49 50 "github.com/onsi/ginkgo/v2" 51 "github.com/onsi/gomega" 52 53 imageutils "k8s.io/kubernetes/test/utils/image" 54 ) 55 56 const ( 57 dynamicConsumptionTimeInSeconds = 30 58 dynamicRequestSizeInMillicores = 100 59 dynamicRequestSizeInMegabytes = 100 60 dynamicRequestSizeCustomMetric = 10 61 port = 80 62 targetPort = 8080 63 sidecarTargetPort = 8081 64 timeoutRC = 120 * time.Second 65 startServiceTimeout = time.Minute 66 startServiceInterval = 5 * time.Second 67 invalidKind = "ERROR: invalid workload kind for resource consumer" 68 customMetricName = "QPS" 69 serviceInitializationTimeout = 2 * time.Minute 70 serviceInitializationInterval = 15 * time.Second 71 megabytes = 1024 * 1024 72 crdVersion = "v1" 73 crdKind = "TestCRD" 74 crdGroup = "autoscalinge2e.example.com" 75 crdName = "testcrd" 76 crdNamePlural = "testcrds" 77 ) 78 79 var ( 80 // KindRC is the GVK for ReplicationController 81 KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"} 82 // KindDeployment is the GVK for Deployment 83 KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"} 84 // KindReplicaSet is the GVK for ReplicaSet 85 KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"} 86 // KindCRD is the GVK for CRD for test purposes 87 KindCRD = schema.GroupVersionKind{Group: crdGroup, Version: crdVersion, Kind: crdKind} 88 ) 89 90 // ScalingDirection identifies the scale direction for HPA Behavior. 91 type ScalingDirection int 92 93 const ( 94 DirectionUnknown ScalingDirection = iota 95 ScaleUpDirection 96 ScaleDownDirection 97 ) 98 99 /* 100 ResourceConsumer is a tool for testing. It helps to create a specified usage of CPU or memory. 101 Typical use case: 102 rc.ConsumeCPU(600) 103 // ... check your assumption here 104 rc.ConsumeCPU(300) 105 // ... check your assumption here 106 */ 107 type ResourceConsumer struct { 108 name string 109 controllerName string 110 kind schema.GroupVersionKind 111 nsName string 112 clientSet clientset.Interface 113 apiExtensionClient crdclientset.Interface 114 dynamicClient dynamic.Interface 115 resourceClient dynamic.ResourceInterface 116 scaleClient scaleclient.ScalesGetter 117 cpu chan int 118 mem chan int 119 customMetric chan int 120 stopCPU chan int 121 stopMem chan int 122 stopCustomMetric chan int 123 stopWaitGroup sync.WaitGroup 124 consumptionTimeInSeconds int 125 sleepTime time.Duration 126 requestSizeInMillicores int 127 requestSizeInMegabytes int 128 requestSizeCustomMetric int 129 sidecarStatus SidecarStatusType 130 sidecarType SidecarWorkloadType 131 } 132 133 // NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer 134 func NewDynamicResourceConsumer(ctx context.Context, name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, enableSidecar SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer { 135 return newResourceConsumer(ctx, name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, 136 dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil, enableSidecar, sidecarType) 137 } 138 139 // getSidecarContainer returns sidecar container 140 func getSidecarContainer(name string, cpuLimit, memLimit int64) v1.Container { 141 container := v1.Container{ 142 Name: name + "-sidecar", 143 Image: imageutils.GetE2EImage(imageutils.ResourceConsumer), 144 Command: []string{"/consumer", "-port=8081"}, 145 Ports: []v1.ContainerPort{{ContainerPort: 80}}, 146 } 147 148 if cpuLimit > 0 || memLimit > 0 { 149 container.Resources.Limits = v1.ResourceList{} 150 container.Resources.Requests = v1.ResourceList{} 151 } 152 153 if cpuLimit > 0 { 154 container.Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(cpuLimit, resource.DecimalSI) 155 container.Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(cpuLimit, resource.DecimalSI) 156 } 157 158 if memLimit > 0 { 159 container.Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(memLimit*megabytes, resource.DecimalSI) 160 container.Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memLimit*megabytes, resource.DecimalSI) 161 } 162 163 return container 164 } 165 166 /* 167 NewResourceConsumer creates new ResourceConsumer 168 initCPUTotal argument is in millicores 169 initMemoryTotal argument is in megabytes 170 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod 171 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod 172 */ 173 func newResourceConsumer(ctx context.Context, name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, 174 requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string, sidecarStatus SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer { 175 if podAnnotations == nil { 176 podAnnotations = make(map[string]string) 177 } 178 if serviceAnnotations == nil { 179 serviceAnnotations = make(map[string]string) 180 } 181 182 var additionalContainers []v1.Container 183 184 if sidecarStatus == Enable { 185 sidecarContainer := getSidecarContainer(name, cpuLimit, memLimit) 186 additionalContainers = append(additionalContainers, sidecarContainer) 187 } 188 189 config, err := framework.LoadConfig() 190 framework.ExpectNoError(err) 191 apiExtensionClient, err := crdclientset.NewForConfig(config) 192 framework.ExpectNoError(err) 193 dynamicClient, err := dynamic.NewForConfig(config) 194 framework.ExpectNoError(err) 195 resourceClient := dynamicClient.Resource(schema.GroupVersionResource{Group: crdGroup, Version: crdVersion, Resource: crdNamePlural}).Namespace(nsName) 196 197 runServiceAndWorkloadForResourceConsumer(ctx, clientset, resourceClient, apiExtensionClient, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations, additionalContainers) 198 controllerName := name + "-ctrl" 199 // If sidecar is enabled and busy, run service and consumer for sidecar 200 if sidecarStatus == Enable && sidecarType == Busy { 201 runServiceAndSidecarForResourceConsumer(ctx, clientset, nsName, name, kind, replicas, serviceAnnotations) 202 controllerName = name + "-sidecar-ctrl" 203 } 204 205 rc := &ResourceConsumer{ 206 name: name, 207 controllerName: controllerName, 208 kind: kind, 209 nsName: nsName, 210 clientSet: clientset, 211 apiExtensionClient: apiExtensionClient, 212 scaleClient: scaleClient, 213 resourceClient: resourceClient, 214 dynamicClient: dynamicClient, 215 cpu: make(chan int), 216 mem: make(chan int), 217 customMetric: make(chan int), 218 stopCPU: make(chan int), 219 stopMem: make(chan int), 220 stopCustomMetric: make(chan int), 221 consumptionTimeInSeconds: consumptionTimeInSeconds, 222 sleepTime: time.Duration(consumptionTimeInSeconds) * time.Second, 223 requestSizeInMillicores: requestSizeInMillicores, 224 requestSizeInMegabytes: requestSizeInMegabytes, 225 requestSizeCustomMetric: requestSizeCustomMetric, 226 sidecarType: sidecarType, 227 sidecarStatus: sidecarStatus, 228 } 229 230 go rc.makeConsumeCPURequests(ctx) 231 rc.ConsumeCPU(initCPUTotal) 232 go rc.makeConsumeMemRequests(ctx) 233 rc.ConsumeMem(initMemoryTotal) 234 go rc.makeConsumeCustomMetric(ctx) 235 rc.ConsumeCustomMetric(initCustomMetric) 236 return rc 237 } 238 239 // ConsumeCPU consumes given number of CPU 240 func (rc *ResourceConsumer) ConsumeCPU(millicores int) { 241 framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores) 242 rc.cpu <- millicores 243 } 244 245 // ConsumeMem consumes given number of Mem 246 func (rc *ResourceConsumer) ConsumeMem(megabytes int) { 247 framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes) 248 rc.mem <- megabytes 249 } 250 251 // ConsumeCustomMetric consumes given number of custom metric 252 func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) { 253 framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount) 254 rc.customMetric <- amount 255 } 256 257 func (rc *ResourceConsumer) makeConsumeCPURequests(ctx context.Context) { 258 defer ginkgo.GinkgoRecover() 259 rc.stopWaitGroup.Add(1) 260 defer rc.stopWaitGroup.Done() 261 tick := time.After(time.Duration(0)) 262 millicores := 0 263 for { 264 select { 265 case millicores = <-rc.cpu: 266 if millicores != 0 { 267 framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores) 268 } else { 269 framework.Logf("RC %s: disabling CPU consumption", rc.name) 270 } 271 case <-tick: 272 if millicores != 0 { 273 framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores) 274 rc.sendConsumeCPURequest(ctx, millicores) 275 } 276 tick = time.After(rc.sleepTime) 277 case <-ctx.Done(): 278 framework.Logf("RC %s: stopping CPU consumer: %v", rc.name, ctx.Err()) 279 return 280 case <-rc.stopCPU: 281 framework.Logf("RC %s: stopping CPU consumer", rc.name) 282 return 283 } 284 } 285 } 286 287 func (rc *ResourceConsumer) makeConsumeMemRequests(ctx context.Context) { 288 defer ginkgo.GinkgoRecover() 289 rc.stopWaitGroup.Add(1) 290 defer rc.stopWaitGroup.Done() 291 tick := time.After(time.Duration(0)) 292 megabytes := 0 293 for { 294 select { 295 case megabytes = <-rc.mem: 296 if megabytes != 0 { 297 framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes) 298 } else { 299 framework.Logf("RC %s: disabling mem consumption", rc.name) 300 } 301 case <-tick: 302 if megabytes != 0 { 303 framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes) 304 rc.sendConsumeMemRequest(ctx, megabytes) 305 } 306 tick = time.After(rc.sleepTime) 307 case <-ctx.Done(): 308 framework.Logf("RC %s: stopping mem consumer: %v", rc.name, ctx.Err()) 309 return 310 case <-rc.stopMem: 311 framework.Logf("RC %s: stopping mem consumer", rc.name) 312 return 313 } 314 } 315 } 316 317 func (rc *ResourceConsumer) makeConsumeCustomMetric(ctx context.Context) { 318 defer ginkgo.GinkgoRecover() 319 rc.stopWaitGroup.Add(1) 320 defer rc.stopWaitGroup.Done() 321 tick := time.After(time.Duration(0)) 322 delta := 0 323 for { 324 select { 325 case delta = <-rc.customMetric: 326 if delta != 0 { 327 framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta) 328 } else { 329 framework.Logf("RC %s: disabling consumption of custom metric %s", rc.name, customMetricName) 330 } 331 case <-tick: 332 if delta != 0 { 333 framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName) 334 rc.sendConsumeCustomMetric(ctx, delta) 335 } 336 tick = time.After(rc.sleepTime) 337 case <-ctx.Done(): 338 framework.Logf("RC %s: stopping metric consumer: %v", rc.name, ctx.Err()) 339 return 340 case <-rc.stopCustomMetric: 341 framework.Logf("RC %s: stopping metric consumer", rc.name) 342 return 343 } 344 } 345 } 346 347 func (rc *ResourceConsumer) sendConsumeCPURequest(ctx context.Context, millicores int) { 348 err := framework.Gomega().Eventually(ctx, func(ctx context.Context) error { 349 proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) 350 if err != nil { 351 return err 352 } 353 req := proxyRequest.Namespace(rc.nsName). 354 Name(rc.controllerName). 355 Suffix("ConsumeCPU"). 356 Param("millicores", strconv.Itoa(millicores)). 357 Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). 358 Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores)) 359 framework.Logf("ConsumeCPU URL: %v", *req.URL()) 360 _, err = req.DoRaw(ctx) 361 if err != nil { 362 framework.Logf("ConsumeCPU failure: %v", err) 363 return err 364 } 365 return nil 366 }).WithTimeout(serviceInitializationTimeout).WithPolling(serviceInitializationInterval).Should(gomega.Succeed()) 367 368 // Test has already finished (ctx got canceled), so don't fail on err from PollUntilContextTimeout 369 // which is a side-effect to context cancelling from the cleanup task. 370 if ctx.Err() != nil { 371 return 372 } 373 374 framework.ExpectNoError(err) 375 } 376 377 // sendConsumeMemRequest sends POST request for memory consumption 378 func (rc *ResourceConsumer) sendConsumeMemRequest(ctx context.Context, megabytes int) { 379 err := framework.Gomega().Eventually(ctx, func(ctx context.Context) error { 380 proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) 381 if err != nil { 382 return err 383 } 384 req := proxyRequest.Namespace(rc.nsName). 385 Name(rc.controllerName). 386 Suffix("ConsumeMem"). 387 Param("megabytes", strconv.Itoa(megabytes)). 388 Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). 389 Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes)) 390 framework.Logf("ConsumeMem URL: %v", *req.URL()) 391 _, err = req.DoRaw(ctx) 392 if err != nil { 393 framework.Logf("ConsumeMem failure: %v", err) 394 return err 395 } 396 return nil 397 }).WithTimeout(serviceInitializationTimeout).WithPolling(serviceInitializationInterval).Should(gomega.Succeed()) 398 399 // Test has already finished (ctx got canceled), so don't fail on err from PollUntilContextTimeout 400 // which is a side-effect to context cancelling from the cleanup task. 401 if ctx.Err() != nil { 402 return 403 } 404 405 framework.ExpectNoError(err) 406 } 407 408 // sendConsumeCustomMetric sends POST request for custom metric consumption 409 func (rc *ResourceConsumer) sendConsumeCustomMetric(ctx context.Context, delta int) { 410 err := framework.Gomega().Eventually(ctx, func(ctx context.Context) error { 411 proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) 412 if err != nil { 413 return err 414 } 415 req := proxyRequest.Namespace(rc.nsName). 416 Name(rc.controllerName). 417 Suffix("BumpMetric"). 418 Param("metric", customMetricName). 419 Param("delta", strconv.Itoa(delta)). 420 Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). 421 Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric)) 422 framework.Logf("ConsumeCustomMetric URL: %v", *req.URL()) 423 _, err = req.DoRaw(ctx) 424 if err != nil { 425 framework.Logf("ConsumeCustomMetric failure: %v", err) 426 return err 427 } 428 return nil 429 }).WithTimeout(serviceInitializationTimeout).WithPolling(serviceInitializationInterval).Should(gomega.Succeed()) 430 431 // Test has already finished (ctx got canceled), so don't fail on err from PollUntilContextTimeout 432 // which is a side-effect to context cancelling from the cleanup task. 433 if ctx.Err() != nil { 434 return 435 } 436 437 framework.ExpectNoError(err) 438 } 439 440 // GetReplicas get the replicas 441 func (rc *ResourceConsumer) GetReplicas(ctx context.Context) (int, error) { 442 switch rc.kind { 443 case KindRC: 444 replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) 445 if err != nil { 446 return 0, err 447 } 448 return int(replicationController.Status.ReadyReplicas), nil 449 case KindDeployment: 450 deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) 451 if err != nil { 452 return 0, err 453 } 454 return int(deployment.Status.ReadyReplicas), nil 455 case KindReplicaSet: 456 rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) 457 if err != nil { 458 return 0, err 459 } 460 return int(rs.Status.ReadyReplicas), nil 461 case KindCRD: 462 deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(ctx, rc.name, metav1.GetOptions{}) 463 if err != nil { 464 return 0, err 465 } 466 deploymentReplicas := int64(deployment.Status.ReadyReplicas) 467 468 scale, err := rc.scaleClient.Scales(rc.nsName).Get(ctx, schema.GroupResource{Group: crdGroup, Resource: crdNamePlural}, rc.name, metav1.GetOptions{}) 469 if err != nil { 470 return 0, err 471 } 472 crdInstance, err := rc.resourceClient.Get(ctx, rc.name, metav1.GetOptions{}) 473 if err != nil { 474 return 0, err 475 } 476 // Update custom resource's status.replicas with child Deployment's current number of ready replicas. 477 err = unstructured.SetNestedField(crdInstance.Object, deploymentReplicas, "status", "replicas") 478 if err != nil { 479 return 0, err 480 } 481 _, err = rc.resourceClient.Update(ctx, crdInstance, metav1.UpdateOptions{}) 482 if err != nil { 483 return 0, err 484 } 485 return int(scale.Spec.Replicas), nil 486 default: 487 return 0, fmt.Errorf(invalidKind) 488 } 489 } 490 491 // GetHpa get the corresponding horizontalPodAutoscaler object 492 func (rc *ResourceConsumer) GetHpa(ctx context.Context, name string) (*autoscalingv1.HorizontalPodAutoscaler, error) { 493 return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(ctx, name, metav1.GetOptions{}) 494 } 495 496 // WaitForReplicas wait for the desired replicas 497 func (rc *ResourceConsumer) WaitForReplicas(ctx context.Context, desiredReplicas int, duration time.Duration) { 498 interval := 20 * time.Second 499 err := framework.Gomega().Eventually(ctx, framework.HandleRetry(rc.GetReplicas)). 500 WithTimeout(duration). 501 WithPolling(interval). 502 Should(gomega.Equal(desiredReplicas)) 503 504 framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas) 505 } 506 507 // EnsureDesiredReplicasInRange ensure the replicas is in a desired range 508 func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(ctx context.Context, minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) { 509 interval := 10 * time.Second 510 desiredReplicasErr := framework.Gomega().Consistently(ctx, framework.HandleRetry(rc.GetReplicas)). 511 WithTimeout(duration). 512 WithPolling(interval). 513 Should(gomega.And(gomega.BeNumerically(">=", minDesiredReplicas), gomega.BeNumerically("<=", maxDesiredReplicas))) 514 515 // dump HPA for debugging 516 as, err := rc.GetHpa(ctx, hpaName) 517 if err != nil { 518 framework.Logf("Error getting HPA: %s", err) 519 } else { 520 framework.Logf("HPA status: %+v", as.Status) 521 } 522 framework.ExpectNoError(desiredReplicasErr) 523 } 524 525 // Pause stops background goroutines responsible for consuming resources. 526 func (rc *ResourceConsumer) Pause() { 527 ginkgo.By(fmt.Sprintf("HPA pausing RC %s", rc.name)) 528 rc.stopCPU <- 0 529 rc.stopMem <- 0 530 rc.stopCustomMetric <- 0 531 rc.stopWaitGroup.Wait() 532 } 533 534 // Resume starts background goroutines responsible for consuming resources. 535 func (rc *ResourceConsumer) Resume(ctx context.Context) { 536 ginkgo.By(fmt.Sprintf("HPA resuming RC %s", rc.name)) 537 go rc.makeConsumeCPURequests(ctx) 538 go rc.makeConsumeMemRequests(ctx) 539 go rc.makeConsumeCustomMetric(ctx) 540 } 541 542 // CleanUp clean up the background goroutines responsible for consuming resources. 543 func (rc *ResourceConsumer) CleanUp(ctx context.Context) { 544 ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name)) 545 close(rc.stopCPU) 546 close(rc.stopMem) 547 close(rc.stopCustomMetric) 548 rc.stopWaitGroup.Wait() 549 // Wait some time to ensure all child goroutines are finished. 550 time.Sleep(10 * time.Second) 551 kind := rc.kind.GroupKind() 552 if kind.Kind == crdKind { 553 gvr := schema.GroupVersionResource{Group: crdGroup, Version: crdVersion, Resource: crdNamePlural} 554 framework.ExpectNoError(e2eresource.DeleteCustomResourceAndWaitForGC(ctx, rc.clientSet, rc.dynamicClient, rc.scaleClient, gvr, rc.nsName, rc.name)) 555 556 } else { 557 framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, rc.clientSet, kind, rc.nsName, rc.name)) 558 } 559 560 framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name, metav1.DeleteOptions{})) 561 framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(ctx, rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName)) 562 framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name+"-ctrl", metav1.DeleteOptions{})) 563 // Cleanup sidecar related resources 564 if rc.sidecarStatus == Enable && rc.sidecarType == Busy { 565 framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name+"-sidecar", metav1.DeleteOptions{})) 566 framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(ctx, rc.name+"-sidecar-ctrl", metav1.DeleteOptions{})) 567 } 568 } 569 570 func createService(ctx context.Context, c clientset.Interface, name, ns string, annotations, selectors map[string]string, port int32, targetPort int) (*v1.Service, error) { 571 return c.CoreV1().Services(ns).Create(ctx, &v1.Service{ 572 ObjectMeta: metav1.ObjectMeta{ 573 Name: name, 574 Annotations: annotations, 575 }, 576 Spec: v1.ServiceSpec{ 577 Ports: []v1.ServicePort{{ 578 Port: port, 579 TargetPort: intstr.FromInt32(int32(targetPort)), 580 }}, 581 Selector: selectors, 582 }, 583 }, metav1.CreateOptions{}) 584 } 585 586 // runServiceAndSidecarForResourceConsumer creates service and runs resource consumer for sidecar container 587 func runServiceAndSidecarForResourceConsumer(ctx context.Context, c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, serviceAnnotations map[string]string) { 588 ginkgo.By(fmt.Sprintf("Running consuming RC sidecar %s via %s with %v replicas", name, kind, replicas)) 589 590 sidecarName := name + "-sidecar" 591 serviceSelectors := map[string]string{ 592 "name": name, 593 } 594 _, err := createService(ctx, c, sidecarName, ns, serviceAnnotations, serviceSelectors, port, sidecarTargetPort) 595 framework.ExpectNoError(err) 596 597 ginkgo.By("Running controller for sidecar") 598 controllerName := sidecarName + "-ctrl" 599 _, err = createService(ctx, c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort) 600 framework.ExpectNoError(err) 601 602 dnsClusterFirst := v1.DNSClusterFirst 603 controllerRcConfig := testutils.RCConfig{ 604 Client: c, 605 Image: imageutils.GetE2EImage(imageutils.Agnhost), 606 Name: controllerName, 607 Namespace: ns, 608 Timeout: timeoutRC, 609 Replicas: 1, 610 Command: []string{"/agnhost", "resource-consumer-controller", "--consumer-service-name=" + sidecarName, "--consumer-service-namespace=" + ns, "--consumer-port=80"}, 611 DNSPolicy: &dnsClusterFirst, 612 } 613 614 framework.ExpectNoError(e2erc.RunRC(ctx, controllerRcConfig)) 615 // Wait for endpoints to propagate for the controller service. 616 framework.ExpectNoError(framework.WaitForServiceEndpointsNum( 617 ctx, c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) 618 } 619 620 func runServiceAndWorkloadForResourceConsumer(ctx context.Context, c clientset.Interface, resourceClient dynamic.ResourceInterface, apiExtensionClient crdclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string, additionalContainers []v1.Container) { 621 ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) 622 _, err := createService(ctx, c, name, ns, serviceAnnotations, map[string]string{"name": name}, port, targetPort) 623 framework.ExpectNoError(err) 624 625 rcConfig := testutils.RCConfig{ 626 Client: c, 627 Image: imageutils.GetE2EImage(imageutils.ResourceConsumer), 628 Name: name, 629 Namespace: ns, 630 Timeout: timeoutRC, 631 Replicas: replicas, 632 CpuRequest: cpuLimitMillis, 633 CpuLimit: cpuLimitMillis, 634 MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes 635 MemLimit: memLimitMb * 1024 * 1024, 636 Annotations: podAnnotations, 637 AdditionalContainers: additionalContainers, 638 } 639 640 dpConfig := testutils.DeploymentConfig{ 641 RCConfig: rcConfig, 642 } 643 dpConfig.NodeDumpFunc = e2edebug.DumpNodeDebugInfo 644 dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers 645 646 switch kind { 647 case KindRC: 648 framework.ExpectNoError(e2erc.RunRC(ctx, rcConfig)) 649 case KindDeployment: 650 ginkgo.By(fmt.Sprintf("Creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) 651 framework.ExpectNoError(testutils.RunDeployment(ctx, dpConfig)) 652 case KindReplicaSet: 653 rsConfig := testutils.ReplicaSetConfig{ 654 RCConfig: rcConfig, 655 } 656 ginkgo.By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace)) 657 framework.ExpectNoError(runReplicaSet(ctx, rsConfig)) 658 case KindCRD: 659 crd := CreateCustomResourceDefinition(ctx, apiExtensionClient) 660 crdInstance, err := CreateCustomSubresourceInstance(ctx, ns, name, resourceClient, crd) 661 framework.ExpectNoError(err) 662 663 ginkgo.By(fmt.Sprintf("Creating deployment %s backing CRD in namespace %s", dpConfig.Name, dpConfig.Namespace)) 664 framework.ExpectNoError(testutils.RunDeployment(ctx, dpConfig)) 665 666 deployment, err := c.AppsV1().Deployments(dpConfig.Namespace).Get(ctx, dpConfig.Name, metav1.GetOptions{}) 667 framework.ExpectNoError(err) 668 deployment.SetOwnerReferences([]metav1.OwnerReference{{ 669 APIVersion: kind.GroupVersion().String(), 670 Kind: crdKind, 671 Name: name, 672 UID: crdInstance.GetUID(), 673 }}) 674 _, err = c.AppsV1().Deployments(dpConfig.Namespace).Update(ctx, deployment, metav1.UpdateOptions{}) 675 framework.ExpectNoError(err) 676 default: 677 framework.Failf(invalidKind) 678 } 679 680 ginkgo.By(fmt.Sprintf("Running controller")) 681 controllerName := name + "-ctrl" 682 _, err = createService(ctx, c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort) 683 framework.ExpectNoError(err) 684 685 dnsClusterFirst := v1.DNSClusterFirst 686 controllerRcConfig := testutils.RCConfig{ 687 Client: c, 688 Image: imageutils.GetE2EImage(imageutils.Agnhost), 689 Name: controllerName, 690 Namespace: ns, 691 Timeout: timeoutRC, 692 Replicas: 1, 693 Command: []string{"/agnhost", "resource-consumer-controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"}, 694 DNSPolicy: &dnsClusterFirst, 695 } 696 697 framework.ExpectNoError(e2erc.RunRC(ctx, controllerRcConfig)) 698 // Wait for endpoints to propagate for the controller service. 699 framework.ExpectNoError(framework.WaitForServiceEndpointsNum( 700 ctx, c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) 701 } 702 703 func CreateHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, metrics []autoscalingv2.MetricSpec, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { 704 hpa := &autoscalingv2.HorizontalPodAutoscaler{ 705 ObjectMeta: metav1.ObjectMeta{ 706 Name: targetRef.Name, 707 Namespace: namespace, 708 }, 709 Spec: autoscalingv2.HorizontalPodAutoscalerSpec{ 710 ScaleTargetRef: targetRef, 711 MinReplicas: &minReplicas, 712 MaxReplicas: maxReplicas, 713 Metrics: metrics, 714 }, 715 } 716 hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(namespace).Create(ctx, hpa, metav1.CreateOptions{}) 717 framework.ExpectNoError(errHPA) 718 return hpa 719 } 720 721 func CreateResourceHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { 722 targetRef := autoscalingv2.CrossVersionObjectReference{ 723 APIVersion: rc.kind.GroupVersion().String(), 724 Kind: rc.kind.Kind, 725 Name: rc.name, 726 } 727 metrics := []autoscalingv2.MetricSpec{ 728 { 729 Type: autoscalingv2.ResourceMetricSourceType, 730 Resource: &autoscalingv2.ResourceMetricSource{ 731 Name: resourceType, 732 Target: CreateMetricTargetWithType(resourceType, metricTargetType, metricTargetValue), 733 }, 734 }, 735 } 736 return CreateHorizontalPodAutoscaler(ctx, rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas) 737 } 738 739 func CreateCPUResourceHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { 740 return CreateResourceHorizontalPodAutoscaler(ctx, rc, v1.ResourceCPU, autoscalingv2.UtilizationMetricType, cpu, minReplicas, maxReplicas) 741 } 742 743 // DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources. 744 func DeleteHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, autoscalerName string) { 745 framework.ExpectNoError(rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(ctx, autoscalerName, metav1.DeleteOptions{})) 746 } 747 748 // runReplicaSet launches (and verifies correctness) of a replicaset. 749 func runReplicaSet(ctx context.Context, config testutils.ReplicaSetConfig) error { 750 ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace)) 751 config.NodeDumpFunc = e2edebug.DumpNodeDebugInfo 752 config.ContainerDumpFunc = e2ekubectl.LogFailedContainers 753 return testutils.RunReplicaSet(ctx, config) 754 } 755 756 func CreateContainerResourceHorizontalPodAutoscaler(ctx context.Context, rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler { 757 targetRef := autoscalingv2.CrossVersionObjectReference{ 758 APIVersion: rc.kind.GroupVersion().String(), 759 Kind: rc.kind.Kind, 760 Name: rc.name, 761 } 762 metrics := []autoscalingv2.MetricSpec{ 763 { 764 Type: autoscalingv2.ContainerResourceMetricSourceType, 765 ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ 766 Name: resourceType, 767 Container: rc.name, 768 Target: CreateMetricTargetWithType(resourceType, metricTargetType, metricTargetValue), 769 }, 770 }, 771 } 772 return CreateHorizontalPodAutoscaler(ctx, rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas) 773 } 774 775 // DeleteContainerResourceHPA delete the horizontalPodAutoscaler for consuming resources. 776 func DeleteContainerResourceHPA(ctx context.Context, rc *ResourceConsumer, autoscalerName string) { 777 framework.ExpectNoError(rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(ctx, autoscalerName, metav1.DeleteOptions{})) 778 } 779 780 func CreateMetricTargetWithType(resourceType v1.ResourceName, targetType autoscalingv2.MetricTargetType, targetValue int32) autoscalingv2.MetricTarget { 781 var metricTarget autoscalingv2.MetricTarget 782 if targetType == autoscalingv2.UtilizationMetricType { 783 metricTarget = autoscalingv2.MetricTarget{ 784 Type: targetType, 785 AverageUtilization: &targetValue, 786 } 787 } else if targetType == autoscalingv2.AverageValueMetricType { 788 var averageValue *resource.Quantity 789 if resourceType == v1.ResourceCPU { 790 averageValue = resource.NewMilliQuantity(int64(targetValue), resource.DecimalSI) 791 } else { 792 averageValue = resource.NewQuantity(int64(targetValue*megabytes), resource.DecimalSI) 793 } 794 metricTarget = autoscalingv2.MetricTarget{ 795 Type: targetType, 796 AverageValue: averageValue, 797 } 798 } 799 return metricTarget 800 } 801 802 func CreateCPUHorizontalPodAutoscalerWithBehavior(ctx context.Context, rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler { 803 hpa := &autoscalingv2.HorizontalPodAutoscaler{ 804 ObjectMeta: metav1.ObjectMeta{ 805 Name: rc.name, 806 Namespace: rc.nsName, 807 }, 808 Spec: autoscalingv2.HorizontalPodAutoscalerSpec{ 809 ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ 810 APIVersion: rc.kind.GroupVersion().String(), 811 Kind: rc.kind.Kind, 812 Name: rc.name, 813 }, 814 MinReplicas: &minReplicas, 815 MaxReplicas: maxRepl, 816 Metrics: []autoscalingv2.MetricSpec{ 817 { 818 Type: autoscalingv2.ResourceMetricSourceType, 819 Resource: &autoscalingv2.ResourceMetricSource{ 820 Name: v1.ResourceCPU, 821 Target: autoscalingv2.MetricTarget{ 822 Type: autoscalingv2.UtilizationMetricType, 823 AverageUtilization: &cpu, 824 }, 825 }, 826 }, 827 }, 828 Behavior: behavior, 829 }, 830 } 831 hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(ctx, hpa, metav1.CreateOptions{}) 832 framework.ExpectNoError(errHPA) 833 return hpa 834 } 835 836 func HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule *autoscalingv2.HPAScalingRules) *autoscalingv2.HorizontalPodAutoscalerBehavior { 837 return &autoscalingv2.HorizontalPodAutoscalerBehavior{ 838 ScaleUp: scaleUpRule, 839 ScaleDown: scaleDownRule, 840 } 841 } 842 843 func HPABehaviorWithScalingRuleInDirection(scalingDirection ScalingDirection, rule *autoscalingv2.HPAScalingRules) *autoscalingv2.HorizontalPodAutoscalerBehavior { 844 var scaleUpRule, scaleDownRule *autoscalingv2.HPAScalingRules 845 if scalingDirection == ScaleUpDirection { 846 scaleUpRule = rule 847 } 848 if scalingDirection == ScaleDownDirection { 849 scaleDownRule = rule 850 } 851 return HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule) 852 } 853 854 func HPAScalingRuleWithStabilizationWindow(stabilizationDuration int32) *autoscalingv2.HPAScalingRules { 855 return &autoscalingv2.HPAScalingRules{ 856 StabilizationWindowSeconds: &stabilizationDuration, 857 } 858 } 859 860 func HPAScalingRuleWithPolicyDisabled() *autoscalingv2.HPAScalingRules { 861 disabledPolicy := autoscalingv2.DisabledPolicySelect 862 return &autoscalingv2.HPAScalingRules{ 863 SelectPolicy: &disabledPolicy, 864 } 865 } 866 867 func HPAScalingRuleWithScalingPolicy(policyType autoscalingv2.HPAScalingPolicyType, value, periodSeconds int32) *autoscalingv2.HPAScalingRules { 868 stabilizationWindowDisabledDuration := int32(0) 869 selectPolicy := autoscalingv2.MaxChangePolicySelect 870 return &autoscalingv2.HPAScalingRules{ 871 Policies: []autoscalingv2.HPAScalingPolicy{ 872 { 873 Type: policyType, 874 Value: value, 875 PeriodSeconds: periodSeconds, 876 }, 877 }, 878 SelectPolicy: &selectPolicy, 879 StabilizationWindowSeconds: &stabilizationWindowDisabledDuration, 880 } 881 } 882 883 func HPABehaviorWithStabilizationWindows(upscaleStabilization, downscaleStabilization time.Duration) *autoscalingv2.HorizontalPodAutoscalerBehavior { 884 scaleUpRule := HPAScalingRuleWithStabilizationWindow(int32(upscaleStabilization.Seconds())) 885 scaleDownRule := HPAScalingRuleWithStabilizationWindow(int32(downscaleStabilization.Seconds())) 886 return HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule) 887 } 888 889 func HPABehaviorWithScaleDisabled(scalingDirection ScalingDirection) *autoscalingv2.HorizontalPodAutoscalerBehavior { 890 scalingRule := HPAScalingRuleWithPolicyDisabled() 891 return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule) 892 } 893 894 func HPABehaviorWithScaleLimitedByNumberOfPods(scalingDirection ScalingDirection, numberOfPods, periodSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior { 895 scalingRule := HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, numberOfPods, periodSeconds) 896 return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule) 897 } 898 899 func HPABehaviorWithScaleLimitedByPercentage(scalingDirection ScalingDirection, percentage, periodSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior { 900 scalingRule := HPAScalingRuleWithScalingPolicy(autoscalingv2.PercentScalingPolicy, percentage, periodSeconds) 901 return HPABehaviorWithScalingRuleInDirection(scalingDirection, scalingRule) 902 } 903 904 func DeleteHPAWithBehavior(ctx context.Context, rc *ResourceConsumer, autoscalerName string) { 905 framework.ExpectNoError(rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(ctx, autoscalerName, metav1.DeleteOptions{})) 906 } 907 908 // SidecarStatusType type for sidecar status 909 type SidecarStatusType bool 910 911 const ( 912 Enable SidecarStatusType = true 913 Disable SidecarStatusType = false 914 ) 915 916 // SidecarWorkloadType type of the sidecar 917 type SidecarWorkloadType string 918 919 const ( 920 Busy SidecarWorkloadType = "Busy" 921 Idle SidecarWorkloadType = "Idle" 922 ) 923 924 func CreateCustomResourceDefinition(ctx context.Context, c crdclientset.Interface) *apiextensionsv1.CustomResourceDefinition { 925 crdSchema := &apiextensionsv1.CustomResourceDefinition{ 926 ObjectMeta: metav1.ObjectMeta{Name: crdNamePlural + "." + crdGroup}, 927 Spec: apiextensionsv1.CustomResourceDefinitionSpec{ 928 Group: crdGroup, 929 Scope: apiextensionsv1.ResourceScope("Namespaced"), 930 Names: apiextensionsv1.CustomResourceDefinitionNames{ 931 Plural: crdNamePlural, 932 Singular: crdName, 933 Kind: crdKind, 934 ListKind: "TestCRDList", 935 }, 936 Versions: []apiextensionsv1.CustomResourceDefinitionVersion{{ 937 Name: crdVersion, 938 Served: true, 939 Storage: true, 940 Schema: fixtures.AllowAllSchema(), 941 Subresources: &apiextensionsv1.CustomResourceSubresources{ 942 Scale: &apiextensionsv1.CustomResourceSubresourceScale{ 943 SpecReplicasPath: ".spec.replicas", 944 StatusReplicasPath: ".status.replicas", 945 LabelSelectorPath: utilpointer.String(".status.selector"), 946 }, 947 }, 948 }}, 949 }, 950 Status: apiextensionsv1.CustomResourceDefinitionStatus{}, 951 } 952 // Create Custom Resource Definition if it's not present. 953 crd, err := c.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crdSchema.Name, metav1.GetOptions{}) 954 if err != nil { 955 crd, err = c.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, crdSchema, metav1.CreateOptions{}) 956 framework.ExpectNoError(err) 957 // Wait until just created CRD appears in discovery. 958 err = framework.Gomega().Eventually(ctx, framework.RetryNotFound(framework.HandleRetry(func(ctx context.Context) (*metav1.APIResourceList, error) { 959 return c.Discovery().ServerResourcesForGroupVersion(crd.Spec.Group + "/" + "v1") 960 }))).Should(framework.MakeMatcher(func(actual *metav1.APIResourceList) (func() string, error) { 961 for _, g := range actual.APIResources { 962 if g.Name == crd.Spec.Names.Plural { 963 return nil, nil 964 } 965 } 966 return func() string { 967 return fmt.Sprintf("CRD %s not found in discovery", crd.Spec.Names.Plural) 968 }, nil 969 })) 970 framework.ExpectNoError(err) 971 ginkgo.By(fmt.Sprintf("Successfully created Custom Resource Definition: %v", crd)) 972 } 973 return crd 974 } 975 976 func CreateCustomSubresourceInstance(ctx context.Context, namespace, name string, client dynamic.ResourceInterface, definition *apiextensionsv1.CustomResourceDefinition) (*unstructured.Unstructured, error) { 977 instance := &unstructured.Unstructured{ 978 Object: map[string]interface{}{ 979 "apiVersion": crdGroup + "/" + crdVersion, 980 "kind": crdKind, 981 "metadata": map[string]interface{}{ 982 "namespace": namespace, 983 "name": name, 984 }, 985 "spec": map[string]interface{}{ 986 "num": int64(1), 987 "replicas": int64(1), 988 }, 989 "status": map[string]interface{}{ 990 "replicas": int64(1), 991 "selector": "name=" + name, 992 }, 993 }, 994 } 995 instance, err := client.Create(ctx, instance, metav1.CreateOptions{}) 996 if err != nil { 997 framework.Logf("%#v", instance) 998 return nil, err 999 } 1000 createdObjectMeta, err := meta.Accessor(instance) 1001 if err != nil { 1002 return nil, fmt.Errorf("Error while creating object meta: %w", err) 1003 } 1004 if len(createdObjectMeta.GetUID()) == 0 { 1005 return nil, fmt.Errorf("Missing UUID: %v", instance) 1006 } 1007 ginkgo.By(fmt.Sprintf("Successfully created instance of CRD of kind %v: %v", definition.Kind, instance)) 1008 return instance, nil 1009 }