github.com/1aal/kubeblocks@v0.0.0-20231107070852-e1c03e598921/controllers/apps/components/component_test.go

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd
This file is part of KubeBlocks project
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package components

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	apiresource "k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	appsv1alpha1 "github.com/1aal/kubeblocks/apis/apps/v1alpha1"
	workloads "github.com/1aal/kubeblocks/apis/workloads/v1alpha1"
	"github.com/1aal/kubeblocks/pkg/constant"
	"github.com/1aal/kubeblocks/pkg/controller/builder"
	"github.com/1aal/kubeblocks/pkg/controller/graph"
	"github.com/1aal/kubeblocks/pkg/controller/model"
	intctrlutil "github.com/1aal/kubeblocks/pkg/controllerutil"
	"github.com/1aal/kubeblocks/pkg/generics"
	testapps "github.com/1aal/kubeblocks/pkg/testutil/apps"
	testk8s "github.com/1aal/kubeblocks/pkg/testutil/k8s"
)

var _ = Describe("Component", func() {
	const (
		statefulCompName    = "stateful"
		statefulCompDefName = "stateful"
	)

	var (
		random         = testCtx.GetRandomStr()
		clusterDefName = "test-clusterdef-" + random
		clusterVerName = "test-clusterver-" + random
		clusterName    = "test-cluster" + random
		clusterDefObj  *appsv1alpha1.ClusterDefinition
		clusterVerObj  *appsv1alpha1.ClusterVersion
		clusterObj     *appsv1alpha1.Cluster
		reqCtx         intctrlutil.RequestCtx

		defaultStorageClass *storagev1.StorageClass

		defaultReplicas       = 2
		defaultVolumeSize     = "2Gi"
		defaultVolumeQuantity = apiresource.MustParse(defaultVolumeSize)

		podAnnotationKey4Test = fmt.Sprintf("%s-test", constant.ComponentReplicasAnnotationKey)
	)

	cleanAll := func() {
		// Must wait until all resources are deleted and no longer exist before the test cases start;
		// otherwise, if a test later needs to create new resource objects with the same names, it may,
		// in race conditions, find the old objects still present and fail to create the new ones.
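		// ClearResourcesWithRemoveFinalizerOption is used below so that lingering finalizers
		// do not block deletion of the test objects.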
80 By("clean resources") 81 // delete cluster(and all dependent sub-resources), clusterversion and clusterdef 82 testapps.ClearClusterResources(&testCtx) 83 84 // clear rest resources 85 inNS := client.InNamespace(testCtx.DefaultNamespace) 86 ml := client.HasLabels{testCtx.TestObjLabelKey} 87 // namespaced resources 88 testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.RSMSignature, true, inNS, ml) 89 testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.StatefulSetSignature, true, inNS, ml) 90 testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml) 91 testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml) 92 // non-namespaced 93 testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeSignature, true, inNS, ml) 94 testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml) 95 } 96 97 BeforeEach(cleanAll) 98 99 AfterEach(cleanAll) 100 101 setup := func() { 102 defaultStorageClass = testk8s.CreateMockStorageClass(&testCtx, testk8s.DefaultStorageClassName) 103 Expect(*defaultStorageClass.AllowVolumeExpansion).Should(BeTrue()) 104 105 clusterDefObj = testapps.NewClusterDefFactory(clusterDefName). 106 AddComponentDef(testapps.StatefulMySQLComponent, statefulCompDefName). 107 GetObject() 108 109 clusterVerObj = testapps.NewClusterVersionFactory(clusterVerName, clusterDefObj.GetName()). 110 AddComponentVersion(statefulCompDefName).AddContainerShort(testapps.DefaultMySQLContainerName, testapps.ApeCloudMySQLImage). 111 GetObject() 112 113 clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefObj.Name, clusterVerObj.Name). 114 AddComponent(statefulCompName, statefulCompDefName). 115 SetReplicas(int32(defaultReplicas)). 116 AddVolumeClaimTemplate(testapps.LogVolumeName, testapps.NewPVCSpec(defaultVolumeSize)). 117 AddVolumeClaimTemplate(testapps.DataVolumeName, testapps.NewPVCSpec(defaultVolumeSize)). 118 AddUserSecretVolume("secret_volumes", "/opt/secrets", "secret_name", "mysql"). 119 AddUserConfigmapVolume("configmap_volumes", "/opt/scripts", "user_config_name", "mysql"). 
			GetObject()

		clusterObj.SetUID(types.UID(clusterObj.Name))
		reqCtx = intctrlutil.RequestCtx{Ctx: ctx, Log: logger, Recorder: recorder}
	}

	labels := func() client.MatchingLabels {
		return client.MatchingLabels{
			constant.AppManagedByLabelKey:   constant.AppName,
			constant.AppInstanceLabelKey:    clusterObj.Name,
			constant.KBAppComponentLabelKey: statefulCompName,
		}
	}

	spec := func() *appsv1alpha1.ClusterComponentSpec {
		for i, v := range clusterObj.Spec.ComponentSpecs {
			if v.Name == statefulCompName {
				return &clusterObj.Spec.ComponentSpecs[i]
			}
		}
		return nil
	}

	status := func() appsv1alpha1.ClusterComponentStatus {
		return clusterObj.Status.Components[statefulCompName]
	}

	newDAGWithPlaceholder := func(namespace, clusterName, compName string) *graph.DAG {
		root := builder.NewReplicatedStateMachineBuilder(namespace, fmt.Sprintf("%s-%s", clusterName, compName)).GetObject()
		dag := graph.NewDAG()
		model.NewGraphClient(nil).Root(dag, nil, root, nil)
		return dag
	}

	applyChanges := func(ctx context.Context, cli client.Client, dag *graph.DAG) error {
		walking := func(v graph.Vertex) error {
			node, ok := v.(*model.ObjectVertex)
			Expect(ok).Should(BeTrue())

			_, ok = node.Obj.(*appsv1alpha1.Cluster)
			Expect(!ok || *node.Action == model.NOOP).Should(BeTrue())

			switch *node.Action {
			case model.CREATE:
				controllerutil.AddFinalizer(node.Obj, constant.DBClusterFinalizerName)
				// Use testCtx.Create since it adds the label that is later used to clear resources.
				err := testCtx.Create(ctx, node.Obj)
				if err != nil && !apierrors.IsAlreadyExists(err) {
					return err
				}
			case model.UPDATE:
				err := cli.Update(ctx, node.Obj)
				if err != nil && !apierrors.IsNotFound(err) {
					return err
				}
			case model.PATCH:
				patch := client.MergeFrom(node.OriObj)
				err := cli.Patch(ctx, node.Obj, patch)
				if err != nil && !apierrors.IsNotFound(err) {
					return err
				}
			case model.DELETE:
				if controllerutil.RemoveFinalizer(node.Obj, constant.DBClusterFinalizerName) {
					err := cli.Update(ctx, node.Obj)
					if err != nil && !apierrors.IsNotFound(err) {
						return err
					}
				}
				if _, ok := node.Obj.(*appsv1alpha1.Cluster); !ok {
					err := cli.Delete(ctx, node.Obj)
					if err != nil && !apierrors.IsNotFound(err) {
						return err
					}
				}
			case model.STATUS:
				patch := client.MergeFrom(node.OriObj)
				if err := cli.Status().Patch(ctx, node.Obj, patch); err != nil {
					return err
				}
			case model.NOOP:
				// nothing to do
			}
			return nil
		}
		if dag.Root() != nil {
			return dag.WalkReverseTopoOrder(walking, nil)
		} else {
			withRoot := graph.NewDAG()
			model.NewGraphClient(cli).Root(withRoot, nil, &appsv1alpha1.Cluster{}, model.ActionNoopPtr())
			withRoot.Merge(dag)
			return withRoot.WalkReverseTopoOrder(walking, nil)
		}
	}

	newComponent := func(compName string) (Component, *graph.DAG) {
		dag := newDAGWithPlaceholder(testCtx.DefaultNamespace, clusterName, compName)
		comp, err := NewComponent(reqCtx, testCtx.Cli, clusterDefObj, clusterVerObj, clusterObj, compName, dag)
		Expect(comp).ShouldNot(BeNil())
		Expect(err).Should(Succeed())
		return comp, dag
	}

	createComponent := func() error {
		comp, dag := newComponent(statefulCompName)
		if err := comp.Create(reqCtx, testCtx.Cli); err != nil {
			return err
		}
		return applyChanges(testCtx.Ctx,
			testCtx.Cli, dag)
	}

	deleteComponent := func() error {
		comp, dag := newComponent(statefulCompName)
		if err := comp.Delete(reqCtx, testCtx.Cli); err != nil {
			return err
		}
		return applyChanges(testCtx.Ctx, testCtx.Cli, dag)
	}

	updateComponent := func() error {
		comp, dag := newComponent(statefulCompName)
		comp.GetCluster().Generation = comp.GetCluster().Status.ObservedGeneration + 1
		if err := comp.Update(reqCtx, testCtx.Cli); err != nil {
			return err
		}
		return applyChanges(testCtx.Ctx, testCtx.Cli, dag)
	}

	retryUpdateComponent := func() error {
		comp, dag := newComponent(statefulCompName)
		// don't update the cluster generation
		if err := comp.Update(reqCtx, testCtx.Cli); err != nil {
			return err
		}
		return applyChanges(testCtx.Ctx, testCtx.Cli, dag)
	}

	statusComponent := func() error {
		comp, dag := newComponent(statefulCompName)
		comp.GetCluster().Status.ObservedGeneration = comp.GetCluster().Generation
		if err := comp.Status(reqCtx, testCtx.Cli); err != nil {
			return err
		}
		return applyChanges(testCtx.Ctx, testCtx.Cli, dag)
	}

	pvcKey := func(clusterName, compName, vctName string, ordinal int) types.NamespacedName {
		return types.NamespacedName{
			Namespace: testCtx.DefaultNamespace,
			Name:      fmt.Sprintf("%s-%s-%s-%d", vctName, clusterName, compName, ordinal),
		}
	}

	pvKey := func(clusterName, compName, vctName string, ordinal int) types.NamespacedName {
		return types.NamespacedName{
			Namespace: testCtx.DefaultNamespace,
			Name:      fmt.Sprintf("pvc-%s-%s-%s-%d", clusterName, compName, vctName, ordinal),
		}
	}

	createPVCs := func() {
		for _, vct := range spec().VolumeClaimTemplates {
			for i := 0; i < int(spec().Replicas); i++ {
				var (
					pvcName = pvcKey(clusterName, statefulCompName, vct.Name, i).Name
					pvName  = pvKey(clusterName, statefulCompName, vct.Name, i).Name
				)
				pvc := testapps.NewPersistentVolumeClaimFactory(testCtx.DefaultNamespace, pvcName, clusterName, statefulCompName, vct.Name).
					AddLabelsInMap(labels()).
					SetStorageClass(defaultStorageClass.Name).
					SetStorage(defaultVolumeSize).
					SetVolumeName(pvName).
					CheckedCreate(&testCtx).
					GetObject()
				testapps.NewPersistentVolumeFactory(testCtx.DefaultNamespace, pvName, pvcName).
					SetStorage(defaultVolumeSize).
					SetClaimRef(pvc).
					CheckedCreate(&testCtx)
				Eventually(testapps.GetAndChangeObjStatus(&testCtx, pvcKey(clusterName, statefulCompName, vct.Name, i),
					func(pvc *corev1.PersistentVolumeClaim) {
						pvc.Status.Phase = corev1.ClaimBound
						if pvc.Status.Capacity == nil {
							pvc.Status.Capacity = corev1.ResourceList{}
						}
						pvc.Status.Capacity[corev1.ResourceStorage] = pvc.Spec.Resources.Requests[corev1.ResourceStorage]
					})).Should(Succeed())
				Eventually(testapps.GetAndChangeObjStatus(&testCtx, pvKey(clusterName, statefulCompName, vct.Name, i),
					func(pv *corev1.PersistentVolume) {
						pv.Status.Phase = corev1.VolumeBound
					})).Should(Succeed())
			}
		}
	}

	rsmKey := func() types.NamespacedName {
		return types.NamespacedName{
			Namespace: testCtx.DefaultNamespace,
			Name:      fmt.Sprintf("%s-%s", clusterObj.GetName(), statefulCompName),
		}
	}

	rsm := func() *workloads.ReplicatedStateMachine {
		rsm := &workloads.ReplicatedStateMachine{}
		Expect(testCtx.Cli.Get(testCtx.Ctx, rsmKey(), rsm)).Should(Succeed())
		return rsm
	}

	stsKey := func() types.NamespacedName {
		return types.NamespacedName{
			Namespace: testCtx.DefaultNamespace,
			Name:      fmt.Sprintf("%s-%s", clusterObj.GetName(), statefulCompName),
		}
	}

	sts := func() *appsv1.StatefulSet {
		sts := &appsv1.StatefulSet{}
		Expect(testCtx.Cli.Get(testCtx.Ctx, stsKey(), sts)).Should(Succeed())
		return sts
	}

	createSts := func() {
		rsmKey := rsmKey()
		testapps.NewStatefulSetFactory(rsmKey.Namespace, rsmKey.Name, clusterName, statefulCompName).
			SetReplicas(spec().Replicas).
			CheckedCreate(&testCtx)
	}

	pods := func() []*corev1.Pod {
		objs, err := listObjWithLabelsInNamespace(testCtx.Ctx, testCtx.Cli,
			generics.PodSignature, testCtx.DefaultNamespace, labels())
		Expect(err).Should(Succeed())
		return objs
	}

	createPods := func() {
		for i := 0; i < int(spec().Replicas); i++ {
			pod := &corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      stsKey().Name + "-" + strconv.Itoa(i),
					Namespace: testCtx.DefaultNamespace,
					Labels: map[string]string{
						constant.AppManagedByLabelKey:   constant.AppName,
						constant.AppNameLabelKey:        clusterDefName,
						constant.AppInstanceLabelKey:    clusterName,
						constant.KBAppComponentLabelKey: statefulCompName,
						// appsv1.ControllerRevisionHashLabelKey: "mock-version",
					},
					Annotations: map[string]string{
						podAnnotationKey4Test: fmt.Sprintf("%d", spec().Replicas),
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:  "mock-container",
						Image: "mock-container",
					}},
				},
			}
			Expect(testCtx.CheckedCreateObj(testCtx.Ctx, pod)).Should(Succeed())
		}
	}

	createWorkloads := func() {
		createSts()
		createPods()
	}

	mockWorkloadsReady := func() {
		rsmObj := rsm()
		stsObj := sts()
		podObjs := pods()
		Expect(testapps.ChangeObjStatus(&testCtx, rsmObj, func() { testk8s.MockRSMReady(rsmObj, podObjs...) })).Should(Succeed())
		Expect(testapps.ChangeObjStatus(&testCtx, stsObj, func() { testk8s.MockStatefulSetReady(stsObj) })).Should(Succeed())
		for _, pod := range podObjs {
			Expect(testapps.ChangeObjStatus(&testCtx, pod, func() { testk8s.MockPodAvailable(pod, metav1.NewTime(time.Now())) })).Should(Succeed())
		}
	}

	mockWorkloadsUpdating := func() {
		vct := spec().VolumeClaimTemplates[0]
		quantity := vct.Spec.Resources.Requests.Storage()
		quantity.Add(apiresource.MustParse("1Gi"))
		for i := 0; i < int(spec().Replicas); i++ {
			Eventually(testapps.GetAndChangeObj(&testCtx, pvcKey(clusterName, statefulCompName, vct.Name, i),
				func(pvc *corev1.PersistentVolumeClaim) {
					pvc.Spec.Resources.Requests[corev1.ResourceStorage] = *quantity
				})).Should(Succeed())
		}
	}

	mockWorkloadsAbnormal := func() {
		// mock pod failures
		for _, pod := range pods() {
			podKey := types.NamespacedName{
				Namespace: testCtx.DefaultNamespace,
				Name:      pod.GetName(),
			}
			Eventually(testapps.GetAndChangeObjStatus(&testCtx, podKey, func(pod *corev1.Pod) {
				cond := corev1.PodCondition{
					Type:               corev1.PodScheduled,
					Status:             corev1.ConditionFalse,
					LastTransitionTime: metav1.NewTime(time.Now().Add(podScheduledFailedTimeout * -2)),
				}
				pod.Status.Conditions = append(pod.Status.Conditions, cond)
			})).Should(Succeed())
		}
		// mock isRunning as false
		Eventually(testapps.GetAndChangeObjStatus(&testCtx, rsmKey(), func(rsm *workloads.ReplicatedStateMachine) {
			if rsm.Status.ReadyReplicas == *rsm.Spec.Replicas {
				rsm.Status.ReadyReplicas--
			}
		})).Should(Succeed())
	}

	mockWorkloadsFailed := func() {
		// mock pod failures and isAvailable as false
		for _, pod := range pods() {
			podKey := types.NamespacedName{
				Namespace: testCtx.DefaultNamespace,
				Name:      pod.GetName(),
			}
			Eventually(testapps.GetAndChangeObjStatus(&testCtx, podKey, func(pod *corev1.Pod) {
				cond1 := corev1.PodCondition{
					Type:   corev1.PodReady,
					Status: corev1.ConditionFalse,
				}
				cond2 := corev1.PodCondition{
					Type:               corev1.PodScheduled,
					Status:             corev1.ConditionFalse,
					LastTransitionTime: metav1.NewTime(time.Now().Add(podScheduledFailedTimeout * -2)),
				}
				pod.Status.Conditions = []corev1.PodCondition{cond1, cond2}
			})).Should(Succeed())
		}

		// mock isRunning as false
		Eventually(testapps.GetAndChangeObjStatus(&testCtx, rsmKey(), func(rsm *workloads.ReplicatedStateMachine) {
			rsm.Status.ReadyReplicas--
		})).Should(Succeed())
	}

	Context("new component object", func() {
		BeforeEach(func() {
			setup()
		})

		It("ok", func() {
			By("new cluster component ok")
			comp, _ := newComponent(statefulCompName)
			Expect(comp.GetNamespace()).Should(Equal(clusterObj.GetNamespace()))
			Expect(comp.GetClusterName()).Should(Equal(clusterObj.GetName()))
			Expect(comp.GetName()).Should(Equal(statefulCompName))
			Expect(comp.GetCluster()).Should(Equal(clusterObj))
			Expect(comp.GetClusterVersion()).Should(Equal(clusterVerObj))
			Expect(comp.GetSynthesizedComponent()).ShouldNot(BeNil())
		})

		It("w/o component definition", func() {
			By("new cluster component without component definition")
			clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefObj.Name, clusterVerObj.Name).
				AddComponent(statefulCompName, statefulCompDefName+random). // with a random component def name
				GetObject()
			_, err := NewComponent(reqCtx, testCtx.Cli, clusterDefObj, clusterVerObj, clusterObj, statefulCompName, graph.NewDAG())
			Expect(err).ShouldNot(Succeed())
		})

		It("w/o component definition and spec", func() {
			By("new cluster component without component definition and spec")
			clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefObj.Name, clusterVerObj.Name).
				AddComponent(statefulCompName+random, statefulCompDefName+random). // with a random component spec and def name
				GetObject()
			comp, err := NewComponent(reqCtx, testCtx.Cli, clusterDefObj, clusterVerObj, clusterObj, statefulCompName, graph.NewDAG())
			Expect(comp).Should(BeNil())
			Expect(err).Should(BeNil())
		})
	})

	Context("create and delete component", func() {
		BeforeEach(func() {
			setup()
			Expect(createComponent()).Should(Succeed())
		})

		It("create component resources", func() {
			By("check workload resources created")
			Eventually(testapps.List(&testCtx, generics.RSMSignature, labels())).Should(HaveLen(1))
		})

		It("delete component doesn't affect resources", func() {
			By("delete the component")
			Expect(deleteComponent()).Should(Succeed())

			By("check workload resources still exist")
			Eventually(testapps.List(&testCtx, generics.RSMSignature, labels())).Should(HaveLen(1))
		})
	})

	Context("update component", func() {
		BeforeEach(func() {
			setup()

			Expect(createComponent()).Should(Succeed())

			// create all PVCs and PVs
			createPVCs()
		})

		It("update w/o changes", func() {
			By("update without change")
			Expect(updateComponent()).Should(Succeed())

			By("check the workload not updated")
			Consistently(testapps.CheckObj(&testCtx, rsmKey(), func(g Gomega, rsm *workloads.ReplicatedStateMachine) {
				g.Expect(rsm.GetGeneration()).Should(Equal(int64(1)))
			})).Should(Succeed())
		})

		It("scale out", func() {
			By("scale out replicas by 1")
			replicas := spec().Replicas
			spec().Replicas = spec().Replicas + 1

			By("update to create new PVCs, the workload not updated")
			// since we don't set a backup policy, the dummy clone policy will be used
			Expect(updateComponent()).Should(Succeed())
			Consistently(testapps.CheckObj(&testCtx, rsmKey(), func(g Gomega, rsm *workloads.ReplicatedStateMachine) {
				g.Expect(rsm.GetGeneration()).Should(Equal(int64(1)))
				g.Expect(*rsm.Spec.Replicas).Should(Equal(replicas))
			})).Should(Succeed())
			expectedCnt := int(spec().Replicas) * len(spec().VolumeClaimTemplates)
			Eventually(testapps.List(&testCtx, generics.PersistentVolumeClaimSignature, labels())).Should(HaveLen(expectedCnt))

			By("update again to apply changes to the workload")
			Expect(retryUpdateComponent()).Should(Succeed())

			By("check the workload updated")
			Eventually(testapps.CheckObj(&testCtx, rsmKey(), func(g Gomega, rsm *workloads.ReplicatedStateMachine) {
				g.Expect(rsm.GetGeneration()).Should(Equal(int64(2)))
				g.Expect(*rsm.Spec.Replicas).Should(Equal(spec().Replicas))
			})).Should(Succeed())
		})

		It("TODO - scale out out-of-range", func() {
		})

		It("scale in", func() {
			By("scale in replicas by 1")
			Expect(spec().Replicas > 1).Should(BeTrue())
			replicas := spec().Replicas
			spec().Replicas = spec().Replicas - 1
			Expect(updateComponent()).Should(Succeed())

			By("check the workload updated")
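			// Unlike scale-out, the replica decrease is applied to the workload in a single update:
			// the RSM generation is bumped to 2 and its replica count matches the component spec.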
			Eventually(testapps.CheckObj(&testCtx, rsmKey(), func(g Gomega, rsm *workloads.ReplicatedStateMachine) {
				g.Expect(rsm.GetGeneration()).Should(Equal(int64(2)))
				g.Expect(*rsm.Spec.Replicas).Should(Equal(spec().Replicas))
			})).Should(Succeed())

			By("check the PVC logically deleted")
			Eventually(func(g Gomega) {
				objs, err := listObjWithLabelsInNamespace(testCtx.Ctx, testCtx.Cli,
					generics.PersistentVolumeClaimSignature, testCtx.DefaultNamespace, labels())
				g.Expect(err).Should(Succeed())
				g.Expect(objs).Should(HaveLen(int(replicas) * len(spec().VolumeClaimTemplates)))
				for _, pvc := range objs {
					if strings.HasSuffix(pvc.GetName(), fmt.Sprintf("-%d", replicas-1)) {
						g.Expect(pvc.GetDeletionTimestamp()).ShouldNot(BeNil())
					} else {
						g.Expect(pvc.GetDeletionTimestamp()).Should(BeNil())
					}
				}
			}).Should(Succeed())
		})

		It("scale in to zero", func() {
			By("scale in replicas to 0")
			replicas := spec().Replicas
			spec().Replicas = 0
			Expect(updateComponent()).Should(Succeed())

			By("check the workload updated")
			Eventually(testapps.CheckObj(&testCtx, rsmKey(), func(g Gomega, rsm *workloads.ReplicatedStateMachine) {
				g.Expect(rsm.GetGeneration()).Should(Equal(int64(2)))
				g.Expect(*rsm.Spec.Replicas).Should(Equal(spec().Replicas))
			})).Should(Succeed())

			By("check all the PVCs unchanged")
			Consistently(func(g Gomega) {
				objs, err := listObjWithLabelsInNamespace(testCtx.Ctx, testCtx.Cli,
					generics.PersistentVolumeClaimSignature, testCtx.DefaultNamespace, labels())
				g.Expect(err).Should(Succeed())
				g.Expect(objs).Should(HaveLen(int(replicas) * len(spec().VolumeClaimTemplates)))
				for _, pvc := range objs {
					g.Expect(pvc.GetDeletionTimestamp()).Should(BeNil())
				}
			}).Should(Succeed())
		})

		It("TODO - scale up", func() {
		})

		It("TODO - scale up out-of-limit", func() {
		})

		It("TODO - scale down", func() {
		})

		It("expand volume", func() {
			By("increase the log volume size by 1Gi")
			vct := spec().VolumeClaimTemplates[0]
			quantity := vct.Spec.Resources.Requests.Storage()
			quantity.Add(apiresource.MustParse("1Gi"))
			spec().VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = *quantity
			Expect(updateComponent()).Should(Succeed())

			By("check all the log PVCs updated")
			Eventually(func(g Gomega) {
				objs, err := listObjWithLabelsInNamespace(testCtx.Ctx, testCtx.Cli,
					generics.PersistentVolumeClaimSignature, testCtx.DefaultNamespace, labels())
				g.Expect(err).Should(Succeed())
				g.Expect(objs).Should(HaveLen(int(spec().Replicas) * len(spec().VolumeClaimTemplates)))
				for _, pvc := range objs {
					if strings.HasPrefix(pvc.GetName(), vct.Name) {
						g.Expect(pvc.Spec.Resources.Requests.Storage().Cmp(defaultVolumeQuantity)).Should(Equal(1))
					} else {
						g.Expect(pvc.Spec.Resources.Requests.Storage().Cmp(defaultVolumeQuantity)).Should(Equal(0))
					}
				}
			}).Should(Succeed())
		})

		It("shrink volume", func() {
			By("shrink the log volume by 1Gi")
			quantity := spec().VolumeClaimTemplates[0].Spec.Resources.Requests.Storage()
			quantity.Sub(apiresource.MustParse("1Gi"))
			spec().VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = *quantity
			Expect(updateComponent()).Should(HaveOccurred())

			By("check all the PVCs unchanged")
			Consistently(func(g Gomega) {
				objs, err := listObjWithLabelsInNamespace(testCtx.Ctx, testCtx.Cli,
					generics.PersistentVolumeClaimSignature, testCtx.DefaultNamespace, labels())
				g.Expect(err).Should(Succeed())
				g.Expect(objs).Should(HaveLen(int(spec().Replicas) * len(spec().VolumeClaimTemplates)))
				for _, pvc := range objs {
					g.Expect(pvc.Spec.Resources.Requests.Storage().Cmp(defaultVolumeQuantity)).Should(Equal(0))
				}
			}).Should(Succeed())
		})

		It("rollback volume size during expansion", func() {
			By("increase the log volume size by 1Gi")
			vct := spec().VolumeClaimTemplates[0]
			quantity := vct.Spec.Resources.Requests.Storage()
			quantity.Add(apiresource.MustParse("1Gi"))
			spec().VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = *quantity
			Expect(updateComponent()).Should(Succeed())

			By("check all the log PVCs updating")
			Eventually(func(g Gomega) {
				objs, err := listObjWithLabelsInNamespace(testCtx.Ctx, testCtx.Cli,
					generics.PersistentVolumeClaimSignature, testCtx.DefaultNamespace, labels())
				g.Expect(err).Should(Succeed())
				g.Expect(objs).Should(HaveLen(int(spec().Replicas) * len(spec().VolumeClaimTemplates)))
				for _, pvc := range objs {
					if strings.HasPrefix(pvc.GetName(), vct.Name) {
						g.Expect(pvc.Spec.Resources.Requests.Storage().Cmp(*pvc.Status.Capacity.Storage())).Should(Equal(1))
					} else {
						g.Expect(pvc.Spec.Resources.Requests.Storage().Cmp(*pvc.Status.Capacity.Storage())).Should(Equal(0))
					}
				}
			}).Should(Succeed())

			By("reset the log volumes to the original size")
			spec().VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = defaultVolumeQuantity
			Expect(updateComponent()).Should(Succeed())

			By("check all the PVCs rolled back")
			Eventually(func(g Gomega) {
				objs, err := listObjWithLabelsInNamespace(testCtx.Ctx, testCtx.Cli,
					generics.PersistentVolumeClaimSignature, testCtx.DefaultNamespace, labels())
				g.Expect(err).Should(Succeed())
				g.Expect(objs).Should(HaveLen(int(spec().Replicas) * len(spec().VolumeClaimTemplates)))
				for _, pvc := range objs {
					g.Expect(pvc.Spec.Resources.Requests.Storage().Cmp(defaultVolumeQuantity)).Should(Equal(0))
				}
			}).Should(Succeed())
		})

		It("TODO - rollback volume size during expansion - re-create PVC error", func() {
		})

		It("TODO - general workload update", func() {
			Expect(updateComponent()).Should(Succeed())
		})

		It("TODO - KB system images update", func() {
			Expect(updateComponent()).Should(Succeed())
		})

		It("TODO - update strategy", func() {
			Expect(updateComponent()).Should(Succeed())
		})
	})

	Context("status component", func() {
		BeforeEach(func() {
			setup()

			Expect(createComponent()).Should(Succeed())

			// create all PVCs and PVs
			createPVCs()

			// create the sts and all Pods
			createWorkloads()
		})

		It("provisioning", func() {
			By("check component status as CREATING")
			Expect(statusComponent()).Should(Succeed())
			Expect(status().Phase).Should(Equal(appsv1alpha1.CreatingClusterCompPhase))
		})

		It("deleting", func() {
			By("delete underlying workload w/o removing finalizers")
			Expect(testCtx.Cli.Delete(testCtx.Ctx, rsm())).Should(Succeed())

			By("check component status as DELETING")
			Expect(statusComponent()).Should(Succeed())
			Expect(status().Phase).Should(Equal(appsv1alpha1.DeletingClusterCompPhase))
		})

		It("running", func() {
			By("mock workloads ready")
			mockWorkloadsReady()

			By("check component status as RUNNING")
RUNNING") 757 Expect(statusComponent()).Should(Succeed()) 758 Expect(status().Phase).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) 759 }) 760 761 It("updating", func() { 762 By("mock workloads ready and component status as RUNNING") 763 mockWorkloadsReady() 764 Expect(statusComponent()).Should(Succeed()) 765 766 By("mock workloads updating") 767 mockWorkloadsUpdating() 768 769 By("check component status as UPDATING") 770 Expect(statusComponent()).Should(Succeed()) 771 Expect(status().Phase).Should(Equal(appsv1alpha1.UpdatingClusterCompPhase)) 772 }) 773 774 It("stopping", func() { 775 By("set replicas as 0 (stop or scale-in to 0)") 776 replicas := spec().Replicas 777 spec().Replicas = 0 778 Expect(updateComponent()).Should(Succeed()) 779 780 By("wait and check the workload updated") 781 Eventually(testapps.CheckObj(&testCtx, rsmKey(), func(g Gomega, rsm *workloads.ReplicatedStateMachine) { 782 g.Expect(rsm.GetGeneration()).Should(Equal(int64(2))) 783 g.Expect(*rsm.Spec.Replicas).Should(Equal(spec().Replicas)) 784 })).Should(Succeed()) 785 Eventually(testapps.List(&testCtx, generics.PodSignature, labels())).Should(HaveLen(int(replicas))) 786 787 By("check component status as STOPPING") 788 Expect(statusComponent()).Should(Succeed()) 789 Expect(status().Phase).Should(Equal(appsv1alpha1.StoppingClusterCompPhase)) 790 }) 791 792 It("stopped", func() { 793 By("set replicas as 0 (stop or scale-in to 0)") 794 replicas := spec().Replicas 795 spec().Replicas = 0 796 Expect(updateComponent()).Should(Succeed()) 797 798 By("wait and check the workload updated") 799 Eventually(testapps.CheckObj(&testCtx, rsmKey(), func(g Gomega, rsm *workloads.ReplicatedStateMachine) { 800 g.Expect(rsm.GetGeneration()).Should(Equal(int64(2))) 801 g.Expect(*rsm.Spec.Replicas).Should(Equal(spec().Replicas)) 802 })).Should(Succeed()) 803 Eventually(testapps.List(&testCtx, generics.PodSignature, labels())).Should(HaveLen(int(replicas))) 804 805 By("delete all pods") 806 inNS := client.InNamespace(testCtx.DefaultNamespace) 807 ml := client.HasLabels{testCtx.TestObjLabelKey} 808 testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml) 809 Eventually(testapps.List(&testCtx, generics.PodSignature, labels())).Should(HaveLen(0)) 810 811 By("check component status as STOPPED") 812 Expect(statusComponent()).Should(Succeed()) 813 Expect(status().Phase).Should(Equal(appsv1alpha1.StoppedClusterCompPhase)) 814 }) 815 816 It("failure at provisioning", func() { 817 By("mock workloads ready, but keep status phase as CREATING") 818 mockWorkloadsReady() 819 820 By("mock workloads abnormal") 821 mockWorkloadsAbnormal() 822 823 By("check component status as CREATING") 824 Expect(statusComponent()).Should(Succeed()) 825 // TODO: expect the status phase as CREATING 826 // Expect(status().Phase).Should(Equal(appsv1alpha1.CreatingClusterCompPhase)) 827 Expect(status().Phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase)) 828 }) 829 830 It("failure at deleting", func() { 831 By("mock workloads ready and component status as RUNNING") 832 mockWorkloadsReady() 833 Expect(statusComponent()).Should(Succeed()) 834 835 By("mock workloads deleting and component status as DELETING") 836 Expect(testCtx.Cli.Delete(testCtx.Ctx, rsm())).Should(Succeed()) 837 Expect(statusComponent()).Should(Succeed()) 838 839 By("mock workloads abnormal") 840 mockWorkloadsAbnormal() 841 842 By("check component status as DELETING") 843 Expect(statusComponent()).Should(Succeed()) 844 
			Expect(status().Phase).Should(Equal(appsv1alpha1.DeletingClusterCompPhase))
		})

		It("failure at running - abnormal", func() {
			By("mock workloads ready and component status as RUNNING")
			mockWorkloadsReady()
			Expect(statusComponent()).Should(Succeed())

			By("mock workloads abnormal")
			mockWorkloadsAbnormal()

			By("check component status as ABNORMAL")
			Expect(statusComponent()).Should(Succeed())
			Expect(status().Phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase))
		})

		It("failure at running - failed", func() {
			By("mock workloads ready and component status as RUNNING")
			mockWorkloadsReady()
			Expect(statusComponent()).Should(Succeed())

			By("mock workloads failed")
			mockWorkloadsFailed()

			By("check component status as FAILED")
			Expect(statusComponent()).Should(Succeed())
			Expect(status().Phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase))
		})

		It("failure at updating - abnormal", func() {
			By("mock workloads ready and component status as RUNNING")
			mockWorkloadsReady()
			Expect(statusComponent()).Should(Succeed())

			By("mock workloads updating")
			mockWorkloadsUpdating()
			Expect(statusComponent()).Should(Succeed())

			By("mock workloads abnormal")
			mockWorkloadsAbnormal()

			By("check component status as ABNORMAL")
			Expect(statusComponent()).Should(Succeed())
			Expect(status().Phase).Should(Equal(appsv1alpha1.AbnormalClusterCompPhase))
		})

		It("failure at updating - failed", func() {
			By("mock workloads ready and component status as RUNNING")
			mockWorkloadsReady()
			Expect(statusComponent()).Should(Succeed())

			By("mock workloads updating")
			mockWorkloadsUpdating()
			Expect(statusComponent()).Should(Succeed())

			By("mock workloads failed")
			mockWorkloadsFailed()

			By("check component status as FAILED")
			Expect(statusComponent()).Should(Succeed())
			Expect(status().Phase).Should(Equal(appsv1alpha1.FailedClusterCompPhase))
		})
	})
})