github.com/1aal/kubeblocks@v0.0.0-20231107070852-e1c03e598921/controllers/apps/systemaccount_controller.go

/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package apps

import (
	"context"
	"fmt"
	"strings"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/log"

	appsv1alpha1 "github.com/1aal/kubeblocks/apis/apps/v1alpha1"
	opsutil "github.com/1aal/kubeblocks/controllers/apps/operations/util"
	"github.com/1aal/kubeblocks/pkg/constant"
	intctrlutil "github.com/1aal/kubeblocks/pkg/controllerutil"
	lorry "github.com/1aal/kubeblocks/pkg/lorry/client"
	viper "github.com/1aal/kubeblocks/pkg/viperx"
)

// SystemAccountReconciler reconciles a SystemAccount object.
type SystemAccountReconciler struct {
	client.Client
	Scheme   *runtime.Scheme
	Recorder record.EventRecorder
}

// componentUniqueKey is used internally to uniquely identify a component, by namespace-clusterName-componentName.
type componentUniqueKey struct {
	namespace     string
	clusterName   string
	componentName string
	characterType string
}

// updateStrategy is used to specify the update strategy for a component.
type updateStrategy int8

const (
	inPlaceUpdate updateStrategy = 1
	reCreate      updateStrategy = 2
)

// SysAcctDelete, SysAcctCreate and SysAcctUnsupported are used as event reasons.
const (
	SysAcctDelete      = "SysAcctDelete"
	SysAcctCreate      = "SysAcctCreate"
	SysAcctUnsupported = "SysAcctUnsupported"
)

// Environment names for cmd config connections
const (
	kbAccountStmtEnvName     = "KB_ACCOUNT_STATEMENT"
	kbAccountEndPointEnvName = "KB_ACCOUNT_ENDPOINT"
)
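
// For illustration only: a ClusterDefinition's CmdExecutorConfig would typically reference
// these variables when the provisioning job runs the rendered statements, e.g. a hypothetical
// command such as
//
//	sh -c 'mysql -h"$KB_ACCOUNT_ENDPOINT" -e "$KB_ACCOUNT_STATEMENT"'
//
// The actual wiring is done by renderJob elsewhere in this package, so treat the command
// above as a sketch rather than a contract.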

// ENABLE_DEBUG_SYSACCOUNTS is used for debug only.
const (
	systemAccountsDebugMode       string = "ENABLE_DEBUG_SYSACCOUNTS"
	systemAccountPasswdAnnotation string = "passwd"
	systemAccountjobPrefix               = "sysacc"
)

var (
	// systemAccountLog is a logger during runtime
	systemAccountLog logr.Logger
)

func init() {
	viper.SetDefault(systemAccountsDebugMode, false)
	systemAccountLog = log.Log.WithName("systemAccountRuntime")
}

// SystemAccountController does not have a custom resource of its own; it watches the
// create/delete/update of resources such as clusters, clusterdefinitions, backuppolicies,
// jobs and secrets.
// +kubebuilder:rbac:groups=apps.kubeblocks.io,resources=clusters,verbs=get;list;watch;
// +kubebuilder:rbac:groups=apps.kubeblocks.io,resources=clusters/status,verbs=get
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=jobs/status,verbs=get
// +kubebuilder:rbac:groups=batch,resources=jobs/finalizers,verbs=update
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=secrets/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile
func (r *SystemAccountReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	reqCtx := intctrlutil.RequestCtx{
		Ctx:      ctx,
		Req:      req,
		Log:      log.FromContext(ctx).WithValues("cluster", req.NamespacedName),
		Recorder: r.Recorder,
	}
	reqCtx.Log.V(1).Info("reconcile", "cluster", req.NamespacedName)

	cluster := &appsv1alpha1.Cluster{}
	if err := r.Client.Get(reqCtx.Ctx, reqCtx.Req.NamespacedName, cluster); err != nil {
		return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "")
	}
	// cluster is under deletion, do nothing
	if !cluster.GetDeletionTimestamp().IsZero() {
		reqCtx.Log.V(1).Info("Cluster is under deletion.", "cluster", req.NamespacedName)
		// get sysaccount jobs for this cluster and delete them
		jobs := &batchv1.JobList{}
		options := client.ListOptions{}

		client.InNamespace(reqCtx.Req.Namespace).ApplyToList(&options)
		client.MatchingLabels{constant.AppInstanceLabelKey: reqCtx.Req.Name}.ApplyToList(&options)
		client.HasLabels{constant.ClusterAccountLabelKey}.ApplyToList(&options)

		if err := r.Client.List(reqCtx.Ctx, jobs, &options); err != nil {
			return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "")
		}

		for _, job := range jobs.Items {
			patch := client.MergeFrom(job.DeepCopy())
			controllerutil.RemoveFinalizer(&job, constant.DBClusterFinalizerName)
			_ = r.Client.Patch(context.Background(), &job, patch)
		}
		return intctrlutil.Reconciled()
	}

	// wait till the cluster is running
	if cluster.Status.Phase != appsv1alpha1.RunningClusterPhase {
		reqCtx.Log.V(1).Info("Cluster is not ready yet", "cluster", req.NamespacedName)
		return intctrlutil.Reconciled()
	}

	clusterdefinition := &appsv1alpha1.ClusterDefinition{}
	clusterDefNS := types.NamespacedName{Name: cluster.Spec.ClusterDefRef}
	if err := r.Client.Get(reqCtx.Ctx, clusterDefNS, clusterdefinition); err != nil {
		return intctrlutil.RequeueWithErrorAndRecordEvent(cluster, r.Recorder, err, reqCtx.Log)
	}

	clusterVersion := &appsv1alpha1.ClusterVersion{}
	if err := r.Client.Get(reqCtx.Ctx, types.NamespacedName{Name: cluster.Spec.ClusterVersionRef}, clusterVersion); err != nil {
		return intctrlutil.RequeueWithErrorAndRecordEvent(cluster, r.Recorder, err, reqCtx.Log)
	}

	componentVersions := clusterVersion.Spec.GetDefNameMappingComponents()

	// process accounts for each component
	processAccountsForComponent := func(compDef *appsv1alpha1.ClusterComponentDefinition, compDecl *appsv1alpha1.ClusterComponentSpec,
		svcEP *corev1.Endpoints, headlessEP *corev1.Endpoints) error {
		var (
			err                 error
			toCreate            appsv1alpha1.KBAccountType
			detectedK8SFacts    appsv1alpha1.KBAccountType
			detectedEngineFacts appsv1alpha1.KBAccountType
			engine              *customizedEngine
			compKey             = componentUniqueKey{
				namespace:     cluster.Namespace,
				clusterName:   cluster.Name,
				componentName: compDecl.Name,
				characterType: compDef.CharacterType,
			}
		)

		// expectations: collect accounts from default setting, cluster and cluster definition.
		toCreate = getDefaultAccounts()
		reqCtx.Log.V(1).Info("accounts to create", "cluster", req.NamespacedName, "accounts", toCreate)

		// facts: accounts that have already been created, in the form of k8s secrets.
		if detectedK8SFacts, err = r.getAccountFacts(reqCtx, compKey); err != nil {
			reqCtx.Log.Error(err, "failed to get secrets")
			return err
		}
		reqCtx.Log.V(1).Info("detected k8s facts", "cluster", req.NamespacedName, "accounts", detectedK8SFacts)

		// toCreate = toCreate - detectedK8SFacts, i.e., drop accounts that already have secrets.
		// Accounts that exist in the engine but not in k8s (toCreate ∩ detectedEngineFacts, checked
		// below) are updated or altered in place instead of being re-created.
		toCreate &= toCreate ^ detectedK8SFacts
		if toCreate == 0 {
			return nil
		}

		// facts: accounts that have already been created in the engine.
		if detectedEngineFacts, err = r.getEngineFacts(reqCtx, compKey); err != nil {
			reqCtx.Log.Error(err, "failed to get accounts", "cluster", cluster.Name, "component", compDecl.Name)
			// we don't return the error here, because we can still try to create the accounts in k8s.
		}
		reqCtx.Log.V(1).Info("detected database facts", "cluster", req.NamespacedName, "accounts", detectedEngineFacts)
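
		// To illustrate the set arithmetic above with hypothetical bit values (KBAccountType is
		// used as a bit-flag set, with the concrete flags defined in the API types):
		//   toCreate         = 0b0111  // three accounts wanted
		//   detectedK8SFacts = 0b0001  // one of them already has a secret
		//   toCreate &= toCreate ^ detectedK8SFacts  // => 0b0110, only the missing two remain
		// detectedEngineFacts then only decides, per remaining account, whether the rendered
		// statements alter an existing engine account (inPlaceUpdate) or create it (reCreate).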

		// replace KubeBlocks ENVs.
		replaceEnvsValues(cluster.Name, compDef.SystemAccounts)

		for _, account := range compDef.SystemAccounts.Accounts {
			accountID := account.Name.GetAccountID()
			if toCreate&accountID == 0 {
				continue
			}

			strategy := reCreate
			if detectedEngineFacts&accountID != 0 {
				strategy = inPlaceUpdate
			}

			switch account.ProvisionPolicy.Type {
			case appsv1alpha1.CreateByStmt:
				if engine == nil {
					execConfig := compDef.SystemAccounts.CmdExecutorConfig
					// complete execConfig with settings from the component version
					completeExecConfig(execConfig, componentVersions[compDef.Name])
					engine = newCustomizedEngine(execConfig, cluster, compDecl.Name)
				}
				reqCtx.Log.V(1).Info("create account by stmt", "cluster", req.NamespacedName, "account", account.Name, "strategy", strategy)
				if err := r.createByStmt(reqCtx, cluster, compDef, compKey, engine, account, svcEP, headlessEP, strategy); err != nil {
					return err
				}
			case appsv1alpha1.ReferToExisting:
				if err := r.createByReferringToExisting(reqCtx, cluster, compKey, account); err != nil {
					return err
				}
			}
		}
		return nil
	} // end of processAccountsForComponent

	reconcileCounter := 0
	existsOps := existsOperations(cluster)
	// for each component in the cluster
	for _, compDecl := range cluster.Spec.ComponentSpecs {
		compName := compDecl.Name
		compType := compDecl.ComponentDefRef
		for _, compDef := range clusterdefinition.Spec.ComponentDefs {
			if compType != compDef.Name || compDef.SystemAccounts == nil {
				continue
			}

			isReady, svcEP, headlessEP, err := r.isComponentReady(reqCtx, cluster.Name, compName)
			if err != nil {
				return intctrlutil.RequeueAfter(requeueDuration, reqCtx.Log, "failed to get service")
			}

			// if the component is not ready, or the cluster has ongoing operations, bump the
			// counter and continue with the next component
			if !isReady || existsOps {
				reconcileCounter++
				continue
			}

			if err := processAccountsForComponent(&compDef, &compDecl, svcEP, headlessEP); err != nil {
				reconcileCounter++
				continue
			}
		}
	}

	if reconcileCounter > 0 {
		return intctrlutil.Requeue(reqCtx.Log, "Not all components have been reconciled. Requeue request.")
	}
	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *SystemAccountReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appsv1alpha1.Cluster{}).
		Owns(&corev1.Secret{}).
		Watches(&batchv1.Job{}, r.jobCompletionHandler()).
		Complete(r)
}
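
// A rough sketch of how the pieces above fit together: a Cluster event triggers Reconcile,
// which renders provisioning Jobs for the system accounts that are still missing; the
// Watches(&batchv1.Job{}, ...) handler picks up each Job's completion, turns the password
// stored in the Job's annotation into a Secret owned by the Cluster, and removes the Job's
// finalizer; on the next reconciliation the owned Secret is counted by getAccountFacts, so
// the account is not provisioned twice.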

func (r *SystemAccountReconciler) createByStmt(reqCtx intctrlutil.RequestCtx,
	cluster *appsv1alpha1.Cluster,
	compDef *appsv1alpha1.ClusterComponentDefinition,
	compKey componentUniqueKey,
	engine *customizedEngine,
	account appsv1alpha1.SystemAccountConfig,
	svcEP *corev1.Endpoints, headlessEP *corev1.Endpoints, strategy updateStrategy) error {
	policy := account.ProvisionPolicy

	generateJobName := func() string {
		// render a job name derived from the account name, with a random suffix; fall back to a
		// shorter form when the full name would exceed the 63-character object-name limit
		randSuffix := rand.String(5)
		fullJobName := strings.Join([]string{systemAccountjobPrefix, compKey.clusterName, compKey.componentName, string(account.Name), randSuffix}, "-")
		if len(fullJobName) > 63 {
			return systemAccountjobPrefix + "-" + string(account.Name) + "-" + randSuffix
		}
		return fullJobName
	}

	stmts, passwd := getCreationStmtForAccount(compKey, compDef.SystemAccounts.PasswordConfig, account, strategy)

	for _, ep := range retrieveEndpoints(policy.Scope, svcEP, headlessEP) {
		job := renderJob(generateJobName(), engine, compKey, stmts, ep)
		controllerutil.AddFinalizer(job, constant.DBClusterFinalizerName)
		if job.Annotations == nil {
			job.Annotations = map[string]string{}
		}
		job.Annotations[systemAccountPasswdAnnotation] = passwd

		// before creating the job, adjust its attributes, such as labels and tolerations, w.r.t. the cluster info.
		if err := calibrateJobMetaAndSpec(job, cluster, compKey, account.Name); err != nil {
			return err
		}
		// update owner reference
		if err := controllerutil.SetControllerReference(cluster, job, r.Scheme); err != nil {
			return err
		}
		// create job
		if err := r.Client.Create(reqCtx.Ctx, job); err != nil {
			return err
		}
		reqCtx.Log.V(1).Info("created job", "job", job.Name, "passwd", passwd)
	}
	return nil
}

func (r *SystemAccountReconciler) createByReferringToExisting(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, key componentUniqueKey, account appsv1alpha1.SystemAccountConfig) error {
	// get the referenced secret
	secret := &corev1.Secret{}
	secretRef := account.ProvisionPolicy.SecretRef
	if err := r.Client.Get(reqCtx.Ctx, types.NamespacedName{Namespace: secretRef.Namespace, Name: secretRef.Name}, secret); err != nil {
		reqCtx.Log.Error(err, "Failed to find secret", "secret", secretRef.Name)
		return err
	}
	// and make a copy of it
	newSecret := renderSecretByCopy(key, (string)(account.Name), secret)
	if uprefErr := controllerutil.SetControllerReference(cluster, newSecret, r.Scheme); uprefErr != nil {
		return uprefErr
	}

	if err := r.Client.Create(reqCtx.Ctx, newSecret); err != nil {
		reqCtx.Log.Error(err, "Failed to create secret", "secret", newSecret.Name)
		return err
	}
	return nil
}

func (r *SystemAccountReconciler) isComponentReady(reqCtx intctrlutil.RequestCtx, clusterName string, compName string) (bool, *corev1.Endpoints, *corev1.Endpoints, error) {
	svcEP := &corev1.Endpoints{}
	serviceName := clusterName + "-" + compName

	headlessEP := &corev1.Endpoints{}
	headlessSvcName := serviceName + "-headless"

	svcErr := r.Client.Get(reqCtx.Ctx, types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: serviceName}, svcEP)
	if svcErr != nil {
		return false, nil, nil, svcErr
	}

	headlessSvcErr := r.Client.Get(reqCtx.Ctx, types.NamespacedName{Namespace: reqCtx.Req.Namespace, Name: headlessSvcName}, headlessEP)
	if headlessSvcErr != nil {
		return false, nil, nil, headlessSvcErr
	}
	// either the service or the headless service has no endpoint subsets yet, i.e., not ready
	if len(svcEP.Subsets) == 0 || len(headlessEP.Subsets) == 0 {
		return false, nil, nil, nil
	}

	// make sure addresses exist
	if len(svcEP.Subsets[0].Addresses) == 0 || len(headlessEP.Subsets[0].Addresses) == 0 {
		return false, nil, nil, nil
	}

	return true, svcEP, headlessEP, nil
}

// getAccountFacts parses secrets for the given cluster as facts, i.e., accounts created.
// TODO: @shanshan, should verify accounts on database cluster as well.
func (r *SystemAccountReconciler) getAccountFacts(reqCtx intctrlutil.RequestCtx, key componentUniqueKey) (appsv1alpha1.KBAccountType, error) {
	// get account facts, i.e., secrets created
	ml := getLabelsForSecretsAndJobs(key)

	secrets := &corev1.SecretList{}
	if err := r.Client.List(reqCtx.Ctx, secrets, client.InNamespace(key.namespace), ml); err != nil {
		return appsv1alpha1.KBAccountInvalid, err
	}

	// list account-provisioning jobs for this component
	jobs := &batchv1.JobList{}
	if err := r.Client.List(reqCtx.Ctx, jobs, client.InNamespace(key.namespace), ml); err != nil {
		return appsv1alpha1.KBAccountInvalid, err
	}

	detectedFacts := getAcctFromSecretAndJobs(secrets, jobs)
	reqCtx.Log.V(1).Info("Detected account facts", "facts", detectedFacts)
	return detectedFacts, nil
}

func (r *SystemAccountReconciler) getEngineFacts(reqCtx intctrlutil.RequestCtx, key componentUniqueKey) (appsv1alpha1.KBAccountType, error) {
	// get pods for this cluster-component, by label
	ml := getLabelsForSecretsAndJobs(key)
	pods := &corev1.PodList{}
	if err := r.Client.List(reqCtx.Ctx, pods, client.InNamespace(key.namespace), ml); err != nil {
		return appsv1alpha1.KBAccountInvalid, err
	}
	if len(pods.Items) == 0 {
		return appsv1alpha1.KBAccountInvalid, fmt.Errorf("no pods available for cluster: %s, component %s", key.clusterName, key.componentName)
	}
	// find the first running pod
	var target *corev1.Pod
	for i := range pods.Items {
		if pods.Items[i].Status.Phase == corev1.PodRunning {
			target = &pods.Items[i]
			break
		}
	}
	if target == nil {
		return appsv1alpha1.KBAccountInvalid, fmt.Errorf("no pod is running for cluster: %s, component %s", key.clusterName, key.componentName)
	}

	lorryClient, err := lorry.NewHTTPClientWithPod(target)
	if err != nil {
		return appsv1alpha1.KBAccountInvalid, err
	}
	if intctrlutil.IsNil(lorryClient) {
		return appsv1alpha1.KBAccountInvalid, errors.New("not lorry service")
	}

	accounts, err := lorryClient.ListSystemAccounts(reqCtx.Ctx)
	if err != nil {
		return appsv1alpha1.KBAccountInvalid, err
	}
	accountsID := appsv1alpha1.KBAccountInvalid
	for _, acc := range accounts {
		accountName, ok := acc["userName"]
		if !ok {
			continue
		}

		updateFacts(appsv1alpha1.AccountName(accountName.(string)), &accountsID)
	}
	return accountsID, nil
}

func (r *SystemAccountReconciler) jobCompletionHandler() *handler.Funcs {
	logger := systemAccountLog.WithName("jobCompletionHandler")

	containsJobCondition := func(job batchv1.Job, jobConditions []batchv1.JobCondition,
		jobCondType batchv1.JobConditionType, jobCondStatus corev1.ConditionStatus) bool {
		for _, jobCond := range jobConditions {
			if jobCond.Type == jobCondType && jobCond.Status == jobCondStatus {
				return true
			}
		}
		return false
	}
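
	// For reference, a finished provisioning job carries a condition such as
	//   {Type: batchv1.JobComplete, Status: corev1.ConditionTrue}   // or batchv1.JobFailed on error
	// in job.Status.Conditions, which is what containsJobCondition matches below.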

	// check against a job to make sure it
	// 1. is for a system account (by checking labels)
	// 2. has reached completion (either succeeded or failed)
	// 3. still carries the cluster finalizer, i.e., has not been handled yet (the finalizer is
	//    removed afterwards so that deletion by the user or by TTL can proceed)
	return &handler.Funcs{
		UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) {
			var (
				jobTerminated = false
				job           *batchv1.Job
				ok            bool
			)

			defer func() {
				// prepare a patch by removing the finalizer
				if jobTerminated {
					patch := client.MergeFrom(job.DeepCopy())
					controllerutil.RemoveFinalizer(job, constant.DBClusterFinalizerName)
					_ = r.Client.Patch(context.Background(), job, patch)
				}
			}()

			if e.ObjectNew == nil {
				return
			}

			if job, ok = e.ObjectNew.(*batchv1.Job); !ok {
				return
			}

			if job.Annotations == nil || job.Labels == nil {
				return
			}

			accountName := job.Labels[constant.ClusterAccountLabelKey]
			clusterName := job.Labels[constant.AppInstanceLabelKey]
			componentName := job.Labels[constant.KBAppComponentLabelKey]

			// filter out jobs that are not for system accounts
			if len(accountName) == 0 || len(clusterName) == 0 || len(componentName) == 0 {
				return
			}
			// filter out jobs that have not reached completion (either complete or failed) or have already been handled
			if !containsJobCondition(*job, job.Status.Conditions, batchv1.JobFailed, corev1.ConditionTrue) &&
				!containsJobCondition(*job, job.Status.Conditions, batchv1.JobComplete, corev1.ConditionTrue) ||
				!controllerutil.ContainsFinalizer(job, constant.DBClusterFinalizerName) {
				return
			}

			jobTerminated = true
			clusterKey := types.NamespacedName{Namespace: job.Namespace, Name: clusterName}
			cluster := &appsv1alpha1.Cluster{}
			if err := r.Client.Get(context.TODO(), clusterKey, cluster); err != nil {
				logger.Error(err, "failed to get cluster", "cluster key", clusterKey)
				return
			}

			if containsJobCondition(*job, job.Status.Conditions, batchv1.JobFailed, corev1.ConditionTrue) {
				logger.V(1).Info("job failed", "job", job.Name)
				r.Recorder.Eventf(cluster, corev1.EventTypeNormal, SysAcctCreate,
					"Failed to create accounts for cluster: %s, component: %s, accounts: %s", cluster.Name, componentName, accountName)
				return
			}

			compKey := componentUniqueKey{
				namespace:     job.Namespace,
				clusterName:   clusterName,
				componentName: componentName,
			}
			// get the password from the job annotation
			passwd := job.Annotations[systemAccountPasswdAnnotation]
			secret := renderSecretWithPwd(compKey, accountName, passwd)
			if err := controllerutil.SetControllerReference(cluster, secret, r.Scheme); err != nil {
				logger.Error(err, "failed to set owner reference for secret", "secret", secret.Name)
				return
			}

			if err := r.Client.Create(context.TODO(), secret); err != nil {
				logger.Error(err, "failed to create secret", "secret", secret.Name)
				return
			}

			r.Recorder.Eventf(cluster, corev1.EventTypeNormal, SysAcctCreate,
				"Created accounts for cluster: %s, component: %s, accounts: %s", cluster.Name, componentName, accountName)
		},
	}
}

// existsOperations checks if the cluster has ongoing operations
func existsOperations(cluster *appsv1alpha1.Cluster) bool {
	opsRequestMap, _ := opsutil.GetOpsRequestSliceFromCluster(cluster)
	_, isRestoring := cluster.Annotations[constant.RestoreFromBackupAnnotationKey]
	return len(opsRequestMap) > 0 || isRestoring
}