github.com/percona/percona-xtradb-cluster-operator@v1.14.0/pkg/controller/pxc/upgrade.go

package pxc

import (
	"context"
	"crypto/md5"
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/queries"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users"
)

func (r *ReconcilePerconaXtraDBCluster) updatePod(ctx context.Context, sfs api.StatefulApp, podSpec *api.PodSpec, cr *api.PerconaXtraDBCluster, initContainers []corev1.Container) error {
	log := logf.FromContext(ctx)

	if cr.PVCResizeInProgress() {
		log.V(1).Info("PVC resize in progress, skipping statefulset", "sfs", sfs.Name())
		return nil
	}

	currentSet := sfs.StatefulSet()
	newAnnotations := currentSet.Spec.Template.Annotations // save the annotations that were set on currentSet earlier in this reconcile loop
	err := r.client.Get(ctx, types.NamespacedName{Name: currentSet.Name, Namespace: currentSet.Namespace}, currentSet)
	if err != nil {
		return errors.Wrap(err, "failed to get statefulset")
	}

	currentSet.Spec.UpdateStrategy = sfs.UpdateStrategy(cr)

	// support annotation adjustments
	pxc.MergeTemplateAnnotations(currentSet, podSpec.Annotations)

	// change the pod size
	currentSet.Spec.Replicas = &podSpec.Size
	currentSet.Spec.Template.Spec.SecurityContext = podSpec.PodSecurityContext
	currentSet.Spec.Template.Spec.ImagePullSecrets = podSpec.ImagePullSecrets

	if currentSet.Spec.Template.Labels == nil {
		currentSet.Spec.Template.Labels = make(map[string]string)
	}

	for k, v := range podSpec.Labels {
		currentSet.Spec.Template.Labels[k] = v
	}

	err = r.reconcileConfigMap(cr)
	if err != nil {
		return errors.Wrap(err, "upgradePod/updateApp error: update db config error")
	}

	// embed DB configuration hash; a changed hash annotation updates the pod
	// template and therefore triggers a rolling restart
	// TODO: code duplication with deploy function
	configHash, err := r.getConfigHash(cr, sfs)
	if err != nil {
		return errors.Wrap(err, "getting config hash")
	}

	if currentSet.Spec.Template.Annotations == nil {
		currentSet.Spec.Template.Annotations = make(map[string]string)
	}

	pxc.MergeTemplateAnnotations(currentSet, newAnnotations)

	if cr.CompareVersionWith("1.1.0") >= 0 {
		currentSet.Spec.Template.Annotations["percona.com/configuration-hash"] = configHash
	}
	if cr.CompareVersionWith("1.5.0") >= 0 {
		currentSet.Spec.Template.Spec.ServiceAccountName = podSpec.ServiceAccountName
	}

	// change TLS secret configuration
	sslHash, err := r.getSecretHash(cr, cr.Spec.PXC.SSLSecretName, cr.Spec.AllowUnsafeConfig)
	if err != nil {
		return errors.Wrap(err, "upgradePod/updateApp error: update secret error")
	}
	if sslHash != "" && cr.CompareVersionWith("1.1.0") >= 0 {
		currentSet.Spec.Template.Annotations["percona.com/ssl-hash"] = sslHash
	}

	sslInternalHash, err := r.getSecretHash(cr, cr.Spec.PXC.SSLInternalSecretName, cr.Spec.AllowUnsafeConfig)
	if err != nil && !k8serrors.IsNotFound(err) {
		return errors.Wrap(err, "upgradePod/updateApp error: update secret error")
	}
	if sslInternalHash != "" && cr.CompareVersionWith("1.1.0") >= 0 {
		currentSet.Spec.Template.Annotations["percona.com/ssl-internal-hash"] = sslInternalHash
	}

	vaultConfigHash, err := r.getSecretHash(cr, cr.Spec.VaultSecretName, true)
	if err != nil {
		return errors.Wrap(err, "upgradePod/updateApp error: update secret error")
	}
	if vaultConfigHash != "" && cr.CompareVersionWith("1.6.0") >= 0 && !isHAproxy(sfs) {
		currentSet.Spec.Template.Annotations["percona.com/vault-config-hash"] = vaultConfigHash
	}

	if cr.CompareVersionWith("1.9.0") >= 0 {
		envVarsHash, err := r.getSecretHash(cr, cr.Spec.PXC.EnvVarsSecretName, true)
		if isHAproxy(sfs) {
			envVarsHash, err = r.getSecretHash(cr, cr.Spec.HAProxy.EnvVarsSecretName, true)
		} else if isProxySQL(sfs) {
			envVarsHash, err = r.getSecretHash(cr, cr.Spec.ProxySQL.EnvVarsSecretName, true)
		}
		if err != nil {
			return errors.Wrap(err, "upgradePod/updateApp error: update secret error")
		}
		if envVarsHash != "" {
			currentSet.Spec.Template.Annotations["percona.com/env-secret-config-hash"] = envVarsHash
		}
	}

	if isHAproxy(sfs) && cr.CompareVersionWith("1.6.0") >= 0 {
		delete(currentSet.Spec.Template.Annotations, "percona.com/ssl-internal-hash")
		delete(currentSet.Spec.Template.Annotations, "percona.com/ssl-hash")
	}

	var newContainers []corev1.Container
	var newInitContainers []corev1.Container

	secretsName := cr.Spec.SecretsName
	if cr.CompareVersionWith("1.6.0") >= 0 {
		secretsName = "internal-" + cr.Name
	}

	secret := new(corev1.Secret)
	err = r.client.Get(ctx, types.NamespacedName{
		Name: secretsName, Namespace: cr.Namespace,
	}, secret)
	if client.IgnoreNotFound(err) != nil {
		return errors.Wrap(err, "get internal secret")
	}

	// pmm container
	if cr.Spec.PMM != nil && cr.Spec.PMM.IsEnabled(secret) {
		pmmC, err := sfs.PMMContainer(ctx, r.client, cr.Spec.PMM, secret, cr)
		if err != nil {
			return errors.Wrap(err, "pmm container error")
		}
		if pmmC != nil {
			newContainers = append(newContainers, *pmmC)
		}
	}

	// log-collector container
	if cr.Spec.LogCollector != nil && cr.Spec.LogCollector.Enabled && cr.CompareVersionWith("1.7.0") >= 0 {
		logCollectorC, err := sfs.LogCollectorContainer(cr.Spec.LogCollector, cr.Spec.LogCollectorSecretName, secretsName, cr)
		if err != nil {
			return errors.Wrap(err, "logcollector container error")
		}
		if logCollectorC != nil {
			newContainers = append(newContainers, logCollectorC...)
		}
	}

	// volumes
	sfsVolume, err := sfs.Volumes(podSpec, cr, r.getConfigVolume)
	if err != nil {
		return errors.Wrap(err, "volumes error")
	}

	// application container
	appC, err := sfs.AppContainer(podSpec, secretsName, cr, sfsVolume.Volumes)
	if err != nil {
		return errors.Wrap(err, "app container error")
	}

	newContainers = append(newContainers, appC)

	if len(initContainers) > 0 {
		newInitContainers = append(newInitContainers, initContainers...)
	}

	// forceUnsafeBootstrap is deprecated; the unsafe-bootstrap init container is
	// added only for CRs older than 1.10.0
	if podSpec.ForceUnsafeBootstrap {
		log.Info("spec.pxc.forceUnsafeBootstrap option is not supported since v1.10")

		if cr.CompareVersionWith("1.10.0") < 0 {
			ic := appC.DeepCopy()
			ic.Name = ic.Name + "-init-unsafe"
			ic.Resources = podSpec.Resources
			ic.ReadinessProbe = nil
			ic.LivenessProbe = nil
			ic.Command = []string{"/var/lib/mysql/unsafe-bootstrap.sh"}
			newInitContainers = append(newInitContainers, *ic)
		}
	}

	// sidecars
	sideC, err := sfs.SidecarContainers(podSpec, secretsName, cr)
	if err != nil {
		return errors.Wrap(err, "sidecar container error")
	}
	newContainers = append(newContainers, sideC...)

	newContainers = api.AddSidecarContainers(log, newContainers, podSpec.Sidecars)

	currentSet.Spec.Template.Spec.Containers = newContainers
	currentSet.Spec.Template.Spec.InitContainers = newInitContainers
	currentSet.Spec.Template.Spec.Affinity = pxc.PodAffinity(podSpec.Affinity, sfs)
	currentSet.Spec.Template.Spec.TopologySpreadConstraints = pxc.PodTopologySpreadConstraints(podSpec.TopologySpreadConstraints, sfs.Labels())
	if sfsVolume != nil && sfsVolume.Volumes != nil {
		currentSet.Spec.Template.Spec.Volumes = sfsVolume.Volumes
	}
	currentSet.Spec.Template.Spec.Volumes = api.AddSidecarVolumes(log, currentSet.Spec.Template.Spec.Volumes, podSpec.SidecarVolumes)
	currentSet.Spec.Template.Spec.Tolerations = podSpec.Tolerations
	err = r.createOrUpdate(cr, currentSet)
	if err != nil {
		return errors.Wrap(err, "update error")
	}

	if cr.Spec.UpdateStrategy != api.SmartUpdateStatefulSetStrategyType {
		return nil
	}

	return r.smartUpdate(ctx, sfs, cr)
}

// smartUpdate restarts the PXC pods of a changed StatefulSet one at a time:
// secondaries first (in reverse name order), the primary last, waiting for each
// pod to restart and rejoin the cluster before moving on.
func (r *ReconcilePerconaXtraDBCluster) smartUpdate(ctx context.Context, sfs api.StatefulApp, cr *api.PerconaXtraDBCluster) error {
	log := logf.FromContext(ctx)

	if !isPXC(sfs) {
		return nil
	}

	if cr.Spec.Pause {
		return nil
	}

	if cr.HAProxyEnabled() && cr.Status.HAProxy.Status != api.AppStateReady {
		log.Info("Waiting for HAProxy to be ready before smart update")
		return nil
	}

	if cr.ProxySQLEnabled() && cr.Status.ProxySQL.Status != api.AppStateReady {
		log.Info("Waiting for ProxySQL to be ready before smart update")
		return nil
	}

	// sleep a moment so the StatefulSet controller can publish the new revision
	time.Sleep(time.Second)

	currentSet := sfs.StatefulSet()
	err := r.client.Get(ctx, types.NamespacedName{
		Name:      currentSet.Name,
		Namespace: currentSet.Namespace,
	}, currentSet)
	if err != nil {
		return errors.Wrap(err, "failed to get current sfs")
	}

	list := corev1.PodList{}
	if err := r.client.List(ctx,
		&list,
		&client.ListOptions{
			Namespace:     currentSet.Namespace,
			LabelSelector: labels.SelectorFromSet(sfs.Labels()),
		},
	); err != nil {
		return errors.Wrap(err, "get pod list")
	}
	statefulSetChanged := false
	for _, pod := range list.Items {
		if pod.ObjectMeta.Labels["controller-revision-hash"] != currentSet.Status.UpdateRevision {
			statefulSetChanged = true
			break
		}
	}
	if !statefulSetChanged {
		return nil
	}

	log.Info("statefulSet was changed, run smart update")

	running, err := r.isBackupRunning(cr)
	if err != nil {
		log.Error(err, "can't start 'SmartUpdate'")
		return nil
	}
	if running {
		log.Info("can't start/continue 'SmartUpdate': backup is running")
		return nil
	}

	if currentSet.Status.ReadyReplicas < currentSet.Status.Replicas {
		log.Info("can't start/continue 'SmartUpdate': waiting for all replicas to be ready")
		return nil
	}
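
	// The proxy may report the primary either by pod IP or by host name, so the
	// result of getPrimaryPod is normalized below to the pod's
	// <name>.<statefulset>.<namespace> form before comparing it against the pod list.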
	primary, err := r.getPrimaryPod(cr)
	if err != nil {
		return errors.Wrap(err, "get primary pod")
	}
	for _, pod := range list.Items {
		if pod.Status.PodIP == primary || pod.Name == primary {
			primary = fmt.Sprintf("%s.%s.%s", pod.Name, currentSet.Name, currentSet.Namespace)
			break
		}
	}

	log.Info("primary pod", "pod name", primary)

	waitLimit := 2 * 60 * 60 // 2 hours
	if cr.Spec.PXC.LivenessInitialDelaySeconds != nil {
		waitLimit = int(*cr.Spec.PXC.LivenessInitialDelaySeconds)
	}

	sort.Slice(list.Items, func(i, j int) bool {
		return list.Items[i].Name > list.Items[j].Name
	})

	var primaryPod corev1.Pod
	for _, pod := range list.Items {
		pod := pod
		if strings.HasPrefix(primary, fmt.Sprintf("%s.%s.%s", pod.Name, currentSet.Name, currentSet.Namespace)) {
			primaryPod = pod
		} else {
			log.Info("apply changes to secondary pod", "pod name", pod.Name)
			if err := r.applyNWait(ctx, cr, currentSet, &pod, waitLimit); err != nil {
				return errors.Wrap(err, "failed to apply changes")
			}
		}
	}

	log.Info("apply changes to primary pod", "pod name", primaryPod.Name)
	if err := r.applyNWait(ctx, cr, currentSet, &primaryPod, waitLimit); err != nil {
		return errors.Wrap(err, "failed to apply changes")
	}

	log.Info("smart update finished")

	return nil
}

func (r *ReconcilePerconaXtraDBCluster) applyNWait(ctx context.Context, cr *api.PerconaXtraDBCluster, sfs *appsv1.StatefulSet, pod *corev1.Pod, waitLimit int) error {
	log := logf.FromContext(ctx)

	if pod.ObjectMeta.Labels["controller-revision-hash"] == sfs.Status.UpdateRevision {
		log.Info("pod already updated", "pod name", pod.Name)
	} else {
		if err := r.client.Delete(ctx, pod); err != nil {
			return errors.Wrap(err, "failed to delete pod")
		}
	}

	orderInSts, err := getPodOrderInSts(sfs.Name, pod.Name)
	if err != nil {
		return errors.Errorf("compute pod order err, sfs name: %s, pod name: %s", sfs.Name, pod.Name)
	}
	if int32(orderInSts) >= *sfs.Spec.Replicas {
		log.Info("sfs scaled down, pod will not be started", "sfs", sfs.Name, "pod", pod.Name)
		return nil
	}

	if err := r.waitPodRestart(ctx, cr, sfs.Status.UpdateRevision, pod, waitLimit); err != nil {
		return errors.Wrap(err, "failed to wait pod")
	}

	if err := r.waitPXCSynced(cr, pod.Name+"."+cr.Name+"-pxc."+cr.Namespace, waitLimit); err != nil {
		return errors.Wrap(err, "failed to wait pxc sync")
	}

	if err := r.waitHostgroups(ctx, cr, sfs.Name, pod, waitLimit); err != nil {
		return errors.Wrap(err, "failed to wait hostgroups status")
	}

	if err := r.waitUntilOnline(ctx, cr, sfs.Name, pod, waitLimit); err != nil {
		return errors.Wrap(err, "failed to wait pxc status")
	}

	return nil
}

func getPodOrderInSts(stsName string, podName string) (int, error) {
	return strconv.Atoi(podName[len(stsName)+1:])
}

func (r *ReconcilePerconaXtraDBCluster) waitHostgroups(ctx context.Context, cr *api.PerconaXtraDBCluster, sfsName string, pod *corev1.Pod, waitLimit int) error {
	if !cr.Spec.ProxySQLEnabled() {
		return nil
	}

	database, err := r.connectProxy(cr)
	if err != nil {
		return errors.Wrap(err, "failed to get proxySQL db")
	}

	defer database.Close()
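
	// PXC nodes appear in ProxySQL under their service DNS names, so the pod is
	// looked up by its <pod>.<statefulset>.<namespace> prefix.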
	podNamePrefix := fmt.Sprintf("%s.%s.%s", pod.Name, sfsName, cr.Namespace)

	return retry(time.Second*10, time.Duration(waitLimit)*time.Second,
		func() (bool, error) {
			present, err := database.PresentInHostgroups(podNamePrefix)
			if err != nil && err != queries.ErrNotFound {
				return false, errors.Wrap(err, "failed to get hostgroup status")
			}
			if !present {
				return false, nil
			}

			logf.FromContext(ctx).Info("pod present in hostgroups", "pod name", pod.Name)
			return true, nil
		})
}

func (r *ReconcilePerconaXtraDBCluster) waitUntilOnline(ctx context.Context, cr *api.PerconaXtraDBCluster, sfsName string, pod *corev1.Pod, waitLimit int) error {
	if !cr.Spec.ProxySQLEnabled() {
		return nil
	}

	database, err := r.connectProxy(cr)
	if err != nil {
		return errors.Wrap(err, "failed to get proxySQL db")
	}

	defer database.Close()

	podNamePrefix := fmt.Sprintf("%s.%s.%s", pod.Name, sfsName, cr.Namespace)

	return retry(time.Second*10, time.Duration(waitLimit)*time.Second,
		func() (bool, error) {
			statuses, err := database.ProxySQLInstanceStatus(podNamePrefix)
			if err != nil && err != queries.ErrNotFound {
				return false, errors.Wrap(err, "failed to get status")
			}

			for _, status := range statuses {
				if status != "ONLINE" {
					return false, nil
				}
			}

			logf.FromContext(ctx).Info("pod is online", "pod name", pod.Name)
			return true, nil
		})
}

// retry runs f every "in" until "limit" is reached. The first call to f is made
// immediately, and there is no extra tail wait once the limit is hit.
func retry(in, limit time.Duration, f func() (bool, error)) error {
	fdone, err := f()
	if err != nil {
		return err
	}
	if fdone {
		return nil
	}

	done := time.NewTimer(limit)
	defer done.Stop()
	tk := time.NewTicker(in)
	defer tk.Stop()

	for {
		select {
		case <-done.C:
			return errors.New("reach pod wait limit")
		case <-tk.C:
			fdone, err := f()
			if err != nil {
				return err
			}
			if fdone {
				return nil
			}
		}
	}
}

// connectProxy returns a new connection through the proxy (ProxySQL or HAProxy)
func (r *ReconcilePerconaXtraDBCluster) connectProxy(cr *api.PerconaXtraDBCluster) (queries.Database, error) {
	var database queries.Database
	var user, host string
	var port, proxySize int32

	if cr.ProxySQLEnabled() {
		user = users.ProxyAdmin
		host = fmt.Sprintf("%s-proxysql-unready.%s", cr.ObjectMeta.Name, cr.Namespace)
		proxySize = cr.Spec.ProxySQL.Size
		port = 6032
	} else if cr.HAProxyEnabled() {
		user = users.Monitor
		host = fmt.Sprintf("%s-haproxy.%s", cr.Name, cr.Namespace)
		proxySize = cr.Spec.HAProxy.Size

		hasKey, err := cr.ConfigHasKey("mysqld", "proxy_protocol_networks")
		if err != nil {
			return database, errors.Wrap(err, "check if config has proxy_protocol_networks key")
		}

		port = 3306
		if hasKey && cr.CompareVersionWith("1.6.0") >= 0 {
			port = 33062
		}
	} else {
		return database, errors.New("can't detect enabled proxy, please enable HAProxy or ProxySQL")
	}

	secrets := cr.Spec.SecretsName
	if cr.CompareVersionWith("1.6.0") >= 0 {
		secrets = "internal-" + cr.Name
	}
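
	// Try to connect once per second, tolerating up to proxySize failed attempts
	// (roughly one per proxy pod) before giving up and returning the error.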
	for i := 0; ; i++ {
		db, err := queries.New(r.client, cr.Namespace, secrets, user, host, port, cr.Spec.PXC.ReadinessProbes.TimeoutSeconds)
		if err != nil && i < int(proxySize) {
			time.Sleep(time.Second)
		} else if err != nil && i == int(proxySize) {
			return database, err
		} else {
			database = db
			break
		}
	}

	return database, nil
}

func (r *ReconcilePerconaXtraDBCluster) getPrimaryPod(cr *api.PerconaXtraDBCluster) (string, error) {
	conn, err := r.connectProxy(cr)
	if err != nil {
		return "", errors.Wrap(err, "failed to get proxy connection")
	}
	defer conn.Close()

	if cr.HAProxyEnabled() {
		host, err := conn.Hostname()
		if err != nil {
			return "", err
		}

		return host, nil
	}

	return conn.PrimaryHost()
}

func (r *ReconcilePerconaXtraDBCluster) waitPXCSynced(cr *api.PerconaXtraDBCluster, host string, waitLimit int) error {
	secrets := cr.Spec.SecretsName
	port := int32(3306)
	if cr.CompareVersionWith("1.6.0") >= 0 {
		secrets = "internal-" + cr.Name
		port = int32(33062)
	}

	database, err := queries.New(r.client, cr.Namespace, secrets, users.Root, host, port, cr.Spec.PXC.ReadinessProbes.TimeoutSeconds)
	if err != nil {
		return errors.Wrap(err, "failed to access PXC database")
	}

	defer database.Close()

	return retry(time.Second*10, time.Duration(waitLimit)*time.Second,
		func() (bool, error) {
			state, err := database.WsrepLocalStateComment()
			if err != nil {
				return false, errors.Wrap(err, "failed to get wsrep local state")
			}

			if state == "Synced" {
				return true, nil
			}

			return false, nil
		})
}

func (r *ReconcilePerconaXtraDBCluster) waitPodRestart(ctx context.Context, cr *api.PerconaXtraDBCluster, updateRevision string, pod *corev1.Pod, waitLimit int) error {
	return retry(time.Second*10, time.Duration(waitLimit)*time.Second,
		func() (bool, error) {
			err := r.client.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace}, pod)
			if err != nil && !k8serrors.IsNotFound(err) {
				return false, errors.Wrap(err, "fetch pod")
			}

			// We update status in every loop to not wait until the end of smart update
			if err := r.updateStatus(cr, true, nil); err != nil {
				return false, errors.Wrap(err, "update status")
			}

			ready := false
			for _, container := range pod.Status.ContainerStatuses {
				if container.Name == "pxc" {
					ready = container.Ready

					if container.State.Waiting != nil {
						switch container.State.Waiting.Reason {
						case "ImagePullBackOff", "ErrImagePull", "CrashLoopBackOff":
							return false, errors.Errorf("pod %s is in %s state", pod.Name, container.State.Waiting.Reason)
						default:
							logf.FromContext(ctx).Info("pod is waiting", "pod name", pod.Name, "reason", container.State.Waiting.Reason)
						}
					}
				}
			}

			if pod.Status.Phase == corev1.PodFailed {
				return false, errors.Errorf("pod %s is in failed phase", pod.Name)
			}

			if pod.Status.Phase == corev1.PodRunning && pod.ObjectMeta.Labels["controller-revision-hash"] == updateRevision && ready {
				logf.FromContext(ctx).Info("pod is running", "pod name", pod.Name)
				return true, nil
			}

			return false, nil
		})
}

func isPXC(sfs api.StatefulApp) bool {
	return sfs.Labels()["app.kubernetes.io/component"] == "pxc"
}

func isHAproxy(sfs api.StatefulApp) bool {
	return sfs.Labels()["app.kubernetes.io/component"] == "haproxy"
}

func isProxySQL(sfs api.StatefulApp) bool {
	return sfs.Labels()["app.kubernetes.io/component"] == "proxysql"
}
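
// isBackupRunning reports whether any backup for this cluster is currently
// starting or running; smart update is postponed while one is.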
func (r *ReconcilePerconaXtraDBCluster) isBackupRunning(cr *api.PerconaXtraDBCluster) (bool, error) {
	bcpList := api.PerconaXtraDBClusterBackupList{}
	if err := r.client.List(context.TODO(), &bcpList, &client.ListOptions{Namespace: cr.Namespace}); err != nil {
		if k8serrors.IsNotFound(err) {
			return false, nil
		}
		return false, errors.Wrap(err, "failed to get backup object")
	}

	for _, bcp := range bcpList.Items {
		if bcp.Spec.PXCCluster != cr.Name {
			continue
		}

		if bcp.Status.State == api.BackupRunning || bcp.Status.State == api.BackupStarting {
			return true, nil
		}
	}

	return false, nil
}

func (r *ReconcilePerconaXtraDBCluster) isRestoreRunning(clusterName, namespace string) (bool, error) {
	restoreList := api.PerconaXtraDBClusterRestoreList{}

	err := r.client.List(context.TODO(), &restoreList, &client.ListOptions{
		Namespace: namespace,
	})
	if err != nil {
		return false, errors.Wrap(err, "failed to get restore list")
	}

	for _, v := range restoreList.Items {
		if v.Spec.PXCCluster != clusterName {
			continue
		}

		switch v.Status.State {
		case api.RestoreStarting, api.RestoreStopCluster, api.RestoreRestore,
			api.RestoreStartCluster, api.RestorePITR:
			return true, nil
		}
	}
	return false, nil
}

func getCustomConfigHashHex(strData map[string]string, binData map[string][]byte) (string, error) {
	content := struct {
		StrData map[string]string `json:"str_data,omitempty"`
		BinData map[string][]byte `json:"bin_data,omitempty"`
	}{
		StrData: strData,
		BinData: binData,
	}

	allData, err := json.Marshal(content)
	if err != nil {
		return "", errors.Wrap(err, "failed to concat data for config hash")
	}

	hashHex := fmt.Sprintf("%x", md5.Sum(allData))

	return hashHex, nil
}

func (r *ReconcilePerconaXtraDBCluster) getConfigHash(cr *api.PerconaXtraDBCluster, sfs api.StatefulApp) (string, error) {
	ls := sfs.Labels()

	name := types.NamespacedName{
		Namespace: cr.Namespace,
		Name:      ls["app.kubernetes.io/instance"] + "-" + ls["app.kubernetes.io/component"],
	}

	obj, err := r.getFirstExisting(name, &corev1.Secret{}, &corev1.ConfigMap{})
	if err != nil {
		return "", errors.Wrap(err, "failed to get custom config")
	}

	switch obj := obj.(type) {
	case *corev1.Secret:
		return getCustomConfigHashHex(obj.StringData, obj.Data)
	case *corev1.ConfigMap:
		return getCustomConfigHashHex(obj.Data, obj.BinaryData)
	default:
		return fmt.Sprintf("%x", md5.Sum([]byte{})), nil
	}
}

func (r *ReconcilePerconaXtraDBCluster) getFirstExisting(name types.NamespacedName, objs ...client.Object) (client.Object, error) {
	for _, o := range objs {
		err := r.client.Get(context.TODO(), name, o)
		if err != nil && !k8serrors.IsNotFound(err) {
			return nil, err
		}
		if err == nil {
			return o, nil
		}
	}
	return nil, nil
}
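
// getSecretHash returns the md5 hex digest of the Secret's data; it feeds the
// restart-triggering pod-template annotations. An empty string is returned when
// the secret is allowed to be absent.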
func (r *ReconcilePerconaXtraDBCluster) getSecretHash(cr *api.PerconaXtraDBCluster, secretName string, allowNonExistingSecret bool) (string, error) {
	if allowNonExistingSecret && secretName == "" {
		return "", nil
	}
	secretObj := corev1.Secret{}
	if err := r.client.Get(context.TODO(),
		types.NamespacedName{
			Namespace: cr.Namespace,
			Name:      secretName,
		},
		&secretObj,
	); err != nil && k8serrors.IsNotFound(err) && allowNonExistingSecret {
		return "", nil
	} else if err != nil {
		return "", err
	}

	secretString := fmt.Sprintln(secretObj.Data)
	hash := fmt.Sprintf("%x", md5.Sum([]byte(secretString)))

	return hash, nil
}