github.com/percona/percona-xtradb-cluster-operator@v1.14.0/pkg/pxc/backup/restore.go

package backup

import (
	"path"
	"strconv"
	"strings"

	"github.com/pkg/errors"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/util"
)

var log = logf.Log.WithName("backup/restore")

// PVCRestoreService returns the ClusterIP Service that exposes the restore
// source pod's ncat port (3307) during a restore from PVC storage.
func PVCRestoreService(cr *api.PerconaXtraDBClusterRestore) *corev1.Service {
	svc := &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Service",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "restore-src-" + cr.Name + "-" + cr.Spec.PXCCluster,
			Namespace: cr.Namespace,
		},
		Spec: corev1.ServiceSpec{
			Selector: map[string]string{
				"name": "restore-src-" + cr.Name + "-" + cr.Spec.PXCCluster,
			},
			Type: corev1.ServiceTypeClusterIP,
			Ports: []corev1.ServicePort{
				{
					Port: 3307,
					Name: "ncat",
				},
			},
		},
	}

	if cr.Annotations["percona.com/headless-service"] == "true" {
		svc.Spec.ClusterIP = corev1.ClusterIPNone
	}

	return svc
}

// PVCRestorePod returns the donor pod that serves the backup PVC's contents
// to the restore job during a restore from filesystem (PVC) storage.
func PVCRestorePod(cr *api.PerconaXtraDBClusterRestore, bcpStorageName, pvcName string, cluster *api.PerconaXtraDBCluster) (*corev1.Pod, error) {
	if _, ok := cluster.Spec.Backup.Storages[bcpStorageName]; !ok {
		log.Info("storage " + bcpStorageName + " doesn't exist")
		if len(cluster.Spec.Backup.Storages) == 0 {
			cluster.Spec.Backup.Storages = map[string]*api.BackupStorageSpec{}
		}
		cluster.Spec.Backup.Storages[bcpStorageName] = &api.BackupStorageSpec{}
	}

	// Copy the storage labels from the cluster spec to the restore pod labels.
	labels := make(map[string]string)
	for key, value := range cluster.Spec.Backup.Storages[bcpStorageName].Labels {
		labels[key] = value
	}
	labels["name"] = "restore-src-" + cr.Name + "-" + cr.Spec.PXCCluster

	return &corev1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        "restore-src-" + cr.Name + "-" + cr.Spec.PXCCluster,
			Namespace:   cr.Namespace,
			Annotations: cluster.Spec.Backup.Storages[bcpStorageName].Annotations,
			Labels:      labels,
		},
		Spec: corev1.PodSpec{
			ImagePullSecrets: cluster.Spec.Backup.ImagePullSecrets,
			SecurityContext:  cluster.Spec.Backup.Storages[bcpStorageName].PodSecurityContext,
			Containers: []corev1.Container{
				{
					Name:            "ncat",
					Image:           cluster.Spec.Backup.Image,
					ImagePullPolicy: cluster.Spec.Backup.ImagePullPolicy,
					Command:         []string{"recovery-pvc-donor.sh"},
					SecurityContext: cluster.Spec.Backup.Storages[bcpStorageName].ContainerSecurityContext,
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "backup",
							MountPath: "/backup",
						},
						{
							Name:      "ssl",
							MountPath: "/etc/mysql/ssl",
						},
						{
							Name:      "ssl-internal",
							MountPath: "/etc/mysql/ssl-internal",
						},
						{
							Name:      "vault-keyring-secret",
							MountPath: "/etc/mysql/vault-keyring-secret",
						},
					},
					Resources: cr.Spec.Resources,
				},
			},
			Volumes: []corev1.Volume{
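				// "backup" mounts the PVC that holds the source backup;
				// recovery-pvc-donor.sh serves it to the joiner over the ncat port.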
				{
					Name: "backup",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvcName,
						},
					},
				},
				app.GetSecretVolumes("ssl-internal", cluster.Spec.PXC.SSLInternalSecretName, true),
				app.GetSecretVolumes("ssl", cluster.Spec.PXC.SSLSecretName, cluster.Spec.AllowUnsafeConfig),
				app.GetSecretVolumes("vault-keyring-secret", cluster.Spec.PXC.VaultSecretName, true),
			},
			RestartPolicy:             corev1.RestartPolicyAlways,
			NodeSelector:              cluster.Spec.Backup.Storages[bcpStorageName].NodeSelector,
			Affinity:                  cluster.Spec.Backup.Storages[bcpStorageName].Affinity,
			TopologySpreadConstraints: pxc.PodTopologySpreadConstraints(cluster.Spec.Backup.Storages[bcpStorageName].TopologySpreadConstraints, labels),
			Tolerations:               cluster.Spec.Backup.Storages[bcpStorageName].Tolerations,
			SchedulerName:             cluster.Spec.Backup.Storages[bcpStorageName].SchedulerName,
			PriorityClassName:         cluster.Spec.Backup.Storages[bcpStorageName].PriorityClassName,
			ServiceAccountName:        cluster.Spec.PXC.ServiceAccountName,
			RuntimeClassName:          cluster.Spec.Backup.Storages[bcpStorageName].RuntimeClassName,
		},
	}, nil
}

// RestoreJob returns the Job that restores the given backup into the
// cluster's datadir PVC, or replays binlogs when pitr is true.
func RestoreJob(cr *api.PerconaXtraDBClusterRestore, bcp *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraDBCluster, destination api.PXCBackupDestination, pitr bool) (*batchv1.Job, error) {
	switch bcp.Status.GetStorageType(cluster) {
	case api.BackupStorageAzure:
		if bcp.Status.Azure == nil {
			return nil, errors.New("nil azure backup status storage")
		}
	case api.BackupStorageS3:
		if bcp.Status.S3 == nil {
			return nil, errors.New("nil s3 backup status storage")
		}
	case api.BackupStorageFilesystem:
	default:
		return nil, errors.Errorf("no storage type was specified in status, got: %s", bcp.Status.GetStorageType(cluster))
	}

	jobName := "restore-job-" + cr.Name + "-" + cr.Spec.PXCCluster
	volumeMounts := []corev1.VolumeMount{
		{
			Name:      "datadir",
			MountPath: "/datadir",
		},
		{
			Name:      "vault-keyring-secret",
			MountPath: "/etc/mysql/vault-keyring-secret",
		},
	}
	jobPVCs := []corev1.Volume{
		{
			Name: "datadir",
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
					ClaimName: "datadir-" + cr.Spec.PXCCluster + "-pxc-0",
				},
			},
		},
		app.GetSecretVolumes("vault-keyring-secret", cluster.Spec.PXC.VaultSecretName, true),
	}
	var command []string

	switch bcp.Status.GetStorageType(cluster) {
	case api.BackupStorageFilesystem:
		command = []string{"recovery-pvc-joiner.sh"}
		volumeMounts = append(volumeMounts, []corev1.VolumeMount{
			{
				Name:      "ssl",
				MountPath: "/etc/mysql/ssl",
			},
			{
				Name:      "ssl-internal",
				MountPath: "/etc/mysql/ssl-internal",
			},
		}...)
		jobPVCs = append(jobPVCs, []corev1.Volume{
			app.GetSecretVolumes("ssl-internal", cluster.Spec.PXC.SSLInternalSecretName, true),
			app.GetSecretVolumes("ssl", cluster.Spec.PXC.SSLSecretName, cluster.Spec.AllowUnsafeConfig),
		}...)
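	// Cloud backups are restored with recovery-cloud.sh; clusters older than
	// 1.12.0 still use the S3-only recovery-s3.sh. For point-in-time recovery
	// the job runs the binlog recoverer instead and mounts no volumes.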
	case api.BackupStorageAzure, api.BackupStorageS3:
		command = []string{"recovery-cloud.sh"}
		if bcp.Status.GetStorageType(cluster) == api.BackupStorageS3 && cluster.CompareVersionWith("1.12.0") < 0 {
			command = []string{"recovery-s3.sh"}
		}
		if pitr {
			if cluster.Spec.Backup == nil || len(cluster.Spec.Backup.Storages) == 0 {
				return nil, errors.New("no storage section")
			}
			jobName = "pitr-job-" + cr.Name + "-" + cr.Spec.PXCCluster
			volumeMounts = []corev1.VolumeMount{}
			jobPVCs = []corev1.Volume{}
			command = []string{"pitr", "recover"}
		}
	default:
		return nil, errors.Errorf("invalid storage type was specified in status, got: %s", bcp.Status.GetStorageType(cluster))
	}

	envs, err := restoreJobEnvs(bcp, cr, cluster, destination, pitr)
	if err != nil {
		return nil, errors.Wrap(err, "restore job envs")
	}

	job := &batchv1.Job{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "batch/v1",
			Kind:       "Job",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      jobName,
			Namespace: cr.Namespace,
		},
		Spec: batchv1.JobSpec{
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: cluster.Spec.PXC.Annotations,
					Labels:      cluster.Spec.PXC.Labels,
				},
				Spec: corev1.PodSpec{
					ImagePullSecrets: cluster.Spec.Backup.ImagePullSecrets,
					SecurityContext:  cluster.Spec.PXC.PodSecurityContext,
					Containers: []corev1.Container{
						xtrabackupContainer(cr, cluster, command, volumeMounts, envs),
					},
					RestartPolicy:             corev1.RestartPolicyNever,
					Volumes:                   jobPVCs,
					NodeSelector:              cluster.Spec.PXC.NodeSelector,
					Affinity:                  cluster.Spec.PXC.Affinity.Advanced,
					TopologySpreadConstraints: pxc.PodTopologySpreadConstraints(cluster.Spec.PXC.TopologySpreadConstraints, cluster.Spec.PXC.Labels),
					Tolerations:               cluster.Spec.PXC.Tolerations,
					SchedulerName:             cluster.Spec.PXC.SchedulerName,
					PriorityClassName:         cluster.Spec.PXC.PriorityClassName,
					ServiceAccountName:        cluster.Spec.PXC.ServiceAccountName,
					RuntimeClassName:          cluster.Spec.PXC.RuntimeClassName,
				},
			},
			BackoffLimit: func(i int32) *int32 { return &i }(4),
		},
	}
	return job, nil
}

// restoreJobEnvs assembles the restore job's environment: connection and
// credential variables for the target cluster plus storage-specific settings.
func restoreJobEnvs(bcp *api.PerconaXtraDBClusterBackup, cr *api.PerconaXtraDBClusterRestore, cluster *api.PerconaXtraDBCluster, destination api.PXCBackupDestination, pitr bool) ([]corev1.EnvVar, error) {
	if bcp.Status.GetStorageType(cluster) == api.BackupStorageFilesystem {
		return util.MergeEnvLists(
			[]corev1.EnvVar{
				{
					Name:  "RESTORE_SRC_SERVICE",
					Value: "restore-src-" + cr.Name + "-" + cr.Spec.PXCCluster,
				},
			},
			cr.Spec.ContainerOptions.GetEnvVar(cluster, bcp.Spec.StorageName),
		), nil
	}
	pxcUser := users.Xtrabackup
	verifyTLS := true
	if cluster.Spec.Backup != nil && len(cluster.Spec.Backup.Storages) > 0 {
		storage, ok := cluster.Spec.Backup.Storages[bcp.Spec.StorageName]
		if ok && storage.VerifyTLS != nil {
			verifyTLS = *storage.VerifyTLS
		}
	}
	if bs := cr.Spec.BackupSource; bs != nil {
		if bs.StorageName != "" {
			storage, ok := cluster.Spec.Backup.Storages[bs.StorageName]
			if ok && storage.VerifyTLS != nil {
				verifyTLS = *storage.VerifyTLS
			}
		}
		if bs.VerifyTLS != nil {
			verifyTLS = *bs.VerifyTLS
		}
	}
	envs := []corev1.EnvVar{
		{
			Name:  "PXC_SERVICE",
			Value: cr.Spec.PXCCluster + "-pxc",
		},
		{
			Name:  "PXC_USER",
			Value: pxcUser,
		},
		{
			Name: "PXC_PASS",
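			// The xtrabackup user's password is referenced from the cluster's
			// secrets object rather than inlined into the pod spec.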
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: app.SecretKeySelector(cluster.Spec.SecretsName, pxcUser),
			},
		},
	}
	if pitr {
		envs = append(envs, []corev1.EnvVar{
			{
				Name:  "PITR_GTID",
				Value: cr.Spec.PITR.GTID,
			},
			{
				Name:  "PITR_DATE",
				Value: cr.Spec.PITR.Date,
			},
			{
				Name:  "PITR_RECOVERY_TYPE",
				Value: cr.Spec.PITR.Type,
			},
		}...)
		if bs := cr.Spec.PITR.BackupSource; bs != nil {
			if bs.StorageName != "" {
				storage, ok := cluster.Spec.Backup.Storages[bs.StorageName]
				if ok && storage.VerifyTLS != nil {
					verifyTLS = *storage.VerifyTLS
				}
			}
			if bs.VerifyTLS != nil {
				verifyTLS = *bs.VerifyTLS
			}
		}
	}

	envs = append(envs, corev1.EnvVar{
		Name:  "VERIFY_TLS",
		Value: strconv.FormatBool(verifyTLS),
	})

	switch bcp.Status.GetStorageType(cluster) {
	case api.BackupStorageAzure:
		azureEnvs, err := azureEnvs(cr, bcp, cluster, destination, pitr)
		if err != nil {
			return nil, err
		}
		envs = append(envs, azureEnvs...)
	case api.BackupStorageS3:
		s3Envs, err := s3Envs(cr, bcp, cluster, destination, pitr)
		if err != nil {
			return nil, err
		}
		envs = append(envs, s3Envs...)
	default:
		return nil, errors.Errorf("invalid storage type was specified in status, got: %s", bcp.Status.GetStorageType(cluster))
	}
	return util.MergeEnvLists(
		envs,
		cr.Spec.ContainerOptions.GetEnvVar(cluster, bcp.Spec.StorageName),
	), nil
}

// azureEnvs returns the Azure Blob Storage variables for the restore job,
// including the BINLOG_* settings for the PITR storage when pitr is true.
func azureEnvs(cr *api.PerconaXtraDBClusterRestore, bcp *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraDBCluster, destination api.PXCBackupDestination, pitr bool) ([]corev1.EnvVar, error) {
	azure := bcp.Status.Azure
	container, prefix := azure.ContainerAndPrefix()
	if container == "" {
		container, prefix = destination.BucketAndPrefix()
	}
	backupPath := path.Join(prefix, destination.BackupName())
	envs := []corev1.EnvVar{
		{
			Name: "AZURE_STORAGE_ACCOUNT",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: app.SecretKeySelector(azure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_NAME"),
			},
		},
		{
			Name: "AZURE_ACCESS_KEY",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: app.SecretKeySelector(azure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_KEY"),
			},
		},
		{
			Name:  "AZURE_CONTAINER_NAME",
			Value: container,
		},
		{
			Name:  "AZURE_ENDPOINT",
			Value: azure.Endpoint,
		},
		{
			Name:  "AZURE_STORAGE_CLASS",
			Value: azure.StorageClass,
		},
		{
			Name:  "BACKUP_PATH",
			Value: backupPath,
		},
	}
	if pitr {
		storageAzure := new(api.BackupStorageAzureSpec)
		if bs := cr.Spec.PITR.BackupSource; bs != nil {
			if bs.StorageName != "" {
				storage, ok := cluster.Spec.Backup.Storages[bs.StorageName]
				if ok && storage.Azure != nil {
					storageAzure = storage.Azure
				}
			}
			if bs.Azure != nil {
				storageAzure = bs.Azure
			}
		}
		if len(storageAzure.ContainerPath) == 0 {
			return nil, errors.New("container name is not specified in storage")
		}
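		// The BINLOG_* variables point the PITR recoverer at the storage that
		// holds the binary logs, which may differ from the backup's storage.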
		envs = append(envs, []corev1.EnvVar{
			{
				Name: "BINLOG_AZURE_STORAGE_ACCOUNT",
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: app.SecretKeySelector(storageAzure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_NAME"),
				},
			},
			{
				Name: "BINLOG_AZURE_ACCESS_KEY",
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: app.SecretKeySelector(storageAzure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_KEY"),
				},
			},
			{
				Name:  "BINLOG_AZURE_STORAGE_CLASS",
				Value: storageAzure.StorageClass,
			},
			{
				Name:  "BINLOG_AZURE_CONTAINER_PATH",
				Value: storageAzure.ContainerPath,
			},
			{
				Name:  "BINLOG_AZURE_ENDPOINT",
				Value: storageAzure.Endpoint,
			},
			{
				Name:  "STORAGE_TYPE",
				Value: "azure",
			},
		}...)
	}
	return envs, nil
}

// s3Envs returns the S3 variables for the restore job, including the
// BINLOG_* settings for the PITR storage when pitr is true.
func s3Envs(cr *api.PerconaXtraDBClusterRestore, bcp *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraDBCluster, destination api.PXCBackupDestination, pitr bool) ([]corev1.EnvVar, error) {
	envs := []corev1.EnvVar{
		{
			Name:  "S3_BUCKET_URL",
			Value: strings.TrimPrefix(destination.String(), destination.StorageTypePrefix()),
		},
		{
			Name:  "ENDPOINT",
			Value: bcp.Status.S3.EndpointURL,
		},
		{
			Name:  "DEFAULT_REGION",
			Value: bcp.Status.S3.Region,
		},
		{
			Name: "ACCESS_KEY_ID",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: bcp.Status.S3.CredentialsSecret,
					},
					Key: "AWS_ACCESS_KEY_ID",
				},
			},
		},
		{
			Name: "SECRET_ACCESS_KEY",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: &corev1.SecretKeySelector{
					LocalObjectReference: corev1.LocalObjectReference{
						Name: bcp.Status.S3.CredentialsSecret,
					},
					Key: "AWS_SECRET_ACCESS_KEY",
				},
			},
		},
	}
	if pitr {
		bucket := ""
		storageS3 := new(api.BackupStorageS3Spec)
		if bs := cr.Spec.PITR.BackupSource; bs != nil {
			if bs.StorageName != "" {
				storage, ok := cluster.Spec.Backup.Storages[bs.StorageName]
				if ok && storage.S3 != nil {
					storageS3 = storage.S3
					bucket = storageS3.Bucket
				}
			}
			if bs.S3 != nil {
				storageS3 = bs.S3
				bucket = storageS3.Bucket
			}
		}
		if len(bucket) == 0 {
			return nil, errors.New("no bucket in storage")
		}
		envs = append(envs, []corev1.EnvVar{
			{
				Name:  "BINLOG_S3_ENDPOINT",
				Value: storageS3.EndpointURL,
			},
			{
				Name:  "BINLOG_S3_REGION",
				Value: storageS3.Region,
			},
			{
				Name: "BINLOG_ACCESS_KEY_ID",
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: &corev1.SecretKeySelector{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: storageS3.CredentialsSecret,
						},
						Key: "AWS_ACCESS_KEY_ID",
					},
				},
			},
			{
				Name: "BINLOG_SECRET_ACCESS_KEY",
				ValueFrom: &corev1.EnvVarSource{
					SecretKeyRef: &corev1.SecretKeySelector{
						LocalObjectReference: corev1.LocalObjectReference{
							Name: storageS3.CredentialsSecret,
						},
						Key: "AWS_SECRET_ACCESS_KEY",
					},
				},
			},
			{
				Name:  "BINLOG_S3_BUCKET_URL",
				Value: bucket,
			},
			{
				Name:  "STORAGE_TYPE",
				Value: "s3",
			},
		}...)
	}
	return envs, nil
}
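// xtrabackupContainer builds the container that runs the restore command,
// sizing the XB_USE_MEMORY hint from the container's memory resources.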
func xtrabackupContainer(cr *api.PerconaXtraDBClusterRestore, cluster *api.PerconaXtraDBCluster, cmd []string, volumeMounts []corev1.VolumeMount, envs []corev1.EnvVar) corev1.Container {
	container := corev1.Container{
		Name:            "xtrabackup",
		Image:           cluster.Spec.Backup.Image,
		ImagePullPolicy: cluster.Spec.Backup.ImagePullPolicy,
		Command:         cmd,
		SecurityContext: cluster.Spec.PXC.ContainerSecurityContext,
		VolumeMounts:    volumeMounts,
		Env:             envs,
		Resources:       *cr.Spec.Resources.DeepCopy(),
	}
	// Before 1.13.0 the restore job inherited the PXC container's resources.
	if cluster.CompareVersionWith("1.13.0") < 0 {
		container.Resources = cluster.Spec.PXC.Resources
	}

	useMem, k8sq := xbMemoryUse(container.Resources)
	container.Env = append(
		container.Env,
		corev1.EnvVar{
			Name:  "XB_USE_MEMORY",
			Value: useMem,
		},
	)
	if k8sq.Value() > 0 {
		container.Resources.Requests = corev1.ResourceList{
			corev1.ResourceMemory: k8sq,
		}
	}
	return container
}

// xbMemoryUse derives the XB_USE_MEMORY value from the container's memory
// limit (preferred) or request: 75% of the configured quantity, capped at
// 2GB, with a 100MB default when no memory is set.
func xbMemoryUse(res corev1.ResourceRequirements) (useMem string, k8sQuantity resource.Quantity) {
	if _, ok := res.Requests[corev1.ResourceMemory]; ok {
		k8sQuantity = *res.Requests.Memory()
	}
	if _, ok := res.Limits[corev1.ResourceMemory]; ok {
		k8sQuantity = *res.Limits.Memory()
	}

	useMem = "100MB"

	// Take 75% of the configured memory; dividing first avoids int64 overflow.
	useMem75 := k8sQuantity.Value() / int64(100) * int64(75)
	if useMem75 > 2000000000 {
		useMem = "2GB"
	} else if k8sQuantity.Value() > 0 {
		useMem = strconv.FormatInt(useMem75, 10)
	}

	return useMem, k8sQuantity
}