github.com/percona/percona-xtradb-cluster-operator@v1.14.0/pkg/pxc/backup/job.go

package backup

import (
	"path"
	"strconv"

	"github.com/pkg/errors"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users"
	"github.com/percona/percona-xtradb-cluster-operator/pkg/util"
)

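// Job builds the batch/v1 Job object for the given backup, carrying over the
// labels and annotations of the selected backup storage.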
func (*Backup) Job(cr *api.PerconaXtraDBClusterBackup, cluster *api.PerconaXtraDBCluster) *batchv1.Job {
	// Copy the labels of the selected backup storage onto the backup job labels
	labels := make(map[string]string)
	for key, value := range cluster.Spec.Backup.Storages[cr.Spec.StorageName].Labels {
		labels[key] = value
	}
	labels["type"] = "xtrabackup"
	labels["cluster"] = cr.Spec.PXCCluster
	labels["backup-name"] = cr.Name
	labels["job-name"] = GenName63(cr)

	return &batchv1.Job{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "batch/v1",
			Kind:       "Job",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        labels["job-name"],
			Namespace:   cr.Namespace,
			Labels:      labels,
			Annotations: cluster.Spec.Backup.Storages[cr.Spec.StorageName].Annotations,
		},
	}
}

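// JobSpec builds the JobSpec for the backup job: it configures the xtrabackup
// container with its environment (backup dir, PXC service, credentials, TLS
// verification) and applies the scheduling options of the selected storage.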
func (bcp *Backup) JobSpec(spec api.PXCBackupSpec, cluster *api.PerconaXtraDBCluster, job *batchv1.Job) (batchv1.JobSpec, error) {
	manualSelector := true
	backoffLimit := int32(10)
	if cluster.CompareVersionWith("1.11.0") >= 0 && cluster.Spec.Backup.BackoffLimit != nil {
		backoffLimit = *cluster.Spec.Backup.BackoffLimit
	}
	verifyTLS := true
	storage := cluster.Spec.Backup.Storages[spec.StorageName]
	if storage.VerifyTLS != nil {
		verifyTLS = *storage.VerifyTLS
	}
	envs := []corev1.EnvVar{
		{
			Name:  "BACKUP_DIR",
			Value: "/backup",
		},
		{
			Name:  "PXC_SERVICE",
			Value: spec.PXCCluster + "-pxc",
		},
		{
			Name: "PXC_PASS",
			ValueFrom: &corev1.EnvVarSource{
				SecretKeyRef: app.SecretKeySelector(cluster.Spec.SecretsName, users.Xtrabackup),
			},
		},
		{
			Name:  "VERIFY_TLS",
			Value: strconv.FormatBool(verifyTLS),
		},
	}
	envs = util.MergeEnvLists(envs, spec.ContainerOptions.GetEnvVar(cluster, spec.StorageName))

	return batchv1.JobSpec{
		BackoffLimit:   &backoffLimit,
		ManualSelector: &manualSelector,
		Selector: &metav1.LabelSelector{
			MatchLabels: job.Labels,
		},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Labels:      job.Labels,
				Annotations: storage.Annotations,
			},
			Spec: corev1.PodSpec{
				SecurityContext:    storage.PodSecurityContext,
				ImagePullSecrets:   bcp.imagePullSecrets,
				RestartPolicy:      corev1.RestartPolicyNever,
				ServiceAccountName: cluster.Spec.Backup.ServiceAccountName,
				Containers: []corev1.Container{
					{
						Name:            "xtrabackup",
						Image:           bcp.image,
						SecurityContext: storage.ContainerSecurityContext,
						ImagePullPolicy: bcp.imagePullPolicy,
						Command:         []string{"bash", "/usr/bin/backup.sh"},
						Env:             envs,
						Resources:       storage.Resources,
					},
				},
				Affinity:                  storage.Affinity,
				TopologySpreadConstraints: pxc.PodTopologySpreadConstraints(storage.TopologySpreadConstraints, job.Labels),
				Tolerations:               storage.Tolerations,
				NodeSelector:              storage.NodeSelector,
				SchedulerName:             storage.SchedulerName,
				PriorityClassName:         storage.PriorityClassName,
				RuntimeClassName:          storage.RuntimeClassName,
			},
		},
	}, nil
}

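// appendStorageSecret mounts the SSL, internal SSL, and vault keyring secrets
// into the first container of the backup job.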
func appendStorageSecret(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) error {
	// Volume for the SSL secret
	secretVol := corev1.Volume{
		Name: "ssl",
	}
	secretVol.Secret = &corev1.SecretVolumeSource{}
	secretVol.Secret.SecretName = cr.Status.SSLSecretName
	t := true
	secretVol.Secret.Optional = &t

	// Volume for the internal SSL secret
	secretIntVol := corev1.Volume{
		Name: "ssl-internal",
	}
	secretIntVol.Secret = &corev1.SecretVolumeSource{}
	secretIntVol.Secret.SecretName = cr.Status.SSLInternalSecretName
	secretIntVol.Secret.Optional = &t

	// Volume for the vault keyring secret
	secretVaultVol := corev1.Volume{
		Name: "vault-keyring-secret",
	}
	secretVaultVol.Secret = &corev1.SecretVolumeSource{}
	secretVaultVol.Secret.SecretName = cr.Status.VaultSecretName
	secretVaultVol.Secret.Optional = &t

	if len(job.Template.Spec.Containers) == 0 {
		return errors.New("no containers in job spec")
	}
	job.Template.Spec.Containers[0].VolumeMounts = append(
		job.Template.Spec.Containers[0].VolumeMounts,
		corev1.VolumeMount{
			Name:      "ssl",
			MountPath: "/etc/mysql/ssl",
		},
		corev1.VolumeMount{
			Name:      "ssl-internal",
			MountPath: "/etc/mysql/ssl-internal",
		},
		corev1.VolumeMount{
			Name:      "vault-keyring-secret",
			MountPath: "/etc/mysql/vault-keyring-secret",
		},
	)
	job.Template.Spec.Volumes = append(
		job.Template.Spec.Volumes,
		secretVol,
		secretIntVol,
		secretVaultVol,
	)

	return nil
}

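// SetStoragePVC mounts the given PersistentVolumeClaim at /backup in the
// backup job and adds the SSL secret volumes.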
func SetStoragePVC(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup, volName string) error {
	pvc := corev1.Volume{
		Name: "xtrabackup",
	}
	pvc.PersistentVolumeClaim = &corev1.PersistentVolumeClaimVolumeSource{
		ClaimName: volName,
	}

	if len(job.Template.Spec.Containers) == 0 {
		return errors.New("no containers in job spec")
	}

	job.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
		{
			Name:      pvc.Name,
			MountPath: "/backup",
		},
	}

	job.Template.Spec.Volumes = []corev1.Volume{
		pvc,
	}

	err := appendStorageSecret(job, cr)
	if err != nil {
		return errors.Wrap(err, "failed to append storage secret")
	}

	return nil
}

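// SetStorageAzure configures the backup job for Azure Blob Storage: it sets
// the AZURE_* environment variables from the backup status and adds the SSL
// secret volumes.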
func SetStorageAzure(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) error {
	if cr.Status.Azure == nil {
		return errors.New("azure storage is not specified in backup status")
	}
	azure := cr.Status.Azure
	storageAccount := corev1.EnvVar{
		Name: "AZURE_STORAGE_ACCOUNT",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: app.SecretKeySelector(azure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_NAME"),
		},
	}
	accessKey := corev1.EnvVar{
		Name: "AZURE_ACCESS_KEY",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: app.SecretKeySelector(azure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_KEY"),
		},
	}
	container, prefix := azure.ContainerAndPrefix()
	if container == "" {
		container, prefix = cr.Status.Destination.BucketAndPrefix()
	}
	bucketPath := path.Join(prefix, cr.Status.Destination.BackupName())

	containerName := corev1.EnvVar{
		Name:  "AZURE_CONTAINER_NAME",
		Value: container,
	}
	endpoint := corev1.EnvVar{
		Name:  "AZURE_ENDPOINT",
		Value: azure.Endpoint,
	}
	storageClass := corev1.EnvVar{
		Name:  "AZURE_STORAGE_CLASS",
		Value: azure.StorageClass,
	}
	backupPath := corev1.EnvVar{
		Name:  "BACKUP_PATH",
		Value: bucketPath,
	}
	if len(job.Template.Spec.Containers) == 0 {
		return errors.New("no containers in job spec")
	}
	job.Template.Spec.Containers[0].Env = append(job.Template.Spec.Containers[0].Env, storageAccount, accessKey, containerName, endpoint, storageClass, backupPath)

	// reset the volumes and mount the SSL secrets
	job.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{}
	job.Template.Spec.Volumes = []corev1.Volume{}

	err := appendStorageSecret(job, cr)
	if err != nil {
		return errors.Wrap(err, "failed to append storage secrets")
	}

	return nil
}

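// SetStorageS3 configures the backup job for S3-compatible storage: it sets
// the credentials, region, endpoint, and bucket environment variables from
// the backup status and adds the SSL secret volumes.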
func SetStorageS3(job *batchv1.JobSpec, cr *api.PerconaXtraDBClusterBackup) error {
	if cr.Status.S3 == nil {
		return errors.New("s3 storage is not specified in backup status")
	}
	s3 := cr.Status.S3
	accessKey := corev1.EnvVar{
		Name: "ACCESS_KEY_ID",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: app.SecretKeySelector(s3.CredentialsSecret, "AWS_ACCESS_KEY_ID"),
		},
	}
	secretKey := corev1.EnvVar{
		Name: "SECRET_ACCESS_KEY",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: app.SecretKeySelector(s3.CredentialsSecret, "AWS_SECRET_ACCESS_KEY"),
		},
	}
	region := corev1.EnvVar{
		Name:  "DEFAULT_REGION",
		Value: s3.Region,
	}
	endpoint := corev1.EnvVar{
		Name:  "ENDPOINT",
		Value: s3.EndpointURL,
	}

	if len(job.Template.Spec.Containers) == 0 {
		return errors.New("no containers in job spec")
	}
	job.Template.Spec.Containers[0].Env = append(job.Template.Spec.Containers[0].Env, accessKey, secretKey, region, endpoint)

	bucket, prefix := s3.BucketAndPrefix()
	if bucket == "" {
		bucket, prefix = cr.Status.Destination.BucketAndPrefix()
	}
	bucketPath := path.Join(prefix, cr.Status.Destination.BackupName())

	bucketEnv := corev1.EnvVar{
		Name:  "S3_BUCKET",
		Value: bucket,
	}
	bucketPathEnv := corev1.EnvVar{
		Name:  "S3_BUCKET_PATH",
		Value: bucketPath,
	}
	job.Template.Spec.Containers[0].Env = append(job.Template.Spec.Containers[0].Env, bucketEnv, bucketPathEnv)

	// reset the volumes and mount the SSL secrets
	job.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{}
	job.Template.Spec.Volumes = []corev1.Volume{}

	err := appendStorageSecret(job, cr)
	if err != nil {
		return errors.Wrap(err, "failed to append storage secrets")
	}

	return nil
}