github.com/percona/percona-xtradb-cluster-operator@v1.14.0/pkg/pxc/app/deployment/binlog-collector.go (about)

     1  package deployment
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"strconv"
     8  	"strings"
     9  
    10  	"github.com/pkg/errors"
    11  	appsv1 "k8s.io/api/apps/v1"
    12  	corev1 "k8s.io/api/core/v1"
    13  	"k8s.io/apimachinery/pkg/api/resource"
    14  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    15  	"k8s.io/apimachinery/pkg/labels"
    16  	"sigs.k8s.io/controller-runtime/pkg/client"
    17  
    18  	"github.com/percona/percona-xtradb-cluster-operator/clientcmd"
    19  	api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1"
    20  	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc"
    21  	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/app"
    22  	"github.com/percona/percona-xtradb-cluster-operator/pkg/pxc/users"
    23  )
    24  
    25  func GetBinlogCollectorDeployment(cr *api.PerconaXtraDBCluster) (appsv1.Deployment, error) {
    26  	binlogCollectorName := GetBinlogCollectorDeploymentName(cr)
    27  	pxcUser := users.Xtrabackup
    28  	sleepTime := fmt.Sprintf("%.2f", cr.Spec.Backup.PITR.TimeBetweenUploads)
    29  
    30  	bufferSize, err := getBufferSize(cr.Spec)
    31  	if err != nil {
    32  		return appsv1.Deployment{}, errors.Wrap(err, "get buffer size")
    33  	}
    34  
    35  	labels := map[string]string{
    36  		"app.kubernetes.io/name":       "percona-xtradb-cluster",
    37  		"app.kubernetes.io/instance":   cr.Name,
    38  		"app.kubernetes.io/component":  "pitr",
    39  		"app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator",
    40  		"app.kubernetes.io/part-of":    "percona-xtradb-cluster",
    41  	}
    42  	for key, value := range cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].Labels {
    43  		labels[key] = value
    44  	}
    45  	envs, err := getStorageEnvs(cr)
    46  	if err != nil {
    47  		return appsv1.Deployment{}, errors.Wrap(err, "get storage envs")
    48  	}
    49  	envs = append(envs, []corev1.EnvVar{
    50  		{
    51  			Name:  "PXC_SERVICE",
    52  			Value: cr.Name + "-pxc",
    53  		},
    54  		{
    55  			Name:  "PXC_USER",
    56  			Value: pxcUser,
    57  		},
    58  		{
    59  			Name: "PXC_PASS",
    60  			ValueFrom: &corev1.EnvVarSource{
    61  				SecretKeyRef: app.SecretKeySelector(cr.Spec.SecretsName, pxcUser),
    62  			},
    63  		},
    64  		{
    65  			Name:  "COLLECT_SPAN_SEC",
    66  			Value: sleepTime,
    67  		},
    68  		{
    69  			Name:  "BUFFER_SIZE",
    70  			Value: strconv.FormatInt(bufferSize, 10),
    71  		},
    72  	}...)
    73  
    74  	if cr.CompareVersionWith("1.14.0") >= 0 {
    75  		timeout := fmt.Sprintf("%.2f", cr.Spec.Backup.PITR.TimeoutSeconds)
    76  
    77  		envs = append(envs, corev1.EnvVar{
    78  			Name:  "TIMEOUT_SECONDS",
    79  			Value: timeout,
    80  		})
    81  	}
    82  
    83  	container := corev1.Container{
    84  		Name:            "pitr",
    85  		Image:           cr.Spec.Backup.Image,
    86  		ImagePullPolicy: cr.Spec.Backup.ImagePullPolicy,
    87  		Env:             envs,
    88  		SecurityContext: cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].ContainerSecurityContext,
    89  		Command:         []string{"pitr"},
    90  		Resources:       cr.Spec.Backup.PITR.Resources,
    91  		VolumeMounts: []corev1.VolumeMount{
    92  			{
    93  				Name:      "mysql-users-secret-file",
    94  				MountPath: "/etc/mysql/mysql-users-secret",
    95  			},
    96  		},
    97  	}
    98  	replicas := int32(1)
    99  
   100  	return appsv1.Deployment{
   101  		TypeMeta: metav1.TypeMeta{
   102  			APIVersion: "apps/v1",
   103  			Kind:       "Deployment",
   104  		},
   105  		ObjectMeta: metav1.ObjectMeta{
   106  			Name:      binlogCollectorName,
   107  			Namespace: cr.Namespace,
   108  		},
   109  		Spec: appsv1.DeploymentSpec{
   110  			Replicas: &replicas,
   111  			Selector: &metav1.LabelSelector{
   112  				MatchLabels: labels,
   113  			},
   114  			Template: corev1.PodTemplateSpec{
   115  				ObjectMeta: metav1.ObjectMeta{
   116  					Name:        binlogCollectorName,
   117  					Namespace:   cr.Namespace,
   118  					Labels:      labels,
   119  					Annotations: cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].Annotations,
   120  				},
   121  				Spec: corev1.PodSpec{
   122  					Containers:                []corev1.Container{container},
   123  					ImagePullSecrets:          cr.Spec.Backup.ImagePullSecrets,
   124  					ServiceAccountName:        cr.Spec.Backup.ServiceAccountName,
   125  					SecurityContext:           cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].PodSecurityContext,
   126  					Affinity:                  cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].Affinity,
   127  					TopologySpreadConstraints: pxc.PodTopologySpreadConstraints(cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].TopologySpreadConstraints, labels),
   128  					Tolerations:               cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].Tolerations,
   129  					NodeSelector:              cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].NodeSelector,
   130  					SchedulerName:             cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].SchedulerName,
   131  					PriorityClassName:         cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].PriorityClassName,
   132  					Volumes: []corev1.Volume{
   133  						app.GetSecretVolumes("mysql-users-secret-file", "internal-"+cr.Name, false),
   134  					},
   135  					RuntimeClassName: cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName].RuntimeClassName,
   136  				},
   137  			},
   138  		},
   139  	}, nil
   140  }
   141  
   142  func getStorageEnvs(cr *api.PerconaXtraDBCluster) ([]corev1.EnvVar, error) {
   143  	storage := cr.Spec.Backup.Storages[cr.Spec.Backup.PITR.StorageName]
   144  	verifyTLS := "true"
   145  	if storage.VerifyTLS != nil && !*storage.VerifyTLS {
   146  		verifyTLS = "false"
   147  	}
   148  	var envs []corev1.EnvVar
   149  
   150  	switch storage.Type {
   151  	case api.BackupStorageS3:
   152  		if storage.S3 == nil {
   153  			return nil, errors.New("s3 storage is not specified")
   154  		}
   155  		envs = []corev1.EnvVar{
   156  			{
   157  				Name: "SECRET_ACCESS_KEY",
   158  				ValueFrom: &corev1.EnvVarSource{
   159  					SecretKeyRef: app.SecretKeySelector(storage.S3.CredentialsSecret, "AWS_SECRET_ACCESS_KEY"),
   160  				},
   161  			},
   162  			{
   163  				Name: "ACCESS_KEY_ID",
   164  				ValueFrom: &corev1.EnvVarSource{
   165  					SecretKeyRef: app.SecretKeySelector(storage.S3.CredentialsSecret, "AWS_ACCESS_KEY_ID"),
   166  				},
   167  			},
   168  			{
   169  				Name:  "S3_BUCKET_URL",
   170  				Value: storage.S3.Bucket,
   171  			},
   172  			{
   173  				Name:  "DEFAULT_REGION",
   174  				Value: storage.S3.Region,
   175  			},
   176  			{
   177  				Name:  "STORAGE_TYPE",
   178  				Value: "s3",
   179  			},
   180  		}
   181  		if len(storage.S3.EndpointURL) > 0 {
   182  			envs = append(envs, corev1.EnvVar{
   183  				Name:  "ENDPOINT",
   184  				Value: storage.S3.EndpointURL,
   185  			})
   186  		}
   187  	case api.BackupStorageAzure:
   188  		if storage.Azure == nil {
   189  			return nil, errors.New("azure storage is not specified")
   190  		}
   191  		envs = []corev1.EnvVar{
   192  			{
   193  				Name: "AZURE_STORAGE_ACCOUNT",
   194  				ValueFrom: &corev1.EnvVarSource{
   195  					SecretKeyRef: app.SecretKeySelector(storage.Azure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_NAME"),
   196  				},
   197  			},
   198  			{
   199  				Name: "AZURE_ACCESS_KEY",
   200  				ValueFrom: &corev1.EnvVarSource{
   201  					SecretKeyRef: app.SecretKeySelector(storage.Azure.CredentialsSecret, "AZURE_STORAGE_ACCOUNT_KEY"),
   202  				},
   203  			},
   204  			{
   205  				Name:  "AZURE_STORAGE_CLASS",
   206  				Value: storage.Azure.StorageClass,
   207  			},
   208  			{
   209  				Name:  "AZURE_CONTAINER_PATH",
   210  				Value: storage.Azure.ContainerPath,
   211  			},
   212  			{
   213  				Name:  "AZURE_ENDPOINT",
   214  				Value: storage.Azure.Endpoint,
   215  			},
   216  			{
   217  				Name:  "STORAGE_TYPE",
   218  				Value: "azure",
   219  			},
   220  		}
   221  	default:
   222  		return nil, errors.Errorf("%s storage has unsupported type %s", cr.Spec.Backup.PITR.StorageName, storage.Type)
   223  	}
   224  
   225  	if cr.CompareVersionWith("1.13.0") >= 0 {
   226  		envs = append(envs, corev1.EnvVar{
   227  			Name:  "VERIFY_TLS",
   228  			Value: verifyTLS,
   229  		})
   230  	}
   231  
   232  	return envs, nil
   233  }
   234  
   235  func GetBinlogCollectorDeploymentName(cr *api.PerconaXtraDBCluster) string {
   236  	return cr.Name + "-pitr"
   237  }
   238  
   239  func getBufferSize(cluster api.PerconaXtraDBClusterSpec) (mem int64, err error) {
   240  	res := cluster.Backup.PITR.Resources
   241  	if res.Size() == 0 {
   242  		return 0, nil
   243  	}
   244  
   245  	var memory *resource.Quantity
   246  
   247  	if _, ok := res.Requests[corev1.ResourceMemory]; ok {
   248  		memory = res.Requests.Memory()
   249  	}
   250  
   251  	if _, ok := res.Limits[corev1.ResourceMemory]; ok {
   252  		memory = res.Limits.Memory()
   253  	}
   254  
   255  	return memory.Value() / int64(100) * int64(75), nil
   256  }
   257  
   258  func GetBinlogCollectorPod(ctx context.Context, c client.Client, cr *api.PerconaXtraDBCluster) (*corev1.Pod, error) {
   259  	collectorPodList := corev1.PodList{}
   260  
   261  	err := c.List(ctx, &collectorPodList,
   262  		&client.ListOptions{
   263  			Namespace: cr.Namespace,
   264  			LabelSelector: labels.SelectorFromSet(map[string]string{
   265  				"app.kubernetes.io/name":       "percona-xtradb-cluster",
   266  				"app.kubernetes.io/instance":   cr.Name,
   267  				"app.kubernetes.io/component":  "pitr",
   268  				"app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator",
   269  				"app.kubernetes.io/part-of":    "percona-xtradb-cluster",
   270  			}),
   271  		},
   272  	)
   273  	if err != nil {
   274  		return nil, errors.Wrap(err, "get binlog collector pods")
   275  	}
   276  
   277  	if len(collectorPodList.Items) < 1 {
   278  		return nil, errors.New("no binlog collector pods")
   279  	}
   280  
   281  	return &collectorPodList.Items[0], nil
   282  }
   283  
// GapFileNotFound is the sentinel error returned by RemoveGapFile when the
// collector pod has no /tmp/gap-detected marker file to remove.
// NOTE(review): Go convention would name this ErrGapFileNotFound; kept as-is
// because external callers compare against this exported identifier.
var GapFileNotFound = errors.New("gap file not found")
   285  
   286  func RemoveGapFile(ctx context.Context, c *clientcmd.Client, pod *corev1.Pod) error {
   287  	stderrBuf := &bytes.Buffer{}
   288  	err := c.Exec(pod, "pitr", []string{"/bin/bash", "-c", "rm /tmp/gap-detected"}, nil, nil, stderrBuf, false)
   289  	if err != nil {
   290  		if strings.Contains(stderrBuf.String(), "No such file or directory") {
   291  			return GapFileNotFound
   292  		}
   293  		return errors.Wrapf(err, "delete gap file in collector pod %s", pod.Name)
   294  	}
   295  
   296  	return nil
   297  }
   298  
   299  func RemoveTimelineFile(ctx context.Context, c *clientcmd.Client, pod *corev1.Pod) error {
   300  	stderrBuf := &bytes.Buffer{}
   301  	err := c.Exec(pod, "pitr", []string{"/bin/bash", "-c", "rm /tmp/pitr-timeline"}, nil, nil, stderrBuf, false)
   302  	if err != nil {
   303  		if strings.Contains(stderrBuf.String(), "No such file or directory") {
   304  			return nil
   305  		}
   306  		return errors.Wrapf(err, "delete timeline file in collector pod %s", pod.Name)
   307  	}
   308  
   309  	return nil
   310  }