/*
Copyright (C) 2022-2023 ApeCloud Co., Ltd

This file is part of KubeBlocks project

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package restore

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	dpv1alpha1 "github.com/1aal/kubeblocks/apis/dataprotection/v1alpha1"
	"github.com/1aal/kubeblocks/pkg/common"
	intctrlutil "github.com/1aal/kubeblocks/pkg/controllerutil"
	dptypes "github.com/1aal/kubeblocks/pkg/dataprotection/types"
	"github.com/1aal/kubeblocks/pkg/dataprotection/utils"
)

// restoreJobBuilder accumulates the pieces of a restore Job (volumes, env,
// image, command, scheduling hints) via a fluent API and assembles them in
// build(). One builder can produce several jobs; the "specific" slices are
// per-job while the "common" slices are shared across all jobs it builds.
type restoreJobBuilder struct {
	restore       *dpv1alpha1.Restore
	stage         dpv1alpha1.RestoreStage
	backupSet     BackupActionSet
	backupRepo    *dpv1alpha1.BackupRepo
	// buildWithRepo, when true, makes build() inject datasafed so the job can
	// access the backup repository (or the legacy backup PVC).
	buildWithRepo bool
	env           []corev1.EnvVar
	// commonVolumes/commonVolumeMounts are appended to every job built.
	commonVolumes      []corev1.Volume
	commonVolumeMounts []corev1.VolumeMount
	// specificVolumes should be rebuilt for each job.
	specificVolumes []corev1.Volume
	// specificVolumeMounts should be rebuilt for each job.
	specificVolumeMounts []corev1.VolumeMount
	image                string
	command              []string
	tolerations          []corev1.Toleration
	nodeSelector         map[string]string
	jobName              string
	labels               map[string]string
}

// newRestoreJobBuilder returns a builder for the given restore/backupSet/stage
// with empty common volume slices and the standard restore labels pre-set.
func newRestoreJobBuilder(restore *dpv1alpha1.Restore, backupSet BackupActionSet, backupRepo *dpv1alpha1.BackupRepo, stage dpv1alpha1.RestoreStage) *restoreJobBuilder {
	return &restoreJobBuilder{
		restore:            restore,
		backupSet:          backupSet,
		backupRepo:         backupRepo,
		stage:              stage,
		commonVolumes:      []corev1.Volume{},
		commonVolumeMounts: []corev1.VolumeMount{},
		labels:             BuildRestoreLabels(restore.Name),
	}
}

// buildPVCVolumeAndMount builds a PVC-backed volume and its mount for a restore
// volume claim. The mount path is taken from the claim config when set,
// otherwise looked up from the backup's recorded target volume mounts.
// Returns (nil, nil, nil) when no mount path is needed (volume-snapshot backup
// without a prepareData stage), and a fatal error when a path should exist but
// cannot be resolved.
func (r *restoreJobBuilder) buildPVCVolumeAndMount(
	claim dpv1alpha1.VolumeConfig,
	claimName,
	identifier string) (*corev1.Volume, *corev1.VolumeMount, error) {
	volumeName := fmt.Sprintf("%s-%s", identifier, claimName)
	volume := &corev1.Volume{
		Name:         volumeName,
		VolumeSource: corev1.VolumeSource{PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: claimName}},
	}
	volumeMount := &corev1.VolumeMount{Name: volumeName}
	// explicit mount path in the claim config wins.
	if claim.MountPath != "" {
		volumeMount.MountPath = claim.MountPath
		return volume, volumeMount, nil
	}
	// fall back to the mount path recorded in the backup status for this source volume.
	mountPath := getMountPathWithSourceVolume(r.backupSet.Backup, claim.VolumeSource)
	if mountPath != "" {
		volumeMount.MountPath = mountPath
		return volume, volumeMount, nil
	}

	// a snapshot-based backup with no prepareData action has no files to mount.
	if r.backupSet.UseVolumeSnapshot && !r.backupSet.ActionSet.HasPrepareDataStage() {
		return nil, nil, nil
	}
	return nil, nil, intctrlutil.NewFatalError(fmt.Sprintf(`unable to find the mountPath corresponding to volumeSource "%s" from status.backupMethod.targetVolumes.volumeMounts of backup "%s"`,
		claim.VolumeSource, r.backupSet.Backup.Name))
}

// addToCommonVolumesAndMounts adds the volume and volumeMount to common volumes and volumeMounts slice.
101 func (r *restoreJobBuilder) addToCommonVolumesAndMounts(volume *corev1.Volume, volumeMount *corev1.VolumeMount) *restoreJobBuilder { 102 if volume != nil { 103 r.commonVolumes = append(r.commonVolumes, *volume) 104 } 105 if volumeMount != nil { 106 r.commonVolumeMounts = append(r.commonVolumeMounts, *volumeMount) 107 } 108 return r 109 } 110 111 // resetSpecificVolumesAndMounts resets the specific volumes and volumeMounts slice. 112 func (r *restoreJobBuilder) resetSpecificVolumesAndMounts() { 113 r.specificVolumes = []corev1.Volume{} 114 r.specificVolumeMounts = []corev1.VolumeMount{} 115 } 116 117 // addToSpecificVolumesAndMounts adds the volume and volumeMount to specific volumes and volumeMounts slice. 118 func (r *restoreJobBuilder) addToSpecificVolumesAndMounts(volume *corev1.Volume, volumeMount *corev1.VolumeMount) *restoreJobBuilder { 119 if volume != nil { 120 r.specificVolumes = append(r.specificVolumes, *volume) 121 } 122 if volumeMount != nil { 123 r.specificVolumeMounts = append(r.specificVolumeMounts, *volumeMount) 124 } 125 return r 126 } 127 128 func (r *restoreJobBuilder) setImage(image string) *restoreJobBuilder { 129 r.image = image 130 return r 131 } 132 133 func (r *restoreJobBuilder) setCommand(command []string) *restoreJobBuilder { 134 r.command = command 135 return r 136 } 137 138 func (r *restoreJobBuilder) setToleration(tolerations []corev1.Toleration) *restoreJobBuilder { 139 r.tolerations = tolerations 140 return r 141 } 142 143 func (r *restoreJobBuilder) setNodeNameToNodeSelector(nodeName string) *restoreJobBuilder { 144 r.nodeSelector = map[string]string{ 145 corev1.LabelHostname: nodeName, 146 } 147 return r 148 } 149 150 func (r *restoreJobBuilder) setJobName(jobName string) *restoreJobBuilder { 151 r.jobName = jobName 152 return r 153 } 154 155 func (r *restoreJobBuilder) addLabel(key, value string) *restoreJobBuilder { 156 r.labels[key] = value 157 return r 158 } 159 160 func (r *restoreJobBuilder) attachBackupRepo() 
*restoreJobBuilder { 161 r.buildWithRepo = true 162 return r 163 } 164 165 // addCommonEnv adds the common envs for each restore job. 166 func (r *restoreJobBuilder) addCommonEnv() *restoreJobBuilder { 167 backupName := r.backupSet.Backup.Name 168 // add backupName env 169 r.env = []corev1.EnvVar{{Name: dptypes.DPBackupName, Value: backupName}} 170 // add mount path env of backup dir 171 filePath := r.backupSet.Backup.Status.Path 172 if filePath != "" { 173 r.env = append(r.env, corev1.EnvVar{Name: dptypes.DPBackupBasePath, Value: filePath}) 174 // TODO: add continuous file path env 175 } 176 // add time env 177 actionSetEnv := r.backupSet.ActionSet.Spec.Env 178 timeFormat := getTimeFormat(r.backupSet.ActionSet.Spec.Env) 179 appendTimeEnv := func(envName, envTimestampName string, targetTime *metav1.Time) { 180 if targetTime.IsZero() { 181 return 182 } 183 if envName != "" { 184 r.env = append(r.env, corev1.EnvVar{Name: envName, Value: targetTime.UTC().Format(timeFormat)}) 185 } 186 if envTimestampName != "" { 187 r.env = append(r.env, corev1.EnvVar{Name: envTimestampName, Value: strconv.FormatInt(targetTime.Unix(), 10)}) 188 } 189 } 190 appendTimeEnv(dptypes.DPBackupStopTime, "", r.backupSet.Backup.GetEndTime()) 191 if r.restore.Spec.RestoreTime != "" { 192 restoreTime, _ := time.Parse(time.RFC3339, r.restore.Spec.RestoreTime) 193 appendTimeEnv(DPRestoreTime, DPRestoreTimestamp, &metav1.Time{Time: restoreTime}) 194 } 195 // append actionSet env 196 r.env = append(r.env, actionSetEnv...) 
197 backupMethod := r.backupSet.Backup.Status.BackupMethod 198 if backupMethod != nil && len(backupMethod.Env) > 0 { 199 r.env = utils.MergeEnv(r.env, backupMethod.Env) 200 } 201 // merge the restore env 202 r.env = utils.MergeEnv(r.env, r.restore.Spec.Env) 203 return r 204 } 205 206 func (r *restoreJobBuilder) addTargetPodAndCredentialEnv(pod *corev1.Pod, 207 connectionCredential *dpv1alpha1.ConnectionCredential) *restoreJobBuilder { 208 if pod == nil { 209 return r 210 } 211 var env []corev1.EnvVar 212 // Note: now only add the first container envs. 213 if len(pod.Spec.Containers) != 0 { 214 env = pod.Spec.Containers[0].Env 215 } 216 env = append(env, corev1.EnvVar{Name: dptypes.DPDBHost, Value: intctrlutil.BuildPodHostDNS(pod)}) 217 if connectionCredential != nil { 218 appendEnvFromSecret := func(envName, keyName string) { 219 if keyName == "" { 220 return 221 } 222 env = append(env, corev1.EnvVar{Name: envName, ValueFrom: &corev1.EnvVarSource{ 223 SecretKeyRef: &corev1.SecretKeySelector{ 224 LocalObjectReference: corev1.LocalObjectReference{ 225 Name: connectionCredential.SecretName, 226 }, 227 Key: keyName, 228 }, 229 }}) 230 } 231 appendEnvFromSecret(dptypes.DPDBUser, connectionCredential.UsernameKey) 232 appendEnvFromSecret(dptypes.DPDBPassword, connectionCredential.PasswordKey) 233 appendEnvFromSecret(dptypes.DPDBPort, connectionCredential.PortKey) 234 if connectionCredential.HostKey != "" { 235 appendEnvFromSecret(dptypes.DPDBHost, connectionCredential.HostKey) 236 } 237 } 238 r.env = utils.MergeEnv(r.env, env) 239 return r 240 } 241 242 // builderRestoreJobName builds restore job name. 243 func (r *restoreJobBuilder) builderRestoreJobName(jobIndex int) string { 244 jobName := fmt.Sprintf("restore-%s-%s-%s-%d", strings.ToLower(string(r.stage)), r.restore.UID[:8], r.backupSet.Backup.Name, jobIndex) 245 return cutJobName(jobName) 246 } 247 248 // build the restore job by this builder. 
249 func (r *restoreJobBuilder) build() *batchv1.Job { 250 if r.jobName == "" { 251 r.jobName = r.builderRestoreJobName(0) 252 } 253 job := &batchv1.Job{ 254 ObjectMeta: metav1.ObjectMeta{ 255 Name: r.jobName, 256 Namespace: r.restore.Namespace, 257 Labels: r.labels, 258 }, 259 } 260 podSpec := job.Spec.Template.Spec 261 // 1. set pod spec 262 runUser := int64(0) 263 podSpec.SecurityContext = &corev1.PodSecurityContext{ 264 RunAsUser: &runUser, 265 } 266 podSpec.RestartPolicy = corev1.RestartPolicyNever 267 if r.stage == dpv1alpha1.PrepareData { 268 // set scheduling spec 269 schedulingSpec := r.restore.Spec.PrepareDataConfig.SchedulingSpec 270 podSpec.Tolerations = schedulingSpec.Tolerations 271 podSpec.Affinity = schedulingSpec.Affinity 272 podSpec.NodeSelector = schedulingSpec.NodeSelector 273 podSpec.NodeName = schedulingSpec.NodeName 274 podSpec.SchedulerName = schedulingSpec.SchedulerName 275 podSpec.TopologySpreadConstraints = schedulingSpec.TopologySpreadConstraints 276 } else { 277 podSpec.Tolerations = r.tolerations 278 podSpec.NodeSelector = r.nodeSelector 279 } 280 r.specificVolumes = append(r.specificVolumes, r.commonVolumes...) 281 podSpec.Volumes = r.specificVolumes 282 job.Spec.Template.Spec = podSpec 283 job.Spec.BackoffLimit = &defaultBackoffLimit 284 285 // 2. set restore container 286 r.specificVolumeMounts = append(r.specificVolumeMounts, r.commonVolumeMounts...) 287 container := corev1.Container{ 288 Name: Restore, 289 Resources: r.restore.Spec.ContainerResources, 290 Env: r.env, 291 VolumeMounts: r.specificVolumeMounts, 292 Command: r.command, 293 // expand the image value with the env variables. 
294 Image: common.Expand(r.image, common.MappingFuncFor(utils.CovertEnvToMap(r.env))), 295 ImagePullPolicy: corev1.PullIfNotPresent, 296 } 297 intctrlutil.InjectZeroResourcesLimitsIfEmpty(&container) 298 job.Spec.Template.Spec.Containers = []corev1.Container{container} 299 controllerutil.AddFinalizer(job, dptypes.DataProtectionFinalizerName) 300 301 // 3. inject datasafed if needed 302 if r.buildWithRepo { 303 mountPath := "/backupdata" 304 backupPath := r.backupSet.Backup.Status.Path 305 if r.backupRepo != nil { 306 utils.InjectDatasafed(&job.Spec.Template.Spec, r.backupRepo, mountPath, backupPath) 307 } else if pvcName := r.backupSet.Backup.Status.PersistentVolumeClaimName; pvcName != "" { 308 // If the backup object was created in an old version that doesn't have the backupRepo field, 309 // use the PVC name field as a fallback. 310 utils.InjectDatasafedWithPVC(&job.Spec.Template.Spec, pvcName, mountPath, backupPath) 311 } 312 } 313 return job 314 }