#!/bin/bash

# E2E test: scheduled backups for a Percona XtraDB Cluster.
#
# Exercises every supported backup storage (PVC, minio, AWS S3, GCS, Azure),
# verifies that each backup can be restored, checks backup-job/pod extra
# fields (affinity, tolerations, priorityClassName, ...), and verifies that
# the KEEP retention option replaces old remote backups.
#
# Environment:
#   OPENSHIFT           - when set, *-oc.json compare files are used
#   SKIP_REMOTE_BACKUPS - when set, cloud-storage (aws/gcs/azure) checks are skipped

set -o errexit

test_dir=$(realpath "$(dirname "$0")")
. "${test_dir}/../functions"

set_debug

# Restore backup $2 into cluster $1 and verify the restored data set:
# rows inserted *after* the backup (select-2 state) must be gone, leaving
# the pre-backup data (select-1 state) on all three PXC pods.
# Arguments:
#   $1 - cluster name
#   $2 - pxc-backup resource name
run_recovery_check() {
	local cluster=$1
	local backup1=$2

	# Backup names embed a timestamp; chars 22..53 are used as a unique,
	# DNS-safe restore object name (assumes operator naming scheme — TODO confirm).
	local restore_name="${backup1:22:32}"

	desc 'write data after backup'
	run_mysql \
		'INSERT myApp.myApp (id) VALUES (100501)' \
		"-h $cluster-proxysql -uroot -proot_password"

	sleep 20
	compare_mysql_cmd "select-2" "SELECT * from myApp.myApp;" "-h $cluster-pxc-0.$cluster-pxc -uroot -proot_password"
	compare_mysql_cmd "select-2" "SELECT * from myApp.myApp;" "-h $cluster-pxc-1.$cluster-pxc -uroot -proot_password"
	compare_mysql_cmd "select-2" "SELECT * from myApp.myApp;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password"

	desc 'recover backup'
	# Patch the sample restore manifest with this cluster/backup/restore name.
	cat "$src_dir/deploy/backup/restore.yaml" \
		| $sed "s/pxcCluster: .*/pxcCluster: $cluster/" \
		| $sed "s/backupName: .*/backupName: $backup1/" \
		| $sed "s/name: .*/name: $restore_name/" \
		| kubectl_bin apply -f -
	wait_backup_restore "${restore_name}"

	kubectl_bin logs "job/restore-job-${restore_name}-${cluster:0:16}"

	wait_for_running "$cluster-proxysql" 1
	wait_for_running "$cluster-pxc" 3
	sleep 20

	desc 'check data after backup'
	compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-0.$cluster-pxc -uroot -proot_password"
	compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-1.$cluster-pxc -uroot -proot_password"
	compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password"
}

# Print the name of the latest Succeeded backup for storage $1 (empty if none).
get_backup_name() {
	kubectl_bin get pxc-backup -o=jsonpath='{range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' \
		| grep ":$1:Succeeded" \
		| tail -1 \
		| cut -d ':' -f 1
}

# Block until at least one backup for storage $1 reaches Succeeded.
wait_backup() {
	while [ -z "$(get_backup_name "$1")" ]; do
		sleep 20
	done
}

# Count backups that are neither Succeeded nor Failed (i.e. still running).
get_running_backups_amount() {
	kubectl_bin get pxc-backup -o=jsonpath='{range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' \
		| grep -vE ":Succeeded|:Failed" \
		| wc -l
}

# Count backups in the Failed state.
get_failed_backups_amount() {
	kubectl_bin get pxc-backup -o=jsonpath='{range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' \
		| grep ":Failed" \
		| wc -l
}

# Wait until no backups are running; abort the test if any backup failed.
wait_all_backups() {
	while [[ "$(get_running_backups_amount)" -ne 0 && "$(get_failed_backups_amount)" -eq 0 ]]; do
		wait_for_running "$cluster-pxc" 3 1
		echo
		kubectl_bin get pxc-backup
		echo
		kubectl_bin get pods
		sleep 20
	done
	if [[ "$(get_failed_backups_amount)" -gt 0 ]]; then
		# was: echo "...\n" — plain echo prints the backslash-n literally
		echo "One or more backups have failed!"
		desc "LIST OF BACKUPS"
		kubectl_bin get pxc-backup
		desc "LIST OF PODS"
		kubectl_bin get pods
		exit 1
	fi
}

# Label the first non-master node with backupWorker=True so that backup
# jobs with a matching nodeSelector/affinity land on it.
# Sets the global LABELED_NODE (consumed by unlabel_node).
label_node() {
	LABELED_NODE=$(kubectl_bin get nodes --no-headers=true | grep -v master | head -n1 | awk '{print $1}')

	kubectl_bin label nodes "${LABELED_NODE}" backupWorker=True --overwrite
}

# Remove the backupWorker label set by label_node.
unlabel_node() {
	kubectl_bin label nodes "${LABELED_NODE}" backupWorker- --overwrite
}

# Compare scheduling-related extra fields (affinity, annotations, labels,
# nodeSelector, priorityClassName, schedulerName, backupWorker tolerations,
# resources) of a backup job or pod against the expected fixture.
# Arguments:
#   $1 - resource type: "job" or "pod"
#   $2 - resource name
compare_extrafields() {
	local resource_type="$1"
	local resource="$2"
	local expected_result=${test_dir}/compare/extra-fields.json
	local new_result="${tmp_dir}/${resource//\//_}.json"

	# Prefer the OpenShift-specific fixture when running on OpenShift.
	# (was: [ ! -z ... -a -f ... ] — deprecated, ambiguous test operators)
	if [[ -n "$OPENSHIFT" && -f "${expected_result//.json/-oc.json}" ]]; then
		expected_result=${expected_result//.json/-oc.json}
	fi

	case ${resource_type} in
		job)
			kubectl_bin get "${resource_type}" "${resource}" -o json | jq '{
					affinity: .spec.template.spec.affinity,
					annotations:
						{
							testName: .spec.template.metadata.annotations.testName
						},
					labels:
						{
							backupWorker: .spec.template.metadata.labels.backupWorker
						},
					nodeSelector:
						{
							backupWorker: .spec.template.spec.nodeSelector.backupWorker
						},
					priorityClassName: .spec.template.spec.priorityClassName,
					schedulerName: .spec.template.spec.schedulerName,
					tolerations: (.spec.template.spec.tolerations[] | select(.key | contains("backupWorker"))),
					resources: .spec.template.spec.containers[0].resources
				}' >"${new_result}"
			;;
		pod)
			kubectl_bin get "${resource_type}" "${resource}" -o json | jq '{
					affinity: .spec.affinity,
					annotations:
						{
							testName: .metadata.annotations.testName
						},
					labels:
						{
							backupWorker: .metadata.labels.backupWorker
						},
					nodeSelector:
						{
							backupWorker: .spec.nodeSelector.backupWorker
						},
					priorityClassName: .spec.priorityClassName,
					schedulerName: .spec.schedulerName,
					tolerations: (.spec.tolerations[] | select(.key | contains("backupWorker"))),
					resources: .spec.containers[0].resources
				}' >"${new_result}"
			;;
	esac

	diff -u "${expected_result}" "${new_result}"
}

main() {
	create_infra "$namespace"
	start_minio

	cluster="scheduled-backup"

	# Priority class referenced by the backup job templates in conf/.
	cat - <<-EOF | kubectl_bin apply -f -
		apiVersion: scheduling.k8s.io/v1
		kind: PriorityClass
		metadata:
		  name: high-priority
		value: 1000000
		globalDefault: false
		description: "This priority class should be used for backup service pods only."
	EOF

	spinup_pxc "$cluster" "$test_dir/conf/${cluster}-init.yml"
	sleep 20

	# For each storage: enable an every-minute schedule, wait just over a
	# minute so one backup fires, disable the schedule, wait for completion.
	desc 'add backups schedule for pvc storage'
	kubectl_bin config set-context "$(kubectl_bin config current-context)" --namespace="$namespace"
	apply_config "${test_dir}/conf/${cluster}-pvc.yml"
	label_node
	sleep 61
	apply_config "${test_dir}/conf/${cluster}-disable.yml"
	wait_all_backups
	desc 'add backups schedule for aws s3 storage'
	apply_config "${test_dir}/conf/${cluster}-aws.yml"
	sleep 61
	apply_config "${test_dir}/conf/${cluster}-disable.yml"
	wait_all_backups
	desc 'add backups schedule for minio storage'
	apply_config "${test_dir}/conf/${cluster}-minio.yml"
	sleep 61
	apply_config "${test_dir}/conf/${cluster}-disable.yml"
	wait_all_backups
	desc 'add backups schedule for gcs storage'
	apply_config "${test_dir}/conf/${cluster}-gcs.yml"
	sleep 61
	apply_config "${test_dir}/conf/${cluster}-disable.yml"
	wait_all_backups
	desc 'add backups schedule for azure storage'
	apply_config "${test_dir}/conf/${cluster}-azure.yml"
	sleep 61
	apply_config "${test_dir}/conf/${cluster}-disable.yml"
	wait_all_backups

	# Resolve backup -> job -> pod names per storage for later comparisons.
	FIRST_PVC_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep pvc | head -n1 | cut -d: -f1)
	JOB_PVC_BACKUP=$(kubectl_bin get jobs | grep "${FIRST_PVC_BACKUP}" | awk '{print $1}')
	POD_PVC_BACKUP=$(kubectl_bin get pods | grep "${JOB_PVC_BACKUP%-*}" | awk '{print $1}')

	FIRST_MINIO_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep minio | head -n1 | cut -d: -f1)
	JOB_MINIO_BACKUP=$(kubectl_bin get jobs | grep "${FIRST_MINIO_BACKUP}" | awk '{print $1}')
	POD_MINIO_BACKUP=$(kubectl_bin get pods | grep "${JOB_MINIO_BACKUP%-*}" | awk '{print $1}')

	if [ -z "$SKIP_REMOTE_BACKUPS" ]; then
		FIRST_AWS_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep aws | head -n1 | cut -d: -f1)
		JOB_AWS_BACKUP=$(kubectl_bin get jobs | grep "${FIRST_AWS_BACKUP}" | awk '{print $1}')
		POD_AWS_BACKUP=$(kubectl_bin get pods | grep "${JOB_AWS_BACKUP%-*}" | awk '{print $1}')

		FIRST_GCP_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep gcp | head -n1 | cut -d: -f1)
		JOB_GCP_BACKUP=$(kubectl_bin get jobs | grep "${FIRST_GCP_BACKUP}" | awk '{print $1}')
		POD_GCP_BACKUP=$(kubectl_bin get pods | grep "${JOB_GCP_BACKUP%-*}" | awk '{print $1}')

		FIRST_AZURE_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep azure | head -n1 | cut -d: -f1)
		JOB_AZURE_BACKUP=$(kubectl_bin get jobs | grep "${FIRST_AZURE_BACKUP}" | awk '{print $1}')
		POD_AZURE_BACKUP=$(kubectl_bin get pods | grep "${JOB_AZURE_BACKUP%-*}" | awk '{print $1}')

		# Strip the scheme prefix ("s3://" = 5 chars, "azure://" = 8 chars) and
		# any trailing ".json" (dot escaped — was 's/.json$//').
		BACKUP_DEST_AWS=$(kubectl_bin get pxc-backup "$FIRST_AWS_BACKUP" -o jsonpath='{.status.destination}' | sed -e 's/\.json$//' | cut -c 6-)
		BACKUP_DEST_GCP=$(kubectl_bin get pxc-backup "$FIRST_GCP_BACKUP" -o jsonpath='{.status.destination}' | sed -e 's/\.json$//' | cut -c 6-)
		BACKUP_DEST_AZURE=$(kubectl_bin get pxc-backup "$FIRST_AZURE_BACKUP" -o jsonpath='{.status.destination}' | sed -e 's/\.json$//' | cut -c 9-)

		desc "Check backup existence"
		check_backup_existence "https://s3.amazonaws.com/${BACKUP_DEST_AWS}.sst_info/sst_info.00000000000000000000" "aws-s3"
		check_backup_existence "https://storage.googleapis.com/${BACKUP_DEST_GCP}.sst_info/sst_info.00000000000000000000" "gcp-cs"
		check_backup_existence "https://engk8soperators.blob.core.windows.net/${BACKUP_DEST_AZURE}.sst_info/sst_info.00000000000000000000" "azure-blob"

		desc "Check that KEEP option saves correct backup's amount (1 for our settings)"

		# Run a second round of remote backups; with keep=1 the first backup
		# of each storage must be replaced by a newer one.
		desc 'add backups schedule for gcs storage'
		apply_config "${test_dir}/conf/${cluster}-gcs.yml"
		sleep 110
		apply_config "${test_dir}/conf/${cluster}-disable.yml"
		wait_all_backups
		desc 'add backups schedule for azure storage'
		apply_config "${test_dir}/conf/${cluster}-azure.yml"
		sleep 110
		apply_config "${test_dir}/conf/${cluster}-disable.yml"
		wait_all_backups
		desc 'add backups schedule for aws s3 storage'
		apply_config "${test_dir}/conf/${cluster}-aws.yml"
		sleep 110
		apply_config "${test_dir}/conf/${cluster}-disable.yml"
		wait_all_backups
		sleep 30
		SECOND_AWS_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep aws | head -n1 | cut -d: -f1)
		SECOND_GCP_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep gcp | head -n1 | cut -d: -f1)
		SECOND_AZURE_BACKUP=$(kubectl_bin get pxc-backup -o jsonpath='{range .items[*]}{.metadata.name}:{.spec.storageName}:{.status.state}{"\n"}{end}' | grep Succeeded | grep azure | head -n1 | cut -d: -f1)

		if [[ $FIRST_GCP_BACKUP == "$SECOND_GCP_BACKUP" || $FIRST_AZURE_BACKUP == "$SECOND_AZURE_BACKUP" || $FIRST_AWS_BACKUP == "$SECOND_AWS_BACKUP" ]]; then
			echo "Something got wrong: First $FIRST_GCP_BACKUP and Second backup $SECOND_GCP_BACKUP for GCP or first $FIRST_AZURE_BACKUP and second backup $SECOND_AZURE_BACKUP for AZURE or first $FIRST_AWS_BACKUP and second backup $SECOND_AWS_BACKUP for AWS are the same"
			exit 1
		fi

		desc "Check backup deletion"
		check_backup_deletion "https://s3.amazonaws.com/${BACKUP_DEST_AWS}.sst_info/sst_info.00000000000000000000" "aws-s3"
		check_backup_deletion "https://storage.googleapis.com/${BACKUP_DEST_GCP}.sst_info/sst_info.00000000000000000000" "gcp-cs"
		check_backup_deletion "https://engk8soperators.blob.core.windows.net/${BACKUP_DEST_AZURE}.sst_info/sst_info.00000000000000000000" "azure-blob"
	fi

	backup_name_pvc=$(get_backup_name "pvc")
	backup_name_minio=$(get_backup_name "minio")
	if [ -z "$SKIP_REMOTE_BACKUPS" ]; then
		backup_name_aws=$(get_backup_name "aws-s3")
		backup_name_gcp=$(get_backup_name "gcp-cs")
		backup_name_azure=$(get_backup_name "azure-blob")
	fi

	apply_config "$test_dir/conf/${cluster}-init.yml"

	run_recovery_check "$cluster" "$backup_name_pvc"
	run_recovery_check "$cluster" "$backup_name_minio"
	if [ -z "$SKIP_REMOTE_BACKUPS" ]; then
		run_recovery_check "$cluster" "$backup_name_aws"
		run_recovery_check "$cluster" "$backup_name_gcp"
		run_recovery_check "$cluster" "$backup_name_azure"
	fi

	unlabel_node
	destroy "$namespace"
	desc "test passed"
}

main