github.com/percona/percona-xtradb-cluster-operator@v1.14.0/deploy/backup/copy-backup.sh

#!/bin/bash

set -o errexit
tmp_dir=$(mktemp -d)
ctrl=""
# ACCESS_KEY_ID, SECRET_ACCESS_KEY and ENDPOINT may be pre-seeded from the
# caller's environment; get_backup_dest overwrites them when the backup
# object references an S3 credentials secret
ACCESS_KEY_ID=${ACCESS_KEY_ID:-}
SECRET_ACCESS_KEY=${SECRET_ACCESS_KEY:-}
ENDPOINT=${ENDPOINT:-}
DEFAULT_REGION=${DEFAULT_REGION:-us-east-1}

# the full path to xbcloud is needed on some platforms
if ! xbcloud=$(which xbcloud); then
	echo "No xtrabackup binaries found. Please install them:"
	echo "https://www.percona.com/downloads/Percona-XtraBackup-LATEST"
	echo "https://formulae.brew.sh/formula/percona-xtrabackup"
	exit 1
fi

check_ctrl() {
	if [ -x "$(command -v kubectl)" ]; then
		ctrl="kubectl"
	elif [ -x "$(command -v oc)" ]; then
		ctrl="oc"
	else
		echo "[ERROR] Neither <oc> nor <kubectl> client found"
		exit 1
	fi
}

usage() {
	cat - <<-EOF
		usage: $0 <backup-name> <local/dir> [<namespace>]

		OPTIONS:
		    <backup-name>  the backup name
		                   it can be obtained with the "$ctrl get pxc-backup" command
		    <local/dir>    the destination directory on the local machine
		    <namespace>    optionally specify a namespace
	EOF
	exit 1
}

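# Example invocation (hypothetical names): copy the backup "backup1" into
# ./backup from namespace "pxc":
#   ./copy-backup.sh backup1 ./backup pxc
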
# Print the destination of the given pxc-backup object (s3://..., azure://...
# or pvc/...) and export CREDENTIALS for the xbcloud calls below; if no such
# object exists, treat the argument as a direct PVC name.
get_backup_dest() {
	local backup=$1
	local secret

	if $ctrl get "pxc-backup/$backup" 1>/dev/null 2>/dev/null; then
		BASE64_DECODE_CMD=""
		if echo eWVz | base64 -d 1>/dev/null 2>/dev/null; then
			BASE64_DECODE_CMD="base64 -d"
		elif echo eWVz | base64 -D 1>/dev/null 2>/dev/null; then
			BASE64_DECODE_CMD="base64 -D"
		else
			echo "[ERROR] no working base64 decode option found."
			exit 1
		fi

		# S3-compatible storage: credentials come from the backup's secret
		secret=$($ctrl get "pxc-backup/$backup" -o 'jsonpath={.status.s3.credentialsSecret}' 2>/dev/null)
		if [ -n "$secret" ]; then
			ENDPOINT=$($ctrl get "pxc-backup/$backup" -o 'jsonpath={.status.s3.endpointUrl}' 2>/dev/null)
			ACCESS_KEY_ID=$($ctrl get "secret/$secret" -o 'jsonpath={.data.AWS_ACCESS_KEY_ID}' 2>/dev/null | eval "${BASE64_DECODE_CMD}")
			SECRET_ACCESS_KEY=$($ctrl get "secret/$secret" -o 'jsonpath={.data.AWS_SECRET_ACCESS_KEY}' 2>/dev/null | eval "${BASE64_DECODE_CMD}")
			export CREDENTIALS="ENDPOINT=$ENDPOINT ACCESS_KEY_ID=$ACCESS_KEY_ID SECRET_ACCESS_KEY=$SECRET_ACCESS_KEY DEFAULT_REGION=$DEFAULT_REGION"

			$ctrl get "pxc-backup/$backup" -o jsonpath='{.status.destination}'
			return
		fi

		# Azure Blob Storage: credentials come from the backup's secret
		secret=$($ctrl get "pxc-backup/$backup" -o 'jsonpath={.status.azure.credentialsSecret}' 2>/dev/null)
		if [ -n "$secret" ]; then
			AZURE_STORAGE_ACCOUNT=$($ctrl get "secret/$secret" -o 'jsonpath={.data.AZURE_STORAGE_ACCOUNT_NAME}' 2>/dev/null | eval "${BASE64_DECODE_CMD}")
			AZURE_ACCESS_KEY=$($ctrl get "secret/$secret" -o 'jsonpath={.data.AZURE_STORAGE_ACCOUNT_KEY}' 2>/dev/null | eval "${BASE64_DECODE_CMD}")
			AZURE_STORAGE_CLASS=$($ctrl get "pxc-backup/$backup" -o 'jsonpath={.data.storageClass}' 2>/dev/null | eval "${BASE64_DECODE_CMD}")
			export CREDENTIALS="AZURE_STORAGE_ACCOUNT=$AZURE_STORAGE_ACCOUNT AZURE_ACCESS_KEY=$AZURE_ACCESS_KEY AZURE_STORAGE_CLASS=$AZURE_STORAGE_CLASS"

			$ctrl get "pxc-backup/$backup" -o jsonpath='{.status.destination}'
			return
		fi

		$ctrl get "pxc-backup/$backup" -o jsonpath='{.status.destination}'
	else
		# no such pxc-backup object: support a direct PVC name here
		echo -n "$backup"
	fi
}

enable_logging() {
	BASH_VER=$(echo "$BASH_VERSION" | cut -d . -f 1,2)
	# BASH_XTRACEFD is only available in bash 4.1 and newer
	if (($(echo "$BASH_VER >= 4.1" | bc -l))); then
		exec 5>"$tmp_dir/log"
		BASH_XTRACEFD=5
		set -o xtrace
		echo "Log: $tmp_dir/log"
	fi
}

check_input_namespace() {
	local namespace=${1}

	if [ -n "$namespace" ]; then
		ctrl="$ctrl -n $namespace"
	fi
}

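# Validate the resolved destination and the local target directory: a pvc/...
# destination must exist in the cluster, while s3:// and azure:// destinations
# are probed with xbcloud.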
check_input_destination() {
	local backup_dest=$1
	local dest_dir=$2

	if [ -z "$backup_dest" ] || [ -z "$dest_dir" ]; then
		usage
	fi

	if [ ! -e "$dest_dir" ]; then
		mkdir -p "$dest_dir"
	fi

	if [ "${backup_dest:0:4}" = "pvc/" ]; then
		if ! $ctrl get "$backup_dest" 1>/dev/null; then
			printf "[ERROR] '%s' PVC doesn't exist.\n\n" "$backup_dest"
			usage
		fi
	elif [ "${backup_dest:0:5}" = "s3://" ] || [ "${backup_dest:0:8}" = "azure://" ]; then
		# probe the remote backup by fetching a single file from it in a clean
		# environment; bash -c lets the CREDENTIALS assignments prefix xbcloud
		env -i bash -c "${CREDENTIALS} ${xbcloud} get ${backup_dest} xtrabackup_info" 1>/dev/null
	else
		echo "Can't find $backup_dest backup"
		usage
	fi

	if [ ! -d "$dest_dir" ]; then
		printf "[ERROR] '%s' is not a local directory.\n\n" "$dest_dir"
		usage
	fi

}

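# Start a throwaway pod that mounts the backup PVC at /backup so its contents
# can be copied out with "$ctrl cp".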
start_tmp_pod() {
	local backup_pvc=$1

	$ctrl delete pod/backup-access 2>/dev/null || :

	cat - <<-EOF | $ctrl apply -f -
		apiVersion: v1
		kind: Pod
		metadata:
		  name: backup-access
		spec:
		  containers:
		  - name: xtrabackup
		    image: percona/percona-xtradb-cluster-operator:0.3.0-backup
		    volumeMounts:
		    - name: backup
		      mountPath: /backup
		  restartPolicy: Never
		  volumes:
		  - name: backup
		    persistentVolumeClaim:
		      claimName: ${backup_pvc#pvc/}
	EOF

	echo -n "Starting pod."
	until $ctrl get pod/backup-access -o jsonpath='{.status.containerStatuses[0].ready}' 2>/dev/null | grep -q 'true'; do
		sleep 1
		echo -n .
	done
	echo "[done]"
}

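# Copy the whole /backup volume from the temporary pod into the local
# destination directory.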
copy_files_pvc() {
	local dest_dir=$1
	local real_dest_dir
	real_dest_dir=$(
		cd "$dest_dir"
		pwd -P
	)

	echo ""
	echo "Downloading started"
	$ctrl cp backup-access:/backup/ "${real_dest_dir%/}/"
	echo "Downloading finished"
}

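# Stream the backup from object storage with xbcloud into a single
# xtrabackup.stream file in the destination directory; transfer details go to
# transfer.log.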
copy_files_xbcloud() {
	local backup_path=$1
	local dest_dir=$2

	echo ""
	echo "Downloading started"
	# run xbcloud in a clean environment with the exported CREDENTIALS prefix
	env -i bash -c "${CREDENTIALS} ${xbcloud} get ${backup_path} --parallel=10" 1>"$dest_dir/xtrabackup.stream" 2>"$dest_dir/transfer.log"
	echo "Downloading finished"
}

stop_tmp_pod() {
	$ctrl delete pod/backup-access
}

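# Resolve the backup destination, then copy it either from a PVC (via a
# temporary pod) or from S3/Azure object storage (via xbcloud).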
main() {
	local backup=$1
	local dest_dir=$2
	local namespace=$3
	local backup_dest

	check_ctrl
	enable_logging
	check_input_namespace "$namespace"
	# call get_backup_dest in the current shell so it can export CREDENTIALS,
	# then call it again in a subshell to capture the destination string
	get_backup_dest "$backup" 1>/dev/null
	backup_dest=$(get_backup_dest "$backup")
	check_input_destination "$backup_dest" "$dest_dir"

	if [ "${backup_dest:0:4}" = "pvc/" ]; then
		start_tmp_pod "$backup_dest"
		copy_files_pvc "$dest_dir"
		stop_tmp_pod
	elif [ "${backup_dest:0:5}" = "s3://" ] || [ "${backup_dest:0:8}" = "azure://" ]; then
		copy_files_xbcloud "$backup_dest" "$dest_dir"
	fi

	cat - <<-EOF

		You can recover the data locally with the following commands:
		    $ service mysqld stop
		    $ rm -rf /var/lib/mysql/*
		    $ cat $dest_dir/xtrabackup.stream | xbstream --decompress -x -C /var/lib/mysql
		    $ xtrabackup --prepare --target-dir=/var/lib/mysql
		    $ chown -R mysql:mysql /var/lib/mysql
		    $ service mysqld start

	EOF
}

main "$@"
exit 0