github.com/percona/percona-xtradb-cluster-operator@v1.14.0/e2e-tests/functions

     1  #!/bin/bash
     2  
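        # Shared helper library for the operator e2e tests. It is meant to be sourced by the
        # individual test scripts, which are expected to set $test_dir (and, where needed,
        # IMAGE*, OPERATOR_NS, DEBUG_TESTS, ENABLE_LOGGING, ...) before sourcing this file.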
     3  GIT_COMMIT=$(git rev-parse HEAD)
     4  BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
     5  GIT_BRANCH=${VERSION:-$(git rev-parse --abbrev-ref HEAD | sed -e 's^/^-^g; s^[.]^-^g;' | tr '[:upper:]' '[:lower:]')}
     6  API="pxc.percona.com/v1"
     7  OPERATOR_VERSION="$(grep 'crVersion' $(realpath $(dirname ${BASH_SOURCE[0]})/../deploy/cr.yaml) | awk '{print $2}')"
     8  IMAGE=${IMAGE:-"perconalab/percona-xtradb-cluster-operator:${GIT_BRANCH}"}
     9  MYSQL_VERSION=${MYSQL_VERSION:-"8.0"}
    10  IMAGE_PXC=${IMAGE_PXC:-"perconalab/percona-xtradb-cluster-operator:main-pxc${MYSQL_VERSION}"}
    11  IMAGE_PROXY=${IMAGE_PROXY:-"perconalab/percona-xtradb-cluster-operator:main-proxysql"}
    12  IMAGE_HAPROXY=${IMAGE_HAPROXY:-"perconalab/percona-xtradb-cluster-operator:main-haproxy"}
    13  IMAGE_BACKUP=${IMAGE_BACKUP:-"perconalab/percona-xtradb-cluster-operator:main-pxc${MYSQL_VERSION}-backup"}
    14  IMAGE_LOGCOLLECTOR=${IMAGE_LOGCOLLECTOR:-"perconalab/percona-xtradb-cluster-operator:main-logcollector"}
    15  SKIP_REMOTE_BACKUPS=${SKIP_REMOTE_BACKUPS:-1}
    16  PMM_SERVER_VER=${PMM_SERVER_VER:-"9.9.9"}
    17  IMAGE_PMM_CLIENT=${IMAGE_PMM_CLIENT:-"perconalab/pmm-client:dev-latest"}
    18  IMAGE_PMM_SERVER=${IMAGE_PMM_SERVER:-"perconalab/pmm-server:dev-latest"}
    19  CERT_MANAGER_VER="1.14.2"
    20  tmp_dir=$(mktemp -d)
    21  sed=$(which gsed || which sed)
    22  date=$(which gdate || which date)
    23  
    24  test_name=$(basename $test_dir)
    25  namespace="${test_name}-${RANDOM}"
    26  replica_namespace="${test_name}-replica-${RANDOM}"
    27  conf_dir=$(realpath $test_dir/../conf || :)
    28  src_dir=$(realpath $test_dir/../..)
    29  logs_dir=$(realpath $test_dir/../logs)
    30  
    31  if [[ ${ENABLE_LOGGING} == "true" ]]; then
    32  	if [ ! -d "${logs_dir}" ]; then
    33  		mkdir "${logs_dir}"
    34  	fi
    35  	log_file_name=$(echo "$test_name-$MYSQL_VERSION" | tr '.' '-')
    36  	exec &> >(tee ${logs_dir}/${log_file_name}.log)
    37  	echo "Log: ${logs_dir}/${log_file_name}.log"
    38  fi
    39  
    40  if [ -f "$conf_dir/cloud-secret.yml" ]; then
    41  	SKIP_REMOTE_BACKUPS=''
    42  fi
    43  
    44  if oc get projects 2>/dev/null; then
    45  	OPENSHIFT=$(oc version -o json | jq -r '.openshiftVersion' | grep -oE '^[0-9]+\.[0-9]+')
    46  fi
    47  
    48  if [ $(kubectl version -o json | jq -r '.serverVersion.gitVersion' | grep "\-eks\-") ]; then
    49  	EKS=1
    50  else
    51  	EKS=0
    52  fi
    53  
    54  KUBE_VERSION=$(kubectl version -o json | jq -r '.serverVersion.major + "." + .serverVersion.minor' | $sed -r 's/[^0-9.]+//g')
    55  
    56  set_debug() {
    57  	if [[ ${DEBUG_TESTS} == 1 ]]; then
    58  		set -o xtrace
    59  	else
    60  		set +o xtrace
    61  	fi
    62  }
    63  
    64  HELM_VERSION=$(helm version -c | $sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/')
    65  if [ "${HELM_VERSION:0:2}" == "v2" ]; then
    66  	HELM_ARGS="--name"
    67  fi
    68  
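        # version_gt: despite the name, it succeeds when the Kubernetes server version is
        # greater than OR equal to the argument, e.g. `version_gt "1.24"` returns 0 on a
        # 1.24+ cluster (KUBE_VERSION is detected above).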
    69  version_gt() {
    70  	desc "return true if kubernetes version equal or greater than desired"
    71  	if [ $(echo "${KUBE_VERSION} >= $1" | bc -l) -eq 1 ]; then
    72  		return 0
    73  	else
    74  		return 1
    75  	fi
    76  }
    77  
    78  get_proxy_size() {
    79  	local cluster=${1}
    80  	if [[ "$(kubectl_bin get pxc "${cluster}" -o 'jsonpath={.spec.haproxy.enabled}')" == "true" ]]; then
    81  		kubectl_bin get pxc "${cluster}" -o 'jsonpath={.spec.haproxy.size}'
    82  		return
    83  	fi
    84  	if [[ "$(kubectl_bin get pxc "${cluster}" -o 'jsonpath={.spec.proxysql.enabled}')" == "true" ]]; then
    85  		kubectl_bin get pxc "${cluster}" -o 'jsonpath={.spec.proxysql.size}'
    86  		return
    87  	fi
    88  }
    89  
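        # Usage sketch (cluster name and sizes are illustrative):
        #   wait_cluster_consistency "some-cluster" 3      # proxy size auto-detected via get_proxy_size
        #   wait_cluster_consistency "some-cluster" 3 2    # explicit proxy size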
    90  wait_cluster_consistency() {
    91  	local cluster_name=${1}
    92  	local cluster_size=${2}
    93  	local proxy_size=${3}
    94  
    95  	if [ -z "${proxy_size}" ]; then
    96  		proxy_size="$(get_proxy_size "$cluster_name")"
    97  	fi
    98  	desc "wait cluster consistency"
    99  	local i=0
   100  	local max=36
   101  	sleep 7 # wait for two reconcile loops ;)  3 sec x 2 times + 1 sec = 7 seconds
   102  	until [[ "$(kubectl_bin get pxc "${cluster_name}" -o jsonpath='{.status.state}')" == "ready" &&
   103  	"$(kubectl_bin get pxc "${cluster_name}" -o jsonpath='{.status.pxc.ready}')" == "${cluster_size}" &&
   104  	"$(kubectl_bin get pxc "${cluster_name}" -o jsonpath='{.status.'$(get_proxy_engine ${cluster_name})'.ready}')" == "${proxy_size}" ]]; do
   105  		echo 'waiting for cluster readiness'
   106  		sleep 20
   107  		if [[ $i -ge $max ]]; then
   108  			echo "Something went wrong waiting for cluster consistency!"
   109  			exit 1
   110  		fi
   111  		let i+=1
   112  	done
   113  }
   114  
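        # Example (illustrative): `create_namespace "$namespace"` recreates the test namespace;
        # pass any non-empty second argument (e.g. "skip_clean") to skip the global cleanup of
        # leftover namespaces.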
   115  create_namespace() {
   116  	local namespace="$1"
   117  	local skip_clean_namespace="$2"
   118  
   119  	if [[ ${CLEAN_NAMESPACE} == 1 ]] && [[ -z ${skip_clean_namespace} ]]; then
   120  		destroy_chaos_mesh
   121  		desc 'cleaned up all old namespaces'
   122  		kubectl_bin get ns \
   123  			| egrep -v "^kube-|^default$|Terminating|pxc-operator|openshift|^NAME" \
   124  			| awk '{print$1}' \
   125  			| xargs kubectl delete ns &
   126  	fi
   127  
   128  	if [ -n "$OPENSHIFT" ]; then
   129  		desc 'cleaned up all old namespaces'
   130  		if [ -n "$OPERATOR_NS" -a $(oc get project "$OPERATOR_NS" -o json | jq -r '.metadata.name') ]; then
   131  			oc delete --grace-period=0 --force=true project "$namespace" && sleep 120 || :
   132  		else
   133  			oc delete project "$namespace" && sleep 40 || :
   134  		fi
   135  		wait_for_delete "project/$namespace"
   136  
   137  		desc "create namespace $namespace"
   138  		oc new-project "$namespace"
   139  		oc project "$namespace"
   140  		oc adm policy add-scc-to-user hostaccess -z default || :
   141  	else
   142  		desc "cleaned up old namespaces $namespace"
   143  		kubectl_bin delete namespace "$namespace" || :
   144  		wait_for_delete "namespace/$namespace"
   145  		desc "create namespace $namespace"
   146  		kubectl_bin create namespace "$namespace"
   147  		kubectl_bin config set-context $(kubectl_bin config current-context) --namespace="$namespace"
   148  	fi
   149  }
   150  
   151  get_operator_pod() {
   152  	local label_prefix="app.kubernetes.io/"
   153  	local check_label=$(kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator ${OPERATOR_NS:+-n $OPERATOR_NS} | grep -c "percona-xtradb-cluster-operator")
   154  	if [[ ${check_label} -eq 0 ]]; then
   155  		label_prefix=""
   156  	fi
   157  	kubectl_bin get pods \
   158  		--selector=${label_prefix}name=percona-xtradb-cluster-operator \
   159  		-o 'jsonpath={.items[].metadata.name}' ${OPERATOR_NS:+-n $OPERATOR_NS}
   160  }
   161  
   162  get_pitr_pod() {
   163  	local pitr_pod=$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pitr --output=custom-columns='NAME:.metadata.name')
   164  	if [[ -z ${pitr_pod} ]]; then
   165  		echo "PITR pod is not found! Exiting..."
   166  		exit 1
   167  	else
   168  		echo "${pitr_pod}"
   169  	fi
   170  }
   171  
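        # Example (illustrative): `wait_pod cluster1-pxc-0 300` waits up to ~300s (default 480)
        # for the pod to become Ready, and aborts the test if a full cluster crash is detected
        # in the pxc/proxysql container log.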
   172  wait_pod() {
   173  	local pod=$1
   174  	local max_retry="${2:-480}"
   175  	local ns=$3
   176  	local container=$(echo "$pod" | $sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' | egrep "^(pxc|proxysql)$")
   177  
   178  	set +o xtrace
   179  	kubectl_bin wait --for=condition=Ready pod/${pod} --timeout="${max_retry}s" ${ns:+-n $ns} || true
   180  	retry=0
   181  	echo -n $pod
   182  	until kubectl_bin get ${ns:+-n $ns} pod/$pod -o jsonpath='{.status.conditions[?(@.type == "Ready")].status}' 2>/dev/null | grep -q -i 'True' \
   183  		&& kubectl_bin get ${ns:+-n $ns} pod/$pod | grep -q "^$pod" \
   184  		&& ! IS_FULL_CRASH=$(kubectl_bin logs --tail=1 ${ns:+-n $ns} pod/$pod ${container:+ -c $container} | grep LAST_LINE); do
   185  		sleep 1
   186  		echo -n .
   187  		let retry+=1
   188  
   189  		if [[ -n $IS_FULL_CRASH ]]; then
   190  			echo 'full cluster crash detected'
   191  			exit 1
   192  		fi
   193  
   194  		if [[ $retry -ge $max_retry ]]; then
   195  			kubectl_bin describe pod/$pod
   196  			kubectl_bin logs $pod
   197  			kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod)
   198  			echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
   199  			exit 1
   200  		fi
   201  	done
   202  	echo ".Ok"
   203  	set_debug
   204  }
   205  
   206  wait_crash_pod() {
   207  	local pod=$1
   208  	local max_retry="${2:-480}"
   209  	local ns=$3
   210  	local container=$(echo "$pod" | $sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' | egrep "^(pxc|proxysql)$")
   211  
   212  	set +o xtrace
   213  	retry=0
   214  	echo -n $pod
   215  	until kubectl_bin get ${ns:+-n $ns} pod/$pod -o jsonpath='{.status.conditions[?(@.type == "Ready")].status}' 2>/dev/null | grep -q -i 'True' \
   216  		&& kubectl_bin get ${ns:+-n $ns} pod/$pod | grep -q "^$pod" \
   217  		&& kubectl_bin logs --tail=1 ${ns:+-n $ns} pod/$pod ${container:+ -c $container} | grep -q LAST_LINE; do
   218  		sleep 1
   219  		echo -n .
   220  		let retry+=1
   221  		if [[ $retry -ge $max_retry ]]; then
   222  			kubectl_bin describe pod/$pod
   223  			kubectl_bin logs $pod
   224  			kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod)
   225  			echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
   226  			exit 1
   227  		fi
   228  	done
   229  	echo ".Ok"
   230  	set_debug
   231  }
   232  
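        # Example (illustrative): `wait_backup backup-minio` waits for the pxc-backup object to
        # reach the Succeeded state; a different target state can be passed as the second argument.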
   233  wait_backup() {
   234  	local backup=$1
   235  	local status=${2:-'Succeeded'}
   236  
   237  	set +o xtrace
   238  	retry=0
   239  	echo -n $backup
   240  	until kubectl_bin get pxc-backup/$backup -o jsonpath='{.status.state}' 2>/dev/null | grep $status; do
   241  		sleep 1
   242  		echo -n .
   243  		let retry+=1
   244  		if [ $retry -ge 120 ]; then
   245  			kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod)
   246  			echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
   247  			exit 1
   248  		fi
   249  	done
   250  	set_debug
   251  }
   252  
   253  wait_backup_restore() {
   254  	local backup_name=$1
   255  	local status=${2:-'Succeeded'}
   256  	local wait_time=${3:-720}
   257  
   258  	desc "wait backup restore"
   259  	set +o xtrace
   260  	retry=0
   261  	echo -n $backup_name
   262  	until kubectl_bin get pxc-restore/$backup_name -o jsonpath='{.status.state}' 2>/dev/null | grep $status; do
   263  		sleep 1
   264  		echo -n .
   265  		let retry+=1
   266  		if [ $retry -ge $wait_time ]; then
   267  			kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod)
   268  			echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
   269  			exit 1
   270  		fi
   271  	done
   272  	echo
   273  	set_debug
   274  }
   275  
   276  apply_rbac() {
   277  	local operator_namespace=${OPERATOR_NS:-'pxc-operator'}
   278  	local rbac=${1:-'rbac'}
   279  
   280  	cat ${src_dir}/deploy/${rbac}.yaml \
   281  		| sed -e "s^namespace: .*^namespace: $operator_namespace^" \
   282  		| kubectl_bin apply -f -
   283  }
   284  
   285  deploy_operator() {
   286  	desc 'start PXC operator'
   287  
   288  	kubectl_bin apply --server-side --force-conflicts -f ${src_dir}/deploy/crd.yaml || :
   289  
   290  	if [ -n "$OPERATOR_NS" ]; then
   291  		apply_rbac cw-rbac
   292  		cat ${src_dir}/deploy/cw-operator.yaml \
   293  			| sed -e "s^image: .*^image: ${IMAGE}^" \
   294  			| sed -e "s^failureThreshold: .*^failureThreshold: 10^" \
   295  			| yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
   296  			| yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - \
   297  			| kubectl_bin apply -f -
   298  	else
   299  		apply_rbac rbac
   300  		cat ${src_dir}/deploy/operator.yaml \
   301  			| sed -e "s^image: .*^image: ${IMAGE}^" \
   302  			| sed -e "s^failureThreshold: .*^failureThreshold: 10^" \
   303  			| yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - \
   304  			| yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - \
   305  			| kubectl_bin apply -f -
   306  	fi
   307  
   308  	sleep 10
   309  
   310  	kubectl_bin wait \
   311  		--for=condition=Ready \
   312  		pods \
   313  		-l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator \
   314  		--timeout=30s || true
   315  
   316  	wait_pod "$(get_operator_pod)" "480" "${OPERATOR_NS}"
   317  	sleep 3
   318  }
   319  
   320  deploy_helm() {
   321  	helm repo add hashicorp https://helm.releases.hashicorp.com
   322  	helm repo add minio https://charts.min.io/
   323  	helm repo update
   324  }
   325  
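        # Example (illustrative): `create_infra "${namespace}"` is the usual test entry point:
        # it cleans up leftover custom resources, (re)creates the namespaces, deploys the
        # operator and applies the storage secrets.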
   326  create_infra() {
   327  	local ns="$1"
   328  
   329  	if [ -n "$OPERATOR_NS" ]; then
   330  		kubectl get pxc --all-namespaces -o wide \
   331  			| grep -v 'NAMESPACE' \
   332  			| xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
   333  			|| :
   334  		kubectl_bin delete pxc --all --all-namespaces || :
   335  		kubectl_bin delete pxc-backup --all --all-namespaces || :
   336  		kubectl_bin delete pxc-restore --all --all-namespaces || :
   337  
   338  		create_namespace $OPERATOR_NS
   339  		deploy_operator
   340  		create_namespace $ns
   341  	else
   342  		create_namespace $ns
   343  		deploy_operator
   344  	fi
   345  	apply_secrets
   346  }
   347  
   348  wait_for_running() {
   349  	local name="$1"
   350  	let last_pod="$(($2 - 1))" || :
   351  	local max_retry="${3:-480}"
   352  	desc "wait for running cluster"
   353  	for i in $(seq 0 $last_pod); do
   354  		wait_pod ${name}-${i} ${max_retry}
   355  	done
   356  }
   357  
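        # Example (illustrative): `wait_for_generation statefulset/cluster1-pxc 2` blocks until
        # .metadata.generation of the resource equals 2.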
   358  wait_for_generation() {
   359  	local resource="$1"
   360  	local target_generation="$2"
   361  
   362  	echo "Waiting for $resource to reach generation $target_generation..."
   363  
   364  	while true; do
   365  		current_generation=$(kubectl get "$resource" -o jsonpath='{.metadata.generation}')
   366  
   367  		if [ "$current_generation" -eq "$target_generation" ]; then
   368  			echo "Resource $resource has reached generation $target_generation."
   369  			break
   370  		else
   371  			echo "Resource $resource is at generation $current_generation. Waiting..."
   372  			sleep 5
   373  		fi
   374  	done
   375  }
   376  
   377  wait_for_delete() {
   378  	local res="$1"
   379  
   380  	echo -n "$res - "
   381  	set +o xtrace
   382  	retry=0
   383  	until (kubectl_bin get $res || :) 2>&1 | grep NotFound; do
   384  		sleep 1
   385  		echo -n .
   386  		let retry+=1
   387  		if [ $retry -ge 120 ]; then
   388  			kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod)
   389  			echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
   390  			exit 1
   391  		fi
   392  	done
   393  	set_debug
   394  }
   395  
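        # Example (illustrative): `compare_kubectl statefulset/cluster1-pxc` diffs the normalized
        # live object against the expected file under $test_dir/compare/, automatically preferring
        # platform/version-specific variants (-oc, -eks, -k1xx, -80) when they exist.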
   396  compare_kubectl() {
   397  	local resource="$1"
   398  	local postfix="$2"
   399  	local expected_result=${test_dir}/compare/${resource//\//_}${postfix}.yml
   400  	local new_result="${tmp_dir}/${resource//\//_}.yml"
   401  
   402  	desc "compare $resource-$postfix"
   403  	if [ "$EKS" = 1 -a -f ${expected_result//.yml/-eks.yml} ]; then
   404  		expected_result=${expected_result//.yml/-eks.yml}
   405  	fi
   406  
   407  	if [[ $IMAGE_PXC =~ 8\.0 ]] && [ -f ${expected_result//.yml/-80.yml} ]; then
   408  		expected_result=${expected_result//.yml/-80.yml}
   409  	fi
   410  
   411  	if version_gt "1.29" && [ -f ${expected_result//.yml/-k129.yml} ]; then
   412  		expected_result=${expected_result//.yml/-k129.yml}
   413  	elif version_gt "1.27" && [ -f ${expected_result//.yml/-k127.yml} ]; then
   414  		expected_result=${expected_result//.yml/-k127.yml}
   415  	elif version_gt "1.24" && [ -f ${expected_result//.yml/-k124.yml} ]; then
   416  		expected_result=${expected_result//.yml/-k124.yml}
   417  	elif version_gt "1.22" && [ -f ${expected_result//.yml/-k122.yml} ]; then
   418  		expected_result=${expected_result//.yml/-k122.yml}
   419  	elif version_gt "1.21" && [ -f ${expected_result//.yml/-k121.yml} ]; then
   420  		expected_result=${expected_result//.yml/-k121.yml}
   421  	fi
   422  
   423  	if [ ! -z "$OPENSHIFT" -a -f ${expected_result//.yml/-oc.yml} ]; then
   424  		expected_result=${expected_result//.yml/-oc.yml}
   425  	fi
   426  
   427  	if [ "$EKS" = 1 -a -f ${expected_result//.yml/-eks.yml} ]; then
   428  		expected_result=${expected_result//.yml/-eks.yml}
   429  	fi
   430  
   431  	kubectl_bin get -o yaml ${resource} \
   432  		| yq eval '
   433  			del(.metadata.managedFields) |
   434  			del(.. | select(has("creationTimestamp")).creationTimestamp) |
   435  			del(.. | select(has("namespace")).namespace) |
   436  			del(.. | select(has("uid")).uid) |
   437  			del(.metadata.resourceVersion) |
   438  			del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) |
   439  			del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) |
   440  			del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) |
   441  			del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) |
   442  			del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) |
   443  			del(.metadata.selfLink) |
   444  			del(.metadata.deletionTimestamp) |
   445  			del(.metadata.annotations."kubernetes.io/psp") |
   446  			del(.metadata.annotations."batch.kubernetes.io/job-tracking") |
   447  			del(.metadata.annotations."cloud.google.com/neg") |
   448  			del(.metadata.annotations."k8s.v1.cni.cncf.io*") |
   449  			del(.metadata.annotations."k8s.ovn.org/pod-networks") |
   450  			del(.spec.template.metadata.annotations."last-applied-secret") |
   451  			del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") |
   452  			del(.. | select(has("image")).image) |
   453  			del(.. | select(has("clusterIP")).clusterIP) |
   454  			del(.. | select(has("clusterIPs")).clusterIPs) |
   455  			del(.. | select(has("dataSource")).dataSource) |
   456  			del(.. | select(has("procMount")).procMount) |
   457  			del(.. | select(has("storageClassName")).storageClassName) |
   458  			del(.. | select(has("finalizers")).finalizers) |
   459  			del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
   460  			del(.. | select(has("volumeName")).volumeName) |
   461  			del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
   462  			del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
   463  			del(.spec.volumeMode) |
   464  			del(.spec.nodeName) |
   465  			del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
   466  			del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
   467  			del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
   468  			del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") |
   469  			del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
   470  			del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
   471  			del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
   472  			del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
   473  			del(.. | select(has("nodePort")).nodePort) |
   474  			del(.. | select(has("imagePullSecrets")).imagePullSecrets) |
   475  			del(.. | select(has("enableServiceLinks")).enableServiceLinks) |
   476  			del(.status) |
   477  			del(.spec.volumeClaimTemplates[].apiVersion) |
   478  			del(.spec.volumeClaimTemplates[].kind) |
   479  			del(.metadata.ownerReferences[].apiVersion) |
   480  			del(.. | select(has("controller-uid")).controller-uid) |
   481  			del(.. | select(has("preemptionPolicy")).preemptionPolicy) |
   482  			del(.spec.ipFamilies) |
   483  			del(.spec.ipFamilyPolicy) |
   484  			(.. | select(. == "policy/v1beta1")) = "policy/v1" |
   485  			del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") |
   486  			(.. | select(tag == "!!str")) |= sub("'$namespace'", "namespace") |
   487  			(.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") |
   488  			del(.. | select(has("annotations")).annotations | select(length==0)) |
   489  			del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - >${new_result}
   490  
   491  	diff -u ${expected_result} ${new_result}
   492  }
   493  
   494  get_client_pod() {
   495  	kubectl_bin get pods \
   496  		--selector=name=pxc-client \
   497  		-o 'jsonpath={.items[].metadata.name}'
   498  }
   499  
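        # Example (illustrative): run a query through the pxc-client pod:
        #   run_mysql "SELECT 1;" "-h cluster1-haproxy -uroot -proot_password"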
   500  run_mysql() {
   501  	local command="$1"
   502  	local uri="$2"
   503  
   504  	client_pod=$(get_client_pod)
   505  	wait_pod $client_pod 1>&2
   506  	set +o xtrace
   507  	kubectl_bin exec $client_pod -- \
   508  		bash -c "printf '%s\n' \"${command}\" | mysql -sN $uri" 2>&1 \
   509  		| sed -e 's/mysql: //' \
   510  		| (grep -v 'Using a password on the command line interface can be insecure.' || :)
   511  
   512  	set_debug
   513  }
   514  
   515  run_mysql_local() {
   516  	local command="$1"
   517  	local uri="$2"
   518  	local pod="$3"
   519  	local container_name="$4"
   520  	set +o xtrace
   521  	kubectl_bin exec $pod ${container_name:+-c $container_name} -- \
   522  		bash -c "printf \"$command\n\" | mysql -sN $uri" 2>&1 \
   523  		| sed -e 's/mysql: //' \
   524  		| (egrep -v 'Using a password on the command line interface can be insecure.|Defaulted container|Defaulting container name|see all of the containers in this pod' || :)
   525  	set_debug
   526  }
   527  
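        # Example (illustrative):
        #   compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h cluster1-pxc-0.cluster1-pxc -uroot -proot_password"
        # diffs the query output against $test_dir/compare/select-1.sql (or its -80 variant on 8.0 images).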
   528  compare_mysql_cmd() {
   529  	local command_id="$1"
   530  	local command="$2"
   531  	local uri="$3"
   532  	local postfix="$4"
   533  	local expected_result=${test_dir}/compare/${command_id}${postfix}.sql
   534  
   535  	if [[ $IMAGE_PXC =~ 8\.0 ]] && [ -f ${test_dir}/compare/${command_id}${postfix}-80.sql ]; then
   536  		expected_result=${test_dir}/compare/${command_id}${postfix}-80.sql
   537  	fi
   538  
   539  	run_mysql "$command" "$uri" \
   540  		>$tmp_dir/${command_id}.sql
   541  	if [ ! -s "$tmp_dir/${command_id}.sql" ]; then
   542  		sleep 20
   543  		run_mysql "$command" "$uri" \
   544  			>$tmp_dir/${command_id}.sql
   545  	fi
   546  	diff -u $expected_result $tmp_dir/${command_id}.sql
   547  }
   548  
   549  compare_mysql_cmd_local() {
   550  	local command_id="$1"
   551  	local command="$2"
   552  	local uri="$3"
   553  	local pod="$4"
   554  	local postfix="$5"
   555  	local container_name="$6"
   556  	local expected_result=${test_dir}/compare/${command_id}${postfix}.sql
   557  
   558  	if [[ $IMAGE_PXC =~ 8\.0 ]] && [ -f ${test_dir}/compare/${command_id}${postfix}-80.sql ]; then
   559  		expected_result=${test_dir}/compare/${command_id}${postfix}-80.sql
   560  	fi
   561  
   562  	run_mysql_local "$command" "$uri" "$pod" "$container_name" \
   563  		>$tmp_dir/${command_id}.sql
   564  	if [ ! -s "$tmp_dir/${command_id}.sql" ]; then
   565  		sleep 20
   566  		run_mysql_local "$command" "$uri" "$pod" "$container_name" \
   567  			>$tmp_dir/${command_id}.sql
   568  	fi
   569  	diff -u $expected_result $tmp_dir/${command_id}.sql
   570  }
   571  
   572  get_proxy_primary() {
   573  	local uri="$1"
   574  	local pod="$2"
   575  	local ip=$(run_mysql_local "SELECT hostname FROM runtime_mysql_servers WHERE hostgroup_id=11 AND status='ONLINE';" "$uri" "$pod" 'proxysql')
   576  
   577  	while [ $(echo "$ip" | wc -l) != 1 ]; do
   578  		sleep 1
   579  		ip=$(run_mysql_local "SELECT hostname FROM runtime_mysql_servers WHERE hostgroup_id=11 AND status='ONLINE';" "$uri" "$pod" 'proxysql')
   580  	done
   581  
   582  	echo $ip | cut -d'.' -f1
   583  }
   584  
   585  get_pod_name() {
   586  	local ip=$1
   587  	kubectl_bin get pods -o json | jq -r '.items[] | select(.status.podIP == "'$ip'") | .metadata.name'
   588  }
   589  
   590  get_pod_ip() {
   591  	local name=$1
   592  	kubectl_bin get pods -o json | jq -r '.items[] | select(.metadata.name == "'$name'") | .status.podIP'
   593  }
   594  
   595  compare_mysql_user() {
   596  	local uri="$1"
   597  	local postfix="$2"
   598  	local user=$(echo $uri | sed -e 's/.*-u//; s/ .*//')
   599  	local expected_result=${test_dir}/compare/$user$postfix.sql
   600  
   601  	if [[ $IMAGE_PXC =~ 8\.0 ]] && [ -f ${test_dir}/compare/$user$postfix-80.sql ]; then
   602  		expected_result=${test_dir}/compare/$user$postfix-80.sql
   603  	fi
   604  
   605  	(run_mysql "SHOW GRANTS;" "$uri" || :) \
   606  		| $sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//" \
   607  			>$tmp_dir/$user.sql
   608  	diff -u $expected_result $tmp_dir/$user.sql
   609  }
   610  
   611  compare_mysql_user_local() {
   612  	local uri="$1"
   613  	local pod="$2"
   614  	local postfix="$3"
   615  	local container_name="$4"
   616  	local user=$(echo $uri | sed -e 's/.*-u//; s/ .*//')
   617  	local expected_result=$test_dir/compare/$user$postfix.sql
   618  
   619  	if [[ $IMAGE_PXC =~ 8\.0 ]] && [ -f ${test_dir}/compare/$user$postfix-80.sql ]; then
   620  		expected_result=${test_dir}/compare/$user$postfix-80.sql
   621  	fi
   622  
   623  	(run_mysql_local "SHOW GRANTS;" "$uri" "$pod" "$container_name" || :) \
   624  		| $sed -E "s/'(10|192)[.][0-9][^']*'//; s/'[^']*[.]internal'//" \
   625  			>$tmp_dir/$user.sql
   626  	diff -u $expected_result $tmp_dir/$user.sql
   627  }
   628  
   629  get_pumba() {
   630  	kubectl_bin get pods \
   631  		--selector=name=pumba \
   632  		-o 'jsonpath={.items[].metadata.name}'
   633  }
   634  
   635  run_pumba() {
   636  	local cmd="$*"
   637  	kubectl_bin exec -it "$(get_pumba)" -- /pumba -l info ${cmd}
   638  }
   639  
   640  deploy_cert_manager() {
   641  	desc 'deploy cert manager'
   642  	kubectl_bin create namespace cert-manager || :
   643  	kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true || :
   644  	kubectl_bin apply -f "https://github.com/jetstack/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml" --validate=false 2>/dev/null || :
   645  	if [ "$OPENSHIFT" == "4.10" ]; then
   646  		oc delete scc restricted-seccomp || true
   647  		oc get scc restricted -o yaml | yq eval '.metadata.name = "restricted-seccomp" |
   648  		.seccompProfiles[0] = "runtime/default"' \
   649  			| oc create -f -
   650  		oc adm policy add-scc-to-user restricted-seccomp -z cert-manager
   651  		oc adm policy add-scc-to-user restricted-seccomp -z cert-manager-cainjector
   652  		oc adm policy add-scc-to-user restricted-seccomp -z cert-manager-webhook
   653  	fi
   654  	sleep 70
   655  }
   656  
   657  destroy() {
   658  	local namespace="$1"
   659  	local ignore_logs="${2:-true}"
   660  
   661  	desc 'destroy cluster/operator and all other resources'
   662  	if [ ${ignore_logs} == "false" -o "${DEBUG_TESTS}" == 1 ]; then
   663  		kubectl_bin logs ${OPERATOR_NS:+-n $OPERATOR_NS} $(get_operator_pod) \
   664  			| grep -v 'level=info' \
   665  			| grep -v 'the object has been modified' \
   666  			| grep -v 'get backup status: Job.batch' \
   667  			| $sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
   668  			| sort -u \
   669  			| tee $tmp_dir/operator.log || :
   670  	fi
   671  	#TODO: maybe will be enabled later
   672  	#diff $test_dir/compare/operator.log $tmp_dir/operator.log
   673  
   674  	kubectl get pxc --all-namespaces -o wide \
   675  		| grep -v 'NAMESPACE' \
   676  		| xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
   677  		|| :
   678  	kubectl_bin delete pxc --all --all-namespaces || :
   679  	kubectl_bin delete pxc-backup --all --all-namespaces || :
   680  	kubectl_bin delete pxc-restore --all --all-namespaces || :
   681  	kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook || :
   682  
   683  	kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml 2>/dev/null || :
   684  	if [ ! -z "$OPENSHIFT" ]; then
   685  		oc delete --grace-period=0 --force=true project "$namespace" &
   686  		if [ -n "$OPERATOR_NS" ]; then
   687  			oc delete --grace-period=0 --force=true project "$OPERATOR_NS" &
   688  		fi
   689  	else
   690  		kubectl_bin delete --grace-period=0 --force=true namespace "$namespace" &
   691  		if [ -n "$OPERATOR_NS" ]; then
   692  			kubectl_bin delete --grace-period=0 --force=true namespace "$OPERATOR_NS" &
   693  		fi
   694  	fi
   695  	rm -rf ${tmp_dir}
   696  }
   697  
   698  desc() {
   699  	set +o xtrace
   700  	local msg="$@"
   701  	printf "\n\n-----------------------------------------------------------------------------------\n"
   702  	printf "$msg"
   703  	printf "\n-----------------------------------------------------------------------------------\n\n"
   704  	set_debug
   705  }
   706  
   707  get_service_endpoint() {
   708  	local service=$1
   709  
   710  	local hostname=$(
   711  		kubectl_bin get service/$service -o json \
   712  			| jq '.status.loadBalancer.ingress[].hostname' \
   713  			| sed -e 's/^"//; s/"$//;'
   714  	)
   715  	if [ -n "$hostname" -a "$hostname" != "null" ]; then
   716  		echo $hostname
   717  		return
   718  	fi
   719  
   720  	local ip=$(
   721  		kubectl_bin get service/$service -o json \
   722  			| jq '.status.loadBalancer.ingress[].ip' \
   723  			| sed -e 's/^"//; s/"$//;'
   724  	)
   725  	if [ -n "$ip" -a "$ip" != "null" ]; then
   726  		echo $ip
   727  		return
   728  	fi
   729  
   730  	exit 1
   731  }
   732  
   733  get_service_ip() {
   734  	local service=$1
   735  	while (kubectl_bin get service/$service -o 'jsonpath={.spec.type}' 2>&1 || :) | grep -q NotFound; do
   736  		sleep 1
   737  	done
   738  	if [ "$(kubectl_bin get service/$service -o 'jsonpath={.spec.type}')" = "ClusterIP" ]; then
   739  		kubectl_bin get service/$service -o 'jsonpath={.spec.clusterIP}'
   740  		return
   741  	fi
   742  	until kubectl_bin get service/$service -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>&1 | egrep -q "hostname|ip"; do
   743  		sleep 1
   744  	done
   745  	kubectl_bin get service/$service -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
   746  	kubectl_bin get service/$service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
   747  }
   748  
   749  get_metric_values() {
   750  	local metric=$1
   751  	local instance=$2
   752  	local user_pass=$3
   753  	local start=$($date -u "+%s" -d "-1 minute")
   754  	local end=$($date -u "+%s")
   755  	local endpoint=$(get_service_endpoint monitoring-service)
   756  
   757  	local result=$(curl -s -k "https://${user_pass}@$endpoint/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28$metric%7Bnode_name%3D%7E%22$instance%22%7d%20or%20$metric%7Bnode_name%3D%7E%22$instance%22%7D%29&start=$start&end=$end&step=60" | jq '.data.result[0]')
   758  	if [ "$result" = "null" ]; then
   759  		echo "no values for metric $metric"
   760  		exit 1
   761  	fi
   762  	echo -n "$result" | jq '.values[][1]' \
   763  		| grep '^"[0-9]'
   764  
   765  }
   766  
   767  get_qan_values() {
   768  	local instance=$1
   769  	local start=$($date -u "+%Y-%m-%dT%H:%M:%S" -d "-30 minute")
   770  	local end=$($date -u "+%Y-%m-%dT%H:%M:%S")
   771  	local endpoint=$(get_service_endpoint monitoring-service)
   772  
   773  	local uuid=$(
   774  		curl -s -k "https://$endpoint/qan-api/instances?deleted=no" \
   775  			| jq '.[] | select(.Subsystem == "mysql" and .Name == "'$instance'") | .UUID' \
   776  			| sed -e 's/^"//; s/"$//;'
   777  	)
   778  
   779  	curl -s -k "https://$endpoint/qan-api/qan/profile/$uuid?begin=$start&end=$end&offset=0" \
   780  		| jq '.Query[].Fingerprint'
   781  }
   782  
   783  get_qan20_values() {
   784  	local instance=$1
   785  	local user_pass=$2
   786  	local start=$($date -u "+%Y-%m-%dT%H:%M:%S" -d "-30 minute")
   787  	local end=$($date -u "+%Y-%m-%dT%H:%M:%S")
   788  	local endpoint=$(get_service_endpoint monitoring-service)
   789  
   790  	cat >payload.json <<EOF
   791  {
   792     "columns":[
   793        "load",
   794        "num_queries",
   795        "query_time"
   796     ],
   797     "first_seen": false,
   798     "group_by": "queryid",
   799     "include_only_fields": [],
   800     "keyword": "",
   801     "labels": [
   802         {
   803             "key": "cluster",
   804             "value": ["pxc"]
   805     }],
   806     "limit": 10,
   807     "offset": 0,
   808     "order_by": "-load",
   809     "main_metric": "load",
   810     "period_start_from": "$($date -u -d '-12 hour' '+%Y-%m-%dT%H:%M:%S%:z')",
   811     "period_start_to": "$($date -u '+%Y-%m-%dT%H:%M:%S%:z')"
   812  }
   813  EOF
   814  
   815  	curl -s -k -XPOST -d @payload.json "https://${user_pass}@$endpoint/v0/qan/GetReport" \
   816  		| jq '.rows[].fingerprint'
   817  	rm -f payload.json
   818  }
   819  
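        # cat_config rewrites a cr.yaml-style manifest so it uses the images and API version
        # configured above; a typical pipe (mirroring apply_config) is, illustratively:
        #   cat_config "$test_dir/conf/$cluster.yml" | kubectl_bin apply -f -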
   820  cat_config() {
   821  	cat "$1" \
   822  		| $sed -e "s#apiVersion: pxc.percona.com/v.*\$#apiVersion: $API#" \
   823  		| $sed -e "s#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}\$#image: $IMAGE_PXC#" \
   824  		| $sed -e "s#image:.*\/percona-xtradb-cluster:.*\$#image: $IMAGE_PXC#" \
   825  		| $sed -e "s#image:.*-init\$#image: $IMAGE#" \
   826  		| $sed -e "s#image:.*-pmm\$#image: $IMAGE_PMM_CLIENT#" \
   827  		| $sed -e "s#image:.*-backup\$#image: $IMAGE_BACKUP#" \
   828  		| $sed -e "s#image:.*-proxysql\$#image: $IMAGE_PROXY#" \
   829  		| $sed -e "s#image:.*-haproxy\$#image: $IMAGE_HAPROXY#" \
   830  		| $sed -e "s#image:.*-logcollector\$#image: $IMAGE_LOGCOLLECTOR#" \
   831  		| $sed -e "s~minio-service.#namespace~minio-service.$namespace~" \
   832  		| $sed -e "s#apply:.*#apply: Never#"
   833  }
   834  
   835  apply_secrets() {
   836  	desc 'create secrets for cloud storages'
   837  	if [ -z "$SKIP_REMOTE_BACKUPS" ]; then
   838  		kubectl_bin apply \
   839  			-f $conf_dir/minio-secret.yml \
   840  			-f $conf_dir/cloud-secret.yml
   841  	else
   842  		kubectl_bin apply \
   843  			-f $conf_dir/minio-secret.yml
   844  	fi
   845  }
   846  
   847  apply_config() {
   848  	if [ -z "$SKIP_REMOTE_BACKUPS" ]; then
   849  		cat_config "$1" \
   850  			| kubectl_bin apply -f -
   851  	else
   852  		cat_config "$1" \
   853  			| yq eval 'del(.spec.backup.schedule.[1])' - \
   854  			| kubectl_bin apply -f -
   855  	fi
   856  }
   857  
   858  get_proxy() {
   859  	local target_cluster=${1}
   860  	if [[ "$(kubectl_bin get pxc ${target_cluster} -o 'jsonpath={.spec.haproxy.enabled}')" == "true" ]]; then
   861  		echo "${target_cluster}-haproxy"
   862  		return
   863  	fi
   864  	if [[ "$(kubectl_bin get pxc ${target_cluster} -o 'jsonpath={.spec.proxysql.enabled}')" == "true" ]]; then
   865  		echo "${target_cluster}-proxysql"
   866  		return
   867  	fi
   868  	echo "${target_cluster}-pxc"
   869  }
   870  
   871  get_proxy_engine() {
   872  	local cluster_name=$1
   873  	local cluster_proxy="$(get_proxy ${cluster_name})"
   874  	echo "${cluster_proxy//$cluster_name-/}"
   875  }
   876  
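        # Typical call (config path is illustrative; arguments 3..7 default to the values shown):
        #   spinup_pxc "$cluster" "$test_dir/conf/$cluster.yml" 3 10 "$conf_dir/secrets.yml" "$conf_dir/client.yml" 3306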
   877  spinup_pxc() {
   878  	local cluster=$1
   879  	local config=$2
   880  	local size="${3:-3}"
   881  	local sleep="${4:-10}"
   882  	local secretsFile="${5:-$conf_dir/secrets.yml}"
   883  	local pxcClientFile="${6:-$conf_dir/client.yml}"
   884  	local port="${7:-3306}"
   885  
   886  	desc 'create first PXC cluster'
   887  	kubectl_bin apply -f $secretsFile
   888  	apply_config "$pxcClientFile"
   889  	if [[ $IMAGE_PXC =~ 5\.7 ]] && [[ $cluster == 'demand-backup' || $cluster == 'demand-backup-cloud' ]]; then
   890  		cat_config "$config" \
   891  			| $sed '/\[sst\]/,+1d' \
   892  			| $sed 's|compress=lz4|compress|' \
   893  			| kubectl_bin apply -f -
   894  	else
   895  		apply_config "$config"
   896  	fi
   897  
   898  	desc "check if all $size Pods started"
   899  	local proxy=$(get_proxy "$cluster")
   900  	kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n ${namespace} || true
   901  	wait_for_running "$proxy" 1
   902  	wait_for_running "$cluster-pxc" "$size"
   903  	sleep $sleep
   904  
   905  	desc 'write data'
   906  	if [[ $IMAGE_PXC =~ 5\.7 ]] && [[ "$(is_keyring_plugin_in_use "$cluster")" ]]; then
   907  		encrypt='ENCRYPTION=\"Y\"'
   908  	fi
   909  	run_mysql \
   910  		"CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) $encrypt;" \
   911  		"-h $proxy -uroot -proot_password -P$port"
   912  	run_mysql \
   913  		'INSERT myApp.myApp (id) VALUES (100500)' \
   914  		"-h $proxy -uroot -proot_password -P$port"
   915  	sleep 30
   916  	for i in $(seq 0 $((size - 1))); do
   917  		compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-$i.$cluster-pxc -uroot -proot_password -P$port"
   918  	done
   919  
   920  	if [ "$(is_keyring_plugin_in_use "$cluster")" ]; then
   921  		table_must_be_encrypted "$cluster" "myApp"
   922  	fi
   923  
   924  }
   925  
   926  function is_table_encrypted() {
   927  	local cluster=$1
   928  	local table=$2
   929  	run_mysql \
   930  		"SELECT CREATE_OPTIONS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME=\\\"$table\\\";" \
   931  		"-h $cluster-proxysql -uroot -proot_password" \
   932  		| egrep -o "ENCRYPTION=('Y'|\"Y\")"
   933  }
   934  
   935  function is_keyring_plugin_in_use() {
   936  	local cluster=$1
   937  	kubectl_bin exec -it $cluster-pxc-0 -c pxc -- bash -c "cat /etc/mysql/node.cnf" \
   938  		| egrep -o "early-plugin-load=keyring_\w+.so"
   939  }
   940  
   941  function table_must_not_be_encrypted() {
   942  	local cluster=$1
   943  	local table=$2
   944  	if is_table_encrypted "$cluster" "$table"; then
   945  		echo "error: table is encrypted"
   946  		exit 1
   947  	fi
   948  }
   949  
   950  function table_must_be_encrypted() {
   951  	desc "check table encryption"
   952  	local cluster=$1
   953  	local table=$2
   954  	if ! is_table_encrypted "$cluster" "$table"; then
   955  		echo "error: table is not encrypted"
   956  		exit 1
   957  	fi
   958  }
   959  
   960  function keyring_plugin_must_be_in_use() {
   961  	local cluster=$1
   962  	desc "check keyring plugin usage"
   963  	if ! is_keyring_plugin_in_use "$cluster"; then
   964  		echo "error: keyring_plugin is not used"
   965  		exit 1
   966  	fi
   967  }
   968  
   969  function keyring_plugin_must_not_be_in_use() {
   970  	local cluster=$1
   971  	if is_keyring_plugin_in_use "$cluster"; then
   972  		echo "error: keyring_plugin is used"
   973  		exit 1
   974  	fi
   975  }
   976  
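        # kubectl_bin is a thin wrapper around kubectl: it captures stdout/stderr, can retry a
        # failed call (up to 3 attempts), and returns kubectl's exit status. Tests use it instead
        # of calling kubectl directly.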
   977  kubectl_bin() {
   978  	local LAST_OUT="$(mktemp)"
   979  	local LAST_ERR="$(mktemp)"
   980  	local exit_status=0
   981  	for i in $(seq 0 2); do
   982  		set +e
   983  		kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
   984  		exit_status=$?
   985  		set -e
   986  		if [ ${exit_status} != 0 ] && [ "${DEBUG_TESTS}" == 1 ]; then
   987  			sleep "$((${timeout:-4} * i))" # back off (default 4s per attempt) before retrying
   988  		else
   989  			break
   990  		fi
   991  	done
   992  	cat "$LAST_OUT"
   993  	cat "$LAST_ERR" >&2
   994  	rm "$LAST_OUT" "$LAST_ERR"
   995  	return ${exit_status}
   996  }
   997  
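        # Example (as used for minio below): `retry 10 60 helm install ...` re-runs the command
        # up to 10 times with a 60s pause between attempts.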
   998  retry() {
   999  	local max=$1
  1000  	local delay=$2
  1001  	shift 2 # cut delay and max args
  1002  	local n=1
  1003  
  1004  	until "$@"; do
  1005  		if [[ $n -ge $max ]]; then
  1006  			echo "The command '$@' has failed after $n attempts."
  1007  			exit 1
  1008  		fi
  1009  		let n+=1
  1010  		sleep $delay
  1011  	done
  1012  }
  1013  
  1014  check_pvc_md5() {
  1015  	desc 'check backup file md5sum'
  1016  	apply_config "$test_dir/conf/client.yml"
  1017  	sleep 10
  1018  	bak_client_pod=$(
  1019  		kubectl_bin get pods \
  1020  			--selector=name=backup-client \
  1021  			-o 'jsonpath={.items[].metadata.name}'
  1022  	)
  1023  	wait_pod $bak_client_pod
  1024  	kubectl_bin exec $bak_client_pod -- \
  1025  		bash -c "cd /backup; md5sum -c md5sum.txt"
  1026  	kubectl_bin delete \
  1027  		-f $test_dir/conf/client.yml
  1028  }
  1029  
  1030  run_backup() {
  1031  	local cluster=$1
  1032  	local backup1=$2
  1033  
  1034  	desc "make backup $backup1"
  1035  	kubectl_bin apply \
  1036  		-f $test_dir/conf/$backup1.yml
  1037  	wait_backup $backup1
  1038  }
  1039  
  1040  run_recovery_check() {
  1041  	local cluster=$1
  1042  	local backup1=$2
  1043  	local proxy=$(get_proxy_engine $cluster)
  1044  
  1045  	desc "recover backup $backup1"
  1046  	cat "$test_dir/conf/restore-${backup1}.yaml" \
  1047  		| $sed -e "s~minio-service.#namespace~minio-service.$namespace~" \
  1048  		| kubectl_bin apply -f -
  1049  	wait_backup_restore ${backup1}
  1050  	kubectl_bin logs job/restore-job-${backup1}-${cluster}
  1051  	wait_for_running "$cluster-$proxy" 1
  1052  	wait_for_running "$cluster-pxc" 3
  1053  
  1054  	sleep 35
  1055  	desc 'check data after backup'
  1056  	compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-0.$cluster-pxc -uroot -proot_password"
  1057  	compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-1.$cluster-pxc -uroot -proot_password"
  1058  	compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password"
  1059  
  1060  	if [ "$backup1" != "on-demand-backup-minio" ]; then
  1061  		desc 'copy backup'
  1062  		bash $src_dir/deploy/backup/copy-backup.sh $backup1 $tmp_dir/backup
  1063  	fi
  1064  }
  1065  
  1066  function vault_tls() {
  1067  	local name=${1:-vault-service}
  1068  
  1069  	SERVICE=$name
  1070  	NAMESPACE=$name
  1071  	SECRET_NAME=$name
  1072  	CSR_NAME=vault-csr-${RANDOM}
  1073  
  1074  	if version_gt "1.22"; then
  1075  		CSR_API_VER="v1"
  1076  		if [ "$EKS" = 1 ]; then
  1077  			CSR_SIGNER="  signerName: beta.eks.amazonaws.com/app-serving"
  1078  		else
  1079  			CSR_SIGNER="  signerName: kubernetes.io/kubelet-serving"
  1080  		fi
  1081  	else
  1082  		CSR_API_VER="v1beta1"
  1083  		CSR_SIGNER=""
  1084  	fi
  1085  
  1086  	openssl genrsa -out ${tmp_dir}/vault.key 2048
  1087  	cat <<EOF >${tmp_dir}/csr.conf
  1088  [req]
  1089  req_extensions = v3_req
  1090  distinguished_name = req_distinguished_name
  1091  [req_distinguished_name]
  1092  [ v3_req ]
  1093  basicConstraints = CA:FALSE
  1094  keyUsage = nonRepudiation, digitalSignature, keyEncipherment
  1095  extendedKeyUsage = serverAuth
  1096  subjectAltName = @alt_names
  1097  [alt_names]
  1098  DNS.1 = ${SERVICE}
  1099  DNS.2 = ${SERVICE}.${NAMESPACE}
  1100  DNS.3 = ${SERVICE}.${NAMESPACE}.svc
  1101  DNS.4 = ${SERVICE}.${NAMESPACE}.svc.cluster.local
  1102  IP.1 = 127.0.0.1
  1103  EOF
  1104  
  1105  	if version_gt "1.22"; then
  1106  		openssl req -new -key ${tmp_dir}/vault.key -subj "/CN=system:node:${SERVICE}.${NAMESPACE}.svc;/O=system:nodes" -out ${tmp_dir}/server.csr -config ${tmp_dir}/csr.conf
  1107  	else
  1108  		openssl req -new -key ${tmp_dir}/vault.key -subj "/CN=${SERVICE}.${NAMESPACE}.svc" -out ${tmp_dir}/server.csr -config ${tmp_dir}/csr.conf
  1109  	fi
  1110  
  1111  	cat <<EOF >${tmp_dir}/csr.yaml
  1112  apiVersion: certificates.k8s.io/${CSR_API_VER}
  1113  kind: CertificateSigningRequest
  1114  metadata:
  1115    name: ${CSR_NAME}
  1116  spec:
  1117    groups:
  1118    - system:authenticated
  1119    request: $(cat ${tmp_dir}/server.csr | base64 | tr -d '\n')
  1120  ${CSR_SIGNER}
  1121    usages:
  1122    - digital signature
  1123    - key encipherment
  1124    - server auth
  1125  EOF
  1126  
  1127  	kubectl_bin create -f ${tmp_dir}/csr.yaml
  1128  	sleep 10
  1129  	kubectl_bin certificate approve ${CSR_NAME}
  1130  	kubectl_bin get csr ${CSR_NAME} -o jsonpath='{.status.certificate}' >${tmp_dir}/serverCert
  1131  	openssl base64 -in ${tmp_dir}/serverCert -d -A -out ${tmp_dir}/vault.crt
  1132  	kubectl_bin config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 -d >${tmp_dir}/vault.ca
  1133  	if [[ -n ${OPENSHIFT} ]]; then
  1134  		if [[ "x$(kubectl_bin get namespaces | awk '{print $1}' | grep openshift-kube-controller-manager-operator)" != "x" ]]; then
  1135  			#Detecting openshift 4+
  1136  			kubectl_bin -n openshift-kube-controller-manager-operator get secret csr-signer -o jsonpath='{.data.tls\.crt}' \
  1137  				| base64 -d >${tmp_dir}/vault.ca
  1138  		else
  1139  			CA_SECRET_NAME=$(kubectl_bin -n default get secrets \
  1140  				| grep default \
  1141  				| grep service-account-token \
  1142  				| head -n 1 \
  1143  				| awk {'print $1'})
  1144  			kubectl_bin -n default get secret ${CA_SECRET_NAME} -o jsonpath='{.data.ca\.crt}' \
  1145  				| base64 -d >${tmp_dir}/vault.ca
  1146  		fi
  1147  	fi
  1148  	kubectl_bin create secret generic ${SECRET_NAME} \
  1149  		--namespace ${NAMESPACE} \
  1150  		--from-file=vault.key=${tmp_dir}/vault.key \
  1151  		--from-file=vault.crt=${tmp_dir}/vault.crt \
  1152  		--from-file=vault.ca=${tmp_dir}/vault.ca
  1153  }
  1154  
  1155  start_vault() {
  1156  	name=${1:-vault-service}
  1157  	protocol=${2:-http}
  1158  
  1159  	local platform=kubernetes
  1160  
  1161  	if [[ -n ${OPENSHIFT} ]]; then
  1162  		platform=openshift
  1163  		oc patch clusterrole system:auth-delegator --type='json' -p '[{"op":"add","path":"/rules/-", "value":{"apiGroups":["security.openshift.io"], "attributeRestrictions":null, "resourceNames": ["privileged"], "resources":["securitycontextconstraints"],"verbs":["use"]}}]'
  1164  	fi
  1165  	create_namespace "$name" "skip_clean"
  1166  	deploy_helm "$name"
  1167  	helm uninstall "$name" || :
  1168  
  1169  	desc "install Vault $name"
  1170  
  1171  	if [ $protocol == "https" ]; then
  1172  		vault_tls "$name"
  1173  		helm install $name hashicorp/vault \
  1174  			--disable-openapi-validation \
  1175  			--version 0.16.1 \
  1176  			--namespace "$name" \
  1177  			--set dataStorage.enabled=false \
  1178  			--set global.tlsDisable=false \
  1179  			--set global.platform="${platform}" \
  1180  			--set server.extraVolumes[0].type=secret \
  1181  			--set server.extraVolumes[0].name=$name \
  1182  			--set server.extraEnvironmentVars.VAULT_CACERT=/vault/userconfig/$name/vault.ca \
  1183  			--set server.standalone.config="
  1184  listener \"tcp\" {
  1185      address = \"[::]:8200\"
  1186      cluster_address = \"[::]:8201\"
  1187      tls_cert_file = \"/vault/userconfig/$name/vault.crt\"
  1188      tls_key_file  = \"/vault/userconfig/$name/vault.key\"
  1189      tls_client_ca_file = \"/vault/userconfig/$name/vault.ca\"
  1190  }
  1191  
  1192  storage \"file\" {
  1193      path = \"/vault/data\"
  1194  }"
  1195  
  1196  	else
  1197  		helm install $name hashicorp/vault \
  1198  			--disable-openapi-validation \
  1199  			--version 0.16.1 \
  1200  			--namespace "$name" \
  1201  			--set dataStorage.enabled=false \
  1202  			--set global.platform="${platform}"
  1203  	fi
  1204  
  1205  	if [[ -n ${OPENSHIFT} ]]; then
  1206  		oc patch clusterrole $name-agent-injector-clusterrole --type='json' -p '[{"op":"add","path":"/rules/-", "value":{"apiGroups":["security.openshift.io"], "attributeRestrictions":null, "resourceNames": ["privileged"], "resources":["securitycontextconstraints"],"verbs":["use"]}}]'
  1207  		oc adm policy add-scc-to-user privileged $name-agent-injector
  1208  	fi
  1209  
  1210  	set +o xtrace
  1211  	retry=0
  1212  	echo -n pod/$name-0
  1213  	until kubectl_bin get pod/$name-0 -o 'jsonpath={.status.containerStatuses[0].state}' 2>/dev/null | grep 'running'; do
  1214  		echo -n .
  1215  		sleep 1
  1216  		let retry+=1
  1217  		if [ "$retry" -ge 480 ]; then
  1218  			kubectl_bin describe pod/$name-0
  1219  			kubectl_bin logs $name-0
  1220  			echo max retry count "$retry" reached. something went wrong with vault
  1221  			exit 1
  1222  		fi
  1223  	done
  1224  	set_debug
  1225  
  1226  	kubectl_bin exec -it $name-0 -- vault operator init -tls-skip-verify -key-shares=1 -key-threshold=1 -format=json >"$tmp_dir/$name"
  1227  	unsealKey=$(jq -r ".unseal_keys_b64[]" <"$tmp_dir/$name")
  1228  	token=$(jq -r ".root_token" <"$tmp_dir/$name")
  1229  	sleep 10
  1230  
  1231  	kubectl_bin exec -it $name-0 -- vault operator unseal -tls-skip-verify "$unsealKey"
  1232  	kubectl_bin exec -it $name-0 -- \
  1233  		sh -c "export VAULT_TOKEN=$token && export VAULT_LOG_LEVEL=trace \
  1234                  && vault secrets enable --version=1 -tls-skip-verify -path=secret kv \
  1235                  && vault audit enable file file_path=/vault/vault-audit.log"
  1236  	sleep 10
  1237  
  1238  	cat "$conf_dir/vault-secret.yaml" \
  1239  		| sed -e "s/#token/$token/" \
  1240  		| sed -e "s/#vault_url/$protocol:\/\/$name.$name.svc.cluster.local:8200/" \
  1241  		| sed -e "s/#secret/secret/" >"${tmp_dir}/vault-secret.yaml"
  1242  	if [ $protocol == "https" ]; then
  1243  		sed -e 's/^/    /' ${tmp_dir}/vault.ca >${tmp_dir}/vault.new.ca
  1244  		$sed -i "s/#vault_ca/vault_ca/" "${tmp_dir}/vault-secret.yaml"
  1245  		$sed -i "/#certVal/r ${tmp_dir}/vault.new.ca" "${tmp_dir}/vault-secret.yaml"
  1246  		$sed -i "/#certVal/d" "${tmp_dir}/vault-secret.yaml"
  1247  	else
  1248  		$sed -i "/#vault_ca/d" "${tmp_dir}/vault-secret.yaml"
  1249  	fi
  1250  
  1251  	kubectl_bin apply --namespace="$namespace" -f ${tmp_dir}/vault-secret.yaml
  1252  
  1253  	kubectl_bin config set-context "$(kubectl_bin config current-context)" --namespace="$namespace"
  1254  }
  1255  
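        # Example (illustrative): `start_minio` deploys a standalone MinIO release named
        # minio-service and pre-creates the s3://operator-testing bucket; pass a TLS secret
        # name as the first argument to serve it over https.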
  1256  start_minio() {
  1257  	deploy_helm $namespace
  1258  	local cert_secret="$1"
  1259  
  1260  	local endpoint="http://minio-service:9000"
  1261  	local minio_args=(
  1262  		--version 5.0.14
  1263  		--set replicas=1
  1264  		--set mode=standalone
  1265  		--set resources.requests.memory=256Mi
  1266  		--set rootUser=rootuser
  1267  		--set rootPassword=rootpass123
  1268  		--set "users[0].accessKey=some-access-key"
  1269  		--set "users[0].secretKey=some-secret-key"
  1270  		--set "users[0].policy=consoleAdmin"
  1271  		--set service.type=ClusterIP
  1272  		--set configPathmc=/tmp/
  1273  		--set securityContext.enabled=false
  1274  		--set persistence.size=2G
  1275  	)
  1276  	if [[ -n $cert_secret ]]; then
  1277  		endpoint="https://minio-service:9000"
  1278  		minio_args+=(
  1279  			--set tls.enabled=true
  1280  			--set tls.certSecret="$cert_secret"
  1281  			--set tls.publicCrt=tls.crt
  1282  			--set tls.privateKey=tls.key
  1283  		)
  1284  	fi
  1285  
  1286  	desc 'install Minio'
  1287  	helm uninstall minio-service || :
  1288  	retry 10 60 helm install \
  1289  		$HELM_ARGS \
  1290  		minio-service \
  1291  		"${minio_args[@]}" \
  1292  		minio/minio
  1293  	sleep 30
  1294  	MINIO_POD=$(kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}')
  1295  	wait_pod $MINIO_POD
  1296  
  1297  	kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
  1298  		/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
  1299  		/usr/bin/aws --endpoint-url "$endpoint" --no-verify-ssl s3 mb s3://operator-testing
  1300  }
  1301  
  1302  deploy_chaos_mesh() {
  1303  	local chaos_mesh_ns=$1
  1304  
  1305  	destroy_chaos_mesh
  1306  
  1307  	desc 'install chaos-mesh'
  1308  	helm repo add chaos-mesh https://charts.chaos-mesh.org
  1309  	helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${chaos_mesh_ns} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
  1310  	sleep 10
  1311  }
  1312  
  1313  destroy_chaos_mesh() {
  1314  	local chaos_mesh_ns=$(helm list --all-namespaces --filter chaos-mesh | tail -n1 | awk -F' ' '{print $2}' | sed 's/NAMESPACE//')
  1315  
  1316  	if [ -n "${chaos_mesh_ns}" ]; then
  1317  		helm uninstall --wait --timeout 60s chaos-mesh --namespace ${chaos_mesh_ns} || :
  1318  	fi
  1319  	timeout 30 kubectl delete MutatingWebhookConfiguration $(kubectl get MutatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
  1320  	timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
  1321  	timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'validate-auth' | awk '{print $1}') || :
  1322  	for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
  1323  		kubectl get ${i} --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace \
  1324  			| while read -r line; do
  1325  				local kind=$(echo "$line" | awk '{print $1}')
  1326  				local name=$(echo "$line" | awk '{print $2}')
  1327  				local namespace=$(echo "$line" | awk '{print $3}')
  1328  				kubectl patch $kind $name -n $namespace --type=merge -p '{"metadata":{"finalizers":[]}}' || :
  1329  			done
  1330  		timeout 30 kubectl delete ${i} --all --all-namespaces || :
  1331  	done
  1332  	timeout 30 kubectl delete crd $(kubectl get crd | grep 'chaos-mesh.org' | awk '{print $1}') || :
  1333  	timeout 30 kubectl delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'chaos-mesh' | awk '{print $1}') || :
  1334  	timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep 'chaos-mesh' | awk '{print $1}') || :
  1335  }
  1336  
  1337  patch_secret() {
  1338  	local secret=$1
  1339  	local key=$2
  1340  	local value=$3
  1341  
  1342  	kubectl_bin patch secret $secret -p="{\"data\":{\"$key\": \"$value\"}}"
  1343  }
  1344  
  1345  getSecretData() {
  1346  	local secretName=$1
  1347  	local dataKey=$2
  1348  	kubectl_bin get secrets/${secretName} --template={{.data.${dataKey}}} \
  1349  		| base64 --decode
  1350  }
  1351  
  1352  checkTLSSecret() {
  1353  	local secretName=$1
  1354  	local dataKey=$2
  1355  	local secretData=$(kubectl_bin get secrets/${secretName} -o json | jq '.data["'${dataKey}'"]')
  1356  	if [ -z "$secretData" ]; then
  1357  		exit 1
  1358  	fi
  1359  }
  1360  
  1361  tlsSecretsShouldExist() {
  1362  	local secretName=$1
  1363  	checkTLSSecret "$secretName" 'ca.crt'
  1364  	checkTLSSecret "$secretName" 'tls.crt'
  1365  	checkTLSSecret "$secretName" 'tls.key'
  1366  }
  1367  
  1368  function check_pxc_liveness() {
  1369  	local cluster="$1"
  1370  	local cluster_size="$2"
  1371  	wait_cluster_consistency "${cluster}" "${cluster_size}"
  1372  
  1373  	wait_for_running "${cluster}-pxc" "${cluster_size}"
  1374  
  1375  	for i in $(seq 0 $((cluster_size - 1))); do
  1376  		compare_mysql_cmd "select-1" "SELECT * from myApp.myApp;" "-h ${cluster}-pxc-${i}.${cluster}-pxc -uroot -proot_password"
  1377  	done
  1378  }
  1379  
  1380  function compare_generation() {
  1381  	local generation="$1"
  1382  	local proxy="$2"
  1383  	local cluster="$3"
  1384  	local current_generation
  1385  
  1386  	if [[ ${proxy} == "haproxy" ]]; then
  1387  		containers=(pxc haproxy)
  1388  	else
  1389  		containers=(pxc proxysql)
  1390  	fi
  1391  	for container in "${containers[@]}"; do
  1392  		current_generation="$(kubectl_bin get statefulset "${cluster}-${container}" -o jsonpath='{.metadata.generation}')"
  1393  		if [[ ${generation} != "${current_generation}" ]]; then
  1394  			echo "Generation for resource ${container} is: ${current_generation}, but should be: ${generation}"
  1395  			exit 1
  1396  		fi
  1397  	done
  1398  }
  1399  
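# Download rbac.yaml (or cw-rbac.yaml) for the given git tag from GitHub,
# point it at the operator namespace and apply it.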
  1400  function apply_rbac_gh() {
	local operator_namespace="${OPERATOR_NS:-pxc-operator}"
	local rbac="${1:-rbac}"
  1403  	local git_tag="$2"
  1404  
  1405  	curl -s "https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/${git_tag}/deploy/${rbac}.yaml" >"${tmp_dir}/rbac_${git_tag}.yaml"
  1406  	$sed -i -e "s^namespace: .*^namespace: ${operator_namespace}^" "${tmp_dir}/rbac_${git_tag}.yaml"
  1407  	kubectl_bin apply -f "${tmp_dir}/rbac_${git_tag}.yaml"
  1408  }
  1409  
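# Deploy an operator release taken from GitHub by tag: apply the CRD unless a
# matching CRD version is already present, apply RBAC (cluster-wide when
# OPERATOR_NS is set), then start the operator deployment with telemetry
# disabled and DEBUG logging, using ${IMAGE} as the operator image.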
  1410  function deploy_operator_gh() {
  1411  	local git_tag="$1"
  1412  
  1413  	desc 'start PXC operator'
  1414  	if [[ -n $(kubectl_bin get crds -o jsonpath='{.items[?(@.metadata.name == "perconaxtradbclusters.pxc.percona.com")].metadata.name}') ]] \
  1415  		&& [[ -n $(kubectl_bin get crd/perconaxtradbclusters.pxc.percona.com -o jsonpath='{.spec.versions[?(@.name == "'"${git_tag//\./-}"'")].name}') ]]; then
  1416  		echo "Target CRD for ${git_tag} is in place"
  1417  	else
		curl -s "https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/${git_tag}/deploy/crd.yaml" >"${tmp_dir}/crd_${git_tag}.yaml"
		kubectl_bin apply --server-side --force-conflicts -f "${tmp_dir}/crd_${git_tag}.yaml"
  1419  	fi
  1420  
  1421  	local rbac_yaml="rbac"
  1422  	local operator_yaml="operator.yaml"
  1423  	if [ -n "${OPERATOR_NS}" ]; then
  1424  		rbac_yaml="cw-rbac"
  1425  		operator_yaml="cw-operator.yaml"
  1426  	fi
  1427  	apply_rbac_gh "${rbac_yaml}" "${git_tag}"
  1428  	curl -s "https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/${git_tag}/deploy/${operator_yaml}" >"${tmp_dir}/${operator_yaml}_${git_tag}.yaml"
  1429  
  1430  	cat "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" \
  1431  		| sed -e "s^image: .*^image: ${IMAGE}^" \
  1432  		| yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' \
  1433  		| yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' \
  1434  		| kubectl_bin apply ${OPERATOR_NS:+-n $OPERATOR_NS} -f -
  1435  
  1436  	sleep 2
  1437  	wait_pod "$(get_operator_pod)"
  1438  }
  1439  
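# Create the test namespace(s), deploy the operator from a GitHub tag
# (into OPERATOR_NS when it is set) and apply the test secrets.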
  1440  function create_infra_gh() {
  1441  	local ns="$1"
  1442  	local git_tag="$2"
  1443  
  1444  	if [ -n "${OPERATOR_NS}" ]; then
  1445  		create_namespace "${OPERATOR_NS}"
  1446  		deploy_operator_gh "${git_tag}"
  1447  		create_namespace "${ns}"
  1448  	else
  1449  		create_namespace "${ns}"
  1450  		deploy_operator_gh "${git_tag}"
  1451  	fi
  1452  	apply_secrets
  1453  }
  1454  
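# Render deploy/cr.yaml of the given git tag into ${cr_yaml}: set the cluster
# name, sizes, secrets and minio storage, and enable either haproxy or proxysql
# depending on ${proxy}.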
  1455  function prepare_cr_yaml() {
  1456  	local cr_yaml="$1"
  1457  	local proxy="$2"
  1458  	local cluster="$3"
  1459  	local cluster_size="$4"
  1460  	local git_tag="$5"
  1461  
  1462  	# spinup function expects images to have suffix like "-pxc"
  1463  	# to replace them with images from environment variables
  1464  	curl -s "https://raw.githubusercontent.com/percona/percona-xtradb-cluster-operator/${git_tag}/deploy/cr.yaml" \
  1465  		| yq eval "
  1466  			.metadata.name = \"${cluster}\" |
  1467  			.spec.secretsName = \"my-cluster-secrets\" |
  1468  			.spec.vaultSecretName = \"some-name-vault\" |
  1469  			.spec.sslSecretName = \"some-name-ssl\" |
  1470  			.spec.sslInternalSecretName = \"some-name-ssl-internal\" |
  1471  			.spec.upgradeOptions.apply = \"disabled\" |
  1472  			.spec.pxc.size = ${cluster_size} |
  1473  			.spec.proxysql.size = ${cluster_size} |
  1474  			.spec.haproxy.size = ${cluster_size} |
  1475  			.spec.pxc.image = \"-pxc\" |
  1476  			.spec.proxysql.image = \"-proxysql\" |
  1477  			.spec.haproxy.image = \"-haproxy\" |
  1478  			.spec.backup.image = \"-backup\" |
  1479  			.spec.backup.storages.minio.s3.credentialsSecret = \"minio-secret\" |
  1480  			.spec.backup.storages.minio.s3.region = \"us-east-1\" |
  1481  			.spec.backup.storages.minio.s3.bucket = \"operator-testing\" |
  1482  			.spec.backup.storages.minio.s3.endpointUrl = \"http://minio-service.#namespace:9000/\" |
  1483  			.spec.backup.storages.minio.type = \"s3\"
  1484  		" - >"${cr_yaml}"
  1485  	if [[ ${proxy} == "haproxy" ]]; then
  1486  		yq -i eval '
  1487  			.spec.haproxy.enabled = true |
  1488  			.spec.proxysql.enabled = false
  1489  		' "${cr_yaml}"
  1490  	else
  1491  		yq -i eval '
  1492  			.spec.haproxy.enabled = false |
  1493  			.spec.proxysql.enabled = true
  1494  		' "${cr_yaml}"
  1495  	fi
  1496  }
  1497  
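# JSON-patch helper: remove a field from an object.
# Hypothetical example: kpatch_delete_field pxc some-name /spec/pause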
  1498  function kpatch_delete_field() {
  1499  	local type=$1
  1500  	local name=$2
  1501  	local path=$3
  1502  
  1503  	kubectl_bin patch $type $name --type=json -p "[{\"op\": \"remove\", \"path\": \"$path\"}]" >/dev/null
  1504  }
  1505  
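# JSON-patch helper: replace a field of an object with a new value.
# Hypothetical example: kpatch_set_field pxc some-name /spec/pxc/size 5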
  1506  function kpatch_set_field() {
  1507  	local type=$1
  1508  	local name=$2
  1509  	local path=$3
  1510  	local value=$4
  1511  
  1512  	kubectl_bin patch $type $name --type=json -p "[{\"op\": \"replace\", \"path\": \"$path\", \"value\": $value}]" >/dev/null
  1513  }
  1514  
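# Poll the given URL until it returns HTTP 200 (the backup object exists in the
# bucket); give up after 30 attempts (~5 minutes).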
  1515  function check_backup_existence() {
	local path=$1
	local storage_name=$2
	local retry=0
  1519  	until [[ $(curl -sw '%{http_code}' -o /dev/null "$path") -eq 200 ]]; do
  1520  		if [ $retry -ge 30 ]; then
  1521  			echo "max retry count $retry reached. something went wrong with operator or kubernetes cluster"
  1522  			echo "Backup was not found in bucket -- $storage_name"
  1523  			exit 1
  1524  		fi
  1525  		echo "waiting for backup in $storage_name"
  1526  		sleep 10
  1527  		((retry += 1))
  1528  	done
  1529  }
  1530  
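# Poll the given URL until the object is gone, i.e. the storage answers
# 403, 404 or 400; give up after 30 attempts (~5 minutes).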
function check_backup_deletion() {
	local path=$1
	local storage_name=$2
	local retry=0
	local http_code

	http_code=$(curl -sw '%{http_code}' -o /dev/null "$path")
	until [[ ${http_code} -eq 403 ]] || [[ ${http_code} -eq 404 ]] || [[ ${http_code} -eq 400 ]]; do
		if [ $retry -ge 30 ]; then
			echo "max retry count $retry reached. something went wrong with operator or kubernetes cluster"
			echo "Backup was not removed from bucket -- $storage_name"
			exit 1
		fi
		echo "waiting for backup deletion $storage_name"
		sleep 10
		((retry += 1))
		http_code=$(curl -sw '%{http_code}' -o /dev/null "$path")
	done
}
  1546  
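# Scan the logs of every pod in the test namespace (and in OPERATOR_NS when it
# is set) for secret values, both base64-encoded and decoded, skipping
# certificate/key material and PMM containers, and report any password that
# shows up in the logs.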
  1547  check_passwords_leak() {
  1548  	local secrets
  1549  	local passwords
  1550  	local pods
  1551  
  1552  	secrets=$(kubectl_bin get secrets -o json | jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or endswith(".sql") or contains("release") or contains("namespace") or contains("AWS_ACCESS_KEY_ID") or contains("AZURE_STORAGE_ACCOUNT_NAME")) | not) | .value')
  1553  	echo secrets=$secrets
  1554  
  1555  	passwords="$(for i in $secrets; do
  1556  		base64 -d <<<$i
  1557  		echo
  1558  	done) $secrets"
  1559  	echo passwords=$passwords
  1560  
  1561  	pods=$(kubectl_bin get pods -o name | awk -F "/" '{print $2}')
  1562  	echo pods=$pods
  1563  
  1564  	collect_logs() {
  1565  		local containers
  1566  		local count
  1567  
  1568  		NS=$1
  1569  		for p in $pods; do
  1570  			if [[ $p == "monitoring-0" ]]; then
  1571  				continue
  1572  			fi
  1573  			containers=$(kubectl_bin -n "$NS" get pod $p -o jsonpath='{.spec.containers[*].name}')
  1574  			for c in $containers; do
  1575  				# temporary, because of: https://jira.percona.com/browse/PMM-8357
  1576  				if [[ $c =~ "pmm" ]]; then
  1577  					continue
  1578  				fi
  1579  				kubectl_bin -n "$NS" logs $p -c $c >${tmp_dir}/logs_output-$p-$c.txt
  1580  				echo logs saved in: ${tmp_dir}/logs_output-$p-$c.txt
  1581  				for pass in $passwords; do
  1582  					echo trying password: $pass
  1583  					count=$(grep -c --fixed-strings -- "$pass" ${tmp_dir}/logs_output-$p-$c.txt || :)
  1584  					if [[ $count != 0 ]]; then
  1585  						echo leaked password $pass is found in log ${tmp_dir}/logs_output-$p-$c.txt
  1586  						echo the content of file ${tmp_dir}/logs_output-$p-$c.txt is:
  1587  						echo =========================================================
  1588  						cat ${tmp_dir}/logs_output-$p-$c.txt
  1589  						false
  1590  					fi
  1591  				done
  1592  			done
  1593  			echo
  1594  		done
  1595  	}
  1596  
  1597  	collect_logs $namespace
  1598  	if [ -n "$OPERATOR_NS" ]; then
  1599  		pods=$(kubectl_bin -n "${OPERATOR_NS}" get pods -o name | awk -F "/" '{print $2}')
  1600  		collect_logs $OPERATOR_NS
  1601  	fi
  1602  }
  1603  
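# Install the PMM server Helm chart from percona-charts; on OpenShift, first
# create the pmm-server service account, grant it the privileged SCC and the
# operator (cluster)role, and pass the OpenShift-specific chart values.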
  1604  deploy_pmm_server() {
	if [ -n "$OPENSHIFT" ]; then
  1606  		platform=openshift
  1607  		oc create sa pmm-server
  1608  		oc adm policy add-scc-to-user privileged -z pmm-server
  1609  		if [[ $OPERATOR_NS ]]; then
  1610  			timeout 30 oc delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'pmm-pxc-operator-' | awk '{print $1}') || :
  1611  			oc create clusterrolebinding pmm-pxc-operator-cluster-wide --clusterrole=percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server
  1612  			oc patch clusterrole/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]' -n $OPERATOR_NS
  1613  		else
  1614  			oc create rolebinding pmm-pxc-operator-namespace-only --role percona-xtradb-cluster-operator --serviceaccount=$namespace:pmm-server
  1615  			oc patch role/percona-xtradb-cluster-operator --type json -p='[{"op":"add","path": "/rules/-","value":{"apiGroups":["security.openshift.io"],"resources":["securitycontextconstraints"],"verbs":["use"],"resourceNames":["privileged"]}}]'
  1616  		fi
  1617  		local additional_params="--set platform=openshift --set sa=pmm-server --set supresshttp2=false"
  1618  	fi
  1619  
  1620  	helm repo add percona https://percona.github.io/percona-helm-charts/
  1621  	helm repo update
  1622  	helm uninstall monitoring || :
  1623  
  1624  	retry 10 60 helm install monitoring --set imageRepo=${IMAGE_PMM_SERVER%:*} --set imageTag=${IMAGE_PMM_SERVER#*:} $additional_params https://percona-charts.storage.googleapis.com/pmm-server-$PMM_SERVER_VER.tgz
  1625  }
  1626  
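# Apply a point-in-time restore manifest (substituting the datetime,
# destination, GTID and minio namespace placeholders), wait for the restore job
# and the cluster to recover, then verify the data on every PXC member.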
  1627  run_recovery_check_pitr() {
  1628  	local cluster=$1
  1629  	local restore=$2
  1630  	local backup=$3
  1631  	local compare=$4
  1632  	local time_now=$5
  1633  	local dest=$6
  1634  	local gtid=$7
  1635  
	desc "recover backup: ${restore}"
  1637  	cat "$test_dir/conf/${restore}.yaml" \
  1638  		| $sed -e "s/<datetime>/${time_now}/g" \
  1639  		| $sed -e "s/<destination>/${dest}/g" \
  1640  		| $sed -e "s/<gtid>/${gtid}/g" \
  1641  		| $sed -e "s~minio-service.#namespace~minio-service.$namespace~" \
  1642  		| kubectl_bin apply -f -
  1643  	wait_backup_restore ${backup}
  1644  	kubectl_bin logs job/restore-job-${backup}-${cluster}
  1645  	wait_for_running "$cluster-proxysql" 2
  1646  	wait_for_running "$cluster-pxc" 3
  1647  	wait_cluster_consistency "$cluster" 3 2
	desc "check data after backup: ${restore}"
  1649  	compare_mysql_cmd $compare "SELECT * from test.test;" "-h $cluster-pxc-0.$cluster-pxc -uroot -proot_password"
  1650  	compare_mysql_cmd $compare "SELECT * from test.test;" "-h $cluster-pxc-1.$cluster-pxc -uroot -proot_password"
  1651  	compare_mysql_cmd $compare "SELECT * from test.test;" "-h $cluster-pxc-2.$cluster-pxc -uroot -proot_password"
  1652  	kubectl_bin delete -f "$test_dir/conf/${restore}.yaml"
  1653  }