github.com/verrazzano/verrazzano@v1.7.0/tools/scripts/k8s-dump-cluster.sh

#!/usr/bin/env bash
#
# Copyright (c) 2021, 2023, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
SCRIPT_DIR=$(cd $(dirname "$0"); pwd -P)

# REVIEW: Look at whether we can use the common.sh utility functions here (there is some log support, but
# that seems intertwined with the build/install; not sure it is a good fit here, as this is intended to be
# a standalone capture tool, not install-specific).

# Prints the usage message for this script to the console
# Usage:
#   usage
function usage {
  echo ""
  echo "usage: $0 -z tar_gz_file"
  echo " You must specify at least a tar file or a directory to capture into"
  echo " Specifying both -z and -d is valid as well, but note they are independent of each other"
  echo " -z tar_gz_file   Name of the compressed tar file to generate, e.g. capture.tar.gz"
  echo " -d directory     Directory to capture an expanded dump into. This does not affect a tar_gz_file if that is also specified"
  echo " -a               Call the analyzer on the captured dump and report to stdout"
  echo " -r report_file   Call the analyzer on the captured dump and report to the file specified; requires sources and a go build environment"
  echo " -h               Help"
  echo ""
  exit 1
}
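
# Example invocations (illustrative; the archive, directory, and report names are placeholders):
#   ./k8s-dump-cluster.sh -z capture.tar.gz                 # capture into a compressed archive
#   ./k8s-dump-cluster.sh -d /tmp/capture                   # capture into an expanded directory
#   ./k8s-dump-cluster.sh -z capture.tar.gz -a              # capture, then analyze and report to stdout
#   ./k8s-dump-cluster.sh -d /tmp/capture -r report.out     # capture, then analyze into report.out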

command -v kubectl >/dev/null 2>&1 || {
  echo "kubectl is required but cannot be found on the path. Aborting."
  exit 1
}

TAR_GZ_FILE=""
ANALYZE="FALSE"
REPORT_FILE=""
while getopts z:d:har: flag
do
  case "${flag}" in
    z) TAR_GZ_FILE=${OPTARG};;
    d) DIRECTORY=${OPTARG};;
    a) ANALYZE="TRUE";;
    r) REPORT_FILE=${OPTARG}
       ANALYZE="TRUE";;
    h) usage;;
    *) usage;;
  esac
done
shift $((OPTIND - 1))

# We need at least a directory or a tar file specified for the dump
if [[ -z "$TAR_GZ_FILE" && -z "$DIRECTORY" ]] ; then
  usage
fi

# If a tar file output was specified and it already exists, fail
if [[ ! -z "$TAR_GZ_FILE" && -f "$TAR_GZ_FILE" ]] ; then
  echo "$TAR_GZ_FILE already exists. Aborting."
  exit 1
fi

# If a directory was specified and it already exists, fail
if [[ ! -z "$DIRECTORY" && -f "$DIRECTORY" ]] ; then
  echo "$DIRECTORY already exists. Aborting."
  exit 1
fi

# If a report file output was specified and it already exists, fail
if [[ ! -z "$REPORT_FILE" && -f "$REPORT_FILE" ]] ; then
  echo "$REPORT_FILE already exists. Aborting."
  exit 1
fi

# We create a temporary directory to dump info into. The basic structure is along these lines:
#
# $CAPTURE_DIR/cluster-snapshot
#    directory per namespace
#        daemonsets.json
#        deployments.json
#        events.json
#        pods.json
#        replicasets.json
#        replication-controllers.json
#        services.json
#        directory per pod
#            logs.txt
#        application-configurations.json
#        coherence.json
#        gateways.json
#        ingress-traits.json
#        virtualservices.json
#    configmap_list.out
#    crd.json
#    es_indexes.out
#    verrazzano-resources.json
#    helm-ls.json
#    helm-version.out
#    images-on-nodes.csv
#    ingress.json
#    kubectl-version.json
#    nodes.json
#    pv.json

# REVIEW: We certainly could capture some of the above per-namespace into the hierarchy
#    created by the cluster-info.
# NOTE: We are capturing details into json (a few version dumps aren't); this ultimately will be consumed
#    by the triage tooling, but it is also human readable.
# EVOLVING: This is a first cut that captures everything (quick/easy); we may not want that to remain an option.
#    By default we will really want to capture details about our namespaces, and capture some info otherwise,
#    so we will want some options to control what we capture here overall. Maybe:
#        base: This would be the default and would capture Verrazzano related namespaces
#        full: This would capture everything
# REVIEW: As this is intended to be used to assist in issue handling, we do not want to capture things from a customer
#    environment which may be considered sensitive. The intention is that both the capture and triage tooling would
#    ultimately be runnable entirely by the customer (ie: we would never receive the captured data), but we need to
#    be careful in any case: once the data is captured into an archive, the customer needs to be aware of how they
#    handle it, and we may need to trim down more from what we capture as well.

if [ -z $DIRECTORY ]; then
  CAPTURE_DIR=$(mktemp -d $(pwd)/capture_XXXXXXX)
else
  mkdir -p $DIRECTORY
  CAPTURE_DIR=$DIRECTORY
fi

if [ -z $CAPTURE_DIR ] || [ ! -d $CAPTURE_DIR ]; then
  echo "Failed to initialize capture directory"
  exit 1
fi

function process_nodes_output() {
  if [ -f $CAPTURE_DIR/cluster-snapshot/nodes.json ]; then
    cat $CAPTURE_DIR/cluster-snapshot/nodes.json | jq '.items[].status.images[].names|@csv' | sed -e 's/"//g' -e 's/\\//g' | sort -u > $CAPTURE_DIR/cluster-snapshot/images-on-nodes.csv
  fi
}

function dump_es_indexes() {
  kubectl --insecure-skip-tls-verify get ingress -A -o json | jq .items[].spec.tls[].hosts[] 2>/dev/null | grep elasticsearch.vmi.system.default | sed -e 's;^";https://;' -e 's/"//' || true
  local ES_ENDPOINT=$(kubectl --insecure-skip-tls-verify get ingress -A -o json | jq .items[].spec.tls[].hosts[] 2>/dev/null | grep elasticsearch.vmi.system.default | sed -e 's;^";https://;' -e 's/"//') || true
  local ES_USER=$(kubectl --insecure-skip-tls-verify get secret -n verrazzano-system verrazzano -o jsonpath={.data.username} 2>/dev/null | base64 --decode) || true
  local ES_PWD=$(kubectl --insecure-skip-tls-verify get secret -n verrazzano-system verrazzano -o jsonpath={.data.password} 2>/dev/null | base64 --decode) || true
  if [ ! -z $ES_ENDPOINT ] && [ ! -z $ES_USER ] && [ ! -z $ES_PWD ]; then
    curl -k -u $ES_USER:$ES_PWD $ES_ENDPOINT/_all || true
  fi
}
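
# For reference, a sketch of what dump_es_indexes produces (the host here is illustrative, not a real value):
# an ingress TLS host such as
#   "elasticsearch.vmi.system.default.11.22.33.44.nip.io"
# is rewritten by the sed above into an endpoint URL,
#   https://elasticsearch.vmi.system.default.11.22.33.44.nip.io
# which is then queried with basic auth, roughly:
#   curl -k -u "$ES_USER:$ES_PWD" "$ES_ENDPOINT/_all"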

# This relies on the directory structure which is set up by kubectl cluster-info dump, so this is not a standalone
# function and currently should only be called after that has been done.
function dump_configmaps() {
  # Get the list of all config maps in the cluster
  kubectl --insecure-skip-tls-verify get -o custom-columns=NAMESPACEHEADER:.metadata.namespace,NAMEHEADER:.metadata.name configmap --all-namespaces > $CAPTURE_DIR/cluster-snapshot/configmap_list.out || true

  # Iterate the list, describing each configmap individually into a file in its namespace directory
  local CSV_LINE=""
  local NAMESPACE=""
  local CONFIGNAME=""
  while read INPUT_LINE; do
    if [[ ! $INPUT_LINE == *"NAMESPACEHEADER"* ]]; then
      CSV_LINE=$(echo "$INPUT_LINE" | sed -e "s/[' '][' ']*/,/g")
      NAMESPACE=$(echo "$CSV_LINE" | cut -d, -f"1")
      CONFIGNAME=$(echo "$CSV_LINE" | cut -d, -f"2")
      if [ ! -z $NAMESPACE ] && [ ! -z $CONFIGNAME ] ; then
        # The cluster-snapshot should create the directories for us, but just in case there is a namespace
        # present which doesn't have one created, make sure we have the directory
        if [ ! -d $CAPTURE_DIR/cluster-snapshot/$NAMESPACE ] ; then
          mkdir $CAPTURE_DIR/cluster-snapshot/$NAMESPACE || true
        fi
        kubectl --insecure-skip-tls-verify describe configmap $CONFIGNAME -n $NAMESPACE > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/$CONFIGNAME.configmap || true
      fi
    fi
  done <$CAPTURE_DIR/cluster-snapshot/configmap_list.out
}
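
# For reference (illustrative namespace/name): configmap_list.out as captured above holds two
# space-padded columns, e.g.
#   NAMESPACEHEADER   NAMEHEADER
#   kube-system       coredns
# The sed in the loop collapses the padding into "kube-system,coredns", which cut then splits
# into NAMESPACE=kube-system and CONFIGNAME=coredns.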

# This relies on the directory structure which is set up by kubectl cluster-info dump, so this is not a standalone
# function and currently should only be called after that has been done.
# kubectl cluster-info dump only captures certain information; we need additional information captured and placed
# into the namespace-specific directories which are created by cluster-info. We capture those things here.
#
function dump_extra_details_per_namespace() {
  # Get the list of all namespaces in the cluster
  kubectl --insecure-skip-tls-verify get -o custom-columns=NAMEHEADER:.metadata.name namespaces > $CAPTURE_DIR/cluster-snapshot/namespace_list.out || true

  # Iterate the list, capturing the extra details for each namespace
  local NAMESPACE=""
  while read NAMESPACE; do
    if [[ ! $NAMESPACE == *"NAMEHEADER"* ]]; then
      if [ ! -z $NAMESPACE ] ; then
        echo "Capturing $NAMESPACE namespace"
        if ! kubectl get ns $NAMESPACE > /dev/null 2>&1 ; then
          echo "Namespace ${NAMESPACE} not found, skipping"
          continue
        fi
        # The cluster-snapshot should create the directories for us, but just in case there is a namespace
        # present which doesn't have one created, make sure we have the directory
        if [ ! -d $CAPTURE_DIR/cluster-snapshot/$NAMESPACE ] ; then
          mkdir $CAPTURE_DIR/cluster-snapshot/$NAMESPACE || true
        fi
        kubectl --insecure-skip-tls-verify get ApplicationConfiguration -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/application-configurations.json || true
        kubectl --insecure-skip-tls-verify get Component -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/components.json || true
        kubectl --insecure-skip-tls-verify get domains.weblogic.oracle -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/weblogic-domains.json || true
        kubectl --insecure-skip-tls-verify get clusters.weblogic.oracle -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/weblogic-clusters.json || true
        kubectl --insecure-skip-tls-verify get IngressTrait -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/ingress-traits.json || true
        kubectl --insecure-skip-tls-verify get Coherence -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/coherence.json || true
        kubectl --insecure-skip-tls-verify get gateway -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/gateways.json || true
        kubectl --insecure-skip-tls-verify get virtualservice -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/virtualservices.json || true
        kubectl --insecure-skip-tls-verify get rolebindings -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/role-bindings.json || true
        kubectl --insecure-skip-tls-verify get roles -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/roles.json || true
        kubectl --insecure-skip-tls-verify get clusterrolebindings -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/cluster-role-bindings.json || true
        kubectl --insecure-skip-tls-verify get clusterroles -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/cluster-roles.json || true
        kubectl --insecure-skip-tls-verify get ns $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/namespace.json || true
        kubectl --insecure-skip-tls-verify get pvc -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/persistent-volume-claims.json || true
        kubectl --insecure-skip-tls-verify get pv -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/persistent-volumes.json || true
        kubectl --insecure-skip-tls-verify get jobs.batch -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/jobs.json || true
        kubectl --insecure-skip-tls-verify get metricsbindings -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/metrics-bindings.json || true
        kubectl --insecure-skip-tls-verify get metricstemplates -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/metrics-templates.json || true
        kubectl --insecure-skip-tls-verify get multiclusterapplicationconfigurations -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-application-configurations.json || true
        kubectl --insecure-skip-tls-verify get multiclustercomponents -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-components.json || true
        kubectl --insecure-skip-tls-verify get multiclusterconfigmaps -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-config-maps.json || true
        kubectl --insecure-skip-tls-verify get multiclusterloggingscopes -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-logging-scopes.json || true
        kubectl --insecure-skip-tls-verify get multiclustersecrets -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-secrets.json || true
        kubectl --insecure-skip-tls-verify get verrazzanoprojects -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-projects.json || true
        kubectl --insecure-skip-tls-verify get verrazzanomanagedclusters -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-managed-clusters.json || true
        kubectl --insecure-skip-tls-verify get verrazzanoweblogicworkload -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-weblogic-workload.json || true
        kubectl --insecure-skip-tls-verify get verrazzanocoherenceworkload -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-coherence-workload.json || true
        kubectl --insecure-skip-tls-verify get verrazzanohelidonworkload -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-helidon-workload.json || true
        kubectl --insecure-skip-tls-verify get domain -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/domain.json || true
        kubectl --insecure-skip-tls-verify get certificaterequests.cert-manager.io -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/certificate-requests.json || true
        kubectl --insecure-skip-tls-verify get orders.acme.cert-manager.io -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/acme-orders.json || true
        kubectl --insecure-skip-tls-verify get statefulsets -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/statefulsets.json || true
        kubectl --insecure-skip-tls-verify get secrets -n $NAMESPACE -o json | jq 'del(.items[].data)' 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/secrets.json || true
        kubectl --insecure-skip-tls-verify get serviceaccounts -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/serviceaccounts.json || true
        kubectl --insecure-skip-tls-verify get certificates -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/certificates.json || true
        kubectl --insecure-skip-tls-verify get MetricsTrait -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/metrics-traits.json || true
        kubectl --insecure-skip-tls-verify get servicemonitor -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/service-monitors.json || true
        kubectl --insecure-skip-tls-verify get podmonitor -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/pod-monitors.json || true
        kubectl --insecure-skip-tls-verify get endpoints -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/endpoints.json || true
      fi
    fi
  done <$CAPTURE_DIR/cluster-snapshot/namespace_list.out
  rm $CAPTURE_DIR/cluster-snapshot/namespace_list.out
}
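
# A minimal sketch of inspecting the per-namespace capture offline (the capture directory and
# namespace here are placeholders):
#   jq -r '.items[].metadata.name' /tmp/capture/cluster-snapshot/verrazzano-system/pods.json
# lists the names of the pods that were captured for that namespace. Note that secrets.json is
# captured through jq 'del(.items[].data)' above, so secret values are not included in the dump.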

function gather_chronological_events() {
  find $CAPTURE_DIR/cluster-snapshot -name events.json -print0 | xargs -0 cat | jq '.items[] | [.firstTimestamp, .lastTimestamp, .reason, .message] | @csv' | sort > $CAPTURE_DIR/cluster-snapshot/chronological-event-messages.csv || true
}
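
# For reference (illustrative event): each row of chronological-event-messages.csv holds
# firstTimestamp, lastTimestamp, reason, and message, e.g.
#   "2023-01-01T00:00:00Z","2023-01-01T00:05:00Z","BackOff","Back-off restarting failed container"
# Since jq is not run with -r here, each row is additionally wrapped as a JSON-escaped string in the file.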

function full_k8s_cluster_snapshot() {
  echo "Full capture of kubernetes cluster"
  # Get the general cluster-info dump; this contains quite a bit but not everything. It also sets up the directory structure.
  kubectl --insecure-skip-tls-verify cluster-info dump --all-namespaces --output-directory=$CAPTURE_DIR/cluster-snapshot >/dev/null 2>&1
  # Save the cluster-info dump status now, so the check below reflects whether the dump itself worked
  local DUMP_STATUS=$?

  # Get the Verrazzano resource at the root level. The Verrazzano custom resource can define the namespace, so use all the namespaces in the command
  kubectl --insecure-skip-tls-verify get verrazzano --all-namespaces -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/verrazzano-resources.json || true

  # Get the installed Verrazzano modules, in the verrazzano-install namespace
  kubectl --insecure-skip-tls-verify get modules.platform.verrazzano.io --all-namespaces -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/verrazzano-modules.json || true

  if [ $DUMP_STATUS -eq 0 ]; then
    kubectl --insecure-skip-tls-verify version -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/kubectl-version.json || true
    kubectl --insecure-skip-tls-verify get crd -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/crd.json || true
    kubectl --insecure-skip-tls-verify get pv -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/pv.json || true
    kubectl --insecure-skip-tls-verify get ingress -A -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/ingress.json || true
    kubectl --insecure-skip-tls-verify api-resources -o wide 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/api_resources.out || true
    kubectl --insecure-skip-tls-verify get netpol -A -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/network-policies.json || true
    kubectl --insecure-skip-tls-verify describe netpol -A 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/network-policies.txt || true
    kubectl --insecure-skip-tls-verify describe ClusterIssuer -A 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/cluster-issuers.txt || true
    kubectl --insecure-skip-tls-verify get MutatingWebhookConfigurations -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/mutating-webhook-configs.txt || true
    kubectl --insecure-skip-tls-verify get ValidatingWebhookConfigurations -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/validating-webhook-configs.txt || true
    kubectl --insecure-skip-tls-verify get settings -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/settings.json || true
    # squelch the "too many clients" warnings from newer kubectl versions
    dump_extra_details_per_namespace
    dump_configmaps
    helm version 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/helm-version.out || true
    helm ls -A -o json 2>/dev/null | jq . > $CAPTURE_DIR/cluster-snapshot/helm-ls.json || true
    dump_es_indexes > $CAPTURE_DIR/cluster-snapshot/es_indexes.out || true
    process_nodes_output || true
    # Dump the Prometheus scrape configuration
    if kubectl get ns verrazzano-monitoring > /dev/null 2>&1 ; then
      kubectl get secret prometheus-prometheus-operator-kube-p-prometheus -n verrazzano-monitoring -o json | jq -r '.data["prometheus.yaml.gz"]' | base64 -d | gunzip > $CAPTURE_DIR/cluster-snapshot/prom-scrape-config.yaml || true
    fi
    # Gather event messages in chronological order
    gather_chronological_events
    # Dump CAPI resources
    kubectl --insecure-skip-tls-verify get kontainerdrivers -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/kontainerdrivers.json || true
  else
    echo "Failed to dump cluster, verify kubectl has access to the cluster"
  fi
}
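
# A minimal sketch of re-running the analysis by hand against an existing capture directory,
# mirroring the flags analyze_dump uses below (paths are placeholders; assumes the vz CLI is on your PATH):
#   vz analyze --capture-dir /tmp/capture --report-format detailed --report-file report.out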

function analyze_dump() {
  if [ $ANALYZE == "TRUE" ]; then
    if ! [ -x "$(command -v go)" ]; then
      echo "Analyze requires go, which does not appear to be installed; skipping analyze"
    else
      local FULL_PATH_CAPTURE_DIR=$(echo "$(cd "$(dirname "$CAPTURE_DIR")" && pwd -P)/$(basename "$CAPTURE_DIR")")
      local SAVE_DIR=$(pwd)
      cd $SCRIPT_DIR/../vz
      # To enable debug, add -zap-log-level debug
      if [ -z $REPORT_FILE ]; then
        if [[ -x $GOPATH/bin/vz ]]; then
          $GOPATH/bin/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR || true
        elif [[ -x $GO_REPO_PATH/vz ]]; then
          $GO_REPO_PATH/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR || true
        else
          echo "Warning: vz tool is not available. Please download and configure it under" $GO_REPO_PATH
        fi
      else
        # Since we have to change the current working directory to run go, we need to take into account whether the
        # report file specified was relative to the original working directory. If it was absolute then we just use it directly.
        if [[ $REPORT_FILE = /* ]]; then
          if [[ -x $GOPATH/bin/vz ]]; then
            $GOPATH/bin/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $REPORT_FILE || true
          elif [[ -x $GO_REPO_PATH/vz ]]; then
            $GO_REPO_PATH/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $REPORT_FILE || true
          else
            echo "Warning: vz tool is not available. Please download and configure it under" $GO_REPO_PATH
          fi
        else
          if [[ -x $GOPATH/bin/vz ]]; then
            $GOPATH/bin/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $SAVE_DIR/$REPORT_FILE || true
          elif [[ -x $GO_REPO_PATH/vz ]]; then
            $GO_REPO_PATH/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $SAVE_DIR/$REPORT_FILE || true
          else
            echo "Warning: vz tool is not available. Please download and configure it under" $GO_REPO_PATH
          fi
        fi
      fi
      cd $SAVE_DIR
    fi
  fi
}

function save_dump_file() {
  # This will save the dump to a tar gz file if one was specified
  if [ ! -z $TAR_GZ_FILE ]; then
    # We only save files under cluster-snapshot; we do not save the temp directory portion of the path
    if [ -d $CAPTURE_DIR/cluster-snapshot ]; then
      tar -czf $TAR_GZ_FILE -C $CAPTURE_DIR cluster-snapshot
      echo "Dump saved to $TAR_GZ_FILE"
    fi
  fi
}

function cleanup_dump() {
  # This will clean up the capture directory if one was not specified (it is a temp directory in that case)
  if [ -z $DIRECTORY ]; then
    rm -rf $CAPTURE_DIR
  fi
}

full_k8s_cluster_snapshot
if [ $? -eq 0 ]; then
  save_dump_file
fi

analyze_dump
cleanup_dump
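
# A previously captured archive can later be unpacked for offline review (illustrative paths):
#   tar -xzf capture.tar.gz -C /tmp/review && ls /tmp/review/cluster-snapshot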