github.com/verrazzano/verrazzano@v1.7.1/tools/scripts/k8s-dump-cluster.sh

#!/usr/bin/env bash
#
# Copyright (c) 2021, 2024, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
SCRIPT_DIR=$(cd $(dirname "$0"); pwd -P)

# REVIEW: Look at whether we can use the common.sh utility functions here (there is some log support, but
# that seems intertwined with the build/install; not sure it is a good fit here as this is intended to be a
# standalone capture, not install-specific).

# prints usage message for this script to the console
# Usage:
# usage
function usage {
    echo ""
    echo "usage: $0 -z tar_gz_file"
    echo " You must specify at least a tar file or a directory to capture into"
    echo " Specifying both -z and -d is valid as well, but note they are independent of each other"
    echo " -z tar_gz_file   Name of the compressed tar file to generate. Ie: capture.tar.gz"
    echo " -d directory     Directory to capture an expanded dump into. This does not affect a tar_gz_file if that is also specified"
    echo " -a               Call the analyzer on the captured dump and report to stdout"
    echo " -r report_file   Call the analyzer on the captured dump and report to the file specified, requires sources and go build environment"
    echo " -n namespace     Name of a specific namespace to capture"
    echo " -h               Help"
    echo ""
    exit 1
}
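# Example invocations (illustrative; the file, directory, and namespace names are arbitrary):
#   ./k8s-dump-cluster.sh -z capture.tar.gz
#   ./k8s-dump-cluster.sh -d ./my-capture -a
#   ./k8s-dump-cluster.sh -z capture.tar.gz -n verrazzano-system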

kubectl >/dev/null 2>&1 || {
  echo "kubectl is required but cannot be found on the path. Aborting."
  exit 1
}

TAR_GZ_FILE=""
ANALYZE="FALSE"
REPORT_FILE=""
SINGLE_NAMESPACE=""
while getopts z:d:n:har: flag
do
    case "${flag}" in
        z) TAR_GZ_FILE=${OPTARG};;
        d) DIRECTORY=${OPTARG};;
        a) ANALYZE="TRUE";;
        r) REPORT_FILE=${OPTARG}
           ANALYZE="TRUE";;
        n) SINGLE_NAMESPACE=${OPTARG};;
        h) usage;;
        *) usage;;
    esac
done
shift $((OPTIND -1))

# We need at least a directory or a tar file specified for the dump
if [[ -z "$TAR_GZ_FILE" && -z "$DIRECTORY" ]] ; then
  usage
fi

# If a tar file output was specified and it exists already fail
if [[ ! -z "$TAR_GZ_FILE" && -f "$TAR_GZ_FILE" ]] ; then
  echo "$TAR_GZ_FILE already exists. Aborting."
  exit 1
fi

# If a directory was specified and a file with that name exists already fail
if [[ ! -z "$DIRECTORY" && -f "$DIRECTORY" ]] ; then
  echo "$DIRECTORY already exists. Aborting."
  exit 1
fi

# If a report file output was specified and it exists already fail
if [[ ! -z "$REPORT_FILE" && -f "$REPORT_FILE" ]] ; then
  echo "$REPORT_FILE already exists. Aborting."
  exit 1
fi

# We create a temporary directory to dump info. The basic structure is along these lines.
#
# $CAPTURE_DIR/cluster-snapshot
#   directory per namespace
#     daemonsets.json
#     deployments.json
#     events.json
#     pods.json
#     replicasets.json
#     replication-controllers.json
#     services.json
#     directory per pod
#       logs.txt
#     application-configurations.json
#     coherence.json
#     gateways.json
#     ingress-traits.json
#     virtualservices.json
#   configmap_list.out
#   crd.json
#   es_indexes.out
#   verrazzano-resources.json
#   helm-ls.json
#   helm-version.out
#   images-on-nodes.csv
#   ingress.json
#   kubectl-version.json
#   nodes.json
#   pv.json
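# For example (illustrative pod name), the logs for a pod in the verrazzano-system namespace would end up at:
#   $CAPTURE_DIR/cluster-snapshot/verrazzano-system/<pod-name>/logs.txt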

#
# REVIEW: We certainly could capture some of the above per-namespace into the hierarchy
#         created by the cluster-info.
# NOTE: We are capturing details into JSON (a few version dumps aren't); this ultimately will be consumed by the triage
#       tooling, but it is also human readable.
# EVOLVING: This is a first cut that captures everything (quick/easy). We may not want that to remain as an option,
#      but by default we will really want to capture details about our namespaces, and capture some info otherwise.
#      So we will want to have some options to control what we capture here overall. Maybe:
#         base: This would be the default and would capture Verrazzano related namespaces
#         full: This would capture everything in the cluster
# REVIEW: As this is intended to be used to assist in issue handling, we do not want to capture things from a customer
#      environment which may be considered sensitive. The intention is that both the capture and triage tooling ultimately
#      would be runnable by the customer entirely (ie: we would never receive the captured data), but we need to be
#      careful in any case: once the data is captured into an archive, customers need to be aware of how they handle it, and we may
#      need to trim down more from what we capture as well.

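# Set up the capture directory: use the -d directory when one was given (creating it if needed); otherwise
# create a temporary capture_XXXXXXX directory under the current working directory.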
if [ -z $DIRECTORY ]; then
  CAPTURE_DIR=$(mktemp -d $(pwd)/capture_XXXXXXX)
else
  mkdir -p $DIRECTORY
  CAPTURE_DIR=$DIRECTORY
fi

if [ -z $CAPTURE_DIR ] || [ ! -d $CAPTURE_DIR ]; then
  echo "Failed to initialize capture directory"
  exit 1
fi

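# Flattens the image lists from the captured nodes.json into a deduplicated CSV of the images present on the nodes.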
function process_nodes_output() {
  if [ -f $CAPTURE_DIR/cluster-snapshot/nodes.json ]; then
    cat $CAPTURE_DIR/cluster-snapshot/nodes.json | jq '.items[].status.images[].names|@csv' | sed -e 's/"//g' -e 's/\\//g'| sort -u > $CAPTURE_DIR/cluster-snapshot/images-on-nodes.csv
  fi
}

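# Writes the Elasticsearch/OpenSearch index list to stdout: locates the elasticsearch.vmi.system.default ingress
# host and authenticates with the credentials from the verrazzano secret in the verrazzano-system namespace.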
function dump_es_indexes() {
  kubectl --insecure-skip-tls-verify get ingress -A -o json | jq .items[].spec.tls[].hosts[]  2>/dev/null | grep elasticsearch.vmi.system.default | sed -e 's;^";https://;' -e 's/"//' || true
  local ES_ENDPOINT=$(kubectl --insecure-skip-tls-verify get ingress -A -o json | jq .items[].spec.tls[].hosts[] 2>/dev/null | grep elasticsearch.vmi.system.default | sed -e 's;^";https://;' -e 's/"//') || true
  local ES_USER=$(kubectl --insecure-skip-tls-verify get secret -n verrazzano-system verrazzano -o jsonpath={.data.username} 2>/dev/null | base64 --decode) || true
  local ES_PWD=$(kubectl --insecure-skip-tls-verify get secret -n verrazzano-system verrazzano -o jsonpath={.data.password} 2>/dev/null | base64 --decode) || true
  if [ ! -z $ES_ENDPOINT ] && [ ! -z $ES_USER ] && [ ! -z $ES_PWD ]; then
    curl -k -u $ES_USER:$ES_PWD $ES_ENDPOINT/_all || true
  fi
}

# This relies on the directory structure which is set up by kubectl cluster-info dump, so this is not a standalone function and currently
# should only be called after that has been called
function dump_configmaps() {
  if [ ! -z $SINGLE_NAMESPACE ]; then
    # Get list of all config maps in the single namespace
    kubectl --insecure-skip-tls-verify get -o custom-columns=NAMESPACEHEADER:.metadata.namespace,NAMEHEADER:.metadata.name configmap --namespace $SINGLE_NAMESPACE > $CAPTURE_DIR/cluster-snapshot/configmap_list.out || true
  else
    # Get list of all config maps in the cluster
    kubectl --insecure-skip-tls-verify get -o custom-columns=NAMESPACEHEADER:.metadata.namespace,NAMEHEADER:.metadata.name configmap --all-namespaces > $CAPTURE_DIR/cluster-snapshot/configmap_list.out || true
  fi

  # Iterate the list, describe each configmap individually in a file in the namespace
  local CSV_LINE=""
  local NAMESPACE=""
  local CONFIGNAME=""
  while read INPUT_LINE; do
      if [[ ! $INPUT_LINE == *"NAMESPACEHEADER"* ]]; then
        CSV_LINE=$(echo "$INPUT_LINE" | sed  -e "s/[' '][' ']*/,/g")
        NAMESPACE=$(echo "$CSV_LINE" | cut -d, -f"1")
        CONFIGNAME=$(echo "$CSV_LINE" | cut -d, -f"2")
        if [ ! -z $NAMESPACE ] && [ ! -z $CONFIGNAME ] ; then
          # The cluster-snapshot should create the directories for us, but just in case there is a situation where there is a namespace
          # that is present which doesn't have one created, make sure we have the directory
          if [ ! -d $CAPTURE_DIR/cluster-snapshot/$NAMESPACE ] ; then
            mkdir $CAPTURE_DIR/cluster-snapshot/$NAMESPACE || true
          fi
          kubectl --insecure-skip-tls-verify describe configmap $CONFIGNAME -n $NAMESPACE > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/$CONFIGNAME.configmap || true
        fi
      fi
    done <$CAPTURE_DIR/cluster-snapshot/configmap_list.out
}
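# Writes a metadata.json file into the cluster-snapshot directory recording the capture time in UTC, for example:
#   {"time": "2024-01-01T00:00:00Z"}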
function generate_metadata_json_file(){
  DATE_ARG=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
  jq -n --arg DATE_FOR_JSON ${DATE_ARG} '{"time": $DATE_FOR_JSON}' > $CAPTURE_DIR/cluster-snapshot/metadata.json || true
}

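# Captures additional resources from the given namespace (Verrazzano and OAM custom resources, WebLogic domains,
# RBAC objects, cert-manager objects, workloads, secrets with data redacted, and so on) into its directory under cluster-snapshot.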
function capture_extra_details_from_namespace() {
  local NAMESPACE=$1

  # The cluster-snapshot should create the directories for us, but just in case there is a situation where there is a namespace
  # that is present which doesn't have one created, make sure we have the directory
  if [ ! -d $CAPTURE_DIR/cluster-snapshot/$NAMESPACE ] ; then
    mkdir $CAPTURE_DIR/cluster-snapshot/$NAMESPACE || true
  fi
  kubectl --insecure-skip-tls-verify get ApplicationConfiguration -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/application-configurations.json || true
  kubectl --insecure-skip-tls-verify get Component -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/components.json || true
  kubectl --insecure-skip-tls-verify get domains.weblogic.oracle -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/weblogic-domains.json || true
  kubectl --insecure-skip-tls-verify get clusters.weblogic.oracle -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/weblogic-clusters.json || true
  kubectl --insecure-skip-tls-verify get IngressTrait -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/ingress-traits.json || true
  kubectl --insecure-skip-tls-verify get Coherence -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/coherence.json || true
  kubectl --insecure-skip-tls-verify get gateway -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/gateways.json || true
  kubectl --insecure-skip-tls-verify get virtualservice -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/virtualservices.json || true
  kubectl --insecure-skip-tls-verify get rolebindings -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/role-bindings.json || true
  kubectl --insecure-skip-tls-verify get roles -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/roles.json || true
  kubectl --insecure-skip-tls-verify get clusterrolebindings -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/cluster-role-bindings.json || true
  kubectl --insecure-skip-tls-verify get clusterroles -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/cluster-roles.json || true
  kubectl --insecure-skip-tls-verify get ns $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/namespace.json || true
  kubectl --insecure-skip-tls-verify get pvc -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/persistent-volume-claims.json || true
  kubectl --insecure-skip-tls-verify get pv -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/persistent-volumes.json || true
  kubectl --insecure-skip-tls-verify get jobs.batch -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/jobs.json || true
  kubectl --insecure-skip-tls-verify get metricsbindings -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/metrics-bindings.json || true
  kubectl --insecure-skip-tls-verify get metricstemplates -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/metrics-templates.json || true
  kubectl --insecure-skip-tls-verify get multiclusterapplicationconfigurations -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-application-configurations.json || true
  kubectl --insecure-skip-tls-verify get multiclustercomponents -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-components.json || true
  kubectl --insecure-skip-tls-verify get multiclusterconfigmaps -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-config-maps.json || true
  kubectl --insecure-skip-tls-verify get multiclusterloggingscopes -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-logging-scopes.json || true
  kubectl --insecure-skip-tls-verify get multiclustersecrets -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/multicluster-secrets.json || true
  kubectl --insecure-skip-tls-verify get verrazzanoprojects -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-projects.json || true
  kubectl --insecure-skip-tls-verify get verrazzanomanagedclusters -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-managed-clusters.json || true
  kubectl --insecure-skip-tls-verify get verrazzanoweblogicworkload -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-weblogic-workload.json || true
  kubectl --insecure-skip-tls-verify get verrazzanocoherenceworkload -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-coherence-workload.json || true
  kubectl --insecure-skip-tls-verify get verrazzanohelidonworkload -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/verrazzano-helidon-workload.json || true
  kubectl --insecure-skip-tls-verify get domain -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/domain.json || true
  kubectl --insecure-skip-tls-verify get certificaterequests.cert-manager.io -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/certificate-requests.json || true
  kubectl --insecure-skip-tls-verify get orders.acme.cert-manager.io -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/acme-orders.json || true
  kubectl --insecure-skip-tls-verify get statefulsets -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/statefulsets.json || true
  kubectl --insecure-skip-tls-verify get secrets -n $NAMESPACE -o json | jq 'del(.items[].data)' 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/secrets.json || true
  kubectl --insecure-skip-tls-verify get serviceaccounts -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/serviceaccounts.json || true
  kubectl --insecure-skip-tls-verify get certificates -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/certificates.json || true
  kubectl --insecure-skip-tls-verify get MetricsTrait -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/metrics-traits.json || true
  kubectl --insecure-skip-tls-verify get servicemonitor -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/service-monitors.json || true
  kubectl --insecure-skip-tls-verify get podmonitor -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/pod-monitors.json || true
  kubectl --insecure-skip-tls-verify get endpoints -n $NAMESPACE -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/$NAMESPACE/endpoints.json || true
}

# This relies on the directory structure which is set up by kubectl cluster-info dump, so this is not a standalone function and currently
# should only be called after that has been called.
# kubectl cluster-info dump only captures certain information; we need additional information captured as well, placed into the
# namespace specific directories which are created by cluster-info. We capture those things here.
#
function dump_extra_details_per_namespace() {

  if [ ! -z $SINGLE_NAMESPACE ]; then
    echo "NAMEHEADER" > $CAPTURE_DIR/cluster-snapshot/namespace_list.out
    echo "$SINGLE_NAMESPACE" >> $CAPTURE_DIR/cluster-snapshot/namespace_list.out
  else
    # Get list of all namespaces in the cluster
    kubectl --insecure-skip-tls-verify get -o custom-columns=NAMEHEADER:.metadata.name namespaces > $CAPTURE_DIR/cluster-snapshot/namespace_list.out || true
  fi

  # Iterate the list and capture the extra details for each namespace
  local NAMESPACE=""
  while read NAMESPACE; do
    if [[ ! $NAMESPACE == *"NAMEHEADER"* ]]; then
      if [ ! -z $NAMESPACE ] ; then
        echo "Capturing $NAMESPACE namespace"
        if ! kubectl get ns $NAMESPACE > /dev/null 2>&1 ; then
          echo "Namespace ${NAMESPACE} not found, skipping"
          continue
        fi
        capture_extra_details_from_namespace $NAMESPACE
      fi
    fi
  done <$CAPTURE_DIR/cluster-snapshot/namespace_list.out
  rm $CAPTURE_DIR/cluster-snapshot/namespace_list.out
}

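# Collects every captured events.json and merges the events into a single CSV, sorted chronologically by timestamp.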
function gather_cronological_events() {
  find $CAPTURE_DIR/cluster-snapshot -name events.json -print0 | xargs -0 cat | jq '.items[] | [.firstTimestamp, .lastTimestamp, .reason, .message] | @csv' | sort > $CAPTURE_DIR/cluster-snapshot/cronological-event-messages.csv || true
}

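# Captures a single namespace (-n): a cluster-info dump scoped to that namespace plus the extra per-namespace details,
# configmaps, and events.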
function single_namespace_k8s_cluster_snapshot() {
  echo "Partial capture of kubernetes cluster for $SINGLE_NAMESPACE namespace"
  kubectl --insecure-skip-tls-verify cluster-info dump --namespaces $SINGLE_NAMESPACE --output-directory=$CAPTURE_DIR/cluster-snapshot >/dev/null 2>&1
  dump_extra_details_per_namespace
  dump_configmaps
  gather_cronological_events
}

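# Captures the full cluster: cluster-info dump for all namespaces, cluster-scoped resources (CRDs, PVs, webhooks, etc.),
# Helm information, Elasticsearch indexes, the Prometheus scrape configuration, and the extra per-namespace details.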
function full_k8s_cluster_snapshot() {
  echo "Full capture of kubernetes cluster"
  # Get general cluster-info dump, this contains quite a bit but not everything, it also sets up the directory structure
  kubectl --insecure-skip-tls-verify cluster-info dump --all-namespaces --output-directory=$CAPTURE_DIR/cluster-snapshot >/dev/null 2>&1
  local DUMP_RC=$?

  # Get the Verrazzano resource at the root level. The Verrazzano custom resource can define the namespace, so use all the namespaces in the command
  kubectl --insecure-skip-tls-verify get verrazzano --all-namespaces -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/verrazzano-resources.json || true

  # Get the installed Verrazzano modules, in the verrazzano-install namespace
  kubectl --insecure-skip-tls-verify get modules.platform.verrazzano.io --all-namespaces -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/verrazzano-modules.json || true

  # Generate the metadata.json file
  generate_metadata_json_file

  if [ $DUMP_RC -eq 0 ]; then
    kubectl --insecure-skip-tls-verify version -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/kubectl-version.json || true
    kubectl --insecure-skip-tls-verify get crd -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/crd.json || true
    kubectl --insecure-skip-tls-verify get pv -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/pv.json || true
    kubectl --insecure-skip-tls-verify get ingress -A -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/ingress.json || true
    kubectl --insecure-skip-tls-verify api-resources -o wide 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/api_resources.out || true
    kubectl --insecure-skip-tls-verify get netpol -A -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/network-policies.json || true
    kubectl --insecure-skip-tls-verify describe netpol -A 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/network-policies.txt || true
    kubectl --insecure-skip-tls-verify describe ClusterIssuer -A 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/cluster-issuers.txt || true
    kubectl --insecure-skip-tls-verify get MutatingWebhookConfigurations -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/mutating-webhook-configs.txt || true
    kubectl --insecure-skip-tls-verify get ValidatingWebhookConfigurations -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/validating-webhook-configs.txt || true
    kubectl --insecure-skip-tls-verify get settings -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/settings.json || true
    kubectl --insecure-skip-tls-verify get features -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/features.json || true
    # squelch the "too many clients" warnings from newer kubectl versions
    dump_extra_details_per_namespace
    dump_configmaps
    helm version 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/helm-version.out || true
    helm ls -A -o json 2>/dev/null | jq . > $CAPTURE_DIR/cluster-snapshot/helm-ls.json || true
    dump_es_indexes > $CAPTURE_DIR/cluster-snapshot/es_indexes.out || true
    process_nodes_output || true
    # dump the Prometheus scrape configuration
    if kubectl get ns verrazzano-monitoring > /dev/null 2>&1 ; then
      kubectl get secret prometheus-prometheus-operator-kube-p-prometheus -n verrazzano-monitoring -o json | jq -r '.data["prometheus.yaml.gz"]' | base64 -d | gunzip > $CAPTURE_DIR/cluster-snapshot/prom-scrape-config.yaml || true
    fi
    # Gather event messages in chronological order
    gather_cronological_events
    # Dump CAPI resources
    kubectl --insecure-skip-tls-verify get kontainerdrivers -o json 2>/dev/null > $CAPTURE_DIR/cluster-snapshot/kontainerdrivers.json || true
  else
    echo "Failed to dump cluster, verify kubectl has access to the cluster"
  fi
}

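# Runs the vz analyze tool against the capture when -a or -r was specified. The vz binary is looked for under
# $GOPATH/bin and then $GO_REPO_PATH; the analysis is skipped if go is not installed.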
function analyze_dump() {
  if [ $ANALYZE == "TRUE" ]; then
    if ! [ -x "$(command -v go)" ]; then
      echo "Analyze requires go which does not appear to be installed, skipping analyze"
    else
      local FULL_PATH_CAPTURE_DIR=$(echo "$(cd "$(dirname "$CAPTURE_DIR")" && pwd -P)/$(basename "$CAPTURE_DIR")")
      local SAVE_DIR=$(pwd)
      cd $SCRIPT_DIR/../vz
      # To enable debug, add  -zap-log-level debug
      if [ -z $REPORT_FILE ]; then
        if [[ -x $GOPATH/bin/vz ]]; then
          $GOPATH/bin/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR || true
        elif [[ -x $GO_REPO_PATH/vz ]]; then
          $GO_REPO_PATH/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR || true
        else
          echo "Warning: VZ tool is not available. Please download and configure it under" $GO_REPO_PATH
        fi
      else
        # Since we have to change the current working directory to run go, we need to take into account whether the report file
        # specified was relative to the original working directory. If it was absolute then we just use it directly.
        if [[ $REPORT_FILE = /* ]]; then
          if [[ -x $GOPATH/bin/vz ]]; then
            $GOPATH/bin/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $REPORT_FILE || true
          elif [[ -x $GO_REPO_PATH/vz ]]; then
            $GO_REPO_PATH/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $REPORT_FILE || true
          else
            echo "Warning: VZ tool is not available. Please download and configure it under" $GO_REPO_PATH
          fi
        else
          if [[ -x $GOPATH/bin/vz ]]; then
            $GOPATH/bin/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $SAVE_DIR/$REPORT_FILE || true
          elif [[ -x $GO_REPO_PATH/vz ]]; then
            $GO_REPO_PATH/vz analyze --capture-dir $FULL_PATH_CAPTURE_DIR --report-format detailed --report-file $SAVE_DIR/$REPORT_FILE || true
          else
            echo "Warning: VZ tool is not available. Please download and configure it under" $GO_REPO_PATH
          fi
        fi
      fi
      cd $SAVE_DIR
    fi
  fi
}

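# Packages the capture into the -z tar.gz file when one was specified. The resulting archive can later be
# expanded with, for example:
#   tar -xzf capture.tar.gz    # produces a cluster-snapshot/ directory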
function save_dump_file() {
  # This will save the dump to a tar gz file if that was specified
  if [ ! -z $TAR_GZ_FILE ]; then
    # We only save the files under cluster-snapshot; we do not save the temp directory portion of the path
    if [ -d $CAPTURE_DIR/cluster-snapshot ]; then
      tar -czf $TAR_GZ_FILE -C $CAPTURE_DIR cluster-snapshot
      echo "Dump saved to $TAR_GZ_FILE"
    fi
  fi
}

function cleanup_dump() {
  # This will clean up the capture directory if it was not specified (it is a temp directory in that case)
  if [ -z $DIRECTORY ]; then
    rm -rf $CAPTURE_DIR
  fi
}

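# Main flow: capture either a single namespace or the full cluster, optionally package the capture as a tar.gz,
# optionally analyze it, and then clean up the temporary directory.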
if [ ! -z $SINGLE_NAMESPACE ]; then
  single_namespace_k8s_cluster_snapshot
else
  full_k8s_cluster_snapshot
fi

if [ $? -eq 0 ]; then
  save_dump_file
fi

analyze_dump
cleanup_dump