github.com/verrazzano/verrazzano@v1.7.0/tools/vz/pkg/analysis/internal/util/report/report.go (about)

     1  // Copyright (c) 2021, 2023, Oracle and/or its affiliates.
     2  // Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
     3  
     4  // Package report handles reporting
     5  package report
     6  
     7  import (
     8  	"errors"
     9  	"fmt"
    10  	"github.com/verrazzano/verrazzano/tools/vz/pkg/constants"
    11  	"github.com/verrazzano/verrazzano/tools/vz/pkg/helpers"
    12  	"go.uber.org/zap"
    13  	"io/fs"
    14  	"os"
    15  	"path/filepath"
    16  	"reflect"
    17  	"strings"
    18  	"sync"
    19  )
    20  
    21  // NOTE: This is part of the contract with the analyzers however it is currently an initial stake in the ground and
    22  //		 will be evolving rapidly initially as we add analysis cases
    23  
    24  // TODO: We have rudimentary settings and a rudimentary dump of the report to start with here. Ie: Bare bones to get
    25  //       the details out for now, go from there... But there are some things that are already on the radar here:
    26  //
    27  //      1) Format of the human readable report will evolve (wrapping long lines, etc...)
    28  //      2) Other format outputs suitable for automation to process (ie: other automation that will look at the results
    29  //         and do something automatically with it), maybe CSV file, JSON, etc...
    30  //      3) Detail consolidation/redacting suitable for sharing as a Bug report
    31  //      4) Etc...
    32  //
    33  
    34  // For example, when we report them we will want to report:
    35  //		1) Per source (cluster, build, etc...)
    36  //		2) Sort in priority order (worst first...) TODO
    37  
    38  // Tossing around whether per-source, if we have a map for tracking Issues so we have one Issue per type of issue
    39  // and allow contributing supporting data to it (rather than separate issues for each case found if found in different spots
    40  // I'm hesitant to do that now as that reduces the flexibility, and until we really have the analysis drill-down patterns and
    41  // more scenarios in place I think it is premature to do that (we could maybe allow both, but again not sure we need
    42  // that complexity yet either). One good example is that when there a a bunch of pods impacted by the same root cause
    43  // issue, really don't need to spam with a bunch of the same issue (we could add additional supporting data to one root
    44  // issue instead of having an issue for each occurrence), but the analyzer can manage that and knows more about whether
    45  // it is really a different issue or not.
    46  
    47  // We have a map per source. The source is a string here. Generally for clusters it would be something that identifies
    48  // the cluster. But other analyzers may not be looking at a cluster, so they may have some other identification.
    49  // For the current implementation, these are the root file path that the analyzer is looking at.
// reports holds the issues contributed per source; key is the source identifier
var reports = make(map[string][]Issue)
// allSourcesAnalyzed records every source passed to AddSourceAnalyzed (key and value
// are both the source string), including sources for which no issues were found
var allSourcesAnalyzed = make(map[string]string)
// reportMutex guards concurrent access to both reports and allSourcesAnalyzed
var reportMutex = &sync.Mutex{}
    53  
    54  // ContributeIssuesMap allows a map of issues to be contributed
    55  func ContributeIssuesMap(log *zap.SugaredLogger, source string, issues map[string]Issue) (err error) {
    56  	log.Debugf("ContributeIssues called for source %s with %d issues", source, len(issues))
    57  	if len(source) == 0 {
    58  		return errors.New("ContributeIssues requires a non-empty source be specified")
    59  	}
    60  	for _, issue := range issues {
    61  		err = issue.Validate(log, source)
    62  		if err != nil {
    63  			return err
    64  		}
    65  	}
    66  	reportMutex.Lock()
    67  	reportIssues := reports[source]
    68  	if len(reportIssues) == 0 {
    69  		reportIssues = make([]Issue, 0, 10)
    70  	}
    71  	for _, issue := range issues {
    72  		issue.SupportingData = DeduplicateSupportingData(issue.SupportingData)
    73  		reportIssues = append(reportIssues, issue)
    74  	}
    75  	reports[source] = reportIssues
    76  	reportMutex.Unlock()
    77  	return nil
    78  }
    79  
    80  // ContributeIssue allows a single issue to be contributed
    81  func ContributeIssue(log *zap.SugaredLogger, issue Issue) (err error) {
    82  	log.Debugf("ContributeIssue called for source with %v", issue)
    83  	err = issue.Validate(log, "")
    84  	if err != nil {
    85  		log.Debugf("Validate failed %s", err.Error())
    86  		return err
    87  	}
    88  	reportMutex.Lock()
    89  	reportIssues := reports[issue.Source]
    90  	if len(reportIssues) == 0 {
    91  		reportIssues = make([]Issue, 0, 10)
    92  	}
    93  	issue.SupportingData = DeduplicateSupportingData(issue.SupportingData)
    94  	reportIssues = append(reportIssues, issue)
    95  	reports[issue.Source] = reportIssues
    96  	reportMutex.Unlock()
    97  	return nil
    98  }
    99  
   100  // GenerateHumanReport is a basic report generator
   101  // TODO: This is super basic for now, need to do things like sort based on Confidence, add other formats on output, etc...
   102  // Also add other niceties like time, Summary of what was analyzed, if no issues were found, etc...
   103  func GenerateHumanReport(log *zap.SugaredLogger, vzHelper helpers.VZHelper, reportCtx helpers.ReportCtx) (err error) {
   104  	// Default to stdout if no reportfile is supplied
   105  	//TODO: Eventually add support other reportFormat type (json)
   106  	writeOut, writeSummaryOut, sepOut := "", "", ""
   107  
   108  	// Lock the report data while generating the report itself
   109  	reportMutex.Lock()
   110  	defer reportMutex.Unlock()
   111  	sourcesWithoutIssues := allSourcesAnalyzed
   112  	for source, reportIssues := range reports {
   113  		log.Debugf("Will report on %d issues that were reported for %s", len(reportIssues), source)
   114  		// We need to filter and sort the list of Issues that will be reported
   115  		// TODO: Need to sort them as well eventually
   116  		actuallyReported := filterReportIssues(log, reportIssues, reportCtx.IncludeInfo, reportCtx.MinConfidence, reportCtx.MinImpact)
   117  		if len(actuallyReported) == 0 {
   118  			log.Debugf("No issues to report for source: %s", source)
   119  			continue
   120  		}
   121  
   122  		// Print the Source as it has issues
   123  		delete(sourcesWithoutIssues, source)
   124  		var issuesDetected string
   125  		if helpers.GetIsLiveCluster() {
   126  			issuesDetected = fmt.Sprintf("Detected %d issues in the cluster:", len(actuallyReported))
   127  		} else {
   128  			issuesDetected = fmt.Sprintf("Detected %d issues for %s:", len(actuallyReported), source)
   129  		}
   130  		sepOut = "\n" + issuesDetected + "\n" + strings.Repeat(constants.LineSeparator, len(issuesDetected)) + "\n"
   131  		for _, issue := range actuallyReported {
   132  			// Display only summary and action when the report-format is set to summary
   133  			if reportCtx.ReportFormat == constants.SummaryReport {
   134  				writeSummaryOut += fmt.Sprintf("\n\tISSUE (%s): %s\n", issue.Type, issue.Summary)
   135  				for _, action := range issue.Actions {
   136  					writeSummaryOut += fmt.Sprintf("\t%s\n", action.Summary)
   137  				}
   138  
   139  			}
   140  			writeOut += fmt.Sprintf("\n\tISSUE (%s)\n\t\tsummary: %s\n", issue.Type, issue.Summary)
   141  			if len(issue.Actions) > 0 && reportCtx.IncludeActions {
   142  				log.Debugf("Output actions")
   143  				writeOut += "\t\tactions:\n"
   144  				for _, action := range issue.Actions {
   145  					writeOut += fmt.Sprintf("\t\t\taction: %s\n", action.Summary)
   146  					if len(action.Steps) > 0 {
   147  						writeOut += "\t\t\t\tSteps:\n"
   148  						for i, step := range action.Steps {
   149  							writeOut += fmt.Sprintf("\t\t\t\t\tStep %d: %s\n", i+1, step)
   150  						}
   151  					}
   152  					if len(action.Links) > 0 {
   153  						writeOut += "\t\t\t\tLinks:\n"
   154  						for _, link := range action.Links {
   155  							writeOut += fmt.Sprintf("\t\t\t\t\t%s\n", link)
   156  						}
   157  					}
   158  				}
   159  			}
   160  			if len(issue.SupportingData) > 0 && reportCtx.IncludeSupportData {
   161  				log.Debugf("Output supporting data")
   162  				writeOut += "\t\tsupportingData:\n"
   163  				for _, data := range issue.SupportingData {
   164  					if len(data.Messages) > 0 {
   165  						writeOut += "\t\t\tmessages:\n"
   166  						for _, message := range data.Messages {
   167  							writeOut += fmt.Sprintf("\t\t\t\t%s\n", message)
   168  						}
   169  					}
   170  					if len(data.TextMatches) > 0 {
   171  						writeOut += "\t\t\tsearch matches:\n"
   172  						for _, match := range data.TextMatches {
   173  							if helpers.GetIsLiveCluster() {
   174  								writeOut += fmt.Sprintf("\t\t\t%s: %s\n", match.FileName, match.MatchedText)
   175  							} else {
   176  								writeOut += fmt.Sprintf("\t\t\t\t%s:%d: %s\n", match.FileName, match.FileLine, match.MatchedText)
   177  							}
   178  						}
   179  					}
   180  					if len(data.JSONPaths) > 0 {
   181  						writeOut += "\t\t\trelated json:\n"
   182  						for _, path := range data.JSONPaths {
   183  							writeOut += fmt.Sprintf("\t\t\t\t%s: %s\n", path.File, path.Path)
   184  						}
   185  					}
   186  					if len(data.RelatedFiles) > 0 {
   187  						writeOut += "\t\t\trelated resource(s):\n"
   188  						for _, fileName := range data.RelatedFiles {
   189  							writeOut += fmt.Sprintf("\t\t\t\t%s\n", fileName)
   190  						}
   191  					}
   192  				}
   193  			}
   194  		}
   195  	}
   196  
   197  	// genTmpReport opens report file at tmp path
   198  	genTmpReport := func(reportFile *string) (*os.File, error) {
   199  		*reportFile = constants.VzAnalysisReportTmpFile
   200  		repFile, err := os.CreateTemp(".", *reportFile)
   201  		if err != nil && (errors.Is(err, fs.ErrPermission) || strings.Contains(err.Error(), constants.ReadOnly)) {
   202  			fmt.Fprintf(vzHelper.GetOutputStream(), "Warning: %s to open report file in current directory\n", fs.ErrPermission)
   203  			repFile, err = os.CreateTemp("", *reportFile)
   204  		}
   205  		return repFile, err
   206  	}
   207  
   208  	// genUsrDefinedReport opens report file at user defined file path
   209  	genUsrDefinedReport := func(reportFile *string) (*os.File, error) {
   210  		return os.OpenFile(*reportFile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
   211  	}
   212  
   213  	// printReport std outs reports to console
   214  	printReport := func() {
   215  		if reportCtx.ReportFormat == constants.DetailedReport {
   216  			fmt.Fprintf(vzHelper.GetOutputStream(), sepOut+writeOut)
   217  		} else if reportCtx.ReportFormat == constants.SummaryReport {
   218  			fmt.Fprintf(vzHelper.GetOutputStream(), sepOut+writeSummaryOut)
   219  		}
   220  	}
   221  
   222  	if len(writeOut) > 0 {
   223  		var repFile *os.File
   224  		defer func() {
   225  			if repFile != nil {
   226  				currentDir, _ := os.Getwd()
   227  				fullPath := filepath.Join(currentDir, repFile.Name())
   228  				fmt.Fprintf(os.Stdout, "\nDetailed analysis report available in %s\n", fullPath)
   229  				repFile.Close()
   230  			}
   231  		}()
   232  		if reportCtx.ReportFile == "" {
   233  			// flag --report-file is unset or empty
   234  			repFile, err = genTmpReport(&reportCtx.ReportFile)
   235  		} else {
   236  			// flag --report-file is set
   237  			repFile, err = genUsrDefinedReport(&reportCtx.ReportFile)
   238  		}
   239  		if err != nil {
   240  			log.Errorf("Failed to create report file : %s, error found : %s", reportCtx.ReportFile, err.Error())
   241  			return err
   242  		}
   243  		// writes vz & k8s version, a separator and detected issues to report file
   244  		_, err = repFile.Write([]byte(helpers.GetVersionOut() + sepOut + writeOut))
   245  		if reportCtx.PrintReportToConsole {
   246  			printReport()
   247  		}
   248  		if err != nil {
   249  			log.Errorf("Failed to write to report file %s, error found : %s", reportCtx.ReportFile, err.Error())
   250  			return err
   251  		}
   252  	} else {
   253  		if reportCtx.IncludeInfo {
   254  			if len(sourcesWithoutIssues) > 0 {
   255  				writeOut += "\n\n"
   256  			}
   257  			if len(sourcesWithoutIssues) == 1 {
   258  				// This is a workaround to avoid printing the source when analyzing the live cluster, although it impacts the
   259  				// regular use case with a directory containing a single cluster snapshot
   260  				writeOut += "Verrazzano analysis CLI did not detect any issue in the cluster\n"
   261  			} else {
   262  				for _, source := range sourcesWithoutIssues {
   263  					writeOut += fmt.Sprintf("Verrazzano analysis CLI did not detect any issue in %s\n", source)
   264  				}
   265  			}
   266  			fmt.Fprintf(vzHelper.GetOutputStream(), writeOut)
   267  		}
   268  	}
   269  	return nil
   270  }
   271  
   272  // AddSourceAnalyzed tells the report which sources have been analyzed. This way it knows
   273  // the entire set of sources which were analyzed (not just the ones which had issues detected)
   274  func AddSourceAnalyzed(source string) {
   275  	reportMutex.Lock()
   276  	allSourcesAnalyzed[source] = source
   277  	reportMutex.Unlock()
   278  }
   279  
   280  // GetAllSourcesFilteredIssues is only being exported for the unit tests so they can inspect issues found in a report
   281  func GetAllSourcesFilteredIssues(log *zap.SugaredLogger, includeInfo bool, minConfidence int, minImpact int) (filtered []Issue) {
   282  	reportMutex.Lock()
   283  	for _, reportIssues := range reports {
   284  		subFiltered := filterReportIssues(log, reportIssues, includeInfo, minConfidence, minImpact)
   285  		if len(subFiltered) > 0 {
   286  			filtered = append(filtered, subFiltered...)
   287  		}
   288  	}
   289  	reportMutex.Unlock()
   290  	return filtered
   291  }
   292  
   293  // ClearReports clears the reports map, only for unit tests
   294  func ClearReports() {
   295  	reportMutex.Lock()
   296  	reports = make(map[string][]Issue)
   297  	reportMutex.Unlock()
   298  }
   299  
   300  // compare two structs are same or not
   301  func isEqualStructs(s1, s2 any) bool {
   302  	return reflect.DeepEqual(s1, s2)
   303  }
   304  
   305  // filter out duplicate issues
   306  func deDuplicateIssues(reportIssues []Issue) []Issue {
   307  	var deDuplicates = make([]Issue, 0, len(reportIssues))
   308  	for _, i1 := range reportIssues {
   309  		issueVisited := false
   310  		for _, i2 := range deDuplicates {
   311  			if isEqualStructs(i1, i2) {
   312  				issueVisited = true
   313  				break
   314  			}
   315  		}
   316  		if !issueVisited {
   317  			deDuplicates = append(deDuplicates, i1)
   318  		}
   319  	}
   320  	return deDuplicates
   321  }
   322  
   323  func filterReportIssues(log *zap.SugaredLogger, reportIssues []Issue, includeInfo bool, minConfidence int, minImpact int) (filtered []Issue) {
   324  	filtered = make([]Issue, 0, len(reportIssues))
   325  	for _, issue := range reportIssues {
   326  		// Skip issues that are Informational or lower Confidence that we want
   327  		if issue.Informational && !includeInfo || issue.Confidence < minConfidence || issue.Impact < minImpact {
   328  			log.Debugf("Skipping issue %s based on informational/confidence/impact settings", issue.Summary)
   329  			continue
   330  		}
   331  		filtered = append(filtered, issue)
   332  	}
   333  	return deDuplicateIssues(filtered)
   334  }