github.com/kevinklinger/open_terraform@v1.3.6/noninternal/command/views/test.go

package views

import (
	"encoding/xml"
	"fmt"
	"io/ioutil"
	"sort"
	"strings"

	"github.com/kevinklinger/open_terraform/noninternal/command/arguments"
	"github.com/kevinklinger/open_terraform/noninternal/command/format"
	"github.com/kevinklinger/open_terraform/noninternal/moduletest"
	"github.com/kevinklinger/open_terraform/noninternal/terminal"
	"github.com/kevinklinger/open_terraform/noninternal/tfdiags"
	"github.com/mitchellh/colorstring"
)

// Test is the view interface for the "terraform test" command.
type Test interface {
	// Results presents the given test results.
	Results(map[string]*moduletest.Suite) tfdiags.Diagnostics

	// Diagnostics is for reporting warnings or errors that occurred with the
	// mechanics of running tests. For this command in particular, some
	// errors are considered to be test failures rather than mechanism failures,
	// and so those will be reported via Results rather than via Diagnostics.
	Diagnostics(tfdiags.Diagnostics)
}

// NewTest returns an implementation of Test configured to respect the
// settings described in the given arguments.
func NewTest(base *View, args arguments.TestOutput) Test {
	return &testHuman{
		streams:         base.streams,
		showDiagnostics: base.Diagnostics,
		colorize:        base.colorize,
		junitXMLFile:    args.JUnitXMLFile,
	}
}
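
// A hypothetical caller might wire this view up roughly as follows (sketch
// only; the identifiers c.View, parsedArgs, and suites are assumed for
// illustration and are not part of this package):
//
//	view := views.NewTest(c.View, parsedArgs)
//	resultDiags := view.Results(suites)
//	view.Diagnostics(resultDiags)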

type testHuman struct {
	// This is the subset of functionality we need from the base view.
	streams         *terminal.Streams
	showDiagnostics func(diags tfdiags.Diagnostics)
	colorize        *colorstring.Colorize

	// If junitXMLFile is not empty then results will be written to
	// the given file path in addition to the usual output.
	junitXMLFile string
}
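
// Compile-time assertion that *testHuman satisfies the Test interface.
var _ Test = (*testHuman)(nil)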

func (v *testHuman) Results(results map[string]*moduletest.Suite) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	// FIXME: Due to how this prototype command evolved concurrently with
	// establishing the idea of command views, the handling of JUnit output
	// as part of the "human" view rather than as a separate view in its
	// own right is a little odd and awkward. We should refactor this
	// prior to making "terraform test" a real supported command to make
	// it be structured more like the other commands that use the views
	// package.

	v.humanResults(results)

	if v.junitXMLFile != "" {
		moreDiags := v.junitXMLResults(results, v.junitXMLFile)
		diags = diags.Append(moreDiags)
	}

	return diags
}

func (v *testHuman) Diagnostics(diags tfdiags.Diagnostics) {
	if len(diags) == 0 {
		return
	}
	v.showDiagnostics(diags)
}

func (v *testHuman) humanResults(results map[string]*moduletest.Suite) {
	failCount := 0
	width := v.streams.Stderr.Columns()

	suiteNames := make([]string, 0, len(results))
	for suiteName := range results {
		suiteNames = append(suiteNames, suiteName)
	}
	sort.Strings(suiteNames)
	for _, suiteName := range suiteNames {
		suite := results[suiteName]

		componentNames := make([]string, 0, len(suite.Components))
		for componentName := range suite.Components {
			componentNames = append(componentNames, componentName)
		}
		sort.Strings(componentNames)
		for _, componentName := range componentNames {
			component := suite.Components[componentName]

			assertionNames := make([]string, 0, len(component.Assertions))
			for assertionName := range component.Assertions {
				assertionNames = append(assertionNames, assertionName)
			}
			sort.Strings(assertionNames)

			for _, assertionName := range assertionNames {
				assertion := component.Assertions[assertionName]

				fullName := fmt.Sprintf("%s.%s.%s", suiteName, componentName, assertionName)
				if strings.HasPrefix(componentName, "(") {
					// parenthesis-prefixed components are placeholders that
					// the test harness generates to represent problems that
					// prevented checking any assertions at all, so we'll
					// just hide them and show the suite name.
					fullName = suiteName
				}
				headingExtra := fmt.Sprintf("%s (%s)", fullName, assertion.Description)

				switch assertion.Outcome {
				case moduletest.Failed:
					// Failed means that the assertion was successfully
					// executed but that the assertion condition didn't hold.
					v.eprintRuleHeading("yellow", "Failed", headingExtra)

				case moduletest.Error:
					// Error means that the system encountered an unexpected
					// error when trying to evaluate the assertion.
					v.eprintRuleHeading("red", "Error", headingExtra)

				default:
					// We don't do anything for moduletest.Passed or
					// moduletest.Skipped. Perhaps in future we'll offer a
					// -verbose option to include information about those.
					continue
				}
				failCount++

				if len(assertion.Message) > 0 {
					dispMsg := format.WordWrap(assertion.Message, width)
					v.streams.Eprintln(dispMsg)
				}
				if len(assertion.Diagnostics) > 0 {
					// We'll do our own writing of the diagnostics in this
					// case, rather than using v.Diagnostics, because we
					// specifically want all of these diagnostics to go to
					// Stderr along with all of the other output we've
					// generated.
					for _, diag := range assertion.Diagnostics {
						diagStr := format.Diagnostic(diag, nil, v.colorize, width)
						v.streams.Eprint(diagStr)
					}
				}
			}
		}
	}

	if failCount > 0 {
		// If we've printed at least one failure then we'll have printed at
		// least one horizontal rule across the terminal, and so we'll balance
		// that with another horizontal rule.
		if width > 1 {
			rule := strings.Repeat("─", width-1)
			v.streams.Eprintln(v.colorize.Color("[dark_gray]" + rule))
		}
	}

	if failCount == 0 {
		if len(results) > 0 {
			// This is not actually an error, but it's convenient if all of our
			// result output goes to the same stream for when this is running in
			// automation that might be gathering this output via a pipe.
			v.streams.Eprint(v.colorize.Color("[bold][green]Success![reset] All of the test assertions passed.\n\n"))
		} else {
			v.streams.Eprint(v.colorize.Color("[bold][yellow]No tests defined.[reset] This module doesn't have any test suites to run.\n\n"))
		}
	}

	// Try to flush any buffering that might be happening. (This isn't always
	// successful, depending on what sort of fd Stderr is connected to.)
	v.streams.Stderr.File.Sync()
}

func (v *testHuman) junitXMLResults(results map[string]*moduletest.Suite, filename string) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	// "JUnit XML" is a file format that has become a de-facto standard for
	// test reporting tools but that is not formally specified anywhere, and
	// so each producer and consumer implementation unfortunately tends to
	// differ in certain ways from others.
	// With that in mind, this is a best effort sort of thing aimed at being
	// broadly compatible with various consumers, but it's likely that
	// some consumers will present these results better than others.
	// This implementation is based mainly on the pseudo-specification of the
	// format curated here, based on the Jenkins parser implementation:
	//    https://llg.cubic.org/docs/junit/

	// An "Outcome" represents one of the various XML elements allowed inside
	// a testcase element to indicate the test outcome.
	type Outcome struct {
		Message string `xml:"message,attr,omitempty"`
	}

	// TestCase represents an individual test case as part of a suite. Note
	// that a JUnit XML test case incorporates both the "component" and
	// "assertion" levels of our model: we pretend that component is a class
	// name and assertion is a method name in order to match with the
	// Java-flavored expectations of JUnit XML, which are hopefully close
	// enough to get a test result rendering that's useful to humans.
	type TestCase struct {
		AssertionName string `xml:"name,attr"`
		ComponentName string `xml:"classname,attr"`

		// These fields represent the different outcomes of a TestCase. Only one
		// of these should be populated in each TestCase; this awkward
		// structure is just to make this play nicely with encoding/xml's
		// expectations.
		Skipped *Outcome `xml:"skipped,omitempty"`
		Error   *Outcome `xml:"error,omitempty"`
		Failure *Outcome `xml:"failure,omitempty"`

		Stderr string `xml:"system-err,omitempty"`
	}

	// TestSuite represents an individual test suite, of potentially many
	// in a JUnit XML document.
	type TestSuite struct {
		Name         string      `xml:"name,attr"`
		TotalCount   int         `xml:"tests,attr"`
		SkippedCount int         `xml:"skipped,attr"`
		ErrorCount   int         `xml:"errors,attr"`
		FailureCount int         `xml:"failures,attr"`
		Cases        []*TestCase `xml:"testcase"`
	}

	// TestSuites represents the root element of the XML document.
	type TestSuites struct {
		XMLName      struct{}     `xml:"testsuites"`
		ErrorCount   int          `xml:"errors,attr"`
		FailureCount int          `xml:"failures,attr"`
		TotalCount   int          `xml:"tests,attr"`
		Suites       []*TestSuite `xml:"testsuite"`
	}
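
	// Illustrative sketch of the document shape these types marshal to;
	// the element and attribute names follow the struct tags above, while
	// the values here are just invented examples:
	//
	//    <testsuites errors="0" failures="1" tests="1">
	//      <testsuite name="example_suite" tests="1" skipped="0" errors="0" failures="1">
	//        <testcase name="assertion_name" classname="component_name">
	//          <failure message="assertion failed"></failure>
	//        </testcase>
	//      </testsuite>
	//    </testsuites>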

	xmlSuites := TestSuites{}
	suiteNames := make([]string, 0, len(results))
	for suiteName := range results {
		suiteNames = append(suiteNames, suiteName)
	}
	sort.Strings(suiteNames)
	for _, suiteName := range suiteNames {
		suite := results[suiteName]

		xmlSuite := &TestSuite{
			Name: suiteName,
		}
		xmlSuites.Suites = append(xmlSuites.Suites, xmlSuite)

		componentNames := make([]string, 0, len(suite.Components))
		for componentName := range suite.Components {
			componentNames = append(componentNames, componentName)
		}
		sort.Strings(componentNames)
		for _, componentName := range componentNames {
			component := suite.Components[componentName]

			assertionNames := make([]string, 0, len(component.Assertions))
			for assertionName := range component.Assertions {
				assertionNames = append(assertionNames, assertionName)
			}
			sort.Strings(assertionNames)

			for _, assertionName := range assertionNames {
				assertion := component.Assertions[assertionName]
				xmlSuites.TotalCount++
				xmlSuite.TotalCount++

				xmlCase := &TestCase{
					ComponentName: componentName,
					AssertionName: assertionName,
				}
				xmlSuite.Cases = append(xmlSuite.Cases, xmlCase)

				switch assertion.Outcome {
				case moduletest.Pending:
					// We represent "pending" cases -- cases blocked by
					// upstream errors -- as if they were "skipped" in JUnit
					// terms, because we didn't actually check them and so
					// can't say whether they succeeded or not.
					xmlSuite.SkippedCount++
					xmlCase.Skipped = &Outcome{
						Message: assertion.Message,
					}
				case moduletest.Failed:
					xmlSuites.FailureCount++
					xmlSuite.FailureCount++
					xmlCase.Failure = &Outcome{
						Message: assertion.Message,
					}
				case moduletest.Error:
					xmlSuites.ErrorCount++
					xmlSuite.ErrorCount++
					xmlCase.Error = &Outcome{
						Message: assertion.Message,
					}

					// We'll also include the diagnostics in the "stderr"
					// portion of the output, so they'll hopefully be visible
					// in a test log viewer in JUnit-XML-consuming CI systems.
					var buf strings.Builder
					for _, diag := range assertion.Diagnostics {
						diagStr := format.DiagnosticPlain(diag, nil, 68)
						buf.WriteString(diagStr)
					}
					xmlCase.Stderr = buf.String()
				}

			}
		}
	}

	xmlOut, err := xml.MarshalIndent(&xmlSuites, "", "  ")
	if err != nil {
		// If marshalling fails then that's a bug in the code above,
		// because we should always be producing a value that is
		// accepted by encoding/xml.
		panic(fmt.Sprintf("invalid values to marshal as JUnit XML: %s", err))
	}

	err = ioutil.WriteFile(filename, xmlOut, 0644)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to write JUnit XML file",
			fmt.Sprintf(
				"Could not create %s to record the test results in JUnit XML format: %s.",
				filename,
				err,
			),
		))
	}

	return diags
}

func (v *testHuman) eprintRuleHeading(color, prefix, extra string) {
	const lineCell string = "─"
	textLen := len(prefix) + len(": ") + len(extra)
	spacingLen := 2
	leftLineLen := 3

	rightLineLen := 0
	width := v.streams.Stderr.Columns()
	if (textLen + spacingLen + leftLineLen) < (width - 1) {
		// (we allow an extra column at the end because some terminals can't
		// print in the final column without wrapping to the next line)
		rightLineLen = width - (textLen + spacingLen + leftLineLen) - 1
	}
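
	// For example (illustrative numbers only): with a width of 80, a prefix
	// of "Failed" and a 40-character extra string, textLen is 48 and so
	// rightLineLen is 80 - (48 + 2 + 3) - 1 = 26 trailing line cells.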

	colorCode := "[" + color + "]"

	// We'll prepare what we're going to print in memory first, so that we can
	// send it all to stderr in one write in case other programs are also
	// concurrently trying to write to the terminal for some reason.
	var buf strings.Builder
	buf.WriteString(v.colorize.Color(colorCode + strings.Repeat(lineCell, leftLineLen)))
	buf.WriteByte(' ')
	buf.WriteString(v.colorize.Color("[bold]" + colorCode + prefix + ":"))
	buf.WriteByte(' ')
	buf.WriteString(extra)
	if rightLineLen > 0 {
		buf.WriteByte(' ')
		buf.WriteString(v.colorize.Color(colorCode + strings.Repeat(lineCell, rightLineLen)))
	}
	v.streams.Eprintln(buf.String())
}