github.com/opentofu/opentofu@v1.7.1/internal/command/test.go

     1  // Copyright (c) The OpenTofu Authors
     2  // SPDX-License-Identifier: MPL-2.0
     3  // Copyright (c) 2023 HashiCorp, Inc.
     4  // SPDX-License-Identifier: MPL-2.0
     5  
     6  package command
     7  
     8  import (
     9  	"context"
    10  	"fmt"
    11  	"log"
    12  	"path"
    13  	"sort"
    14  	"strings"
    15  	"time"
    16  
    17  	"github.com/hashicorp/hcl/v2"
    18  	"github.com/zclconf/go-cty/cty"
    19  	"golang.org/x/exp/slices"
    20  
    21  	"github.com/opentofu/opentofu/internal/addrs"
    22  	"github.com/opentofu/opentofu/internal/backend"
    23  	"github.com/opentofu/opentofu/internal/command/arguments"
    24  	"github.com/opentofu/opentofu/internal/command/views"
    25  	"github.com/opentofu/opentofu/internal/configs"
    26  	"github.com/opentofu/opentofu/internal/encryption"
    27  	"github.com/opentofu/opentofu/internal/logging"
    28  	"github.com/opentofu/opentofu/internal/moduletest"
    29  	"github.com/opentofu/opentofu/internal/plans"
    30  	"github.com/opentofu/opentofu/internal/states"
    31  	"github.com/opentofu/opentofu/internal/tfdiags"
    32  	"github.com/opentofu/opentofu/internal/tofu"
    33  )
    34  
    35  const (
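         	// MainStateIdentifier is the key used in the runner's state map for
         	// the main configuration under test; states for modules loaded by a
         	// run block's "module" source are keyed by that source address.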
    36  	MainStateIdentifier = ""
    37  )
    38  
    39  type TestCommand struct {
    40  	Meta
    41  }
    42  
    43  func (c *TestCommand) Help() string {
    44  	helpText := `
    45  Usage: tofu [global options] test [options]
    46  
    47    Executes automated integration tests against the current OpenTofu 
    48    configuration.
    49  
    50    OpenTofu will search for .tftest.hcl files within the current configuration 
    51    and testing directories. OpenTofu will then execute the testing run blocks 
    52    within any testing files in order, and verify conditional checks and 
    53    assertions against the created infrastructure. 
    54  
    55    This command creates real infrastructure and will attempt to clean up the
    56    testing infrastructure on completion. Monitor the output carefully to ensure
    57    this cleanup process is successful.
    58  
    59  Options:
    60  
    61    -filter=testfile      If specified, OpenTofu will only execute the test files
    62                          specified by this flag. You can use this option multiple
    63                          times to execute more than one test file.
    64  
     65    -json                 If specified, machine-readable output will be printed
     66                          in JSON format.
    67  
    68    -no-color             If specified, output won't contain any color.
    69  
     70    -test-directory=path  Set the OpenTofu test directory, which defaults to "tests".
     71                          The test command will search for test files in the current
     72                          directory and in the directory specified by this flag.
    73  
    74    -var 'foo=bar'        Set a value for one of the input variables in the root
    75                          module of the configuration. Use this option more than
    76                          once to set more than one variable.
    77  
    78    -var-file=filename    Load variable values from the given file, in addition
    79                          to the default files terraform.tfvars and *.auto.tfvars.
    80                          Use this option more than once to include more than one
    81                          variables file.
    82  
    83    -verbose              Print the plan or state for each test run block as it
    84                          executes.
    85  `
    86  	return strings.TrimSpace(helpText)
    87  }
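
         // As an illustrative sketch (not part of the original source), a minimal
         // test file that this command would discover and execute might look like
         // this; the file path, variable, and assertion are hypothetical:
         //
         //	# tests/example.tftest.hcl
         //	variables {
         //	  bucket_name = "example"
         //	}
         //
         //	run "check_bucket_name" {
         //	  command = plan
         //
         //	  assert {
         //	    condition     = var.bucket_name == "example"
         //	    error_message = "unexpected bucket name"
         //	  }
         //	}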
    88  
    89  func (c *TestCommand) Synopsis() string {
    90  	return "Execute integration tests for OpenTofu modules"
    91  }
    92  
    93  func (c *TestCommand) Run(rawArgs []string) int {
    94  	var diags tfdiags.Diagnostics
    95  
    96  	common, rawArgs := arguments.ParseView(rawArgs)
    97  	c.View.Configure(common)
    98  
    99  	args, diags := arguments.ParseTest(rawArgs)
   100  	if diags.HasErrors() {
   101  		c.View.Diagnostics(diags)
   102  		c.View.HelpPrompt("test")
   103  		return 1
   104  	}
   105  
   106  	view := views.NewTest(args.ViewType, c.View)
   107  
   108  	config, configDiags := c.loadConfigWithTests(".", args.TestDirectory)
   109  	diags = diags.Append(configDiags)
   110  	if configDiags.HasErrors() {
   111  		view.Diagnostics(nil, nil, diags)
   112  		return 1
   113  	}
   114  
   115  	runCount := 0
   116  	fileCount := 0
   117  
   118  	var fileDiags tfdiags.Diagnostics
   119  	suite := moduletest.Suite{
   120  		Files: func() map[string]*moduletest.File {
   121  			files := make(map[string]*moduletest.File)
   122  
   123  			if len(args.Filter) > 0 {
   124  				for _, name := range args.Filter {
   125  					file, ok := config.Module.Tests[name]
   126  					if !ok {
    127  						// If the filter is invalid, we'll simply skip this
    128  						// entry and print a warning, but we'll still execute
    129  						// any other test files named by the filter.
    130  						fileDiags = fileDiags.Append(tfdiags.Sourceless(
   131  							tfdiags.Warning,
   132  							"Unknown test file",
   133  							fmt.Sprintf("The specified test file, %s, could not be found.", name)))
   134  						continue
   135  					}
   136  
   137  					fileCount++
   138  
   139  					var runs []*moduletest.Run
   140  					for ix, run := range file.Runs {
   141  						runs = append(runs, &moduletest.Run{
   142  							Config: run,
   143  							Index:  ix,
   144  							Name:   run.Name,
   145  						})
   146  					}
   147  
   148  					runCount += len(runs)
   149  					files[name] = &moduletest.File{
   150  						Config: file,
   151  						Name:   name,
   152  						Runs:   runs,
   153  					}
   154  				}
   155  
   156  				return files
   157  			}
   158  
   159  			// Otherwise, we'll just do all the tests in the directory!
   160  			for name, file := range config.Module.Tests {
   161  				fileCount++
   162  
   163  				var runs []*moduletest.Run
   164  				for ix, run := range file.Runs {
   165  					runs = append(runs, &moduletest.Run{
   166  						Config: run,
   167  						Index:  ix,
   168  						Name:   run.Name,
   169  					})
   170  				}
   171  
   172  				runCount += len(runs)
   173  				files[name] = &moduletest.File{
   174  					Config: file,
   175  					Name:   name,
   176  					Runs:   runs,
   177  				}
   178  			}
   179  			return files
   180  		}(),
   181  	}
   182  
   183  	log.Printf("[DEBUG] TestCommand: found %d files with %d run blocks", fileCount, runCount)
   184  
   185  	diags = diags.Append(fileDiags)
   186  	if fileDiags.HasErrors() {
   187  		view.Diagnostics(nil, nil, diags)
   188  		return 1
   189  	}
   190  
   191  	// Users can also specify variables via the command line, so we'll parse
   192  	// all that here.
   193  	var items []rawFlag
   194  	for _, variable := range args.Vars.All() {
   195  		items = append(items, rawFlag{
   196  			Name:  variable.Name,
   197  			Value: variable.Value,
   198  		})
   199  	}
   200  	c.variableArgs = rawFlags{items: &items}
   201  
   202  	variables, variableDiags := c.collectVariableValues()
   203  	diags = diags.Append(variableDiags)
   204  	if variableDiags.HasErrors() {
   205  		view.Diagnostics(nil, nil, diags)
   206  		return 1
   207  	}
   208  
   209  	opts, err := c.contextOpts()
   210  	if err != nil {
   211  		diags = diags.Append(err)
   212  		view.Diagnostics(nil, nil, diags)
   213  		return 1
   214  	}
   215  
   216  	// Don't use encryption during testing
   217  	opts.Encryption = encryption.Disabled()
   218  
   219  	// Print out all the diagnostics we have from the setup. These will just be
   220  	// warnings, and we want them out of the way before we start the actual
   221  	// testing.
   222  	view.Diagnostics(nil, nil, diags)
   223  
   224  	// We have two levels of interrupt here. A 'stop' and a 'cancel'. A 'stop'
   225  	// is a soft request to stop. We'll finish the current test, do the tidy up,
   226  	// but then skip all remaining tests and run blocks. A 'cancel' is a hard
   227  	// request to stop now. We'll cancel the current operation immediately
   228  	// even if it's a delete operation, and we won't clean up any infrastructure
   229  	// if we're halfway through a test. We'll print details explaining what was
   230  	// stopped so the user can do their best to recover from it.
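         	//
         	// Concretely, the first interrupt received on c.ShutdownCh below
         	// triggers the stop, and a second interrupt escalates to a cancel.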
   231  
   232  	runningCtx, done := context.WithCancel(context.Background())
   233  	stopCtx, stop := context.WithCancel(runningCtx)
   234  	cancelCtx, cancel := context.WithCancel(context.Background())
   235  
   236  	runner := &TestSuiteRunner{
   237  		command: c,
   238  
   239  		Suite:  &suite,
   240  		Config: config,
   241  		View:   view,
   242  
   243  		GlobalVariables: variables,
   244  		Opts:            opts,
   245  
   246  		CancelledCtx: cancelCtx,
   247  		StoppedCtx:   stopCtx,
   248  
   249  		// Just to be explicit, we'll set the following fields even though they
   250  		// default to these values.
   251  		Cancelled: false,
   252  		Stopped:   false,
   253  
   254  		Verbose: args.Verbose,
   255  	}
   256  
   257  	view.Abstract(&suite)
   258  
   259  	panicHandler := logging.PanicHandlerWithTraceFn()
   260  	go func() {
   261  		defer panicHandler()
   262  		defer done()
   263  		defer stop()
   264  		defer cancel()
   265  
   266  		runner.Start(variables)
   267  	}()
   268  
   269  	// Wait for the operation to complete, or for an interrupt to occur.
   270  	select {
   271  	case <-c.ShutdownCh:
    272  		// A polite request to stop.
   273  
   274  		view.Interrupted()
   275  		runner.Stopped = true
   276  		stop()
   277  
   278  		select {
   279  		case <-c.ShutdownCh:
    280  			// The user pressed it again, so now we have to get it to stop
    281  			// as fast as possible.
   282  
   283  			view.FatalInterrupt()
   284  			runner.Cancelled = true
   285  			cancel()
   286  
   287  			// We'll wait 5 seconds for this operation to finish now, regardless
   288  			// of whether it finishes successfully or not.
   289  			select {
   290  			case <-runningCtx.Done():
   291  			case <-time.After(5 * time.Second):
   292  			}
   293  
   294  		case <-runningCtx.Done():
   295  			// The application finished nicely after the request was stopped.
   296  		}
   297  	case <-runningCtx.Done():
    298  		// Tests finished normally with no interrupts.
   299  	}
   300  
   301  	if runner.Cancelled {
   302  		// Don't print out the conclusion if the test was cancelled.
   303  		return 1
   304  	}
   305  
   306  	view.Conclusion(&suite)
   307  
   308  	if suite.Status != moduletest.Pass {
   309  		return 1
   310  	}
   311  	return 0
   312  }
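
         // Note: Run returns 1 for any failed, errored, or cancelled execution,
         // and 0 only when the entire suite passes.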
   313  
   314  // test runner
   315  
   316  type TestSuiteRunner struct {
   317  	command *TestCommand
   318  
   319  	Suite  *moduletest.Suite
   320  	Config *configs.Config
   321  
   322  	GlobalVariables map[string]backend.UnparsedVariableValue
   323  	Opts            *tofu.ContextOpts
   324  
   325  	View views.Test
   326  
   327  	// Stopped and Cancelled track whether the user requested the testing
    328  	// process to be interrupted. Stopped is a graceful exit: we'll still
    329  	// tidy up any state that was created and mark the tests with relevant
    330  	// `skipped` status updates. Cancelled is a hard "stop right now" exit:
    331  	// we won't attempt to clean up any state left hanging, and tests will
    332  	// just be left showing `pending` as the status. We will still print out
    333  	// the destroy summary diagnostics that tell the user what state has been
    334  	// left behind and needs manual clean up.
   335  	Stopped   bool
   336  	Cancelled bool
   337  
   338  	// StoppedCtx and CancelledCtx allow in progress OpenTofu operations to
   339  	// respond to external calls from the test command.
   340  	StoppedCtx   context.Context
   341  	CancelledCtx context.Context
   342  
   343  	// Verbose tells the runner to print out plan files during each test run.
   344  	Verbose bool
   345  }
   346  
   347  func (runner *TestSuiteRunner) Start(globals map[string]backend.UnparsedVariableValue) {
   348  	var files []string
   349  	for name := range runner.Suite.Files {
   350  		files = append(files, name)
   351  	}
   352  	sort.Strings(files) // execute the files in alphabetical order
   353  
   354  	runner.Suite.Status = moduletest.Pass
   355  	for _, name := range files {
   356  		if runner.Cancelled {
   357  			return
   358  		}
   359  
   360  		file := runner.Suite.Files[name]
   361  
   362  		fileRunner := &TestFileRunner{
   363  			Suite: runner,
   364  			States: map[string]*TestFileState{
   365  				MainStateIdentifier: {
   366  					Run:   nil,
   367  					State: states.NewState(),
   368  				},
   369  			},
   370  		}
   371  
   372  		fileRunner.ExecuteTestFile(file)
   373  		fileRunner.Cleanup(file)
   374  		runner.Suite.Status = runner.Suite.Status.Merge(file.Status)
   375  	}
   376  }
   377  
   378  type TestFileRunner struct {
   379  	Suite *TestSuiteRunner
   380  
   381  	States map[string]*TestFileState
   382  }
   383  
   384  type TestFileState struct {
   385  	Run   *moduletest.Run
   386  	State *states.State
   387  }
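
         // Each TestFileState pairs a tracked state with the run block that most
         // recently updated it, so that Cleanup can reuse that run's configuration
         // when destroying whatever infrastructure is left behind.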
   388  
   389  func (runner *TestFileRunner) ExecuteTestFile(file *moduletest.File) {
   390  	log.Printf("[TRACE] TestFileRunner: executing test file %s", file.Name)
   391  
   392  	file.Status = file.Status.Merge(moduletest.Pass)
   393  	for _, run := range file.Runs {
   394  		if runner.Suite.Cancelled {
   395  			// This means a hard stop has been requested, in this case we don't
   396  			// even stop to mark future tests as having been skipped. They'll
   397  			// just show up as pending in the printed summary.
   398  			return
   399  		}
   400  
   401  		if runner.Suite.Stopped {
   402  			// Then the test was requested to be stopped, so we just mark each
   403  			// following test as skipped and move on.
   404  			run.Status = moduletest.Skip
   405  			continue
   406  		}
   407  
   408  		if file.Status == moduletest.Error {
   409  			// If the overall test file has errored, we don't keep trying to
   410  			// execute tests. Instead, we mark all remaining run blocks as
   411  			// skipped.
   412  			run.Status = moduletest.Skip
   413  			continue
   414  		}
   415  
   416  		key := MainStateIdentifier
   417  		config := runner.Suite.Config
   418  		if run.Config.ConfigUnderTest != nil {
   419  			config = run.Config.ConfigUnderTest
   420  			// Then we need to load an alternate state and not the main one.
   421  
   422  			key = run.Config.Module.Source.String()
   423  			if key == MainStateIdentifier {
   424  				// This is bad. It means somehow the module we're loading has
    425  				// the same key as the main state and we're about to corrupt things.
   426  
   427  				run.Diagnostics = run.Diagnostics.Append(&hcl.Diagnostic{
   428  					Severity: hcl.DiagError,
   429  					Summary:  "Invalid module source",
   430  					Detail:   fmt.Sprintf("The source for the selected module evaluated to %s which should not be possible. This is a bug in OpenTofu - please report it!", key),
   431  					Subject:  run.Config.Module.DeclRange.Ptr(),
   432  				})
   433  
   434  				run.Status = moduletest.Error
   435  				file.Status = moduletest.Error
   436  				continue // Abort!
   437  			}
   438  
   439  			if _, exists := runner.States[key]; !exists {
   440  				runner.States[key] = &TestFileState{
   441  					Run:   nil,
   442  					State: states.NewState(),
   443  				}
   444  			}
   445  		}
   446  
   447  		state, updatedState := runner.ExecuteTestRun(run, file, runner.States[key].State, config)
   448  		if updatedState {
   449  			// Only update the most recent run and state if the state was
   450  			// actually updated by this change. We want to use the run that
   451  			// most recently updated the tracked state as the cleanup
   452  			// configuration.
   453  			runner.States[key].State = state
   454  			runner.States[key].Run = run
   455  		}
   456  
   457  		file.Status = file.Status.Merge(run.Status)
   458  	}
   459  
   460  	runner.Suite.View.File(file)
   461  	for _, run := range file.Runs {
   462  		runner.Suite.View.Run(run, file)
   463  	}
   464  }
   465  
   466  func (runner *TestFileRunner) ExecuteTestRun(run *moduletest.Run, file *moduletest.File, state *states.State, config *configs.Config) (*states.State, bool) {
   467  	log.Printf("[TRACE] TestFileRunner: executing run block %s/%s", file.Name, run.Name)
   468  
   469  	if runner.Suite.Cancelled {
   470  		// Don't do anything, just give up and return immediately.
    471  	// The surrounding functions should prevent this from even being called,
    472  	// but we can still verify it here in case of race conditions.
   473  		return state, false
   474  	}
   475  
   476  	if runner.Suite.Stopped {
   477  		// Basically the same as above, except we'll be a bit nicer.
   478  		run.Status = moduletest.Skip
   479  		return state, false
   480  	}
   481  
   482  	run.Diagnostics = run.Diagnostics.Append(run.Config.Validate())
   483  	if run.Diagnostics.HasErrors() {
   484  		run.Status = moduletest.Error
   485  		return state, false
   486  	}
   487  
   488  	resetConfig, configDiags := config.TransformForTest(run.Config, file.Config)
   489  	defer resetConfig()
   490  
   491  	run.Diagnostics = run.Diagnostics.Append(configDiags)
   492  	if configDiags.HasErrors() {
   493  		run.Status = moduletest.Error
   494  		return state, false
   495  	}
   496  
   497  	validateDiags := runner.validate(config, run, file)
   498  	run.Diagnostics = run.Diagnostics.Append(validateDiags)
   499  	if validateDiags.HasErrors() {
   500  		run.Status = moduletest.Error
   501  		return state, false
   502  	}
   503  
   504  	planCtx, plan, planDiags := runner.plan(config, state, run, file)
   505  	if run.Config.Command == configs.PlanTestCommand {
   506  		// Then we want to assess our conditions and diagnostics differently.
   507  		planDiags = run.ValidateExpectedFailures(planDiags)
   508  		run.Diagnostics = run.Diagnostics.Append(planDiags)
   509  		if planDiags.HasErrors() {
   510  			run.Status = moduletest.Error
   511  			return state, false
   512  		}
   513  
   514  		variables, resetVariables, variableDiags := runner.prepareInputVariablesForAssertions(config, run, file, runner.Suite.GlobalVariables)
   515  		defer resetVariables()
   516  
   517  		run.Diagnostics = run.Diagnostics.Append(variableDiags)
   518  		if variableDiags.HasErrors() {
   519  			run.Status = moduletest.Error
   520  			return state, false
   521  		}
   522  
   523  		if runner.Suite.Verbose {
   524  			schemas, diags := planCtx.Schemas(config, plan.PlannedState)
   525  
   526  			// If we're going to fail to render the plan, let's not fail the overall
   527  			// test. It can still have succeeded. So we'll add the diagnostics, but
   528  			// still report the test status as a success.
   529  			if diags.HasErrors() {
   530  				// This is very unlikely.
   531  				diags = diags.Append(tfdiags.Sourceless(
   532  					tfdiags.Warning,
   533  					"Failed to print verbose output",
   534  					fmt.Sprintf("OpenTofu failed to print the verbose output for %s, other diagnostics will contain more details as to why.", path.Join(file.Name, run.Name))))
   535  			} else {
   536  				run.Verbose = &moduletest.Verbose{
   537  					Plan:         plan,
   538  					State:        plan.PlannedState,
   539  					Config:       config,
   540  					Providers:    schemas.Providers,
   541  					Provisioners: schemas.Provisioners,
   542  				}
   543  			}
   544  
   545  			run.Diagnostics = run.Diagnostics.Append(diags)
   546  		}
   547  
   548  		planCtx.TestContext(config, plan.PlannedState, plan, variables).EvaluateAgainstPlan(run)
   549  		return state, false
   550  	}
   551  
    552  	// Otherwise, any error during planning prevents the apply operation
    553  	// from continuing, which we treat as a test error.
   554  	run.Diagnostics = run.Diagnostics.Append(planDiags)
   555  	if planDiags.HasErrors() {
   556  		run.Status = moduletest.Error
   557  		return state, false
   558  	}
   559  
    560  	// Since we're carrying on and executing the apply operation as well,
    561  	// we're just going to do some post-processing of the diagnostics. We
    562  	// remove the warnings generated from check blocks, as the apply operation
    563  	// will either reproduce them or fix them, and we want neither fixed
    564  	// diagnostics nor duplicates to be reported.
   565  	var filteredDiags tfdiags.Diagnostics
   566  	for _, diag := range run.Diagnostics {
   567  		if rule, ok := addrs.DiagnosticOriginatesFromCheckRule(diag); ok && rule.Container.CheckableKind() == addrs.CheckableCheck {
   568  			continue
   569  		}
   570  		filteredDiags = filteredDiags.Append(diag)
   571  	}
   572  	run.Diagnostics = filteredDiags
   573  
   574  	applyCtx, updated, applyDiags := runner.apply(plan, state, config, run, file)
   575  
   576  	// Remove expected diagnostics, and add diagnostics in case anything that should have failed didn't.
   577  	applyDiags = run.ValidateExpectedFailures(applyDiags)
   578  
   579  	run.Diagnostics = run.Diagnostics.Append(applyDiags)
   580  	if applyDiags.HasErrors() {
   581  		run.Status = moduletest.Error
   582  		// Even though the apply operation failed, the graph may have done
   583  		// partial updates and the returned state should reflect this.
   584  		return updated, true
   585  	}
   586  
   587  	variables, resetVariables, variableDiags := runner.prepareInputVariablesForAssertions(config, run, file, runner.Suite.GlobalVariables)
   588  	defer resetVariables()
   589  
   590  	run.Diagnostics = run.Diagnostics.Append(variableDiags)
   591  	if variableDiags.HasErrors() {
   592  		run.Status = moduletest.Error
   593  		return updated, true
   594  	}
   595  
   596  	if runner.Suite.Verbose {
   597  		schemas, diags := planCtx.Schemas(config, plan.PlannedState)
   598  
   599  		// If we're going to fail to render the plan, let's not fail the overall
   600  		// test. It can still have succeeded. So we'll add the diagnostics, but
   601  		// still report the test status as a success.
   602  		if diags.HasErrors() {
   603  			// This is very unlikely.
   604  			diags = diags.Append(tfdiags.Sourceless(
   605  				tfdiags.Warning,
   606  				"Failed to print verbose output",
   607  				fmt.Sprintf("OpenTofu failed to print the verbose output for %s, other diagnostics will contain more details as to why.", path.Join(file.Name, run.Name))))
   608  		} else {
   609  			run.Verbose = &moduletest.Verbose{
   610  				Plan:         plan,
   611  				State:        updated,
   612  				Config:       config,
   613  				Providers:    schemas.Providers,
   614  				Provisioners: schemas.Provisioners,
   615  			}
   616  		}
   617  
   618  		run.Diagnostics = run.Diagnostics.Append(diags)
   619  	}
   620  
   621  	applyCtx.TestContext(config, updated, plan, variables).EvaluateAgainstState(run)
   622  	return updated, true
   623  }
   624  
   625  func (runner *TestFileRunner) validate(config *configs.Config, run *moduletest.Run, file *moduletest.File) tfdiags.Diagnostics {
   626  	log.Printf("[TRACE] TestFileRunner: called validate for %s/%s", file.Name, run.Name)
   627  
   628  	var diags tfdiags.Diagnostics
   629  
   630  	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
   631  	diags = diags.Append(ctxDiags)
   632  	if ctxDiags.HasErrors() {
   633  		return diags
   634  	}
   635  
   636  	runningCtx, done := context.WithCancel(context.Background())
   637  
   638  	var validateDiags tfdiags.Diagnostics
   639  	panicHandler := logging.PanicHandlerWithTraceFn()
   640  	go func() {
   641  		defer panicHandler()
   642  		defer done()
   643  
   644  		log.Printf("[DEBUG] TestFileRunner: starting validate for %s/%s", file.Name, run.Name)
   645  		validateDiags = tfCtx.Validate(config)
    646  		log.Printf("[DEBUG] TestFileRunner: completed validate for %s/%s", file.Name, run.Name)
   647  	}()
   648  	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil)
   649  
   650  	if cancelled {
   651  		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
   652  	}
   653  
   654  	diags = diags.Append(waitDiags)
   655  	diags = diags.Append(validateDiags)
   656  
   657  	return diags
   658  }
   659  
   660  func (runner *TestFileRunner) destroy(config *configs.Config, state *states.State, run *moduletest.Run, file *moduletest.File) (*states.State, tfdiags.Diagnostics) {
   661  
   662  	log.Printf("[TRACE] TestFileRunner: called destroy for %s/%s", file.Name, run.Name)
   663  
   664  	if state.Empty() {
   665  		// Nothing to do!
   666  		return state, nil
   667  	}
   668  
   669  	var diags tfdiags.Diagnostics
   670  
   671  	variables, variableDiags := buildInputVariablesForTest(run, file, config, runner.Suite.GlobalVariables, runner.States)
   672  	diags = diags.Append(variableDiags)
   673  
   674  	if diags.HasErrors() {
   675  		return state, diags
   676  	}
   677  
   678  	planOpts := &tofu.PlanOpts{
   679  		Mode:         plans.DestroyMode,
   680  		SetVariables: variables,
   681  	}
   682  
   683  	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
   684  	diags = diags.Append(ctxDiags)
   685  	if ctxDiags.HasErrors() {
   686  		return state, diags
   687  	}
   688  
   689  	runningCtx, done := context.WithCancel(context.Background())
   690  
   691  	var plan *plans.Plan
   692  	var planDiags tfdiags.Diagnostics
   693  	panicHandler := logging.PanicHandlerWithTraceFn()
   694  	go func() {
   695  		defer panicHandler()
   696  		defer done()
   697  
   698  		log.Printf("[DEBUG] TestFileRunner: starting destroy plan for %s/%s", file.Name, run.Name)
   699  		plan, planDiags = tfCtx.Plan(config, state, planOpts)
   700  		log.Printf("[DEBUG] TestFileRunner: completed destroy plan for %s/%s", file.Name, run.Name)
   701  	}()
   702  	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil)
   703  
   704  	if cancelled {
   705  		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
   706  	}
   707  
   708  	diags = diags.Append(waitDiags)
   709  	diags = diags.Append(planDiags)
   710  
   711  	if diags.HasErrors() {
   712  		return state, diags
   713  	}
   714  
   715  	_, updated, applyDiags := runner.apply(plan, state, config, run, file)
   716  	diags = diags.Append(applyDiags)
   717  	return updated, diags
   718  }
   719  
   720  func (runner *TestFileRunner) plan(config *configs.Config, state *states.State, run *moduletest.Run, file *moduletest.File) (*tofu.Context, *plans.Plan, tfdiags.Diagnostics) {
   721  	log.Printf("[TRACE] TestFileRunner: called plan for %s/%s", file.Name, run.Name)
   722  
   723  	var diags tfdiags.Diagnostics
   724  
   725  	targets, targetDiags := run.GetTargets()
   726  	diags = diags.Append(targetDiags)
   727  
   728  	replaces, replaceDiags := run.GetReplaces()
   729  	diags = diags.Append(replaceDiags)
   730  
   731  	references, referenceDiags := run.GetReferences()
   732  	diags = diags.Append(referenceDiags)
   733  
   734  	variables, variableDiags := buildInputVariablesForTest(run, file, config, runner.Suite.GlobalVariables, runner.States)
   735  	diags = diags.Append(variableDiags)
   736  
   737  	if diags.HasErrors() {
   738  		return nil, nil, diags
   739  	}
   740  
   741  	planOpts := &tofu.PlanOpts{
   742  		Mode: func() plans.Mode {
   743  			switch run.Config.Options.Mode {
   744  			case configs.RefreshOnlyTestMode:
   745  				return plans.RefreshOnlyMode
   746  			default:
   747  				return plans.NormalMode
   748  			}
   749  		}(),
   750  		Targets:            targets,
   751  		ForceReplace:       replaces,
   752  		SkipRefresh:        !run.Config.Options.Refresh,
   753  		SetVariables:       variables,
   754  		ExternalReferences: references,
   755  	}
   756  
   757  	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
   758  	diags = diags.Append(ctxDiags)
   759  	if ctxDiags.HasErrors() {
   760  		return nil, nil, diags
   761  	}
   762  
   763  	runningCtx, done := context.WithCancel(context.Background())
   764  
   765  	var plan *plans.Plan
   766  	var planDiags tfdiags.Diagnostics
   767  	panicHandler := logging.PanicHandlerWithTraceFn()
   768  	go func() {
   769  		defer panicHandler()
   770  		defer done()
   771  
   772  		log.Printf("[DEBUG] TestFileRunner: starting plan for %s/%s", file.Name, run.Name)
   773  		plan, planDiags = tfCtx.Plan(config, state, planOpts)
   774  		log.Printf("[DEBUG] TestFileRunner: completed plan for %s/%s", file.Name, run.Name)
   775  	}()
   776  	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, nil)
   777  
   778  	if cancelled {
   779  		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
   780  	}
   781  
   782  	diags = diags.Append(waitDiags)
   783  	diags = diags.Append(planDiags)
   784  
   785  	return tfCtx, plan, diags
   786  }
   787  
   788  func (runner *TestFileRunner) apply(plan *plans.Plan, state *states.State, config *configs.Config, run *moduletest.Run, file *moduletest.File) (*tofu.Context, *states.State, tfdiags.Diagnostics) {
   789  	log.Printf("[TRACE] TestFileRunner: called apply for %s/%s", file.Name, run.Name)
   790  
   791  	var diags tfdiags.Diagnostics
   792  
    793  	// If things get cancelled while we are executing the apply operation
    794  	// below, we want to print out all the objects that we were creating so
    795  	// the user can verify whether we managed to tidy everything up.
   796  	//
   797  	// Unfortunately, this creates a race condition as the apply operation can
   798  	// edit the plan (by removing changes once they are applied) while at the
   799  	// same time our cancellation process will try to read the plan.
   800  	//
   801  	// We take a quick copy of the changes we care about here, which will then
   802  	// be used in place of the plan when we print out the objects to be created
   803  	// as part of the cancellation process.
   804  	var created []*plans.ResourceInstanceChangeSrc
   805  	for _, change := range plan.Changes.Resources {
   806  		if change.Action != plans.Create {
   807  			continue
   808  		}
   809  		created = append(created, change)
   810  	}
   811  
   812  	tfCtx, ctxDiags := tofu.NewContext(runner.Suite.Opts)
   813  	diags = diags.Append(ctxDiags)
   814  	if ctxDiags.HasErrors() {
   815  		return nil, state, diags
   816  	}
   817  
   818  	runningCtx, done := context.WithCancel(context.Background())
   819  
   820  	var updated *states.State
   821  	var applyDiags tfdiags.Diagnostics
   822  
   823  	panicHandler := logging.PanicHandlerWithTraceFn()
   824  	go func() {
   825  		defer panicHandler()
   826  		defer done()
   827  		log.Printf("[DEBUG] TestFileRunner: starting apply for %s/%s", file.Name, run.Name)
   828  		updated, applyDiags = tfCtx.Apply(plan, config)
   829  		log.Printf("[DEBUG] TestFileRunner: completed apply for %s/%s", file.Name, run.Name)
   830  	}()
   831  	waitDiags, cancelled := runner.wait(tfCtx, runningCtx, run, file, created)
   832  
   833  	if cancelled {
   834  		diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Test interrupted", "The test operation could not be completed due to an interrupt signal. Please read the remaining diagnostics carefully for any sign of failed state cleanup or dangling resources."))
   835  	}
   836  
   837  	diags = diags.Append(waitDiags)
   838  	diags = diags.Append(applyDiags)
   839  
   840  	return tfCtx, updated, diags
   841  }
   842  
   843  func (runner *TestFileRunner) wait(ctx *tofu.Context, runningCtx context.Context, run *moduletest.Run, file *moduletest.File, created []*plans.ResourceInstanceChangeSrc) (diags tfdiags.Diagnostics, cancelled bool) {
   844  	var identifier string
   845  	if file == nil {
   846  		identifier = "validate"
   847  	} else {
   848  		identifier = file.Name
   849  		if run != nil {
   850  			identifier = fmt.Sprintf("%s/%s", identifier, run.Name)
   851  		}
   852  	}
   853  	log.Printf("[TRACE] TestFileRunner: waiting for execution during %s", identifier)
   854  
   855  	// This function handles what happens when the user presses the second
    856  	// interrupt. This is a "hard cancel": we are going to stop doing whatever
   857  	// it is we're doing. This means even if we're halfway through creating or
   858  	// destroying infrastructure we just give up.
   859  	handleCancelled := func() {
   860  		log.Printf("[DEBUG] TestFileRunner: test execution cancelled during %s", identifier)
   861  
   862  		states := make(map[*moduletest.Run]*states.State)
   863  		states[nil] = runner.States[MainStateIdentifier].State
   864  		for key, module := range runner.States {
   865  			if key == MainStateIdentifier {
   866  				continue
   867  			}
   868  			states[module.Run] = module.State
   869  		}
   870  		runner.Suite.View.FatalInterruptSummary(run, file, states, created)
   871  
   872  		cancelled = true
   873  		go ctx.Stop()
   874  
    875  		// Just wait for things to finish now; the overall test execution will
   876  		// exit early if this takes too long.
   877  		<-runningCtx.Done()
   878  	}
   879  
   880  	// This function handles what happens when the user presses the first
    881  	// interrupt. This is essentially a "soft cancel": we're not going to do
    882  	// anything but wait for things to finish safely. But we do listen
   883  	// for the crucial second interrupt which will prompt a hard stop / cancel.
   884  	handleStopped := func() {
   885  		log.Printf("[DEBUG] TestFileRunner: test execution stopped during %s", identifier)
   886  
   887  		select {
   888  		case <-runner.Suite.CancelledCtx.Done():
   889  			// We've been asked again. This time we stop whatever we're doing
   890  			// and abandon all attempts to do anything reasonable.
   891  			handleCancelled()
   892  		case <-runningCtx.Done():
   893  			// Do nothing, we finished safely and skipping the remaining tests
   894  			// will be handled elsewhere.
   895  		}
   896  
   897  	}
   898  
   899  	select {
   900  	case <-runner.Suite.StoppedCtx.Done():
   901  		handleStopped()
   902  	case <-runner.Suite.CancelledCtx.Done():
   903  		handleCancelled()
   904  	case <-runningCtx.Done():
   905  		// The operation exited normally.
   906  	}
   907  
   908  	return diags, cancelled
   909  }
   910  
   911  func (runner *TestFileRunner) Cleanup(file *moduletest.File) {
    912  	log.Printf("[TRACE] TestFileRunner: cleaning up state for %s", file.Name)
   913  
   914  	if runner.Suite.Cancelled {
   915  		// Don't try and clean anything up if the execution has been cancelled.
    916  		log.Printf("[DEBUG] TestFileRunner: skipping state cleanup for %s due to cancellation", file.Name)
   917  		return
   918  	}
   919  
   920  	var states []*TestFileState
   921  	for key, state := range runner.States {
   922  		if state.Run == nil {
   923  			if state.State.Empty() {
    924  				// The run block can be nil while the state is empty if a
    925  				// module was only used to execute plan commands. This is
    926  				// okay, and means we have nothing to clean up, so we'll
    927  				// just skip it.
   928  				continue
   929  			}
   930  
   931  			if key == MainStateIdentifier {
   932  				log.Printf("[ERROR] TestFileRunner: found inconsistent run block and state file in %s", file.Name)
   933  			} else {
   934  				log.Printf("[ERROR] TestFileRunner: found inconsistent run block and state file in %s for module %s", file.Name, key)
   935  			}
   936  
   937  			// Otherwise something bad has happened, and we have no way to
   938  			// recover from it. This shouldn't happen in reality, but we'll
   939  			// print a diagnostic instead of panicking later.
   940  
   941  			var diags tfdiags.Diagnostics
    942  			diags = diags.Append(tfdiags.Sourceless(tfdiags.Error, "Inconsistent state", fmt.Sprintf("Found inconsistent state while cleaning up %s. This is a bug in OpenTofu - please report it!", file.Name)))
   943  			runner.Suite.View.DestroySummary(diags, nil, file, state.State)
   944  			continue
   945  		}
   946  
   947  		states = append(states, state)
   948  	}
   949  
   950  	slices.SortFunc(states, func(a, b *TestFileState) int {
   951  		// We want to clean up later run blocks first. So, we'll sort this in
   952  		// reverse according to index. This means larger indices first.
   953  		return b.Run.Index - a.Run.Index
   954  	})
   955  
   956  	// Clean up all the states (for main and custom modules) in reverse order.
   957  	for _, state := range states {
    958  		log.Printf("[DEBUG] TestFileRunner: cleaning up state for %s/%s", file.Name, state.Run.Name)
   959  
   960  		if runner.Suite.Cancelled {
   961  			// In case the cancellation came while a previous state was being
   962  			// destroyed.
    963  			log.Printf("[DEBUG] TestFileRunner: skipping state cleanup for %s/%s due to cancellation", file.Name, state.Run.Name)
   964  			return
   965  		}
   966  
   967  		var diags tfdiags.Diagnostics
   968  		var runConfig *configs.Config
   969  
   970  		isMainState := state.Run.Config.Module == nil
   971  		if isMainState {
   972  			runConfig = runner.Suite.Config
   973  		} else {
   974  			runConfig = state.Run.Config.ConfigUnderTest
   975  		}
   976  
   977  		reset, configDiags := runConfig.TransformForTest(state.Run.Config, file.Config)
   978  		diags = diags.Append(configDiags)
   979  
   980  		updated := state.State
   981  		if !diags.HasErrors() {
   982  			var destroyDiags tfdiags.Diagnostics
   983  			updated, destroyDiags = runner.destroy(runConfig, state.State, state.Run, file)
   984  			diags = diags.Append(destroyDiags)
   985  		}
   986  		runner.Suite.View.DestroySummary(diags, state.Run, file, updated)
   987  
   988  		if updated.HasManagedResourceInstanceObjects() {
   989  			views.SaveErroredTestStateFile(updated, state.Run, file, runner.Suite.View)
   990  		}
   991  		reset()
   992  	}
   993  }
   994  
   995  // helper functions
   996  
   997  // buildInputVariablesForTest creates a tofu.InputValues mapping for
   998  // variable values that are relevant to the config being tested.
   999  //
  1000  // Crucially, it differs from prepareInputVariablesForAssertions in that it only
   1001  // includes variables that are referenced by the config, not everything that
  1002  // is defined within the test run block and test file.
  1003  func buildInputVariablesForTest(run *moduletest.Run, file *moduletest.File, config *configs.Config, globals map[string]backend.UnparsedVariableValue, states map[string]*TestFileState) (tofu.InputValues, tfdiags.Diagnostics) {
  1004  	variables := make(map[string]backend.UnparsedVariableValue)
  1005  	evalCtx := getEvalContextFromStates(states)
  1006  	for name := range config.Module.Variables {
  1007  		if run != nil {
  1008  			if expr, exists := run.Config.Variables[name]; exists {
  1009  				// Local variables take precedence.
  1010  				variables[name] = testVariableValueExpression{
  1011  					expr:       expr,
  1012  					sourceType: tofu.ValueFromConfig,
  1013  					ctx:        evalCtx,
  1014  				}
  1015  				continue
  1016  			}
  1017  		}
  1018  
  1019  		if file != nil {
  1020  			if expr, exists := file.Config.Variables[name]; exists {
   1021  				// If it's not set locally, it may be set for the entire file.
  1022  				variables[name] = unparsedVariableValueExpression{
  1023  					expr:       expr,
  1024  					sourceType: tofu.ValueFromConfig,
  1025  				}
  1026  				continue
  1027  			}
  1028  		}
  1029  
  1030  		if globals != nil {
  1031  			// If it's not set locally or at the file level, maybe it was
  1032  			// defined globally.
  1033  			if variable, exists := globals[name]; exists {
  1034  				variables[name] = variable
  1035  			}
  1036  		}
  1037  
   1038  		// If it's not set at all, that might be okay, as the variable may be
   1039  		// optional, so we just don't add anything to the map.
  1040  	}
  1041  
  1042  	return backend.ParseVariableValues(variables, config.Module.Variables)
  1043  }
  1044  
  1045  // getEvalContextFromStates constructs an hcl.EvalContext based on the provided map
  1046  // of TestFileState instances. It extracts the relevant information from the
  1047  // states to create a context suitable for HCL evaluation, including the output
  1048  // values of modules.
  1049  //
  1050  // Parameters:
  1051  //   - states: A map of TestFileState instances containing the state information.
  1052  //
  1053  // Returns:
  1054  //   - *hcl.EvalContext: The constructed HCL evaluation context.
  1055  func getEvalContextFromStates(states map[string]*TestFileState) *hcl.EvalContext {
  1056  	runCtx := make(map[string]cty.Value)
  1057  	for _, state := range states {
  1058  		if state.Run == nil {
  1059  			continue
  1060  		}
  1061  		outputs := make(map[string]cty.Value)
   1062  		mod := state.State.Modules[""] // the root module is keyed by the empty string
  1063  		for outName, out := range mod.OutputValues {
  1064  			outputs[outName] = out.Value
  1065  		}
  1066  		runCtx[state.Run.Name] = cty.ObjectVal(outputs)
  1067  	}
  1068  	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{"run": cty.ObjectVal(runCtx)}}
  1069  
  1070  	return ctx
  1071  }
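
         // For illustration (the run and output names here are hypothetical), the
         // context built above is what allows a later run block to consume an
         // earlier run's outputs:
         //
         //	run "setup" {
         //	  # ...
         //	}
         //
         //	run "main" {
         //	  variables {
         //	    id = run.setup.id
         //	  }
         //	}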
  1072  
  1073  type testVariableValueExpression struct {
  1074  	expr       hcl.Expression
  1075  	sourceType tofu.ValueSourceType
  1076  	ctx        *hcl.EvalContext
  1077  }
  1078  
  1079  func (v testVariableValueExpression) ParseVariableValue(mode configs.VariableParsingMode) (*tofu.InputValue, tfdiags.Diagnostics) {
  1080  	var diags tfdiags.Diagnostics
  1081  	val, hclDiags := v.expr.Value(v.ctx)
  1082  	diags = diags.Append(hclDiags)
  1083  
  1084  	rng := tfdiags.SourceRangeFromHCL(v.expr.Range())
  1085  
  1086  	return &tofu.InputValue{
  1087  		Value:       val,
  1088  		SourceType:  v.sourceType,
  1089  		SourceRange: rng,
  1090  	}, diags
  1091  }
  1092  
  1093  // prepareInputVariablesForAssertions creates a tofu.InputValues mapping
  1094  // that contains all the variables defined for a given run and file, alongside
  1095  // any unset variables that have defaults within the provided config.
  1096  //
  1097  // Crucially, it differs from buildInputVariablesForTest in that the returned
  1098  // input values include all variables available even if they are not defined
  1099  // within the config. This allows the assertions to refer to variables defined
  1100  // solely within the test file, and not only those within the configuration.
  1101  //
   1102  // It also allows references to the outputs of previously executed run blocks
   1103  // as variable expressions. This relies upon the evaluation order; it does not
   1104  // sort the test cases into dependency order.
  1105  //
  1106  // In addition, it modifies the provided config so that any variables that are
  1107  // available are also defined in the config. It returns a function that resets
  1108  // the config which must be called so the config can be reused going forward.
  1109  func (runner *TestFileRunner) prepareInputVariablesForAssertions(config *configs.Config, run *moduletest.Run, file *moduletest.File, globals map[string]backend.UnparsedVariableValue) (tofu.InputValues, func(), tfdiags.Diagnostics) {
  1110  	ctx := getEvalContextFromStates(runner.States)
  1111  
  1112  	variables := make(map[string]backend.UnparsedVariableValue)
  1113  
  1114  	if run != nil {
  1115  		for name, expr := range run.Config.Variables {
  1116  			variables[name] = testVariableValueExpression{
  1117  				expr:       expr,
  1118  				sourceType: tofu.ValueFromConfig,
  1119  				ctx:        ctx,
  1120  			}
  1121  		}
  1122  	}
  1123  
  1124  	if file != nil {
  1125  		for name, expr := range file.Config.Variables {
  1126  			if _, exists := variables[name]; exists {
  1127  				// Then this variable was defined at the run level and we want
  1128  				// that value to take precedence.
  1129  				continue
  1130  			}
  1131  			variables[name] = testVariableValueExpression{
  1132  				expr:       expr,
  1133  				sourceType: tofu.ValueFromConfig,
  1134  				ctx:        ctx,
  1135  			}
  1136  		}
  1137  	}
  1138  
  1139  	for name, variable := range globals {
  1140  		if _, exists := variables[name]; exists {
  1141  			// Then this value was already defined at either the run level
  1142  			// or the file level, and we want those values to take
  1143  			// precedence.
  1144  			continue
  1145  		}
  1146  		variables[name] = variable
  1147  	}
  1148  
   1149  	// We've gathered all the values we have; let's convert them into
  1150  	// tofu.InputValues so they can be passed into the OpenTofu graph.
  1151  
  1152  	inputs := make(tofu.InputValues, len(variables))
  1153  	var diags tfdiags.Diagnostics
  1154  	for name, variable := range variables {
  1155  		value, valueDiags := variable.ParseVariableValue(configs.VariableParseLiteral)
  1156  		diags = diags.Append(valueDiags)
  1157  		inputs[name] = value
  1158  	}
  1159  
  1160  	// Next, we're going to apply any default values from the configuration.
  1161  	// We do this after the conversion into tofu.InputValues, as the
  1162  	// defaults have already been converted into cty.Value objects.
  1163  
  1164  	for name, variable := range config.Module.Variables {
  1165  		if _, exists := variables[name]; exists {
  1166  			// Then we don't want to apply the default for this variable as we
  1167  			// already have a value.
  1168  			continue
  1169  		}
  1170  
  1171  		if variable.Default != cty.NilVal {
  1172  			inputs[name] = &tofu.InputValue{
  1173  				Value:       variable.Default,
  1174  				SourceType:  tofu.ValueFromConfig,
  1175  				SourceRange: tfdiags.SourceRangeFromHCL(variable.DeclRange),
  1176  			}
  1177  		}
  1178  	}
  1179  
   1180  	// Finally, we're going to make some modifications to the config.
   1181  	// If we have variable values from the test file, we need to make sure
   1182  	// they have an equivalent entry in the configuration. We're going to do
  1183  	// that dynamically here.
  1184  
  1185  	// First, take a backup of the existing configuration so we can easily
  1186  	// restore it later.
  1187  	currentVars := make(map[string]*configs.Variable)
  1188  	for name, variable := range config.Module.Variables {
  1189  		currentVars[name] = variable
  1190  	}
  1191  
   1192  	// Next, let's go through all of our inputs and add any that aren't
   1193  	// already defined in the config.
  1194  	for name, value := range inputs {
  1195  		if _, exists := config.Module.Variables[name]; exists {
  1196  			continue
  1197  		}
  1198  
  1199  		config.Module.Variables[name] = &configs.Variable{
  1200  			Name:           name,
  1201  			Type:           value.Value.Type(),
  1202  			ConstraintType: value.Value.Type(),
  1203  			DeclRange:      value.SourceRange.ToHCL(),
  1204  		}
  1205  	}
  1206  
  1207  	// We return our input values, a function that will reset the variables
  1208  	// within the config so it can be used again, and any diagnostics reporting
  1209  	// variables that we couldn't parse.
  1210  
  1211  	return inputs, func() {
  1212  		config.Module.Variables = currentVars
  1213  	}, diags
  1214  }