github.com/graywolf-at-work-2/terraform-vendor@v1.4.5/internal/command/test.go

package command

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"

	ctyjson "github.com/zclconf/go-cty/cty/json"

	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/command/arguments"
	"github.com/hashicorp/terraform/internal/command/format"
	"github.com/hashicorp/terraform/internal/command/views"
	"github.com/hashicorp/terraform/internal/configs"
	"github.com/hashicorp/terraform/internal/configs/configload"
	"github.com/hashicorp/terraform/internal/depsfile"
	"github.com/hashicorp/terraform/internal/initwd"
	"github.com/hashicorp/terraform/internal/moduletest"
	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/providercache"
	"github.com/hashicorp/terraform/internal/providers"
	"github.com/hashicorp/terraform/internal/states"
	"github.com/hashicorp/terraform/internal/terraform"
	"github.com/hashicorp/terraform/internal/tfdiags"
)

// TestCommand is the implementation of "terraform test".
type TestCommand struct {
	Meta
}

func (c *TestCommand) Run(rawArgs []string) int {
	// Parse and apply global view arguments
	common, rawArgs := arguments.ParseView(rawArgs)
	c.View.Configure(common)

	args, diags := arguments.ParseTest(rawArgs)
	view := views.NewTest(c.View, args.Output)
	if diags.HasErrors() {
		view.Diagnostics(diags)
		return 1
	}

	diags = diags.Append(tfdiags.Sourceless(
		tfdiags.Warning,
		`The "terraform test" command is experimental`,
		"We'd like to invite adventurous module authors to write integration tests for their modules using this command, but all of the behaviors of this command are currently experimental and may change based on feedback.\n\nFor more information on the testing experiment, including ongoing research goals and avenues for feedback, see:\n    https://www.terraform.io/docs/language/modules/testing-experiment.html",
	))

	ctx, cancel := c.InterruptibleContext()
	defer cancel()

	results, moreDiags := c.run(ctx, args)
	diags = diags.Append(moreDiags)

	initFailed := diags.HasErrors()
	view.Diagnostics(diags)
	diags = view.Results(results)
	resultsFailed := diags.HasErrors()
	view.Diagnostics(diags) // possible additional errors from saving the results

	var testsFailed bool
	for _, suite := range results {
		for _, component := range suite.Components {
			for _, assertion := range component.Assertions {
				if !assertion.Outcome.SuiteCanPass() {
					testsFailed = true
				}
			}
		}
	}

	// Lots of things can possibly have failed
	if initFailed || resultsFailed || testsFailed {
		return 1
	}
	return 0
}

func (c *TestCommand) run(ctx context.Context, args arguments.Test) (results map[string]*moduletest.Suite, diags tfdiags.Diagnostics) {
	suiteNames, err := c.collectSuiteNames()
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Error while searching for test configurations",
			fmt.Sprintf("While attempting to scan the 'tests' subdirectory for potential test configurations, Terraform encountered an error: %s.", err),
		))
		return nil, diags
	}

	ret := make(map[string]*moduletest.Suite, len(suiteNames))
	for _, suiteName := range suiteNames {
		if ctx.Err() != nil {
			// If the context has already failed in some way then we'll
			// halt early and report whatever's already happened.
			break
		}
		suite, moreDiags := c.runSuite(ctx, suiteName)
		diags = diags.Append(moreDiags)
		ret[suiteName] = suite
	}

	return ret, diags
}

func (c *TestCommand) runSuite(ctx context.Context, suiteName string) (*moduletest.Suite, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	ret := moduletest.Suite{
		Name:       suiteName,
		Components: map[string]*moduletest.Component{},
	}

	// In order to make this initial round of "terraform test" pretty self
	// contained while it's experimental, it's largely just mimicking what
	// would happen when running the main Terraform workflow commands, which
	// comes at the expense of a few irritants that we'll hopefully resolve
	// in future iterations as the design solidifies:
	//   - We need to install remote modules separately for each of the
	//     test suites, because we don't have any sense of a shared cache
	//     of modules that multiple configurations can refer to at once.
	//   - We _do_ have a sense of a cache of remote providers, but it's fixed
	//     at being specifically a two-level cache (global vs. directory-specific)
	//     and so we can't easily capture a third level of "all of the test suites
	//     for this module" that sits between the two. Consequently, we need to
	//     dynamically choose between creating a directory-specific "global"
	//     cache or using the user's existing global cache, to avoid any
	//     situation where we'd be re-downloading the same providers for every
	//     one of the test suites.
	//   - We need to do something a bit horrid in order to have our test
	//     provider instance persist between the plan and apply steps, because
	//     normally that is the exact opposite of what we want.
	// The above notes are here mainly as an aid to someone who might be
	// planning a subsequent phase of this R&D effort, to help distinguish
	// between things we're doing here because they are valuable vs. things
	// we're doing just to make it work without doing any disruptive
	// refactoring.

	suiteDirs, moreDiags := c.prepareSuiteDir(ctx, suiteName)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		// Generate a special failure representing the test initialization
		// having failed, since we therefore won't be able to run the actual
		// tests defined inside.
		ret.Components["(init)"] = &moduletest.Component{
			Assertions: map[string]*moduletest.Assertion{
				"(init)": {
					Outcome:     moduletest.Error,
					Description: "terraform init",
					Message:     "failed to install test suite dependencies",
					Diagnostics: diags,
				},
			},
		}
		return &ret, nil
	}

	// When we run the suite itself, we collect up diagnostics associated
	// with individual components, so ret.Components may or may not contain
	// failed/errored components after runTestSuite returns.
	var finalState *states.State
	ret.Components, finalState = c.runTestSuite(ctx, suiteDirs)

	// Regardless of the success or failure of the test suite, if there are
	// any objects left in the state then we'll generate a top-level error
	// about each one to minimize the chance of the user failing to notice
	// that there are leftover objects that might continue to cost money
	// unless manually deleted.
	for _, ms := range finalState.Modules {
		for _, rs := range ms.Resources {
			for instanceKey, is := range rs.Instances {
				var objs []*states.ResourceInstanceObjectSrc
				if is.Current != nil {
					objs = append(objs, is.Current)
				}
				for _, obj := range is.Deposed {
					objs = append(objs, obj)
				}
				for _, obj := range objs {
					// Unfortunately we don't have provider schemas out here
					// and so we're limited in what we can achieve with these
					// ResourceInstanceObjectSrc values, but we can try some
					// heuristic things to try to give some useful information
					// in common cases.
					var k, v string
					if ty, err := ctyjson.ImpliedType(obj.AttrsJSON); err == nil {
						if approxV, err := ctyjson.Unmarshal(obj.AttrsJSON, ty); err == nil {
							k, v = format.ObjectValueIDOrName(approxV)
						}
					}

					var detail string
					if k != "" {
						// We can be more specific if we were able to infer
						// an identifying attribute for this object.
						detail = fmt.Sprintf(
							"Due to errors during destroy, test suite %q has left behind an object for %s, with the following identity:\n    %s = %q\n\nYou will need to delete this object manually in the remote system, or else it may have an ongoing cost.",
							suiteName,
							rs.Addr.Instance(instanceKey),
							k, v,
						)
					} else {
						// If our heuristics for finding a suitable identifier
						// failed then unfortunately we must be more vague.
						// (We can't just print the entire object, because it
						// might be overly large and it might contain sensitive
						// values.)
						detail = fmt.Sprintf(
							"Due to errors during destroy, test suite %q has left behind an object for %s. You will need to delete this object manually in the remote system, or else it may have an ongoing cost.",
							suiteName,
							rs.Addr.Instance(instanceKey),
						)
					}
					diags = diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Failed to clean up after tests",
						detail,
					))
				}
			}
		}
	}

	return &ret, diags
}

func (c *TestCommand) prepareSuiteDir(ctx context.Context, suiteName string) (testCommandSuiteDirs, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	configDir := filepath.Join("tests", suiteName)
	log.Printf("[TRACE] terraform test: Prepare directory for suite %q in %s", suiteName, configDir)

	suiteDirs := testCommandSuiteDirs{
		SuiteName: suiteName,
		ConfigDir: configDir,
	}

	// Before we can run a test suite we need to make sure that we have all of
	// its dependencies available, so the following is essentially an
	// abbreviated form of what happens during "terraform init", with some
	// extra trickery in places.

	// First, module installation. This will include linking in the module
	// under test, but also includes grabbing the dependencies of that module
	// if it has any.
	suiteDirs.ModulesDir = filepath.Join(configDir, ".terraform", "modules")
	os.MkdirAll(suiteDirs.ModulesDir, 0755) // if this fails then we'll ignore it and let InstallModules below fail instead
	reg := c.registryClient()
	moduleInst := initwd.NewModuleInstaller(suiteDirs.ModulesDir, reg)
	_, moreDiags := moduleInst.InstallModules(ctx, configDir, true, nil)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		return suiteDirs, diags
	}

	// The installer puts the files in a suitable place on disk, but we
	// still need to actually load the configuration. We need to do this
	// with a separate config loader because the Meta.configLoader instance
	// is intended for interacting with the current working directory, not
	// with the test suite subdirectories.
	loader, err := configload.NewLoader(&configload.Config{
		ModulesDir: suiteDirs.ModulesDir,
		Services:   c.Services,
	})
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to create test configuration loader",
			fmt.Sprintf("Failed to prepare loader for test configuration %s: %s.", configDir, err),
		))
		return suiteDirs, diags
	}
	cfg, hclDiags := loader.LoadConfig(configDir)
	diags = diags.Append(hclDiags)
	if diags.HasErrors() {
		return suiteDirs, diags
	}
	suiteDirs.Config = cfg

	// With the full configuration tree available, we can now install
	// the necessary providers. We'll use a separate local cache directory
	// here, because the test configuration might have additional requirements
	// compared to the module itself.
	suiteDirs.ProvidersDir = filepath.Join(configDir, ".terraform", "providers")
	os.MkdirAll(suiteDirs.ProvidersDir, 0755) // if this fails then we'll ignore it and let the operations below fail instead
	localCacheDir := providercache.NewDir(suiteDirs.ProvidersDir)
	providerInst := c.providerInstaller().Clone(localCacheDir)
	if !providerInst.HasGlobalCacheDir() {
		// If the user already configured a global cache directory then we'll
		// just use it for caching the test providers too, because then we
		// can potentially reuse cache entries they already have. However,
		// if they didn't configure one then we'll still establish one locally
		// in the working directory, which we'll then share across all tests
		// to avoid downloading the same providers repeatedly.
		cachePath := filepath.Join(c.DataDir(), "testing-providers") // note this is _not_ under the suite dir
		err := os.MkdirAll(cachePath, 0755)
		// If we were unable to create the directory for any reason then we'll
		// just proceed without a cache, at the expense of repeated downloads.
		// (With that said, later installation might end up failing for the
		// same reason anyway...)
		if err == nil || os.IsExist(err) {
			cacheDir := providercache.NewDir(cachePath)
			providerInst.SetGlobalCacheDir(cacheDir)
		}
	}
	reqs, hclDiags := cfg.ProviderRequirements()
	diags = diags.Append(hclDiags)
	if diags.HasErrors() {
		return suiteDirs, diags
	}

	// For test suites we only retain the "locks" in memory for the duration
	// of one run, just to make sure that we use the same providers when we
	// eventually run the test suite.
	locks := depsfile.NewLocks()
	evts := &providercache.InstallerEvents{
		QueryPackagesFailure: func(provider addrs.Provider, err error) {
			if err != nil && addrs.IsDefaultProvider(provider) && provider.Type == "test" {
				// This is some additional context for the failure error
				// we'll generate afterwards. Not the most ideal UX but
				// good enough for this prototype implementation, to help
				// hint about the special builtin provider we use here.
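				// As an illustrative sketch only (nothing in this code emits
				// or parses this text), a test suite configuration would
				// typically declare the builtin provider along these lines:
				//
				//     terraform {
				//       required_providers {
				//         test = {
				//           source = "terraform.io/builtin/test"
				//         }
				//       }
				//     }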
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Warning,
					"Probably-unintended reference to \"hashicorp/test\" provider",
					"For the purposes of this experimental implementation of module test suites, you must use the built-in test provider terraform.io/builtin/test, which requires an explicit required_providers declaration.",
				))
			}
		},
	}
	ctx = evts.OnContext(ctx)
	locks, err = providerInst.EnsureProviderVersions(ctx, locks, reqs, providercache.InstallUpgrades)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to install required providers",
			fmt.Sprintf("Couldn't install necessary providers for test configuration %s: %s.", configDir, err),
		))
		return suiteDirs, diags
	}
	suiteDirs.ProviderLocks = locks
	suiteDirs.ProviderCache = localCacheDir

	return suiteDirs, diags
}

func (c *TestCommand) runTestSuite(ctx context.Context, suiteDirs testCommandSuiteDirs) (map[string]*moduletest.Component, *states.State) {
	log.Printf("[TRACE] terraform test: Run test suite %q", suiteDirs.SuiteName)

	ret := make(map[string]*moduletest.Component)

	// To collect test results we'll use an instance of the special "test"
	// provider, which records the intention to make a test assertion during
	// planning and then hopefully updates that to an actual assertion result
	// during apply, unless an apply error causes the graph walk to exit early.
	// For this to work correctly, we must ensure we're using the same provider
	// instance for both plan and apply.
	testProvider := moduletest.NewProvider()

	// synthError is a helper to return early with a synthetic failing
	// component, for problems that prevent us from even discovering what an
	// appropriate component and assertion name might be.
	state := states.NewState()
	synthError := func(name string, desc string, msg string, diags tfdiags.Diagnostics) (map[string]*moduletest.Component, *states.State) {
		key := "(" + name + ")" // parens ensure this can't conflict with an actual component/assertion key
		ret[key] = &moduletest.Component{
			Assertions: map[string]*moduletest.Assertion{
				key: {
					Outcome:     moduletest.Error,
					Description: desc,
					Message:     msg,
					Diagnostics: diags,
				},
			},
		}
		return ret, state
	}

	// NOTE: This function intentionally deviates from the usual pattern of
	// gradually appending more diagnostics to the same diags, because
	// here we're associating each set of diagnostics with the specific
	// operation it belongs to.

	providerFactories, diags := c.testSuiteProviders(suiteDirs, testProvider)
	if diags.HasErrors() {
		// It should be unusual to get in here, because testSuiteProviders
		// should rely only on things guaranteed by prepareSuiteDir, but
		// since we're doing external I/O here there is always the risk that
		// the filesystem changes or fails between setting up and using the
		// providers.
		return synthError(
			"init",
			"terraform init",
			"failed to resolve the required providers",
			diags,
		)
	}

	plan, diags := c.testSuitePlan(ctx, suiteDirs, providerFactories)
	if diags.HasErrors() {
		// It should be unusual to get in here, because testSuitePlan
		// should rely only on things guaranteed by prepareSuiteDir, but
		// since we're doing external I/O here there is always the risk that
		// the filesystem changes or fails between setting up and using the
		// providers.
		return synthError(
			"plan",
			"terraform plan",
			"failed to create a plan",
			diags,
		)
	}

	// Now we'll apply the plan. Once we try to apply, we might've created
	// real remote objects, and so we must try to run destroy even if the
	// apply returns errors, and we must return whatever state we end up
	// with so the caller can generate additional loud errors if anything
	// is left in it.

	state, diags = c.testSuiteApply(ctx, plan, suiteDirs, providerFactories)
	if diags.HasErrors() {
		// We don't return here, unlike the others above, because we want to
		// continue to the destroy below even if there are apply errors.
		synthError(
			"apply",
			"terraform apply",
			"failed to apply the created plan",
			diags,
		)
	}

	// By the time we get here, the test provider will have gathered up all
	// of the planned assertions and the final results for any assertions that
	// were not blocked by an error. This also resets the provider so that
	// the destroy operation below won't get tripped up on stale results.
	ret = testProvider.Reset()

	state, diags = c.testSuiteDestroy(ctx, state, suiteDirs, providerFactories)
	if diags.HasErrors() {
		synthError(
			"destroy",
			"terraform destroy",
			"failed to destroy objects created during test (NOTE: leftover remote objects may still exist)",
			diags,
		)
	}

	return ret, state
}

func (c *TestCommand) testSuiteProviders(suiteDirs testCommandSuiteDirs, testProvider *moduletest.Provider) (map[addrs.Provider]providers.Factory, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	ret := make(map[addrs.Provider]providers.Factory)

	// We can safely use the internal providers returned by Meta here because
	// the built-in provider versions can never vary based on the configuration
	// and thus we don't need to worry about potential version differences
	// between main module and test suite modules.
	for name, factory := range c.internalProviders() {
		ret[addrs.NewBuiltInProvider(name)] = factory
	}

	// For the remaining non-builtin providers, we'll just take whatever we
	// recorded earlier in the in-memory-only "lock file". All of these should
	// typically still be available because we would've only just installed
	// them, but this could fail if e.g. the filesystem has been somehow
	// damaged in the meantime.
	for provider, lock := range suiteDirs.ProviderLocks.AllProviders() {
		version := lock.Version()
		cached := suiteDirs.ProviderCache.ProviderVersion(provider, version)
		if cached == nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Required provider not found",
				fmt.Sprintf("Although installation previously succeeded for %s v%s, it no longer seems to be present in the cache directory.", provider.ForDisplay(), version.String()),
			))
			continue // potentially collect up multiple errors
		}

		// NOTE: We don't consider the checksums for test suite dependencies,
		// because we're creating a fresh "lock file" each time we run anyway
		// and so they wouldn't actually guarantee anything useful.

		ret[provider] = providerFactory(cached)
	}

	// We'll replace the test provider instance with the one our caller
	// provided, so it'll be able to interrogate the test results directly.
	ret[addrs.NewBuiltInProvider("test")] = func() (providers.Interface, error) {
		return testProvider, nil
	}

	return ret, diags
}

type testSuiteRunContext struct {
	Core *terraform.Context

	PlanMode   plans.Mode
	Config     *configs.Config
	InputState *states.State
	Changes    *plans.Changes
}

func (c *TestCommand) testSuiteContext(suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory, state *states.State, plan *plans.Plan, destroy bool) (*testSuiteRunContext, tfdiags.Diagnostics) {
	var changes *plans.Changes
	if plan != nil {
		changes = plan.Changes
	}

	planMode := plans.NormalMode
	if destroy {
		planMode = plans.DestroyMode
	}

	tfCtx, diags := terraform.NewContext(&terraform.ContextOpts{
		Providers: providerFactories,

		// We just use the provisioners from the main Meta here, because,
		// unlike providers, provisioner plugins are not automatically
		// installable anyway, and so we'll need to hunt for them in the same
		// legacy way that normal Terraform operations do.
		Provisioners: c.provisionerFactories(),

		Meta: &terraform.ContextMeta{
			Env: "test_" + suiteDirs.SuiteName,
		},
	})
	if diags.HasErrors() {
		return nil, diags
	}
	return &testSuiteRunContext{
		Core: tfCtx,

		PlanMode:   planMode,
		Config:     suiteDirs.Config,
		InputState: state,
		Changes:    changes,
	}, diags
}

func (c *TestCommand) testSuitePlan(ctx context.Context, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*plans.Plan, tfdiags.Diagnostics) {
	log.Printf("[TRACE] terraform test: create plan for suite %q", suiteDirs.SuiteName)
	runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, nil, nil, false)
	if diags.HasErrors() {
		return nil, diags
	}

	// We'll also validate as part of planning, to ensure that the test
	// configuration would pass "terraform validate". This is largely
	// redundant with the runCtx.Core.Plan call below, but is retained from
	// when Plan assumed that an earlier Validate had already passed; Plan
	// now does its own validation work as (mostly) a superset of validate.
	moreDiags := runCtx.Core.Validate(runCtx.Config)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	plan, moreDiags := runCtx.Core.Plan(
		runCtx.Config, runCtx.InputState, &terraform.PlanOpts{Mode: runCtx.PlanMode},
	)
	diags = diags.Append(moreDiags)
	return plan, diags
}

func (c *TestCommand) testSuiteApply(ctx context.Context, plan *plans.Plan, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*states.State, tfdiags.Diagnostics) {
	log.Printf("[TRACE] terraform test: apply plan for suite %q", suiteDirs.SuiteName)
	runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, nil, plan, false)
	if diags.HasErrors() {
		// To make things easier on the caller, we'll return a valid empty
		// state even in this case.
		return states.NewState(), diags
	}

	state, moreDiags := runCtx.Core.Apply(plan, runCtx.Config)
	diags = diags.Append(moreDiags)
	return state, diags
}

func (c *TestCommand) testSuiteDestroy(ctx context.Context, state *states.State, suiteDirs testCommandSuiteDirs, providerFactories map[addrs.Provider]providers.Factory) (*states.State, tfdiags.Diagnostics) {
	log.Printf("[TRACE] terraform test: plan to destroy any existing objects for suite %q", suiteDirs.SuiteName)
	runCtx, diags := c.testSuiteContext(suiteDirs, providerFactories, state, nil, true)
	if diags.HasErrors() {
		return state, diags
	}

	plan, moreDiags := runCtx.Core.Plan(
		runCtx.Config, runCtx.InputState, &terraform.PlanOpts{Mode: runCtx.PlanMode},
	)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		return state, diags
	}

	log.Printf("[TRACE] terraform test: apply the plan to destroy any existing objects for suite %q", suiteDirs.SuiteName)
	runCtx, moreDiags = c.testSuiteContext(suiteDirs, providerFactories, state, plan, true)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		return state, diags
	}

	state, moreDiags = runCtx.Core.Apply(plan, runCtx.Config)
	diags = diags.Append(moreDiags)
	return state, diags
}

func (c *TestCommand) collectSuiteNames() ([]string, error) {
	items, err := ioutil.ReadDir("tests")
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}

	ret := make([]string, 0, len(items))
	for _, item := range items {
		if !item.IsDir() {
			continue
		}
		name := item.Name()
		suitePath := filepath.Join("tests", name)
		tfFiles, err := filepath.Glob(filepath.Join(suitePath, "*.tf"))
		if err != nil {
			// We'll just ignore it and treat it like a dir with no .tf files
			tfFiles = nil
		}
		tfJSONFiles, err := filepath.Glob(filepath.Join(suitePath, "*.tf.json"))
		if err != nil {
			// We'll just ignore it and treat it like a dir with no .tf.json files
			tfJSONFiles = nil
		}
		if (len(tfFiles) + len(tfJSONFiles)) == 0 {
			// Not a test suite, then.
			continue
		}
		ret = append(ret, name)
	}

	return ret, nil
}

func (c *TestCommand) Help() string {
	helpText := `
Usage: terraform test [options]

  This is an experimental command to help with automated integration
  testing of shared modules. The usage and behavior of this command are
  likely to change in breaking ways in subsequent releases, as we
  are currently using this command primarily for research purposes.

  In its current experimental form, "test" will look under the current
  working directory for a subdirectory called "tests", and then within
  that directory search for one or more subdirectories that contain
  ".tf" or ".tf.json" files. For any that it finds, it will perform
  Terraform operations similar to the following sequence of commands
  in each of those directories:
      terraform validate
      terraform apply
      terraform destroy

  The test configurations should not declare any input variables and
  should at least contain a call to the module being tested, which
  will always be available at the path ../.. due to the expected
  filesystem layout.

  The tests are considered to be successful if all of the above steps
  succeed.

  Test configurations may optionally include uses of the special
  built-in test provider terraform.io/builtin/test, which allows
  writing explicit test assertions which must also all pass in order
  for the test run to be considered successful.

  This initial implementation is intended as a minimally-viable
  product to use for further research and experimentation, and in
  particular it currently lacks the following capabilities that we
  expect to consider in later iterations, based on feedback:
    - Testing of subsequent updates to existing infrastructure,
      where currently it only supports initial creation and
      then destruction.
    - Testing top-level modules that are intended to be used for
      "real" environments, which typically have hard-coded values
      that don't permit creating a separate "copy" for testing.
    - Some sort of support for unit test runs that don't interact
      with remote systems at all, e.g. for use in checking pull
      requests from untrusted contributors.

  In the meantime, we'd like to hear feedback from module authors
  who have tried writing some experimental tests for their modules
  about what sorts of tests you were able to write, what sorts of
  tests you weren't able to write, and any tests that you were
  able to write but that were difficult to model in some way.

Options:

  -compact-warnings  Use a more compact representation for warnings, if
                     this command produces only warnings and no errors.

  -junit-xml=FILE    In addition to the usual output, also write test
                     results to the given file path in JUnit XML format.
                     This format is commonly supported by CI systems, and
                     they typically expect to be given a filename to search
                     for in the test workspace after the test run finishes.

  -no-color          Don't include virtual terminal formatting sequences in
                     the output.
`
	return strings.TrimSpace(helpText)
}

func (c *TestCommand) Synopsis() string {
	return "Experimental support for module integration testing"
}

type testCommandSuiteDirs struct {
	SuiteName string

	ConfigDir    string
	ModulesDir   string
	ProvidersDir string

	Config        *configs.Config
	ProviderCache *providercache.Dir
	ProviderLocks *depsfile.Locks
}
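
// For orientation, the filesystem layout that this command expects looks
// roughly like the following (an illustrative sketch; the suite name
// "defaults" is just an example):
//
//     ./                      the module under test
//     ./tests/defaults/       one test suite, named "defaults"
//     ./tests/defaults/*.tf   test configuration calling the module at ../..
//
// A minimal test configuration, per the help text above, calls the module
// under test and may optionally declare assertions via the builtin test
// provider. The output name "answer" below is hypothetical, purely for
// illustration:
//
//     module "main" {
//       source = "../.."
//     }
//
//     resource "test_assertions" "example" {
//       component = "example"
//
//       equal "answer" {
//         description = "module produces the expected answer"
//         got         = module.main.answer
//         want        = 42
//       }
//     }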