github.com/opentofu/opentofu@v1.7.1/internal/tofu/node_resource_plan_instance.go

     1  // Copyright (c) The OpenTofu Authors
     2  // SPDX-License-Identifier: MPL-2.0
     3  // Copyright (c) 2023 HashiCorp, Inc.
     4  // SPDX-License-Identifier: MPL-2.0
     5  
     6  package tofu
     7  
     8  import (
     9  	"fmt"
    10  	"log"
    11  	"path/filepath"
    12  	"sort"
    13  
    14  	"github.com/hashicorp/hcl/v2"
    15  	"github.com/hashicorp/hcl/v2/hclsyntax"
    16  	"github.com/zclconf/go-cty/cty"
    17  
    18  	"github.com/opentofu/opentofu/internal/addrs"
    19  	"github.com/opentofu/opentofu/internal/configs"
    20  	"github.com/opentofu/opentofu/internal/configs/configschema"
    21  	"github.com/opentofu/opentofu/internal/genconfig"
    22  	"github.com/opentofu/opentofu/internal/instances"
    23  	"github.com/opentofu/opentofu/internal/plans"
    24  	"github.com/opentofu/opentofu/internal/providers"
    25  	"github.com/opentofu/opentofu/internal/states"
    26  	"github.com/opentofu/opentofu/internal/tfdiags"
    27  )
    28  
    29  // NodePlannableResourceInstance represents a _single_ resource
    30  // instance that is plannable. This means this represents a single
    31  // count index, for example.
    32  type NodePlannableResourceInstance struct {
    33  	*NodeAbstractResourceInstance
    34  	ForceCreateBeforeDestroy bool
    35  
    36  	// skipRefresh indicates that we should skip refreshing individual instances
    37  	skipRefresh bool
    38  
    39  	// skipPlanChanges indicates we should skip trying to plan change actions
    40  	// for any instances.
    41  	skipPlanChanges bool
    42  
    43  	// forceReplace are resource instance addresses where the user wants to
    44  	// force generating a replace action. This set isn't pre-filtered, so
    45  	// it might contain addresses that have nothing to do with the resource
    46  	// that this node represents, which the node itself must therefore ignore.
    47  	forceReplace []addrs.AbsResourceInstance
    48  
    49  	// replaceTriggeredBy stores references from replace_triggered_by which
    50  	// triggered this instance to be replaced.
    51  	replaceTriggeredBy []*addrs.Reference
    52  
    53  	// importTarget, if populated, contains the information necessary to plan
    54  	// an import of this resource.
    55  	importTarget EvaluatedConfigImportTarget
    56  }
    57  
    58  // EvaluatedConfigImportTarget is a target that we need to import. It's created when an import target originates from
    59  // an import block, after everything in the configuration has been evaluated.
    60  // At this point, the import target refers to a single resource instance.
    61  type EvaluatedConfigImportTarget struct {
    62  	// Config is the original import block for this import. This might be nil
    63  	// if the import did not originate in config.
    64  	Config *configs.Import
    65  
    66  	// Addr is the actual address of the resource instance that we should import into. At this point, the address
    67  	// should be fully evaluated
    68  	Addr addrs.AbsResourceInstance
    69  
    70  	// ID is the string ID of the resource to import. This is resource-instance specific.
    71  	ID string
    72  }
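// For illustration, a sketch (addresses and IDs hypothetical): a root-module
// import block such as
//
//	import {
//	  to = aws_instance.example
//	  id = "i-abc123"
//	}
//
// evaluates, roughly, to an EvaluatedConfigImportTarget whose Addr is the
// fully-evaluated aws_instance.example, whose ID is "i-abc123", and whose
// Config points back at the originating block.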
    73  
    74  var (
    75  	_ GraphNodeModuleInstance       = (*NodePlannableResourceInstance)(nil)
    76  	_ GraphNodeReferenceable        = (*NodePlannableResourceInstance)(nil)
    77  	_ GraphNodeReferencer           = (*NodePlannableResourceInstance)(nil)
    78  	_ GraphNodeConfigResource       = (*NodePlannableResourceInstance)(nil)
    79  	_ GraphNodeResourceInstance     = (*NodePlannableResourceInstance)(nil)
    80  	_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
    81  	_ GraphNodeAttachResourceState  = (*NodePlannableResourceInstance)(nil)
    82  	_ GraphNodeExecutable           = (*NodePlannableResourceInstance)(nil)
    83  )
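// The var block above is the usual Go compile-time assertion idiom: each line
// assigns a typed nil *NodePlannableResourceInstance to an interface-typed
// blank variable, so compilation fails if the type ever stops implementing
// one of these interfaces.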
    84  
    85  // GraphNodeExecutable
    86  func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
    87  	addr := n.ResourceInstanceAddr()
    88  
    89  	// Eval info is different depending on what kind of resource this is
    90  	switch addr.Resource.Resource.Mode {
    91  	case addrs.ManagedResourceMode:
    92  		return n.managedResourceExecute(ctx)
    93  	case addrs.DataResourceMode:
    94  		return n.dataResourceExecute(ctx)
    95  	default:
    96  		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
    97  	}
    98  }
    99  
   100  func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
   101  	config := n.Config
   102  	addr := n.ResourceInstanceAddr()
   103  
   104  	var change *plans.ResourceInstanceChange
   105  
   106  	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
   107  	diags = diags.Append(err)
   108  	if diags.HasErrors() {
   109  		return diags
   110  	}
   111  
   112  	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
   113  	if diags.HasErrors() {
   114  		return diags
   115  	}
   116  
   117  	checkRuleSeverity := tfdiags.Error
   118  	if n.skipPlanChanges || n.preDestroyRefresh {
   119  		checkRuleSeverity = tfdiags.Warning
   120  	}
   121  
   122  	change, state, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.skipPlanChanges)
   123  	diags = diags.Append(planDiags)
   124  	if diags.HasErrors() {
   125  		return diags
   126  	}
   127  
   128  	// write the data source into both the refresh state and the
   129  	// working state
   130  	diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
   131  	if diags.HasErrors() {
   132  		return diags
   133  	}
   134  	diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
   135  	if diags.HasErrors() {
   136  		return diags
   137  	}
   138  
   139  	diags = diags.Append(n.writeChange(ctx, change, ""))
   140  
   141  	// Post-conditions might block further progress. We intentionally do this
   142  	// _after_ writing the state/diff because we want to check against
   143  	// the result of the operation, and to fail on future operations
   144  	// until the user makes the condition succeed.
   145  	checkDiags := evalCheckRules(
   146  		addrs.ResourcePostcondition,
   147  		n.Config.Postconditions,
   148  		ctx, addr, repeatData,
   149  		checkRuleSeverity,
   150  	)
   151  	diags = diags.Append(checkDiags)
   152  
   153  	return diags
   154  }
   155  
   156  func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
   157  	config := n.Config
   158  	addr := n.ResourceInstanceAddr()
   159  
   160  	var instanceRefreshState *states.ResourceInstanceObject
   161  
   162  	checkRuleSeverity := tfdiags.Error
   163  	if n.skipPlanChanges || n.preDestroyRefresh {
   164  		checkRuleSeverity = tfdiags.Warning
   165  	}
   166  
   167  	provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
   168  	diags = diags.Append(err)
   169  	if diags.HasErrors() {
   170  		return diags
   171  	}
   172  
   173  	if config != nil {
   174  		diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
   175  		if diags.HasErrors() {
   176  			return diags
   177  		}
   178  	}
   179  
   180  	importing := n.shouldImport(ctx)
   181  
   182  	if importing && n.Config == nil && len(n.generateConfigPath) == 0 {
   183  		// Then the user wrote an import target to a target that didn't exist.
   184  		if n.Addr.Module.IsRoot() {
   185  			diags = diags.Append(&hcl.Diagnostic{
   186  				Severity: hcl.DiagError,
   187  				Summary:  "Import block target does not exist",
   188  				Detail:   "The target for the given import block does not exist. If you wish to automatically generate config for this resource, use the -generate-config-out option within tofu plan. Otherwise, make sure the target resource exists within your configuration. For example:\n\n  tofu plan -generate-config-out=generated.tf",
   189  				Subject:  n.importTarget.Config.DeclRange.Ptr(),
   190  			})
   191  		} else {
   192  			// You can't generate config for a resource that is inside a
   193  			// module, so we will present a different error message for
   194  			// this case.
   195  			diags = diags.Append(importResourceWithoutConfigDiags(n.Addr.String(), n.importTarget.Config))
   196  		}
   197  		return diags
   198  	}
   199  
   200  	// If the resource is to be imported, we now ask the provider for an Import
   201  	// and a Refresh, and save the resulting state to instanceRefreshState.
   202  	if importing {
   203  		instanceRefreshState, diags = n.importState(ctx, addr, n.importTarget.ID, provider, providerSchema)
   204  	} else {
   205  		var readDiags tfdiags.Diagnostics
   206  		instanceRefreshState, readDiags = n.readResourceInstanceState(ctx, addr)
   207  		diags = diags.Append(readDiags)
   208  		if diags.HasErrors() {
   209  			return diags
   210  		}
   211  	}
   212  
   213  	// We'll save a snapshot of what we just read from the state into the
   214  	// prevRunState before we do anything else, since this will capture the
   215  	// result of any schema upgrading that readResourceInstanceState just did,
   216  	// but not include any out-of-band changes we might detect in the
   217  	// refresh step below.
   218  	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
   219  	if diags.HasErrors() {
   220  		return diags
   221  	}
   222  	// Also the refreshState, because that should still reflect schema upgrades
   223  	// even if it doesn't reflect upstream changes.
   224  	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   225  	if diags.HasErrors() {
   226  		return diags
   227  	}
   228  
   229  	// In 0.13 we could be refreshing a resource with no config.
   230  	// We should be operating on a managed resource, but we check here to be certain.
   231  	if n.Config == nil || n.Config.Managed == nil {
   232  		log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr)
   233  	} else {
   234  		if instanceRefreshState != nil {
   235  			instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy
   236  		}
   237  	}
   238  
   239  	// Refresh, maybe
   240  	// The import process handles its own refresh
   241  	if !n.skipRefresh && !importing {
   242  		s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
   243  		diags = diags.Append(refreshDiags)
   244  		if diags.HasErrors() {
   245  			return diags
   246  		}
   247  
   248  		instanceRefreshState = s
   249  
   250  		if instanceRefreshState != nil {
   251  			// When refreshing we start by merging the stored dependencies and
   252  			// the configured dependencies. The configured dependencies will be
   253  			// stored to state once the changes are applied. If the plan
   254  			// results in no changes, we will re-write these dependencies
   255  			// below.
   256  			instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
   257  		}
   258  
   259  		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   260  		if diags.HasErrors() {
   261  			return diags
   262  		}
   263  	}
   264  
   265  	// Plan the instance, unless we're in refresh-only mode
   266  	if !n.skipPlanChanges {
   267  
   268  		// add this instance to n.forceReplace if replacement is triggered by
   269  		// another change
   270  		repData := instances.RepetitionData{}
   271  		switch k := addr.Resource.Key.(type) {
   272  		case addrs.IntKey:
   273  			repData.CountIndex = k.Value()
   274  		case addrs.StringKey:
   275  			repData.EachKey = k.Value()
   276  			repData.EachValue = cty.DynamicVal
   277  		}
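		// For example (sketch): count = 3 yields IntKey values 0..2, so
		// CountIndex carries this instance's index; for_each over a map
		// yields a StringKey, so EachKey carries the key while EachValue
		// is left as cty.DynamicVal, an unknown placeholder, presumably
		// because the original for_each value is not re-evaluated here.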
   278  
   279  		diags = diags.Append(n.replaceTriggered(ctx, repData))
   280  		if diags.HasErrors() {
   281  			return diags
   282  		}
   283  
   284  		change, instancePlanState, repeatData, planDiags := n.plan(
   285  			ctx, nil, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
   286  		)
   287  		diags = diags.Append(planDiags)
   288  		if diags.HasErrors() {
   289  			// If we are importing and generating a configuration, we need to
   290  			// ensure the change is written out so the configuration can be
   291  			// captured.
   292  			if len(n.generateConfigPath) > 0 {
   293  				// Update our return plan
   294  				change := &plans.ResourceInstanceChange{
   295  					Addr:         n.Addr,
   296  					PrevRunAddr:  n.prevRunAddr(ctx),
   297  					ProviderAddr: n.ResolvedProvider,
   298  					Change: plans.Change{
   299  						// we only need a placeholder, so this will be a NoOp
   300  						Action:          plans.NoOp,
   301  						Before:          instanceRefreshState.Value,
   302  						After:           instanceRefreshState.Value,
   303  						GeneratedConfig: n.generatedConfigHCL,
   304  					},
   305  				}
   306  				diags = diags.Append(n.writeChange(ctx, change, ""))
   307  			}
   308  
   309  			return diags
   310  		}
   311  
   312  		if importing {
   313  			change.Importing = &plans.Importing{ID: n.importTarget.ID}
   314  		}
   315  
   316  		// FIXME: here we update the change to reflect the reason for
   317  		// replacement, but we still overload forceReplace to get the correct
   318  		// change planned.
   319  		if len(n.replaceTriggeredBy) > 0 {
   320  			change.ActionReason = plans.ResourceInstanceReplaceByTriggers
   321  		}
   322  
   323  		// FIXME: it is currently important that we write resource changes to
   324  		// the plan (n.writeChange) before we write the corresponding state
   325  		// (n.writeResourceInstanceState).
   326  		//
   327  		// This is because the planned resource state will normally have the
   328  		// status of states.ObjectPlanned, which causes later logic to refer to
   329  		// the contents of the plan to retrieve the resource data. Because
   330  		// there is no shared lock between these two data structures, reversing
   331  		// the order of these writes will cause a brief window of inconsistency
   332  		// which can lead to a failed safety check.
   333  		//
   334  		// Future work should adjust these APIs such that it is impossible to
   335  		// update these two data structures incorrectly through any objects
   336  		// reachable via the tofu.EvalContext API.
   337  		diags = diags.Append(n.writeChange(ctx, change, ""))
   338  		if diags.HasErrors() {
   339  			return diags
   340  		}
   341  		diags = diags.Append(n.checkPreventDestroy(change))
   342  		if diags.HasErrors() {
   343  			return diags
   344  		}
   345  
   346  		diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
   347  		if diags.HasErrors() {
   348  			return diags
   349  		}
   350  
   351  		// If this plan resulted in a NoOp, then apply won't have a chance to make
   352  		// any changes to the stored dependencies. Since this is a NoOp we know
   353  		// that the stored dependencies will have no effect during apply, and we can
   354  		// write them out now.
   355  		if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
   356  			// the refresh state will be the final state for this resource, so
   357  			// finalize the dependencies here if they need to be updated.
   358  			instanceRefreshState.Dependencies = n.Dependencies
   359  			diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   360  			if diags.HasErrors() {
   361  				return diags
   362  			}
   363  		}
   364  
   365  		// Post-conditions might block completion. We intentionally do this
   366  		// _after_ writing the state/diff because we want to check against
   367  		// the result of the operation, and to fail on future operations
   368  		// until the user makes the condition succeed.
   369  		// (Note that some preconditions will end up being skipped during
   370  		// planning, because their conditions depend on values not yet known.)
   371  		checkDiags := evalCheckRules(
   372  			addrs.ResourcePostcondition,
   373  			n.Config.Postconditions,
   374  			ctx, n.ResourceInstanceAddr(), repeatData,
   375  			checkRuleSeverity,
   376  		)
   377  		diags = diags.Append(checkDiags)
   378  	} else {
   379  		// In refresh-only mode we need to evaluate the for-each expression in
   380  		// order to supply the value to the pre- and post-condition check
   381  		// blocks. This has the unfortunate edge case of a refresh-only plan
   382  		// executing with a for-each map which has the same keys but different
   383  		// values, which could result in a post-condition check relying on that
   384  		// value being inaccurate. Unless we decide to store the value of the
   385  		// for-each expression in state, this is unavoidable.
   386  		forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx)
   387  		repeatData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
   388  
   389  		checkDiags := evalCheckRules(
   390  			addrs.ResourcePrecondition,
   391  			n.Config.Preconditions,
   392  			ctx, addr, repeatData,
   393  			checkRuleSeverity,
   394  		)
   395  		diags = diags.Append(checkDiags)
   396  
   397  		// Even if we don't plan changes, we do still need to at least update
   398  		// the working state to reflect the refresh result. If not, then e.g.
   399  		// any output values referring to this will not react to the drift.
   400  		// (Even if we didn't actually refresh above, this will still save
   401  		// the result of any schema upgrading we did in readResourceInstanceState.)
   402  		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState))
   403  		if diags.HasErrors() {
   404  			return diags
   405  		}
   406  
   407  		// Here we also evaluate post-conditions after updating the working
   408  		// state, because we want to check against the result of the refresh.
   409  		// Unlike in normal planning mode, these checks are still evaluated
   410  		// even if pre-conditions generated diagnostics, because we have no
   411  		// planned changes to block.
   412  		checkDiags = evalCheckRules(
   413  			addrs.ResourcePostcondition,
   414  			n.Config.Postconditions,
   415  			ctx, addr, repeatData,
   416  			checkRuleSeverity,
   417  		)
   418  		diags = diags.Append(checkDiags)
   419  	}
   420  
   421  	return diags
   422  }
   423  
   424  // replaceTriggered checks if this instance needs to be replaced due to a change
   425  // in a replace_triggered_by reference. If replacement is required, the
   426  // instance address is added to forceReplace.
   427  func (n *NodePlannableResourceInstance) replaceTriggered(ctx EvalContext, repData instances.RepetitionData) tfdiags.Diagnostics {
   428  	var diags tfdiags.Diagnostics
   429  	if n.Config == nil {
   430  		return diags
   431  	}
   432  
   433  	for _, expr := range n.Config.TriggersReplacement {
   434  		ref, replace, evalDiags := ctx.EvaluateReplaceTriggeredBy(expr, repData)
   435  		diags = diags.Append(evalDiags)
   436  		if diags.HasErrors() {
   437  			continue
   438  		}
   439  
   440  		if replace {
   441  			// FIXME: forceReplace accomplishes the same goal, however we may
   442  			// want to communicate more information about which resource
   443  			// triggered the replacement in the plan.
   444  			// Rather than further complicating the plan method with more
   445  			// options, we can refactor both of these features later.
   446  			n.forceReplace = append(n.forceReplace, n.Addr)
   447  			log.Printf("[DEBUG] ReplaceTriggeredBy forcing replacement of %s due to change in %s", n.Addr, ref.DisplayString())
   448  
   449  			n.replaceTriggeredBy = append(n.replaceTriggeredBy, ref)
   450  			break
   451  		}
   452  	}
   453  
   454  	return diags
   455  }
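// A minimal sketch of the configuration shape that drives replaceTriggered
// (resource names hypothetical):
//
//	resource "aws_instance" "b" {
//	  lifecycle {
//	    replace_triggered_by = [aws_instance.a.id]
//	  }
//	}
//
// When aws_instance.a.id has a planned change, EvaluateReplaceTriggeredBy
// reports replace = true for that expression, and this instance's own
// address is appended to forceReplace above.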
   456  
   457  func (n *NodePlannableResourceInstance) importState(ctx EvalContext, addr addrs.AbsResourceInstance, importId string, provider providers.Interface, providerSchema providers.ProviderSchema) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
   458  	var diags tfdiags.Diagnostics
   459  	absAddr := addr.Resource.Absolute(ctx.Path())
   460  
   461  	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
   462  		return h.PrePlanImport(absAddr, importId)
   463  	}))
   464  	if diags.HasErrors() {
   465  		return nil, diags
   466  	}
   467  
   468  	resp := provider.ImportResourceState(providers.ImportResourceStateRequest{
   469  		TypeName: addr.Resource.Resource.Type,
   470  		ID:       importId,
   471  	})
   472  	diags = diags.Append(resp.Diagnostics)
   473  	if diags.HasErrors() {
   474  		return nil, diags
   475  	}
   476  
   477  	imported := resp.ImportedResources
   478  
   479  	if len(imported) == 0 {
   480  		diags = diags.Append(tfdiags.Sourceless(
   481  			tfdiags.Error,
   482  			"Import returned no resources",
   483  			fmt.Sprintf("While attempting to import with ID %s, the provider "+
   484  				"returned no instance states.",
   485  				importId,
   486  			),
   487  		))
   488  		return nil, diags
   489  	}
   490  	for _, obj := range imported {
   491  		log.Printf("[TRACE] graphNodeImportState: import %s %q produced instance object of type %s", absAddr.String(), importId, obj.TypeName)
   492  	}
   493  	if len(imported) > 1 {
   494  		diags = diags.Append(tfdiags.Sourceless(
   495  			tfdiags.Error,
   496  			"Multiple import states not supported",
   497  			fmt.Sprintf("While attempting to import with ID %s, the provider "+
   498  				"returned multiple resource instance states. This "+
   499  				"is not currently supported.",
   500  				importId,
   501  			),
   502  		))
   503  		return nil, diags
   504  	}
   505  
   506  	// call post-import hook
   507  	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
   508  		return h.PostPlanImport(absAddr, imported)
   509  	}))
   510  
   511  	if imported[0].TypeName == "" {
   512  		diags = diags.Append(fmt.Errorf("import of %s didn't set type", n.Addr.String()))
   513  		return nil, diags
   514  	}
   515  
   516  	importedState := imported[0].AsInstanceObject()
   517  
   518  	if importedState.Value.IsNull() {
   519  		diags = diags.Append(tfdiags.Sourceless(
   520  			tfdiags.Error,
   521  			"Import returned null resource",
   522  			fmt.Sprintf("While attempting to import with ID %s, the provider "+
   523  				"returned an instance with no state.",
   524  				n.importTarget.ID,
   525  			),
   526  		))
   527  	}
   528  
   529  	// refresh
   530  	riNode := &NodeAbstractResourceInstance{
   531  		Addr: n.Addr,
   532  		NodeAbstractResource: NodeAbstractResource{
   533  			ResolvedProvider: n.ResolvedProvider,
   534  		},
   535  	}
   536  	instanceRefreshState, refreshDiags := riNode.refresh(ctx, states.NotDeposed, importedState)
   537  	diags = diags.Append(refreshDiags)
   538  	if diags.HasErrors() {
   539  		return instanceRefreshState, diags
   540  	}
   541  
   542  	// verify the existence of the imported resource
   543  	if instanceRefreshState.Value.IsNull() {
   544  		var diags tfdiags.Diagnostics
   545  		diags = diags.Append(tfdiags.Sourceless(
   546  			tfdiags.Error,
   547  			"Cannot import non-existent remote object",
   548  			fmt.Sprintf(
   549  				"While attempting to import an existing object to %q, "+
   550  					"the provider detected that no object exists with the given id. "+
   551  					"Only pre-existing objects can be imported; check that the id "+
   552  					"is correct and that it is associated with the provider's "+
   553  					"configured region or endpoint, or use \"tofu apply\" to "+
   554  					"create a new remote object for this resource.",
   555  				n.Addr,
   556  			),
   557  		))
   558  		return instanceRefreshState, diags
   559  	}
   560  
   561  	// Insert marks from configuration
   562  	if n.Config != nil {
   563  		keyData := EvalDataForNoInstanceKey
   564  
   565  		switch {
   566  		case n.Config.Count != nil:
   567  			keyData = InstanceKeyEvalData{
   568  				CountIndex: cty.UnknownVal(cty.Number),
   569  			}
   570  		case n.Config.ForEach != nil:
   571  			keyData = InstanceKeyEvalData{
   572  				EachKey:   cty.UnknownVal(cty.String),
   573  				EachValue: cty.UnknownVal(cty.DynamicPseudoType),
   574  			}
   575  		}
   576  
   577  		valueWithConfigurationSchemaMarks, _, configDiags := ctx.EvaluateBlock(n.Config.Config, n.Schema, nil, keyData)
   578  		diags = diags.Append(configDiags)
   579  		if configDiags.HasErrors() {
   580  			return instanceRefreshState, diags
   581  		}
   582  		instanceRefreshState.Value = copyMarksFromValue(instanceRefreshState.Value, valueWithConfigurationSchemaMarks)
   583  	}
   584  
   585  	// If we're importing and generating config, generate it now.
   586  	if len(n.generateConfigPath) > 0 {
   587  		if n.Config != nil {
   588  			return instanceRefreshState, diags.Append(fmt.Errorf("tried to generate config for %s, but it already exists", n.Addr))
   589  		}
   590  
   591  		schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.Resource.Resource)
   592  		if schema == nil {
   593  			// Should be caught during validation, so we don't bother with a pretty error here
   594  			diags = diags.Append(fmt.Errorf("provider does not support resource type for %q", n.Addr))
   595  			return instanceRefreshState, diags
   596  		}
   597  
   598  		// Generate the HCL string first, then parse the HCL body from it.
   599  		// First we generate the contents of the resource block for use within
   600  		// the planning node. Then we wrap it in an enclosing resource block to
   601  		// pass into the plan for rendering.
   602  		generatedHCLAttributes, generatedDiags := n.generateHCLStringAttributes(n.Addr, instanceRefreshState, schema)
   603  		diags = diags.Append(generatedDiags)
   604  
   605  		n.generatedConfigHCL = genconfig.WrapResourceContents(n.Addr, generatedHCLAttributes)
   606  
   607  		// parse the "file" as HCL to get the hcl.Body
   608  		synthHCLFile, hclDiags := hclsyntax.ParseConfig([]byte(generatedHCLAttributes), filepath.Base(n.generateConfigPath), hcl.Pos{Byte: 0, Line: 1, Column: 1})
   609  		diags = diags.Append(hclDiags)
   610  		if hclDiags.HasErrors() {
   611  			return instanceRefreshState, diags
   612  		}
   613  
   614  		// We have to do a kind of mini parsing of the content here to correctly
   615  		// mark attributes like 'provider' as hidden. We only care about the
   616  		// resulting content, so it's the remain body that gets passed into the
   617  		// resource as the config.
   618  		_, remain, resourceDiags := synthHCLFile.Body.PartialContent(configs.ResourceBlockSchema)
   619  		diags = diags.Append(resourceDiags)
   620  		if resourceDiags.HasErrors() {
   621  			return instanceRefreshState, diags
   622  		}
   623  
   624  		n.Config = &configs.Resource{
   625  			Mode:     addrs.ManagedResourceMode,
   626  			Type:     n.Addr.Resource.Resource.Type,
   627  			Name:     n.Addr.Resource.Resource.Name,
   628  			Config:   remain,
   629  			Managed:  &configs.ManagedResource{},
   630  			Provider: n.ResolvedProvider.Provider,
   631  		}
   632  	}
   633  
   634  	diags = diags.Append(riNode.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   635  	return instanceRefreshState, diags
   636  }
   637  
   638  func (n *NodePlannableResourceInstance) shouldImport(ctx EvalContext) bool {
   639  	if n.importTarget.ID == "" {
   640  		return false
   641  	}
   642  
   643  	// If the import target already has state, we should not attempt to import
   644  	// it, but instead run a normal plan for it.
   645  	state := ctx.State()
   646  	return state.ResourceInstance(n.ResourceInstanceAddr()) == nil
   647  }
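// Sketch of the decision above, as a truth table:
//
//	importTarget.ID == ""                 -> false (no import block applies)
//	importTarget.ID != "", in state       -> false (already imported; plan normally)
//	importTarget.ID != "", not in state   -> true  (plan an import)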
   648  
   649  // generateHCLStringAttributes produces a string in HCL format for the given
   650  // resource state and schema without the surrounding block.
   651  func (n *NodePlannableResourceInstance) generateHCLStringAttributes(addr addrs.AbsResourceInstance, state *states.ResourceInstanceObject, schema *configschema.Block) (string, tfdiags.Diagnostics) {
   652  	filteredSchema := schema.Filter(
   653  		configschema.FilterOr(
   654  			configschema.FilterReadOnlyAttribute,
   655  			configschema.FilterDeprecatedAttribute,
   656  
   657  			// The legacy SDK adds an Optional+Computed "id" attribute to the
   658  			// resource schema even if not defined in provider code.
   659  			// During validation, however, the presence of an extraneous "id"
   660  			// attribute in config will cause an error.
   661  			// Remove this attribute so we do not generate an "id" attribute
   662  			// where there is a risk that it is not in the real resource schema.
   663  			//
   664  			// TRADEOFF: Resources in which there actually is an
   665  			// Optional+Computed "id" attribute in the schema will have that
   666  			// attribute missing from generated config.
   667  			configschema.FilterHelperSchemaIdAttribute,
   668  		),
   669  		configschema.FilterDeprecatedBlock,
   670  	)
   671  
   672  	providerAddr := addrs.LocalProviderConfig{
   673  		LocalName: n.ResolvedProvider.Provider.Type,
   674  		Alias:     n.ResolvedProvider.Alias,
   675  	}
   676  
   677  	return genconfig.GenerateResourceContents(addr, filteredSchema, providerAddr, state.Value)
   678  }
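// A hypothetical sketch of the two-step generation: this method returns only
// the body attributes, e.g.
//
//	ami           = "ami-0123456789abcdef0"
//	instance_type = "t2.micro"
//
// and importState wraps them via genconfig.WrapResourceContents into a full
// block for rendering in the plan:
//
//	resource "aws_instance" "example" {
//	  ami           = "ami-0123456789abcdef0"
//	  instance_type = "t2.micro"
//	}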
   679  
   680  // mergeDeps returns the union of two sets of dependencies.
   681  func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
   682  	switch {
   683  	case len(a) == 0:
   684  		return b
   685  	case len(b) == 0:
   686  		return a
   687  	}
   688  
   689  	set := make(map[string]addrs.ConfigResource)
   690  
   691  	for _, dep := range a {
   692  		set[dep.String()] = dep
   693  	}
   694  
   695  	for _, dep := range b {
   696  		set[dep.String()] = dep
   697  	}
   698  
   699  	newDeps := make([]addrs.ConfigResource, 0, len(set))
   700  	for _, dep := range set {
   701  		newDeps = append(newDeps, dep)
   702  	}
   703  
   704  	return newDeps
   705  }
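// Illustrative usage, as a sketch (addresses hypothetical): mergeDeps treats
// dependencies as a set keyed by their String() rendering, so duplicates
// collapse and ordering is not preserved.
//
//	x := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "null_resource", Name: "x"}.InModule(addrs.RootModule)
//	y := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "null_resource", Name: "y"}.InModule(addrs.RootModule)
//	merged := mergeDeps([]addrs.ConfigResource{x}, []addrs.ConfigResource{x, y})
//	// merged contains x and y exactly once each, in map-iteration order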
   706  
   707  func depsEqual(a, b []addrs.ConfigResource) bool {
   708  	if len(a) != len(b) {
   709  		return false
   710  	}
   711  
   712  	// Because we need to sort the deps to compare equality, make shallow
   713  	// copies to prevent concurrently modifying the array values on
   714  	// dependencies shared between expanded instances.
   715  	copyA, copyB := make([]addrs.ConfigResource, len(a)), make([]addrs.ConfigResource, len(b))
   716  	copy(copyA, a)
   717  	copy(copyB, b)
   718  	a, b = copyA, copyB
   719  
   720  	less := func(s []addrs.ConfigResource) func(i, j int) bool {
   721  		return func(i, j int) bool {
   722  			return s[i].String() < s[j].String()
   723  		}
   724  	}
   725  
   726  	sort.Slice(a, less(a))
   727  	sort.Slice(b, less(b))
   728  
   729  	for i := range a {
   730  		if !a[i].Equal(b[i]) {
   731  			return false
   732  		}
   733  	}
   734  	return true
   735  }
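// Companion sketch for depsEqual, reusing x and y from the mergeDeps sketch
// above: equality is order-insensitive because both slices are shallow-copied
// and sorted by String() before the element-wise comparison.
//
//	depsEqual([]addrs.ConfigResource{x, y}, []addrs.ConfigResource{y, x}) // true
//	depsEqual([]addrs.ConfigResource{x, y}, []addrs.ConfigResource{x})   // false: lengths differ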