github.com/muratcelep/terraform@v1.1.0-beta2-not-internal-4/not-internal/terraform/context_plan.go

     1  package terraform
     2  
     3  import (
     4  	"bytes"
     5  	"fmt"
     6  	"log"
     7  	"sort"
     8  	"strings"
     9  
    10  	"github.com/zclconf/go-cty/cty"
    11  
    12  	"github.com/muratcelep/terraform/not-internal/addrs"
    13  	"github.com/muratcelep/terraform/not-internal/configs"
    14  	"github.com/muratcelep/terraform/not-internal/instances"
    15  	"github.com/muratcelep/terraform/not-internal/plans"
    16  	"github.com/muratcelep/terraform/not-internal/refactoring"
    17  	"github.com/muratcelep/terraform/not-internal/states"
    18  	"github.com/muratcelep/terraform/not-internal/tfdiags"
    19  )
    20  
    21  // PlanOpts are the various options that affect the details of how Terraform
    22  // will build a plan.
    23  type PlanOpts struct {
    24  	Mode         plans.Mode
    25  	SkipRefresh  bool
    26  	SetVariables InputValues
    27  	Targets      []addrs.Targetable
    28  	ForceReplace []addrs.AbsResourceInstance
    29  }
    30  
    31  // Plan generates an execution plan for the given configuration, comparing it
    32  // against the given previous run state, and returns the plan along with any
    33  // diagnostics.
    34  //
    35  // The returned plan carries the planned changes together with the prior state
    36  // and previous run state snapshots, and can be saved so that the changes can
    37  // later be carried out by passing the plan to Apply.
    38  //
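        // A minimal caller sketch (hypothetical names: ctx is assumed to come from
        // NewContext, cfg from the configuration loader, and prevState from the
        // state manager):
        //
        //	plan, diags := ctx.Plan(cfg, prevState, &terraform.PlanOpts{
        //		Mode: plans.NormalMode,
        //	})
        //	if diags.HasErrors() {
        //		return diags.Err()
        //	}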
    39  func (c *Context) Plan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
    40  	defer c.acquireRun("plan")()
    41  	var diags tfdiags.Diagnostics
    42  
    43  	// Save the downstream functions from needing to deal with these broken situations.
    44  	// No real callers should rely on these, but we have a bunch of old and
    45  	// sloppy tests that don't always populate arguments properly.
    46  	if config == nil {
    47  		config = configs.NewEmptyConfig()
    48  	}
    49  	if prevRunState == nil {
    50  		prevRunState = states.NewState()
    51  	}
    52  	if opts == nil {
    53  		opts = &PlanOpts{
    54  			Mode: plans.NormalMode,
    55  		}
    56  	}
    57  
    58  	moreDiags := c.checkConfigDependencies(config)
    59  	diags = diags.Append(moreDiags)
    60  	// If required dependencies are not available then we'll bail early since
    61  	// otherwise we're likely to just see a bunch of other errors related to
    62  	// incompatibilities, which could be overwhelming for the user.
    63  	if diags.HasErrors() {
    64  		return nil, diags
    65  	}
    66  
    67  	switch opts.Mode {
    68  	case plans.NormalMode, plans.DestroyMode:
    69  		// OK
    70  	case plans.RefreshOnlyMode:
    71  		if opts.SkipRefresh {
    72  			// The CLI layer (and other similar callers) should prevent this
    73  			// combination of options.
    74  			diags = diags.Append(tfdiags.Sourceless(
    75  				tfdiags.Error,
    76  				"Incompatible plan options",
    77  				"Cannot skip refreshing in refresh-only mode. This is a bug in Terraform.",
    78  			))
    79  			return nil, diags
    80  		}
    81  	default:
    82  		// The CLI layer (and other similar callers) should not try to
    83  		// create a context for a mode that Terraform Core doesn't support.
    84  		diags = diags.Append(tfdiags.Sourceless(
    85  			tfdiags.Error,
    86  			"Unsupported plan mode",
    87  			fmt.Sprintf("Terraform Core doesn't know how to handle plan mode %s. This is a bug in Terraform.", opts.Mode),
    88  		))
    89  		return nil, diags
    90  	}
    91  	if len(opts.ForceReplace) > 0 && opts.Mode != plans.NormalMode {
    92  		// The other modes don't generate no-op or update actions that we might
    93  		// upgrade to be "replace", so it doesn't make sense to combine them.
    94  		diags = diags.Append(tfdiags.Sourceless(
    95  			tfdiags.Error,
    96  			"Unsupported plan mode",
    97  			"Forcing resource instance replacement (with -replace=...) is allowed only in normal planning mode.",
    98  		))
    99  		return nil, diags
   100  	}
   101  
   102  	variables := mergeDefaultInputVariableValues(opts.SetVariables, config.Module.Variables)
   103  
   104  	// By the time we get here, we should have values defined for all of
   105  	// the root module variables, even if some of them are "unknown". It's the
   106  	// caller's responsibility to have already handled the decoding of these
   107  	// from the various ways the CLI allows them to be set and to produce
   108  	// user-friendly error messages if they are not all present, and so
   109  	// the error message from checkInputVariables should never be seen and
   110  	// includes language asking the user to report a bug.
   111  	varDiags := checkInputVariables(config.Module.Variables, variables)
   112  	diags = diags.Append(varDiags)
   113  
   114  	if len(opts.Targets) > 0 {
   115  		diags = diags.Append(tfdiags.Sourceless(
   116  			tfdiags.Warning,
   117  			"Resource targeting is in effect",
   118  			`You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration.
   119  
   120  The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`,
   121  		))
   122  	}
   123  
   124  	var plan *plans.Plan
   125  	var planDiags tfdiags.Diagnostics
   126  	switch opts.Mode {
   127  	case plans.NormalMode:
   128  		plan, planDiags = c.plan(config, prevRunState, variables, opts)
   129  	case plans.DestroyMode:
   130  		plan, planDiags = c.destroyPlan(config, prevRunState, variables, opts)
   131  	case plans.RefreshOnlyMode:
   132  		plan, planDiags = c.refreshOnlyPlan(config, prevRunState, variables, opts)
   133  	default:
   134  		panic(fmt.Sprintf("unsupported plan mode %s", opts.Mode))
   135  	}
   136  	diags = diags.Append(planDiags)
   137  	if diags.HasErrors() {
   138  		return nil, diags
   139  	}
   140  
   141  	// convert the variables into the format expected for the plan
   142  	varVals := make(map[string]plans.DynamicValue, len(variables))
   143  	for k, iv := range variables {
   144  		// We use cty.DynamicPseudoType here so that we'll save both the
   145  		// value _and_ its dynamic type in the plan, so we can recover
   146  		// exactly the same value later.
   147  		dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType)
   148  		if err != nil {
   149  			diags = diags.Append(tfdiags.Sourceless(
   150  				tfdiags.Error,
   151  				"Failed to prepare variable value for plan",
   152  				fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err),
   153  			))
   154  			continue
   155  		}
   156  		varVals[k] = dv
   157  	}
   158  
   159  	// insert the run-specific data from the context into the plan: variables,
   160  	// targets, and provider SHAs.
   161  	if plan != nil {
   162  		plan.VariableValues = varVals
   163  		plan.TargetAddrs = opts.Targets
   164  	} else if !diags.HasErrors() {
   165  		panic("nil plan but no errors")
   166  	}
   167  
   168  	return plan, diags
   169  }
   170  
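        // DefaultPlanOpts describes a normal-mode plan with refreshing enabled and
        // with no targeting, variable overrides, or forced replacements.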
   171  var DefaultPlanOpts = &PlanOpts{
   172  	Mode: plans.NormalMode,
   173  }
   174  
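        // plan implements normal planning mode: it runs the plan walk and then
        // prunes the placeholder objects that the walk leaves in the refreshed
        // state for instances that are pending creation.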
   175  func (c *Context) plan(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
   176  	var diags tfdiags.Diagnostics
   177  
   178  	if opts.Mode != plans.NormalMode {
   179  		panic(fmt.Sprintf("called Context.plan with %s", opts.Mode))
   180  	}
   181  
   182  	plan, walkDiags := c.planWalk(config, prevRunState, rootVariables, opts)
   183  	diags = diags.Append(walkDiags)
   184  	if diags.HasErrors() {
   185  		return nil, diags
   186  	}
   187  
   188  	// The refreshed state ends up with some placeholder objects in it for
   189  	// objects pending creation. We only really care about those being in
   190  	// the working state, since that's what we're going to use when applying,
   191  	// so we'll prune them all here.
   192  	plan.PriorState.SyncWrapper().RemovePlannedResourceInstanceObjects()
   193  
   194  	return plan, diags
   195  }
   196  
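        // refreshOnlyPlan implements refresh-only planning mode. It runs the same
        // plan walk as normal mode but expects the graph to produce no resource
        // changes, returning an error diagnostic if any appear.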
   197  func (c *Context) refreshOnlyPlan(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
   198  	var diags tfdiags.Diagnostics
   199  
   200  	if opts.Mode != plans.RefreshOnlyMode {
   201  		panic(fmt.Sprintf("called Context.refreshOnlyPlan with %s", opts.Mode))
   202  	}
   203  
   204  	plan, walkDiags := c.planWalk(config, prevRunState, rootVariables, opts)
   205  	diags = diags.Append(walkDiags)
   206  	if diags.HasErrors() {
   207  		return nil, diags
   208  	}
   209  
   210  	// If the graph builder and graph nodes correctly obeyed our directive
   211  	// to refresh only, the set of resource changes should always be empty.
   212  	// We'll safety-check that here so we can return a clear message about it,
   213  	// rather than probably just generating confusing output at the UI layer.
   214  	if len(plan.Changes.Resources) != 0 {
   215  		// Some extra context in the logs in case the user reports this message
   216  		// as a bug, as a starting point for debugging.
   217  		for _, rc := range plan.Changes.Resources {
   218  			if depKey := rc.DeposedKey; depKey == states.NotDeposed {
   219  				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s", rc.Action, rc.Addr)
   220  			} else {
   221  				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s deposed object %s", rc.Action, rc.Addr, depKey)
   222  			}
   223  		}
   224  		diags = diags.Append(tfdiags.Sourceless(
   225  			tfdiags.Error,
   226  			"Invalid refresh-only plan",
   227  			"Terraform generated planned resource changes in a refresh-only plan. This is a bug in Terraform.",
   228  		))
   229  	}
   230  
   231  	// Prune out any placeholder objects we put in the state to represent
   232  	// objects that would need to be created.
   233  	plan.PriorState.SyncWrapper().RemovePlannedResourceInstanceObjects()
   234  
   235  	return plan, diags
   236  }
   237  
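        // destroyPlan implements destroy planning mode. Unless SkipRefresh is set
        // it first runs a normal plan walk to refresh the prior state, and then
        // runs a second, destroy-mode walk against that refreshed state so that
        // the result contains only delete changes.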
   238  func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
   239  	var diags tfdiags.Diagnostics
   240  	pendingPlan := &plans.Plan{}
   241  
   242  	if opts.Mode != plans.DestroyMode {
   243  		panic(fmt.Sprintf("called Context.destroyPlan with %s", opts.Mode))
   244  	}
   245  
   246  	priorState := prevRunState
   247  
   248  	// A destroy plan starts by running Refresh to read any pending data
   249  	// sources, and remove missing managed resources. This is required because
   250  	// a "destroy plan" is only creating delete changes, and is essentially a
   251  	// local operation.
   252  	//
   253  	// NOTE: if skipRefresh _is_ set then we'll rely on the destroy-plan walk
   254  	// below to upgrade the prevRunState and priorState both to the latest
   255  	// resource type schemas, so NodePlanDestroyableResourceInstance.Execute
   256  	// must coordinate with this by taking that action only when c.skipRefresh
   257  	// _is_ set. This coupling between the two is unfortunate but necessary
   258  	// to work within our current structure.
   259  	if !opts.SkipRefresh {
   260  		log.Printf("[TRACE] Context.destroyPlan: calling Context.plan to get the effect of refreshing the prior state")
   261  		normalOpts := *opts
   262  		normalOpts.Mode = plans.NormalMode
   263  		refreshPlan, refreshDiags := c.plan(config, prevRunState, rootVariables, &normalOpts)
   264  		if refreshDiags.HasErrors() {
   265  			// NOTE: Normally we'd append diagnostics regardless of whether
   266  			// there are errors, just in case there are warnings we'd want to
   267  			// preserve, but we're intentionally _not_ doing that here because
   268  			// if the first plan succeeded then we'll be running another plan
   269  			// in DestroyMode below, and we don't want to double-up any
   270  			// warnings that both plan walks would generate.
   271  			// (This does mean we won't show any warnings that would've been
   272  			// unique to only this walk, but we're assuming here that if the
   273  			// warnings aren't also applicable to a destroy plan then we'd
   274  			// rather not show them here, because this non-destroy plan for
   275  			// refreshing is largely an implementation detail.)
   276  			diags = diags.Append(refreshDiags)
   277  			return nil, diags
   278  		}
   279  
   280  		// insert the refreshed state into the destroy plan result, and ignore
   281  		// the changes recorded from the refresh.
   282  		pendingPlan.PriorState = refreshPlan.PriorState.DeepCopy()
   283  		pendingPlan.PrevRunState = refreshPlan.PrevRunState.DeepCopy()
   284  		log.Printf("[TRACE] Context.destroyPlan: now _really_ creating a destroy plan")
   285  
   286  		// We'll use the refreshed state -- which is the "prior state" from
   287  		// the perspective of this "pending plan" -- as the starting state
   288  		// for our destroy-plan walk, so it can take into account if we
   289  		// detected during refreshing that anything was already deleted outside
   290  		// of Terraform.
   291  		priorState = pendingPlan.PriorState
   292  	}
   293  
   294  	destroyPlan, walkDiags := c.planWalk(config, priorState, rootVariables, opts)
   295  	diags = diags.Append(walkDiags)
   296  	if walkDiags.HasErrors() {
   297  		return nil, diags
   298  	}
   299  
   300  	if !opts.SkipRefresh {
   301  		// If we didn't skip refreshing then we want the previous run state
   302  		// to be the one we originally fed into the c.plan call above, not
   303  		// the refreshed version we used for the destroy walk.
   304  		destroyPlan.PrevRunState = pendingPlan.PrevRunState
   305  	}
   306  
   307  	return destroyPlan, diags
   308  }
   309  
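        // prePlanFindAndApplyMoves gathers the explicit move statements from the
        // configuration together with any implied by the previous run state,
        // applies them to prevRunState before planning, and returns both the
        // combined statements and the results of applying them.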
   310  func (c *Context) prePlanFindAndApplyMoves(config *configs.Config, prevRunState *states.State, targets []addrs.Targetable) ([]refactoring.MoveStatement, refactoring.MoveResults) {
   311  	explicitMoveStmts := refactoring.FindMoveStatements(config)
   312  	implicitMoveStmts := refactoring.ImpliedMoveStatements(config, prevRunState, explicitMoveStmts)
   313  	var moveStmts []refactoring.MoveStatement
   314  	if stmtsLen := len(explicitMoveStmts) + len(implicitMoveStmts); stmtsLen > 0 {
   315  		moveStmts = make([]refactoring.MoveStatement, 0, stmtsLen)
   316  		moveStmts = append(moveStmts, explicitMoveStmts...)
   317  		moveStmts = append(moveStmts, implicitMoveStmts...)
   318  	}
   319  	moveResults := refactoring.ApplyMoves(moveStmts, prevRunState)
   320  	return moveStmts, moveResults
   321  }
   322  
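        // prePlanVerifyTargetedMoves checks, when resource targeting is in effect,
        // that every resource instance involved in a move is covered by at least
        // one target, and returns an error diagnostic listing the additional
        // -target options needed if not.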
   323  func (c *Context) prePlanVerifyTargetedMoves(moveResults refactoring.MoveResults, targets []addrs.Targetable) tfdiags.Diagnostics {
   324  	if len(targets) < 1 {
   325  		return nil // the following only matters when targeting
   326  	}
   327  
   328  	var diags tfdiags.Diagnostics
   329  
   330  	var excluded []addrs.AbsResourceInstance
   331  	for _, result := range moveResults.Changes {
   332  		fromMatchesTarget := false
   333  		toMatchesTarget := false
   334  		for _, targetAddr := range targets {
   335  			if targetAddr.TargetContains(result.From) {
   336  				fromMatchesTarget = true
   337  			}
   338  			if targetAddr.TargetContains(result.To) {
   339  				toMatchesTarget = true
   340  			}
   341  		}
   342  		if !fromMatchesTarget {
   343  			excluded = append(excluded, result.From)
   344  		}
   345  		if !toMatchesTarget {
   346  			excluded = append(excluded, result.To)
   347  		}
   348  	}
   349  	if len(excluded) > 0 {
   350  		sort.Slice(excluded, func(i, j int) bool {
   351  			return excluded[i].Less(excluded[j])
   352  		})
   353  
   354  		var listBuf strings.Builder
   355  		var prevResourceAddr addrs.AbsResource
   356  		for _, instAddr := range excluded {
   357  			// Targeting generally ends up selecting whole resources rather
   358  			// than individual instances, because we don't factor in
   359  			// individual instances until DynamicExpand, so we're going to
   360  			// always show whole resource addresses here, excluding any
   361  			// instance keys. (This also neatly avoids dealing with the
   362  			// different quoting styles required for string instance keys
   363  			// on different shells, which is handy.)
   364  			//
   365  			// To avoid showing duplicates when we have multiple instances
   366  			// of the same resource, we'll remember the most recent
   367  			// resource we rendered in prevResourceAddr, which is sufficient
   368  			// because we sorted the list of instance addresses above, and
   369  			// our sort order always groups together instances of the same
   370  			// resource.
   371  			resourceAddr := instAddr.ContainingResource()
   372  			if resourceAddr.Equal(prevResourceAddr) {
   373  				continue
   374  			}
   375  			fmt.Fprintf(&listBuf, "\n  -target=%q", resourceAddr.String())
   376  			prevResourceAddr = resourceAddr
   377  		}
   378  		diags = diags.Append(tfdiags.Sourceless(
   379  			tfdiags.Error,
   380  			"Moved resource instances excluded by targeting",
   381  			fmt.Sprintf(
   382  				"Resource instances in your current state have moved to new addresses in the latest configuration. Terraform must include those resource instances while planning in order to ensure a correct result, but your -target=... options to not fully cover all of those resource instances.\n\nTo create a valid plan, either remove your -target=... options altogether or add the following additional target options:%s\n\nNote that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.",
   383  				listBuf.String(),
   384  			),
   385  		))
   386  	}
   387  
   388  	return diags
   389  }
   390  
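        // postPlanValidateMoves validates the move statements against the full set
        // of expanded instances, which is only known after the plan walk has
        // completed.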
   391  func (c *Context) postPlanValidateMoves(config *configs.Config, stmts []refactoring.MoveStatement, allInsts instances.Set) tfdiags.Diagnostics {
   392  	return refactoring.ValidateMoves(stmts, config, allInsts)
   393  }
   394  
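        // planWalk is the common core of all three planning modes: it applies any
        // pending moves to a copy of the previous run state, builds and walks the
        // plan graph for the requested mode, and assembles the resulting changes,
        // drift information, and state snapshots into a plans.Plan.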
   395  func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
   396  	var diags tfdiags.Diagnostics
   397  	log.Printf("[DEBUG] Building and walking plan graph for %s", opts.Mode)
   398  
   399  	prevRunState = prevRunState.DeepCopy() // don't modify the caller's object when we process the moves
   400  	moveStmts, moveResults := c.prePlanFindAndApplyMoves(config, prevRunState, opts.Targets)
   401  
   402  	// If resource targeting is in effect then it might conflict with the
   403  	// move result.
   404  	diags = diags.Append(c.prePlanVerifyTargetedMoves(moveResults, opts.Targets))
   405  	if diags.HasErrors() {
   406  		// We'll return early here, because if we have any moved resource
   407  		// instances excluded by targeting then planning is likely to encounter
   408  		// strange problems that may lead to confusing error messages.
   409  		return nil, diags
   410  	}
   411  
   412  	graph, walkOp, moreDiags := c.planGraph(config, prevRunState, opts, true)
   413  	diags = diags.Append(moreDiags)
   414  	if diags.HasErrors() {
   415  		return nil, diags
   416  	}
   417  
   418  	// If we get here then we should definitely have a non-nil "graph", which
   419  	// we can now walk.
   420  	changes := plans.NewChanges()
   421  	walker, walkDiags := c.walk(graph, walkOp, &graphWalkOpts{
   422  		Config:             config,
   423  		InputState:         prevRunState,
   424  		Changes:            changes,
   425  		MoveResults:        moveResults,
   426  		RootVariableValues: rootVariables,
   427  	})
   428  	diags = diags.Append(walker.NonFatalDiagnostics)
   429  	diags = diags.Append(walkDiags)
   430  	moveValidateDiags := c.postPlanValidateMoves(config, moveStmts, walker.InstanceExpander.AllInstances())
   431  	if moveValidateDiags.HasErrors() {
   432  		// If any of the move statements are invalid then those errors take
   433  		// precedence over any other errors because an incomplete move graph
   434  		// is quite likely to be the _cause_ of various errors. This oddity
   435  		// comes from the fact that we need to apply the moves before we
   436  		// actually validate them, because validation depends on the result
   437  		// of first trying to plan.
   438  		return nil, moveValidateDiags
   439  	}
   440  	diags = diags.Append(moveValidateDiags) // might just contain warnings
   441  
   442  	if len(moveResults.Blocked) > 0 && !diags.HasErrors() {
   443  		// If we had blocked moves and we're not going to be returning errors
   444  		// then we'll report the blockers as a warning. We do this only in the
   445  		// absence of errors because invalid move statements might well be
   446  		// the root cause of the blockers, and so better to give an actionable
   447  		// error message than a less-actionable warning.
   448  		diags = diags.Append(blockedMovesWarningDiag(moveResults))
   449  	}
   450  
   451  	prevRunState = walker.PrevRunState.Close()
   452  	priorState := walker.RefreshState.Close()
   453  	driftedResources, driftDiags := c.driftedResources(config, prevRunState, priorState, moveResults)
   454  	diags = diags.Append(driftDiags)
   455  
   456  	plan := &plans.Plan{
   457  		UIMode:           opts.Mode,
   458  		Changes:          changes,
   459  		DriftedResources: driftedResources,
   460  		PrevRunState:     prevRunState,
   461  		PriorState:       priorState,
   462  
   463  		// Other fields get populated by Context.Plan after we return
   464  	}
   465  	return plan, diags
   466  }
   467  
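        // planGraph builds the graph to walk for the given plan mode and returns
        // it along with the walk operation to use. Normal and refresh-only plans
        // share PlanGraphBuilder, with skipPlanChanges distinguishing the latter,
        // while destroy plans use DestroyPlanGraphBuilder.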
   468  func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, opts *PlanOpts, validate bool) (*Graph, walkOperation, tfdiags.Diagnostics) {
   469  	switch mode := opts.Mode; mode {
   470  	case plans.NormalMode:
   471  		graph, diags := (&PlanGraphBuilder{
   472  			Config:       config,
   473  			State:        prevRunState,
   474  			Plugins:      c.plugins,
   475  			Targets:      opts.Targets,
   476  			ForceReplace: opts.ForceReplace,
   477  			Validate:     validate,
   478  			skipRefresh:  opts.SkipRefresh,
   479  		}).Build(addrs.RootModuleInstance)
   480  		return graph, walkPlan, diags
   481  	case plans.RefreshOnlyMode:
   482  		graph, diags := (&PlanGraphBuilder{
   483  			Config:          config,
   484  			State:           prevRunState,
   485  			Plugins:         c.plugins,
   486  			Targets:         opts.Targets,
   487  			Validate:        validate,
   488  			skipRefresh:     opts.SkipRefresh,
   489  			skipPlanChanges: true, // this activates "refresh only" mode.
   490  		}).Build(addrs.RootModuleInstance)
   491  		return graph, walkPlan, diags
   492  	case plans.DestroyMode:
   493  		graph, diags := (&DestroyPlanGraphBuilder{
   494  			Config:      config,
   495  			State:       prevRunState,
   496  			Plugins:     c.plugins,
   497  			Targets:     opts.Targets,
   498  			Validate:    validate,
   499  			skipRefresh: opts.SkipRefresh,
   500  		}).Build(addrs.RootModuleInstance)
   501  		return graph, walkPlanDestroy, diags
   502  	default:
   503  		// The above should cover all plans.Mode values
   504  		panic(fmt.Sprintf("unsupported plan mode %s", mode))
   505  	}
   506  }
   507  
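        // driftedResources compares the old and new states and returns a change
        // for each managed resource instance that was deleted or modified outside
        // of Terraform, or that moved to a new address, for reporting as drift.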
   508  func (c *Context) driftedResources(config *configs.Config, oldState, newState *states.State, moves refactoring.MoveResults) ([]*plans.ResourceInstanceChangeSrc, tfdiags.Diagnostics) {
   509  	var diags tfdiags.Diagnostics
   510  
   511  	if newState.ManagedResourcesEqual(oldState) && len(moves.Changes) == 0 {
   512  		// Nothing to do, because we only detect and report drift for managed
   513  		// resource instances.
   514  		return nil, diags
   515  	}
   516  
   517  	schemas, schemaDiags := c.Schemas(config, newState)
   518  	diags = diags.Append(schemaDiags)
   519  	if diags.HasErrors() {
   520  		return nil, diags
   521  	}
   522  
   523  	var drs []*plans.ResourceInstanceChangeSrc
   524  
   525  	for _, ms := range oldState.Modules {
   526  		for _, rs := range ms.Resources {
   527  			if rs.Addr.Resource.Mode != addrs.ManagedResourceMode {
   528  				// Drift reporting is only for managed resources
   529  				continue
   530  			}
   531  
   532  			provider := rs.ProviderConfig.Provider
   533  			for key, oldIS := range rs.Instances {
   534  				if oldIS.Current == nil {
   535  					// Not interested in instances that only have deposed objects
   536  					continue
   537  				}
   538  				addr := rs.Addr.Instance(key)
   539  
   540  				// Previous run address defaults to the current address, but
   541  				// can differ if the resource moved before refreshing
   542  				prevRunAddr := addr
   543  				if move, ok := moves.Changes[addr.UniqueKey()]; ok {
   544  					prevRunAddr = move.From
   545  				}
   546  
   547  				newIS := newState.ResourceInstance(addr)
   548  
   549  				schema, _ := schemas.ResourceTypeConfig(
   550  					provider,
   551  					addr.Resource.Resource.Mode,
   552  					addr.Resource.Resource.Type,
   553  				)
   554  				if schema == nil {
   555  					// This should never happen, but just in case
   556  					return nil, diags.Append(tfdiags.Sourceless(
   557  						tfdiags.Error,
   558  						"Missing resource schema from provider",
   559  						fmt.Sprintf("No resource schema found for %s.", addr.Resource.Resource.Type),
   560  					))
   561  				}
   562  				ty := schema.ImpliedType()
   563  
   564  				oldObj, err := oldIS.Current.Decode(ty)
   565  				if err != nil {
   566  					// This should also never happen
   567  					return nil, diags.Append(tfdiags.Sourceless(
   568  						tfdiags.Error,
   569  						"Failed to decode resource from state",
   570  						fmt.Sprintf("Error decoding %q from previous state: %s", addr.String(), err),
   571  					))
   572  				}
   573  
   574  				var newObj *states.ResourceInstanceObject
   575  				if newIS != nil && newIS.Current != nil {
   576  					newObj, err = newIS.Current.Decode(ty)
   577  					if err != nil {
   578  						// This should also never happen
   579  						return nil, diags.Append(tfdiags.Sourceless(
   580  							tfdiags.Error,
   581  							"Failed to decode resource from state",
   582  							fmt.Sprintf("Error decoding %q from prior state: %s", addr.String(), err),
   583  						))
   584  					}
   585  				}
   586  
   587  				var oldVal, newVal cty.Value
   588  				oldVal = oldObj.Value
   589  				if newObj != nil {
   590  					newVal = newObj.Value
   591  				} else {
   592  					newVal = cty.NullVal(ty)
   593  				}
   594  
   595  				if oldVal.RawEquals(newVal) && addr.Equal(prevRunAddr) {
   596  					// No drift if the two values are semantically equivalent
   597  					// and no move has happened
   598  					continue
   599  				}
   600  
   601  				// We can detect three types of changes after refreshing state,
   602  				// only two of which are easily understood as "drift":
   603  				//
   604  				// - Resources which were deleted outside of Terraform;
   605  				// - Resources where the object value has changed outside of
   606  				//   Terraform;
   607  				// - Resources which have been moved without other changes.
   608  				//
   609  				// All of these are returned as drift, to allow refresh-only plans
   610  				// to present a full set of changes which will be applied.
   611  				var action plans.Action
   612  				switch {
   613  				case newVal.IsNull():
   614  					action = plans.Delete
   615  				case !oldVal.RawEquals(newVal):
   616  					action = plans.Update
   617  				default:
   618  					action = plans.NoOp
   619  				}
   620  
   621  				change := &plans.ResourceInstanceChange{
   622  					Addr:         addr,
   623  					PrevRunAddr:  prevRunAddr,
   624  					ProviderAddr: rs.ProviderConfig,
   625  					Change: plans.Change{
   626  						Action: action,
   627  						Before: oldVal,
   628  						After:  newVal,
   629  					},
   630  				}
   631  
   632  				changeSrc, err := change.Encode(ty)
   633  				if err != nil {
   634  					diags = diags.Append(err)
   635  					return nil, diags
   636  				}
   637  
   638  				drs = append(drs, changeSrc)
   639  			}
   640  		}
   641  	}
   642  
   643  	return drs, diags
   644  }
   645  
   646  // PlanGraphForUI is a last vestige of graphs in the public interface of Context
   647  // (as opposed to graphs as an implementation detail) intended only for use
   648  // by the "terraform graph" command when asked to render a plan-time graph.
   649  //
   650  // The result of this is intended only for rendering to the user as a dot
   651  // graph, and so may change in future in order to make the result more useful
   652  // in that context, even if it drifts away from the physical graph that Terraform
   653  // Core currently uses as an implementation detail of planning.
   654  func (c *Context) PlanGraphForUI(config *configs.Config, prevRunState *states.State, mode plans.Mode) (*Graph, tfdiags.Diagnostics) {
   655  	// For now though, this really is just the internal graph, confusing
   656  	// implementation details and all.
   657  
   658  	var diags tfdiags.Diagnostics
   659  
   660  	opts := &PlanOpts{Mode: mode}
   661  
   662  	graph, _, moreDiags := c.planGraph(config, prevRunState, opts, false)
   663  	diags = diags.Append(moreDiags)
   664  	return graph, diags
   665  }
   666  
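        // blockedMovesWarningDiag renders a warning listing the moves that could
        // not be applied because an object already exists at the intended address.
        // Callers must check that results.Blocked is non-empty before calling.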
   667  func blockedMovesWarningDiag(results refactoring.MoveResults) tfdiags.Diagnostic {
   668  	if len(results.Blocked) < 1 {
   669  		// Caller should check first
   670  		panic("request to render blocked moves warning without any blocked moves")
   671  	}
   672  
   673  	var itemsBuf bytes.Buffer
   674  	for _, blocked := range results.Blocked {
   675  		fmt.Fprintf(&itemsBuf, "\n  - %s could not move to %s", blocked.Actual, blocked.Wanted)
   676  	}
   677  
   678  	return tfdiags.Sourceless(
   679  		tfdiags.Warning,
   680  		"Unresolved resource instance address changes",
   681  		fmt.Sprintf(
   682  			"Terraform tried to adjust resource instance addresses in the prior state based on change information recorded in the configuration, but some adjustments did not succeed due to existing objects already at the intended addresses:%s\n\nTerraform has planned to destroy these objects. If Terraform's proposed changes aren't appropriate, you must first resolve the conflicts using the \"terraform state\" subcommands and then create a new plan.",
   683  			itemsBuf.String(),
   684  		),
   685  	)
   686  }