kubeform.dev/terraform-backend-sdk@v0.0.0-20220310143633-45f07fe731c5/terraform/context_plan.go

package terraform

import (
	"fmt"
	"log"
	"sort"
	"strings"

	"github.com/zclconf/go-cty/cty"

	"kubeform.dev/terraform-backend-sdk/addrs"
	"kubeform.dev/terraform-backend-sdk/configs"
	"kubeform.dev/terraform-backend-sdk/instances"
	"kubeform.dev/terraform-backend-sdk/plans"
	"kubeform.dev/terraform-backend-sdk/refactoring"
	"kubeform.dev/terraform-backend-sdk/states"
	"kubeform.dev/terraform-backend-sdk/tfdiags"
)

// PlanOpts are the various options that affect the details of how Terraform
// will build a plan.
type PlanOpts struct {
	Mode         plans.Mode
	SkipRefresh  bool
	SetVariables InputValues
	Targets      []addrs.Targetable
	ForceReplace []addrs.AbsResourceInstance
}

// Plan generates an execution plan for the given context, and returns the
// refreshed state.
//
// The execution plan encapsulates the context and can be stored
// in order to reinstantiate a context later for Apply.
//
// Plan also updates the diff of this context to be the diff generated
// by the plan, so Apply can be called after.
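//
// A minimal usage sketch (assuming ctx is an already-configured *Context,
// and config and prevState hold the loaded configuration and the previous
// run's state):
//
//	plan, diags := ctx.Plan(config, prevState, &PlanOpts{
//		Mode: plans.NormalMode,
//	})
//	if diags.HasErrors() {
//		// report diagnostics to the caller instead of applying
//	}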
func (c *Context) Plan(config *configs.Config, prevRunState *states.State, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	defer c.acquireRun("plan")()
	var diags tfdiags.Diagnostics

	// Save the downstream functions from needing to deal with these broken situations.
	// No real callers should rely on these, but we have a bunch of old and
	// sloppy tests that don't always populate arguments properly.
	if config == nil {
		config = configs.NewEmptyConfig()
	}
	if prevRunState == nil {
		prevRunState = states.NewState()
	}
	if opts == nil {
		opts = &PlanOpts{
			Mode: plans.NormalMode,
		}
	}

	moreDiags := CheckCoreVersionRequirements(config)
	diags = diags.Append(moreDiags)
	// If version constraints are not met then we'll bail early since otherwise
	// we're likely to just see a bunch of other errors related to
	// incompatibilities, which could be overwhelming for the user.
	if diags.HasErrors() {
		return nil, diags
	}

	switch opts.Mode {
	case plans.NormalMode, plans.DestroyMode:
		// OK
	case plans.RefreshOnlyMode:
		if opts.SkipRefresh {
			// The CLI layer (and other similar callers) should prevent this
			// combination of options.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Incompatible plan options",
				"Cannot skip refreshing in refresh-only mode. This is a bug in Terraform.",
			))
			return nil, diags
		}
	default:
		// The CLI layer (and other similar callers) should not try to
		// create a context for a mode that Terraform Core doesn't support.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unsupported plan mode",
			fmt.Sprintf("Terraform Core doesn't know how to handle plan mode %s. This is a bug in Terraform.", opts.Mode),
		))
		return nil, diags
	}
	if len(opts.ForceReplace) > 0 && opts.Mode != plans.NormalMode {
		// The other modes don't generate no-op or update actions that we might
		// upgrade to be "replace", so it doesn't make sense to combine those.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unsupported plan mode",
			"Forcing resource instance replacement (with -replace=...) is allowed only in normal planning mode.",
		))
		return nil, diags
	}

	variables := mergeDefaultInputVariableValues(opts.SetVariables, config.Module.Variables)

	// By the time we get here, we should have values defined for all of
	// the root module variables, even if some of them are "unknown". It's the
	// caller's responsibility to have already handled the decoding of these
	// from the various ways the CLI allows them to be set and to produce
	// user-friendly error messages if they are not all present, and so
	// the error message from checkInputVariables should never be seen and
	// includes language asking the user to report a bug.
	varDiags := checkInputVariables(config.Module.Variables, variables)
	diags = diags.Append(varDiags)

	if len(opts.Targets) > 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Warning,
			"Resource targeting is in effect",
			`You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration.

The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`,
		))
	}

	var plan *plans.Plan
	var planDiags tfdiags.Diagnostics
	switch opts.Mode {
	case plans.NormalMode:
		plan, planDiags = c.plan(config, prevRunState, variables, opts)
	case plans.DestroyMode:
		plan, planDiags = c.destroyPlan(config, prevRunState, variables, opts)
	case plans.RefreshOnlyMode:
		plan, planDiags = c.refreshOnlyPlan(config, prevRunState, variables, opts)
	default:
		panic(fmt.Sprintf("unsupported plan mode %s", opts.Mode))
	}
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	// convert the variables into the format expected for the plan
	varVals := make(map[string]plans.DynamicValue, len(variables))
	for k, iv := range variables {
		// We use cty.DynamicPseudoType here so that we'll save both the
		// value _and_ its dynamic type in the plan, so we can recover
		// exactly the same value later.
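		//
		// For example, the round-trip looks like this (a sketch; error
		// handling elided):
		//
		//	dv, _ := plans.NewDynamicValue(cty.StringVal("a"), cty.DynamicPseudoType)
		//	v, _ := dv.Decode(cty.DynamicPseudoType) // cty.StringVal("a"), with its type preserved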
		dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType)
		if err != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to prepare variable value for plan",
				fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err),
			))
			continue
		}
		varVals[k] = dv
	}

	// insert the run-specific data from the context into the plan; variables,
	// targets and provider SHAs.
	if plan != nil {
		plan.VariableValues = varVals
		plan.TargetAddrs = opts.Targets
		plan.ProviderSHA256s = c.providerSHA256s
	} else if !diags.HasErrors() {
		panic("nil plan but no errors")
	}

	return plan, diags
}

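// DefaultPlanOpts are the default plan options: a normal-mode plan with no
// variables set, no targeting, no forced replacement, and refreshing enabled.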
var DefaultPlanOpts = &PlanOpts{
	Mode: plans.NormalMode,
}

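// plan runs the plan walk for a normal-mode plan and then prunes from the
// refreshed state the placeholder objects that stand in for instances
// pending creation, since those belong only in the working state.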
func (c *Context) plan(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if opts.Mode != plans.NormalMode {
		panic(fmt.Sprintf("called Context.plan with %s", opts.Mode))
	}

	plan, walkDiags := c.planWalk(config, prevRunState, rootVariables, opts)
	diags = diags.Append(walkDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	// The refreshed state ends up with some placeholder objects in it for
	// objects pending creation. We only really care about those being in
	// the working state, since that's what we're going to use when applying,
	// so we'll prune them all here.
	plan.PriorState.SyncWrapper().RemovePlannedResourceInstanceObjects()

	return plan, diags
}

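// refreshOnlyPlan runs the plan walk with planned changes disabled, so the
// resulting plan carries only the refreshed state; it reports a bug-style
// error diagnostic if the walk nonetheless produced resource changes.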
func (c *Context) refreshOnlyPlan(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if opts.Mode != plans.RefreshOnlyMode {
		panic(fmt.Sprintf("called Context.refreshOnlyPlan with %s", opts.Mode))
	}

	plan, walkDiags := c.planWalk(config, prevRunState, rootVariables, opts)
	diags = diags.Append(walkDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	// If the graph builder and graph nodes correctly obeyed our directive
	// to refresh only, the set of resource changes should always be empty.
	// We'll safety-check that here so we can return a clear message about it,
	// rather than probably just generating confusing output at the UI layer.
	if len(plan.Changes.Resources) != 0 {
		// Some extra context in the logs in case the user reports this message
		// as a bug, as a starting point for debugging.
		for _, rc := range plan.Changes.Resources {
			if depKey := rc.DeposedKey; depKey == states.NotDeposed {
				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s", rc.Action, rc.Addr)
			} else {
				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s deposed object %s", rc.Action, rc.Addr, depKey)
			}
		}
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid refresh-only plan",
			"Terraform generated planned resource changes in a refresh-only plan. This is a bug in Terraform.",
		))
	}

	// Prune out any placeholder objects we put in the state to represent
	// objects that would need to be created.
	plan.PriorState.SyncWrapper().RemovePlannedResourceInstanceObjects()

	return plan, diags
}

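// destroyPlan produces a plan consisting only of delete changes. Unless
// opts.SkipRefresh is set, it first runs a normal plan internally to refresh
// the prior state and then uses that refreshed state as the starting point
// for the destroy-plan walk.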
func (c *Context) destroyPlan(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	pendingPlan := &plans.Plan{}

	if opts.Mode != plans.DestroyMode {
		panic(fmt.Sprintf("called Context.destroyPlan with %s", opts.Mode))
	}

	priorState := prevRunState

	// A destroy plan starts by running Refresh to read any pending data
	// sources, and remove missing managed resources. This is required because
	// a "destroy plan" is only creating delete changes, and is essentially a
	// local operation.
	//
	// NOTE: if skipRefresh _is_ set then we'll rely on the destroy-plan walk
	// below to upgrade the prevRunState and priorState both to the latest
	// resource type schemas, so NodePlanDestroyableResourceInstance.Execute
	// must coordinate with this by taking that action only when c.skipRefresh
	// _is_ set. This coupling between the two is unfortunate but necessary
	// to work within our current structure.
	if !opts.SkipRefresh {
		log.Printf("[TRACE] Context.destroyPlan: calling Context.plan to get the effect of refreshing the prior state")
		normalOpts := *opts
		normalOpts.Mode = plans.NormalMode
		refreshPlan, refreshDiags := c.plan(config, prevRunState, rootVariables, &normalOpts)
		diags = diags.Append(refreshDiags)
		if diags.HasErrors() {
			return nil, diags
		}

		// insert the refreshed state into the destroy plan result, and ignore
		// the changes recorded from the refresh.
		pendingPlan.PriorState = refreshPlan.PriorState.DeepCopy()
		pendingPlan.PrevRunState = refreshPlan.PrevRunState.DeepCopy()
		log.Printf("[TRACE] Context.destroyPlan: now _really_ creating a destroy plan")

		// We'll use the refreshed state -- which is the "prior state" from
		// the perspective of this "pending plan" -- as the starting state
		// for our destroy-plan walk, so it can take into account if we
		// detected during refreshing that anything was already deleted outside
		// of Terraform.
		priorState = pendingPlan.PriorState
	}

	destroyPlan, walkDiags := c.planWalk(config, priorState, rootVariables, opts)
	diags = diags.Append(walkDiags)
	if walkDiags.HasErrors() {
		return nil, diags
	}

	if !opts.SkipRefresh {
		// If we didn't skip refreshing then we want the previous run state
		// to be the one we originally fed into the c.plan call above, not
		// the refreshed version we used for the destroy walk.
		destroyPlan.PrevRunState = pendingPlan.PrevRunState
	}

	return destroyPlan, diags
}

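// prePlanFindAndApplyMoves collects the explicit move statements from the
// configuration and the implied ones derived from the previous run state,
// applies them to prevRunState in-place, and returns both the statements
// and the per-instance move results.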
func (c *Context) prePlanFindAndApplyMoves(config *configs.Config, prevRunState *states.State, targets []addrs.Targetable) ([]refactoring.MoveStatement, map[addrs.UniqueKey]refactoring.MoveResult) {
	explicitMoveStmts := refactoring.FindMoveStatements(config)
	implicitMoveStmts := refactoring.ImpliedMoveStatements(config, prevRunState, explicitMoveStmts)
	var moveStmts []refactoring.MoveStatement
	if stmtsLen := len(explicitMoveStmts) + len(implicitMoveStmts); stmtsLen > 0 {
		moveStmts = make([]refactoring.MoveStatement, 0, stmtsLen)
		moveStmts = append(moveStmts, explicitMoveStmts...)
		moveStmts = append(moveStmts, implicitMoveStmts...)
	}
	moveResults := refactoring.ApplyMoves(moveStmts, prevRunState)
	return moveStmts, moveResults
}

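// prePlanVerifyTargetedMoves verifies that, when targeting is in effect,
// the targets cover both endpoints of every move result, and returns an
// error diagnostic listing the additional -target options needed if not.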
func (c *Context) prePlanVerifyTargetedMoves(moveResults map[addrs.UniqueKey]refactoring.MoveResult, targets []addrs.Targetable) tfdiags.Diagnostics {
	if len(targets) < 1 {
		return nil // the following only matters when targeting
	}

	var diags tfdiags.Diagnostics

	var excluded []addrs.AbsResourceInstance
	for _, result := range moveResults {
		fromMatchesTarget := false
		toMatchesTarget := false
		for _, targetAddr := range targets {
			if targetAddr.TargetContains(result.From) {
				fromMatchesTarget = true
			}
			if targetAddr.TargetContains(result.To) {
				toMatchesTarget = true
			}
		}
		if !fromMatchesTarget {
			excluded = append(excluded, result.From)
		}
		if !toMatchesTarget {
			excluded = append(excluded, result.To)
		}
	}
	if len(excluded) > 0 {
		sort.Slice(excluded, func(i, j int) bool {
			return excluded[i].Less(excluded[j])
		})

		var listBuf strings.Builder
		var prevResourceAddr addrs.AbsResource
		for _, instAddr := range excluded {
			// Targeting generally ends up selecting whole resources rather
			// than individual instances, because we don't factor in
			// individual instances until DynamicExpand, so we're going to
			// always show whole resource addresses here, excluding any
			// instance keys. (This also neatly avoids dealing with the
			// different quoting styles required for string instance keys
			// on different shells, which is handy.)
			//
			// To avoid showing duplicates when we have multiple instances
			// of the same resource, we'll remember the most recent
			// resource we rendered in prevResource, which is sufficient
			// because we sorted the list of instance addresses above, and
			// our sort order always groups together instances of the same
			// resource.
			resourceAddr := instAddr.ContainingResource()
			if resourceAddr.Equal(prevResourceAddr) {
				continue
			}
			fmt.Fprintf(&listBuf, "\n  -target=%q", resourceAddr.String())
			prevResourceAddr = resourceAddr
		}
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Moved resource instances excluded by targeting",
			fmt.Sprintf(
				"Resource instances in your current state have moved to new addresses in the latest configuration. Terraform must include those resource instances while planning in order to ensure a correct result, but your -target=... options do not fully cover all of those resource instances.\n\nTo create a valid plan, either remove your -target=... options altogether or add the following additional target options:%s\n\nNote that adding these options may include further additional resource instances in your plan, in order to respect object dependencies.",
				listBuf.String(),
			),
		))
	}

	return diags
}

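// postPlanValidateMoves validates the given move statements against the
// full set of resource instances expanded during the plan walk.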
func (c *Context) postPlanValidateMoves(config *configs.Config, stmts []refactoring.MoveStatement, allInsts instances.Set) tfdiags.Diagnostics {
	return refactoring.ValidateMoves(stmts, config, allInsts)
}

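// planWalk is the shared implementation behind the normal, refresh-only,
// and destroy plan modes: it applies any pending moves, builds and walks
// the plan graph for opts.Mode, and assembles the resulting plan along
// with any resource drift detected while refreshing.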
func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, rootVariables InputValues, opts *PlanOpts) (*plans.Plan, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	log.Printf("[DEBUG] Building and walking plan graph for %s", opts.Mode)

	prevRunState = prevRunState.DeepCopy() // don't modify the caller's object when we process the moves
	moveStmts, moveResults := c.prePlanFindAndApplyMoves(config, prevRunState, opts.Targets)

	// If resource targeting is in effect then it might conflict with the
	// move results.
	diags = diags.Append(c.prePlanVerifyTargetedMoves(moveResults, opts.Targets))
	if diags.HasErrors() {
		// We'll return early here, because if we have any moved resource
		// instances excluded by targeting then planning is likely to encounter
		// strange problems that may lead to confusing error messages.
		return nil, diags
	}

	graph, walkOp, moreDiags := c.planGraph(config, prevRunState, opts, true)
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	// If we get here then we should definitely have a non-nil "graph", which
	// we can now walk.
	changes := plans.NewChanges()
	walker, walkDiags := c.walk(graph, walkOp, &graphWalkOpts{
		Config:             config,
		InputState:         prevRunState,
		Changes:            changes,
		MoveResults:        moveResults,
		RootVariableValues: rootVariables,
	})
	diags = diags.Append(walker.NonFatalDiagnostics)
	diags = diags.Append(walkDiags)
	diags = diags.Append(c.postPlanValidateMoves(config, moveStmts, walker.InstanceExpander.AllInstances()))

	prevRunState = walker.PrevRunState.Close()
	priorState := walker.RefreshState.Close()
	driftedResources, driftDiags := c.driftedResources(config, prevRunState, priorState, moveResults)
	diags = diags.Append(driftDiags)

	plan := &plans.Plan{
		UIMode:           opts.Mode,
		Changes:          changes,
		DriftedResources: driftedResources,
		PrevRunState:     prevRunState,
		PriorState:       priorState,

		// Other fields get populated by Context.Plan after we return
	}
	return plan, diags
}

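// planGraph builds the graph for the requested plan mode and returns it
// along with the walk operation to use when traversing it. Refresh-only
// mode reuses the normal plan graph builder with skipPlanChanges set,
// while destroy mode uses a dedicated graph builder.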
func (c *Context) planGraph(config *configs.Config, prevRunState *states.State, opts *PlanOpts, validate bool) (*Graph, walkOperation, tfdiags.Diagnostics) {
	switch mode := opts.Mode; mode {
	case plans.NormalMode:
		graph, diags := (&PlanGraphBuilder{
			Config:       config,
			State:        prevRunState,
			Plugins:      c.plugins,
			Targets:      opts.Targets,
			ForceReplace: opts.ForceReplace,
			Validate:     validate,
			skipRefresh:  opts.SkipRefresh,
		}).Build(addrs.RootModuleInstance)
		return graph, walkPlan, diags
	case plans.RefreshOnlyMode:
		graph, diags := (&PlanGraphBuilder{
			Config:          config,
			State:           prevRunState,
			Plugins:         c.plugins,
			Targets:         opts.Targets,
			Validate:        validate,
			skipRefresh:     opts.SkipRefresh,
			skipPlanChanges: true, // this activates "refresh only" mode.
		}).Build(addrs.RootModuleInstance)
		return graph, walkPlan, diags
	case plans.DestroyMode:
		graph, diags := (&DestroyPlanGraphBuilder{
			Config:      config,
			State:       prevRunState,
			Plugins:     c.plugins,
			Targets:     opts.Targets,
			Validate:    validate,
			skipRefresh: opts.SkipRefresh,
		}).Build(addrs.RootModuleInstance)
		return graph, walkPlanDestroy, diags
	default:
		// The above should cover all plans.Mode values
		panic(fmt.Sprintf("unsupported plan mode %s", mode))
	}
}

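// driftedResources compares the managed resource instances in oldState and
// newState and returns a change entry for each instance that was deleted
// or modified outside of Terraform, or whose address changed due to a move.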
func (c *Context) driftedResources(config *configs.Config, oldState, newState *states.State, moves map[addrs.UniqueKey]refactoring.MoveResult) ([]*plans.ResourceInstanceChangeSrc, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if newState.ManagedResourcesEqual(oldState) && len(moves) == 0 {
		// Nothing to do, because we only detect and report drift for managed
		// resource instances.
		return nil, diags
	}

	schemas, schemaDiags := c.Schemas(config, newState)
	diags = diags.Append(schemaDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	var drs []*plans.ResourceInstanceChangeSrc

	for _, ms := range oldState.Modules {
		for _, rs := range ms.Resources {
			if rs.Addr.Resource.Mode != addrs.ManagedResourceMode {
				// Drift reporting is only for managed resources
				continue
			}

			provider := rs.ProviderConfig.Provider
			for key, oldIS := range rs.Instances {
				if oldIS.Current == nil {
					// Not interested in instances that only have deposed objects
					continue
				}
				addr := rs.Addr.Instance(key)

				// Previous run address defaults to the current address, but
				// can differ if the resource moved before refreshing
				prevRunAddr := addr
				if move, ok := moves[addr.UniqueKey()]; ok {
					prevRunAddr = move.From
				}

				newIS := newState.ResourceInstance(addr)

				schema, _ := schemas.ResourceTypeConfig(
					provider,
					addr.Resource.Resource.Mode,
					addr.Resource.Resource.Type,
				)
				if schema == nil {
					// This should never happen, but just in case
					return nil, diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Missing resource schema from provider",
						fmt.Sprintf("No resource schema found for %s.", addr.Resource.Resource.Type),
					))
				}
				ty := schema.ImpliedType()

				oldObj, err := oldIS.Current.Decode(ty)
				if err != nil {
					// This should also never happen
					return nil, diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Failed to decode resource from state",
						fmt.Sprintf("Error decoding %q from previous state: %s", addr.String(), err),
					))
				}

				var newObj *states.ResourceInstanceObject
				if newIS != nil && newIS.Current != nil {
					newObj, err = newIS.Current.Decode(ty)
					if err != nil {
						// This should also never happen
						return nil, diags.Append(tfdiags.Sourceless(
							tfdiags.Error,
							"Failed to decode resource from state",
							fmt.Sprintf("Error decoding %q from prior state: %s", addr.String(), err),
						))
					}
				}

				var oldVal, newVal cty.Value
				oldVal = oldObj.Value
				if newObj != nil {
					newVal = newObj.Value
				} else {
					newVal = cty.NullVal(ty)
				}

				if oldVal.RawEquals(newVal) && addr.Equal(prevRunAddr) {
					// No drift if the two values are semantically equivalent
					// and no move has happened
					continue
				}

				// We can detect three types of changes after refreshing state,
				// only two of which are easily understood as "drift":
				//
				// - Resources which were deleted outside of Terraform;
				// - Resources where the object value has changed outside of
				//   Terraform;
				// - Resources which have been moved without other changes.
				//
				// All of these are returned as drift, to allow refresh-only plans
				// to present a full set of changes which will be applied.
				var action plans.Action
				switch {
				case newVal.IsNull():
					action = plans.Delete
				case !oldVal.RawEquals(newVal):
					action = plans.Update
				default:
					action = plans.NoOp
				}

				change := &plans.ResourceInstanceChange{
					Addr:         addr,
					PrevRunAddr:  prevRunAddr,
					ProviderAddr: rs.ProviderConfig,
					Change: plans.Change{
						Action: action,
						Before: oldVal,
						After:  newVal,
					},
				}

				changeSrc, err := change.Encode(ty)
				if err != nil {
					diags = diags.Append(err)
					return nil, diags
				}

				drs = append(drs, changeSrc)
			}
		}
	}

	return drs, diags
}

// PlanGraphForUI is a last vestige of graphs in the public interface of Context
// (as opposed to graphs as an implementation detail) intended only for use
// by the "terraform graph" command when asked to render a plan-time graph.
//
// The result of this is intended only for rendering to the user as a dot
// graph, and so may change in future in order to make the result more useful
// in that context, even if it drifts away from the physical graph that Terraform
// Core currently uses as an implementation detail of planning.
func (c *Context) PlanGraphForUI(config *configs.Config, prevRunState *states.State, mode plans.Mode) (*Graph, tfdiags.Diagnostics) {
	// For now though, this really is just the internal graph, confusing
	// implementation details and all.

	var diags tfdiags.Diagnostics

	opts := &PlanOpts{Mode: mode}

	graph, _, moreDiags := c.planGraph(config, prevRunState, opts, false)
	diags = diags.Append(moreDiags)
	return graph, diags
}