github.com/kanishk98/terraform@v1.3.0-dev.0.20220917174235-661ca8088a6a/internal/terraform/node_resource_plan_instance.go

     1  package terraform
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"sort"
     7  
     8  	"github.com/hashicorp/terraform/internal/instances"
     9  	"github.com/hashicorp/terraform/internal/plans"
    10  	"github.com/hashicorp/terraform/internal/states"
    11  	"github.com/hashicorp/terraform/internal/tfdiags"
    12  	"github.com/zclconf/go-cty/cty"
    13  
    14  	"github.com/hashicorp/terraform/internal/addrs"
    15  )
    16  
    17  // NodePlannableResourceInstance represents a _single_ resource
    18  // instance that is plannable. This means this represents a single
    19  // count index, for example.
    20  type NodePlannableResourceInstance struct {
    21  	*NodeAbstractResourceInstance
    22  	ForceCreateBeforeDestroy bool
    23  
    24  	// skipRefresh indicates that we should skip refreshing individual instances.
    25  	skipRefresh bool
    26  
    27  	// skipPlanChanges indicates we should skip trying to plan change actions
    28  	// for any instances.
    29  	skipPlanChanges bool
    30  
    31  	// forceReplace are resource instance addresses where the user wants to
    32  	// force generating a replace action. This set isn't pre-filtered, so
    33  	// it might contain addresses that have nothing to do with the resource
    34  	// that this node represents, which the node itself must therefore ignore.
    35  	forceReplace []addrs.AbsResourceInstance
    36  
    37  	// replaceTriggeredBy stores references from replace_triggered_by which
    38  	// triggered this instance to be replaced.
    39  	replaceTriggeredBy []*addrs.Reference
    40  }
    41  
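        // Compile-time assertions that NodePlannableResourceInstance
        // implements the graph node interfaces required during planning.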
    42  var (
    43  	_ GraphNodeModuleInstance       = (*NodePlannableResourceInstance)(nil)
    44  	_ GraphNodeReferenceable        = (*NodePlannableResourceInstance)(nil)
    45  	_ GraphNodeReferencer           = (*NodePlannableResourceInstance)(nil)
    46  	_ GraphNodeConfigResource       = (*NodePlannableResourceInstance)(nil)
    47  	_ GraphNodeResourceInstance     = (*NodePlannableResourceInstance)(nil)
    48  	_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
    49  	_ GraphNodeAttachResourceState  = (*NodePlannableResourceInstance)(nil)
    50  	_ GraphNodeExecutable           = (*NodePlannableResourceInstance)(nil)
    51  )
    52  
    53  // GraphNodeExecutable
    54  func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
    55  	addr := n.ResourceInstanceAddr()
    56  
    57  	// Eval info is different depending on what kind of resource this is
    58  	switch addr.Resource.Resource.Mode {
    59  	case addrs.ManagedResourceMode:
    60  		return n.managedResourceExecute(ctx)
    61  	case addrs.DataResourceMode:
    62  		return n.dataResourceExecute(ctx)
    63  	default:
    64  		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
    65  	}
    66  }
    67  
    68  func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
    69  	config := n.Config
    70  	addr := n.ResourceInstanceAddr()
    71  
    72  	var change *plans.ResourceInstanceChange
    73  
    74  	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
    75  	diags = diags.Append(err)
    76  	if diags.HasErrors() {
    77  		return diags
    78  	}
    79  
    80  	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
    81  	if diags.HasErrors() {
    82  		return diags
    83  	}
    84  
    85  	checkRuleSeverity := tfdiags.Error
    86  	if n.skipPlanChanges {
    87  		checkRuleSeverity = tfdiags.Warning
    88  	}
    89  
    90  	change, state, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity)
    91  	diags = diags.Append(planDiags)
    92  	if diags.HasErrors() {
    93  		return diags
    94  	}
    95  
    96  	// write the data source into both the refresh state and the
    97  	// working state
    98  	diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
    99  	if diags.HasErrors() {
   100  		return diags
   101  	}
   102  	diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
   103  	if diags.HasErrors() {
   104  		return diags
   105  	}
   106  
   107  	diags = diags.Append(n.writeChange(ctx, change, ""))
   108  
   109  	// Post-conditions might block further progress. We intentionally do this
   110  	// _after_ writing the state/diff because we want to check against
   111  	// the result of the operation, and to fail on future operations
   112  	// until the user makes the condition succeed.
   113  	checkDiags := evalCheckRules(
   114  		addrs.ResourcePostcondition,
   115  		n.Config.Postconditions,
   116  		ctx, addr, repeatData,
   117  		checkRuleSeverity,
   118  	)
   119  	diags = diags.Append(checkDiags)
   120  
   121  	return diags
   122  }
   123  
   124  func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
   125  	config := n.Config
   126  	addr := n.ResourceInstanceAddr()
   127  
   128  	var change *plans.ResourceInstanceChange
   129  	var instanceRefreshState *states.ResourceInstanceObject
   130  
   131  	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
   132  	diags = diags.Append(err)
   133  	if diags.HasErrors() {
   134  		return diags
   135  	}
   136  
   137  	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
   138  	if diags.HasErrors() {
   139  		return diags
   140  	}
   141  
   142  	instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr)
   143  	diags = diags.Append(readDiags)
   144  	if diags.HasErrors() {
   145  		return diags
   146  	}
   147  
   148  	// We'll save a snapshot of what we just read from the state into the
   149  	// prevRunState before we do anything else, since this will capture the
   150  	// result of any schema upgrading that readResourceInstanceState just did,
   151  	// but not include any out-of-band changes we might detect in the
   152  	// refresh step below.
   153  	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
   154  	if diags.HasErrors() {
   155  		return diags
   156  	}
   157  	// Also the refreshState, because that should still reflect schema upgrades
   158  	// even if it doesn't reflect upstream changes.
   159  	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   160  	if diags.HasErrors() {
   161  		return diags
   162  	}
   163  
   164  	// In 0.13 we could be refreshing a resource with no config.
   165  	// We should be operating on a managed resource, but check here to be certain.
   166  	if n.Config == nil || n.Config.Managed == nil {
   167  		log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr)
   168  	} else {
   169  		if instanceRefreshState != nil {
   170  			instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy
   171  		}
   172  	}
   173  
   174  	// Refresh, maybe
   175  	if !n.skipRefresh {
   176  		s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
   177  		diags = diags.Append(refreshDiags)
   178  		if diags.HasErrors() {
   179  			return diags
   180  		}
   181  
   182  		instanceRefreshState = s
   183  
   184  		if instanceRefreshState != nil {
   185  			// When refreshing we start by merging the stored dependencies and
   186  			// the configured dependencies. The configured dependencies will be
   187  			// stored to state once the changes are applied. If the plan
   188  			// results in no changes, we will re-write these dependencies
   189  			// below.
   190  			instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
   191  		}
   192  
   193  		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   194  		if diags.HasErrors() {
   195  			return diags
   196  		}
   197  	}
   198  
   199  	// Plan the instance, unless we're in refresh-only mode
   200  	if !n.skipPlanChanges {
   201  
   202  		// Using the repetition data built below, add this instance to
   203  		// n.forceReplace if replacement is triggered by another change.
   204  		repData := instances.RepetitionData{}
   205  		switch k := addr.Resource.Key.(type) {
   206  		case addrs.IntKey:
   207  			repData.CountIndex = k.Value()
   208  		case addrs.StringKey:
   209  			repData.EachKey = k.Value()
   210  			repData.EachValue = cty.DynamicVal
   211  		}
   212  
   213  		diags = diags.Append(n.replaceTriggered(ctx, repData))
   214  		if diags.HasErrors() {
   215  			return diags
   216  		}
   217  
   218  		change, instancePlanState, repeatData, planDiags := n.plan(
   219  			ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
   220  		)
   221  		diags = diags.Append(planDiags)
   222  		if diags.HasErrors() {
   223  			return diags
   224  		}
   225  
   226  		// FIXME: here we update the change to reflect the reason for
   227  		// replacement, but we still overload forceReplace to get the correct
   228  		// change planned.
   229  		if len(n.replaceTriggeredBy) > 0 {
   230  			change.ActionReason = plans.ResourceInstanceReplaceByTriggers
   231  		}
   232  
   233  		diags = diags.Append(n.checkPreventDestroy(change))
   234  		if diags.HasErrors() {
   235  			return diags
   236  		}
   237  
   238  		// FIXME: it is currently important that we write resource changes to
   239  		// the plan (n.writeChange) before we write the corresponding state
   240  		// (n.writeResourceInstanceState).
   241  		//
   242  		// This is because the planned resource state will normally have the
   243  		// status of states.ObjectPlanned, which causes later logic to refer to
   244  		// the contents of the plan to retrieve the resource data. Because
   245  		// there is no shared lock between these two data structures, reversing
   246  		// the order of these writes will cause a brief window of inconsistency
   247  		// which can lead to a failed safety check.
   248  		//
   249  		// Future work should adjust these APIs such that it is impossible to
   250  		// update these two data structures incorrectly through any objects
   251  		// reachable via the terraform.EvalContext API.
   252  		diags = diags.Append(n.writeChange(ctx, change, ""))
   253  
   254  		diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
   255  		if diags.HasErrors() {
   256  			return diags
   257  		}
   258  
   259  		// If this plan resulted in a NoOp, then apply won't have a chance to make
   260  		// any changes to the stored dependencies. Since this is a NoOp we know
   261  		// that the stored dependencies will have no effect during apply, and we can
   262  		// write them out now.
   263  		if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
   264  			// the refresh state will be the final state for this resource, so
   265  			// finalize the dependencies here if they need to be updated.
   266  			instanceRefreshState.Dependencies = n.Dependencies
   267  			diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   268  			if diags.HasErrors() {
   269  				return diags
   270  			}
   271  		}
   272  
   273  		// Post-conditions might block completion. We intentionally do this
   274  		// _after_ writing the state/diff because we want to check against
   275  		// the result of the operation, and to fail on future operations
   276  		// until the user makes the condition succeed.
   277  		// (Note that some preconditions will end up being skipped during
   278  		// planning, because their conditions depend on values not yet known.)
   279  		checkDiags := evalCheckRules(
   280  			addrs.ResourcePostcondition,
   281  			n.Config.Postconditions,
   282  			ctx, n.ResourceInstanceAddr(), repeatData,
   283  			tfdiags.Error,
   284  		)
   285  		diags = diags.Append(checkDiags)
   286  	} else {
   287  		// In refresh-only mode we need to evaluate the for-each expression in
   288  		// order to supply the value to the pre- and post-condition check
   289  		// blocks. This has the unfortunate edge case of a refresh-only plan
   290  		// executing with a for-each map which has the same keys but different
   291  		// values, which could result in a post-condition check relying on that
   292  		// value being inaccurate. Unless we decide to store the value of the
   293  		// for-each expression in state, this is unavoidable.
   294  		forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx)
   295  		repeatData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
   296  
   297  		checkDiags := evalCheckRules(
   298  			addrs.ResourcePrecondition,
   299  			n.Config.Preconditions,
   300  			ctx, addr, repeatData,
   301  			tfdiags.Warning,
   302  		)
   303  		diags = diags.Append(checkDiags)
   304  
   305  		// Even if we don't plan changes, we do still need to at least update
   306  		// the working state to reflect the refresh result. If not, then e.g.
   307  		// any output values referring to this will not react to the drift.
   308  		// (Even if we didn't actually refresh above, this will still save
   309  		// the result of any schema upgrading we did in readResourceInstanceState.)
   310  		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState))
   311  		if diags.HasErrors() {
   312  			return diags
   313  		}
   314  
   315  		// Here we also evaluate post-conditions after updating the working
   316  		// state, because we want to check against the result of the refresh.
   317  		// Unlike in normal planning mode, these checks are still evaluated
   318  		// even if pre-conditions generated diagnostics, because we have no
   319  		// planned changes to block.
   320  		checkDiags = evalCheckRules(
   321  			addrs.ResourcePostcondition,
   322  			n.Config.Postconditions,
   323  			ctx, addr, repeatData,
   324  			tfdiags.Warning,
   325  		)
   326  		diags = diags.Append(checkDiags)
   327  	}
   328  
   329  	return diags
   330  }
   331  
   332  // replaceTriggered checks if this instance needs to be replaced due to a
   333  // change in a replace_triggered_by reference. If replacement is required,
   334  // the instance address is added to forceReplace.
   335  func (n *NodePlannableResourceInstance) replaceTriggered(ctx EvalContext, repData instances.RepetitionData) tfdiags.Diagnostics {
   336  	var diags tfdiags.Diagnostics
   337  
   338  	for _, expr := range n.Config.TriggersReplacement {
   339  		ref, replace, evalDiags := ctx.EvaluateReplaceTriggeredBy(expr, repData)
   340  		diags = diags.Append(evalDiags)
   341  		if diags.HasErrors() {
   342  			continue
   343  		}
   344  
   345  		if replace {
   346  			// FIXME: forceReplace accomplishes the same goal, however we may
   347  			// want to communicate more information about which resource
   348  			// triggered the replacement in the plan.
   349  			// Rather than further complicating the plan method with more
   350  			// options, we can refactor both of these features later.
   351  			n.forceReplace = append(n.forceReplace, n.Addr)
   352  			log.Printf("[DEBUG] ReplaceTriggeredBy forcing replacement of %s due to change in %s", n.Addr, ref.DisplayString())
   353  
   354  			n.replaceTriggeredBy = append(n.replaceTriggeredBy, ref)
   355  			break
   356  		}
   357  	}
   358  
   359  	return diags
   360  }
   361  
   362  // mergeDeps returns the union of two sets of dependencies.
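        // The result is deduplicated by each address's string representation;
        // for example, merging [a, b] with [b, c] yields a, b, and c in no
        // particular order.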
   363  func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
   364  	switch {
   365  	case len(a) == 0:
   366  		return b
   367  	case len(b) == 0:
   368  		return a
   369  	}
   370  
   371  	set := make(map[string]addrs.ConfigResource)
   372  
   373  	for _, dep := range a {
   374  		set[dep.String()] = dep
   375  	}
   376  
   377  	for _, dep := range b {
   378  		set[dep.String()] = dep
   379  	}
   380  
   381  	newDeps := make([]addrs.ConfigResource, 0, len(set))
   382  	for _, dep := range set {
   383  		newDeps = append(newDeps, dep)
   384  	}
   385  
   386  	return newDeps
   387  }
   388  
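        // depsEqual reports whether a and b contain the same dependencies,
        // ignoring order.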
   389  func depsEqual(a, b []addrs.ConfigResource) bool {
   390  	if len(a) != len(b) {
   391  		return false
   392  	}
   393  
   394  	// Because we need to sort the deps to compare equality, make shallow
   395  	// copies to prevent concurrently modifying the array values on
   396  	// dependencies shared between expanded instances.
   397  	copyA, copyB := make([]addrs.ConfigResource, len(a)), make([]addrs.ConfigResource, len(b))
   398  	copy(copyA, a)
   399  	copy(copyB, b)
   400  	a, b = copyA, copyB
   401  
   402  	less := func(s []addrs.ConfigResource) func(i, j int) bool {
   403  		return func(i, j int) bool {
   404  			return s[i].String() < s[j].String()
   405  		}
   406  	}
   407  
   408  	sort.Slice(a, less(a))
   409  	sort.Slice(b, less(b))
   410  
   411  	for i := range a {
   412  		if !a[i].Equal(b[i]) {
   413  			return false
   414  		}
   415  	}
   416  	return true
   417  }