github.com/pulumi/terraform@v1.4.0/pkg/terraform/node_resource_plan_instance.go (about)

     1  package terraform
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"sort"
     7  
     8  	"github.com/pulumi/terraform/pkg/instances"
     9  	"github.com/pulumi/terraform/pkg/plans"
    10  	"github.com/pulumi/terraform/pkg/states"
    11  	"github.com/pulumi/terraform/pkg/tfdiags"
    12  	"github.com/zclconf/go-cty/cty"
    13  
    14  	"github.com/pulumi/terraform/pkg/addrs"
    15  )
    16  
    17  // NodePlannableResourceInstance represents a _single_ resource
    18  // instance that is plannable. This means this represents a single
    19  // count index, for example.
    20  type NodePlannableResourceInstance struct {
    21  	*NodeAbstractResourceInstance
    22  	ForceCreateBeforeDestroy bool
    23  
    24  	// skipRefresh indicates that we should skip refreshing individual instances
    25  	skipRefresh bool
    26  
    27  	// skipPlanChanges indicates we should skip trying to plan change actions
    28  	// for any instances.
    29  	skipPlanChanges bool
    30  
    31  	// forceReplace are resource instance addresses where the user wants to
    32  	// force generating a replace action. This set isn't pre-filtered, so
    33  	// it might contain addresses that have nothing to do with the resource
    34  	// that this node represents, which the node itself must therefore ignore.
    35  	forceReplace []addrs.AbsResourceInstance
    36  
    37  	// replaceTriggeredBy stores references from replace_triggered_by which
    38  	// triggered this instance to be replaced.
    39  	replaceTriggeredBy []*addrs.Reference
    40  }
    41  
    42  var (
    43  	_ GraphNodeModuleInstance       = (*NodePlannableResourceInstance)(nil)
    44  	_ GraphNodeReferenceable        = (*NodePlannableResourceInstance)(nil)
    45  	_ GraphNodeReferencer           = (*NodePlannableResourceInstance)(nil)
    46  	_ GraphNodeConfigResource       = (*NodePlannableResourceInstance)(nil)
    47  	_ GraphNodeResourceInstance     = (*NodePlannableResourceInstance)(nil)
    48  	_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
    49  	_ GraphNodeAttachResourceState  = (*NodePlannableResourceInstance)(nil)
    50  	_ GraphNodeExecutable           = (*NodePlannableResourceInstance)(nil)
    51  )
    52  
    53  // GraphNodeExecutable
    54  func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
    55  	addr := n.ResourceInstanceAddr()
    56  
    57  	// Eval info is different depending on what kind of resource this is
    58  	switch addr.Resource.Resource.Mode {
    59  	case addrs.ManagedResourceMode:
    60  		return n.managedResourceExecute(ctx)
    61  	case addrs.DataResourceMode:
    62  		return n.dataResourceExecute(ctx)
    63  	default:
    64  		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
    65  	}
    66  }
    67  
    68  func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
    69  	config := n.Config
    70  	addr := n.ResourceInstanceAddr()
    71  
    72  	var change *plans.ResourceInstanceChange
    73  
    74  	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
    75  	diags = diags.Append(err)
    76  	if diags.HasErrors() {
    77  		return diags
    78  	}
    79  
    80  	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
    81  	if diags.HasErrors() {
    82  		return diags
    83  	}
    84  
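        	// During a refresh-only plan or a pre-destroy refresh, custom condition
        	// failures are reported as warnings rather than errors so that they
        	// don't block the operation.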
    85  	checkRuleSeverity := tfdiags.Error
    86  	if n.skipPlanChanges || n.preDestroyRefresh {
    87  		checkRuleSeverity = tfdiags.Warning
    88  	}
    89  
    90  	change, state, repeatData, planDiags := n.planDataSource(ctx, checkRuleSeverity, n.skipPlanChanges)
    91  	diags = diags.Append(planDiags)
    92  	if diags.HasErrors() {
    93  		return diags
    94  	}
    95  
    96  	// write the data source into both the refresh state and the
    97  	// working state
    98  	diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
    99  	if diags.HasErrors() {
   100  		return diags
   101  	}
   102  	diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
   103  	if diags.HasErrors() {
   104  		return diags
   105  	}
   106  
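        	// Record the planned change for this data source in the plan.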
   107  	diags = diags.Append(n.writeChange(ctx, change, ""))
   108  
   109  	// Post-conditions might block further progress. We intentionally do this
   110  	// _after_ writing the state/diff because we want to check against
   111  	// the result of the operation, and to fail on future operations
   112  	// until the user makes the condition succeed.
   113  	checkDiags := evalCheckRules(
   114  		addrs.ResourcePostcondition,
   115  		n.Config.Postconditions,
   116  		ctx, addr, repeatData,
   117  		checkRuleSeverity,
   118  	)
   119  	diags = diags.Append(checkDiags)
   120  
   121  	return diags
   122  }
   123  
   124  func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
   125  	config := n.Config
   126  	addr := n.ResourceInstanceAddr()
   127  
   128  	var change *plans.ResourceInstanceChange
   129  	var instanceRefreshState *states.ResourceInstanceObject
   130  
   131  	checkRuleSeverity := tfdiags.Error
   132  	if n.skipPlanChanges || n.preDestroyRefresh {
   133  		checkRuleSeverity = tfdiags.Warning
   134  	}
   135  
   136  	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
   137  	diags = diags.Append(err)
   138  	if diags.HasErrors() {
   139  		return diags
   140  	}
   141  
   142  	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
   143  	if diags.HasErrors() {
   144  		return diags
   145  	}
   146  
   147  	instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr)
   148  	diags = diags.Append(readDiags)
   149  	if diags.HasErrors() {
   150  		return diags
   151  	}
   152  
   153  	// We'll save a snapshot of what we just read from the state into the
   154  	// prevRunState before we do anything else, since this will capture the
   155  	// result of any schema upgrading that readResourceInstanceState just did,
   156  	// but not include any out-of-band changes we might detect in the
   157  	// refresh step below.
   158  	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
   159  	if diags.HasErrors() {
   160  		return diags
   161  	}
   162  	// Also the refreshState, because that should still reflect schema upgrades
   163  	// even if it doesn't reflect upstream changes.
   164  	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   165  	if diags.HasErrors() {
   166  		return diags
   167  	}
   168  
   169  	// In 0.13 we could be refreshing a resource with no config.
   170  	// We should be operating on a managed resource, but check here to be certain
   171  	if n.Config == nil || n.Config.Managed == nil {
   172  		log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr)
   173  	} else {
   174  		if instanceRefreshState != nil {
   175  			instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy
   176  		}
   177  	}
   178  
   179  	// Refresh, maybe
   180  	if !n.skipRefresh {
   181  		s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
   182  		diags = diags.Append(refreshDiags)
   183  		if diags.HasErrors() {
   184  			return diags
   185  		}
   186  
   187  		instanceRefreshState = s
   188  
   189  		if instanceRefreshState != nil {
   190  			// When refreshing we start by merging the stored dependencies and
   191  			// the configured dependencies. The configured dependencies will be
   192  			// stored to state once the changes are applied. If the plan
   193  			// results in no changes, we will re-write these dependencies
   194  			// below.
   195  			instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
   196  		}
   197  
   198  		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   199  		if diags.HasErrors() {
   200  			return diags
   201  		}
   202  	}
   203  
   204  	// Plan the instance, unless we're in refresh-only mode
   205  	if !n.skipPlanChanges {
   206  
   207  		// add this instance to n.forceReplace if replacement is triggered by
   208  		// another change
   209  		repData := instances.RepetitionData{}
   210  		switch k := addr.Resource.Key.(type) {
   211  		case addrs.IntKey:
   212  			repData.CountIndex = k.Value()
   213  		case addrs.StringKey:
   214  			repData.EachKey = k.Value()
   215  			repData.EachValue = cty.DynamicVal
   216  		}
   217  
   218  		diags = diags.Append(n.replaceTriggered(ctx, repData))
   219  		if diags.HasErrors() {
   220  			return diags
   221  		}
   222  
   223  		change, instancePlanState, repeatData, planDiags := n.plan(
   224  			ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
   225  		)
   226  		diags = diags.Append(planDiags)
   227  		if diags.HasErrors() {
   228  			return diags
   229  		}
   230  
   231  		// FIXME: here we update the change to reflect the reason for
   232  		// replacement, but we still overload forceReplace to get the correct
   233  		// change planned.
   234  		if len(n.replaceTriggeredBy) > 0 {
   235  			change.ActionReason = plans.ResourceInstanceReplaceByTriggers
   236  		}
   237  
   238  		diags = diags.Append(n.checkPreventDestroy(change))
   239  		if diags.HasErrors() {
   240  			return diags
   241  		}
   242  
   243  		// FIXME: it is currently important that we write resource changes to
   244  		// the plan (n.writeChange) before we write the corresponding state
   245  		// (n.writeResourceInstanceState).
   246  		//
   247  		// This is because the planned resource state will normally have the
   248  		// status of states.ObjectPlanned, which causes later logic to refer to
   249  		// the contents of the plan to retrieve the resource data. Because
   250  		// there is no shared lock between these two data structures, reversing
   251  		// the order of these writes will cause a brief window of inconsistency
   252  		// which can lead to a failed safety check.
   253  		//
   254  		// Future work should adjust these APIs such that it is impossible to
   255  		// update these two data structures incorrectly through any objects
   256  		// reachable via the terraform.EvalContext API.
   257  		diags = diags.Append(n.writeChange(ctx, change, ""))
   258  
   259  		diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
   260  		if diags.HasErrors() {
   261  			return diags
   262  		}
   263  
   264  		// If this plan resulted in a NoOp, then apply won't have a chance to make
   265  		// any changes to the stored dependencies. Since this is a NoOp we know
   266  		// that the stored dependencies will have no effect during apply, and we can
   267  		// write them out now.
   268  		if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
   269  			// the refresh state will be the final state for this resource, so
   270  			// finalize the dependencies here if they need to be updated.
   271  			instanceRefreshState.Dependencies = n.Dependencies
   272  			diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
   273  			if diags.HasErrors() {
   274  				return diags
   275  			}
   276  		}
   277  
   278  		// Post-conditions might block completion. We intentionally do this
   279  		// _after_ writing the state/diff because we want to check against
   280  		// the result of the operation, and to fail on future operations
   281  		// until the user makes the condition succeed.
   282  		// (Note that some preconditions will end up being skipped during
   283  		// planning, because their conditions depend on values not yet known.)
   284  		checkDiags := evalCheckRules(
   285  			addrs.ResourcePostcondition,
   286  			n.Config.Postconditions,
   287  			ctx, n.ResourceInstanceAddr(), repeatData,
   288  			checkRuleSeverity,
   289  		)
   290  		diags = diags.Append(checkDiags)
   291  	} else {
   292  		// In refresh-only mode we need to evaluate the for-each expression in
   293  		// order to supply the value to the pre- and post-condition check
   294  		// blocks. This has the unfortunate edge case of a refresh-only plan
   295  		// executing with a for-each map which has the same keys but different
   296  		// values, which could result in a post-condition check relying on that
   297  		// value being inaccurate. Unless we decide to store the value of the
   298  		// for-each expression in state, this is unavoidable.
   299  		forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx)
   300  		repeatData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach)
   301  
   302  		checkDiags := evalCheckRules(
   303  			addrs.ResourcePrecondition,
   304  			n.Config.Preconditions,
   305  			ctx, addr, repeatData,
   306  			checkRuleSeverity,
   307  		)
   308  		diags = diags.Append(checkDiags)
   309  
   310  		// Even if we don't plan changes, we do still need to at least update
   311  		// the working state to reflect the refresh result. If not, then e.g.
   312  		// any output values referring to this will not react to the drift.
   313  		// (Even if we didn't actually refresh above, this will still save
   314  		// the result of any schema upgrading we did in readResourceInstanceState.)
   315  		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState))
   316  		if diags.HasErrors() {
   317  			return diags
   318  		}
   319  
   320  		// Here we also evaluate post-conditions after updating the working
   321  		// state, because we want to check against the result of the refresh.
   322  		// Unlike in normal planning mode, these checks are still evaluated
   323  		// even if pre-conditions generated diagnostics, because we have no
   324  		// planned changes to block.
   325  		checkDiags = evalCheckRules(
   326  			addrs.ResourcePostcondition,
   327  			n.Config.Postconditions,
   328  			ctx, addr, repeatData,
   329  			checkRuleSeverity,
   330  		)
   331  		diags = diags.Append(checkDiags)
   332  	}
   333  
   334  	return diags
   335  }
   336  
   337  // replaceTriggered checks whether this instance needs to be replaced due to
   338  // a change in a replace_triggered_by reference. If replacement is required,
   339  // the instance address is added to forceReplace.
   340  func (n *NodePlannableResourceInstance) replaceTriggered(ctx EvalContext, repData instances.RepetitionData) tfdiags.Diagnostics {
   341  	var diags tfdiags.Diagnostics
   342  
   343  	for _, expr := range n.Config.TriggersReplacement {
   344  		ref, replace, evalDiags := ctx.EvaluateReplaceTriggeredBy(expr, repData)
   345  		diags = diags.Append(evalDiags)
   346  		if diags.HasErrors() {
   347  			continue
   348  		}
   349  
   350  		if replace {
   351  			// FIXME: forceReplace accomplishes the same goal, however we may
   352  			// want to communicate more information about which resource
   353  			// triggered the replacement in the plan.
   354  			// Rather than further complicating the plan method with more
   355  			// options, we can refactor both of these features later.
   356  			n.forceReplace = append(n.forceReplace, n.Addr)
   357  			log.Printf("[DEBUG] ReplaceTriggeredBy forcing replacement of %s due to change in %s", n.Addr, ref.DisplayString())
   358  
   359  			n.replaceTriggeredBy = append(n.replaceTriggeredBy, ref)
   360  			break
   361  		}
   362  	}
   363  
   364  	return diags
   365  }
   366  
   367  // mergeDeps returns the union of two sets of dependencies.
   368  func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
   369  	switch {
   370  	case len(a) == 0:
   371  		return b
   372  	case len(b) == 0:
   373  		return a
   374  	}
   375  
   376  	set := make(map[string]addrs.ConfigResource)
   377  
   378  	for _, dep := range a {
   379  		set[dep.String()] = dep
   380  	}
   381  
   382  	for _, dep := range b {
   383  		set[dep.String()] = dep
   384  	}
   385  
   386  	newDeps := make([]addrs.ConfigResource, 0, len(set))
   387  	for _, dep := range set {
   388  		newDeps = append(newDeps, dep)
   389  	}
   390  
   391  	return newDeps
   392  }
   393  
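        // depsEqual reports whether a and b contain the same set of dependencies,
        // ignoring their order.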
   394  func depsEqual(a, b []addrs.ConfigResource) bool {
   395  	if len(a) != len(b) {
   396  		return false
   397  	}
   398  
   399  	// Because we need to sort the deps to compare equality, make shallow
   400  	// copies first so that we don't concurrently modify slices that are
   401  	// shared between expanded instances.
   402  	copyA, copyB := make([]addrs.ConfigResource, len(a)), make([]addrs.ConfigResource, len(b))
   403  	copy(copyA, a)
   404  	copy(copyB, b)
   405  	a, b = copyA, copyB
   406  
   407  	less := func(s []addrs.ConfigResource) func(i, j int) bool {
   408  		return func(i, j int) bool {
   409  			return s[i].String() < s[j].String()
   410  		}
   411  	}
   412  
   413  	sort.Slice(a, less(a))
   414  	sort.Slice(b, less(b))
   415  
   416  	for i := range a {
   417  		if !a[i].Equal(b[i]) {
   418  			return false
   419  		}
   420  	}
   421  	return true
   422  }