github.com/jaredpalmer/terraform@v1.1.0-alpha20210908.0.20210911170307-88705c943a03/internal/terraform/node_count_boundary.go

package terraform

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/internal/addrs"
	"github.com/hashicorp/terraform/internal/configs"
	"github.com/hashicorp/terraform/internal/tfdiags"
)

// NodeCountBoundary fixes up any transitions between "each modes" in objects
// saved in state, such as switching from NoEach to EachInt.
type NodeCountBoundary struct {
	Config *configs.Config
}

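// Compile-time check that NodeCountBoundary implements GraphNodeExecutable.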
var _ GraphNodeExecutable = (*NodeCountBoundary)(nil)

func (n *NodeCountBoundary) Name() string {
	return "meta.count-boundary (EachMode fixup)"
}

// GraphNodeExecutable
func (n *NodeCountBoundary) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
	// We'll temporarily lock the state to grab the modules, then work on each
	// one separately while taking a lock again for each separate resource.
	// This means that if another caller concurrently adds a module here while
	// we're working then we won't update it, but that's no worse than the
	// concurrent writer blocking for our entire fixup process and _then_
	// adding a new module, and in practice the graph node associated with
	// this eval depends on everything else in the graph anyway, so there
	// should not be concurrent writers.
	state := ctx.State().Lock()
	moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules))
	for _, m := range state.Modules {
		moduleAddrs = append(moduleAddrs, m.Addr)
	}
	ctx.State().Unlock()

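	// Work through each module in the snapshot we took above, skipping any
	// module that no longer has corresponding configuration.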
	for _, addr := range moduleAddrs {
		cfg := n.Config.DescendentForInstance(addr)
		if cfg == nil {
			log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
			continue
		}
		if err := n.fixModule(ctx, addr); err != nil {
			diags = diags.Append(err)
			return diags
		}
	}
	return diags
}

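// fixModule checks each resource in the given module's state against its
// configuration and normalizes any mismatch between whether the resource
// uses "count" and how its instances are keyed in state.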
func (n *NodeCountBoundary) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error {
	ms := ctx.State().Module(moduleAddr)
	cfg := n.Config.DescendentForInstance(moduleAddr)
	if ms == nil {
		// Theoretically possible for a concurrent writer to delete a module
		// while we're running, but in practice the graph node that called us
		// depends on everything else in the graph and so there can never
		// be a concurrent writer.
		return fmt.Errorf("no state found for %s while trying to fix up EachModes", moduleAddr)
	}
	if cfg == nil {
		return fmt.Errorf("no config found for %s while trying to fix up EachModes", moduleAddr)
	}

	for _, r := range ms.Resources {
		rCfg := cfg.Module.ResourceByAddr(r.Addr.Resource)
		if rCfg == nil {
			log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", r.Addr)
			continue
		}
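		// A resource with "count" set tracks its instances under integer
		// keys, while one without it has a single no-key instance, so when
		// the configuration switches between the two the existing state
		// entries must be re-keyed to match.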
		hasCount := rCfg.Count != nil
		fixResourceCountSetTransition(ctx, r.Addr.Config(), hasCount)
	}

	return nil
}