github.com/pulumi/terraform@v1.4.0/pkg/backend/local/backend_apply.go

package local

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/pulumi/terraform/pkg/backend"
	"github.com/pulumi/terraform/pkg/command/views"
	"github.com/pulumi/terraform/pkg/logging"
	"github.com/pulumi/terraform/pkg/plans"
	"github.com/pulumi/terraform/pkg/states"
	"github.com/pulumi/terraform/pkg/states/statefile"
	"github.com/pulumi/terraform/pkg/states/statemgr"
	"github.com/pulumi/terraform/pkg/terraform"
	"github.com/pulumi/terraform/pkg/tfdiags"
)

// test hook called between plan+apply during opApply
var testHookStopPlanApply func()

func (b *Local) opApply(
	stopCtx context.Context,
	cancelCtx context.Context,
	op *backend.Operation,
	runningOp *backend.RunningOperation) {
	log.Printf("[INFO] backend/local: starting Apply operation")

	var diags, moreDiags tfdiags.Diagnostics

	// Applying with no configuration and no saved plan would mark everything
	// for destruction, so unless we're in destroy mode we require configuration
	// files to be present.
	if op.PlanFile == nil && op.PlanMode != plans.DestroyMode && !op.HasConfig() {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"No configuration files",
			"Apply requires configuration to be present. Applying without a configuration "+
				"would mark everything for destruction, which is normally not what is desired. "+
				"If you would like to destroy everything, run 'terraform destroy' instead.",
		))
		op.ReportResult(runningOp, diags)
		return
	}

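	// Register a state hook so that intermediate state snapshots can be
	// persisted while the apply is running; its state manager is assigned
	// below, just before the apply starts.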
	stateHook := new(StateHook)
	op.Hooks = append(op.Hooks, stateHook)

	// Get our context
	lr, _, opState, contextDiags := b.localRun(op)
	diags = diags.Append(contextDiags)
	if contextDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}
	// the state was locked during successful context creation; unlock the state
	// when the operation completes
	defer func() {
		diags := op.StateLocker.Unlock()
		if diags.HasErrors() {
			op.View.Diagnostics(diags)
			runningOp.Result = backend.OperationFailure
		}
	}()

	// We'll start off with our result being the input state, and replace it
	// with the result state only if we eventually complete the apply
	// operation.
	runningOp.State = lr.InputState

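	// Load the schemas up front; they're needed both to render the plan and
	// to write and persist the final state below.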
	schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState)
	diags = diags.Append(moreDiags)
	if moreDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}

	var plan *plans.Plan
	// If we weren't given a plan, then we refresh/plan
	if op.PlanFile == nil {
		// Perform the plan
		log.Printf("[INFO] backend/local: apply calling Plan")
		plan, moreDiags = lr.Core.Plan(lr.Config, lr.InputState, lr.PlanOpts)
		diags = diags.Append(moreDiags)
		if moreDiags.HasErrors() {
			// If Terraform Core generated a partial plan despite the errors
			// then we'll make a best effort to render it. Terraform Core
			// promises that a non-nil plan returned alongside errors may not
			// contain all of the needed actions, but that any actions it does
			// include will be properly-formed. plan.Errored will be true in
			// this case, which our plan renderer can rely on to tailor its
			// messaging.
			if plan != nil && (len(plan.Changes.Resources) != 0 || len(plan.Changes.Outputs) != 0) {
				schemas, moreDiags := lr.Core.Schemas(lr.Config, lr.InputState)
				// If schema loading returns errors then we'll just give up and
				// ignore them to avoid distracting from the plan-time errors we're
				// mainly trying to report here.
				if !moreDiags.HasErrors() {
					op.View.Plan(plan, schemas)
				}
			}
			op.ReportResult(runningOp, diags)
			return
		}

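		// Render the plan and decide whether to ask for interactive approval.
		// Approval is skipped for plans with nothing to apply, when
		// -auto-approve was given, or when no interactive UI is attached.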
		trivialPlan := !plan.CanApply()
		hasUI := op.UIOut != nil && op.UIIn != nil
		mustConfirm := hasUI && !op.AutoApprove && !trivialPlan
		op.View.Plan(plan, schemas)

		if testHookStopPlanApply != nil {
			testHookStopPlanApply()
		}

		// Check if we've been stopped before going through confirmation, or
		// skipping confirmation in the case of -auto-approve.
		// This can currently happen if a single stop request was received
		// during the final batch of resource plan calls, so no operations were
		// forced to abort, and no errors were returned from Plan.
		if stopCtx.Err() != nil {
			diags = diags.Append(errors.New("execution halted"))
			runningOp.Result = backend.OperationFailure
			op.ReportResult(runningOp, diags)
			return
		}

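		// The confirmation prompt is tailored to the plan mode, and calls out
		// the workspace name whenever it isn't the default workspace.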
		if mustConfirm {
			var desc, query string
			switch op.PlanMode {
			case plans.DestroyMode:
				if op.Workspace != "default" {
					query = "Do you really want to destroy all resources in workspace \"" + op.Workspace + "\"?"
				} else {
					query = "Do you really want to destroy all resources?"
				}
				desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" +
					"There is no undo. Only 'yes' will be accepted to confirm."
			case plans.RefreshOnlyMode:
				if op.Workspace != "default" {
					query = "Would you like to update the Terraform state for \"" + op.Workspace + "\" to reflect these detected changes?"
				} else {
					query = "Would you like to update the Terraform state to reflect these detected changes?"
				}
				desc = "Terraform will write these changes to the state without modifying any real infrastructure.\n" +
					"There is no undo. Only 'yes' will be accepted to confirm."
			default:
				if op.Workspace != "default" {
					query = "Do you want to perform these actions in workspace \"" + op.Workspace + "\"?"
				} else {
					query = "Do you want to perform these actions?"
				}
				desc = "Terraform will perform the actions described above.\n" +
					"Only 'yes' will be accepted to approve."
			}

			// We'll show any accumulated warnings before we display the prompt,
			// so the user can consider them when deciding how to answer.
			if len(diags) > 0 {
				op.View.Diagnostics(diags)
				diags = nil // reset so we won't show the same diagnostics again later
			}

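			// Ask for approval; anything other than an exact "yes" cancels
			// the operation.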
			v, err := op.UIIn.Input(stopCtx, &terraform.InputOpts{
				Id:          "approve",
				Query:       "\n" + query,
				Description: desc,
			})
			if err != nil {
				diags = diags.Append(fmt.Errorf("error asking for approval: %w", err))
				op.ReportResult(runningOp, diags)
				return
			}
			if v != "yes" {
				op.View.Cancelled(op.PlanMode)
				runningOp.Result = backend.OperationFailure
				return
			}
		}
	} else {
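		// We were given a saved plan, so apply exactly what it describes:
		// refuse plans that were created with errors, and render the planned
		// changes so the user can see what is about to be applied.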
		plan = lr.Plan
		if plan.Errored {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Cannot apply incomplete plan",
				"Terraform encountered an error when generating this plan, so it cannot be applied.",
			))
			op.ReportResult(runningOp, diags)
			return
		}
		for _, change := range plan.Changes.Resources {
			if change.Action != plans.NoOp {
				op.View.PlannedChange(change)
			}
		}
	}

	// Set up our hook for continuous state updates
	stateHook.StateMgr = opState

	// Start the apply in a goroutine so that we can be interrupted.
	var applyState *states.State
	var applyDiags tfdiags.Diagnostics
	doneCh := make(chan struct{})
	go func() {
		defer logging.PanicHandler()
		defer close(doneCh)
		log.Printf("[INFO] backend/local: apply calling Apply")
		applyState, applyDiags = lr.Core.Apply(plan, lr.Config)
	}()

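	// Wait for the apply goroutine to finish, or for a stop or cancel request
	// to arrive; opWait returns true if the operation was forcefully
	// cancelled, in which case we bail out immediately.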
	if b.opWait(doneCh, stopCtx, cancelCtx, lr.Core, opState, op.View) {
		return
	}
	diags = diags.Append(applyDiags)

	// Even on error with an empty state, the state value should not be nil.
	// Return early here to prevent corrupting any existing state.
	if diags.HasErrors() && applyState == nil {
		log.Printf("[ERROR] backend/local: apply returned nil state")
		op.ReportResult(runningOp, diags)
		return
	}

	// Store the final state
	runningOp.State = applyState
	err := statemgr.WriteAndPersist(opState, applyState, schemas)
	if err != nil {
		// Export the state file from the state manager and assign the new
		// state. This is needed to preserve the existing serial and lineage.
		stateFile := statemgr.Export(opState)
		if stateFile == nil {
			stateFile = &statefile.File{}
		}
		stateFile.State = applyState

		diags = diags.Append(b.backupStateForError(stateFile, err, op.View))
		op.ReportResult(runningOp, diags)
		return
	}

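	// The state has been persisted above even if the apply itself failed, so
	// all that's left for a failed apply is to report its errors.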
	if applyDiags.HasErrors() {
		op.ReportResult(runningOp, diags)
		return
	}

	// If we've accumulated any warnings along the way then we'll show them
	// here just before we show the summary and next steps. If we encountered
	// errors then we would've returned early at some other point above.
	op.View.Diagnostics(diags)
}

// backupStateForError is called in a scenario where we're unable to persist the
// state for some reason, and will attempt to save a backup copy of the state
// to local disk to help the user recover. This is a "last ditch effort" sort
// of thing, so we really don't want to end up in this codepath; we should do
// everything we possibly can to get the state saved _somewhere_.
func (b *Local) backupStateForError(stateFile *statefile.File, err error, view views.Operation) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics

	diags = diags.Append(tfdiags.Sourceless(
		tfdiags.Error,
		"Failed to save state",
		fmt.Sprintf("Error saving state: %s", err),
	))

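	// Attempt to write a local backup of the state to "errored.tfstate" in
	// the current working directory.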
	local := statemgr.NewFilesystem("errored.tfstate")
	writeErr := local.WriteStateForMigration(stateFile, true)
	if writeErr != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to create local state file",
			fmt.Sprintf("Error creating local state file for recovery: %s", writeErr),
		))

		// To avoid leaving the user with no state at all, our last resort
		// is to print the JSON state out onto the terminal. This is an awful
		// UX, so we should definitely avoid doing this if at all possible,
		// but at least the user has _some_ path to recover if we end up
		// here for some reason.
		if dumpErr := view.EmergencyDumpState(stateFile); dumpErr != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to serialize state",
				fmt.Sprintf(stateWriteFatalErrorFmt, dumpErr),
			))
		}

		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to persist state to backend",
			stateWriteConsoleFallbackError,
		))
		return diags
	}

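	// The local backup was written successfully, so point the user at it and
	// explain how to push it to the configured backend.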
	diags = diags.Append(tfdiags.Sourceless(
		tfdiags.Error,
		"Failed to persist state to backend",
		stateWriteBackedUpError,
	))

	return diags
}

const stateWriteBackedUpError = `The error shown above has prevented Terraform from writing the updated state to the configured backend. To allow for recovery, the state has been written to the file "errored.tfstate" in the current working directory.

Running "terraform apply" again at this point will create a forked state, making it harder to recover.

To retry writing this state, use the following command:
    terraform state push errored.tfstate
`

const stateWriteConsoleFallbackError = `The errors shown above prevented Terraform from writing the updated state to
the configured backend and from creating a local backup file. As a fallback,
the raw state data is printed above as a JSON object.

To retry writing this state, copy the state data (from the first { to the last } inclusive) and save it into a local file called errored.tfstate, then run the following command:
    terraform state push errored.tfstate
`

const stateWriteFatalErrorFmt = `Failed to save state after apply.

Error serializing state: %s

A catastrophic error has prevented Terraform from persisting the state file or creating a backup. Unfortunately this means that the record of any resources created during this apply has been lost, and such resources may exist outside of Terraform's management.

For resources that support import, it is possible to recover by manually importing each resource using its id from the target system.

This is a serious bug in Terraform and should be reported.
`