github.com/myhau/pulumi/pkg/v3@v3.70.2-0.20221116134521-f2775972e587/resource/deploy/deployment_executor.go

     1  // Copyright 2016-2018, Pulumi Corporation.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package deploy
    16  
    17  import (
    18  	"context"
    19  	"errors"
    20  	"fmt"
    21  	"strings"
    22  
    23  	"github.com/pulumi/pulumi/pkg/v3/resource/deploy/providers"
    24  	"github.com/pulumi/pulumi/pkg/v3/resource/graph"
    25  	"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
    26  	"github.com/pulumi/pulumi/sdk/v3/go/common/display"
    27  	"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
    28  	"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
    29  	"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
    30  	"github.com/pulumi/pulumi/sdk/v3/go/common/util/result"
    31  )
    32  
    33  // deploymentExecutor is responsible for taking a deployment and driving it to completion.
    34  // Its primary responsibility is to own a `stepGenerator` and `stepExecutor`, serving
    35  // as the glue that links the two subsystems together.
    36  type deploymentExecutor struct {
    37  	deployment *Deployment // The deployment that we are executing
    38  
    39  	stepGen  *stepGenerator // step generator owned by this deployment
    40  	stepExec *stepExecutor  // step executor owned by this deployment
    41  }
    42  
    43  // checkTargets validates that all the targets passed in refer to existing resources.  Diagnostics
    44  // are generated for any target that cannot be found.  The target must either have existed in the stack
    45  // prior to running the operation, or it must be the urn for a resource that was created.
    46  func (ex *deploymentExecutor) checkTargets(targets UrnTargets, op display.StepOp) result.Result {
    47  	if !targets.IsConstrained() {
    48  		return nil
    49  	}
    50  
    51  	olds := ex.deployment.olds
    52  	var news map[resource.URN]bool
    53  	if ex.stepGen != nil {
    54  		news = ex.stepGen.urns
    55  	}
    56  
    57  	hasUnknownTarget := false
    58  	for _, target := range targets.Literals() {
    59  		hasOld := olds != nil && olds[target] != nil
    60  		hasNew := news != nil && news[target]
    61  		if !hasOld && !hasNew {
    62  			hasUnknownTarget = true
    63  
    64  			logging.V(7).Infof("Resource to %v (%v) could not be found in the stack.", op, target)
    65  			if strings.Contains(string(target), "$") {
    66  				ex.deployment.Diag().Errorf(diag.GetTargetCouldNotBeFoundError(), target)
    67  			} else {
    68  				ex.deployment.Diag().Errorf(diag.GetTargetCouldNotBeFoundDidYouForgetError(), target)
    69  			}
    70  		}
    71  	}
    72  
    73  	if hasUnknownTarget {
    74  		return result.Bail()
    75  	}
    76  
    77  	return nil
    78  }
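
// The membership test above can be read in isolation: a literal target is
// "known" if it appears either in the old snapshot or in the set of URNs the
// step generator has registered so far. A minimal sketch of that check,
// assuming plain maps for both sets (the helper name is illustrative, not
// part of the upstream API):
func targetIsKnown(target resource.URN, olds map[resource.URN]*resource.State, news map[resource.URN]bool) bool {
	hasOld := olds != nil && olds[target] != nil // existed before this operation
	hasNew := news != nil && news[target]        // created during this operation
	return hasOld || hasNew
}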
    79  
    80  func (ex *deploymentExecutor) printPendingOperationsWarning() {
    81  	pendingOperations := ""
    82  	for _, op := range ex.deployment.prev.PendingOperations {
    83  		pendingOperations = pendingOperations + fmt.Sprintf("  * %s, interrupted while %s\n", op.Resource.URN, op.Type)
    84  	}
    85  
    86  	resolutionMessage := "" +
    87  		"These resources are in an unknown state because the Pulumi CLI was interrupted while " +
    88  		"waiting for changes to these resources to complete. You should confirm whether or not the " +
    89  		"operations listed completed successfully by checking the state of the appropriate provider. " +
    90  		"For example, if you are using AWS, you can confirm using the AWS Console.\n" +
    91  		"\n" +
    92  		"Once you have confirmed the status of the interrupted operations, you can repair your stack " +
    93  		"using `pulumi refresh` which will refresh the state from the provider you are using and " +
    94  		"clear the pending operations if there are any.\n" +
    95  		"\n" +
    96  		"Note that `pulumi refresh` will need to be run interactively to clear pending CREATE operations."
    97  
    98  	warning := "Attempting to deploy or update resources " +
    99  		fmt.Sprintf("with %d pending operations from previous deployment.\n", len(ex.deployment.prev.PendingOperations)) +
   100  		pendingOperations +
   101  		resolutionMessage
   102  
   103  	ex.deployment.Diag().Warningf(diag.RawMessage("" /*urn*/, warning))
   104  }
   105  
   106  // reportExecResult issues an appropriate diagnostic depending on what went wrong.
   107  func (ex *deploymentExecutor) reportExecResult(message string, preview bool) {
   108  	kind := "update"
   109  	if preview {
   110  		kind = "preview"
   111  	}
   112  
   113  	ex.reportError("", errors.New(kind+" "+message))
   114  }
   115  
   116  // reportError reports a single error to the executor's diag stream with the indicated URN for context.
   117  func (ex *deploymentExecutor) reportError(urn resource.URN, err error) {
   118  	ex.deployment.Diag().Errorf(diag.RawMessage(urn, err.Error()))
   119  }
   120  
   121  // Execute executes a deployment to completion, using the given cancellation context and running a preview
   122  // or update.
   123  func (ex *deploymentExecutor) Execute(callerCtx context.Context, opts Options, preview bool) (*Plan, result.Result) {
   124  	// Set up a goroutine that will signal cancellation to the deployment's plugins if the caller context is cancelled.
   125  	// We do not hang this off of the context we create below because we do not want the failure of a single step to
   126  	// cause other steps to fail.
   127  	done := make(chan bool)
   128  	defer close(done)
   129  	go func() {
   130  		select {
   131  		case <-callerCtx.Done():
   132  			logging.V(4).Infof("deploymentExecutor.Execute(...): signalling cancellation to providers...")
   133  			cancelErr := ex.deployment.ctx.Host.SignalCancellation()
   134  			if cancelErr != nil {
   135  				logging.V(4).Infof("deploymentExecutor.Execute(...): failed to signal cancellation to providers: %v", cancelErr)
   136  			}
   137  		case <-done:
   138  			logging.V(4).Infof("deploymentExecutor.Execute(...): exiting provider canceller")
   139  		}
   140  	}()
   141  
   142  	// If this deployment is an import, run the imports and exit.
   143  	if ex.deployment.isImport {
   144  		return ex.importResources(callerCtx, opts, preview)
   145  	}
   146  
   147  	// Before doing anything else, optionally refresh each resource in the base checkpoint.
   148  	if opts.Refresh {
   149  		if res := ex.refresh(callerCtx, opts, preview); res != nil {
   150  			return nil, res
   151  		}
   152  		if opts.RefreshOnly {
   153  			return nil, nil
   154  		}
   155  	} else if ex.deployment.prev != nil && len(ex.deployment.prev.PendingOperations) > 0 && !preview {
   156  		// Print a warning for users that there are pending operations.
   157  		// Explain that these operations can be cleared using pulumi refresh (except for CREATE operations)
   158  		// since these require user intervention:
   159  		ex.printPendingOperationsWarning()
   160  	}
   161  
   162  	// The set of -t targets provided on the command line.  'nil' means 'update everything'.
   163  	// Non-nil means 'update only in this set'.  We don't error if the user specifies a target
   164  	// during `update` that we don't know about because it might be the urn for a resource they
   165  	// want to create.
   166  	updateTargetsOpt := opts.UpdateTargets
   167  	replaceTargetsOpt := opts.ReplaceTargets
   168  	destroyTargetsOpt := opts.DestroyTargets
   169  	if res := ex.checkTargets(opts.ReplaceTargets, OpReplace); res != nil {
   170  		return nil, res
   171  	}
   172  	if res := ex.checkTargets(opts.DestroyTargets, OpDelete); res != nil {
   173  		return nil, res
   174  	}
   175  
   176  	if (updateTargetsOpt.IsConstrained() || replaceTargetsOpt.IsConstrained()) && destroyTargetsOpt.IsConstrained() {
   177  		contract.Failf("Should not be possible to have both .DestroyTargets and .UpdateTargets or .ReplaceTargets")
   178  	}
   179  
   180  	// Begin iterating the source.
   181  	src, res := ex.deployment.source.Iterate(callerCtx, opts, ex.deployment)
   182  	if res != nil {
   183  		return nil, res
   184  	}
   185  
   186  	// Set up a step generator for this deployment.
   187  	ex.stepGen = newStepGenerator(ex.deployment, opts, updateTargetsOpt, replaceTargetsOpt)
   188  
   189  	// Derive a cancellable context for this deployment. We will only cancel this context if some piece of the
   190  	// deployment's execution fails.
   191  	ctx, cancel := context.WithCancel(callerCtx)
   192  
   193  	// Set up a step executor for this deployment.
   194  	ex.stepExec = newStepExecutor(ctx, cancel, ex.deployment, opts, preview, false)
   195  
   196  	// We iterate the source in its own goroutine because iteration is blocking and we want the main loop to be able to
   197  	// respond to cancellation requests promptly.
   198  	type nextEvent struct {
   199  		Event  SourceEvent
   200  		Result result.Result
   201  	}
   202  	incomingEvents := make(chan nextEvent)
   203  	go func() {
   204  		for {
   205  			event, sourceErr := src.Next()
   206  			select {
   207  			case incomingEvents <- nextEvent{event, sourceErr}:
   208  				if event == nil {
   209  					return
   210  				}
   211  			case <-done:
   212  				logging.V(4).Infof("deploymentExecutor.Execute(...): incoming events goroutine exiting")
   213  				return
   214  			}
   215  		}
   216  	}()
   217  
   218  	// The main loop. We'll continuously select for incoming events and the cancellation signal. There are
   219  	//  three ways we can exit this loop:
   220  	//  1. The SourceIterator sends us a `nil` event. This means that we're done processing source events and
   221  	//     we should begin processing deletes.
   222  	//  2. The SourceIterator sends us an error. This means some error occurred in the source program and we
   223  	//     should bail.
   224  	//  3. The step executor's cancellation context gets canceled. This means some error occurred in the step executor
   225  	//     and we need to bail. This can also happen if the user hits Ctrl-C.
   226  	canceled, res := func() (bool, result.Result) {
   227  		logging.V(4).Infof("deploymentExecutor.Execute(...): waiting for incoming events")
   228  		for {
   229  			select {
   230  			case event := <-incomingEvents:
   231  				logging.V(4).Infof("deploymentExecutor.Execute(...): incoming event (nil? %v, %v)", event.Event == nil,
   232  					event.Result)
   233  
   234  				if event.Result != nil {
   235  					if !event.Result.IsBail() {
   236  						ex.reportError("", event.Result.Error())
   237  					}
   238  					cancel()
   239  
   240  					// We reported any errors above.  So we can just bail now.
   241  					return false, result.Bail()
   242  				}
   243  
   244  				if event.Event == nil {
   245  					res := ex.performDeletes(ctx, updateTargetsOpt, destroyTargetsOpt)
   246  					if res != nil {
   247  						if resErr := res.Error(); resErr != nil {
   248  							logging.V(4).Infof("deploymentExecutor.Execute(...): error performing deletes: %v", resErr)
   249  							ex.reportError("", resErr)
   250  							return false, result.Bail()
   251  						}
   252  					}
   253  					return false, res
   254  				}
   255  
   256  				if res := ex.handleSingleEvent(event.Event); res != nil {
   257  					if resErr := res.Error(); resErr != nil {
   258  						logging.V(4).Infof("deploymentExecutor.Execute(...): error handling event: %v", resErr)
   259  						ex.reportError(ex.deployment.generateEventURN(event.Event), resErr)
   260  					}
   261  					cancel()
   262  					return false, result.Bail()
   263  				}
   264  			case <-ctx.Done():
   265  				logging.V(4).Infof("deploymentExecutor.Execute(...): context finished: %v", ctx.Err())
   266  
   267  				// NOTE: we use the presence of an error in the caller context in order to distinguish caller-initiated
   268  				// cancellation from internally-initiated cancellation.
   269  				return callerCtx.Err() != nil, nil
   270  			}
   271  		}
   272  	}()
   273  
   274  	ex.stepExec.WaitForCompletion()
   275  	logging.V(4).Infof("deploymentExecutor.Execute(...): step executor has completed")
   276  
   277  	// Now that we've performed all steps in the deployment, ensure that the list of targets to update was
   278  	// valid.  We have to do this *after* performing the steps as the target list may have referred
   279  	// to a resource that was created in one of the steps.
   280  	if res == nil {
   281  		res = ex.checkTargets(opts.UpdateTargets, OpUpdate)
   282  	}
   283  
   284  	// Check that we did operations for everything expected in the plan. We mutate ResourcePlan.Ops as we run
   285  	// so by the time we get here everything in the map should have an empty ops list (except for unneeded
   286  	// deletes). We skip this check if we already have an error: if the deployment failed, many operations
   287  	// likely never got a chance to run, and reporting every one of them would only obscure the root
   288  	// cause error for the user.
   289  	if res == nil && ex.deployment.plan != nil {
   290  		for urn, resourcePlan := range ex.deployment.plan.ResourcePlans {
   291  			if len(resourcePlan.Ops) != 0 {
   292  				if len(resourcePlan.Ops) == 1 && resourcePlan.Ops[0] == OpDelete {
   293  					// We haven't done a delete for this resource yet; check whether it was in the old snapshot.
   294  					// If it's already gone, the delete simply wasn't needed.
   295  					found := false
   296  					for i := range ex.deployment.prev.Resources {
   297  						if ex.deployment.prev.Resources[i].URN == urn {
   298  							found = true
   299  							break
   300  						}
   301  					}
   302  
   303  					// Didn't find the resource in the old snapshot so this was just an unneeded delete
   304  					if !found {
   305  						continue
   306  					}
   307  				}
   308  
   309  				err := fmt.Errorf("expected resource operations for %v but none were seen", urn)
   310  				logging.V(4).Infof("deploymentExecutor.Execute(...): error handling event: %v", err)
   311  				ex.reportError(urn, err)
   312  				res = result.Bail()
   313  			}
   314  		}
   315  	}
   316  
   317  	if res != nil && res.IsBail() {
   318  		return nil, res
   319  	}
   320  
   321  	// If the step generator and step executor were both successful, then we send all the resources
   322  	// observed to be analyzed. Otherwise, this step is skipped.
   323  	if res == nil && !ex.stepExec.Errored() {
   324  		res := ex.stepGen.AnalyzeResources()
   325  		if res != nil {
   326  			if resErr := res.Error(); resErr != nil {
   327  				logging.V(4).Infof("deploymentExecutor.Execute(...): error analyzing resources: %v", resErr)
   328  				ex.reportError("", resErr)
   329  			}
   330  			return nil, result.Bail()
   331  		}
   332  	}
   333  
   334  	// Figure out if execution failed and why. Step generation and execution errors trump cancellation.
   335  	if res != nil || ex.stepExec.Errored() || ex.stepGen.Errored() {
   336  		// TODO(cyrusn): We seem to be losing any information about the original 'res's errors.  Should
   337  		// we be doing a merge here?
   338  		ex.reportExecResult("failed", preview)
   339  		return nil, result.Bail()
   340  	} else if canceled {
   341  		ex.reportExecResult("canceled", preview)
   342  		return nil, result.Bail()
   343  	}
   344  
   345  	return ex.deployment.newPlans.plan(), res
   346  }
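
// The Execute loop above combines two common Go patterns: a producer goroutine
// that pumps source events into a channel, and a consumer that selects over
// that channel and a cancellation signal. A stripped-down, self-contained
// sketch of the same shape, with illustrative names and a plain string event
// type (termination by a nil event is modelled here by the empty string):
func pumpAndDrain(ctx context.Context, next func() (string, error)) error {
	type item struct {
		event string
		err   error
	}
	events := make(chan item)
	done := make(chan struct{})
	defer close(done)

	// Producer: iterate the source on its own goroutine so the consumer can
	// react to cancellation promptly.
	go func() {
		for {
			ev, err := next()
			select {
			case events <- item{ev, err}:
				if ev == "" || err != nil {
					return // source exhausted or failed
				}
			case <-done:
				return // consumer stopped listening
			}
		}
	}()

	// Consumer: three exits, mirroring the three cases documented above.
	for {
		select {
		case it := <-events:
			if it.err != nil {
				return it.err // source error: bail
			}
			if it.event == "" {
				return nil // no more events: move on to deletes
			}
			// ... handle it.event, e.g. generate and schedule steps ...
		case <-ctx.Done():
			return ctx.Err() // cancellation, whether caller- or error-initiated
		}
	}
}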
   347  
   348  func (ex *deploymentExecutor) performDeletes(
   349  	ctx context.Context, updateTargetsOpt, destroyTargetsOpt UrnTargets) result.Result {
   350  
   351  	defer func() {
   352  		// We're done here - signal completion so that the step executor knows to terminate.
   353  		ex.stepExec.SignalCompletion()
   354  	}()
   355  
   356  	prev := ex.deployment.prev
   357  	if prev == nil || len(prev.Resources) == 0 {
   358  		return nil
   359  	}
   360  
   361  	logging.V(7).Infof("performDeletes(...): beginning")
   362  
   363  	// At this point we have generated the set of resources that we would normally want to
   364  	// delete.  However, if the user provided --target options, we will only actually delete the
   365  	// specific resources that were explicitly asked for.
   366  	var targetsOpt UrnTargets
   367  	if updateTargetsOpt.IsConstrained() {
   368  		targetsOpt = updateTargetsOpt
   369  	} else if destroyTargetsOpt.IsConstrained() {
   370  		targetsOpt = destroyTargetsOpt
   371  	}
   372  
   373  	deleteSteps, res := ex.stepGen.GenerateDeletes(targetsOpt)
   374  	if res != nil {
   375  		logging.V(7).Infof("performDeletes(...): generating deletes produced error result")
   376  		return res
   377  	}
   378  
   379  	deletes := ex.stepGen.ScheduleDeletes(deleteSteps)
   380  
   381  	// ScheduleDeletes gives us a list of lists of steps. Each list of steps can safely be executed
   382  	// in parallel, but each list must complete before the next list can safely begin
   383  	// executing.
   384  	//
   385  	// This is not "true" delete parallelism, since there may be resources that could safely begin
   386  	// deleting, but we do not start them until the previous set of deletes fully completes. This approximation
   387  	// is conservative, but correct.
   388  	for _, antichain := range deletes {
   389  		logging.V(4).Infof("deploymentExecutor.Execute(...): beginning delete antichain")
   390  		tok := ex.stepExec.ExecuteParallel(antichain)
   391  		tok.Wait(ctx)
   392  		logging.V(4).Infof("deploymentExecutor.Execute(...): antichain complete")
   393  	}
   394  
   395  	// After executing targeted deletes, we may now have resources that depend on resources that
   396  	// were deleted.  Go through and clean up their dependency lists accordingly.
   397  	if targetsOpt.IsConstrained() {
   398  		resourceToStep := make(map[*resource.State]Step)
   399  		for _, step := range deleteSteps {
   400  			resourceToStep[ex.deployment.olds[step.URN()]] = step
   401  		}
   402  
   403  		ex.rebuildBaseState(resourceToStep, false /*refresh*/)
   404  	}
   405  
   406  	return nil
   407  }
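
// ScheduleDeletes hands back deletes grouped into antichains: every step in
// one group may run in parallel, but a group must finish before the next one
// starts. A minimal sketch of that execution discipline using only channels
// (the types and names here are illustrative, not the real Step API):
func runAntichains(antichains [][]func()) {
	for _, chain := range antichains {
		finished := make(chan struct{})
		for _, step := range chain {
			go func(run func()) {
				defer func() { finished <- struct{}{} }()
				run() // steps within one antichain execute concurrently
			}(step)
		}
		for range chain {
			<-finished // wait for the whole antichain before starting the next
		}
	}
}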
   408  
   409  // handleSingleEvent handles a single source event. For all incoming events, it produces a chain that needs
   410  // to be executed and schedules the chain for execution.
   411  func (ex *deploymentExecutor) handleSingleEvent(event SourceEvent) result.Result {
   412  	contract.Require(event != nil, "event != nil")
   413  
   414  	var steps []Step
   415  	var res result.Result
   416  	switch e := event.(type) {
   417  	case RegisterResourceEvent:
   418  		logging.V(4).Infof("deploymentExecutor.handleSingleEvent(...): received RegisterResourceEvent")
   419  		steps, res = ex.stepGen.GenerateSteps(e)
   420  	case ReadResourceEvent:
   421  		logging.V(4).Infof("deploymentExecutor.handleSingleEvent(...): received ReadResourceEvent")
   422  		steps, res = ex.stepGen.GenerateReadSteps(e)
   423  	case RegisterResourceOutputsEvent:
   424  		logging.V(4).Infof("deploymentExecutor.handleSingleEvent(...): received register resource outputs")
   425  		return ex.stepExec.ExecuteRegisterResourceOutputs(e)
   426  	}
   427  
   428  	if res != nil {
   429  		return res
   430  	}
   431  
   432  	ex.stepExec.ExecuteSerial(steps)
   433  	return nil
   434  }
   435  
   436  // importResources imports a list of resources into a stack.
   437  func (ex *deploymentExecutor) importResources(
   438  	callerCtx context.Context,
   439  	opts Options,
   440  	preview bool) (*Plan, result.Result) {
   441  
   442  	if len(ex.deployment.imports) == 0 {
   443  		return nil, nil
   444  	}
   445  
   446  	// Create an executor for this import.
   447  	ctx, cancel := context.WithCancel(callerCtx)
   448  	stepExec := newStepExecutor(ctx, cancel, ex.deployment, opts, preview, true)
   449  
   450  	importer := &importer{
   451  		deployment: ex.deployment,
   452  		executor:   stepExec,
   453  		preview:    preview,
   454  	}
   455  	res := importer.importResources(ctx)
   456  	stepExec.SignalCompletion()
   457  	stepExec.WaitForCompletion()
   458  
   459  	// NOTE: we use the presence of an error in the caller context in order to distinguish caller-initiated
   460  	// cancellation from internally-initiated cancellation.
   461  	canceled := callerCtx.Err() != nil
   462  
   463  	if res != nil || stepExec.Errored() {
   464  		if res != nil && res.Error() != nil {
   465  			ex.reportExecResult(fmt.Sprintf("failed: %s", res.Error()), preview)
   466  		} else {
   467  			ex.reportExecResult("failed", preview)
   468  		}
   469  		return nil, result.Bail()
   470  	} else if canceled {
   471  		ex.reportExecResult("canceled", preview)
   472  		return nil, result.Bail()
   473  	}
   474  	return ex.deployment.newPlans.plan(), nil
   475  }
   476  
   477  // refresh refreshes the state of the base checkpoint file for the current deployment in memory.
   478  func (ex *deploymentExecutor) refresh(callerCtx context.Context, opts Options, preview bool) result.Result {
   479  	prev := ex.deployment.prev
   480  	if prev == nil || len(prev.Resources) == 0 {
   481  		return nil
   482  	}
   483  
   484  	// Make sure that, if any targets were specified, they all refer to existing resources.
   485  	if res := ex.checkTargets(opts.RefreshTargets, OpRefresh); res != nil {
   486  		return res
   487  	}
   488  
   489  	// If the user did not provide any --targets, create a refresh step for each resource in the
   490  	// old snapshot.  If they did provide --targets, then only create refresh steps for those
   491  	// specific targets.
   492  	steps := []Step{}
   493  	resourceToStep := map[*resource.State]Step{}
   494  	for _, res := range prev.Resources {
   495  		if opts.RefreshTargets.Contains(res.URN) {
   496  			step := NewRefreshStep(ex.deployment, res, nil)
   497  			steps = append(steps, step)
   498  			resourceToStep[res] = step
   499  		}
   500  	}
   501  
   502  	// Fire up a worker pool and issue each refresh in turn.
   503  	ctx, cancel := context.WithCancel(callerCtx)
   504  	stepExec := newStepExecutor(ctx, cancel, ex.deployment, opts, preview, true)
   505  	stepExec.ExecuteParallel(steps)
   506  	stepExec.SignalCompletion()
   507  	stepExec.WaitForCompletion()
   508  
   509  	ex.rebuildBaseState(resourceToStep, true /*refresh*/)
   510  
   511  	// NOTE: we use the presence of an error in the caller context in order to distinguish caller-initiated
   512  	// cancellation from internally-initiated cancellation.
   513  	canceled := callerCtx.Err() != nil
   514  
   515  	if stepExec.Errored() {
   516  		ex.reportExecResult("failed", preview)
   517  		return result.Bail()
   518  	} else if canceled {
   519  		ex.reportExecResult("canceled", preview)
   520  		return result.Bail()
   521  	}
   522  	return nil
   523  }
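
// The refresh loop above relies on UrnTargets.Contains matching every URN when
// no --targets were given, so a single code path covers both the targeted and
// the untargeted case. A toy value with that "unconstrained matches all"
// behaviour, for illustration only:
type urnFilter struct {
	literals map[resource.URN]bool // empty or nil means "unconstrained"
}

func (f urnFilter) contains(urn resource.URN) bool {
	if len(f.literals) == 0 {
		return true // no targets specified: refresh everything
	}
	return f.literals[urn]
}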
   524  
   525  func (ex *deploymentExecutor) rebuildBaseState(resourceToStep map[*resource.State]Step, refresh bool) {
   526  	// Rebuild this deployment's map of old resources and dependency graph, stripping out any deleted
   527  	// resources and repairing dependency lists as necessary. Note that this updates the base
   528  	// snapshot _in memory_, so it is critical that any components that use the snapshot refer to
   529  	// the same instance and avoid reading it concurrently with this rebuild.
   530  	//
   531  	// The process of repairing dependency lists is a bit subtle. Because multiple physical
   532  	// resources may share a URN, the ability of a particular URN to be referenced in a dependency
   533  	// list can change based on the dependent resource's position in the resource list. For example,
   534  	// consider the following list of resources, where each resource is a (URN, ID, Dependencies)
   535  	// tuple:
   536  	//
   537  	//     [ (A, 0, []), (B, 0, [A]), (A, 1, []), (A, 2, []), (C, 0, [A]) ]
   538  	//
   539  	// Let `(A, 0, [])` and `(A, 2, [])` be deleted by the refresh. This produces the following
   540  	// intermediate list before dependency lists are repaired:
   541  	//
   542  	//     [ (B, 0, [A]), (A, 1, []), (C, 0, [A]) ]
   543  	//
   544  	// In order to repair the dependency lists, we iterate over the intermediate resource list,
   545  	// keeping track of which URNs refer to at least one physical resource at each point in the
   546  	// list, and remove any dependencies that refer to URNs that do not refer to any physical
   547  	// resources. This process produces the following final list:
   548  	//
   549  	//     [ (B, 0, []), (A, 1, []), (C, 0, [A]) ]
   550  	//
   551  	// Note that the correctness of this process depends on the fact that the list of resources is a
   552  	// topological sort of its corresponding dependency graph, so a resource always appears in the
   553  	// list after any resources on which it may depend.
   554  	resources := []*resource.State{}
   555  	referenceable := make(map[resource.URN]bool)
   556  	olds := make(map[resource.URN]*resource.State)
   557  	for _, s := range ex.deployment.prev.Resources {
   558  		var old, new *resource.State
   559  		if step, has := resourceToStep[s]; has {
   560  			// We produced a refresh step for this specific resource.  Use the new information about
   561  			// its dependencies during the update.
   562  			old = step.Old()
   563  			new = step.New()
   564  		} else {
   565  			// We didn't do anything with this resource.  However, we still may want to update its
   566  			// dependencies.  So use this resource itself as the 'new' one to update.
   567  			old = s
   568  			new = s
   569  		}
   570  
   571  		if new == nil {
   572  			if refresh {
   573  				contract.Assertf(old.Custom, "Expected custom resource")
   574  				contract.Assert(!providers.IsProviderType(old.Type))
   575  			}
   576  			continue
   577  		}
   578  
   579  		// Remove any deleted resources from this resource's dependency list.
   580  		if len(new.Dependencies) != 0 {
   581  			deps := make([]resource.URN, 0, len(new.Dependencies))
   582  			for _, d := range new.Dependencies {
   583  				if referenceable[d] {
   584  					deps = append(deps, d)
   585  				}
   586  			}
   587  			new.Dependencies = deps
   588  		}
   589  
   590  		// Add this resource to the resource list and mark it as referenceable.
   591  		resources = append(resources, new)
   592  		referenceable[new.URN] = true
   593  
   594  		// Do not record resources that are pending deletion in the "olds" lookup table.
   595  		if !new.Delete {
   596  			olds[new.URN] = new
   597  		}
   598  	}
   599  
   600  	undangleParentResources(olds, resources)
   601  
   602  	ex.deployment.prev.Resources = resources
   603  	ex.deployment.olds, ex.deployment.depGraph = olds, graph.NewDependencyGraph(resources)
   604  }
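
// The dependency-repair pass documented above can be traced on the toy list
// from the comment. A self-contained sketch with illustrative types, starting
// from the intermediate list left after (A, 0) and (A, 2) were deleted:
func repairDependenciesExample() {
	type res struct {
		urn  string
		id   int
		deps []string
	}
	list := []res{
		{"B", 0, []string{"A"}},
		{"A", 1, nil},
		{"C", 0, []string{"A"}},
	}
	referenceable := map[string]bool{}
	for i, r := range list {
		kept := make([]string, 0, len(r.deps))
		for _, d := range r.deps {
			if referenceable[d] {
				kept = append(kept, d) // keep deps that still name a live URN
			}
		}
		list[i].deps = kept
		referenceable[r.urn] = true
	}
	// list is now [ (B, 0, []), (A, 1, []), (C, 0, [A]) ], as in the comment.
	fmt.Println(list)
}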
   605  
   606  func undangleParentResources(undeleted map[resource.URN]*resource.State, resources []*resource.State) {
   607  	// Since a refresh may delete arbitrary resources, we need to handle the case where
   608  	// the parent of a still existing resource is deleted.
   609  	//
   610  	// Invalid parents need to be fixed since otherwise they leave the state invalid, and
   611  	// the user sees an error:
   612  	// ```
   613  	// snapshot integrity failure; refusing to use it: child resource ${validURN} refers to missing parent ${deletedURN}
   614  	// ```
   615  	// To solve the problem we traverse the topologically sorted list of resources in
   616  	// order, setting newly invalidated parent URNs to the URN of the parent's parent.
   617  	//
   618  	// This can be illustrated by an example. Consider the graph of resource parents:
   619  	//
   620  	//         A            xBx
   621  	//       /   \           |
   622  	//    xCx      D        xEx
   623  	//     |     /   \       |
   624  	//     F    G     xHx    I
   625  	//
   626  	// When a capital letter is marked for deletion, it is bracketed by `x`s.
   627  	// We can obtain a topological sort by reading left to right, top to bottom.
   628  	//
   629  	// A..D -> valid parents, so we do nothing
   630  	// E -> The parent of E is marked for deletion, so set E.Parent to E.Parent.Parent.
   631  	//      Since B (E's parent) has no parent, we set E.Parent to "".
   632  	// F -> The parent of F is marked for deletion, so set F.Parent to F.Parent.Parent.
   633  	//      We set F.Parent to "A"
   634  	// G, H -> valid parents, do nothing
   635  	// I -> The parent of I is marked for deletion, so set I.Parent to I.Parent.Parent.
   636  	//      The parent of I has parent "", (since we addressed the parent of E
   637  	//      previously), so we set I.Parent = "".
   638  	//
   639  	// The new graph looks like this:
   640  	//
   641  	//         A        xBx   xEx   I
   642  	//       / | \
   643  	//     xCx F  D
   644  	//          /   \
   645  	//         G    xHx
   646  	// We observe that it is perfectly valid for deleted nodes to be leaf nodes, but they
   647  	// cannot be intermediary nodes.
   648  	_, hasEmptyValue := undeleted[""]
   649  	contract.Assertf(!hasEmptyValue, "the zero value for an URN is not a valid URN")
   650  	availableParents := map[resource.URN]resource.URN{}
   651  	for _, r := range resources {
   652  		if _, ok := undeleted[r.Parent]; !ok {
   653  			// Since the resource list obeys a topological sort, we have already addressed
   654  			// r.Parent, and we know its repaired parent does not dangle. Since r.Parent itself
   655  			// no longer exists, we set r.Parent to r.Parent's repaired parent.
   656  			r.Parent = availableParents[r.Parent]
   657  		}
   658  		availableParents[r.URN] = r.Parent
   659  	}
   660  }
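
// The parent-rewriting rule can be checked against the lettered example in the
// comment above. Resources are visited in topological order; when a resource's
// parent is not among the undeleted resources, the resource is re-attached to
// the parent's already-repaired parent. A self-contained sketch with
// illustrative types:
func undangleParentsExample() {
	type node struct{ urn, parent string }
	// Topological order from the comment; B, C, E and H are marked for
	// deletion but still appear in the resource list.
	nodes := []node{
		{"A", ""}, {"B", ""}, {"C", "A"}, {"D", "A"},
		{"E", "B"}, {"F", "C"}, {"G", "D"}, {"H", "D"}, {"I", "E"},
	}
	deleted := map[string]bool{"B": true, "C": true, "E": true, "H": true}

	availableParents := map[string]string{}
	for i, r := range nodes {
		if deleted[r.parent] {
			// Parent is going away: adopt its already-repaired parent instead.
			nodes[i].parent = availableParents[r.parent]
		}
		availableParents[r.urn] = nodes[i].parent
	}
	// E ends up with parent "", F with parent "A", and I with parent "",
	// matching the walkthrough in the comment.
	fmt.Println(nodes)
}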