github.com/myhau/pulumi/pkg/v3@v3.70.2-0.20221116134521-f2775972e587/backend/httpstate/state.go

// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package httpstate

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
	"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"

	"github.com/pulumi/pulumi/pkg/v3/backend"
	"github.com/pulumi/pulumi/pkg/v3/backend/display"
	"github.com/pulumi/pulumi/pkg/v3/backend/httpstate/client"
	"github.com/pulumi/pulumi/pkg/v3/engine"
	"github.com/pulumi/pulumi/pkg/v3/resource/deploy"
	"github.com/pulumi/pulumi/pkg/v3/resource/stack"
	"github.com/pulumi/pulumi/sdk/v3/go/common/apitype"
	"github.com/pulumi/pulumi/sdk/v3/go/common/resource/config"
	"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
)

// cloudQuery is an implementation of engine.QueryInfo for queries run against
// the httpstate backend.
type cloudQuery struct {
	root string
	proj *workspace.Project
}

func (q *cloudQuery) GetRoot() string {
	return q.root
}

func (q *cloudQuery) GetProject() *workspace.Project {
	return q.proj
}

// cloudUpdate is an implementation of engine.Update backed by remote state and a local program.
type cloudUpdate struct {
	context context.Context
	backend *cloudBackend

	update      client.UpdateIdentifier
	tokenSource *tokenSource

	root   string
	proj   *workspace.Project
	target *deploy.Target
}

func (u *cloudUpdate) GetRoot() string {
	return u.root
}

func (u *cloudUpdate) GetProject() *workspace.Project {
	return u.proj
}

func (u *cloudUpdate) GetTarget() *deploy.Target {
	return u.target
}

func (u *cloudUpdate) Complete(status apitype.UpdateStatus) error {
	defer u.tokenSource.Close()

	token, err := u.tokenSource.GetToken()
	if err != nil {
		return err
	}
	return u.backend.client.CompleteUpdate(u.context, u.update, status, token)
}

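// Hypothetical usage sketch (not part of this file): once the engine has
// finished, the caller reports the terminal status exactly once, e.g.
//
//	err := u.Complete(finalStatus) // finalStatus is an apitype.UpdateStatus
//
// Since Complete closes the token source, it must be the last call on this
// update that needs a lease token.
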
// recordEngineEvents will record the events with the Pulumi Service, enabling things like viewing
// the update logs or drilling into the timeline of an update.
func (u *cloudUpdate) recordEngineEvents(startingSeqNumber int, events []engine.Event) error {
	contract.Assert(u.tokenSource != nil)
	token, err := u.tokenSource.GetToken()
	if err != nil {
		return err
	}

	var apiEvents apitype.EngineEventBatch
	for idx, event := range events {
		apiEvent, convErr := display.ConvertEngineEvent(event, false /* showSecrets */)
		if convErr != nil {
			return fmt.Errorf("converting engine event: %w", convErr)
		}

		// Each event within an update must have a unique sequence number. Any request to
		// emit an update with the same sequence number will fail. (Read: the caller needs
		// to be accurate about this.)
		apiEvent.Sequence = idx + startingSeqNumber
		apiEvent.Timestamp = int(time.Now().Unix())

		apiEvents.Events = append(apiEvents.Events, apiEvent)
	}

	return u.backend.client.RecordEngineEvents(u.context, u.update, apiEvents, token)
}

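// A sketch of the sequence-number contract above (hypothetical caller, not
// part of this file): consecutive calls must hand in contiguous, strictly
// increasing sequence ranges, e.g.
//
//	_ = u.recordEngineEvents(0, batch1)            // sequences 0..len(batch1)-1
//	_ = u.recordEngineEvents(len(batch1), batch2)  // picks up where batch1 ended
//
// Reusing a sequence number causes the service to reject the request.
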
// RecordAndDisplayEvents inspects engine events from the given channel, and prints them to the CLI as well as
// posting them to the Pulumi service.
func (u *cloudUpdate) RecordAndDisplayEvents(
	label string, action apitype.UpdateKind, stackRef backend.StackReference, op backend.UpdateOperation,
	events <-chan engine.Event, done chan<- bool, opts display.Options, isPreview bool) {
	// We take the channel of engine events and pass them to separate components that will display
	// them to the console or persist them on the Pulumi Service. Both should terminate as soon as
	// they see a CancelEvent, and when finished, close the "done" channel.
	displayEvents := make(chan engine.Event) // Note: unbuffered, but we assume it won't matter in practice.
	displayEventsDone := make(chan bool)

	persistEvents := make(chan engine.Event, 100)
	persistEventsDone := make(chan bool)

	// We close our own done channel when both of the dependent components have finished.
	defer func() {
		<-displayEventsDone
		<-persistEventsDone
		close(done)
	}()

	// Start the goroutines for displaying and persisting events.
	go display.ShowEvents(
		label, action, stackRef.Name(), op.Proj.Name,
		displayEvents, displayEventsDone, opts, isPreview)
	go persistEngineEvents(
		u, opts.Debug, /* persist debug events */
		persistEvents, persistEventsDone)

	for e := range events {
		displayEvents <- e
		persistEvents <- e

		// We stop reading from the event stream as soon as we see the CancelEvent,
		// which also signals the display/persist components to shut down.
		if e.Type == engine.CancelEvent {
			break
		}
	}

	// Note that we don't return immediately; the deferred function will block until
	// the display and persistence goroutines have finished processing events.
}

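// A minimal sketch of how a caller drives this method (hypothetical; the
// channel names and engine wiring are assumptions, not part of this file):
//
//	events := make(chan engine.Event)
//	done := make(chan bool)
//	go u.RecordAndDisplayEvents(label, action, stackRef, op, events, done, opts, isPreview)
//	// ... the engine writes events, ending with a CancelEvent ...
//	<-done // blocks until both display and persistence have drained
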
func (b *cloudBackend) newQuery(ctx context.Context,
	op backend.QueryOperation) (engine.QueryInfo, error) {
	return &cloudQuery{root: op.Root, proj: op.Proj}, nil
}

func (b *cloudBackend) newUpdate(ctx context.Context, stackRef backend.StackReference, op backend.UpdateOperation,
	update client.UpdateIdentifier, token string) (*cloudUpdate, error) {
	// Create a token source for this update if necessary.
	var tokenSource *tokenSource
	if token != "" {
		// TODO[pulumi/pulumi#10482] instead of assuming
		// expiration, consider expiration times returned by
		// the backend, if any.
		duration := 5 * time.Minute
		assumedExpires := func() time.Time {
			return time.Now().Add(duration)
		}

		renewLease := func(
			ctx context.Context,
			duration time.Duration,
			currentToken string) (string, time.Time, error) {
			tok, err := b.Client().RenewUpdateLease(
				ctx, update, currentToken, duration)
			if err != nil {
				return "", time.Time{}, err
			}
			return tok, assumedExpires(), err
		}

		ts, err := newTokenSource(ctx, token, assumedExpires(), duration, renewLease)
		if err != nil {
			return nil, err
		}
		tokenSource = ts
	}

	// Construct the deployment target.
	target, err := b.getTarget(ctx, stackRef, op.StackConfiguration.Config, op.StackConfiguration.Decrypter)
	if err != nil {
		return nil, err
	}

	// Construct and return a new update.
	return &cloudUpdate{
		context:     ctx,
		backend:     b,
		update:      update,
		tokenSource: tokenSource,
		root:        op.Root,
		proj:        op.Proj,
		target:      target,
	}, nil
}

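// Sketch of the renewal behavior assumed above (illustrative; the 5m figure
// comes from `duration`): the initial token is treated as expiring at
// now+5m, and each successful RenewUpdateLease call pushes the assumed expiry
// out another 5m. The backend's real expiry is never consulted (see the TODO).
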
func (b *cloudBackend) getSnapshot(ctx context.Context, stackRef backend.StackReference) (*deploy.Snapshot, error) {
	untypedDeployment, err := b.exportDeployment(ctx, stackRef, nil /* get latest */)
	if err != nil {
		return nil, err
	}

	snapshot, err := stack.DeserializeUntypedDeployment(ctx, untypedDeployment, stack.DefaultSecretsProvider)
	if err != nil {
		return nil, err
	}

	return snapshot, nil
}

func (b *cloudBackend) getTarget(ctx context.Context, stackRef backend.StackReference,
	cfg config.Map, dec config.Decrypter) (*deploy.Target, error) {
	stackID, err := b.getCloudStackIdentifier(stackRef)
	if err != nil {
		return nil, err
	}

	snapshot, err := b.getSnapshot(ctx, stackRef)
	if err != nil {
		switch err {
		case stack.ErrDeploymentSchemaVersionTooOld:
			return nil, fmt.Errorf("the stack '%s' is too old to be used by this version of the Pulumi CLI",
				stackRef.Name())
		case stack.ErrDeploymentSchemaVersionTooNew:
			return nil, fmt.Errorf("the stack '%s' is newer than what this version of the Pulumi CLI understands. "+
				"Please update your version of the Pulumi CLI", stackRef.Name())
		default:
			return nil, fmt.Errorf("could not deserialize deployment: %w", err)
		}
	}

	return &deploy.Target{
		Name:         tokens.Name(stackID.Stack),
		Organization: tokens.Name(stackID.Owner),
		Config:       cfg,
		Decrypter:    dec,
		Snapshot:     snapshot,
	}, nil
}

// isDebugDiagEvent returns true if the event is a diagnostic event at Debug severity.
func isDebugDiagEvent(e engine.Event) bool {
	return e.Type == engine.DiagEvent && (e.Payload().(engine.DiagEventPayload)).Severity == diag.Debug
}

// engineEventBatch is a group of engine events to be persisted together,
// numbered starting at the given sequence number.
type engineEventBatch struct {
	sequenceStart int
	events        []engine.Event
}

// persistEngineEvents reads from a channel of engine events and persists them on the
// Pulumi Service. This is the data that powers the logs display.
func persistEngineEvents(
	update *cloudUpdate, persistDebugEvents bool,
	events <-chan engine.Event, done chan<- bool) {
	// A single update can emit hundreds, if not thousands or tens of thousands, of
	// engine events. We transmit engine events in large batches to reduce the overhead
	// associated with each HTTP request to the service. We also send multiple HTTP
	// requests concurrently, so as not to block processing of subsequent engine events.

	// Maximum number of events to batch up before transmitting.
	const maxEventsToTransmit = 50
	// Maximum wait time before sending all batched events.
	const maxTransmissionDelay = 4 * time.Second
	// Maximum number of concurrent requests to the Pulumi Service to persist
	// engine events.
	const maxConcurrentRequests = 3

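	// A worked example of what these bounds imply (illustrative, not measured):
	// an update that emits 1,000 events produces 1000/50 = 20 batches; with at
	// most 3 requests in flight, at least 7 sequential rounds of HTTP requests
	// are needed, and a quiet stream still flushes whatever has accumulated
	// every 4 seconds.
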
	// We don't want to indicate that we are done processing every engine event in the
	// provided channel until every HTTP request has completed. We use a wait group to
	// track all of those requests.
	var wg sync.WaitGroup

	defer func() {
		wg.Wait()
		close(done)
	}()

	var eventBatch []engine.Event
	maxDelayTicker := time.NewTicker(maxTransmissionDelay)
	defer maxDelayTicker.Stop()

	// We maintain a sequence counter for each event so that the Pulumi Service can
	// reconstruct events in the same order they were emitted. (And not out of order
	// from parallel writes and/or network delays.)
	eventIdx := 0

	// As we identify batches of engine events to transmit, we put them into a channel.
	// This will allow us to issue HTTP requests concurrently, but also limit the maximum
	// number of requests in-flight at any one time.
	//
	// This channel isn't buffered, so adding a new batch of events to persist will block
	// until a goroutine is available to send the batch.
	batchesToTransmit := make(chan engineEventBatch)

	transmitBatchLoop := func() {
		defer wg.Done()

		for eventBatch := range batchesToTransmit {
			err := update.recordEngineEvents(eventBatch.sequenceStart, eventBatch.events)
			if err != nil {
				logging.V(3).Infof("error recording engine events: %s", err)
			}
		}
	}
	// Start N different goroutines which will all pull from the batchesToTransmit channel
	// and persist those engine events until the channel is closed. Note that we add to the
	// wait group before spawning each worker, so wg.Wait cannot observe a zero count
	// before the workers have started.
	for i := 0; i < maxConcurrentRequests; i++ {
		wg.Add(1)
		go transmitBatchLoop()
	}

	// transmitBatch sends off the current batch of engine events (eventIdx, eventBatch) to the
	// batchesToTransmit channel. It mutates eventIdx and eventBatch as a side effect.
	transmitBatch := func() {
		if len(eventBatch) == 0 {
			return
		}

		batch := engineEventBatch{
			sequenceStart: eventIdx,
			events:        eventBatch,
		}
		// This will block until one of the spawned goroutines is available to read the data,
		// effectively providing a global rate limit for how quickly we can send data to the
		// Pulumi Service if an update is particularly chatty.
		batchesToTransmit <- batch

		// With the values of eventIdx and eventBatch copied into the engineEventBatch,
		// we now update them for the next time transmitBatch is called.
		eventIdx += len(eventBatch)
		eventBatch = nil
	}

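	// For example (hypothetical run): if the first batch holds 50 events, it is
	// sent with sequenceStart == 0 and eventIdx advances to 50, so the next
	// batch's events are numbered from 50 onward, satisfying the uniqueness
	// requirement documented on recordEngineEvents.
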
	var sawCancelEvent bool
	for {
		select {
		case e := <-events:
			// Ignore debug events unless asked to.
			if isDebugDiagEvent(e) && !persistDebugEvents {
				break
			}

			// Stop processing once we see the CancelEvent.
			if e.Type == engine.CancelEvent {
				sawCancelEvent = true
				break
			}

			eventBatch = append(eventBatch, e)
			if len(eventBatch) >= maxEventsToTransmit {
				transmitBatch()
			}

		case <-maxDelayTicker.C:
			// If the ticker has fired, send any batched events. This sets an upper bound for
			// the delay between the event being observed and persisted.
			transmitBatch()
		}

		if sawCancelEvent {
			break
		}
	}

	// Transmit any lingering events.
	transmitBatch()
	// Closing the batchesToTransmit channel will signal the worker persistence routines to
	// terminate, which will trigger the `wg` WaitGroup to be marked as complete, which will
	// finally close the `done` channel so the caller knows we are finished processing the
	// engine event stream.
	close(batchesToTransmit)
}