github.com/myhau/pulumi/pkg/v3@v3.70.2-0.20221116134521-f2775972e587/backend/filestate/state.go

// Copyright 2016-2022, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package filestate

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/pulumi/pulumi/sdk/v3/go/common/util/retry"

	"github.com/pulumi/pulumi/pkg/v3/engine"

	"gocloud.dev/blob"
	"gocloud.dev/gcerrors"

	"github.com/pulumi/pulumi/pkg/v3/backend"
	"github.com/pulumi/pulumi/pkg/v3/resource/deploy"
	"github.com/pulumi/pulumi/pkg/v3/resource/stack"
	"github.com/pulumi/pulumi/pkg/v3/secrets"
	"github.com/pulumi/pulumi/sdk/v3/go/common/apitype"
	"github.com/pulumi/pulumi/sdk/v3/go/common/encoding"
	"github.com/pulumi/pulumi/sdk/v3/go/common/resource/config"
	"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
	"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
)

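// DisableCheckpointBackupsEnvVar is the name of the environment variable that, when set to a truthy
// value, disables the checkpoint backups that backupStack would otherwise write to the backup directory.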
const DisableCheckpointBackupsEnvVar = "PULUMI_DISABLE_CHECKPOINT_BACKUPS"

// DisableIntegrityChecking can be set to true to disable checkpoint state integrity verification. This is not
// recommended, because it could mean proceeding even in the face of a corrupted checkpoint state file, but can
// be used as a last resort when a command absolutely must be run.
var DisableIntegrityChecking bool

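// localQuery is an implementation of engine.QueryInfo backed by local state.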
type localQuery struct {
	root string
	proj *workspace.Project
}

func (q *localQuery) GetRoot() string {
	return q.root
}

func (q *localQuery) GetProject() *workspace.Project {
	return q.proj
}

// update is an implementation of engine.Update backed by local state.
type update struct {
	root    string
	proj    *workspace.Project
	target  *deploy.Target
	backend *localBackend
}

func (u *update) GetRoot() string {
	return u.root
}

func (u *update) GetProject() *workspace.Project {
	return u.proj
}

func (u *update) GetTarget() *deploy.Target {
	return u.target
}

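// newQuery constructs an engine.QueryInfo for a query operation against local state.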
func (b *localBackend) newQuery(
	ctx context.Context,
	op backend.QueryOperation) (engine.QueryInfo, error) {
	return &localQuery{root: op.Root, proj: op.Proj}, nil
}

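// newUpdate constructs an update for the given stack, loading the stack's current checkpoint
// to build the deployment target.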
func (b *localBackend) newUpdate(
	ctx context.Context,
	stackName tokens.Name,
	op backend.UpdateOperation) (*update, error) {
	contract.Require(stackName != "", "stackName")

	// Construct the deployment target.
	target, err := b.getTarget(ctx, stackName,
		op.StackConfiguration.Config, op.StackConfiguration.Decrypter)
	if err != nil {
		return nil, err
	}

	// Construct and return a new update.
	return &update{
		root:    op.Root,
		proj:    op.Proj,
		target:  target,
		backend: b,
	}, nil
}

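// getTarget builds the deploy.Target for the given stack, pairing the supplied configuration
// and decrypter with the stack's latest snapshot.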
func (b *localBackend) getTarget(
	ctx context.Context,
	stackName tokens.Name,
	cfg config.Map,
	dec config.Decrypter) (*deploy.Target, error) {
	snapshot, _, err := b.getStack(ctx, stackName)
	if err != nil {
		return nil, err
	}
	return &deploy.Target{
		Name:         stackName,
		Organization: "", // filestate has no organizations
		Config:       cfg,
		Decrypter:    dec,
		Snapshot:     snapshot,
	}, nil
}

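// getStack loads the named stack's checkpoint and deserializes it into a snapshot, verifying the
// snapshot's integrity unless that is disabled. It returns the snapshot along with the path to the
// checkpoint file it was loaded from.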
func (b *localBackend) getStack(
	ctx context.Context,
	name tokens.Name) (*deploy.Snapshot, string, error) {
	if name == "" {
		return nil, "", errors.New("invalid empty stack name")
	}

	file := b.stackPath(name)

	chk, err := b.getCheckpoint(name)
	if err != nil {
		return nil, file, fmt.Errorf("failed to load checkpoint: %w", err)
	}

	// Materialize an actual snapshot object.
	snapshot, err := stack.DeserializeCheckpoint(ctx, chk)
	if err != nil {
		return nil, file, err
	}

	// Ensure the snapshot passes verification before returning it, to catch bugs early.
	if !DisableIntegrityChecking {
		if verifyerr := snapshot.VerifyIntegrity(); verifyerr != nil {
			return nil, file, fmt.Errorf("%s: snapshot integrity failure; refusing to use it: %w", file, verifyerr)
		}
	}

	return snapshot, file, nil
}

// getCheckpoint loads a checkpoint file for the given stack in this project, from the current project workspace.
func (b *localBackend) getCheckpoint(stackName tokens.Name) (*apitype.CheckpointV3, error) {
	chkpath := b.stackPath(stackName)
	bytes, err := b.bucket.ReadAll(context.TODO(), chkpath)
	if err != nil {
		return nil, err
	}
	m := encoding.JSON
	if encoding.IsCompressed(bytes) {
		m = encoding.Gzip(m)
	}

	return stack.UnmarshalVersionedCheckpointToLatestCheckpoint(m, bytes)
}

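// saveStack persists the given snapshot as the stack's checkpoint file, backing up any existing
// file first, and returns the path of the file it wrote.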
func (b *localBackend) saveStack(name tokens.Name, snap *deploy.Snapshot, sm secrets.Manager) (string, error) {
	// Make a serializable stack and then use the encoder to encode it.
	file := b.stackPath(name)
	m, ext := encoding.Detect(strings.TrimSuffix(file, ".gz"))
	if m == nil {
		return "", fmt.Errorf("resource serialization failed; illegal markup extension: '%v'", ext)
	}
	if filepath.Ext(file) == "" {
		file = file + ext
	}
	if b.gzip {
		if filepath.Ext(file) != encoding.GZIPExt {
			file = file + ".gz"
		}
		m = encoding.Gzip(m)
	} else {
		file = strings.TrimSuffix(file, ".gz")
	}

	chk, err := stack.SerializeCheckpoint(name, snap, sm, false /* showSecrets */)
	if err != nil {
		return "", fmt.Errorf("serializing checkpoint: %w", err)
	}
	byts, err := m.Marshal(chk)
	if err != nil {
		return "", fmt.Errorf("an IO error occurred while marshalling the checkpoint: %w", err)
	}

	// Back up the existing file if it already exists. Don't delete the original; the following WriteAll will
	// atomically replace it anyway, and various other bits of the system depend on being able to find the
	// .json file to know the stack currently exists (see https://github.com/pulumi/pulumi/issues/9033 for
	// context).
	filePlain := strings.TrimSuffix(file, ".gz")
	fileGzip := filePlain + ".gz"
	// We need to make sure that an out-of-date state file doesn't exist, so we
	// only keep the file of the type we are working with.
	bckGzip := backupTarget(b.bucket, fileGzip, b.gzip)
	bckPlain := backupTarget(b.bucket, filePlain, !b.gzip)
	var bck string
	if b.gzip {
		bck = bckGzip
	} else {
		bck = bckPlain
	}

	// And now write out the new snapshot file, overwriting that location.
	if err = b.bucket.WriteAll(context.TODO(), file, byts, nil); err != nil {
		b.mutex.Lock()
		defer b.mutex.Unlock()

		// FIXME: Would be nice to make these configurable.
		delay, _ := time.ParseDuration("1s")
		maxDelay, _ := time.ParseDuration("30s")
		backoff := 1.2

		// Retry the write 10 times in case of upstream bucket errors.
		_, _, err = retry.Until(context.TODO(), retry.Acceptor{
			Delay:    &delay,
			MaxDelay: &maxDelay,
			Backoff:  &backoff,
			Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
				// Attempt the write again, overwriting that location.
				err := b.bucket.WriteAll(context.TODO(), file, byts, nil)
				if err != nil {
					logging.V(7).Infof("Error while writing snapshot to: %s (attempt=%d, error=%s)", file, try, err)
					if try > 10 {
						return false, nil, fmt.Errorf("an IO error occurred while writing the new snapshot file: %w", err)
					}
					return false, nil, nil
				}
				return true, nil, nil
			},
		})
		if err != nil {
			return "", err
		}
	}

	logging.V(7).Infof("Saved stack %s checkpoint to: %s (backup=%s)", name, file, bck)

	// And if we are retaining historical checkpoint information, write it out again.
	if cmdutil.IsTruthy(os.Getenv("PULUMI_RETAIN_CHECKPOINTS")) {
		if err = b.bucket.WriteAll(context.TODO(), fmt.Sprintf("%v.%v", file, time.Now().UnixNano()), byts, nil); err != nil {
			return "", fmt.Errorf("an IO error occurred while writing the new snapshot file: %w", err)
		}
	}

	if !DisableIntegrityChecking {
		// Finally, *after* writing the checkpoint, check its integrity. This is done afterwards so that the
		// checkpoint file, which may contain resource state updates, is still written out; the error warns
		// the user that the file has already been written and might be bad.
		if verifyerr := snap.VerifyIntegrity(); verifyerr != nil {
			return "", fmt.Errorf(
				"%s: snapshot integrity failure; it was already written, but is invalid (backup available at %s): %w",
				file, bck, verifyerr)
		}
	}

	return file, nil
}

// removeStack removes information about a stack from the current workspace.
func (b *localBackend) removeStack(name tokens.Name) error {
	contract.Require(name != "", "name")

	// Just make a backup of the file and don't write out anything new.
	file := b.stackPath(name)
	backupTarget(b.bucket, file, false)

	historyDir := b.historyDirectory(name)
	return removeAllByPrefix(b.bucket, historyDir)
}

// backupTarget makes a backup of an existing file, in preparation for writing a new one.
func backupTarget(bucket Bucket, file string, keepOriginal bool) string {
	contract.Require(file != "", "file")
	bck := file + ".bak"

	err := bucket.Copy(context.TODO(), bck, file, nil)
	if err != nil {
		logging.V(5).Infof("error copying %s to %s: %s", file, bck, err)
	}

	if !keepOriginal {
		err = bucket.Delete(context.TODO(), file)
		if err != nil {
			logging.V(5).Infof("error deleting source object after rename: %v (%v); skipping", file, err)
		}
	}

	// IDEA: consider multiple backups (.bak.bak.bak...etc).
	return bck
}

// backupStack copies the current checkpoint file to ~/.pulumi/backups.
func (b *localBackend) backupStack(name tokens.Name) error {
	contract.Require(name != "", "name")

	// Exit early if backups are disabled.
	if cmdutil.IsTruthy(os.Getenv(DisableCheckpointBackupsEnvVar)) {
		return nil
	}

	// Read the current checkpoint file. (Assuming it already exists.)
	stackPath := b.stackPath(name)
	byts, err := b.bucket.ReadAll(context.TODO(), stackPath)
	if err != nil {
		return err
	}

	// Get the backup directory.
	backupDir := b.backupDirectory(name)

	// Write out the new backup checkpoint file.
	stackFile := filepath.Base(stackPath)
	ext := filepath.Ext(stackFile)
	base := strings.TrimSuffix(stackFile, ext)
	if ext2 := filepath.Ext(base); ext2 != "" && ext == encoding.GZIPExt {
		// base: stack-name.json, ext: .gz
		// ->
		// base: stack-name, ext: .json.gz
		ext = ext2 + ext
		base = strings.TrimSuffix(base, ext2)
	}
	backupFile := fmt.Sprintf("%s.%v%s", base, time.Now().UnixNano(), ext)
	return b.bucket.WriteAll(context.TODO(), filepath.Join(backupDir, backupFile), byts, nil)
}

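// stackPath returns the path to the named stack's checkpoint file. If both a plain .json file and
// a gzipped .json.gz file exist, the more recently modified of the two wins.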
func (b *localBackend) stackPath(stack tokens.Name) string {
	path := filepath.Join(b.StateDir(), workspace.StackDir)
	if stack == "" {
		return path
	}

	// We can't use listBucket here, as we need to do a partial prefix match on the filename, while the
	// "dir" option to listBucket is always suffixed with "/". This also means we don't need to save any
	// results in a slice.
	plainPath := filepath.ToSlash(filepath.Join(path, fsutil.NamePath(stack)) + ".json")
	gzippedPath := plainPath + ".gz"

	bucketIter := b.bucket.List(&blob.ListOptions{
		Delimiter: "/",
		Prefix:    plainPath,
	})

	var plainObj *blob.ListObject
	ctx := context.TODO()
	for {
		file, err := bucketIter.Next(ctx)
		if err == io.EOF {
			break
		}
		if err != nil {
			// Error fetching the available objects; assume .json.
			return plainPath
		}

		// plainObj will always come out first, since the listing is sorted by Key.
		if file.Key == plainPath {
			plainObj = file
		} else if file.Key == gzippedPath {
			// We have a plain .json file, and it was modified after this gzipped one, so use it.
			if plainObj != nil && plainObj.ModTime.After(file.ModTime) {
				return plainPath
			}
			// Otherwise, use the gzipped object.
			return gzippedPath
		}
	}
	// Couldn't find any objects; assume the non-gzipped path.
	return plainPath
}

func (b *localBackend) historyDirectory(stack tokens.Name) string {
	contract.Require(stack != "", "stack")
	return filepath.Join(b.StateDir(), workspace.HistoryDir, fsutil.NamePath(stack))
}

func (b *localBackend) backupDirectory(stack tokens.Name) string {
	contract.Require(stack != "", "stack")
	return filepath.Join(b.StateDir(), workspace.BackupDir, fsutil.NamePath(stack))
}

// getHistory returns locally stored update history. The first element of the result will be
// the most recent update record.
func (b *localBackend) getHistory(name tokens.Name, pageSize int, page int) ([]backend.UpdateInfo, error) {
	contract.Require(name != "", "name")

	dir := b.historyDirectory(name)
	// TODO: we could consider optimizing the list operation using `page` and `pageSize`.
	// Unfortunately, this is mildly invasive given the gocloud List API.
	allFiles, err := listBucket(b.bucket, dir)
	if err != nil {
		// History doesn't exist until a stack has been updated.
		if gcerrors.Code(err) == gcerrors.NotFound {
			return nil, nil
		}
		return nil, err
	}

	var historyEntries []*blob.ListObject

	// Filter down to just history entries, reversing the list to be in most-recent-first order.
	// listBucket returns the array sorted by file name, but because of how we name files, older updates come
	// before newer ones.
	for i := len(allFiles) - 1; i >= 0; i-- {
		file := allFiles[i]
		key := file.Key

		// Ignore checkpoints.
		if !strings.HasSuffix(key, ".history.json") &&
			!strings.HasSuffix(key, ".history.json.gz") {
			continue
		}

		historyEntries = append(historyEntries, file)
	}

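	// Compute the (1-based) page window over the history entries, clamping its end to the last entry.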
	start := 0
	end := len(historyEntries) - 1
	if pageSize > 0 {
		if page < 1 {
			page = 1
		}
		start = (page - 1) * pageSize
		end = start + pageSize - 1
		if end > len(historyEntries)-1 {
			end = len(historyEntries) - 1
		}
	}

	var updates []backend.UpdateInfo

	for i := start; i <= end; i++ {
		file := historyEntries[i]
		key := file.Key

		var update backend.UpdateInfo
		byts, err := b.bucket.ReadAll(context.TODO(), key)
		if err != nil {
			return nil, fmt.Errorf("reading history file %s: %w", key, err)
		}
		m := encoding.JSON
		if encoding.IsCompressed(byts) {
			m = encoding.Gzip(m)
		}
		err = m.Unmarshal(byts, &update)
		if err != nil {
			return nil, fmt.Errorf("reading history file %s: %w", key, err)
		}

		updates = append(updates, update)
	}

	return updates, nil
}

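// renameHistory moves a stack's history entries from the old stack name to the new one, rewriting
// the stack-name portion of each history and checkpoint file name.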
func (b *localBackend) renameHistory(oldName tokens.Name, newName tokens.Name) error {
	contract.Require(oldName != "", "oldName")
	contract.Require(newName != "", "newName")

	oldHistory := b.historyDirectory(oldName)
	newHistory := b.historyDirectory(newName)

	allFiles, err := listBucket(b.bucket, oldHistory)
	if err != nil {
		// If there's nothing there, we don't really need to do a rename.
		if gcerrors.Code(err) == gcerrors.NotFound {
			return nil
		}
		return err
	}

	for _, file := range allFiles {
		fileName := objectName(file)
		oldBlob := path.Join(oldHistory, fileName)

		// The filename format is <stack-name>-<timestamp>.[checkpoint|history].json[.gz]; we need to change
		// the stack name part but retain the other parts. If we find files that don't match this format,
		// ignore them.
		dashIndex := strings.LastIndex(fileName, "-")
		if dashIndex == -1 || (fileName[:dashIndex] != oldName.String()) {
			// No dash, or the string up to the dash isn't the old name.
			continue
		}

		newFileName := string(newName) + fileName[dashIndex:]
		newBlob := path.Join(newHistory, newFileName)

		if err := b.bucket.Copy(context.TODO(), newBlob, oldBlob, nil); err != nil {
			return fmt.Errorf("copying history file: %w", err)
		}
		if err := b.bucket.Delete(context.TODO(), oldBlob); err != nil {
			return fmt.Errorf("deleting existing history file: %w", err)
		}
	}

	return nil
}

// addToHistory saves the UpdateInfo and makes a copy of the current checkpoint file.
func (b *localBackend) addToHistory(name tokens.Name, update backend.UpdateInfo) error {
	contract.Require(name != "", "name")

	dir := b.historyDirectory(name)

	// Prefix for the update and checkpoint files.
	pathPrefix := path.Join(dir, fmt.Sprintf("%s-%d", name, time.Now().UnixNano()))

	m, ext := encoding.JSON, "json"
	if b.gzip {
		m = encoding.Gzip(m)
		ext += ".gz"
	}

	// Save the history file.
	byts, err := m.Marshal(&update)
	if err != nil {
		return err
	}

	historyFile := fmt.Sprintf("%s.history.%s", pathPrefix, ext)
	if err = b.bucket.WriteAll(context.TODO(), historyFile, byts, nil); err != nil {
		return err
	}

	// Make a copy of the checkpoint file. (Assuming it already exists.)
	checkpointFile := fmt.Sprintf("%s.checkpoint.%s", pathPrefix, ext)
	return b.bucket.Copy(context.TODO(), checkpointFile, b.stackPath(name), nil)
}